Commit b0a0ccfa authored by Greg Kroah-Hartman

Staging: android: delete android drivers

These drivers are no longer being developed and the original authors
seem to have abandoned them and, hence, do not want them in the mainline
kernel tree.

So sad :(

Cc: Brian Swetland <swetland@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 99fd99f6
@@ -83,8 +83,6 @@ source "drivers/staging/mimio/Kconfig"
 source "drivers/staging/frontier/Kconfig"
-source "drivers/staging/android/Kconfig"
 source "drivers/staging/dream/Kconfig"
 source "drivers/staging/dst/Kconfig"
...
@@ -24,7 +24,6 @@ obj-$(CONFIG_RTL8192SU) += rtl8192su/
 obj-$(CONFIG_RTL8192E) += rtl8192e/
 obj-$(CONFIG_INPUT_MIMIO) += mimio/
 obj-$(CONFIG_TRANZPORT) += frontier/
-obj-$(CONFIG_ANDROID) += android/
 obj-$(CONFIG_DREAM) += dream/
 obj-$(CONFIG_DST) += dst/
 obj-$(CONFIG_POHMELFS) += pohmelfs/
...
menu "Android"
config ANDROID
bool "Android Drivers"
depends on BROKEN
default N
---help---
Enable support for various drivers needed on the Android platform
if ANDROID
config ANDROID_BINDER_IPC
bool "Android Binder IPC Driver"
default n
config ANDROID_LOGGER
tristate "Android log driver"
default n
config ANDROID_RAM_CONSOLE
bool "Android RAM buffer console"
default n
config ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
bool "Enable verbose console messages on Android RAM console"
default y
depends on ANDROID_RAM_CONSOLE
menuconfig ANDROID_RAM_CONSOLE_ERROR_CORRECTION
bool "Android RAM Console Enable error correction"
default n
depends on ANDROID_RAM_CONSOLE
depends on !ANDROID_RAM_CONSOLE_EARLY_INIT
select REED_SOLOMON
select REED_SOLOMON_ENC8
select REED_SOLOMON_DEC8
if ANDROID_RAM_CONSOLE_ERROR_CORRECTION
config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE
int "Android RAM Console Data data size"
default 128
help
Must be a power of 2.
config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
int "Android RAM Console ECC size"
default 16
config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
int "Android RAM Console Symbol size"
default 8
config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
hex "Android RAM Console Polynomial"
default 0x19 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 4)
default 0x29 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 5)
default 0x61 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 6)
default 0x89 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 7)
default 0x11d if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 8)
endif # ANDROID_RAM_CONSOLE_ERROR_CORRECTION
config ANDROID_RAM_CONSOLE_EARLY_INIT
bool "Start Android RAM console early"
default n
depends on ANDROID_RAM_CONSOLE
config ANDROID_RAM_CONSOLE_EARLY_ADDR
hex "Android RAM console virtual address"
default 0
depends on ANDROID_RAM_CONSOLE_EARLY_INIT
config ANDROID_RAM_CONSOLE_EARLY_SIZE
hex "Android RAM console buffer size"
default 0
depends on ANDROID_RAM_CONSOLE_EARLY_INIT
config ANDROID_TIMED_OUTPUT
bool "Timed output class driver"
default y
config ANDROID_TIMED_GPIO
tristate "Android timed gpio driver"
depends on GENERIC_GPIO && ANDROID_TIMED_OUTPUT
default n
config ANDROID_LOW_MEMORY_KILLER
bool "Android Low Memory Killer"
default N
---help---
Register processes to be killed when memory is low
endif # if ANDROID
endmenu
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
obj-$(CONFIG_ANDROID_LOGGER) += logger.o
obj-$(CONFIG_ANDROID_RAM_CONSOLE) += ram_console.o
obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o
obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o
obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
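The Makefile entries above key on the symbols defined in the Kconfig just shown. For reference only, a hypothetical .config fragment that would build all of these drivers follows; it is not part of this commit, and in practice the whole menu is hidden behind "depends on BROKEN", so these lines only take effect if CONFIG_BROKEN is somehow enabled.

# Illustrative .config fragment (not part of this commit)
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
CONFIG_ANDROID_LOGGER=y
CONFIG_ANDROID_RAM_CONSOLE=y
CONFIG_ANDROID_TIMED_OUTPUT=y
CONFIG_ANDROID_TIMED_GPIO=m
CONFIG_ANDROID_LOW_MEMORY_KILLER=y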
TODO:
- checkpatch.pl cleanups
- sparse fixes
- rename files to be not so "generic"
- make sure things build as modules properly
- add proper arch dependencies as needed
- audit userspace interfaces to make sure they are sane
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
Brian Swetland <swetland@google.com>
This source diff could not be displayed because it is too large; it is omitted here.
/*
* Copyright (C) 2008 Google, Inc.
*
* Based on, but no longer compatible with, the original
* OpenBinder.org binder driver interface, which is:
*
* Copyright (c) 2005 Palmsource, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _LINUX_BINDER_H
#define _LINUX_BINDER_H
#include <linux/ioctl.h>
#define B_PACK_CHARS(c1, c2, c3, c4) \
((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
#define B_TYPE_LARGE 0x85
enum {
BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
};
enum {
FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
};
/*
* This is the flattened representation of a Binder object for transfer
* between processes. The 'offsets' supplied as part of a binder transaction
* contains offsets into the data where these structures occur. The Binder
* driver takes care of re-writing the structure type and data as it moves
* between processes.
*/
struct flat_binder_object {
/* 8 bytes for large_flat_header. */
unsigned long type;
unsigned long flags;
/* 8 bytes of data. */
union {
void *binder; /* local object */
signed long handle; /* remote object */
};
/* extra data associated with local object */
void *cookie;
};
/*
 * On 64-bit platforms where user code may run in 32-bit mode, the driver
 * must translate the buffer (and local binder) addresses appropriately.
 */
struct binder_write_read {
signed long write_size; /* bytes to write */
signed long write_consumed; /* bytes consumed by driver */
unsigned long write_buffer;
signed long read_size; /* bytes to read */
signed long read_consumed; /* bytes consumed by driver */
unsigned long read_buffer;
};
/* Use with BINDER_VERSION, driver fills in fields. */
struct binder_version {
/* driver protocol version -- increment with incompatible change */
signed long protocol_version;
};
/* This is the current protocol version. */
#define BINDER_CURRENT_PROTOCOL_VERSION 7
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, int64_t)
#define BINDER_SET_MAX_THREADS _IOW('b', 5, size_t)
#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, int)
#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, int)
#define BINDER_THREAD_EXIT _IOW('b', 8, int)
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
/*
 * NOTE: Two special error codes you should check for when calling
 * into the driver are:
 *
 * EINTR -- The operation has been interrupted. This should be
 * handled by retrying the ioctl() until a different error code
 * is returned.
 *
 * ECONNREFUSED -- The driver is no longer accepting operations
 * from your process. That is, the process is being destroyed.
 * You should handle this by exiting from your process. Note
 * that once this error code is returned, all further calls to
 * the driver from any thread will return this same code.
 */
enum transaction_flags {
TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */
TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
};
struct binder_transaction_data {
/* The first two are only used for bcTRANSACTION and brTRANSACTION,
* identifying the target and contents of the transaction.
*/
union {
size_t handle; /* target descriptor of command transaction */
void *ptr; /* target descriptor of return transaction */
} target;
void *cookie; /* target object cookie */
unsigned int code; /* transaction command */
/* General information about the transaction. */
unsigned int flags;
pid_t sender_pid;
uid_t sender_euid;
size_t data_size; /* number of bytes of data */
size_t offsets_size; /* number of bytes of offsets */
/* If this transaction is inline, the data immediately
* follows here; otherwise, it ends with a pointer to
* the data buffer.
*/
union {
struct {
/* transaction data */
const void *buffer;
/* offsets from buffer to flat_binder_object structs */
const void *offsets;
} ptr;
uint8_t buf[8];
} data;
};
struct binder_ptr_cookie {
void *ptr;
void *cookie;
};
struct binder_pri_desc {
int priority;
int desc;
};
struct binder_pri_ptr_cookie {
int priority;
void *ptr;
void *cookie;
};
enum BinderDriverReturnProtocol {
BR_ERROR = _IOR('r', 0, int),
/*
* int: error code
*/
BR_OK = _IO('r', 1),
/* No parameters! */
BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
/*
* binder_transaction_data: the received command.
*/
BR_ACQUIRE_RESULT = _IOR('r', 4, int),
/*
* not currently supported
* int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
* Else the remote object has acquired a primary reference.
*/
BR_DEAD_REPLY = _IO('r', 5),
/*
* The target of the last transaction (either a bcTRANSACTION or
* a bcATTEMPT_ACQUIRE) is no longer with us. No parameters.
*/
BR_TRANSACTION_COMPLETE = _IO('r', 6),
/*
* No parameters... always refers to the last transaction requested
* (including replies). Note that this will be sent even for
* asynchronous transactions.
*/
BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
/*
* void *: ptr to binder
* void *: cookie for binder
*/
BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
/*
* not currently supported
* int: priority
* void *: ptr to binder
* void *: cookie for binder
*/
BR_NOOP = _IO('r', 12),
/*
* No parameters. Do nothing and examine the next command. It exists
* primarily so that we can replace it with a BR_SPAWN_LOOPER command.
*/
BR_SPAWN_LOOPER = _IO('r', 13),
/*
 * No parameters. The driver has determined that a process has no
 * threads waiting to service incoming transactions. When a process
 * receives this command, it must spawn a new service thread and
 * register it via bcENTER_LOOPER.
 */
BR_FINISHED = _IO('r', 14),
/*
* not currently supported
* stop threadpool thread
*/
BR_DEAD_BINDER = _IOR('r', 15, void *),
/*
* void *: cookie
*/
BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
/*
* void *: cookie
*/
BR_FAILED_REPLY = _IO('r', 17),
/*
 * The last transaction (either a bcTRANSACTION or
 * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
 */
};
enum BinderDriverCommandProtocol {
BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
/*
* binder_transaction_data: the sent command.
*/
BC_ACQUIRE_RESULT = _IOW('c', 2, int),
/*
* not currently supported
* int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful.
* Else you have acquired a primary reference on the object.
*/
BC_FREE_BUFFER = _IOW('c', 3, int),
/*
* void *: ptr to transaction data received on a read
*/
BC_INCREFS = _IOW('c', 4, int),
BC_ACQUIRE = _IOW('c', 5, int),
BC_RELEASE = _IOW('c', 6, int),
BC_DECREFS = _IOW('c', 7, int),
/*
* int: descriptor
*/
BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
/*
* void *: ptr to binder
* void *: cookie for binder
*/
BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
/*
* not currently supported
* int: priority
* int: descriptor
*/
BC_REGISTER_LOOPER = _IO('c', 11),
/*
* No parameters.
* Register a spawned looper thread with the device.
*/
BC_ENTER_LOOPER = _IO('c', 12),
BC_EXIT_LOOPER = _IO('c', 13),
/*
* No parameters.
* These two commands are sent as an application-level thread
* enters and exits the binder loop, respectively. They are
* used so the binder can have an accurate count of the number
* of looping threads it has available.
*/
BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie),
/*
* void *: ptr to binder
* void *: cookie
*/
BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie),
/*
* void *: ptr to binder
* void *: cookie
*/
BC_DEAD_BINDER_DONE = _IOW('c', 16, void *),
/*
* void *: cookie
*/
};
#endif /* _LINUX_BINDER_H */
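The note above recommends retrying ioctl() calls that fail with EINTR until a different error code comes back. The following user-space sketch of that pattern, querying the driver protocol version, is illustrative only: it assumes the device node is /dev/binder and that this header is installed as <linux/binder.h>, neither of which is established by this commit.

/* Illustrative only: retry-on-EINTR wrapper around the binder ioctls above. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/binder.h>	/* assumed install location of the header above */

static int binder_ioctl_retry(int fd, unsigned long request, void *arg)
{
	int ret;

	do {
		ret = ioctl(fd, request, arg);
	} while (ret < 0 && errno == EINTR);	/* retry, as the header advises */

	return ret;
}

int main(void)
{
	struct binder_version vers = { 0 };
	int fd = open("/dev/binder", O_RDWR);	/* device node name is an assumption */

	if (fd < 0) {
		perror("open /dev/binder");
		return 1;
	}

	if (binder_ioctl_retry(fd, BINDER_VERSION, &vers) < 0)
		perror("BINDER_VERSION");
	else if (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
		fprintf(stderr, "binder: protocol mismatch (%ld != %d)\n",
			vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
	else
		printf("binder protocol version %ld\n", vers.protocol_version);

	close(fd);
	return 0;
}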
/*
* drivers/misc/logger.c
*
* A Logging Subsystem
*
* Copyright (C) 2007-2008 Google, Inc.
*
* Robert Love <rlove@google.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/time.h>
#include "logger.h"
#include <asm/ioctls.h>
/*
* struct logger_log - represents a specific log, such as 'main' or 'radio'
*
* This structure lives from module insertion until module removal, so it does
* not need additional reference counting. The structure is protected by the
* mutex 'mutex'.
*/
struct logger_log {
unsigned char *buffer;/* the ring buffer itself */
struct miscdevice misc; /* misc device representing the log */
wait_queue_head_t wq; /* wait queue for readers */
struct list_head readers; /* this log's readers */
struct mutex mutex; /* mutex protecting buffer */
size_t w_off; /* current write head offset */
size_t head; /* new readers start here */
size_t size; /* size of the log */
};
/*
* struct logger_reader - a logging device open for reading
*
* This object lives from open to release, so we don't need additional
* reference counting. The structure is protected by log->mutex.
*/
struct logger_reader {
struct logger_log *log; /* associated log */
struct list_head list; /* entry in logger_log's list */
size_t r_off; /* current read head offset */
};
/* logger_offset - returns index 'n' into the log via (optimized) modulus */
#define logger_offset(n) ((n) & (log->size - 1))
/*
* file_get_log - Given a file structure, return the associated log
*
* This isn't aesthetic. We have several goals:
*
* 1) Need to quickly obtain the associated log during an I/O operation
* 2) Readers need to maintain state (logger_reader)
* 3) Writers need to be very fast (open() should be a near no-op)
*
* In the reader case, we can trivially go file->logger_reader->logger_log.
* For a writer, we don't want to maintain a logger_reader, so we just go
* file->logger_log. Thus what file->private_data points at depends on whether
* or not the file was opened for reading. This function hides that dirtiness.
*/
static inline struct logger_log *file_get_log(struct file *file)
{
if (file->f_mode & FMODE_READ) {
struct logger_reader *reader = file->private_data;
return reader->log;
} else
return file->private_data;
}
/*
* get_entry_len - Grabs the length of the payload of the next entry starting
* from 'off'.
*
* Caller needs to hold log->mutex.
*/
static __u32 get_entry_len(struct logger_log *log, size_t off)
{
__u16 val;
switch (log->size - off) {
case 1:
memcpy(&val, log->buffer + off, 1);
memcpy(((char *) &val) + 1, log->buffer, 1);
break;
default:
memcpy(&val, log->buffer + off, 2);
}
return sizeof(struct logger_entry) + val;
}
/*
* do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
* user-space buffer 'buf'. Returns 'count' on success.
*
* Caller must hold log->mutex.
*/
static ssize_t do_read_log_to_user(struct logger_log *log,
struct logger_reader *reader,
char __user *buf,
size_t count)
{
size_t len;
/*
* We read from the log in two disjoint operations. First, we read from
* the current read head offset up to 'count' bytes or to the end of
* the log, whichever comes first.
*/
len = min(count, log->size - reader->r_off);
if (copy_to_user(buf, log->buffer + reader->r_off, len))
return -EFAULT;
/*
* Second, we read any remaining bytes, starting back at the head of
* the log.
*/
if (count != len)
if (copy_to_user(buf + len, log->buffer, count - len))
return -EFAULT;
reader->r_off = logger_offset(reader->r_off + count);
return count;
}
/*
* logger_read - our log's read() method
*
* Behavior:
*
* - O_NONBLOCK works
* - If there are no log entries to read, blocks until log is written to
* - Atomically reads exactly one log entry
*
* Optimal read size is LOGGER_ENTRY_MAX_LEN. Will set errno to EINVAL if read
* buffer is insufficient to hold next entry.
*/
static ssize_t logger_read(struct file *file, char __user *buf,
size_t count, loff_t *pos)
{
struct logger_reader *reader = file->private_data;
struct logger_log *log = reader->log;
ssize_t ret;
DEFINE_WAIT(wait);
start:
while (1) {
prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);
mutex_lock(&log->mutex);
ret = (log->w_off == reader->r_off);
mutex_unlock(&log->mutex);
if (!ret)
break;
if (file->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
break;
}
if (signal_pending(current)) {
ret = -EINTR;
break;
}
schedule();
}
finish_wait(&log->wq, &wait);
if (ret)
return ret;
mutex_lock(&log->mutex);
/* is there still something to read or did we race? */
if (unlikely(log->w_off == reader->r_off)) {
mutex_unlock(&log->mutex);
goto start;
}
/* get the size of the next entry */
ret = get_entry_len(log, reader->r_off);
if (count < ret) {
ret = -EINVAL;
goto out;
}
/* get exactly one entry from the log */
ret = do_read_log_to_user(log, reader, buf, ret);
out:
mutex_unlock(&log->mutex);
return ret;
}
/*
* get_next_entry - return the offset of the first valid entry at least 'len'
* bytes after 'off'.
*
* Caller must hold log->mutex.
*/
static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
{
size_t count = 0;
do {
size_t nr = get_entry_len(log, off);
off = logger_offset(off + nr);
count += nr;
} while (count < len);
return off;
}
/*
* clock_interval - is a < c < b in mod-space? Put another way, does the line
* from a to b cross c?
*/
static inline int clock_interval(size_t a, size_t b, size_t c)
{
if (b < a) {
if (a < c || b >= c)
return 1;
} else {
if (a < c && b >= c)
return 1;
}
return 0;
}
/*
* fix_up_readers - walk the list of all readers and "fix up" any who were
* lapped by the writer; also do the same for the default "start head".
* We do this by "pulling forward" the readers and start head to the first
* entry after the new write head.
*
* The caller needs to hold log->mutex.
*/
static void fix_up_readers(struct logger_log *log, size_t len)
{
size_t old = log->w_off;
size_t new = logger_offset(old + len);
struct logger_reader *reader;
if (clock_interval(old, new, log->head))
log->head = get_next_entry(log, log->head, len);
list_for_each_entry(reader, &log->readers, list)
if (clock_interval(old, new, reader->r_off))
reader->r_off = get_next_entry(log, reader->r_off, len);
}
/*
* do_write_log - writes 'count' bytes from 'buf' to 'log'
*
* The caller needs to hold log->mutex.
*/
static void do_write_log(struct logger_log *log, const void *buf, size_t count)
{
size_t len;
len = min(count, log->size - log->w_off);
memcpy(log->buffer + log->w_off, buf, len);
if (count != len)
memcpy(log->buffer, buf + len, count - len);
log->w_off = logger_offset(log->w_off + count);
}
/*
* do_write_log_from_user - writes 'count' bytes from the user-space buffer
* 'buf' to the log 'log'
*
* The caller needs to hold log->mutex.
*
* Returns 'count' on success, negative error code on failure.
*/
static ssize_t do_write_log_from_user(struct logger_log *log,
const void __user *buf, size_t count)
{
size_t len;
len = min(count, log->size - log->w_off);
if (len && copy_from_user(log->buffer + log->w_off, buf, len))
return -EFAULT;
if (count != len)
if (copy_from_user(log->buffer, buf + len, count - len))
return -EFAULT;
log->w_off = logger_offset(log->w_off + count);
return count;
}
/*
* logger_aio_write - our write method, implementing support for write(),
* writev(), and aio_write(). Writes are our fast path, and we try to optimize
* them above all else.
*/
ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t ppos)
{
struct logger_log *log = file_get_log(iocb->ki_filp);
size_t orig = log->w_off;
struct logger_entry header;
struct timespec now;
ssize_t ret = 0;
now = current_kernel_time();
header.pid = current->tgid;
header.tid = current->pid;
header.sec = now.tv_sec;
header.nsec = now.tv_nsec;
header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD);
/* null writes succeed, return zero */
if (unlikely(!header.len))
return 0;
mutex_lock(&log->mutex);
/*
* Fix up any readers, pulling them forward to the first readable
* entry after (what will be) the new write offset. We do this now
* because if we partially fail, we can end up with clobbered log
* entries that encroach on readable buffer.
*/
fix_up_readers(log, sizeof(struct logger_entry) + header.len);
do_write_log(log, &header, sizeof(struct logger_entry));
while (nr_segs-- > 0) {
size_t len;
ssize_t nr;
/* figure out how much of this vector we can keep */
len = min_t(size_t, iov->iov_len, header.len - ret);
/* write out this segment's payload */
nr = do_write_log_from_user(log, iov->iov_base, len);
if (unlikely(nr < 0)) {
log->w_off = orig;
mutex_unlock(&log->mutex);
return nr;
}
iov++;
ret += nr;
}
mutex_unlock(&log->mutex);
/* wake up any blocked readers */
wake_up_interruptible(&log->wq);
return ret;
}
static struct logger_log *get_log_from_minor(int);
/*
* logger_open - the log's open() file operation
*
* Note how near a no-op this is in the write-only case. Keep it that way!
*/
static int logger_open(struct inode *inode, struct file *file)
{
struct logger_log *log;
int ret;
ret = nonseekable_open(inode, file);
if (ret)
return ret;
log = get_log_from_minor(MINOR(inode->i_rdev));
if (!log)
return -ENODEV;
if (file->f_mode & FMODE_READ) {
struct logger_reader *reader;
reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL);
if (!reader)
return -ENOMEM;
reader->log = log;
INIT_LIST_HEAD(&reader->list);
mutex_lock(&log->mutex);
reader->r_off = log->head;
list_add_tail(&reader->list, &log->readers);
mutex_unlock(&log->mutex);
file->private_data = reader;
} else
file->private_data = log;
return 0;
}
/*
* logger_release - the log's release file operation
*
* Note this is a total no-op in the write-only case. Keep it that way!
*/
static int logger_release(struct inode *ignored, struct file *file)
{
if (file->f_mode & FMODE_READ) {
struct logger_reader *reader = file->private_data;
list_del(&reader->list);
kfree(reader);
}
return 0;
}
/*
* logger_poll - the log's poll file operation, for poll/select/epoll
*
* Note we always return POLLOUT, because you can always write() to the log.
* Note also that, strictly speaking, a return value of POLLIN does not
* guarantee that the log is readable without blocking, as there is a small
* chance that the writer can lap the reader in the interim between poll()
* returning and the read() request.
*/
static unsigned int logger_poll(struct file *file, poll_table *wait)
{
struct logger_reader *reader;
struct logger_log *log;
unsigned int ret = POLLOUT | POLLWRNORM;
if (!(file->f_mode & FMODE_READ))
return ret;
reader = file->private_data;
log = reader->log;
poll_wait(file, &log->wq, wait);
mutex_lock(&log->mutex);
if (log->w_off != reader->r_off)
ret |= POLLIN | POLLRDNORM;
mutex_unlock(&log->mutex);
return ret;
}
static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct logger_log *log = file_get_log(file);
struct logger_reader *reader;
long ret = -ENOTTY;
mutex_lock(&log->mutex);
switch (cmd) {
case LOGGER_GET_LOG_BUF_SIZE:
ret = log->size;
break;
case LOGGER_GET_LOG_LEN:
if (!(file->f_mode & FMODE_READ)) {
ret = -EBADF;
break;
}
reader = file->private_data;
if (log->w_off >= reader->r_off)
ret = log->w_off - reader->r_off;
else
ret = (log->size - reader->r_off) + log->w_off;
break;
case LOGGER_GET_NEXT_ENTRY_LEN:
if (!(file->f_mode & FMODE_READ)) {
ret = -EBADF;
break;
}
reader = file->private_data;
if (log->w_off != reader->r_off)
ret = get_entry_len(log, reader->r_off);
else
ret = 0;
break;
case LOGGER_FLUSH_LOG:
if (!(file->f_mode & FMODE_WRITE)) {
ret = -EBADF;
break;
}
list_for_each_entry(reader, &log->readers, list)
reader->r_off = log->w_off;
log->head = log->w_off;
ret = 0;
break;
}
mutex_unlock(&log->mutex);
return ret;
}
static const struct file_operations logger_fops = {
.owner = THIS_MODULE,
.read = logger_read,
.aio_write = logger_aio_write,
.poll = logger_poll,
.unlocked_ioctl = logger_ioctl,
.compat_ioctl = logger_ioctl,
.open = logger_open,
.release = logger_release,
};
/*
* Defines a log structure with name 'NAME' and a size of 'SIZE' bytes, which
* must be a power of two, greater than LOGGER_ENTRY_MAX_LEN, and less than
* LONG_MAX minus LOGGER_ENTRY_MAX_LEN.
*/
#define DEFINE_LOGGER_DEVICE(VAR, NAME, SIZE) \
static unsigned char _buf_ ## VAR[SIZE]; \
static struct logger_log VAR = { \
.buffer = _buf_ ## VAR, \
.misc = { \
.minor = MISC_DYNAMIC_MINOR, \
.name = NAME, \
.fops = &logger_fops, \
.parent = NULL, \
}, \
.wq = __WAIT_QUEUE_HEAD_INITIALIZER(VAR .wq), \
.readers = LIST_HEAD_INIT(VAR .readers), \
.mutex = __MUTEX_INITIALIZER(VAR .mutex), \
.w_off = 0, \
.head = 0, \
.size = SIZE, \
};
DEFINE_LOGGER_DEVICE(log_main, LOGGER_LOG_MAIN, 64*1024)
DEFINE_LOGGER_DEVICE(log_events, LOGGER_LOG_EVENTS, 256*1024)
DEFINE_LOGGER_DEVICE(log_radio, LOGGER_LOG_RADIO, 64*1024)
static struct logger_log *get_log_from_minor(int minor)
{
if (log_main.misc.minor == minor)
return &log_main;
if (log_events.misc.minor == minor)
return &log_events;
if (log_radio.misc.minor == minor)
return &log_radio;
return NULL;
}
static int __init init_log(struct logger_log *log)
{
int ret;
ret = misc_register(&log->misc);
if (unlikely(ret)) {
printk(KERN_ERR "logger: failed to register misc "
"device for log '%s'!\n", log->misc.name);
return ret;
}
printk(KERN_INFO "logger: created %luK log '%s'\n",
(unsigned long) log->size >> 10, log->misc.name);
return 0;
}
static int __init logger_init(void)
{
int ret;
ret = init_log(&log_main);
if (unlikely(ret))
goto out;
ret = init_log(&log_events);
if (unlikely(ret))
goto out;
ret = init_log(&log_radio);
if (unlikely(ret))
goto out;
out:
return ret;
}
device_initcall(logger_init);
/* include/linux/logger.h
*
* Copyright (C) 2007-2008 Google, Inc.
* Author: Robert Love <rlove@android.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _LINUX_LOGGER_H
#define _LINUX_LOGGER_H
#include <linux/types.h>
#include <linux/ioctl.h>
struct logger_entry {
__u16 len; /* length of the payload */
__u16 __pad; /* no matter what, we get 2 bytes of padding */
__s32 pid; /* generating process's pid */
__s32 tid; /* generating process's tid */
__s32 sec; /* seconds since Epoch */
__s32 nsec; /* nanoseconds */
char msg[0]; /* the entry's payload */
};
#define LOGGER_LOG_RADIO "log_radio" /* radio-related messages */
#define LOGGER_LOG_EVENTS "log_events" /* system/hardware events */
#define LOGGER_LOG_MAIN "log_main" /* everything else */
#define LOGGER_ENTRY_MAX_LEN (4*1024)
#define LOGGER_ENTRY_MAX_PAYLOAD \
(LOGGER_ENTRY_MAX_LEN - sizeof(struct logger_entry))
#define __LOGGERIO 0xAE
#define LOGGER_GET_LOG_BUF_SIZE _IO(__LOGGERIO, 1) /* size of log */
#define LOGGER_GET_LOG_LEN _IO(__LOGGERIO, 2) /* used log len */
#define LOGGER_GET_NEXT_ENTRY_LEN _IO(__LOGGERIO, 3) /* next entry len */
#define LOGGER_FLUSH_LOG _IO(__LOGGERIO, 4) /* flush log */
#endif /* _LINUX_LOGGER_H */
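logger_read() above returns exactly one entry per read(), and its comment notes that LOGGER_ENTRY_MAX_LEN is the optimal read size. A minimal user-space reader sketch built on those two facts follows; the path /dev/log_main is only an assumption derived from the LOGGER_LOG_MAIN misc-device name, and the struct below simply mirrors logger.h for illustration.

/* Illustrative reader for one of the logger devices defined above. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define LOGGER_ENTRY_MAX_LEN (4*1024)	/* mirrors logger.h */

struct logger_entry {			/* mirrors logger.h */
	uint16_t len;
	uint16_t __pad;
	int32_t  pid;
	int32_t  tid;
	int32_t  sec;
	int32_t  nsec;
	char     msg[0];
};

int main(void)
{
	unsigned char buf[LOGGER_ENTRY_MAX_LEN];
	int fd = open("/dev/log_main", O_RDONLY);	/* path is an assumption */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Each read() returns exactly one entry: header followed by payload. */
	for (;;) {
		ssize_t n = read(fd, buf, sizeof(buf));
		struct logger_entry *entry = (struct logger_entry *)buf;

		if (n <= 0)
			break;
		printf("pid=%d tid=%d len=%u\n",
		       entry->pid, entry->tid, entry->len);
	}

	close(fd);
	return 0;
}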
/* drivers/misc/lowmemorykiller.c
*
* The lowmemorykiller driver lets user-space specify a set of memory thresholds
* where processes with a range of oom_adj values will get killed. Specify the
* minimum oom_adj values in /sys/module/lowmemorykiller/parameters/adj and the
* number of free pages in /sys/module/lowmemorykiller/parameters/minfree. Both
* files take a comma separated list of numbers in ascending order.
*
* For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and
* "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill
* processes with an oom_adj value of 8 or higher when the free memory drops
* below 4096 pages, and to kill processes with an oom_adj value of 0 or
* higher when the free memory drops below 1024 pages.
*
* The driver considers memory used for caches to be free, but if a large
* percentage of the cached memory is locked this can be very inaccurate
* and processes may not get killed until the normal oom killer is triggered.
*
* Copyright (C) 2007-2008 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/oom.h>
#include <linux/sched.h>
static uint32_t lowmem_debug_level = 2;
static int lowmem_adj[6] = {
0,
1,
6,
12,
};
static int lowmem_adj_size = 4;
static size_t lowmem_minfree[6] = {
3 * 512, /* 6MB */
2 * 1024, /* 8MB */
4 * 1024, /* 16MB */
16 * 1024, /* 64MB */
};
static int lowmem_minfree_size = 4;
#define lowmem_print(level, x...) \
do { \
if (lowmem_debug_level >= (level)) \
printk(x); \
} while (0)
static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask)
{
struct task_struct *p;
struct task_struct *selected = NULL;
int rem = 0;
int tasksize;
int i;
int min_adj = OOM_ADJUST_MAX + 1;
int selected_tasksize = 0;
int selected_oom_adj;
int array_size = ARRAY_SIZE(lowmem_adj);
int other_free = global_page_state(NR_FREE_PAGES);
int other_file = global_page_state(NR_FILE_PAGES);
if (lowmem_adj_size < array_size)
array_size = lowmem_adj_size;
if (lowmem_minfree_size < array_size)
array_size = lowmem_minfree_size;
for (i = 0; i < array_size; i++) {
if (other_free < lowmem_minfree[i] &&
other_file < lowmem_minfree[i]) {
min_adj = lowmem_adj[i];
break;
}
}
if (nr_to_scan > 0)
lowmem_print(3, "lowmem_shrink %d, %x, ofree %d %d, ma %d\n",
nr_to_scan, gfp_mask, other_free, other_file,
min_adj);
rem = global_page_state(NR_ACTIVE_ANON) +
global_page_state(NR_ACTIVE_FILE) +
global_page_state(NR_INACTIVE_ANON) +
global_page_state(NR_INACTIVE_FILE);
if (nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) {
lowmem_print(5, "lowmem_shrink %d, %x, return %d\n",
nr_to_scan, gfp_mask, rem);
return rem;
}
selected_oom_adj = min_adj;
read_lock(&tasklist_lock);
for_each_process(p) {
struct mm_struct *mm;
int oom_adj;
task_lock(p);
mm = p->mm;
if (!mm) {
task_unlock(p);
continue;
}
oom_adj = mm->oom_adj;
if (oom_adj < min_adj) {
task_unlock(p);
continue;
}
tasksize = get_mm_rss(mm);
task_unlock(p);
if (tasksize <= 0)
continue;
if (selected) {
if (oom_adj < selected_oom_adj)
continue;
if (oom_adj == selected_oom_adj &&
tasksize <= selected_tasksize)
continue;
}
selected = p;
selected_tasksize = tasksize;
selected_oom_adj = oom_adj;
lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
p->pid, p->comm, oom_adj, tasksize);
}
if (selected) {
lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
selected->pid, selected->comm,
selected_oom_adj, selected_tasksize);
force_sig(SIGKILL, selected);
rem -= selected_tasksize;
}
lowmem_print(4, "lowmem_shrink %d, %x, return %d\n",
nr_to_scan, gfp_mask, rem);
read_unlock(&tasklist_lock);
return rem;
}
static struct shrinker lowmem_shrinker = {
.shrink = lowmem_shrink,
.seeks = DEFAULT_SEEKS * 16
};
static int __init lowmem_init(void)
{
register_shrinker(&lowmem_shrinker);
return 0;
}
static void __exit lowmem_exit(void)
{
unregister_shrinker(&lowmem_shrinker);
}
module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
module_param_array_named(adj, lowmem_adj, int, &lowmem_adj_size,
S_IRUGO | S_IWUSR);
module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
S_IRUGO | S_IWUSR);
module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
module_init(lowmem_init);
module_exit(lowmem_exit);
MODULE_LICENSE("GPL");
/* drivers/android/ram_console.c
*
* Copyright (C) 2007-2008 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/console.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/proc_fs.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
#include <linux/rslib.h>
#endif
struct ram_console_buffer {
uint32_t sig;
uint32_t start;
uint32_t size;
uint8_t data[0];
};
#define RAM_CONSOLE_SIG (0x43474244) /* DBGC */
#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
static char __initdata
ram_console_old_log_init_buffer[CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE];
#endif
static char *ram_console_old_log;
static size_t ram_console_old_log_size;
static struct ram_console_buffer *ram_console_buffer;
static size_t ram_console_buffer_size;
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
static char *ram_console_par_buffer;
static struct rs_control *ram_console_rs_decoder;
static int ram_console_corrected_bytes;
static int ram_console_bad_blocks;
#define ECC_BLOCK_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE
#define ECC_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
#define ECC_SYMSIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
#define ECC_POLY CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
#endif
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
static void ram_console_encode_rs8(uint8_t *data, size_t len, uint8_t *ecc)
{
int i;
uint16_t par[ECC_SIZE];
/* Initialize the parity buffer */
memset(par, 0, sizeof(par));
encode_rs8(ram_console_rs_decoder, data, len, par, 0);
for (i = 0; i < ECC_SIZE; i++)
ecc[i] = par[i];
}
static int ram_console_decode_rs8(void *data, size_t len, uint8_t *ecc)
{
int i;
uint16_t par[ECC_SIZE];
for (i = 0; i < ECC_SIZE; i++)
par[i] = ecc[i];
return decode_rs8(ram_console_rs_decoder, data, par, len,
NULL, 0, NULL, 0, NULL);
}
#endif
static void ram_console_update(const char *s, unsigned int count)
{
struct ram_console_buffer *buffer = ram_console_buffer;
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
uint8_t *buffer_end = buffer->data + ram_console_buffer_size;
uint8_t *block;
uint8_t *par;
int size = ECC_BLOCK_SIZE;
#endif
memcpy(buffer->data + buffer->start, s, count);
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
block = buffer->data + (buffer->start & ~(ECC_BLOCK_SIZE - 1));
par = ram_console_par_buffer +
(buffer->start / ECC_BLOCK_SIZE) * ECC_SIZE;
do {
if (block + ECC_BLOCK_SIZE > buffer_end)
size = buffer_end - block;
ram_console_encode_rs8(block, size, par);
block += ECC_BLOCK_SIZE;
par += ECC_SIZE;
} while (block < buffer->data + buffer->start + count);
#endif
}
static void ram_console_update_header(void)
{
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
struct ram_console_buffer *buffer = ram_console_buffer;
uint8_t *par;
par = ram_console_par_buffer +
DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;
ram_console_encode_rs8((uint8_t *)buffer, sizeof(*buffer), par);
#endif
}
static void
ram_console_write(struct console *console, const char *s, unsigned int count)
{
int rem;
struct ram_console_buffer *buffer = ram_console_buffer;
if (count > ram_console_buffer_size) {
s += count - ram_console_buffer_size;
count = ram_console_buffer_size;
}
rem = ram_console_buffer_size - buffer->start;
if (rem < count) {
ram_console_update(s, rem);
s += rem;
count -= rem;
buffer->start = 0;
buffer->size = ram_console_buffer_size;
}
ram_console_update(s, count);
buffer->start += count;
if (buffer->size < ram_console_buffer_size)
buffer->size += count;
ram_console_update_header();
}
static struct console ram_console = {
.name = "ram",
.write = ram_console_write,
.flags = CON_PRINTBUFFER | CON_ENABLED,
.index = -1,
};
static void __init
ram_console_save_old(struct ram_console_buffer *buffer, char *dest)
{
size_t old_log_size = buffer->size;
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
uint8_t *block;
uint8_t *par;
char strbuf[80];
int strbuf_len;
block = buffer->data;
par = ram_console_par_buffer;
while (block < buffer->data + buffer->size) {
int numerr;
int size = ECC_BLOCK_SIZE;
if (block + size > buffer->data + ram_console_buffer_size)
size = buffer->data + ram_console_buffer_size - block;
numerr = ram_console_decode_rs8(block, size, par);
if (numerr > 0) {
#if 0
printk(KERN_INFO "ram_console: error in block %p, %d\n",
block, numerr);
#endif
ram_console_corrected_bytes += numerr;
} else if (numerr < 0) {
#if 0
printk(KERN_INFO "ram_console: uncorrectable error in "
"block %p\n", block);
#endif
ram_console_bad_blocks++;
}
block += ECC_BLOCK_SIZE;
par += ECC_SIZE;
}
if (ram_console_corrected_bytes || ram_console_bad_blocks)
strbuf_len = snprintf(strbuf, sizeof(strbuf),
"\n%d Corrected bytes, %d unrecoverable blocks\n",
ram_console_corrected_bytes, ram_console_bad_blocks);
else
strbuf_len = snprintf(strbuf, sizeof(strbuf),
"\nNo errors detected\n");
if (strbuf_len >= sizeof(strbuf))
strbuf_len = sizeof(strbuf) - 1;
old_log_size += strbuf_len;
#endif
if (dest == NULL) {
dest = kmalloc(old_log_size, GFP_KERNEL);
if (dest == NULL) {
printk(KERN_ERR
"ram_console: failed to allocate buffer\n");
return;
}
}
ram_console_old_log = dest;
ram_console_old_log_size = old_log_size;
memcpy(ram_console_old_log,
&buffer->data[buffer->start], buffer->size - buffer->start);
memcpy(ram_console_old_log + buffer->size - buffer->start,
&buffer->data[0], buffer->start);
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
memcpy(ram_console_old_log + old_log_size - strbuf_len,
strbuf, strbuf_len);
#endif
}
static int __init ram_console_init(struct ram_console_buffer *buffer,
size_t buffer_size, char *old_buf)
{
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
int numerr;
uint8_t *par;
#endif
ram_console_buffer = buffer;
ram_console_buffer_size =
buffer_size - sizeof(struct ram_console_buffer);
if (ram_console_buffer_size > buffer_size) {
pr_err("ram_console: buffer %p, invalid size %zu, "
"datasize %zu\n", buffer, buffer_size,
ram_console_buffer_size);
return 0;
}
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
ram_console_buffer_size -= (DIV_ROUND_UP(ram_console_buffer_size,
ECC_BLOCK_SIZE) + 1) * ECC_SIZE;
if (ram_console_buffer_size > buffer_size) {
pr_err("ram_console: buffer %p, invalid size %zu, "
"non-ecc datasize %zu\n",
buffer, buffer_size, ram_console_buffer_size);
return 0;
}
ram_console_par_buffer = buffer->data + ram_console_buffer_size;
/* first consecutive root is 0
* primitive element to generate roots = 1
*/
ram_console_rs_decoder = init_rs(ECC_SYMSIZE, ECC_POLY, 0, 1, ECC_SIZE);
if (ram_console_rs_decoder == NULL) {
printk(KERN_INFO "ram_console: init_rs failed\n");
return 0;
}
ram_console_corrected_bytes = 0;
ram_console_bad_blocks = 0;
par = ram_console_par_buffer +
DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;
numerr = ram_console_decode_rs8(buffer, sizeof(*buffer), par);
if (numerr > 0) {
printk(KERN_INFO "ram_console: error in header, %d\n", numerr);
ram_console_corrected_bytes += numerr;
} else if (numerr < 0) {
printk(KERN_INFO
"ram_console: uncorrectable error in header\n");
ram_console_bad_blocks++;
}
#endif
if (buffer->sig == RAM_CONSOLE_SIG) {
if (buffer->size > ram_console_buffer_size
|| buffer->start > buffer->size)
printk(KERN_INFO "ram_console: found existing invalid "
"buffer, size %d, start %d\n",
buffer->size, buffer->start);
else {
printk(KERN_INFO "ram_console: found existing buffer, "
"size %d, start %d\n",
buffer->size, buffer->start);
ram_console_save_old(buffer, old_buf);
}
} else {
printk(KERN_INFO "ram_console: no valid data in buffer "
"(sig = 0x%08x)\n", buffer->sig);
}
buffer->sig = RAM_CONSOLE_SIG;
buffer->start = 0;
buffer->size = 0;
register_console(&ram_console);
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
console_verbose();
#endif
return 0;
}
#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
static int __init ram_console_early_init(void)
{
return ram_console_init((struct ram_console_buffer *)
CONFIG_ANDROID_RAM_CONSOLE_EARLY_ADDR,
CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE,
ram_console_old_log_init_buffer);
}
#else
static int ram_console_driver_probe(struct platform_device *pdev)
{
struct resource *res = pdev->resource;
size_t start;
size_t buffer_size;
void *buffer;
if (res == NULL || pdev->num_resources != 1 ||
!(res->flags & IORESOURCE_MEM)) {
printk(KERN_ERR "ram_console: invalid resource, %p %d flags "
"%lx\n", res, pdev->num_resources, res ? res->flags : 0);
return -ENXIO;
}
buffer_size = res->end - res->start + 1;
start = res->start;
printk(KERN_INFO "ram_console: got buffer at %zx, size %zx\n",
start, buffer_size);
buffer = ioremap(res->start, buffer_size);
if (buffer == NULL) {
printk(KERN_ERR "ram_console: failed to map memory\n");
return -ENOMEM;
}
return ram_console_init(buffer, buffer_size, NULL/* allocate */);
}
static struct platform_driver ram_console_driver = {
.probe = ram_console_driver_probe,
.driver = {
.name = "ram_console",
},
};
static int __init ram_console_module_init(void)
{
int err;
err = platform_driver_register(&ram_console_driver);
return err;
}
#endif
static ssize_t ram_console_read_old(struct file *file, char __user *buf,
size_t len, loff_t *offset)
{
loff_t pos = *offset;
ssize_t count;
if (pos >= ram_console_old_log_size)
return 0;
count = min(len, (size_t)(ram_console_old_log_size - pos));
if (copy_to_user(buf, ram_console_old_log + pos, count))
return -EFAULT;
*offset += count;
return count;
}
static const struct file_operations ram_console_file_ops = {
.owner = THIS_MODULE,
.read = ram_console_read_old,
};
static int __init ram_console_late_init(void)
{
struct proc_dir_entry *entry;
if (ram_console_old_log == NULL)
return 0;
#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
ram_console_old_log = kmalloc(ram_console_old_log_size, GFP_KERNEL);
if (ram_console_old_log == NULL) {
printk(KERN_ERR
"ram_console: failed to allocate buffer for old log\n");
ram_console_old_log_size = 0;
return 0;
}
memcpy(ram_console_old_log,
ram_console_old_log_init_buffer, ram_console_old_log_size);
#endif
entry = create_proc_entry("last_kmsg", S_IFREG | S_IRUGO, NULL);
if (!entry) {
printk(KERN_ERR "ram_console: failed to create proc entry\n");
kfree(ram_console_old_log);
ram_console_old_log = NULL;
return 0;
}
entry->proc_fops = &ram_console_file_ops;
entry->size = ram_console_old_log_size;
return 0;
}
#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
console_initcall(ram_console_early_init);
#else
module_init(ram_console_module_init);
#endif
late_initcall(ram_console_late_init);
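ram_console_late_init() above exposes the log saved from the previous boot as /proc/last_kmsg. A trivial user-space sketch for dumping it follows; only the proc entry name comes from the code above, everything else is ordinary file I/O.

/* Illustrative only: dump the previous boot's ram_console log. */
#include <stdio.h>

int main(void)
{
	char buf[4096];
	size_t n;
	FILE *f = fopen("/proc/last_kmsg", "r");

	if (!f) {
		perror("/proc/last_kmsg");
		return 1;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);
	fclose(f);
	return 0;
}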
/* drivers/misc/timed_gpio.c
*
* Copyright (C) 2008 Google, Inc.
* Author: Mike Lockwood <lockwood@android.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/hrtimer.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include "timed_output.h"
#include "timed_gpio.h"
struct timed_gpio_data {
struct timed_output_dev dev;
struct hrtimer timer;
spinlock_t lock;
unsigned gpio;
int max_timeout;
u8 active_low;
};
static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer)
{
struct timed_gpio_data *data =
container_of(timer, struct timed_gpio_data, timer);
gpio_direction_output(data->gpio, data->active_low ? 1 : 0);
return HRTIMER_NORESTART;
}
static int gpio_get_time(struct timed_output_dev *dev)
{
struct timed_gpio_data *data =
container_of(dev, struct timed_gpio_data, dev);
if (hrtimer_active(&data->timer)) {
ktime_t r = hrtimer_get_remaining(&data->timer);
struct timeval t = ktime_to_timeval(r);
return t.tv_sec * 1000 + t.tv_usec / 1000;
} else
return 0;
}
static void gpio_enable(struct timed_output_dev *dev, int value)
{
struct timed_gpio_data *data =
container_of(dev, struct timed_gpio_data, dev);
unsigned long flags;
spin_lock_irqsave(&data->lock, flags);
/* cancel previous timer and set GPIO according to value */
hrtimer_cancel(&data->timer);
gpio_direction_output(data->gpio, data->active_low ? !value : !!value);
if (value > 0) {
if (value > data->max_timeout)
value = data->max_timeout;
hrtimer_start(&data->timer,
ktime_set(value / 1000, (value % 1000) * 1000000),
HRTIMER_MODE_REL);
}
spin_unlock_irqrestore(&data->lock, flags);
}
static int timed_gpio_probe(struct platform_device *pdev)
{
struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
struct timed_gpio *cur_gpio;
struct timed_gpio_data *gpio_data, *gpio_dat;
int i, j, ret = 0;
if (!pdata)
return -EBUSY;
gpio_data = kzalloc(sizeof(struct timed_gpio_data) * pdata->num_gpios,
GFP_KERNEL);
if (!gpio_data)
return -ENOMEM;
for (i = 0; i < pdata->num_gpios; i++) {
cur_gpio = &pdata->gpios[i];
gpio_dat = &gpio_data[i];
hrtimer_init(&gpio_dat->timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
gpio_dat->timer.function = gpio_timer_func;
spin_lock_init(&gpio_dat->lock);
gpio_dat->dev.name = cur_gpio->name;
gpio_dat->dev.get_time = gpio_get_time;
gpio_dat->dev.enable = gpio_enable;
ret = timed_output_dev_register(&gpio_dat->dev);
if (ret < 0) {
for (j = 0; j < i; j++)
timed_output_dev_unregister(&gpio_data[j].dev);
kfree(gpio_data);
return ret;
}
gpio_dat->gpio = cur_gpio->gpio;
gpio_dat->max_timeout = cur_gpio->max_timeout;
gpio_dat->active_low = cur_gpio->active_low;
gpio_direction_output(gpio_dat->gpio, gpio_dat->active_low);
}
platform_set_drvdata(pdev, gpio_data);
return 0;
}
static int timed_gpio_remove(struct platform_device *pdev)
{
struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
struct timed_gpio_data *gpio_data = platform_get_drvdata(pdev);
int i;
for (i = 0; i < pdata->num_gpios; i++)
timed_output_dev_unregister(&gpio_data[i].dev);
kfree(gpio_data);
return 0;
}
static struct platform_driver timed_gpio_driver = {
.probe = timed_gpio_probe,
.remove = timed_gpio_remove,
.driver = {
.name = TIMED_GPIO_NAME,
.owner = THIS_MODULE,
},
};
static int __init timed_gpio_init(void)
{
return platform_driver_register(&timed_gpio_driver);
}
static void __exit timed_gpio_exit(void)
{
platform_driver_unregister(&timed_gpio_driver);
}
module_init(timed_gpio_init);
module_exit(timed_gpio_exit);
MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
MODULE_DESCRIPTION("timed gpio driver");
MODULE_LICENSE("GPL");
/* include/linux/timed_gpio.h
*
* Copyright (C) 2008 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _LINUX_TIMED_GPIO_H
#define _LINUX_TIMED_GPIO_H
#define TIMED_GPIO_NAME "timed-gpio"
struct timed_gpio {
const char *name;
unsigned gpio;
int max_timeout;
u8 active_low;
};
struct timed_gpio_platform_data {
int num_gpios;
struct timed_gpio *gpios;
};
#endif
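timed_gpio_probe() above expects a struct timed_gpio_platform_data attached to a platform device named TIMED_GPIO_NAME. A board-file style sketch of supplying that data follows; the "vibrator" name, GPIO number, and timeout are made-up examples, not anything defined by this commit.

/* Illustrative board-file snippet: one example timed GPIO for the driver above. */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include "timed_gpio.h"

static struct timed_gpio board_timed_gpios[] = {
	{
		.name        = "vibrator",	/* example; shows up under the timed_output class */
		.gpio        = 57,		/* example GPIO number */
		.max_timeout = 15000,		/* milliseconds */
		.active_low  = 0,
	},
};

static struct timed_gpio_platform_data board_timed_gpio_data = {
	.num_gpios = ARRAY_SIZE(board_timed_gpios),
	.gpios     = board_timed_gpios,
};

static struct platform_device board_timed_gpio_device = {
	.name = TIMED_GPIO_NAME,	/* matches timed_gpio_driver.driver.name */
	.id   = -1,
	.dev  = {
		.platform_data = &board_timed_gpio_data,
	},
};

/* In board init code: platform_device_register(&board_timed_gpio_device); */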
/* drivers/misc/timed_output.c
*
* Copyright (C) 2009 Google, Inc.
* Author: Mike Lockwood <lockwood@android.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/err.h>
#include "timed_output.h"
static struct class *timed_output_class;
static atomic_t device_count;
static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct timed_output_dev *tdev = dev_get_drvdata(dev);
int remaining = tdev->get_time(tdev);
return sprintf(buf, "%d\n", remaining);
}
static ssize_t enable_store(
struct device *dev, struct device_attribute *attr,
const char *buf, size_t size)
{
struct timed_output_dev *tdev = dev_get_drvdata(dev);
int value;
sscanf(buf, "%d", &value);
tdev->enable(tdev, value);
return size;
}
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, enable_show, enable_store);
static int create_timed_output_class(void)
{
if (!timed_output_class) {
timed_output_class = class_create(THIS_MODULE, "timed_output");
if (IS_ERR(timed_output_class))
return PTR_ERR(timed_output_class);
atomic_set(&device_count, 0);
}
return 0;
}
int timed_output_dev_register(struct timed_output_dev *tdev)
{
int ret;
if (!tdev || !tdev->name || !tdev->enable || !tdev->get_time)
return -EINVAL;
ret = create_timed_output_class();
if (ret < 0)
return ret;
tdev->index = atomic_inc_return(&device_count);
tdev->dev = device_create(timed_output_class, NULL,
MKDEV(0, tdev->index), NULL, tdev->name);
if (IS_ERR(tdev->dev))
return PTR_ERR(tdev->dev);
ret = device_create_file(tdev->dev, &dev_attr_enable);
if (ret < 0)
goto err_create_file;
dev_set_drvdata(tdev->dev, tdev);
tdev->state = 0;
return 0;
err_create_file:
device_destroy(timed_output_class, MKDEV(0, tdev->index));
printk(KERN_ERR "timed_output: Failed to register driver %s\n",
tdev->name);
return ret;
}
EXPORT_SYMBOL_GPL(timed_output_dev_register);
void timed_output_dev_unregister(struct timed_output_dev *tdev)
{
device_remove_file(tdev->dev, &dev_attr_enable);
device_destroy(timed_output_class, MKDEV(0, tdev->index));
dev_set_drvdata(tdev->dev, NULL);
}
EXPORT_SYMBOL_GPL(timed_output_dev_unregister);
static int __init timed_output_init(void)
{
return create_timed_output_class();
}
static void __exit timed_output_exit(void)
{
class_destroy(timed_output_class);
}
module_init(timed_output_init);
module_exit(timed_output_exit);
MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
MODULE_DESCRIPTION("timed output class driver");
MODULE_LICENSE("GPL");
/* include/linux/timed_output.h
*
* Copyright (C) 2008 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _LINUX_TIMED_OUTPUT_H
#define _LINUX_TIMED_OUTPUT_H
struct timed_output_dev {
const char *name;
/* enable the output and set the timer */
void (*enable)(struct timed_output_dev *sdev, int timeout);
/* returns the current number of milliseconds remaining on the timer */
int (*get_time)(struct timed_output_dev *sdev);
/* private data */
struct device *dev;
int index;
int state;
};
extern int timed_output_dev_register(struct timed_output_dev *dev);
extern void timed_output_dev_unregister(struct timed_output_dev *dev);
#endif