Diffstat (limited to 'drivers/staging/android')
-rw-r--r--  drivers/staging/android/Kconfig                |   30
-rw-r--r--  drivers/staging/android/Makefile               |    1
-rw-r--r--  drivers/staging/android/TODO                   |    7
-rw-r--r--  drivers/staging/android/binder.c               | 3673
-rw-r--r--  drivers/staging/android/binder.h               |   30
-rw-r--r--  drivers/staging/android/binder_trace.h         |  329
-rw-r--r--  drivers/staging/android/ion/ion.c              |   10
-rw-r--r--  drivers/staging/android/ion/ion.h              |    2
-rw-r--r--  drivers/staging/android/ion/ion_dummy_driver.c |    6
-rw-r--r--  drivers/staging/android/ion/ion_page_pool.c    |    2
-rw-r--r--  drivers/staging/android/ion/ion_priv.h         |    4
-rw-r--r--  drivers/staging/android/ion/tegra/tegra_ion.c  |    6
-rw-r--r--  drivers/staging/android/sync_debug.c           |    5
-rw-r--r--  drivers/staging/android/timed_gpio.c           |    7
-rw-r--r--  drivers/staging/android/uapi/binder.h          |  351
15 files changed, 26 insertions, 4437 deletions
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 7a0e28852965..7e012f37792b 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -1,37 +1,7 @@
 menu "Android"
 
-config ANDROID
-	bool "Android Drivers"
-	---help---
-	  Enable support for various drivers needed on the Android platform
-
 if ANDROID
 
-config ANDROID_BINDER_IPC
-	bool "Android Binder IPC Driver"
-	depends on MMU
-	default n
-	---help---
-	  Binder is used in Android for both communication between processes,
-	  and remote method invocation.
-
-	  This means one Android process can call a method/routine in another
-	  Android process, using Binder to identify, invoke and pass arguments
-	  between said processes.
-
-config ANDROID_BINDER_IPC_32BIT
-	bool
-	depends on !64BIT && ANDROID_BINDER_IPC
-	default y
-	---help---
-	  The Binder API has been changed to support both 32 and 64bit
-	  applications in a mixed environment.
-
-	  Enable this to support an old 32-bit Android user-space (v4.4 and
-	  earlier).
-
-	  Note that enabling this will break newer Android user-space.
-
 config ASHMEM
 	bool "Enable the Anonymous Shared Memory Subsystem"
 	default n
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index 517ad5ffa429..479b2b86f8c8 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -2,7 +2,6 @@ ccflags-y += -I$(src)			# needed for trace events
 
 obj-y					+= ion/
 
-obj-$(CONFIG_ANDROID_BINDER_IPC)	+= binder.o
 obj-$(CONFIG_ASHMEM)			+= ashmem.o
 obj-$(CONFIG_ANDROID_LOGGER)		+= logger.o
 obj-$(CONFIG_ANDROID_TIMED_OUTPUT)	+= timed_output.o
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index b15fb0d6b152..06954cdf3dba 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -5,6 +5,13 @@ TODO:
 	- make sure things build as modules properly
 	- add proper arch dependencies as needed
 	- audit userspace interfaces to make sure they are sane
+	- kuid_t should never be exposed to user space as it is
+	  kernel internal type. Data structure for this kuid_t is:
+		typedef struct {
+			uid_t val;
+		} kuid_t;
+	- This bug is introduced by Xiong Zhou in the patch bd471258f2e09
+	- ("staging: android: logger: use kuid_t instead of uid_t")
 
 Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
 Brian Swetland <swetland@google.com>
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
deleted file mode 100644
index c69c40d69d5c..000000000000
--- a/drivers/staging/android/binder.c
+++ /dev/null
@@ -1,3673 +0,0 @@
-/* binder.c
- *
- * Android IPC Subsystem
- *
- * Copyright (C) 2007-2008 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <asm/cacheflush.h>
-#include <linux/fdtable.h>
-#include <linux/file.h>
-#include <linux/freezer.h>
-#include <linux/fs.h>
-#include <linux/list.h>
-#include <linux/miscdevice.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/nsproxy.h>
-#include <linux/poll.h>
-#include <linux/debugfs.h>
-#include <linux/rbtree.h>
-#include <linux/sched.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/pid_namespace.h>
-
-#include "binder.h"
-#include "binder_trace.h"
-
-static DEFINE_MUTEX(binder_main_lock);
-static DEFINE_MUTEX(binder_deferred_lock);
-static DEFINE_MUTEX(binder_mmap_lock);
-
-static HLIST_HEAD(binder_procs);
-static HLIST_HEAD(binder_deferred_list);
-static HLIST_HEAD(binder_dead_nodes);
-
-static struct dentry *binder_debugfs_dir_entry_root;
-static struct dentry *binder_debugfs_dir_entry_proc;
-static struct binder_node *binder_context_mgr_node;
-static kuid_t binder_context_mgr_uid = INVALID_UID;
-static int binder_last_id;
-static struct workqueue_struct *binder_deferred_workqueue;
-
-#define BINDER_DEBUG_ENTRY(name) \
-static int binder_##name##_open(struct inode *inode, struct file *file) \
-{ \
-	return single_open(file, binder_##name##_show, inode->i_private); \
-} \
-\
-static const struct file_operations binder_##name##_fops = { \
-	.owner = THIS_MODULE, \
-	.open = binder_##name##_open, \
-	.read = seq_read, \
-	.llseek = seq_lseek, \
-	.release = single_release, \
-}
-
-static int binder_proc_show(struct seq_file *m, void *unused);
-BINDER_DEBUG_ENTRY(proc);
-
-/* This is only defined in include/asm-arm/sizes.h */
-#ifndef SZ_1K
-#define SZ_1K 0x400
-#endif
-
-#ifndef SZ_4M
-#define SZ_4M 0x400000
-#endif
-
-#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
-
-#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
-
-enum {
-	BINDER_DEBUG_USER_ERROR = 1U << 0,
-	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
-	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
-	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
-	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
-	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
-	BINDER_DEBUG_READ_WRITE = 1U << 6,
-	BINDER_DEBUG_USER_REFS = 1U << 7,
-	BINDER_DEBUG_THREADS = 1U << 8,
-	BINDER_DEBUG_TRANSACTION = 1U << 9,
-	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
-	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
-	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
-	BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
-	BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
-	BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
-};
-static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
-	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
-module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
-
-static bool binder_debug_no_lock;
-module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
-
-static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
-static int binder_stop_on_user_error;
-
-static int binder_set_stop_on_user_error(const char *val,
-					 struct kernel_param *kp)
-{
-	int ret;
-
-	ret = param_set_int(val, kp);
-	if (binder_stop_on_user_error < 2)
-		wake_up(&binder_user_error_wait);
-	return ret;
-}
-module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
-		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
-
-#define binder_debug(mask, x...) \
-	do { \
-		if (binder_debug_mask & mask) \
-			pr_info(x); \
-	} while (0)
-
-#define binder_user_error(x...) \
-	do { \
-		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
-			pr_info(x); \
-		if (binder_stop_on_user_error) \
-			binder_stop_on_user_error = 2; \
-	} while (0)
-
-enum binder_stat_types {
-	BINDER_STAT_PROC,
-	BINDER_STAT_THREAD,
-	BINDER_STAT_NODE,
-	BINDER_STAT_REF,
-	BINDER_STAT_DEATH,
-	BINDER_STAT_TRANSACTION,
-	BINDER_STAT_TRANSACTION_COMPLETE,
-	BINDER_STAT_COUNT
-};
-
-struct binder_stats {
-	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
-	int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
-	int obj_created[BINDER_STAT_COUNT];
-	int obj_deleted[BINDER_STAT_COUNT];
-};
-
-static struct binder_stats binder_stats;
-
-static inline void binder_stats_deleted(enum binder_stat_types type)
-{
-	binder_stats.obj_deleted[type]++;
-}
-
-static inline void binder_stats_created(enum binder_stat_types type)
-{
-	binder_stats.obj_created[type]++;
-}
-
-struct binder_transaction_log_entry {
-	int debug_id;
-	int call_type;
-	int from_proc;
-	int from_thread;
-	int target_handle;
-	int to_proc;
-	int to_thread;
-	int to_node;
-	int data_size;
-	int offsets_size;
-};
-struct binder_transaction_log {
-	int next;
-	int full;
-	struct binder_transaction_log_entry entry[32];
-};
-static struct binder_transaction_log binder_transaction_log;
-static struct binder_transaction_log binder_transaction_log_failed;
-
-static struct binder_transaction_log_entry *binder_transaction_log_add(
-	struct binder_transaction_log *log)
-{
-	struct binder_transaction_log_entry *e;
-
-	e = &log->entry[log->next];
-	memset(e, 0, sizeof(*e));
-	log->next++;
-	if (log->next == ARRAY_SIZE(log->entry)) {
-		log->next = 0;
-		log->full = 1;
-	}
-	return e;
-}
-
-struct binder_work {
-	struct list_head entry;
-	enum {
-		BINDER_WORK_TRANSACTION = 1,
-		BINDER_WORK_TRANSACTION_COMPLETE,
-		BINDER_WORK_NODE,
-		BINDER_WORK_DEAD_BINDER,
-		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
-		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
-	} type;
-};
-
-struct binder_node {
-	int debug_id;
-	struct binder_work work;
-	union {
-		struct rb_node rb_node;
-		struct hlist_node dead_node;
-	};
-	struct binder_proc *proc;
-	struct hlist_head refs;
-	int internal_strong_refs;
-	int local_weak_refs;
-	int local_strong_refs;
-	binder_uintptr_t ptr;
-	binder_uintptr_t cookie;
-	unsigned has_strong_ref:1;
-	unsigned pending_strong_ref:1;
-	unsigned has_weak_ref:1;
-	unsigned pending_weak_ref:1;
-	unsigned has_async_transaction:1;
-	unsigned accept_fds:1;
-	unsigned min_priority:8;
-	struct list_head async_todo;
-};
-
-struct binder_ref_death {
-	struct binder_work work;
-	binder_uintptr_t cookie;
-};
-
-struct binder_ref {
-	/* Lookups needed: */
-	/*   node + proc => ref (transaction) */
-	/*   desc + proc => ref (transaction, inc/dec ref) */
-	/*   node => refs + procs (proc exit) */
-	int debug_id;
-	struct rb_node rb_node_desc;
-	struct rb_node rb_node_node;
-	struct hlist_node node_entry;
-	struct binder_proc *proc;
-	struct binder_node *node;
-	uint32_t desc;
-	int strong;
-	int weak;
-	struct binder_ref_death *death;
-};
-
-struct binder_buffer {
-	struct list_head entry; /* free and allocated entries by address */
-	struct rb_node rb_node; /* free entry by size or allocated entry */
-				/* by address */
-	unsigned free:1;
-	unsigned allow_user_free:1;
-	unsigned async_transaction:1;
-	unsigned debug_id:29;
-
-	struct binder_transaction *transaction;
-
-	struct binder_node *target_node;
-	size_t data_size;
-	size_t offsets_size;
-	uint8_t data[0];
-};
-
-enum binder_deferred_state {
-	BINDER_DEFERRED_PUT_FILES = 0x01,
-	BINDER_DEFERRED_FLUSH = 0x02,
-	BINDER_DEFERRED_RELEASE = 0x04,
-};
-
-struct binder_proc {
-	struct hlist_node proc_node;
-	struct rb_root threads;
-	struct rb_root nodes;
-	struct rb_root refs_by_desc;
-	struct rb_root refs_by_node;
-	int pid;
-	struct vm_area_struct *vma;
-	struct mm_struct *vma_vm_mm;
-	struct task_struct *tsk;
-	struct files_struct *files;
-	struct hlist_node deferred_work_node;
-	int deferred_work;
-	void *buffer;
-	ptrdiff_t user_buffer_offset;
-
-	struct list_head buffers;
-	struct rb_root free_buffers;
-	struct rb_root allocated_buffers;
-	size_t free_async_space;
-
-	struct page **pages;
-	size_t buffer_size;
-	uint32_t buffer_free;
-	struct list_head todo;
-	wait_queue_head_t wait;
-	struct binder_stats stats;
-	struct list_head delivered_death;
-	int max_threads;
-	int requested_threads;
-	int requested_threads_started;
-	int ready_threads;
-	long default_priority;
-	struct dentry *debugfs_entry;
-};
-
-enum {
-	BINDER_LOOPER_STATE_REGISTERED = 0x01,
-	BINDER_LOOPER_STATE_ENTERED = 0x02,
-	BINDER_LOOPER_STATE_EXITED = 0x04,
-	BINDER_LOOPER_STATE_INVALID = 0x08,
-	BINDER_LOOPER_STATE_WAITING = 0x10,
-	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
-};
-
-struct binder_thread {
-	struct binder_proc *proc;
-	struct rb_node rb_node;
-	int pid;
-	int looper;
-	struct binder_transaction *transaction_stack;
-	struct list_head todo;
-	uint32_t return_error; /* Write failed, return error code in read buf */
-	uint32_t return_error2; /* Write failed, return error code in read */
-		/* buffer. Used when sending a reply to a dead process that */
-		/* we are also waiting on */
-	wait_queue_head_t wait;
-	struct binder_stats stats;
-};
-
-struct binder_transaction {
-	int debug_id;
-	struct binder_work work;
-	struct binder_thread *from;
-	struct binder_transaction *from_parent;
-	struct binder_proc *to_proc;
-	struct binder_thread *to_thread;
-	struct binder_transaction *to_parent;
-	unsigned need_reply:1;
-	/* unsigned is_dead:1; */ /* not used at the moment */
-
-	struct binder_buffer *buffer;
-	unsigned int code;
-	unsigned int flags;
-	long priority;
-	long saved_priority;
-	kuid_t sender_euid;
-};
-
-static void
-binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
-
-static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
-{
-	struct files_struct *files = proc->files;
-	unsigned long rlim_cur;
-	unsigned long irqs;
-
-	if (files == NULL)
-		return -ESRCH;
-
-	if (!lock_task_sighand(proc->tsk, &irqs))
-		return -EMFILE;
-
-	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
-	unlock_task_sighand(proc->tsk, &irqs);
-
-	return __alloc_fd(files, 0, rlim_cur, flags);
-}
-
-/*
- * copied from fd_install
- */
-static void task_fd_install(
-	struct binder_proc *proc, unsigned int fd, struct file *file)
-{
-	if (proc->files)
-		__fd_install(proc->files, fd, file);
-}
-
-/*
- * copied from sys_close
- */
-static long task_close_fd(struct binder_proc *proc, unsigned int fd)
-{
-	int retval;
-
-	if (proc->files == NULL)
-		return -ESRCH;
-
-	retval = __close_fd(proc->files, fd);
-	/* can't restart close syscall because file table entry was cleared */
-	if (unlikely(retval == -ERESTARTSYS ||
-		     retval == -ERESTARTNOINTR ||
-		     retval == -ERESTARTNOHAND ||
-		     retval == -ERESTART_RESTARTBLOCK))
-		retval = -EINTR;
-
-	return retval;
-}
-
-static inline void binder_lock(const char *tag)
-{
-	trace_binder_lock(tag);
-	mutex_lock(&binder_main_lock);
-	trace_binder_locked(tag);
-}
-
-static inline void binder_unlock(const char *tag)
-{
-	trace_binder_unlock(tag);
-	mutex_unlock(&binder_main_lock);
-}
-
-static void binder_set_nice(long nice)
-{
-	long min_nice;
-
-	if (can_nice(current, nice)) {
-		set_user_nice(current, nice);
-		return;
-	}
-	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
-	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
-		     "%d: nice value %ld not allowed use %ld instead\n",
-		     current->pid, nice, min_nice);
-	set_user_nice(current, min_nice);
-	if (min_nice <= MAX_NICE)
-		return;
-	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
-}
-
-static size_t binder_buffer_size(struct binder_proc *proc,
-				 struct binder_buffer *buffer)
-{
-	if (list_is_last(&buffer->entry, &proc->buffers))
-		return proc->buffer + proc->buffer_size - (void *)buffer->data;
-	return (size_t)list_entry(buffer->entry.next,
-		struct binder_buffer, entry) - (size_t)buffer->data;
-}
-
-static void binder_insert_free_buffer(struct binder_proc *proc,
-				      struct binder_buffer *new_buffer)
-{
-	struct rb_node **p = &proc->free_buffers.rb_node;
-	struct rb_node *parent = NULL;
-	struct binder_buffer *buffer;
-	size_t buffer_size;
-	size_t new_buffer_size;
-
-	BUG_ON(!new_buffer->free);
-
-	new_buffer_size = binder_buffer_size(proc, new_buffer);
-
-	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: add free buffer, size %zd, at %p\n",
-		     proc->pid, new_buffer_size, new_buffer);
-
-	while (*p) {
-		parent = *p;
-		buffer = rb_entry(parent, struct binder_buffer, rb_node);
-		BUG_ON(!buffer->free);
-
-		buffer_size = binder_buffer_size(proc, buffer);
-
-		if (new_buffer_size < buffer_size)
-			p = &parent->rb_left;
-		else
-			p = &parent->rb_right;
-	}
-	rb_link_node(&new_buffer->rb_node, parent, p);
-	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
-}
-
-static void binder_insert_allocated_buffer(struct binder_proc *proc,
-					   struct binder_buffer *new_buffer)
-{
-	struct rb_node **p = &proc->allocated_buffers.rb_node;
-	struct rb_node *parent = NULL;
-	struct binder_buffer *buffer;
-
-	BUG_ON(new_buffer->free);
-
-	while (*p) {
-		parent = *p;
-		buffer = rb_entry(parent, struct binder_buffer, rb_node);
-		BUG_ON(buffer->free);
-
-		if (new_buffer < buffer)
-			p = &parent->rb_left;
-		else if (new_buffer > buffer)
-			p = &parent->rb_right;
-		else
-			BUG();
-	}
-	rb_link_node(&new_buffer->rb_node, parent, p);
-	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
-}
-
-static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
-						  uintptr_t user_ptr)
-{
-	struct rb_node *n = proc->allocated_buffers.rb_node;
-	struct binder_buffer *buffer;
-	struct binder_buffer *kern_ptr;
-
-	kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
-		- offsetof(struct binder_buffer, data));
-
-	while (n) {
-		buffer = rb_entry(n, struct binder_buffer, rb_node);
-		BUG_ON(buffer->free);
-
-		if (kern_ptr < buffer)
-			n = n->rb_left;
-		else if (kern_ptr > buffer)
-			n = n->rb_right;
-		else
-			return buffer;
-	}
-	return NULL;
-}
-
-static int binder_update_page_range(struct binder_proc *proc, int allocate,
-				    void *start, void *end,
-				    struct vm_area_struct *vma)
-{
-	void *page_addr;
-	unsigned long user_page_addr;
-	struct vm_struct tmp_area;
-	struct page **page;
-	struct mm_struct *mm;
-
-	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: %s pages %p-%p\n", proc->pid,
-		     allocate ? "allocate" : "free", start, end);
-
-	if (end <= start)
-		return 0;
-
-	trace_binder_update_page_range(proc, allocate, start, end);
-
-	if (vma)
-		mm = NULL;
-	else
-		mm = get_task_mm(proc->tsk);
-
-	if (mm) {
-		down_write(&mm->mmap_sem);
-		vma = proc->vma;
-		if (vma && mm != proc->vma_vm_mm) {
-			pr_err("%d: vma mm and task mm mismatch\n",
-				proc->pid);
-			vma = NULL;
-		}
-	}
-
-	if (allocate == 0)
-		goto free_range;
-
-	if (vma == NULL) {
-		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
-			proc->pid);
-		goto err_no_vma;
-	}
-
-	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
-		int ret;
-
-		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
-
-		BUG_ON(*page);
-		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
-		if (*page == NULL) {
-			pr_err("%d: binder_alloc_buf failed for page at %p\n",
-				proc->pid, page_addr);
-			goto err_alloc_page_failed;
-		}
-		tmp_area.addr = page_addr;
-		tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
-		ret = map_vm_area(&tmp_area, PAGE_KERNEL, page);
-		if (ret) {
-			pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
-			       proc->pid, page_addr);
-			goto err_map_kernel_failed;
-		}
-		user_page_addr =
-			(uintptr_t)page_addr + proc->user_buffer_offset;
-		ret = vm_insert_page(vma, user_page_addr, page[0]);
-		if (ret) {
-			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
-			       proc->pid, user_page_addr);
-			goto err_vm_insert_page_failed;
-		}
-		/* vm_insert_page does not seem to increment the refcount */
-	}
-	if (mm) {
-		up_write(&mm->mmap_sem);
-		mmput(mm);
-	}
-	return 0;
-
-free_range:
-	for (page_addr = end - PAGE_SIZE; page_addr >= start;
-	     page_addr -= PAGE_SIZE) {
-		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
-		if (vma)
-			zap_page_range(vma, (uintptr_t)page_addr +
-				proc->user_buffer_offset, PAGE_SIZE, NULL);
-err_vm_insert_page_failed:
-		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-err_map_kernel_failed:
-		__free_page(*page);
-		*page = NULL;
-err_alloc_page_failed:
-		;
-	}
-err_no_vma:
-	if (mm) {
-		up_write(&mm->mmap_sem);
-		mmput(mm);
-	}
-	return -ENOMEM;
-}
-
-static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
-					      size_t data_size,
-					      size_t offsets_size, int is_async)
-{
-	struct rb_node *n = proc->free_buffers.rb_node;
-	struct binder_buffer *buffer;
-	size_t buffer_size;
-	struct rb_node *best_fit = NULL;
-	void *has_page_addr;
-	void *end_page_addr;
-	size_t size;
-
-	if (proc->vma == NULL) {
-		pr_err("%d: binder_alloc_buf, no vma\n",
-		       proc->pid);
-		return NULL;
-	}
-
-	size = ALIGN(data_size, sizeof(void *)) +
-		ALIGN(offsets_size, sizeof(void *));
-
-	if (size < data_size || size < offsets_size) {
-		binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
-				  proc->pid, data_size, offsets_size);
-		return NULL;
-	}
-
-	if (is_async &&
-	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
-		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
-			     proc->pid, size);
-		return NULL;
-	}
-
-	while (n) {
-		buffer = rb_entry(n, struct binder_buffer, rb_node);
-		BUG_ON(!buffer->free);
-		buffer_size = binder_buffer_size(proc, buffer);
-
-		if (size < buffer_size) {
-			best_fit = n;
-			n = n->rb_left;
-		} else if (size > buffer_size)
-			n = n->rb_right;
-		else {
-			best_fit = n;
-			break;
-		}
-	}
-	if (best_fit == NULL) {
-		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
-			proc->pid, size);
-		return NULL;
-	}
-	if (n == NULL) {
-		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
-		buffer_size = binder_buffer_size(proc, buffer);
-	}
-
-	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
-		      proc->pid, size, buffer, buffer_size);
-
-	has_page_addr =
-		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
-	if (n == NULL) {
-		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
-			buffer_size = size; /* no room for other buffers */
-		else
-			buffer_size = size + sizeof(struct binder_buffer);
-	}
-	end_page_addr =
-		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
-	if (end_page_addr > has_page_addr)
-		end_page_addr = has_page_addr;
-	if (binder_update_page_range(proc, 1,
-	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
-		return NULL;
-
-	rb_erase(best_fit, &proc->free_buffers);
-	buffer->free = 0;
-	binder_insert_allocated_buffer(proc, buffer);
-	if (buffer_size != size) {
-		struct binder_buffer *new_buffer = (void *)buffer->data + size;
-
-		list_add(&new_buffer->entry, &buffer->entry);
-		new_buffer->free = 1;
-		binder_insert_free_buffer(proc, new_buffer);
-	}
-	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: binder_alloc_buf size %zd got %p\n",
-		      proc->pid, size, buffer);
-	buffer->data_size = data_size;
-	buffer->offsets_size = offsets_size;
-	buffer->async_transaction = is_async;
-	if (is_async) {
-		proc->free_async_space -= size + sizeof(struct binder_buffer);
-		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
-			     "%d: binder_alloc_buf size %zd async free %zd\n",
-			      proc->pid, size, proc->free_async_space);
-	}
-
-	return buffer;
-}
-
-static void *buffer_start_page(struct binder_buffer *buffer)
-{
-	return (void *)((uintptr_t)buffer & PAGE_MASK);
-}
-
-static void *buffer_end_page(struct binder_buffer *buffer)
-{
-	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
-}
-
-static void binder_delete_free_buffer(struct binder_proc *proc,
-				      struct binder_buffer *buffer)
-{
-	struct binder_buffer *prev, *next = NULL;
-	int free_page_end = 1;
-	int free_page_start = 1;
-
-	BUG_ON(proc->buffers.next == &buffer->entry);
-	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
-	BUG_ON(!prev->free);
-	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
-		free_page_start = 0;
-		if (buffer_end_page(prev) == buffer_end_page(buffer))
-			free_page_end = 0;
-		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%d: merge free, buffer %p share page with %p\n",
-			     proc->pid, buffer, prev);
-	}
-
-	if (!list_is_last(&buffer->entry, &proc->buffers)) {
-		next = list_entry(buffer->entry.next,
-				  struct binder_buffer, entry);
-		if (buffer_start_page(next) == buffer_end_page(buffer)) {
-			free_page_end = 0;
-			if (buffer_start_page(next) ==
-			    buffer_start_page(buffer))
-				free_page_start = 0;
-			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-				     "%d: merge free, buffer %p share page with %p\n",
-				     proc->pid, buffer, prev);
-		}
-	}
-	list_del(&buffer->entry);
-	if (free_page_start || free_page_end) {
-		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
-			     proc->pid, buffer, free_page_start ? "" : " end",
-			     free_page_end ? "" : " start", prev, next);
-		binder_update_page_range(proc, 0, free_page_start ?
-			buffer_start_page(buffer) : buffer_end_page(buffer),
-			(free_page_end ? buffer_end_page(buffer) :
-			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
-	}
-}
-
-static void binder_free_buf(struct binder_proc *proc,
-			    struct binder_buffer *buffer)
-{
-	size_t size, buffer_size;
-
-	buffer_size = binder_buffer_size(proc, buffer);
-
-	size = ALIGN(buffer->data_size, sizeof(void *)) +
-		ALIGN(buffer->offsets_size, sizeof(void *));
-
-	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: binder_free_buf %p size %zd buffer_size %zd\n",
-		      proc->pid, buffer, size, buffer_size);
-
-	BUG_ON(buffer->free);
-	BUG_ON(size > buffer_size);
-	BUG_ON(buffer->transaction != NULL);
-	BUG_ON((void *)buffer < proc->buffer);
-	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
-
-	if (buffer->async_transaction) {
-		proc->free_async_space += size + sizeof(struct binder_buffer);
-
-		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
-			     "%d: binder_free_buf size %zd async free %zd\n",
-			      proc->pid, size, proc->free_async_space);
-	}
-
-	binder_update_page_range(proc, 0,
-		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
-		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
-		NULL);
-	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
-	buffer->free = 1;
-	if (!list_is_last(&buffer->entry, &proc->buffers)) {
-		struct binder_buffer *next = list_entry(buffer->entry.next,
-						struct binder_buffer, entry);
-
-		if (next->free) {
-			rb_erase(&next->rb_node, &proc->free_buffers);
-			binder_delete_free_buffer(proc, next);
-		}
-	}
-	if (proc->buffers.next != &buffer->entry) {
-		struct binder_buffer *prev = list_entry(buffer->entry.prev,
-						struct binder_buffer, entry);
-
-		if (prev->free) {
-			binder_delete_free_buffer(proc, buffer);
-			rb_erase(&prev->rb_node, &proc->free_buffers);
-			buffer = prev;
-		}
-	}
-	binder_insert_free_buffer(proc, buffer);
-}
-
-static struct binder_node *binder_get_node(struct binder_proc *proc,
-					   binder_uintptr_t ptr)
-{
-	struct rb_node *n = proc->nodes.rb_node;
-	struct binder_node *node;
-
-	while (n) {
-		node = rb_entry(n, struct binder_node, rb_node);
-
-		if (ptr < node->ptr)
-			n = n->rb_left;
-		else if (ptr > node->ptr)
-			n = n->rb_right;
-		else
-			return node;
-	}
-	return NULL;
-}
-
-static struct binder_node *binder_new_node(struct binder_proc *proc,
-					   binder_uintptr_t ptr,
-					   binder_uintptr_t cookie)
-{
-	struct rb_node **p = &proc->nodes.rb_node;
-	struct rb_node *parent = NULL;
-	struct binder_node *node;
-
-	while (*p) {
-		parent = *p;
-		node = rb_entry(parent, struct binder_node, rb_node);
-
-		if (ptr < node->ptr)
-			p = &(*p)->rb_left;
-		else if (ptr > node->ptr)
-			p = &(*p)->rb_right;
-		else
-			return NULL;
-	}
-
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
-	if (node == NULL)
-		return NULL;
-	binder_stats_created(BINDER_STAT_NODE);
-	rb_link_node(&node->rb_node, parent, p);
-	rb_insert_color(&node->rb_node, &proc->nodes);
-	node->debug_id = ++binder_last_id;
-	node->proc = proc;
-	node->ptr = ptr;
-	node->cookie = cookie;
-	node->work.type = BINDER_WORK_NODE;
-	INIT_LIST_HEAD(&node->work.entry);
-	INIT_LIST_HEAD(&node->async_todo);
-	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
-		     "%d:%d node %d u%016llx c%016llx created\n",
-		     proc->pid, current->pid, node->debug_id,
-		     (u64)node->ptr, (u64)node->cookie);
-	return node;
-}
-
-static int binder_inc_node(struct binder_node *node, int strong, int internal,
-			   struct list_head *target_list)
-{
-	if (strong) {
-		if (internal) {
-			if (target_list == NULL &&
-			    node->internal_strong_refs == 0 &&
-			    !(node == binder_context_mgr_node &&
-			      node->has_strong_ref)) {
-				pr_err("invalid inc strong node for %d\n",
-					node->debug_id);
-				return -EINVAL;
-			}
-			node->internal_strong_refs++;
-		} else
-			node->local_strong_refs++;
-		if (!node->has_strong_ref && target_list) {
-			list_del_init(&node->work.entry);
-			list_add_tail(&node->work.entry, target_list);
-		}
-	} else {
-		if (!internal)
-			node->local_weak_refs++;
-		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
-			if (target_list == NULL) {
-				pr_err("inva
