Diffstat (limited to 'drivers/android/binder.c')
 drivers/android/binder.c | 3677 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 3677 insertions(+), 0 deletions(-)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
new file mode 100644
index 000000000000..8c43521d3f11
--- /dev/null
+++ b/drivers/android/binder.c
@@ -0,0 +1,3677 @@
+/* binder.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/cacheflush.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/freezer.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nsproxy.h>
+#include <linux/poll.h>
+#include <linux/debugfs.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/pid_namespace.h>
+
+#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
+#define BINDER_IPC_32BIT 1
+#endif
+
+#include <uapi/linux/android/binder.h>
+#include "binder_trace.h"
+
+static DEFINE_MUTEX(binder_main_lock);
+static DEFINE_MUTEX(binder_deferred_lock);
+static DEFINE_MUTEX(binder_mmap_lock);
+
+static HLIST_HEAD(binder_procs);
+static HLIST_HEAD(binder_deferred_list);
+static HLIST_HEAD(binder_dead_nodes);
+
+static struct dentry *binder_debugfs_dir_entry_root;
+static struct dentry *binder_debugfs_dir_entry_proc;
+static struct binder_node *binder_context_mgr_node;
+static kuid_t binder_context_mgr_uid = INVALID_UID;
+static int binder_last_id;
+static struct workqueue_struct *binder_deferred_workqueue;
+
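+/*
+ * BINDER_DEBUG_ENTRY(name) generates the boilerplate that exposes a
+ * binder_##name##_show() routine as a read-only debugfs file through
+ * the seq_file single_open() helpers.
+ */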
+#define BINDER_DEBUG_ENTRY(name) \
+static int binder_##name##_open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, binder_##name##_show, inode->i_private); \
+} \
+\
+static const struct file_operations binder_##name##_fops = { \
+ .owner = THIS_MODULE, \
+ .open = binder_##name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+static int binder_proc_show(struct seq_file *m, void *unused);
+BINDER_DEBUG_ENTRY(proc);
+
+/* This is only defined in include/asm-arm/sizes.h */
+#ifndef SZ_1K
+#define SZ_1K 0x400
+#endif
+
+#ifndef SZ_4M
+#define SZ_4M 0x400000
+#endif
+
+#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
+
+#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
+
+enum {
+ BINDER_DEBUG_USER_ERROR = 1U << 0,
+ BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
+ BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
+ BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
+ BINDER_DEBUG_DEAD_BINDER = 1U << 4,
+ BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
+ BINDER_DEBUG_READ_WRITE = 1U << 6,
+ BINDER_DEBUG_USER_REFS = 1U << 7,
+ BINDER_DEBUG_THREADS = 1U << 8,
+ BINDER_DEBUG_TRANSACTION = 1U << 9,
+ BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
+ BINDER_DEBUG_FREE_BUFFER = 1U << 11,
+ BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
+ BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
+ BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
+ BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
+};
+static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
+ BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
+module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
+
+static bool binder_debug_no_lock;
+module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
+
+static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
+static int binder_stop_on_user_error;
+
+static int binder_set_stop_on_user_error(const char *val,
+ struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_int(val, kp);
+ if (binder_stop_on_user_error < 2)
+ wake_up(&binder_user_error_wait);
+ return ret;
+}
+module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
+ param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
+
+#define binder_debug(mask, x...) \
+ do { \
+ if (binder_debug_mask & mask) \
+ pr_info(x); \
+ } while (0)
+
+#define binder_user_error(x...) \
+ do { \
+ if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
+ pr_info(x); \
+ if (binder_stop_on_user_error) \
+ binder_stop_on_user_error = 2; \
+ } while (0)
+
+enum binder_stat_types {
+ BINDER_STAT_PROC,
+ BINDER_STAT_THREAD,
+ BINDER_STAT_NODE,
+ BINDER_STAT_REF,
+ BINDER_STAT_DEATH,
+ BINDER_STAT_TRANSACTION,
+ BINDER_STAT_TRANSACTION_COMPLETE,
+ BINDER_STAT_COUNT
+};
+
+struct binder_stats {
+ int br[_IOC_NR(BR_FAILED_REPLY) + 1];
+ int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
+ int obj_created[BINDER_STAT_COUNT];
+ int obj_deleted[BINDER_STAT_COUNT];
+};
+
+static struct binder_stats binder_stats;
+
+static inline void binder_stats_deleted(enum binder_stat_types type)
+{
+ binder_stats.obj_deleted[type]++;
+}
+
+static inline void binder_stats_created(enum binder_stat_types type)
+{
+ binder_stats.obj_created[type]++;
+}
+
+struct binder_transaction_log_entry {
+ int debug_id;
+ int call_type;
+ int from_proc;
+ int from_thread;
+ int target_handle;
+ int to_proc;
+ int to_thread;
+ int to_node;
+ int data_size;
+ int offsets_size;
+};
+struct binder_transaction_log {
+ int next;
+ int full;
+ struct binder_transaction_log_entry entry[32];
+};
+static struct binder_transaction_log binder_transaction_log;
+static struct binder_transaction_log binder_transaction_log_failed;
+
+static struct binder_transaction_log_entry *binder_transaction_log_add(
+ struct binder_transaction_log *log)
+{
+ struct binder_transaction_log_entry *e;
+
+ e = &log->entry[log->next];
+ memset(e, 0, sizeof(*e));
+ log->next++;
+ if (log->next == ARRAY_SIZE(log->entry)) {
+ log->next = 0;
+ log->full = 1;
+ }
+ return e;
+}
+
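+/*
+ * One queued work item on a proc or thread todo list.  The type tag
+ * tells the consumer which containing structure to recover with
+ * container_of().
+ */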
+struct binder_work {
+ struct list_head entry;
+ enum {
+ BINDER_WORK_TRANSACTION = 1,
+ BINDER_WORK_TRANSACTION_COMPLETE,
+ BINDER_WORK_NODE,
+ BINDER_WORK_DEAD_BINDER,
+ BINDER_WORK_DEAD_BINDER_AND_CLEAR,
+ BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
+ } type;
+};
+
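+/*
+ * A binder_node is one binder object belonging to a local process.
+ * Remote holders are chained on "refs" as binder_ref entries; when the
+ * owning process exits, the node is moved onto binder_dead_nodes, which
+ * is why rb_node and dead_node share a union.
+ */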
+struct binder_node {
+ int debug_id;
+ struct binder_work work;
+ union {
+ struct rb_node rb_node;
+ struct hlist_node dead_node;
+ };
+ struct binder_proc *proc;
+ struct hlist_head refs;
+ int internal_strong_refs;
+ int local_weak_refs;
+ int local_strong_refs;
+ binder_uintptr_t ptr;
+ binder_uintptr_t cookie;
+ unsigned has_strong_ref:1;
+ unsigned pending_strong_ref:1;
+ unsigned has_weak_ref:1;
+ unsigned pending_weak_ref:1;
+ unsigned has_async_transaction:1;
+ unsigned accept_fds:1;
+ unsigned min_priority:8;
+ struct list_head async_todo;
+};
+
+struct binder_ref_death {
+ struct binder_work work;
+ binder_uintptr_t cookie;
+};
+
+struct binder_ref {
+ /* Lookups needed: */
+ /* node + proc => ref (transaction) */
+ /* desc + proc => ref (transaction, inc/dec ref) */
+ /* node => refs + procs (proc exit) */
+ int debug_id;
+ struct rb_node rb_node_desc;
+ struct rb_node rb_node_node;
+ struct hlist_node node_entry;
+ struct binder_proc *proc;
+ struct binder_node *node;
+ uint32_t desc;
+ int strong;
+ int weak;
+ struct binder_ref_death *death;
+};
+
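+/*
+ * One region carved out of a proc's mmap'ed transaction area.  All
+ * buffers live on proc->buffers in address order; free buffers are also
+ * indexed by size in free_buffers, allocated ones by address in
+ * allocated_buffers.
+ */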
+struct binder_buffer {
+ struct list_head entry; /* free and allocated entries by address */
+ struct rb_node rb_node; /* free entry by size or allocated entry */
+ /* by address */
+ unsigned free:1;
+ unsigned allow_user_free:1;
+ unsigned async_transaction:1;
+ unsigned debug_id:29;
+
+ struct binder_transaction *transaction;
+
+ struct binder_node *target_node;
+ size_t data_size;
+ size_t offsets_size;
+ uint8_t data[0];
+};
+
+enum binder_deferred_state {
+ BINDER_DEFERRED_PUT_FILES = 0x01,
+ BINDER_DEFERRED_FLUSH = 0x02,
+ BINDER_DEFERRED_RELEASE = 0x04,
+};
+
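+/*
+ * Per-process state, created on open().  "buffer" is the kernel-side
+ * mapping of the transaction area; user_buffer_offset is the constant
+ * delta to the userspace vma, so addresses convert between the two
+ * views with simple arithmetic.
+ */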
+struct binder_proc {
+ struct hlist_node proc_node;
+ struct rb_root threads;
+ struct rb_root nodes;
+ struct rb_root refs_by_desc;
+ struct rb_root refs_by_node;
+ int pid;
+ struct vm_area_struct *vma;
+ struct mm_struct *vma_vm_mm;
+ struct task_struct *tsk;
+ struct files_struct *files;
+ struct hlist_node deferred_work_node;
+ int deferred_work;
+ void *buffer;
+ ptrdiff_t user_buffer_offset;
+
+ struct list_head buffers;
+ struct rb_root free_buffers;
+ struct rb_root allocated_buffers;
+ size_t free_async_space;
+
+ struct page **pages;
+ size_t buffer_size;
+ uint32_t buffer_free;
+ struct list_head todo;
+ wait_queue_head_t wait;
+ struct binder_stats stats;
+ struct list_head delivered_death;
+ int max_threads;
+ int requested_threads;
+ int requested_threads_started;
+ int ready_threads;
+ long default_priority;
+ struct dentry *debugfs_entry;
+};
+
+enum {
+ BINDER_LOOPER_STATE_REGISTERED = 0x01,
+ BINDER_LOOPER_STATE_ENTERED = 0x02,
+ BINDER_LOOPER_STATE_EXITED = 0x04,
+ BINDER_LOOPER_STATE_INVALID = 0x08,
+ BINDER_LOOPER_STATE_WAITING = 0x10,
+ BINDER_LOOPER_STATE_NEED_RETURN = 0x20
+};
+
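+/*
+ * Per-thread state, kept in proc->threads keyed by pid.
+ * transaction_stack links the transactions this thread is part of,
+ * which is how a reply finds its way back to the original caller.
+ */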
+struct binder_thread {
+ struct binder_proc *proc;
+ struct rb_node rb_node;
+ int pid;
+ int looper;
+ struct binder_transaction *transaction_stack;
+ struct list_head todo;
+ uint32_t return_error; /* Write failed, return error code in read buf */
+ uint32_t return_error2; /* Write failed, return error code in read */
+ /* buffer. Used when sending a reply to a dead process that */
+ /* we are also waiting on */
+ wait_queue_head_t wait;
+ struct binder_stats stats;
+};
+
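+/*
+ * An in-flight transaction.  For synchronous calls, from/to_thread tie
+ * caller to callee; saved_priority is used to restore the callee's
+ * priority once it has finished servicing the call.
+ */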
+struct binder_transaction {
+ int debug_id;
+ struct binder_work work;
+ struct binder_thread *from;
+ struct binder_transaction *from_parent;
+ struct binder_proc *to_proc;
+ struct binder_thread *to_thread;
+ struct binder_transaction *to_parent;
+ unsigned need_reply:1;
+ /* unsigned is_dead:1; */ /* not used at the moment */
+
+ struct binder_buffer *buffer;
+ unsigned int code;
+ unsigned int flags;
+ long priority;
+ long saved_priority;
+ kuid_t sender_euid;
+};
+
+static void
+binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
+
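+/*
+ * The task_*_fd helpers below mirror get_unused_fd_flags/fd_install/
+ * sys_close but act on proc->files instead of current's file table,
+ * since fds carried in a transaction are installed in the receiving
+ * process.
+ */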
+static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
+{
+ struct files_struct *files = proc->files;
+ unsigned long rlim_cur;
+ unsigned long irqs;
+
+ if (files == NULL)
+ return -ESRCH;
+
+ if (!lock_task_sighand(proc->tsk, &irqs))
+ return -EMFILE;
+
+ rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
+ unlock_task_sighand(proc->tsk, &irqs);
+
+ return __alloc_fd(files, 0, rlim_cur, flags);
+}
+
+/*
+ * copied from fd_install
+ */
+static void task_fd_install(
+ struct binder_proc *proc, unsigned int fd, struct file *file)
+{
+ if (proc->files)
+ __fd_install(proc->files, fd, file);
+}
+
+/*
+ * copied from sys_close
+ */
+static long task_close_fd(struct binder_proc *proc, unsigned int fd)
+{
+ int retval;
+
+ if (proc->files == NULL)
+ return -ESRCH;
+
+ retval = __close_fd(proc->files, fd);
+ /* can't restart close syscall because file table entry was cleared */
+ if (unlikely(retval == -ERESTARTSYS ||
+ retval == -ERESTARTNOINTR ||
+ retval == -ERESTARTNOHAND ||
+ retval == -ERESTART_RESTARTBLOCK))
+ retval = -EINTR;
+
+ return retval;
+}
+
+static inline void binder_lock(const char *tag)
+{
+ trace_binder_lock(tag);
+ mutex_lock(&binder_main_lock);
+ trace_binder_locked(tag);
+}
+
+static inline void binder_unlock(const char *tag)
+{
+ trace_binder_unlock(tag);
+ mutex_unlock(&binder_main_lock);
+}
+
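+/* Apply the requested nice value, clamped to what RLIMIT_NICE permits. */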
+static void binder_set_nice(long nice)
+{
+ long min_nice;
+
+ if (can_nice(current, nice)) {
+ set_user_nice(current, nice);
+ return;
+ }
+ min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
+ binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+ "%d: nice value %ld not allowed use %ld instead\n",
+ current->pid, nice, min_nice);
+ set_user_nice(current, min_nice);
+ if (min_nice <= MAX_NICE)
+ return;
+ binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
+}
+
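+/*
+ * Buffers carry no explicit length; a buffer's size is the gap between
+ * its data[] and the next buffer on proc->buffers (or the end of the
+ * transaction area for the last entry).
+ */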
+static size_t binder_buffer_size(struct binder_proc *proc,
+ struct binder_buffer *buffer)
+{
+ if (list_is_last(&buffer->entry, &proc->buffers))
+ return proc->buffer + proc->buffer_size - (void *)buffer->data;
+ return (size_t)list_entry(buffer->entry.next,
+ struct binder_buffer, entry) - (size_t)buffer->data;
+}
+
+static void binder_insert_free_buffer(struct binder_proc *proc,
+ struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &proc->free_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ size_t new_buffer_size;
+
+ BUG_ON(!new_buffer->free);
+
+ new_buffer_size = binder_buffer_size(proc, new_buffer);
+
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: add free buffer, size %zd, at %p\n",
+ proc->pid, new_buffer_size, new_buffer);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+
+ buffer_size = binder_buffer_size(proc, buffer);
+
+ if (new_buffer_size < buffer_size)
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
+}
+
+static void binder_insert_allocated_buffer(struct binder_proc *proc,
+ struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &proc->allocated_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+
+ BUG_ON(new_buffer->free);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (new_buffer < buffer)
+ p = &parent->rb_left;
+ else if (new_buffer > buffer)
+ p = &parent->rb_right;
+ else
+ BUG();
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
+}
+
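+/*
+ * Convert a userspace buffer pointer back to its binder_buffer header
+ * (undo user_buffer_offset, step back over the header) and verify it
+ * is a live allocation by looking it up in allocated_buffers.
+ */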
+static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
+ uintptr_t user_ptr)
+{
+ struct rb_node *n = proc->allocated_buffers.rb_node;
+ struct binder_buffer *buffer;
+ struct binder_buffer *kern_ptr;
+
+ kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
+ - offsetof(struct binder_buffer, data));
+
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (kern_ptr < buffer)
+ n = n->rb_left;
+ else if (kern_ptr > buffer)
+ n = n->rb_right;
+ else
+ return buffer;
+ }
+ return NULL;
+}
+
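+/*
+ * Allocate (allocate != 0) or free the physical pages backing
+ * [start, end) of the buffer area.  Each allocated page is mapped
+ * twice: into the kernel with map_vm_area() and into the target
+ * process with vm_insert_page().  The error labels double as the
+ * unwind path when an allocation fails partway through the range.
+ */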
+static int binder_update_page_range(struct binder_proc *proc, int allocate,
+ void *start, void *end,
+ struct vm_area_struct *vma)
+{
+ void *page_addr;
+ unsigned long user_page_addr;
+ struct vm_struct tmp_area;
+ struct page **page;
+ struct mm_struct *mm;
+
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: %s pages %p-%p\n", proc->pid,
+ allocate ? "allocate" : "free", start, end);
+
+ if (end <= start)
+ return 0;
+
+ trace_binder_update_page_range(proc, allocate, start, end);
+
+ if (vma)
+ mm = NULL;
+ else
+ mm = get_task_mm(proc->tsk);
+
+ if (mm) {
+ down_write(&mm->mmap_sem);
+ vma = proc->vma;
+ if (vma && mm != proc->vma_vm_mm) {
+ pr_err("%d: vma mm and task mm mismatch\n",
+ proc->pid);
+ vma = NULL;
+ }
+ }
+
+ if (allocate == 0)
+ goto free_range;
+
+ if (vma == NULL) {
+ pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
+ proc->pid);
+ goto err_no_vma;
+ }
+
+ for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+ int ret;
+
+ page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+
+ BUG_ON(*page);
+ *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+ if (*page == NULL) {
+ pr_err("%d: binder_alloc_buf failed for page at %p\n",
+ proc->pid, page_addr);
+ goto err_alloc_page_failed;
+ }
+ tmp_area.addr = page_addr;
+ tmp_area.size = PAGE_SIZE + PAGE_SIZE; /* map_vm_area() expects a trailing guard page */
+ ret = map_vm_area(&tmp_area, PAGE_KERNEL, page);
+ if (ret) {
+ pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
+ proc->pid, page_addr);
+ goto err_map_kernel_failed;
+ }
+ user_page_addr =
+ (uintptr_t)page_addr + proc->user_buffer_offset;
+ ret = vm_insert_page(vma, user_page_addr, page[0]);
+ if (ret) {
+ pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
+ proc->pid, user_page_addr);
+ goto err_vm_insert_page_failed;
+ }
+ /* vm_insert_page does not seem to increment the refcount */
+ }
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return 0;
+
+free_range:
+ for (page_addr = end - PAGE_SIZE; page_addr >= start;
+ page_addr -= PAGE_SIZE) {
+ page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+ if (vma)
+ zap_page_range(vma, (uintptr_t)page_addr +
+ proc->user_buffer_offset, PAGE_SIZE, NULL);
+err_vm_insert_page_failed:
+ unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+err_map_kernel_failed:
+ __free_page(*page);
+ *page = NULL;
+err_alloc_page_failed:
+ ;
+ }
+err_no_vma:
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return -ENOMEM;
+}
+
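+/*
+ * Best-fit allocation: walk free_buffers (sorted by size) for the
+ * smallest free buffer that fits, back it with pages, and if the
+ * leftover can hold another binder_buffer header, split it off and
+ * return it to the free tree.
+ */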
+static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
+ size_t data_size,
+ size_t offsets_size, int is_async)
+{
+ struct rb_node *n = proc->free_buffers.rb_node;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ struct rb_node *best_fit = NULL;
+ void *has_page_addr;
+ void *end_page_addr;
+ size_t size;
+
+ if (proc->vma == NULL) {
+ pr_err("%d: binder_alloc_buf, no vma\n",
+ proc->pid);
+ return NULL;
+ }
+
+ size = ALIGN(data_size, sizeof(void *)) +
+ ALIGN(offsets_size, sizeof(void *));
+
+ if (size < data_size || size < offsets_size) {
+ binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
+ proc->pid, data_size, offsets_size);
+ return NULL;
+ }
+
+ if (is_async &&
+ proc->free_async_space < size + sizeof(struct binder_buffer)) {
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd failed, no async space left\n",
+ proc->pid, size);
+ return NULL;
+ }
+
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+ buffer_size = binder_buffer_size(proc, buffer);
+
+ if (size < buffer_size) {
+ best_fit = n;
+ n = n->rb_left;
+ } else if (size > buffer_size)
+ n = n->rb_right;
+ else {
+ best_fit = n;
+ break;
+ }
+ }
+ if (best_fit == NULL) {
+ pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
+ proc->pid, size);
+ return NULL;
+ }
+ if (n == NULL) {
+ buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
+ buffer_size = binder_buffer_size(proc, buffer);
+ }
+
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
+ proc->pid, size, buffer, buffer_size);
+
+ has_page_addr =
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
+ if (n == NULL) {
+ if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
+ buffer_size = size; /* no room for other buffers */
+ else
+ buffer_size = size + sizeof(struct binder_buffer);
+ }
+ end_page_addr =
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+ if (end_page_addr > has_page_addr)
+ end_page_addr = has_page_addr;
+ if (binder_update_page_range(proc, 1,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
+ return NULL;
+
+ rb_erase(best_fit, &proc->free_buffers);
+ buffer->free = 0;
+ binder_insert_allocated_buffer(proc, buffer);
+ if (buffer_size != size) {
+ struct binder_buffer *new_buffer = (void *)buffer->data + size;
+
+ list_add(&new_buffer->entry, &buffer->entry);
+ new_buffer->free = 1;
+ binder_insert_free_buffer(proc, new_buffer);
+ }
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd got %p\n",
+ proc->pid, size, buffer);
+ buffer->data_size = data_size;
+ buffer->offsets_size = offsets_size;
+ buffer->async_transaction = is_async;
+ if (is_async) {
+ proc->free_async_space -= size + sizeof(struct binder_buffer);
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "%d: binder_alloc_buf size %zd async free %zd\n",
+ proc->pid, size, proc->free_async_space);
+ }
+
+ return buffer;
+}
+
+static void *buffer_start_page(struct binder_buffer *buffer)
+{
+ return (void *)((uintptr_t)buffer & PAGE_MASK);
+}
+
+static void *buffer_end_page(struct binder_buffer *buffer)
+{
+ return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+}
+
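+/*
+ * Unlink a free buffer that is being merged with a free neighbour and
+ * release whichever of its pages are not shared with the buffers on
+ * either side.
+ */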
+static void binder_delete_free_buffer(struct binder_proc *proc,
+ struct binder_buffer *buffer)
+{
+ struct binder_buffer *prev, *next = NULL;
+ int free_page_end = 1;
+ int free_page_start = 1;
+
+ BUG_ON(proc->buffers.next == &buffer->entry);
+ prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
+ BUG_ON(!prev->free);
+ if (buffer_end_page(prev) == buffer_start_page(buffer)) {
+ free_page_start = 0;
+ if (buffer_end_page(prev) == buffer_end_page(buffer))
+ free_page_end = 0;
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %p share page with %p\n",
+ proc->pid, buffer, prev);
+ }
+
+ if (!list_is_last(&buffer->entry, &proc->buffers)) {
+ next = list_entry(buffer->entry.next,
+ struct binder_buffer, entry);
+ if (buffer_start_page(next) == buffer_end_page(buffer)) {
+ free_page_end = 0;
+ if (buffer_start_page(next) ==
+ buffer_start_page(buffer))
+ free_page_start = 0;
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %p share page with %p\n",
+ proc->pid, buffer, next);
+ }
+ }
+ list_del(&buffer->entry);
+ if (free_page_start || free_page_end) {
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
+ proc->pid, buffer, free_page_start ? "" : " end",
+ free_page_end ? "" : " start", prev, next);
+ binder_update_page_range(proc, 0, free_page_start ?
+ buffer_start_page(buffer) : buffer_end_page(buffer),
+ (free_page_end ? buffer_end_page(buffer) :
+ buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+ }
+}
+
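+/*
+ * Return a buffer to the free tree: release the pages it fully covers
+ * and coalesce it with free neighbours on either side.
+ */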
+static void binder_free_buf(struct binder_proc *proc,
+ struct binder_buffer *buffer)
+{
+ size_t size, buffer_size;
+
+ buffer_size = binder_buffer_size(proc, buffer);
+
+ size = ALIGN(buffer->data_size, sizeof(void *)) +
+ ALIGN(buffer->offsets_size, sizeof(void *));
+
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_free_buf %p size %zd buffer_size %zd\n",
+ proc->pid, buffer, size, buffer_size);
+
+ BUG_ON(buffer->free);
+ BUG_ON(size > buffer_size);
+ BUG_ON(buffer->transaction != NULL);
+ BUG_ON((void *)buffer < proc->buffer);
+ BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
+
+ if (buffer->async_transaction) {
+ proc->free_async_space += size + sizeof(struct binder_buffer);
+
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "%d: binder_free_buf size %zd async free %zd\n",
+ proc->pid, size, proc->free_async_space);
+ }
+
+ binder_update_page_range(proc, 0,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
+ NULL);
+ rb_erase(&buffer->rb_node, &proc->allocated_buffers);
+ buffer->free = 1;
+ if (!list_is_last(&buffer->entry, &proc->buffers)) {
+ struct binder_buffer *next = list_entry(buffer->entry.next,
+ struct binder_buffer, entry);
+
+ if (next->free) {
+ rb_erase(&next->rb_node, &proc->free_buffers);
+ binder_delete_free_buffer(proc, next);
+ }
+ }
+ if (proc->buffers.next != &buffer->entry) {
+ struct binder_buffer *prev = list_entry(buffer->entry.prev,
+ struct binder_buffer, entry);
+
+ if (prev->free) {
+ binder_delete_free_buffer(proc, buffer);
+ rb_erase(&prev->rb_node, &proc->free_buffers);
+ buffer = prev;
+ }
+ }
+ binder_insert_free_buffer(proc, buffer);
+}
+
+static struct binder_node *binder_get_node(struct binder_proc *proc,
+ binder_uintptr_t ptr)
+{
+ struct rb_node *n = proc->nodes.rb_node;
+ struct binder_node *node;
+
+ while (n) {
+ node = rb_entry(n, struct binder_node, rb_node);
+
+ if (ptr < node->ptr)
+ n = n->rb_left;
+ else if (ptr > node->ptr)
+ n = n->rb_right;
+ else
+ return node;
+ }
+ return NULL;
+}
+
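+/*
+ * Create a node keyed by its userspace pointer; returns NULL if a node
+ * already exists for that ptr or the allocation fails.
+ */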
+static struct binder_node *binder_new_node(struct binder_proc *proc,
+ binder_uintptr_t ptr,
+ binder_uintptr_t cookie)
+{
+ struct rb_node **p = &proc->nodes.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_node *node;
+
+ while (*p) {
+ parent = *p;
+ node = rb_entry(parent, struct binder_node, rb_node);
+
+ if (ptr < node->ptr)
+ p = &(*p)->rb_left;
+ else if (ptr > node->ptr)
+ p = &(*p)->rb_right;
+ else
+ return NULL;
+ }
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (node == NULL)
+ return NULL;
+ binder_stats_created(BINDER_STAT_NODE);
+ rb_link_node(&node->rb_node, parent, p);
+ rb_insert_color(&node->rb_node, &proc->nodes);
+ node->debug_id = ++binder_last_id;
+ node->proc = proc;
+ node->ptr = ptr;
+ node->cookie = cookie;
+ node->work.type = BINDER_WORK_NODE;
+ INIT_LIST_HEAD(&node->work.entry);
+ INIT_LIST_HEAD(&node->async_todo);
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d:%d node %d u%016llx c%016llx created\n",
+ proc->pid, current->pid, node->debug_id,
+ (u64)node->ptr, (u64)node->cookie);
+ return node;
+}
+
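+/*
+ * internal refs count binder_ref entries held by other processes;
+ * local refs pin the node for in-flight work.  Gaining a first strong
+ * or weak ref queues the node on target_list so the owner can be told
+ * to take its own reference.
+ */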
+static int binder_inc_node(struct binder_node *node, int strong, int internal,
+ struct list_head *target_list)
+{
+ if (strong) {
+ if (internal) {
+ if (target_list == NULL &&
+ node->internal_strong_refs == 0 &&
+ !(node == binder_context_mgr_node &&
+ node->has_strong_ref)) {
+ pr_err("invalid inc strong node for %d\n",
+ node->debug_id);
+ return -EINVAL;
+ }
+ node->internal_strong_refs++;
+ } else
+ node->local_strong_refs++;
+ if (!node->has_strong_ref && target_list) {
+ list_del_init(&node->work.entry);
+ list_add_tail(&node->work.entry, target_list);
+ }
+ } else {
+ if (!internal)
+ node->local_weak_refs++;
+ if (!node->has_weak_ref && list_empty(&node->work.entry)) {
+ if (target_list == NULL) {
+ pr_err("invalid inc weak node for %d\n",
+ node->debug_id);
+ return -EINVAL;
+ }
+ list_add_tail(&node->work.entry, target_list);
+ }
+ }
+ return 0;
+}
+
+static int binder_dec_node(struct binder_node *node, int strong, int internal)
+{
+ if (strong) {
+ if (internal)
+ node->internal_strong_refs--;
+ else
+ node->local_strong_refs--;
+ if (node->local_strong_refs || node->internal_strong_refs)
+ return 0;
+ } else {
+ if (!internal)
+ node->local_weak_refs--;
+ if (node->local_weak_refs || !hlist_empty(&node->refs))
+ return 0;
+ }
+ if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
+ if (list_empty(&node->work.entry)) {
+ list_add_tail(&node->work.entry, &node->proc->todo);
+ wake_up_interruptible(&node->proc->wait);
+ }
+ } else {
+ if (hlist_empty(&node->refs) && !node->local_strong_refs &&
+ !node->local_weak_refs) {
+ list_del_init(&node->work.entry);
+ if (node->proc) {
+ rb_erase(&node->rb_node, &node->proc->nodes);
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "refless node %d deleted\n",
+ node->debug_id);
+ } else {
+ hlist_del(&node->dead_node);
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "dead node %d deleted\n",
+ node->debug_id);
+ }
+ kfree(node);
+ binder_stats_deleted(BINDER_STAT_NODE);
+ }
+ }
+
+ return 0;
+}
+
+
+static struct binder_ref *binder_get_ref(struct binder_proc *proc,
+ uint32_t desc)
+{
+ struct rb_node *n = proc->refs_by_desc.rb_node;
+ struct binder_ref *ref;
+
+ while (n) {
+ ref = rb_entry(n, struct binder_ref, rb_node_desc);
+
+ if (desc < ref->desc)
+ n = n->rb_left;
+ else if (desc > ref->desc)
+ n = n->rb_right;
+ else
+ return ref;
+ }
+ return NULL;
+}
+
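+/*
+ * Find or create this proc's ref for a node.  Descriptors are assigned
+ * lowest-available-first (0 is reserved for the context manager),
+ * hence the in-order scan of refs_by_desc for the first gap.
+ */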
+static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
+ struct binder_node *node)
+{
+ struct rb_node *n;
+ struct rb_node **p = &proc->refs_by_node.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_ref *ref, *new_ref;
+
+ while (*p) {
+ parent = *p;
+ ref = rb_entry(parent, struct binder_ref, rb_node_node);
+
+ if (node < ref->node)
+ p = &(*p)->rb_left;
+ else if (node > ref->node)
+ p = &(*p)->rb_right;
+ else
+ return ref;
+ }
+ new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (new_ref == NULL)
+ return NULL;
+ binder_stats_created(BINDER_STAT_REF);
+ new_ref->debug_id = ++binder_last_id;
+ new_ref->proc = proc;
+ new_ref->node = node;
+ rb_link_node(&new_ref->rb_node_node, parent, p);
+ rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
+
+ new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
+ for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+ ref = rb_entry(n, struct binder_ref, rb_node_desc);
+ if (ref->desc > new_ref->desc)
+ break;
+ new_ref->desc = ref->desc + 1;
+ }
+
+ p = &proc->refs_by_desc.rb_node;
+ while (*p) {
+ parent = *p;
+ ref = rb_entry(parent, struct binder_ref, rb_node_desc);
+
+ if (new_ref->desc < ref->desc)
+ p = &(*p)->rb_left;
+ else if (new_ref->desc > ref->desc)
+ p = &(*p)->rb_right;
+ else
+ BUG();
+ }
+ rb_link_node(&new_ref->rb_node_desc, parent, p);
+ rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
+ if (node) {
+ hlist_add_head(&new_ref->node_entry, &node->refs);
+
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d new ref %d desc %d for node %d\n",
+ proc->pid, new_ref->debug_id, new_ref->desc,
+ node->debug_id);
+ } else {
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d new ref %d desc %d for dead node\n",
+ proc->pid, new_ref->debug_id, new_ref->desc);
+ }
+ return new_ref;
+}
+
+static void binder_delete_ref(struct binder_ref *ref)
+{
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d delete ref %d desc %d for node %d\n",
+ ref->proc->pid, ref->debug_id, ref->desc,
+ ref->node->debug_id);
+
+ rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
+ rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
+ if (ref->strong)
+ binder_dec_node(ref->node, 1, 1);
+ hlist_del(&ref->node_entry);
+ binder_dec_node(ref->node, 0, 1);
+ if (ref->death) {
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "%d delete ref %d desc %d has death notification\n",
+ ref->proc->pid, ref->debug_id, ref->desc);
+ list_del(&ref->death->work.entry);
+ kfree(ref->death);
+ binder_stats_deleted(BINDER_STAT_DEATH);
+ }
+ kfree(ref);
+ binder_stats_deleted(BINDER_STAT_REF);
+}
+
+static int binder_inc_ref(struct binder_ref *ref, int strong,
+ struct list_head *target_list)
+{
+ int ret;
+
+ if (strong) {
+ if (ref->strong == 0) {
+ ret = binder_inc_node(ref->node, 1, 1, target_list);
+ if (ret)
+ return ret;
+ }
+ ref->strong++;
+ } else {
+ if (ref->weak == 0) {
+ ret = binder_inc_node(ref->node, 0, 1, target_list);
+ if (ret)
+ return ret;
+ }
+ ref->weak++;
+ }
+ return 0;
+}
+
+
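+/* Drop a strong or weak ref; the ref is freed when both counts reach zero. */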
+static int binder_dec_ref(struct binder_ref *ref, int strong)
+{
+ if (strong) {
+ if (ref->strong == 0) {
+ binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
+ ref->proc->pid, ref->debug_id,
+ ref->desc, ref->strong, ref->weak);
+ return -EINVAL;
+ }
+ ref->strong--;
+ if (ref->strong == 0) {
+ int ret;
+
+ ret = binder_dec_node(ref->node, strong, 1);
+ if (ret)
+ return ret;
+ }
+ } else {
+ if (ref->weak == 0) {
+ binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
+ ref->proc->pid, ref->debug_id,
+ ref->desc, ref->strong, ref->weak);
+ return -EINVAL;
+ }
+ ref->weak--;
+ }
+ if (ref->strong == 0 && ref->weak == 0)
+ binder_delete_ref(