android: binder: move global binder state into context struct.

This change moves all global binder state into
the context struct, completely separating the
state and the locks of different contexts.

The debugfs entries remain global, printing entries
from all contexts.

Change-Id: If8e3e2bece7bc6f974b66fbcf1d91d529ffa62f0
Signed-off-by: Martijn Coenen <maco@google.com>
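
For orientation before the hunks: every piece of file-scope state in
binder.c becomes a member of struct binder_context, and each binder
device embeds one context, so two devices never share locks, lists,
logs, or stats. A condensed sketch of the resulting layout (field names
are taken from the hunks below; struct binder_device's members are
inferred from how the diff uses them, and everything else is elided):

#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/uidgid.h>
#include <linux/workqueue.h>

/* Condensed: the log and stats types are defined in binder.c itself. */
struct binder_context {
	struct binder_node *binder_context_mgr_node;
	kuid_t binder_context_mgr_uid;
	const char *name;

	struct mutex binder_main_lock;          /* was a static DEFINE_MUTEX */
	struct mutex binder_deferred_lock;      /* was a static DEFINE_MUTEX */
	struct mutex binder_mmap_lock;          /* was a static DEFINE_MUTEX */
	struct hlist_head binder_procs;         /* was a static HLIST_HEAD */
	struct hlist_head binder_dead_nodes;    /* was a static HLIST_HEAD */
	struct hlist_head binder_deferred_list; /* was a static HLIST_HEAD */
	struct work_struct deferred_work;       /* was a static DECLARE_WORK */
	struct workqueue_struct *binder_deferred_workqueue;
	struct binder_transaction_log transaction_log;
	struct binder_transaction_log transaction_log_failed;
	struct binder_stats binder_stats;
};

/* One per device name on the binder_devices module parameter. */
struct binder_device {
	struct hlist_node hlist;   /* entry in the global binder_devices list */
	struct miscdevice miscdev;
	struct binder_context context;
};

Only binder_devices (the device list), the debugfs dentries,
binder_last_id, and binder_obj_stats stay global.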
@@ -18,6 +18,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/cacheflush.h>
#include <linux/atomic.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
@@ -46,19 +47,11 @@
#include <uapi/linux/android/binder.h>
#include "binder_trace.h"
static DEFINE_MUTEX(binder_main_lock);
static DEFINE_MUTEX(binder_deferred_lock);
static DEFINE_MUTEX(binder_mmap_lock);
static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static HLIST_HEAD(binder_deferred_list);
static HLIST_HEAD(binder_dead_nodes);
static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static int binder_last_id;
static struct workqueue_struct *binder_deferred_workqueue;
static atomic_t binder_last_id;
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
@@ -173,20 +166,24 @@ enum binder_stat_types {
struct binder_stats {
int br[_IOC_NR(BR_FAILED_REPLY) + 1];
int bc[_IOC_NR(BC_REPLY_SG) + 1];
int obj_created[BINDER_STAT_COUNT];
int obj_deleted[BINDER_STAT_COUNT];
};
static struct binder_stats binder_stats;
/* These are still global, since it's not always easy to get the context */
struct binder_obj_stats {
atomic_t obj_created[BINDER_STAT_COUNT];
atomic_t obj_deleted[BINDER_STAT_COUNT];
};
static struct binder_obj_stats binder_obj_stats;
static inline void binder_stats_deleted(enum binder_stat_types type)
{
binder_stats.obj_deleted[type]++;
atomic_inc(&binder_obj_stats.obj_deleted[type]);
}
static inline void binder_stats_created(enum binder_stat_types type)
{
binder_stats.obj_created[type]++;
atomic_inc(&binder_obj_stats.obj_created[type]);
}
struct binder_transaction_log_entry {
@@ -207,8 +204,6 @@ struct binder_transaction_log {
int full;
struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;
static struct binder_transaction_log_entry *binder_transaction_log_add(
struct binder_transaction_log *log)
@@ -229,6 +224,21 @@ struct binder_context {
struct binder_node *binder_context_mgr_node;
kuid_t binder_context_mgr_uid;
const char *name;
struct mutex binder_main_lock;
struct mutex binder_deferred_lock;
struct mutex binder_mmap_lock;
struct hlist_head binder_procs;
struct hlist_head binder_dead_nodes;
struct hlist_head binder_deferred_list;
struct work_struct deferred_work;
struct workqueue_struct *binder_deferred_workqueue;
struct binder_transaction_log transaction_log;
struct binder_transaction_log transaction_log_failed;
struct binder_stats binder_stats;
};
struct binder_device {
@@ -459,18 +469,19 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
return retval;
}
static inline void binder_lock(const char *tag)
static inline void binder_lock(struct binder_context *context, const char *tag)
{
trace_binder_lock(tag);
mutex_lock(&binder_main_lock);
mutex_lock(&context->binder_main_lock);
preempt_disable();
trace_binder_locked(tag);
}
static inline void binder_unlock(const char *tag)
static inline void binder_unlock(struct binder_context *context,
const char *tag)
{
trace_binder_unlock(tag);
mutex_unlock(&binder_main_lock);
mutex_unlock(&context->binder_main_lock);
preempt_enable();
}
@@ -1019,7 +1030,7 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
binder_stats_created(BINDER_STAT_NODE);
rb_link_node(&node->rb_node, parent, p);
rb_insert_color(&node->rb_node, &proc->nodes);
node->debug_id = ++binder_last_id;
node->debug_id = atomic_inc_return(&binder_last_id);
node->proc = proc;
node->ptr = ptr;
node->cookie = cookie;
@@ -1161,7 +1172,7 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
if (new_ref == NULL)
return NULL;
binder_stats_created(BINDER_STAT_REF);
new_ref->debug_id = ++binder_last_id;
new_ref->debug_id = atomic_inc_return(&binder_last_id);
new_ref->proc = proc;
new_ref->node = node;
rb_link_node(&new_ref->rb_node_node, parent, p);
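
A side note on the two conversions just above: ++binder_last_id was
only safe while the single global binder_main_lock serialized every
caller. With the lock now per context, threads on different devices can
allocate debug IDs concurrently, so the counter stays global but
becomes atomic, and atomic_inc_return() hands out unique IDs. A minimal
userspace analogue of the race and the fix, using C11 stdatomic in
place of the kernel's atomic_t (illustrative only; build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int last_id;	/* plays the role of binder_last_id */

/* Each thread stands in for a binder device handing out debug IDs. */
static void *alloc_ids(void *unused)
{
	(void)unused;
	for (int i = 0; i < 100000; i++) {
		/* atomic_fetch_add() returns the old value, so +1 mirrors
		 * the kernel's atomic_inc_return(), which returns the new
		 * value. A plain ++last_id here could hand out duplicates. */
		int id = atomic_fetch_add(&last_id, 1) + 1;
		(void)id;
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, alloc_ids, NULL);
	pthread_create(&b, NULL, alloc_ids, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* With the atomic, this prints exactly 200000. */
	printf("last_id = %d\n", atomic_load(&last_id));
	return 0;
}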
@@ -1924,7 +1935,7 @@ static void binder_transaction(struct binder_proc *proc,
binder_size_t last_fixup_min_off = 0;
struct binder_context *context = proc->context;
e = binder_transaction_log_add(&binder_transaction_log);
e = binder_transaction_log_add(&context->transaction_log);
e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
e->from_proc = proc->pid;
e->from_thread = thread->pid;
@@ -2045,7 +2056,7 @@ static void binder_transaction(struct binder_proc *proc,
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
t->debug_id = ++binder_last_id;
t->debug_id = atomic_inc_return(&binder_last_id);
e->debug_id = t->debug_id;
if (reply)
@@ -2316,7 +2327,8 @@ err_no_context_mgr_node:
{
struct binder_transaction_log_entry *fe;
fe = binder_transaction_log_add(&binder_transaction_log_failed);
fe = binder_transaction_log_add(
&context->transaction_log_failed);
*fe = *e;
}
@@ -2344,8 +2356,8 @@ int binder_thread_write(struct binder_proc *proc,
return -EFAULT;
ptr += sizeof(uint32_t);
trace_binder_command(cmd);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
binder_stats.bc[_IOC_NR(cmd)]++;
if (_IOC_NR(cmd) < ARRAY_SIZE(context->binder_stats.bc)) {
context->binder_stats.bc[_IOC_NR(cmd)]++;
proc->stats.bc[_IOC_NR(cmd)]++;
thread->stats.bc[_IOC_NR(cmd)]++;
}
@@ -2717,8 +2729,8 @@ static void binder_stat_br(struct binder_proc *proc,
struct binder_thread *thread, uint32_t cmd)
{
trace_binder_return(cmd);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
binder_stats.br[_IOC_NR(cmd)]++;
if (_IOC_NR(cmd) < ARRAY_SIZE(proc->stats.br)) {
proc->context->binder_stats.br[_IOC_NR(cmd)]++;
proc->stats.br[_IOC_NR(cmd)]++;
thread->stats.br[_IOC_NR(cmd)]++;
}
@@ -2782,7 +2794,7 @@ retry:
if (wait_for_proc_work)
proc->ready_threads++;
binder_unlock(__func__);
binder_unlock(proc->context, __func__);
trace_binder_wait_for_work(wait_for_proc_work,
!!thread->transaction_stack,
@@ -2809,7 +2821,7 @@ retry:
ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
}
binder_lock(__func__);
binder_lock(proc->context, __func__);
if (wait_for_proc_work)
proc->ready_threads--;
@@ -3196,14 +3208,14 @@ static unsigned int binder_poll(struct file *filp,
struct binder_thread *thread = NULL;
int wait_for_proc_work;
binder_lock(__func__);
binder_lock(proc->context, __func__);
thread = binder_get_thread(proc);
wait_for_proc_work = thread->transaction_stack == NULL &&
list_empty(&thread->todo) && thread->return_error == BR_OK;
binder_unlock(__func__);
binder_unlock(proc->context, __func__);
if (wait_for_proc_work) {
if (binder_has_proc_work(proc, thread))
@@ -3330,6 +3342,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_context *context = proc->context;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
@@ -3343,7 +3356,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (ret)
goto err_unlocked;
binder_lock(__func__);
binder_lock(context, __func__);
thread = binder_get_thread(proc);
if (thread == NULL) {
ret = -ENOMEM;
@@ -3394,7 +3407,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
err:
if (thread)
thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
binder_unlock(__func__);
binder_unlock(context, __func__);
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret && ret != -ERESTARTSYS)
pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
@@ -3467,7 +3480,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
}
vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
mutex_lock(&binder_mmap_lock);
mutex_lock(&proc->context->binder_mmap_lock);
if (proc->buffer) {
ret = -EBUSY;
failure_string = "already mapped";
@@ -3482,7 +3495,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
}
proc->buffer = area->addr;
proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
mutex_unlock(&binder_mmap_lock);
mutex_unlock(&proc->context->binder_mmap_lock);
#ifdef CONFIG_CPU_CACHE_VIPT
if (cache_is_vipt_aliasing()) {
@@ -3531,12 +3544,12 @@ err_alloc_small_buf_failed:
kfree(proc->pages);
proc->pages = NULL;
err_alloc_pages_failed:
mutex_lock(&binder_mmap_lock);
mutex_lock(&proc->context->binder_mmap_lock);
vfree(proc->buffer);
proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
mutex_unlock(&binder_mmap_lock);
mutex_unlock(&proc->context->binder_mmap_lock);
err_bad_arg:
pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
@@ -3563,15 +3576,15 @@ static int binder_open(struct inode *nodp, struct file *filp)
miscdev);
proc->context = &binder_dev->context;
binder_lock(__func__);
binder_lock(proc->context, __func__);
binder_stats_created(BINDER_STAT_PROC);
hlist_add_head(&proc->proc_node, &binder_procs);
hlist_add_head(&proc->proc_node, &proc->context->binder_procs);
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death);
filp->private_data = proc;
binder_unlock(__func__);
binder_unlock(proc->context, __func__);
if (binder_debugfs_dir_entry_proc) {
char strbuf[11];
@@ -3636,6 +3649,7 @@ static int binder_release(struct inode *nodp, struct file *filp)
static int binder_node_release(struct binder_node *node, int refs)
{
struct binder_ref *ref;
struct binder_context *context = node->proc->context;
int death = 0;
list_del_init(&node->work.entry);
@@ -3651,7 +3665,7 @@ static int binder_node_release(struct binder_node *node, int refs)
node->proc = NULL;
node->local_strong_refs = 0;
node->local_weak_refs = 0;
hlist_add_head(&node->dead_node, &binder_dead_nodes);
hlist_add_head(&node->dead_node, &context->binder_dead_nodes);
hlist_for_each_entry(ref, &node->refs, node_entry) {
refs++;
@@ -3716,7 +3730,8 @@ static void binder_deferred_release(struct binder_proc *proc)
node = rb_entry(n, struct binder_node, rb_node);
nodes++;
rb_erase(&node->rb_node, &proc->nodes);
incoming_refs = binder_node_release(node, incoming_refs);
incoming_refs = binder_node_release(node,
incoming_refs);
}
outgoing_refs = 0;
@@ -3788,19 +3803,22 @@ static void binder_deferred_func(struct work_struct *work)
{
struct binder_proc *proc;
struct files_struct *files;
struct binder_context *context =
container_of(work, struct binder_context, deferred_work);
int defer;
do {
trace_binder_lock(__func__);
mutex_lock(&binder_main_lock);
mutex_lock(&context->binder_main_lock);
trace_binder_locked(__func__);
mutex_lock(&binder_deferred_lock);
mutex_lock(&context->binder_deferred_lock);
preempt_disable();
if (!hlist_empty(&binder_deferred_list)) {
proc = hlist_entry(binder_deferred_list.first,
struct binder_proc, deferred_work_node);
if (!hlist_empty(&context->binder_deferred_list)) {
proc = hlist_entry(context->binder_deferred_list.first,
struct binder_proc,
deferred_work_node);
hlist_del_init(&proc->deferred_work_node);
defer = proc->deferred_work;
proc->deferred_work = 0;
@@ -3808,7 +3826,7 @@ static void binder_deferred_func(struct work_struct *work)
proc = NULL;
defer = 0;
}
mutex_unlock(&binder_deferred_lock);
mutex_unlock(&context->binder_deferred_lock);
files = NULL;
if (defer & BINDER_DEFERRED_PUT_FILES) {
@@ -3824,25 +3842,25 @@ static void binder_deferred_func(struct work_struct *work)
binder_deferred_release(proc); /* frees proc */
trace_binder_unlock(__func__);
mutex_unlock(&binder_main_lock);
mutex_unlock(&context->binder_main_lock);
preempt_enable_no_resched();
if (files)
put_files_struct(files);
} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
mutex_lock(&binder_deferred_lock);
mutex_lock(&proc->context->binder_deferred_lock);
proc->deferred_work |= defer;
if (hlist_unhashed(&proc->deferred_work_node)) {
hlist_add_head(&proc->deferred_work_node,
&binder_deferred_list);
queue_work(binder_deferred_workqueue, &binder_deferred_work);
&proc->context->binder_deferred_list);
queue_work(proc->context->binder_deferred_workqueue,
&proc->context->deferred_work);
}
mutex_unlock(&binder_deferred_lock);
mutex_unlock(&proc->context->binder_deferred_lock);
}
static void print_binder_transaction(struct seq_file *m, const char *prefix,
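
Worth noting in the hunk above: binder_deferred_func() can no longer
consult a global deferred list, so each context carries its own
work_struct and the handler recovers its context from the work pointer
with container_of(). A small userspace model of that embed-and-recover
idiom (the struct names here are stand-ins, not the kernel's):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };	/* userspace stand-in */

struct context {
	const char *name;
	struct work_struct deferred_work;	/* embedded, like binder_context */
};

static void deferred_func(struct work_struct *work)
{
	/* The same recovery binder_deferred_func() performs: from the
	 * embedded work item back to the enclosing per-device context. */
	struct context *ctx = container_of(work, struct context, deferred_work);

	printf("deferred work for %s\n", ctx->name);
}

int main(void)
{
	struct context ctx = { .name = "binder" };

	deferred_func(&ctx.deferred_work);	/* prints "deferred work for binder" */
	return 0;
}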
@@ -4073,8 +4091,20 @@ static const char * const binder_objstat_strings[] = {
"transaction_complete"
};
static void add_binder_stats(struct binder_stats *from, struct binder_stats *to)
{
int i;
for (i = 0; i < ARRAY_SIZE(to->bc); i++)
to->bc[i] += from->bc[i];
for (i = 0; i < ARRAY_SIZE(to->br); i++)
to->br[i] += from->br[i];
}
static void print_binder_stats(struct seq_file *m, const char *prefix,
struct binder_stats *stats)
struct binder_stats *stats,
struct binder_obj_stats *obj_stats)
{
int i;
@@ -4094,16 +4124,21 @@ static void print_binder_stats(struct seq_file *m, const char *prefix,
binder_return_strings[i], stats->br[i]);
}
BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
if (!obj_stats)
return;
BUILD_BUG_ON(ARRAY_SIZE(obj_stats->obj_created) !=
ARRAY_SIZE(binder_objstat_strings));
BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
ARRAY_SIZE(stats->obj_deleted));
for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
if (stats->obj_created[i] || stats->obj_deleted[i])
BUILD_BUG_ON(ARRAY_SIZE(obj_stats->obj_created) !=
ARRAY_SIZE(obj_stats->obj_deleted));
for (i = 0; i < ARRAY_SIZE(obj_stats->obj_created); i++) {
int obj_created = atomic_read(&obj_stats->obj_created[i]);
int obj_deleted = atomic_read(&obj_stats->obj_deleted[i]);
if (obj_created || obj_deleted)
seq_printf(m, "%s%s: active %d total %d\n", prefix,
binder_objstat_strings[i],
stats->obj_created[i] - stats->obj_deleted[i],
stats->obj_created[i]);
binder_objstat_strings[i],
obj_created - obj_deleted, obj_created);
}
}
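
add_binder_stats() above exists because the BC/BR command counters are
now per context while the debugfs "stats" file stays global:
binder_stats_show() (later in this diff) walks binder_devices and folds
each context's binder_stats into one running total, and only that total
is passed to print_binder_stats() along with the single global
binder_obj_stats. A compact userspace model of the merge (the array
size here is arbitrary, not the kernel's _IOC_NR-derived sizes):

#include <stdio.h>

#define NCMD 4	/* arbitrary stand-in for _IOC_NR(BC_REPLY_SG) + 1 etc. */

struct stats { int bc[NCMD]; int br[NCMD]; };

/* Same shape as add_binder_stats(): fold one context's counters
 * into a running total for the global debugfs view. */
static void add_stats(const struct stats *from, struct stats *to)
{
	for (int i = 0; i < NCMD; i++) {
		to->bc[i] += from->bc[i];
		to->br[i] += from->br[i];
	}
}

int main(void)
{
	struct stats ctx1 = { .bc = { 1, 2, 0, 0 }, .br = { 0, 1, 0, 0 } };
	struct stats ctx2 = { .bc = { 0, 1, 3, 0 }, .br = { 2, 0, 0, 0 } };
	struct stats total = { 0 };

	add_stats(&ctx1, &total);	/* one call per binder device */
	add_stats(&ctx2, &total);
	printf("bc[1] total = %d\n", total.bc[1]);	/* prints 3 */
	return 0;
}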
@@ -4158,85 +4193,131 @@ static void print_binder_proc_stats(struct seq_file *m,
}
seq_printf(m, " pending transactions: %d\n", count);
print_binder_stats(m, " ", &proc->stats);
print_binder_stats(m, " ", &proc->stats, NULL);
}
static int binder_state_show(struct seq_file *m, void *unused)
{
struct binder_device *device;
struct binder_context *context;
struct binder_proc *proc;
struct binder_node *node;
int do_lock = !binder_debug_no_lock;
if (do_lock)
binder_lock(__func__);
bool wrote_dead_nodes_header = false;
seq_puts(m, "binder state:\n");
if (!hlist_empty(&binder_dead_nodes))
seq_puts(m, "dead nodes:\n");
hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
print_binder_node(m, node);
hlist_for_each_entry(device, &binder_devices, hlist) {
context = &device->context;
if (do_lock)
binder_lock(context, __func__);
if (!wrote_dead_nodes_header &&
!hlist_empty(&context->binder_dead_nodes)) {
seq_puts(m, "dead nodes:\n");
wrote_dead_nodes_header = true;
}
hlist_for_each_entry(node, &context->binder_dead_nodes,
dead_node)
print_binder_node(m, node);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc(m, proc, 1);
if (do_lock)
binder_unlock(__func__);
if (do_lock)
binder_unlock(context, __func__);
}
hlist_for_each_entry(device, &binder_devices, hlist) {
context = &device->context;
if (do_lock)
binder_lock(context, __func__);
hlist_for_each_entry(proc, &context->binder_procs, proc_node)
print_binder_proc(m, proc, 1);
if (do_lock)
binder_unlock(context, __func__);
}
return 0;
}
static int binder_stats_show(struct seq_file *m, void *unused)
{
struct binder_device *device;
struct binder_context *context;
struct binder_proc *proc;
struct binder_stats total_binder_stats;
int do_lock = !binder_debug_no_lock;
if (do_lock)
binder_lock(__func__);
memset(&total_binder_stats, 0, sizeof(struct binder_stats));
hlist_for_each_entry(device, &binder_devices, hlist) {
context = &device->context;
if (do_lock)
binder_lock(context, __func__);
add_binder_stats(&context->binder_stats, &total_binder_stats);
if (do_lock)
binder_unlock(context, __func__);
}
seq_puts(m, "binder stats:\n");
print_binder_stats(m, "", &total_binder_stats, &binder_obj_stats);
print_binder_stats(m, "", &binder_stats);
hlist_for_each_entry(device, &binder_devices, hlist) {
context = &device->context;
if (do_lock)
binder_lock(context, __func__);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc_stats(m, proc);
if (do_lock)
binder_unlock(__func__);
hlist_for_each_entry(proc, &context->binder_procs, proc_node)
print_binder_proc_stats(m, proc);
if (do_lock)
binder_unlock(context, __func__);
}
return 0;
}
static int binder_transactions_show(struct seq_file *m, void *unused)
{
struct binder_device *device;
struct binder_context *context;
struct binder_proc *proc;
int do_lock = !binder_debug_no_lock;
if (do_lock)
binder_lock(__func__);
seq_puts(m, "binder transactions:\n");
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc(m, proc, 0);
if (do_lock)
binder_unlock(__func__);
hlist_for_each_entry(device, &binder_devices, hlist) {
context = &device->context;
if (do_lock)
binder_lock(context, __func__);
hlist_for_each_entry(proc, &context->binder_procs, proc_node)
print_binder_proc(m, proc, 0);
if (do_lock)
binder_unlock(context, __func__);
}
return 0;
}
static int binder_proc_show(struct seq_file *m, void *unused)
{
struct binder_device *device;
struct binder_context *context;
struct binder_proc *itr;
int pid = (unsigned long)m->private;
int do_lock = !binder_debug_no_lock;
if (do_lock)
binder_lock(__func__);
hlist_for_each_entry(device, &binder_devices, hlist) {
context = &device->context;
if (do_lock)
binder_lock(context, __func__);
hlist_for_each_entry(itr, &binder_procs, proc_node) {
if (itr->pid == pid) {
seq_puts(m, "binder proc state:\n");
print_binder_proc(m, itr, 1);
hlist_for_each_entry(itr, &context->binder_procs, proc_node) {
if (itr->pid == pid) {
seq_puts(m, "binder proc state:\n");
print_binder_proc(m, itr, 1);
}
}
if (do_lock)
binder_unlock(context, __func__);
}
if (do_lock)
binder_unlock(__func__);
return 0;
}
@@ -4251,11 +4332,10 @@ static void print_binder_transaction_log_entry(struct seq_file *m,
e->to_node, e->target_handle, e->data_size, e->offsets_size);
}
static int binder_transaction_log_show(struct seq_file *m, void *unused)
static int print_binder_transaction_log(struct seq_file *m,
struct binder_transaction_log *log)
{
struct binder_transaction_log *log = m->private;
int i;
if (log->full) {
for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
print_binder_transaction_log_entry(m, &log->entry[i]);
@@ -4265,6 +4345,31 @@ static int binder_transaction_log_show(struct seq_file *m, void *unused)
return 0;
}
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
struct binder_device *device;
struct binder_context *context;
hlist_for_each_entry(device, &binder_devices, hlist) {
context = &device->context;
print_binder_transaction_log(m, &context->transaction_log);
}
return 0;
}
static int binder_failed_transaction_log_show(struct seq_file *m, void *unused)
{
struct binder_device *device;
struct binder_context *context;
hlist_for_each_entry(device, &binder_devices, hlist) {
context = &device->context;
print_binder_transaction_log(m,
&context->transaction_log_failed);
}
return 0;
}
static const struct file_operations binder_fops = {
.owner = THIS_MODULE,
.poll = binder_poll,
@@ -4280,11 +4385,20 @@ BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
BINDER_DEBUG_ENTRY(failed_transaction_log);
static void __init free_binder_device(struct binder_device *device)
{
if (device->context.binder_deferred_workqueue)
destroy_workqueue(device->context.binder_deferred_workqueue);
kfree(device);
}
static int __init init_binder_device(const char *name)
{
int ret;
struct binder_device *binder_device;
struct binder_context *context;
binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
if (!binder_device)
@@ -4294,31 +4408,65 @@ static int __init init_binder_device(const char *name)
binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
binder_device->miscdev.name = name;
binder_device->context.binder_context_mgr_uid = INVALID_UID;
binder_device->context.name = name;
context = &binder_device->context;
context->binder_context_mgr_uid = INVALID_UID;
context->name = name;
mutex_init(&context->binder_main_lock);
mutex_init(&context->binder_deferred_lock);
mutex_init(&context->binder_mmap_lock);
context->binder_deferred_workqueue =
create_singlethread_workqueue(name);
if (!context->binder_deferred_workqueue) {
ret = -ENOMEM;
goto err_create_singlethread_workqueue_failed;
}
INIT_HLIST_HEAD(&context->binder_procs);
INIT_HLIST_HEAD(&context->binder_dead_nodes);
INIT_HLIST_HEAD(&context->binder_deferred_list);
INIT_WORK(&context->deferred_work, binder_deferred_func);
ret = misc_register(&binder_device->miscdev);
if (ret < 0) {
kfree(binder_device);
return ret;
goto err_misc_register_failed;
}
hlist_add_head(&binder_device->hlist, &binder_devices);
return ret;
err_create_singlethread_workqueue_failed:
err_misc_register_failed:
free_binder_device(binder_device);
return ret;
}
static int __init binder_init(void)
{
int ret;
int ret = 0;
char *device_name, *device_names;
struct binder_device *device;
struct hlist_node *tmp;
binder_deferred_workqueue = create_singlethread_workqueue("binder");
if (!binder_deferred_workqueue)
/*
* Copy the module_parameter string, because we don't want to
* tokenize it in-place.
*/
device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
if (!device_names)
return -ENOMEM;
strcpy(device_names, binder_devices_param);
while ((device_name = strsep(&device_names, ","))) {
ret = init_binder_device(device_name);
if (ret)
goto err_init_binder_device_failed;
}
binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
if (binder_debugfs_dir_entry_root)
binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
@@ -4343,30 +4491,13 @@ static int __init binder_init(void)
debugfs_create_file("transaction_log",
S_IRUGO,
binder_debugfs_dir_entry_root,
&binder_transaction_log,
NULL,
&binder_transaction_log_fops);
debugfs_create_file("failed_transaction_log",
S_IRUGO,
binder_debugfs_dir_entry_root,
&binder_transaction_log_failed,
&binder_transaction_log_fops);
}
/*
* Copy the module_parameter string, because we don't want to
* tokenize it in-place.
*/
device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
if (!device_names) {
ret = -ENOMEM;
goto err_alloc_device_names_failed;
}
strcpy(device_names, binder_devices_param);
while ((device_name = strsep(&device_names, ","))) {
ret = init_binder_device(device_name);
if (ret)
goto err_init_binder_device_failed;
NULL,
&binder_failed_transaction_log_fops);
}
return ret;
@@ -4375,12 +4506,8 @@ err_init_binder_device_failed:
hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
misc_deregister(&device->miscdev);
hlist_del(&device->hlist);
kfree(device);
free_binder_device(device);
}
err_alloc_device_names_failed:
debugfs_remove_recursive(binder_debugfs_dir_entry_root);
destroy_workqueue(binder_deferred_workqueue);
return ret;
}
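
Finally, the init path: binder_init() now copies the binder_devices
module parameter and splits it on commas, creating one misc device (and
one fully independent context) per name. A userspace reduction of that
loop ("binder,hwbinder" is just an example value, not something this
patch hardcodes):

#define _DEFAULT_SOURCE	/* for strsep() on glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *binder_devices_param = "binder,hwbinder";	/* example */
	char *device_names, *device_name, *cursor;

	/* Copy first: strsep() writes NULs into the buffer it walks. */
	device_names = strdup(binder_devices_param);
	if (!device_names)
		return 1;

	cursor = device_names;
	while ((device_name = strsep(&cursor, ","))) {
		/* init_binder_device(device_name) in the real code */
		printf("would create /dev/%s\n", device_name);
	}

	free(device_names);
	return 0;
}

The kernel loop advances device_names itself and never frees the copy
on success, which is deliberate: each context->name and miscdev.name
points into that buffer. The sketch uses a separate cursor only so
userspace can free() cleanly.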