xref: /openbmc/linux/drivers/android/binder.c (revision 53d311cf)
1 /* binder.c
2  *
3  * Android IPC Subsystem
4  *
5  * Copyright (C) 2007-2008 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17 
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 
20 #include <asm/cacheflush.h>
21 #include <linux/fdtable.h>
22 #include <linux/file.h>
23 #include <linux/freezer.h>
24 #include <linux/fs.h>
25 #include <linux/list.h>
26 #include <linux/miscdevice.h>
27 #include <linux/module.h>
28 #include <linux/mutex.h>
29 #include <linux/nsproxy.h>
30 #include <linux/poll.h>
31 #include <linux/debugfs.h>
32 #include <linux/rbtree.h>
33 #include <linux/sched/signal.h>
34 #include <linux/sched/mm.h>
35 #include <linux/seq_file.h>
36 #include <linux/uaccess.h>
37 #include <linux/pid_namespace.h>
38 #include <linux/security.h>
39 
40 #ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
41 #define BINDER_IPC_32BIT 1
42 #endif
43 
44 #include <uapi/linux/android/binder.h>
45 #include "binder_alloc.h"
46 #include "binder_trace.h"
47 
48 static DEFINE_MUTEX(binder_main_lock);
49 
50 static HLIST_HEAD(binder_deferred_list);
51 static DEFINE_MUTEX(binder_deferred_lock);
52 
53 static HLIST_HEAD(binder_devices);
54 static HLIST_HEAD(binder_procs);
55 static DEFINE_MUTEX(binder_procs_lock);
56 
57 static HLIST_HEAD(binder_dead_nodes);
58 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
59 
60 static struct dentry *binder_debugfs_dir_entry_root;
61 static struct dentry *binder_debugfs_dir_entry_proc;
62 static atomic_t binder_last_id;
63 
64 #define BINDER_DEBUG_ENTRY(name) \
65 static int binder_##name##_open(struct inode *inode, struct file *file) \
66 { \
67 	return single_open(file, binder_##name##_show, inode->i_private); \
68 } \
69 \
70 static const struct file_operations binder_##name##_fops = { \
71 	.owner = THIS_MODULE, \
72 	.open = binder_##name##_open, \
73 	.read = seq_read, \
74 	.llseek = seq_lseek, \
75 	.release = single_release, \
76 }
77 
78 static int binder_proc_show(struct seq_file *m, void *unused);
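/*
 * BINDER_DEBUG_ENTRY(proc) below expands to binder_proc_open() and
 * binder_proc_fops, wiring binder_proc_show() into a single_open()
 * seq_file so the per-process state can be exposed through debugfs.
 */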
79 BINDER_DEBUG_ENTRY(proc);
80 
81 /* SZ_1K and SZ_4M may not be provided by the arch (see include/linux/sizes.h) */
82 #ifndef SZ_1K
83 #define SZ_1K                               0x400
84 #endif
85 
86 #ifndef SZ_4M
87 #define SZ_4M                               0x400000
88 #endif
89 
90 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
91 
92 #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
93 
94 enum {
95 	BINDER_DEBUG_USER_ERROR             = 1U << 0,
96 	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
97 	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
98 	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
99 	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
100 	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
101 	BINDER_DEBUG_READ_WRITE             = 1U << 6,
102 	BINDER_DEBUG_USER_REFS              = 1U << 7,
103 	BINDER_DEBUG_THREADS                = 1U << 8,
104 	BINDER_DEBUG_TRANSACTION            = 1U << 9,
105 	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
106 	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
107 	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
108 	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
109 };
110 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
111 	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
112 module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
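/*
 * debug_mask is also writable at runtime; for example (with the
 * default module name "binder"):
 *
 *   echo 0x208 > /sys/module/binder/parameters/debug_mask
 *
 * enables BINDER_DEBUG_OPEN_CLOSE (1U << 3) and
 * BINDER_DEBUG_TRANSACTION (1U << 9).
 */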
113 
114 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
115 module_param_named(devices, binder_devices_param, charp, 0444);
116 
117 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
118 static int binder_stop_on_user_error;
119 
120 static int binder_set_stop_on_user_error(const char *val,
121 					 struct kernel_param *kp)
122 {
123 	int ret;
124 
125 	ret = param_set_int(val, kp);
126 	if (binder_stop_on_user_error < 2)
127 		wake_up(&binder_user_error_wait);
128 	return ret;
129 }
130 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
131 	param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
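/*
 * binder_user_error() below sets binder_stop_on_user_error to 2, which
 * lets a developer freeze the driver on the first userspace error; the
 * corresponding wait on binder_user_error_wait happens in
 * binder_ioctl(), outside this excerpt.  Writing the parameter wakes
 * those waiters again once the value drops below 2.
 */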
132 
133 #define binder_debug(mask, x...) \
134 	do { \
135 		if (binder_debug_mask & mask) \
136 			pr_info(x); \
137 	} while (0)
138 
139 #define binder_user_error(x...) \
140 	do { \
141 		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
142 			pr_info(x); \
143 		if (binder_stop_on_user_error) \
144 			binder_stop_on_user_error = 2; \
145 	} while (0)
146 
147 #define to_flat_binder_object(hdr) \
148 	container_of(hdr, struct flat_binder_object, hdr)
149 
150 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
151 
152 #define to_binder_buffer_object(hdr) \
153 	container_of(hdr, struct binder_buffer_object, hdr)
154 
155 #define to_binder_fd_array_object(hdr) \
156 	container_of(hdr, struct binder_fd_array_object, hdr)
157 
158 enum binder_stat_types {
159 	BINDER_STAT_PROC,
160 	BINDER_STAT_THREAD,
161 	BINDER_STAT_NODE,
162 	BINDER_STAT_REF,
163 	BINDER_STAT_DEATH,
164 	BINDER_STAT_TRANSACTION,
165 	BINDER_STAT_TRANSACTION_COMPLETE,
166 	BINDER_STAT_COUNT
167 };
168 
169 struct binder_stats {
170 	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
171 	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
172 	atomic_t obj_created[BINDER_STAT_COUNT];
173 	atomic_t obj_deleted[BINDER_STAT_COUNT];
174 };
175 
176 static struct binder_stats binder_stats;
177 
178 static inline void binder_stats_deleted(enum binder_stat_types type)
179 {
180 	atomic_inc(&binder_stats.obj_deleted[type]);
181 }
182 
183 static inline void binder_stats_created(enum binder_stat_types type)
184 {
185 	atomic_inc(&binder_stats.obj_created[type]);
186 }
187 
188 struct binder_transaction_log_entry {
189 	int debug_id;
190 	int call_type;
191 	int from_proc;
192 	int from_thread;
193 	int target_handle;
194 	int to_proc;
195 	int to_thread;
196 	int to_node;
197 	int data_size;
198 	int offsets_size;
199 	int return_error_line;
200 	uint32_t return_error;
201 	uint32_t return_error_param;
202 	const char *context_name;
203 };
204 struct binder_transaction_log {
205 	int next;
206 	int full;
207 	struct binder_transaction_log_entry entry[32];
208 };
209 static struct binder_transaction_log binder_transaction_log;
210 static struct binder_transaction_log binder_transaction_log_failed;
211 
212 static struct binder_transaction_log_entry *binder_transaction_log_add(
213 	struct binder_transaction_log *log)
214 {
215 	struct binder_transaction_log_entry *e;
216 
217 	e = &log->entry[log->next];
218 	memset(e, 0, sizeof(*e));
219 	log->next++;
220 	if (log->next == ARRAY_SIZE(log->entry)) {
221 		log->next = 0;
222 		log->full = 1;
223 	}
224 	return e;
225 }
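/*
 * Both logs are fixed 32-entry rings: once next wraps, full stays set
 * and the oldest entries are silently overwritten.  They back the
 * transaction_log and failed_transaction_log debugfs files (created in
 * binder_init(), not shown in this excerpt).
 */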
226 
227 struct binder_context {
228 	struct binder_node *binder_context_mgr_node;
229 	struct mutex context_mgr_node_lock;
230 
231 	kuid_t binder_context_mgr_uid;
232 	const char *name;
233 };
234 
235 struct binder_device {
236 	struct hlist_node hlist;
237 	struct miscdevice miscdev;
238 	struct binder_context context;
239 };
240 
241 struct binder_work {
242 	struct list_head entry;
243 	enum {
244 		BINDER_WORK_TRANSACTION = 1,
245 		BINDER_WORK_TRANSACTION_COMPLETE,
246 		BINDER_WORK_NODE,
247 		BINDER_WORK_DEAD_BINDER,
248 		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
249 		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
250 	} type;
251 };
252 
253 struct binder_node {
254 	int debug_id;
255 	struct binder_work work;
256 	union {
257 		struct rb_node rb_node;
258 		struct hlist_node dead_node;
259 	};
260 	struct binder_proc *proc;
261 	struct hlist_head refs;
262 	int internal_strong_refs;
263 	int local_weak_refs;
264 	int local_strong_refs;
265 	binder_uintptr_t ptr;
266 	binder_uintptr_t cookie;
267 	unsigned has_strong_ref:1;
268 	unsigned pending_strong_ref:1;
269 	unsigned has_weak_ref:1;
270 	unsigned pending_weak_ref:1;
271 	unsigned has_async_transaction:1;
272 	unsigned accept_fds:1;
273 	unsigned min_priority:8;
274 	struct list_head async_todo;
275 };
276 
277 struct binder_ref_death {
278 	struct binder_work work;
279 	binder_uintptr_t cookie;
280 };
281 
282 struct binder_ref {
283 	/* Lookups needed: */
284 	/*   node + proc => ref (transaction) */
285 	/*   desc + proc => ref (transaction, inc/dec ref) */
286 	/*   node => refs + procs (proc exit) */
287 	int debug_id;
288 	struct rb_node rb_node_desc;
289 	struct rb_node rb_node_node;
290 	struct hlist_node node_entry;
291 	struct binder_proc *proc;
292 	struct binder_node *node;
293 	uint32_t desc;
294 	int strong;
295 	int weak;
296 	struct binder_ref_death *death;
297 };
298 
299 enum binder_deferred_state {
300 	BINDER_DEFERRED_PUT_FILES    = 0x01,
301 	BINDER_DEFERRED_FLUSH        = 0x02,
302 	BINDER_DEFERRED_RELEASE      = 0x04,
303 };
304 
305 struct binder_proc {
306 	struct hlist_node proc_node;
307 	struct rb_root threads;
308 	struct rb_root nodes;
309 	struct rb_root refs_by_desc;
310 	struct rb_root refs_by_node;
311 	int pid;
312 	struct task_struct *tsk;
313 	struct files_struct *files;
314 	struct hlist_node deferred_work_node;
315 	int deferred_work;
316 
317 	struct list_head todo;
318 	wait_queue_head_t wait;
319 	struct binder_stats stats;
320 	struct list_head delivered_death;
321 	int max_threads;
322 	int requested_threads;
323 	int requested_threads_started;
324 	int ready_threads;
325 	long default_priority;
326 	struct dentry *debugfs_entry;
327 	struct binder_alloc alloc;
328 	struct binder_context *context;
329 };
330 
331 enum {
332 	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
333 	BINDER_LOOPER_STATE_ENTERED     = 0x02,
334 	BINDER_LOOPER_STATE_EXITED      = 0x04,
335 	BINDER_LOOPER_STATE_INVALID     = 0x08,
336 	BINDER_LOOPER_STATE_WAITING     = 0x10,
337 };
338 
339 struct binder_thread {
340 	struct binder_proc *proc;
341 	struct rb_node rb_node;
342 	int pid;
343 	int looper;              /* only modified by this thread */
344 	bool looper_need_return; /* can be written by other thread */
345 	struct binder_transaction *transaction_stack;
346 	struct list_head todo;
347 	uint32_t return_error; /* Write failed, return error code in read buf */
348 	uint32_t return_error2; /* Write failed, return error code in read
349 				 * buffer. Used when sending a reply to a dead
350 				 * process that we are also waiting on */
351 	wait_queue_head_t wait;
352 	struct binder_stats stats;
353 };
354 
355 struct binder_transaction {
356 	int debug_id;
357 	struct binder_work work;
358 	struct binder_thread *from;
359 	struct binder_transaction *from_parent;
360 	struct binder_proc *to_proc;
361 	struct binder_thread *to_thread;
362 	struct binder_transaction *to_parent;
363 	unsigned need_reply:1;
364 	/* unsigned is_dead:1; */	/* not used at the moment */
365 
366 	struct binder_buffer *buffer;
367 	unsigned int	code;
368 	unsigned int	flags;
369 	long	priority;
370 	long	saved_priority;
371 	kuid_t	sender_euid;
372 };
373 
374 static void
375 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
376 
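/*
 * Allocate an unused fd in @proc's file table, honouring the target
 * task's RLIMIT_NOFILE rather than the caller's.
 */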
377 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
378 {
379 	struct files_struct *files = proc->files;
380 	unsigned long rlim_cur;
381 	unsigned long irqs;
382 
383 	if (files == NULL)
384 		return -ESRCH;
385 
386 	if (!lock_task_sighand(proc->tsk, &irqs))
387 		return -EMFILE;
388 
389 	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
390 	unlock_task_sighand(proc->tsk, &irqs);
391 
392 	return __alloc_fd(files, 0, rlim_cur, flags);
393 }
394 
395 /*
396  * copied from fd_install
397  */
398 static void task_fd_install(
399 	struct binder_proc *proc, unsigned int fd, struct file *file)
400 {
401 	if (proc->files)
402 		__fd_install(proc->files, fd, file);
403 }
404 
405 /*
406  * copied from sys_close
407  */
408 static long task_close_fd(struct binder_proc *proc, unsigned int fd)
409 {
410 	int retval;
411 
412 	if (proc->files == NULL)
413 		return -ESRCH;
414 
415 	retval = __close_fd(proc->files, fd);
416 	/* can't restart close syscall because file table entry was cleared */
417 	if (unlikely(retval == -ERESTARTSYS ||
418 		     retval == -ERESTARTNOINTR ||
419 		     retval == -ERESTARTNOHAND ||
420 		     retval == -ERESTART_RESTARTBLOCK))
421 		retval = -EINTR;
422 
423 	return retval;
424 }
425 
426 static inline void binder_lock(const char *tag)
427 {
428 	trace_binder_lock(tag);
429 	mutex_lock(&binder_main_lock);
430 	trace_binder_locked(tag);
431 }
432 
433 static inline void binder_unlock(const char *tag)
434 {
435 	trace_binder_unlock(tag);
436 	mutex_unlock(&binder_main_lock);
437 }
438 
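/*
 * Clamp the requested nice value to what RLIMIT_NICE permits.  A soft
 * limit of N allows nice values down to 20 - N (rlimit_to_nice()), so
 * RLIMIT_NICE = 1 only permits nice 19, and RLIMIT_NICE = 0 yields
 * min_nice = 20 > MAX_NICE, which triggers the user-error path at the
 * end of the function.
 */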
439 static void binder_set_nice(long nice)
440 {
441 	long min_nice;
442 
443 	if (can_nice(current, nice)) {
444 		set_user_nice(current, nice);
445 		return;
446 	}
447 	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
448 	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
449 		     "%d: nice value %ld not allowed, using %ld instead\n",
450 		      current->pid, nice, min_nice);
451 	set_user_nice(current, min_nice);
452 	if (min_nice <= MAX_NICE)
453 		return;
454 	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
455 }
456 
457 static struct binder_node *binder_get_node(struct binder_proc *proc,
458 					   binder_uintptr_t ptr)
459 {
460 	struct rb_node *n = proc->nodes.rb_node;
461 	struct binder_node *node;
462 
463 	while (n) {
464 		node = rb_entry(n, struct binder_node, rb_node);
465 
466 		if (ptr < node->ptr)
467 			n = n->rb_left;
468 		else if (ptr > node->ptr)
469 			n = n->rb_right;
470 		else
471 			return node;
472 	}
473 	return NULL;
474 }
475 
476 static struct binder_node *binder_new_node(struct binder_proc *proc,
477 					   binder_uintptr_t ptr,
478 					   binder_uintptr_t cookie)
479 {
480 	struct rb_node **p = &proc->nodes.rb_node;
481 	struct rb_node *parent = NULL;
482 	struct binder_node *node;
483 
484 	while (*p) {
485 		parent = *p;
486 		node = rb_entry(parent, struct binder_node, rb_node);
487 
488 		if (ptr < node->ptr)
489 			p = &(*p)->rb_left;
490 		else if (ptr > node->ptr)
491 			p = &(*p)->rb_right;
492 		else
493 			return NULL;
494 	}
495 
496 	node = kzalloc(sizeof(*node), GFP_KERNEL);
497 	if (node == NULL)
498 		return NULL;
499 	binder_stats_created(BINDER_STAT_NODE);
500 	rb_link_node(&node->rb_node, parent, p);
501 	rb_insert_color(&node->rb_node, &proc->nodes);
502 	node->debug_id = atomic_inc_return(&binder_last_id);
503 	node->proc = proc;
504 	node->ptr = ptr;
505 	node->cookie = cookie;
506 	node->work.type = BINDER_WORK_NODE;
507 	INIT_LIST_HEAD(&node->work.entry);
508 	INIT_LIST_HEAD(&node->async_todo);
509 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
510 		     "%d:%d node %d u%016llx c%016llx created\n",
511 		     proc->pid, current->pid, node->debug_id,
512 		     (u64)node->ptr, (u64)node->cookie);
513 	return node;
514 }
515 
516 static int binder_inc_node(struct binder_node *node, int strong, int internal,
517 			   struct list_head *target_list)
518 {
519 	if (strong) {
520 		if (internal) {
521 			if (target_list == NULL &&
522 			    node->internal_strong_refs == 0 &&
523 			    !(node->proc &&
524 			      node == node->proc->context->binder_context_mgr_node &&
525 			      node->has_strong_ref)) {
526 				pr_err("invalid inc strong node for %d\n",
527 					node->debug_id);
528 				return -EINVAL;
529 			}
530 			node->internal_strong_refs++;
531 		} else
532 			node->local_strong_refs++;
533 		if (!node->has_strong_ref && target_list) {
534 			list_del_init(&node->work.entry);
535 			list_add_tail(&node->work.entry, target_list);
536 		}
537 	} else {
538 		if (!internal)
539 			node->local_weak_refs++;
540 		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
541 			if (target_list == NULL) {
542 				pr_err("invalid inc weak node for %d\n",
543 					node->debug_id);
544 				return -EINVAL;
545 			}
546 			list_add_tail(&node->work.entry, target_list);
547 		}
548 	}
549 	return 0;
550 }
551 
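/*
 * Drop a strong or weak count on @node.  If a count userspace must
 * mirror has hit zero, queue the node's work entry so the owning
 * process is told to update its refcounts; a node with no counts and
 * no refs left is removed and freed outright.
 */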
552 static int binder_dec_node(struct binder_node *node, int strong, int internal)
553 {
554 	if (strong) {
555 		if (internal)
556 			node->internal_strong_refs--;
557 		else
558 			node->local_strong_refs--;
559 		if (node->local_strong_refs || node->internal_strong_refs)
560 			return 0;
561 	} else {
562 		if (!internal)
563 			node->local_weak_refs--;
564 		if (node->local_weak_refs || !hlist_empty(&node->refs))
565 			return 0;
566 	}
567 	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
568 		if (list_empty(&node->work.entry)) {
569 			list_add_tail(&node->work.entry, &node->proc->todo);
570 			wake_up_interruptible(&node->proc->wait);
571 		}
572 	} else {
573 		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
574 		    !node->local_weak_refs) {
575 			list_del_init(&node->work.entry);
576 			if (node->proc) {
577 				rb_erase(&node->rb_node, &node->proc->nodes);
578 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
579 					     "refless node %d deleted\n",
580 					     node->debug_id);
581 			} else {
582 				spin_lock(&binder_dead_nodes_lock);
583 				hlist_del(&node->dead_node);
584 				spin_unlock(&binder_dead_nodes_lock);
585 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
586 					     "dead node %d deleted\n",
587 					     node->debug_id);
588 			}
589 			kfree(node);
590 			binder_stats_deleted(BINDER_STAT_NODE);
591 		}
592 	}
593 
594 	return 0;
595 }
596 
597 
598 static struct binder_ref *binder_get_ref(struct binder_proc *proc,
599 					 u32 desc, bool need_strong_ref)
600 {
601 	struct rb_node *n = proc->refs_by_desc.rb_node;
602 	struct binder_ref *ref;
603 
604 	while (n) {
605 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
606 
607 		if (desc < ref->desc) {
608 			n = n->rb_left;
609 		} else if (desc > ref->desc) {
610 			n = n->rb_right;
611 		} else if (need_strong_ref && !ref->strong) {
612 			binder_user_error("tried to use weak ref as strong ref\n");
613 			return NULL;
614 		} else {
615 			return ref;
616 		}
617 	}
618 	return NULL;
619 }
620 
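/*
 * Look up (or create) the ref @proc holds on @node.  Descriptor 0 is
 * reserved for refs to the context manager's node; any other new ref
 * gets the lowest unused descriptor, found by walking refs_by_desc in
 * ascending order.
 */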
621 static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
622 						  struct binder_node *node)
623 {
624 	struct rb_node *n;
625 	struct rb_node **p = &proc->refs_by_node.rb_node;
626 	struct rb_node *parent = NULL;
627 	struct binder_ref *ref, *new_ref;
628 	struct binder_context *context = proc->context;
629 
630 	while (*p) {
631 		parent = *p;
632 		ref = rb_entry(parent, struct binder_ref, rb_node_node);
633 
634 		if (node < ref->node)
635 			p = &(*p)->rb_left;
636 		else if (node > ref->node)
637 			p = &(*p)->rb_right;
638 		else
639 			return ref;
640 	}
641 	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
642 	if (new_ref == NULL)
643 		return NULL;
644 	binder_stats_created(BINDER_STAT_REF);
645 	new_ref->debug_id = atomic_inc_return(&binder_last_id);
646 	new_ref->proc = proc;
647 	new_ref->node = node;
648 	rb_link_node(&new_ref->rb_node_node, parent, p);
649 	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
650 
651 	new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
652 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
653 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
654 		if (ref->desc > new_ref->desc)
655 			break;
656 		new_ref->desc = ref->desc + 1;
657 	}
658 
659 	p = &proc->refs_by_desc.rb_node;
660 	while (*p) {
661 		parent = *p;
662 		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
663 
664 		if (new_ref->desc < ref->desc)
665 			p = &(*p)->rb_left;
666 		else if (new_ref->desc > ref->desc)
667 			p = &(*p)->rb_right;
668 		else
669 			BUG();
670 	}
671 	rb_link_node(&new_ref->rb_node_desc, parent, p);
672 	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
673 	hlist_add_head(&new_ref->node_entry, &node->refs);
674 
675 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
676 		     "%d new ref %d desc %d for node %d\n",
677 		      proc->pid, new_ref->debug_id, new_ref->desc,
678 		      node->debug_id);
679 	return new_ref;
680 }
681 
682 static void binder_delete_ref(struct binder_ref *ref)
683 {
684 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
685 		     "%d delete ref %d desc %d for node %d\n",
686 		      ref->proc->pid, ref->debug_id, ref->desc,
687 		      ref->node->debug_id);
688 
689 	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
690 	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
691 	if (ref->strong)
692 		binder_dec_node(ref->node, 1, 1);
693 	hlist_del(&ref->node_entry);
694 	binder_dec_node(ref->node, 0, 1);
695 	if (ref->death) {
696 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
697 			     "%d delete ref %d desc %d has death notification\n",
698 			      ref->proc->pid, ref->debug_id, ref->desc);
699 		list_del(&ref->death->work.entry);
700 		kfree(ref->death);
701 		binder_stats_deleted(BINDER_STAT_DEATH);
702 	}
703 	kfree(ref);
704 	binder_stats_deleted(BINDER_STAT_REF);
705 }
706 
707 static int binder_inc_ref(struct binder_ref *ref, int strong,
708 			  struct list_head *target_list)
709 {
710 	int ret;
711 
712 	if (strong) {
713 		if (ref->strong == 0) {
714 			ret = binder_inc_node(ref->node, 1, 1, target_list);
715 			if (ret)
716 				return ret;
717 		}
718 		ref->strong++;
719 	} else {
720 		if (ref->weak == 0) {
721 			ret = binder_inc_node(ref->node, 0, 1, target_list);
722 			if (ret)
723 				return ret;
724 		}
725 		ref->weak++;
726 	}
727 	return 0;
728 }
729 
730 
731 static int binder_dec_ref(struct binder_ref *ref, int strong)
732 {
733 	if (strong) {
734 		if (ref->strong == 0) {
735 			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
736 					  ref->proc->pid, ref->debug_id,
737 					  ref->desc, ref->strong, ref->weak);
738 			return -EINVAL;
739 		}
740 		ref->strong--;
741 		if (ref->strong == 0) {
742 			int ret;
743 
744 			ret = binder_dec_node(ref->node, strong, 1);
745 			if (ret)
746 				return ret;
747 		}
748 	} else {
749 		if (ref->weak == 0) {
750 			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
751 					  ref->proc->pid, ref->debug_id,
752 					  ref->desc, ref->strong, ref->weak);
753 			return -EINVAL;
754 		}
755 		ref->weak--;
756 	}
757 	if (ref->strong == 0 && ref->weak == 0)
758 		binder_delete_ref(ref);
759 	return 0;
760 }
761 
762 static void binder_pop_transaction(struct binder_thread *target_thread,
763 				   struct binder_transaction *t)
764 {
765 	if (target_thread) {
766 		BUG_ON(target_thread->transaction_stack != t);
767 		BUG_ON(target_thread->transaction_stack->from != target_thread);
768 		target_thread->transaction_stack =
769 			target_thread->transaction_stack->from_parent;
770 		t->from = NULL;
771 	}
772 	t->need_reply = 0;
773 	if (t->buffer)
774 		t->buffer->transaction = NULL;
775 	kfree(t);
776 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
777 }
778 
779 static void binder_send_failed_reply(struct binder_transaction *t,
780 				     uint32_t error_code)
781 {
782 	struct binder_thread *target_thread;
783 	struct binder_transaction *next;
784 
785 	BUG_ON(t->flags & TF_ONE_WAY);
786 	while (1) {
787 		target_thread = t->from;
788 		if (target_thread) {
789 			if (target_thread->return_error != BR_OK &&
790 			   target_thread->return_error2 == BR_OK) {
791 				target_thread->return_error2 =
792 					target_thread->return_error;
793 				target_thread->return_error = BR_OK;
794 			}
795 			if (target_thread->return_error == BR_OK) {
796 				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
797 					     "send failed reply for transaction %d to %d:%d\n",
798 					      t->debug_id,
799 					      target_thread->proc->pid,
800 					      target_thread->pid);
801 
802 				binder_pop_transaction(target_thread, t);
803 				target_thread->return_error = error_code;
804 				wake_up_interruptible(&target_thread->wait);
805 			} else {
806 				pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
807 					target_thread->proc->pid,
808 					target_thread->pid,
809 					target_thread->return_error);
810 			}
811 			return;
812 		}
813 		next = t->from_parent;
814 
815 		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
816 			     "send failed reply for transaction %d, target dead\n",
817 			     t->debug_id);
818 
819 		binder_pop_transaction(target_thread, t);
820 		if (next == NULL) {
821 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
822 				     "reply failed, no target thread at root\n");
823 			return;
824 		}
825 		t = next;
826 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
827 			     "reply failed, no target thread -- retry %d\n",
828 			      t->debug_id);
829 	}
830 }
831 
832 /**
833  * binder_validate_object() - checks for a valid metadata object in a buffer.
834  * @buffer:	binder_buffer that we're parsing.
835  * @offset:	offset in the buffer at which to validate an object.
836  *
837  * Return:	If there's a valid metadata object at @offset in @buffer, the
838  *		size of that object. Otherwise, it returns zero.
839  */
840 static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
841 {
842 	/* Check if we can read a header first */
843 	struct binder_object_header *hdr;
844 	size_t object_size = 0;
845 
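	/*
	 * The checks are ordered to avoid unsigned underflow: offset is
	 * only compared against data_size - sizeof(*hdr) once data_size
	 * is known to be at least sizeof(*hdr).
	 */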
846 	if (offset > buffer->data_size - sizeof(*hdr) ||
847 	    buffer->data_size < sizeof(*hdr) ||
848 	    !IS_ALIGNED(offset, sizeof(u32)))
849 		return 0;
850 
851 	/* Ok, now see if we can read a complete object. */
852 	hdr = (struct binder_object_header *)(buffer->data + offset);
853 	switch (hdr->type) {
854 	case BINDER_TYPE_BINDER:
855 	case BINDER_TYPE_WEAK_BINDER:
856 	case BINDER_TYPE_HANDLE:
857 	case BINDER_TYPE_WEAK_HANDLE:
858 		object_size = sizeof(struct flat_binder_object);
859 		break;
860 	case BINDER_TYPE_FD:
861 		object_size = sizeof(struct binder_fd_object);
862 		break;
863 	case BINDER_TYPE_PTR:
864 		object_size = sizeof(struct binder_buffer_object);
865 		break;
866 	case BINDER_TYPE_FDA:
867 		object_size = sizeof(struct binder_fd_array_object);
868 		break;
869 	default:
870 		return 0;
871 	}
872 	if (offset <= buffer->data_size - object_size &&
873 	    buffer->data_size >= object_size)
874 		return object_size;
875 	else
876 		return 0;
877 }
878 
879 /**
880  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
881  * @b:		binder_buffer containing the object
882  * @index:	index in offset array at which the binder_buffer_object is
883  *		located
884  * @start:	points to the start of the offset array
885  * @num_valid:	the number of valid offsets in the offset array
886  *
887  * Return:	If @index is within the valid range of the offset array
888  *		described by @start and @num_valid, and if there's a valid
889  *		binder_buffer_object at the offset found in index @index
890  *		of the offset array, that object is returned. Otherwise,
891  *		%NULL is returned.
892  *		Note that the offset found in index @index itself is not
893  *		verified; this function assumes that @num_valid elements
894  *		from @start were previously verified to have valid offsets.
895  */
896 static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
897 							binder_size_t index,
898 							binder_size_t *start,
899 							binder_size_t num_valid)
900 {
901 	struct binder_buffer_object *buffer_obj;
902 	binder_size_t *offp;
903 
904 	if (index >= num_valid)
905 		return NULL;
906 
907 	offp = start + index;
908 	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
909 	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
910 		return NULL;
911 
912 	return buffer_obj;
913 }
914 
915 /**
916  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
917  * @b:			transaction buffer
918  * @objects_start:	start of objects buffer
919  * @buffer:		binder_buffer_object in which to fix up
920  * @fixup_offset:	start offset in @buffer to fix up
921  * @last_obj:		last binder_buffer_object that we fixed up in
922  * @last_min_offset:	minimum fixup offset in @last_obj
923  *
924  * Return:		%true if a fixup in buffer @buffer at offset @offset is
925  *			allowed.
926  *
927  * For safety reasons, we only allow fixups inside a buffer to happen
928  * at increasing offsets; additionally, we only allow fixup on the last
929  * buffer object that was verified, or one of its parents.
930  *
931  * Example of what is allowed:
932  *
933  * A
934  *   B (parent = A, offset = 0)
935  *   C (parent = A, offset = 16)
936  *     D (parent = C, offset = 0)
937  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
938  *
939  * Examples of what is not allowed:
940  *
941  * Decreasing offsets within the same parent:
942  * A
943  *   C (parent = A, offset = 16)
944  *   B (parent = A, offset = 0) // decreasing offset within A
945  *
946  * Referring to a parent that wasn't the last object or any of its parents:
947  * A
948  *   B (parent = A, offset = 0)
949  *   C (parent = A, offset = 0)
950  *   C (parent = A, offset = 16)
951  *     D (parent = B, offset = 0) // B is not A or any of A's parents
952  */
953 static bool binder_validate_fixup(struct binder_buffer *b,
954 				  binder_size_t *objects_start,
955 				  struct binder_buffer_object *buffer,
956 				  binder_size_t fixup_offset,
957 				  struct binder_buffer_object *last_obj,
958 				  binder_size_t last_min_offset)
959 {
960 	if (!last_obj) {
961 		/* No verified buffer object to fix up in */
962 		return false;
963 	}
964 
965 	while (last_obj != buffer) {
966 		/*
967 		 * Safe to retrieve the parent of last_obj, since it
968 		 * was already previously verified by the driver.
969 		 */
970 		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
971 			return false;
972 		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
973 		last_obj = (struct binder_buffer_object *)
974 			(b->data + *(objects_start + last_obj->parent));
975 	}
976 	return (fixup_offset >= last_min_offset);
977 }
978 
979 static void binder_transaction_buffer_release(struct binder_proc *proc,
980 					      struct binder_buffer *buffer,
981 					      binder_size_t *failed_at)
982 {
983 	binder_size_t *offp, *off_start, *off_end;
984 	int debug_id = buffer->debug_id;
985 
986 	binder_debug(BINDER_DEBUG_TRANSACTION,
987 		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
988 		     proc->pid, buffer->debug_id,
989 		     buffer->data_size, buffer->offsets_size, failed_at);
990 
991 	if (buffer->target_node)
992 		binder_dec_node(buffer->target_node, 1, 0);
993 
994 	off_start = (binder_size_t *)(buffer->data +
995 				      ALIGN(buffer->data_size, sizeof(void *)));
996 	if (failed_at)
997 		off_end = failed_at;
998 	else
999 		off_end = (void *)off_start + buffer->offsets_size;
1000 	for (offp = off_start; offp < off_end; offp++) {
1001 		struct binder_object_header *hdr;
1002 		size_t object_size = binder_validate_object(buffer, *offp);
1003 
1004 		if (object_size == 0) {
1005 			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
1006 			       debug_id, (u64)*offp, buffer->data_size);
1007 			continue;
1008 		}
1009 		hdr = (struct binder_object_header *)(buffer->data + *offp);
1010 		switch (hdr->type) {
1011 		case BINDER_TYPE_BINDER:
1012 		case BINDER_TYPE_WEAK_BINDER: {
1013 			struct flat_binder_object *fp;
1014 			struct binder_node *node;
1015 
1016 			fp = to_flat_binder_object(hdr);
1017 			node = binder_get_node(proc, fp->binder);
1018 			if (node == NULL) {
1019 				pr_err("transaction release %d bad node %016llx\n",
1020 				       debug_id, (u64)fp->binder);
1021 				break;
1022 			}
1023 			binder_debug(BINDER_DEBUG_TRANSACTION,
1024 				     "        node %d u%016llx\n",
1025 				     node->debug_id, (u64)node->ptr);
1026 			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
1027 					0);
1028 		} break;
1029 		case BINDER_TYPE_HANDLE:
1030 		case BINDER_TYPE_WEAK_HANDLE: {
1031 			struct flat_binder_object *fp;
1032 			struct binder_ref *ref;
1033 
1034 			fp = to_flat_binder_object(hdr);
1035 			ref = binder_get_ref(proc, fp->handle,
1036 					     hdr->type == BINDER_TYPE_HANDLE);
1037 			if (ref == NULL) {
1038 				pr_err("transaction release %d bad handle %d\n",
1039 				       debug_id, fp->handle);
1040 				break;
1041 			}
1042 			binder_debug(BINDER_DEBUG_TRANSACTION,
1043 				     "        ref %d desc %d (node %d)\n",
1044 				     ref->debug_id, ref->desc, ref->node->debug_id);
1045 			binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
1046 		} break;
1047 
1048 		case BINDER_TYPE_FD: {
1049 			struct binder_fd_object *fp = to_binder_fd_object(hdr);
1050 
1051 			binder_debug(BINDER_DEBUG_TRANSACTION,
1052 				     "        fd %d\n", fp->fd);
1053 			if (failed_at)
1054 				task_close_fd(proc, fp->fd);
1055 		} break;
1056 		case BINDER_TYPE_PTR:
1057 			/*
1058 			 * Nothing to do here, this will get cleaned up when the
1059 			 * transaction buffer gets freed
1060 			 */
1061 			break;
1062 		case BINDER_TYPE_FDA: {
1063 			struct binder_fd_array_object *fda;
1064 			struct binder_buffer_object *parent;
1065 			uintptr_t parent_buffer;
1066 			u32 *fd_array;
1067 			size_t fd_index;
1068 			binder_size_t fd_buf_size;
1069 
1070 			fda = to_binder_fd_array_object(hdr);
1071 			parent = binder_validate_ptr(buffer, fda->parent,
1072 						     off_start,
1073 						     offp - off_start);
1074 			if (!parent) {
1075 				pr_err("transaction release %d bad parent offset\n",
1076 				       debug_id);
1077 				continue;
1078 			}
1079 			/*
1080 			 * Since the parent was already fixed up, convert it
1081 			 * back to kernel address space to access it
1082 			 */
1083 			parent_buffer = parent->buffer -
1084 				binder_alloc_get_user_buffer_offset(
1085 						&proc->alloc);
1086 
1087 			fd_buf_size = sizeof(u32) * fda->num_fds;
1088 			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
1089 				pr_err("transaction release %d invalid number of fds (%lld)\n",
1090 				       debug_id, (u64)fda->num_fds);
1091 				continue;
1092 			}
1093 			if (fd_buf_size > parent->length ||
1094 			    fda->parent_offset > parent->length - fd_buf_size) {
1095 				/* No space for all file descriptors here. */
1096 				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
1097 				       debug_id, (u64)fda->num_fds);
1098 				continue;
1099 			}
1100 			fd_array = (u32 *)(parent_buffer + fda->parent_offset);
1101 			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
1102 				task_close_fd(proc, fd_array[fd_index]);
1103 		} break;
1104 		default:
1105 			pr_err("transaction release %d bad object type %x\n",
1106 				debug_id, hdr->type);
1107 			break;
1108 		}
1109 	}
1110 }
1111 
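/*
 * Translate a local binder object (BINDER_TYPE_[WEAK_]BINDER) into a
 * handle valid in the target process: create the sender's node on
 * first use, then give the target a ref whose descriptor replaces the
 * original pointer in the flattened object.
 */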
1112 static int binder_translate_binder(struct flat_binder_object *fp,
1113 				   struct binder_transaction *t,
1114 				   struct binder_thread *thread)
1115 {
1116 	struct binder_node *node;
1117 	struct binder_ref *ref;
1118 	struct binder_proc *proc = thread->proc;
1119 	struct binder_proc *target_proc = t->to_proc;
1120 
1121 	node = binder_get_node(proc, fp->binder);
1122 	if (!node) {
1123 		node = binder_new_node(proc, fp->binder, fp->cookie);
1124 		if (!node)
1125 			return -ENOMEM;
1126 
1127 		node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1128 		node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1129 	}
1130 	if (fp->cookie != node->cookie) {
1131 		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
1132 				  proc->pid, thread->pid, (u64)fp->binder,
1133 				  node->debug_id, (u64)fp->cookie,
1134 				  (u64)node->cookie);
1135 		return -EINVAL;
1136 	}
1137 	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
1138 		return -EPERM;
1139 
1140 	ref = binder_get_ref_for_node(target_proc, node);
1141 	if (!ref)
1142 		return -ENOMEM;
1143 
1144 	if (fp->hdr.type == BINDER_TYPE_BINDER)
1145 		fp->hdr.type = BINDER_TYPE_HANDLE;
1146 	else
1147 		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
1148 	fp->binder = 0;
1149 	fp->handle = ref->desc;
1150 	fp->cookie = 0;
1151 	binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);
1152 
1153 	trace_binder_transaction_node_to_ref(t, node, ref);
1154 	binder_debug(BINDER_DEBUG_TRANSACTION,
1155 		     "        node %d u%016llx -> ref %d desc %d\n",
1156 		     node->debug_id, (u64)node->ptr,
1157 		     ref->debug_id, ref->desc);
1158 
1159 	return 0;
1160 }
1161 
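/*
 * Translate an incoming handle.  If the referenced node lives in the
 * target process, the handle collapses back into a local binder
 * object; otherwise the target gets (or reuses) its own ref to the
 * node and sees a different handle value.
 */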
1162 static int binder_translate_handle(struct flat_binder_object *fp,
1163 				   struct binder_transaction *t,
1164 				   struct binder_thread *thread)
1165 {
1166 	struct binder_ref *ref;
1167 	struct binder_proc *proc = thread->proc;
1168 	struct binder_proc *target_proc = t->to_proc;
1169 
1170 	ref = binder_get_ref(proc, fp->handle,
1171 			     fp->hdr.type == BINDER_TYPE_HANDLE);
1172 	if (!ref) {
1173 		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
1174 				  proc->pid, thread->pid, fp->handle);
1175 		return -EINVAL;
1176 	}
1177 	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
1178 		return -EPERM;
1179 
1180 	if (ref->node->proc == target_proc) {
1181 		if (fp->hdr.type == BINDER_TYPE_HANDLE)
1182 			fp->hdr.type = BINDER_TYPE_BINDER;
1183 		else
1184 			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
1185 		fp->binder = ref->node->ptr;
1186 		fp->cookie = ref->node->cookie;
1187 		binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
1188 				0, NULL);
1189 		trace_binder_transaction_ref_to_node(t, ref);
1190 		binder_debug(BINDER_DEBUG_TRANSACTION,
1191 			     "        ref %d desc %d -> node %d u%016llx\n",
1192 			     ref->debug_id, ref->desc, ref->node->debug_id,
1193 			     (u64)ref->node->ptr);
1194 	} else {
1195 		struct binder_ref *new_ref;
1196 
1197 		new_ref = binder_get_ref_for_node(target_proc, ref->node);
1198 		if (!new_ref)
1199 			return -ENOMEM;
1200 
1201 		fp->binder = 0;
1202 		fp->handle = new_ref->desc;
1203 		fp->cookie = 0;
1204 		binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
1205 			       NULL);
1206 		trace_binder_transaction_ref_to_ref(t, ref, new_ref);
1207 		binder_debug(BINDER_DEBUG_TRANSACTION,
1208 			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
1209 			     ref->debug_id, ref->desc, new_ref->debug_id,
1210 			     new_ref->desc, ref->node->debug_id);
1211 	}
1212 	return 0;
1213 }
1214 
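/*
 * Duplicate @fd into the target process: take a reference on the file
 * in the sender's context, reserve a descriptor in the target's file
 * table and install the file there (O_CLOEXEC).  Returns the target's
 * fd number or a negative errno.
 */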
1215 static int binder_translate_fd(int fd,
1216 			       struct binder_transaction *t,
1217 			       struct binder_thread *thread,
1218 			       struct binder_transaction *in_reply_to)
1219 {
1220 	struct binder_proc *proc = thread->proc;
1221 	struct binder_proc *target_proc = t->to_proc;
1222 	int target_fd;
1223 	struct file *file;
1224 	int ret;
1225 	bool target_allows_fd;
1226 
1227 	if (in_reply_to)
1228 		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
1229 	else
1230 		target_allows_fd = t->buffer->target_node->accept_fds;
1231 	if (!target_allows_fd) {
1232 		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
1233 				  proc->pid, thread->pid,
1234 				  in_reply_to ? "reply" : "transaction",
1235 				  fd);
1236 		ret = -EPERM;
1237 		goto err_fd_not_accepted;
1238 	}
1239 
1240 	file = fget(fd);
1241 	if (!file) {
1242 		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
1243 				  proc->pid, thread->pid, fd);
1244 		ret = -EBADF;
1245 		goto err_fget;
1246 	}
1247 	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
1248 	if (ret < 0) {
1249 		ret = -EPERM;
1250 		goto err_security;
1251 	}
1252 
1253 	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
1254 	if (target_fd < 0) {
1255 		ret = -ENOMEM;
1256 		goto err_get_unused_fd;
1257 	}
1258 	task_fd_install(target_proc, target_fd, file);
1259 	trace_binder_transaction_fd(t, fd, target_fd);
1260 	binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
1261 		     fd, target_fd);
1262 
1263 	return target_fd;
1264 
1265 err_get_unused_fd:
1266 err_security:
1267 	fput(file);
1268 err_fget:
1269 err_fd_not_accepted:
1270 	return ret;
1271 }
1272 
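/*
 * Translate every fd in a BINDER_TYPE_FDA array in place.  On failure,
 * any fds already installed in the target are closed again before the
 * error is returned.
 */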
1273 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
1274 				     struct binder_buffer_object *parent,
1275 				     struct binder_transaction *t,
1276 				     struct binder_thread *thread,
1277 				     struct binder_transaction *in_reply_to)
1278 {
1279 	binder_size_t fdi, fd_buf_size, num_installed_fds;
1280 	int target_fd;
1281 	uintptr_t parent_buffer;
1282 	u32 *fd_array;
1283 	struct binder_proc *proc = thread->proc;
1284 	struct binder_proc *target_proc = t->to_proc;
1285 
1286 	fd_buf_size = sizeof(u32) * fda->num_fds;
1287 	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
1288 		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
1289 				  proc->pid, thread->pid, (u64)fda->num_fds);
1290 		return -EINVAL;
1291 	}
1292 	if (fd_buf_size > parent->length ||
1293 	    fda->parent_offset > parent->length - fd_buf_size) {
1294 		/* No space for all file descriptors here. */
1295 		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
1296 				  proc->pid, thread->pid, (u64)fda->num_fds);
1297 		return -EINVAL;
1298 	}
1299 	/*
1300 	 * Since the parent was already fixed up, convert it
1301 	 * back to the kernel address space to access it
1302 	 */
1303 	parent_buffer = parent->buffer -
1304 		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
1305 	fd_array = (u32 *)(parent_buffer + fda->parent_offset);
1306 	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
1307 		binder_user_error("%d:%d parent offset not aligned correctly.\n",
1308 				  proc->pid, thread->pid);
1309 		return -EINVAL;
1310 	}
1311 	for (fdi = 0; fdi < fda->num_fds; fdi++) {
1312 		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
1313 						in_reply_to);
1314 		if (target_fd < 0)
1315 			goto err_translate_fd_failed;
1316 		fd_array[fdi] = target_fd;
1317 	}
1318 	return 0;
1319 
1320 err_translate_fd_failed:
1321 	/*
1322 	 * Failed to allocate fd or security error, free fds
1323 	 * installed so far.
1324 	 */
1325 	num_installed_fds = fdi;
1326 	for (fdi = 0; fdi < num_installed_fds; fdi++)
1327 		task_close_fd(target_proc, fd_array[fdi]);
1328 	return target_fd;
1329 }
1330 
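/*
 * If @bp has a parent, write @bp's (already translated) user-space
 * address into the parent buffer at @bp->parent_offset, after checking
 * that the fixup location is valid and arrives in the order
 * binder_validate_fixup() demands.
 */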
1331 static int binder_fixup_parent(struct binder_transaction *t,
1332 			       struct binder_thread *thread,
1333 			       struct binder_buffer_object *bp,
1334 			       binder_size_t *off_start,
1335 			       binder_size_t num_valid,
1336 			       struct binder_buffer_object *last_fixup_obj,
1337 			       binder_size_t last_fixup_min_off)
1338 {
1339 	struct binder_buffer_object *parent;
1340 	u8 *parent_buffer;
1341 	struct binder_buffer *b = t->buffer;
1342 	struct binder_proc *proc = thread->proc;
1343 	struct binder_proc *target_proc = t->to_proc;
1344 
1345 	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
1346 		return 0;
1347 
1348 	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
1349 	if (!parent) {
1350 		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
1351 				  proc->pid, thread->pid);
1352 		return -EINVAL;
1353 	}
1354 
1355 	if (!binder_validate_fixup(b, off_start,
1356 				   parent, bp->parent_offset,
1357 				   last_fixup_obj,
1358 				   last_fixup_min_off)) {
1359 		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
1360 				  proc->pid, thread->pid);
1361 		return -EINVAL;
1362 	}
1363 
1364 	if (parent->length < sizeof(binder_uintptr_t) ||
1365 	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
1366 		/* No space for a pointer here! */
1367 		binder_user_error("%d:%d got transaction with invalid parent offset\n",
1368 				  proc->pid, thread->pid);
1369 		return -EINVAL;
1370 	}
1371 	parent_buffer = (u8 *)(parent->buffer -
1372 			binder_alloc_get_user_buffer_offset(
1373 				&target_proc->alloc));
1374 	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
1375 
1376 	return 0;
1377 }
1378 
1379 static void binder_transaction(struct binder_proc *proc,
1380 			       struct binder_thread *thread,
1381 			       struct binder_transaction_data *tr, int reply,
1382 			       binder_size_t extra_buffers_size)
1383 {
1384 	int ret;
1385 	struct binder_transaction *t;
1386 	struct binder_work *tcomplete;
1387 	binder_size_t *offp, *off_end, *off_start;
1388 	binder_size_t off_min;
1389 	u8 *sg_bufp, *sg_buf_end;
1390 	struct binder_proc *target_proc;
1391 	struct binder_thread *target_thread = NULL;
1392 	struct binder_node *target_node = NULL;
1393 	struct list_head *target_list;
1394 	wait_queue_head_t *target_wait;
1395 	struct binder_transaction *in_reply_to = NULL;
1396 	struct binder_transaction_log_entry *e;
1397 	uint32_t return_error = 0;
1398 	uint32_t return_error_param = 0;
1399 	uint32_t return_error_line = 0;
1400 	struct binder_buffer_object *last_fixup_obj = NULL;
1401 	binder_size_t last_fixup_min_off = 0;
1402 	struct binder_context *context = proc->context;
1403 
1404 	e = binder_transaction_log_add(&binder_transaction_log);
1405 	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
1406 	e->from_proc = proc->pid;
1407 	e->from_thread = thread->pid;
1408 	e->target_handle = tr->target.handle;
1409 	e->data_size = tr->data_size;
1410 	e->offsets_size = tr->offsets_size;
1411 	e->context_name = proc->context->name;
1412 
1413 	if (reply) {
1414 		in_reply_to = thread->transaction_stack;
1415 		if (in_reply_to == NULL) {
1416 			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
1417 					  proc->pid, thread->pid);
1418 			return_error = BR_FAILED_REPLY;
1419 			return_error_param = -EPROTO;
1420 			return_error_line = __LINE__;
1421 			goto err_empty_call_stack;
1422 		}
1423 		binder_set_nice(in_reply_to->saved_priority);
1424 		if (in_reply_to->to_thread != thread) {
1425 			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
1426 				proc->pid, thread->pid, in_reply_to->debug_id,
1427 				in_reply_to->to_proc ?
1428 				in_reply_to->to_proc->pid : 0,
1429 				in_reply_to->to_thread ?
1430 				in_reply_to->to_thread->pid : 0);
1431 			return_error = BR_FAILED_REPLY;
1432 			return_error_param = -EPROTO;
1433 			return_error_line = __LINE__;
1434 			in_reply_to = NULL;
1435 			goto err_bad_call_stack;
1436 		}
1437 		thread->transaction_stack = in_reply_to->to_parent;
1438 		target_thread = in_reply_to->from;
1439 		if (target_thread == NULL) {
1440 			return_error = BR_DEAD_REPLY;
1441 			return_error_line = __LINE__;
1442 			goto err_dead_binder;
1443 		}
1444 		if (target_thread->transaction_stack != in_reply_to) {
1445 			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
1446 				proc->pid, thread->pid,
1447 				target_thread->transaction_stack ?
1448 				target_thread->transaction_stack->debug_id : 0,
1449 				in_reply_to->debug_id);
1450 			return_error = BR_FAILED_REPLY;
1451 			return_error_param = -EPROTO;
1452 			return_error_line = __LINE__;
1453 			in_reply_to = NULL;
1454 			target_thread = NULL;
1455 			goto err_dead_binder;
1456 		}
1457 		target_proc = target_thread->proc;
1458 	} else {
1459 		if (tr->target.handle) {
1460 			struct binder_ref *ref;
1461 
1462 			ref = binder_get_ref(proc, tr->target.handle, true);
1463 			if (ref == NULL) {
1464 				binder_user_error("%d:%d got transaction to invalid handle\n",
1465 					proc->pid, thread->pid);
1466 				return_error = BR_FAILED_REPLY;
1467 				return_error_param = -EINVAL;
1468 				return_error_line = __LINE__;
1469 				goto err_invalid_target_handle;
1470 			}
1471 			target_node = ref->node;
1472 		} else {
1473 			mutex_lock(&context->context_mgr_node_lock);
1474 			target_node = context->binder_context_mgr_node;
1475 			if (target_node == NULL) {
1476 				return_error = BR_DEAD_REPLY;
1477 				mutex_unlock(&context->context_mgr_node_lock);
1478 				return_error_line = __LINE__;
1479 				goto err_no_context_mgr_node;
1480 			}
1481 			mutex_unlock(&context->context_mgr_node_lock);
1482 		}
1483 		e->to_node = target_node->debug_id;
1484 		target_proc = target_node->proc;
1485 		if (target_proc == NULL) {
1486 			return_error = BR_DEAD_REPLY;
1487 			return_error_line = __LINE__;
1488 			goto err_dead_binder;
1489 		}
1490 		if (security_binder_transaction(proc->tsk,
1491 						target_proc->tsk) < 0) {
1492 			return_error = BR_FAILED_REPLY;
1493 			return_error_param = -EPERM;
1494 			return_error_line = __LINE__;
1495 			goto err_invalid_target_handle;
1496 		}
1497 		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
1498 			struct binder_transaction *tmp;
1499 
1500 			tmp = thread->transaction_stack;
1501 			if (tmp->to_thread != thread) {
1502 				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
1503 					proc->pid, thread->pid, tmp->debug_id,
1504 					tmp->to_proc ? tmp->to_proc->pid : 0,
1505 					tmp->to_thread ?
1506 					tmp->to_thread->pid : 0);
1507 				return_error = BR_FAILED_REPLY;
1508 				return_error_param = -EPROTO;
1509 				return_error_line = __LINE__;
1510 				goto err_bad_call_stack;
1511 			}
1512 			while (tmp) {
1513 				if (tmp->from && tmp->from->proc == target_proc)
1514 					target_thread = tmp->from;
1515 				tmp = tmp->from_parent;
1516 			}
1517 		}
1518 	}
1519 	if (target_thread) {
1520 		e->to_thread = target_thread->pid;
1521 		target_list = &target_thread->todo;
1522 		target_wait = &target_thread->wait;
1523 	} else {
1524 		target_list = &target_proc->todo;
1525 		target_wait = &target_proc->wait;
1526 	}
1527 	e->to_proc = target_proc->pid;
1528 
1529 	/* TODO: reuse incoming transaction for reply */
1530 	t = kzalloc(sizeof(*t), GFP_KERNEL);
1531 	if (t == NULL) {
1532 		return_error = BR_FAILED_REPLY;
1533 		return_error_param = -ENOMEM;
1534 		return_error_line = __LINE__;
1535 		goto err_alloc_t_failed;
1536 	}
1537 	binder_stats_created(BINDER_STAT_TRANSACTION);
1538 
1539 	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
1540 	if (tcomplete == NULL) {
1541 		return_error = BR_FAILED_REPLY;
1542 		return_error_param = -ENOMEM;
1543 		return_error_line = __LINE__;
1544 		goto err_alloc_tcomplete_failed;
1545 	}
1546 	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
1547 
1548 	t->debug_id = atomic_inc_return(&binder_last_id);
1549 	e->debug_id = t->debug_id;
1550 
1551 	if (reply)
1552 		binder_debug(BINDER_DEBUG_TRANSACTION,
1553 			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
1554 			     proc->pid, thread->pid, t->debug_id,
1555 			     target_proc->pid, target_thread->pid,
1556 			     (u64)tr->data.ptr.buffer,
1557 			     (u64)tr->data.ptr.offsets,
1558 			     (u64)tr->data_size, (u64)tr->offsets_size,
1559 			     (u64)extra_buffers_size);
1560 	else
1561 		binder_debug(BINDER_DEBUG_TRANSACTION,
1562 			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
1563 			     proc->pid, thread->pid, t->debug_id,
1564 			     target_proc->pid, target_node->debug_id,
1565 			     (u64)tr->data.ptr.buffer,
1566 			     (u64)tr->data.ptr.offsets,
1567 			     (u64)tr->data_size, (u64)tr->offsets_size,
1568 			     (u64)extra_buffers_size);
1569 
1570 	if (!reply && !(tr->flags & TF_ONE_WAY))
1571 		t->from = thread;
1572 	else
1573 		t->from = NULL;
1574 	t->sender_euid = task_euid(proc->tsk);
1575 	t->to_proc = target_proc;
1576 	t->to_thread = target_thread;
1577 	t->code = tr->code;
1578 	t->flags = tr->flags;
1579 	t->priority = task_nice(current);
1580 
1581 	trace_binder_transaction(reply, t, target_node);
1582 
1583 	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
1584 		tr->offsets_size, extra_buffers_size,
1585 		!reply && (t->flags & TF_ONE_WAY));
1586 	if (IS_ERR(t->buffer)) {
1587 		/*
1588 		 * -ESRCH indicates VMA cleared. The target is dying.
1589 		 */
1590 		return_error_param = PTR_ERR(t->buffer);
1591 		return_error = return_error_param == -ESRCH ?
1592 			BR_DEAD_REPLY : BR_FAILED_REPLY;
1593 		return_error_line = __LINE__;
1594 		t->buffer = NULL;
1595 		goto err_binder_alloc_buf_failed;
1596 	}
1597 	t->buffer->allow_user_free = 0;
1598 	t->buffer->debug_id = t->debug_id;
1599 	t->buffer->transaction = t;
1600 	t->buffer->target_node = target_node;
1601 	trace_binder_transaction_alloc_buf(t->buffer);
1602 	if (target_node)
1603 		binder_inc_node(target_node, 1, 0, NULL);
1604 
1605 	off_start = (binder_size_t *)(t->buffer->data +
1606 				      ALIGN(tr->data_size, sizeof(void *)));
1607 	offp = off_start;
1608 
1609 	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
1610 			   tr->data.ptr.buffer, tr->data_size)) {
1611 		binder_user_error("%d:%d got transaction with invalid data ptr\n",
1612 				proc->pid, thread->pid);
1613 		return_error = BR_FAILED_REPLY;
1614 		return_error_param = -EFAULT;
1615 		return_error_line = __LINE__;
1616 		goto err_copy_data_failed;
1617 	}
1618 	if (copy_from_user(offp, (const void __user *)(uintptr_t)
1619 			   tr->data.ptr.offsets, tr->offsets_size)) {
1620 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
1621 				proc->pid, thread->pid);
1622 		return_error = BR_FAILED_REPLY;
1623 		return_error_param = -EFAULT;
1624 		return_error_line = __LINE__;
1625 		goto err_copy_data_failed;
1626 	}
1627 	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
1628 		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
1629 				proc->pid, thread->pid, (u64)tr->offsets_size);
1630 		return_error = BR_FAILED_REPLY;
1631 		return_error_param = -EINVAL;
1632 		return_error_line = __LINE__;
1633 		goto err_bad_offset;
1634 	}
1635 	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
1636 		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
1637 				  proc->pid, thread->pid,
1638 				  (u64)extra_buffers_size);
1639 		return_error = BR_FAILED_REPLY;
1640 		return_error_param = -EINVAL;
1641 		return_error_line = __LINE__;
1642 		goto err_bad_offset;
1643 	}
1644 	off_end = (void *)off_start + tr->offsets_size;
1645 	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
1646 	sg_buf_end = sg_bufp + extra_buffers_size;
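	/*
	 * Layout of t->buffer->data at this point:
	 *
	 *   [0, data_size)                     copied transaction data
	 *   [ALIGN(data_size), +offsets_size)  offsets array
	 *   [PTR_ALIGN(off_end), +extra)       sg copies for BINDER_TYPE_PTR
	 */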
1647 	off_min = 0;
1648 	for (; offp < off_end; offp++) {
1649 		struct binder_object_header *hdr;
1650 		size_t object_size = binder_validate_object(t->buffer, *offp);
1651 
1652 		if (object_size == 0 || *offp < off_min) {
1653 			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
1654 					  proc->pid, thread->pid, (u64)*offp,
1655 					  (u64)off_min,
1656 					  (u64)t->buffer->data_size);
1657 			return_error = BR_FAILED_REPLY;
1658 			return_error_param = -EINVAL;
1659 			return_error_line = __LINE__;
1660 			goto err_bad_offset;
1661 		}
1662 
1663 		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
1664 		off_min = *offp + object_size;
1665 		switch (hdr->type) {
1666 		case BINDER_TYPE_BINDER:
1667 		case BINDER_TYPE_WEAK_BINDER: {
1668 			struct flat_binder_object *fp;
1669 
1670 			fp = to_flat_binder_object(hdr);
1671 			ret = binder_translate_binder(fp, t, thread);
1672 			if (ret < 0) {
1673 				return_error = BR_FAILED_REPLY;
1674 				return_error_param = ret;
1675 				return_error_line = __LINE__;
1676 				goto err_translate_failed;
1677 			}
1678 		} break;
1679 		case BINDER_TYPE_HANDLE:
1680 		case BINDER_TYPE_WEAK_HANDLE: {
1681 			struct flat_binder_object *fp;
1682 
1683 			fp = to_flat_binder_object(hdr);
1684 			ret = binder_translate_handle(fp, t, thread);
1685 			if (ret < 0) {
1686 				return_error = BR_FAILED_REPLY;
1687 				return_error_param = ret;
1688 				return_error_line = __LINE__;
1689 				goto err_translate_failed;
1690 			}
1691 		} break;
1692 
1693 		case BINDER_TYPE_FD: {
1694 			struct binder_fd_object *fp = to_binder_fd_object(hdr);
1695 			int target_fd = binder_translate_fd(fp->fd, t, thread,
1696 							    in_reply_to);
1697 
1698 			if (target_fd < 0) {
1699 				return_error = BR_FAILED_REPLY;
1700 				return_error_param = target_fd;
1701 				return_error_line = __LINE__;
1702 				goto err_translate_failed;
1703 			}
1704 			fp->pad_binder = 0;
1705 			fp->fd = target_fd;
1706 		} break;
1707 		case BINDER_TYPE_FDA: {
1708 			struct binder_fd_array_object *fda =
1709 				to_binder_fd_array_object(hdr);
1710 			struct binder_buffer_object *parent =
1711 				binder_validate_ptr(t->buffer, fda->parent,
1712 						    off_start,
1713 						    offp - off_start);
1714 			if (!parent) {
1715 				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
1716 						  proc->pid, thread->pid);
1717 				return_error = BR_FAILED_REPLY;
1718 				return_error_param = -EINVAL;
1719 				return_error_line = __LINE__;
1720 				goto err_bad_parent;
1721 			}
1722 			if (!binder_validate_fixup(t->buffer, off_start,
1723 						   parent, fda->parent_offset,
1724 						   last_fixup_obj,
1725 						   last_fixup_min_off)) {
1726 				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
1727 						  proc->pid, thread->pid);
1728 				return_error = BR_FAILED_REPLY;
1729 				return_error_param = -EINVAL;
1730 				return_error_line = __LINE__;
1731 				goto err_bad_parent;
1732 			}
1733 			ret = binder_translate_fd_array(fda, parent, t, thread,
1734 							in_reply_to);
1735 			if (ret < 0) {
1736 				return_error = BR_FAILED_REPLY;
1737 				return_error_param = ret;
1738 				return_error_line = __LINE__;
1739 				goto err_translate_failed;
1740 			}
1741 			last_fixup_obj = parent;
1742 			last_fixup_min_off =
1743 				fda->parent_offset + sizeof(u32) * fda->num_fds;
1744 		} break;
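		/*
		 * BINDER_TYPE_PTR objects carry an extra user buffer. It is
		 * copied into the scatter-gather area (sg_bufp..sg_buf_end)
		 * reserved after the offsets, and the object's pointer is
		 * then rewritten to the target process's view of that copy.
		 */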
1745 		case BINDER_TYPE_PTR: {
1746 			struct binder_buffer_object *bp =
1747 				to_binder_buffer_object(hdr);
1748 			size_t buf_left = sg_buf_end - sg_bufp;
1749 
1750 			if (bp->length > buf_left) {
1751 				binder_user_error("%d:%d got transaction with too large buffer\n",
1752 						  proc->pid, thread->pid);
1753 				return_error = BR_FAILED_REPLY;
1754 				return_error_param = -EINVAL;
1755 				return_error_line = __LINE__;
1756 				goto err_bad_offset;
1757 			}
1758 			if (copy_from_user(sg_bufp,
1759 					   (const void __user *)(uintptr_t)
1760 					   bp->buffer, bp->length)) {
1761 				binder_user_error("%d:%d got transaction with invalid sg buffer ptr\n",
1762 						  proc->pid, thread->pid);
1763 				return_error = BR_FAILED_REPLY;
1764 				return_error_param = -EFAULT;
1765 				return_error_line = __LINE__;
1766 				goto err_copy_data_failed;
1767 			}
1768 			/* Fixup buffer pointer to target proc address space */
1769 			bp->buffer = (uintptr_t)sg_bufp +
1770 				binder_alloc_get_user_buffer_offset(
1771 						&target_proc->alloc);
1772 			sg_bufp += ALIGN(bp->length, sizeof(u64));
1773 
1774 			ret = binder_fixup_parent(t, thread, bp, off_start,
1775 						  offp - off_start,
1776 						  last_fixup_obj,
1777 						  last_fixup_min_off);
1778 			if (ret < 0) {
1779 				return_error = BR_FAILED_REPLY;
1780 				return_error_param = ret;
1781 				return_error_line = __LINE__;
1782 				goto err_translate_failed;
1783 			}
1784 			last_fixup_obj = bp;
1785 			last_fixup_min_off = 0;
1786 		} break;
1787 		default:
1788 			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
1789 				proc->pid, thread->pid, hdr->type);
1790 			return_error = BR_FAILED_REPLY;
1791 			return_error_param = -EINVAL;
1792 			return_error_line = __LINE__;
1793 			goto err_bad_object_type;
1794 		}
1795 	}
1796 	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
1797 	list_add_tail(&tcomplete->entry, &thread->todo);
1798 
1799 	if (reply) {
1800 		BUG_ON(t->buffer->async_transaction != 0);
1801 		binder_pop_transaction(target_thread, in_reply_to);
1802 	} else if (!(t->flags & TF_ONE_WAY)) {
1803 		BUG_ON(t->buffer->async_transaction != 0);
1804 		t->need_reply = 1;
1805 		t->from_parent = thread->transaction_stack;
1806 		thread->transaction_stack = t;
1807 	} else {
1808 		BUG_ON(target_node == NULL);
1809 		BUG_ON(t->buffer->async_transaction != 1);
1810 		if (target_node->has_async_transaction) {
1811 			target_list = &target_node->async_todo;
1812 			target_wait = NULL;
1813 		} else
1814 			target_node->has_async_transaction = 1;
1815 	}
1816 	t->work.type = BINDER_WORK_TRANSACTION;
1817 	list_add_tail(&t->work.entry, target_list);
1818 	if (target_wait) {
1819 		if (reply || !(tr->flags & TF_ONE_WAY))
1820 			wake_up_interruptible_sync(target_wait);
1821 		else
1822 			wake_up_interruptible(target_wait);
1823 	}
1824 	return;
1825 
1826 err_translate_failed:
1827 err_bad_object_type:
1828 err_bad_offset:
1829 err_bad_parent:
1830 err_copy_data_failed:
1831 	trace_binder_transaction_failed_buffer_release(t->buffer);
1832 	binder_transaction_buffer_release(target_proc, t->buffer, offp);
1833 	t->buffer->transaction = NULL;
1834 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
1835 err_binder_alloc_buf_failed:
1836 	kfree(tcomplete);
1837 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
1838 err_alloc_tcomplete_failed:
1839 	kfree(t);
1840 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
1841 err_alloc_t_failed:
1842 err_bad_call_stack:
1843 err_empty_call_stack:
1844 err_dead_binder:
1845 err_invalid_target_handle:
1846 err_no_context_mgr_node:
1847 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1848 		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
1849 		     proc->pid, thread->pid, return_error, return_error_param,
1850 		     (u64)tr->data_size, (u64)tr->offsets_size,
1851 		     return_error_line);
1852 
1853 	{
1854 		struct binder_transaction_log_entry *fe;
1855 
1856 		e->return_error = return_error;
1857 		e->return_error_param = return_error_param;
1858 		e->return_error_line = return_error_line;
1859 		fe = binder_transaction_log_add(&binder_transaction_log_failed);
1860 		*fe = *e;
1861 	}
1862 
1863 	BUG_ON(thread->return_error != BR_OK);
1864 	if (in_reply_to) {
1865 		thread->return_error = BR_TRANSACTION_COMPLETE;
1866 		binder_send_failed_reply(in_reply_to, return_error);
1867 	} else
1868 		thread->return_error = return_error;
1869 }
1870 
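/*
 * binder_thread_write() - consume BC_* commands from a userspace buffer.
 *
 * A sketch of the wire format, inferred from the parsing below rather
 * than from a normative spec: the write buffer is a packed stream of
 *
 *	uint32_t cmd;	(e.g. BC_TRANSACTION)
 *	...command-specific payload...
 *
 * repeated until @size bytes are consumed. *@consumed is advanced after
 * each fully parsed command, so userspace can resume a partially
 * processed buffer after an error return.
 */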
1871 static int binder_thread_write(struct binder_proc *proc,
1872 			struct binder_thread *thread,
1873 			binder_uintptr_t binder_buffer, size_t size,
1874 			binder_size_t *consumed)
1875 {
1876 	uint32_t cmd;
1877 	struct binder_context *context = proc->context;
1878 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
1879 	void __user *ptr = buffer + *consumed;
1880 	void __user *end = buffer + size;
1881 
1882 	while (ptr < end && thread->return_error == BR_OK) {
1883 		if (get_user(cmd, (uint32_t __user *)ptr))
1884 			return -EFAULT;
1885 		ptr += sizeof(uint32_t);
1886 		trace_binder_command(cmd);
1887 		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
1888 			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
1889 			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
1890 			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
1891 		}
1892 		switch (cmd) {
1893 		case BC_INCREFS:
1894 		case BC_ACQUIRE:
1895 		case BC_RELEASE:
1896 		case BC_DECREFS: {
1897 			uint32_t target;
1898 			struct binder_ref *ref = NULL;
1899 			const char *debug_string;
1900 
1901 			if (get_user(target, (uint32_t __user *)ptr))
1902 				return -EFAULT;
1903 
1904 			ptr += sizeof(uint32_t);
1905 			if (target == 0 &&
1906 			    (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
1907 				struct binder_node *ctx_mgr_node;
1908 
1909 				mutex_lock(&context->context_mgr_node_lock);
1910 				ctx_mgr_node = context->binder_context_mgr_node;
1911 				if (ctx_mgr_node) {
1912 					ref = binder_get_ref_for_node(proc,
1913 							ctx_mgr_node);
1914 					if (ref && ref->desc != target) {
1915 						binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
1916 							proc->pid, thread->pid,
1917 							ref->desc);
1918 					}
1919 				}
1920 				mutex_unlock(&context->context_mgr_node_lock);
1921 			}
1922 			if (ref == NULL)
1923 				ref = binder_get_ref(proc, target,
1924 						     cmd == BC_ACQUIRE ||
1925 						     cmd == BC_RELEASE);
1926 			if (ref == NULL) {
1927 				binder_user_error("%d:%d refcount change on invalid ref %d\n",
1928 					proc->pid, thread->pid, target);
1929 				break;
1930 			}
1931 			switch (cmd) {
1932 			case BC_INCREFS:
1933 				debug_string = "IncRefs";
1934 				binder_inc_ref(ref, 0, NULL);
1935 				break;
1936 			case BC_ACQUIRE:
1937 				debug_string = "Acquire";
1938 				binder_inc_ref(ref, 1, NULL);
1939 				break;
1940 			case BC_RELEASE:
1941 				debug_string = "Release";
1942 				binder_dec_ref(ref, 1);
1943 				break;
1944 			case BC_DECREFS:
1945 			default:
1946 				debug_string = "DecRefs";
1947 				binder_dec_ref(ref, 0);
1948 				break;
1949 			}
1950 			binder_debug(BINDER_DEBUG_USER_REFS,
1951 				     "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
1952 				     proc->pid, thread->pid, debug_string, ref->debug_id,
1953 				     ref->desc, ref->strong, ref->weak, ref->node->debug_id);
1954 			break;
1955 		}
1956 		case BC_INCREFS_DONE:
1957 		case BC_ACQUIRE_DONE: {
1958 			binder_uintptr_t node_ptr;
1959 			binder_uintptr_t cookie;
1960 			struct binder_node *node;
1961 
1962 			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
1963 				return -EFAULT;
1964 			ptr += sizeof(binder_uintptr_t);
1965 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
1966 				return -EFAULT;
1967 			ptr += sizeof(binder_uintptr_t);
1968 			node = binder_get_node(proc, node_ptr);
1969 			if (node == NULL) {
1970 				binder_user_error("%d:%d %s u%016llx no match\n",
1971 					proc->pid, thread->pid,
1972 					cmd == BC_INCREFS_DONE ?
1973 					"BC_INCREFS_DONE" :
1974 					"BC_ACQUIRE_DONE",
1975 					(u64)node_ptr);
1976 				break;
1977 			}
1978 			if (cookie != node->cookie) {
1979 				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
1980 					proc->pid, thread->pid,
1981 					cmd == BC_INCREFS_DONE ?
1982 					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1983 					(u64)node_ptr, node->debug_id,
1984 					(u64)cookie, (u64)node->cookie);
1985 				break;
1986 			}
1987 			if (cmd == BC_ACQUIRE_DONE) {
1988 				if (node->pending_strong_ref == 0) {
1989 					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
1990 						proc->pid, thread->pid,
1991 						node->debug_id);
1992 					break;
1993 				}
1994 				node->pending_strong_ref = 0;
1995 			} else {
1996 				if (node->pending_weak_ref == 0) {
1997 					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
1998 						proc->pid, thread->pid,
1999 						node->debug_id);
2000 					break;
2001 				}
2002 				node->pending_weak_ref = 0;
2003 			}
2004 			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
2005 			binder_debug(BINDER_DEBUG_USER_REFS,
2006 				     "%d:%d %s node %d ls %d lw %d\n",
2007 				     proc->pid, thread->pid,
2008 				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
2009 				     node->debug_id, node->local_strong_refs, node->local_weak_refs);
2010 			break;
2011 		}
2012 		case BC_ATTEMPT_ACQUIRE:
2013 			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
2014 			return -EINVAL;
2015 		case BC_ACQUIRE_RESULT:
2016 			pr_err("BC_ACQUIRE_RESULT not supported\n");
2017 			return -EINVAL;
2018 
2019 		case BC_FREE_BUFFER: {
2020 			binder_uintptr_t data_ptr;
2021 			struct binder_buffer *buffer;
2022 
2023 			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
2024 				return -EFAULT;
2025 			ptr += sizeof(binder_uintptr_t);
2026 
2027 			buffer = binder_alloc_prepare_to_free(&proc->alloc,
2028 							      data_ptr);
2029 			if (buffer == NULL) {
2030 				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
2031 					proc->pid, thread->pid, (u64)data_ptr);
2032 				break;
2033 			}
2034 			if (!buffer->allow_user_free) {
2035 				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
2036 					proc->pid, thread->pid, (u64)data_ptr);
2037 				break;
2038 			}
2039 			binder_debug(BINDER_DEBUG_FREE_BUFFER,
2040 				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
2041 				     proc->pid, thread->pid, (u64)data_ptr,
2042 				     buffer->debug_id,
2043 				     buffer->transaction ? "active" : "finished");
2044 
2045 			if (buffer->transaction) {
2046 				buffer->transaction->buffer = NULL;
2047 				buffer->transaction = NULL;
2048 			}
2049 			if (buffer->async_transaction && buffer->target_node) {
2050 				BUG_ON(!buffer->target_node->has_async_transaction);
2051 				if (list_empty(&buffer->target_node->async_todo))
2052 					buffer->target_node->has_async_transaction = 0;
2053 				else
2054 					list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
2055 			}
2056 			trace_binder_transaction_buffer_release(buffer);
2057 			binder_transaction_buffer_release(proc, buffer, NULL);
2058 			binder_alloc_free_buf(&proc->alloc, buffer);
2059 			break;
2060 		}
2061 
2062 		case BC_TRANSACTION_SG:
2063 		case BC_REPLY_SG: {
2064 			struct binder_transaction_data_sg tr;
2065 
2066 			if (copy_from_user(&tr, ptr, sizeof(tr)))
2067 				return -EFAULT;
2068 			ptr += sizeof(tr);
2069 			binder_transaction(proc, thread, &tr.transaction_data,
2070 					   cmd == BC_REPLY_SG, tr.buffers_size);
2071 			break;
2072 		}
2073 		case BC_TRANSACTION:
2074 		case BC_REPLY: {
2075 			struct binder_transaction_data tr;
2076 
2077 			if (copy_from_user(&tr, ptr, sizeof(tr)))
2078 				return -EFAULT;
2079 			ptr += sizeof(tr);
2080 			binder_transaction(proc, thread, &tr,
2081 					   cmd == BC_REPLY, 0);
2082 			break;
2083 		}
2084 
2085 		case BC_REGISTER_LOOPER:
2086 			binder_debug(BINDER_DEBUG_THREADS,
2087 				     "%d:%d BC_REGISTER_LOOPER\n",
2088 				     proc->pid, thread->pid);
2089 			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
2090 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
2091 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
2092 					proc->pid, thread->pid);
2093 			} else if (proc->requested_threads == 0) {
2094 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
2095 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
2096 					proc->pid, thread->pid);
2097 			} else {
2098 				proc->requested_threads--;
2099 				proc->requested_threads_started++;
2100 			}
2101 			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
2102 			break;
2103 		case BC_ENTER_LOOPER:
2104 			binder_debug(BINDER_DEBUG_THREADS,
2105 				     "%d:%d BC_ENTER_LOOPER\n",
2106 				     proc->pid, thread->pid);
2107 			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
2108 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
2109 				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
2110 					proc->pid, thread->pid);
2111 			}
2112 			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
2113 			break;
2114 		case BC_EXIT_LOOPER:
2115 			binder_debug(BINDER_DEBUG_THREADS,
2116 				     "%d:%d BC_EXIT_LOOPER\n",
2117 				     proc->pid, thread->pid);
2118 			thread->looper |= BINDER_LOOPER_STATE_EXITED;
2119 			break;
2120 
2121 		case BC_REQUEST_DEATH_NOTIFICATION:
2122 		case BC_CLEAR_DEATH_NOTIFICATION: {
2123 			uint32_t target;
2124 			binder_uintptr_t cookie;
2125 			struct binder_ref *ref;
2126 			struct binder_ref_death *death;
2127 
2128 			if (get_user(target, (uint32_t __user *)ptr))
2129 				return -EFAULT;
2130 			ptr += sizeof(uint32_t);
2131 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2132 				return -EFAULT;
2133 			ptr += sizeof(binder_uintptr_t);
2134 			ref = binder_get_ref(proc, target, false);
2135 			if (ref == NULL) {
2136 				binder_user_error("%d:%d %s invalid ref %d\n",
2137 					proc->pid, thread->pid,
2138 					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2139 					"BC_REQUEST_DEATH_NOTIFICATION" :
2140 					"BC_CLEAR_DEATH_NOTIFICATION",
2141 					target);
2142 				break;
2143 			}
2144 
2145 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2146 				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
2147 				     proc->pid, thread->pid,
2148 				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2149 				     "BC_REQUEST_DEATH_NOTIFICATION" :
2150 				     "BC_CLEAR_DEATH_NOTIFICATION",
2151 				     (u64)cookie, ref->debug_id, ref->desc,
2152 				     ref->strong, ref->weak, ref->node->debug_id);
2153 
2154 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
2155 				if (ref->death) {
2156 					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
2157 						proc->pid, thread->pid);
2158 					break;
2159 				}
2160 				death = kzalloc(sizeof(*death), GFP_KERNEL);
2161 				if (death == NULL) {
2162 					thread->return_error = BR_ERROR;
2163 					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2164 						     "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
2165 						     proc->pid, thread->pid);
2166 					break;
2167 				}
2168 				binder_stats_created(BINDER_STAT_DEATH);
2169 				INIT_LIST_HEAD(&death->work.entry);
2170 				death->cookie = cookie;
2171 				ref->death = death;
2172 				if (ref->node->proc == NULL) {
2173 					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2174 					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2175 						list_add_tail(&ref->death->work.entry, &thread->todo);
2176 					} else {
2177 						list_add_tail(&ref->death->work.entry, &proc->todo);
2178 						wake_up_interruptible(&proc->wait);
2179 					}
2180 				}
2181 			} else {
2182 				if (ref->death == NULL) {
2183 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
2184 						proc->pid, thread->pid);
2185 					break;
2186 				}
2187 				death = ref->death;
2188 				if (death->cookie != cookie) {
2189 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
2190 						proc->pid, thread->pid,
2191 						(u64)death->cookie,
2192 						(u64)cookie);
2193 					break;
2194 				}
2195 				ref->death = NULL;
2196 				if (list_empty(&death->work.entry)) {
2197 					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2198 					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2199 						list_add_tail(&death->work.entry, &thread->todo);
2200 					} else {
2201 						list_add_tail(&death->work.entry, &proc->todo);
2202 						wake_up_interruptible(&proc->wait);
2203 					}
2204 				} else {
2205 					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2206 					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2207 				}
2208 			}
2209 		} break;
2210 		case BC_DEAD_BINDER_DONE: {
2211 			struct binder_work *w;
2212 			binder_uintptr_t cookie;
2213 			struct binder_ref_death *death = NULL;
2214 
2215 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2216 				return -EFAULT;
2217 
2218 			ptr += sizeof(cookie);
2219 			list_for_each_entry(w, &proc->delivered_death, entry) {
2220 				struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
2221 
2222 				if (tmp_death->cookie == cookie) {
2223 					death = tmp_death;
2224 					break;
2225 				}
2226 			}
2227 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
2228 				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
2229 				     proc->pid, thread->pid, (u64)cookie,
2230 				     death);
2231 			if (death == NULL) {
2232 				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
2233 					proc->pid, thread->pid, (u64)cookie);
2234 				break;
2235 			}
2236 
2237 			list_del_init(&death->work.entry);
2238 			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2239 				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2240 				if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2241 					list_add_tail(&death->work.entry, &thread->todo);
2242 				} else {
2243 					list_add_tail(&death->work.entry, &proc->todo);
2244 					wake_up_interruptible(&proc->wait);
2245 				}
2246 			}
2247 		} break;
2248 
2249 		default:
2250 			pr_err("%d:%d unknown command %d\n",
2251 			       proc->pid, thread->pid, cmd);
2252 			return -EINVAL;
2253 		}
2254 		*consumed = ptr - buffer;
2255 	}
2256 	return 0;
2257 }
2258 
2259 static void binder_stat_br(struct binder_proc *proc,
2260 			   struct binder_thread *thread, uint32_t cmd)
2261 {
2262 	trace_binder_return(cmd);
2263 	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
2264 		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
2265 		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
2266 		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
2267 	}
2268 }
2269 
2270 static int binder_has_proc_work(struct binder_proc *proc,
2271 				struct binder_thread *thread)
2272 {
2273 	return !list_empty(&proc->todo) || thread->looper_need_return;
2274 }
2275 
2276 static int binder_has_thread_work(struct binder_thread *thread)
2277 {
2278 	return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
2279 		thread->looper_need_return;
2280 }
2281 
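/*
 * binder_put_node_cmd() - emit a three-word node command into the read
 * buffer: the BR_* code followed by the node's userspace pointer and
 * cookie. On success, *ptrp is advanced past the words just written.
 */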
2282 static int binder_put_node_cmd(struct binder_proc *proc,
2283 			       struct binder_thread *thread,
2284 			       void __user **ptrp,
2285 			       binder_uintptr_t node_ptr,
2286 			       binder_uintptr_t node_cookie,
2287 			       int node_debug_id,
2288 			       uint32_t cmd, const char *cmd_name)
2289 {
2290 	void __user *ptr = *ptrp;
2291 
2292 	if (put_user(cmd, (uint32_t __user *)ptr))
2293 		return -EFAULT;
2294 	ptr += sizeof(uint32_t);
2295 
2296 	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
2297 		return -EFAULT;
2298 	ptr += sizeof(binder_uintptr_t);
2299 
2300 	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
2301 		return -EFAULT;
2302 	ptr += sizeof(binder_uintptr_t);
2303 
2304 	binder_stat_br(proc, thread, cmd);
2305 	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
2306 		     proc->pid, thread->pid, cmd_name, node_debug_id,
2307 		     (u64)node_ptr, (u64)node_cookie);
2308 
2309 	*ptrp = ptr;
2310 	return 0;
2311 }
2312 
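/*
 * binder_thread_read() - fill a userspace buffer with BR_* commands.
 *
 * When reading from the start of a buffer, a BR_NOOP is written first so
 * the result is never empty and userspace can parse the stream
 * uniformly. The thread then waits (unless @non_block) for work on its
 * own todo list or, when it has nothing of its own pending, on the
 * process-wide list.
 */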
2313 static int binder_thread_read(struct binder_proc *proc,
2314 			      struct binder_thread *thread,
2315 			      binder_uintptr_t binder_buffer, size_t size,
2316 			      binder_size_t *consumed, int non_block)
2317 {
2318 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
2319 	void __user *ptr = buffer + *consumed;
2320 	void __user *end = buffer + size;
2321 
2322 	int ret = 0;
2323 	int wait_for_proc_work;
2324 
2325 	if (*consumed == 0) {
2326 		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
2327 			return -EFAULT;
2328 		ptr += sizeof(uint32_t);
2329 	}
2330 
2331 retry:
2332 	wait_for_proc_work = thread->transaction_stack == NULL &&
2333 				list_empty(&thread->todo);
2334 
2335 	if (thread->return_error != BR_OK && ptr < end) {
2336 		if (thread->return_error2 != BR_OK) {
2337 			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
2338 				return -EFAULT;
2339 			ptr += sizeof(uint32_t);
2340 			binder_stat_br(proc, thread, thread->return_error2);
2341 			if (ptr == end)
2342 				goto done;
2343 			thread->return_error2 = BR_OK;
2344 		}
2345 		if (put_user(thread->return_error, (uint32_t __user *)ptr))
2346 			return -EFAULT;
2347 		ptr += sizeof(uint32_t);
2348 		binder_stat_br(proc, thread, thread->return_error);
2349 		thread->return_error = BR_OK;
2350 		goto done;
2351 	}
2352 
2353 
2354 	thread->looper |= BINDER_LOOPER_STATE_WAITING;
2355 	if (wait_for_proc_work)
2356 		proc->ready_threads++;
2357 
2358 	binder_unlock(__func__);
2359 
2360 	trace_binder_wait_for_work(wait_for_proc_work,
2361 				   !!thread->transaction_stack,
2362 				   !list_empty(&thread->todo));
2363 	if (wait_for_proc_work) {
2364 		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2365 					BINDER_LOOPER_STATE_ENTERED))) {
2366 			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
2367 				proc->pid, thread->pid, thread->looper);
2368 			wait_event_interruptible(binder_user_error_wait,
2369 						 binder_stop_on_user_error < 2);
2370 		}
2371 		binder_set_nice(proc->default_priority);
2372 		if (non_block) {
2373 			if (!binder_has_proc_work(proc, thread))
2374 				ret = -EAGAIN;
2375 		} else
2376 			ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
2377 	} else {
2378 		if (non_block) {
2379 			if (!binder_has_thread_work(thread))
2380 				ret = -EAGAIN;
2381 		} else
2382 			ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
2383 	}
2384 
2385 	binder_lock(__func__);
2386 
2387 	if (wait_for_proc_work)
2388 		proc->ready_threads--;
2389 	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
2390 
2391 	if (ret)
2392 		return ret;
2393 
2394 	while (1) {
2395 		uint32_t cmd;
2396 		struct binder_transaction_data tr;
2397 		struct binder_work *w;
2398 		struct binder_transaction *t = NULL;
2399 
2400 		if (!list_empty(&thread->todo)) {
2401 			w = list_first_entry(&thread->todo, struct binder_work,
2402 					     entry);
2403 		} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
2404 			w = list_first_entry(&proc->todo, struct binder_work,
2405 					     entry);
2406 		} else {
2407 			/* no data added beyond the initial BR_NOOP (4 bytes) */
2408 			if (ptr - buffer == 4 && !thread->looper_need_return)
2409 				goto retry;
2410 			break;
2411 		}
2412 
2413 		if (end - ptr < sizeof(tr) + 4)
2414 			break;
2415 
2416 		switch (w->type) {
2417 		case BINDER_WORK_TRANSACTION: {
2418 			t = container_of(w, struct binder_transaction, work);
2419 		} break;
2420 		case BINDER_WORK_TRANSACTION_COMPLETE: {
2421 			cmd = BR_TRANSACTION_COMPLETE;
2422 			if (put_user(cmd, (uint32_t __user *)ptr))
2423 				return -EFAULT;
2424 			ptr += sizeof(uint32_t);
2425 
2426 			binder_stat_br(proc, thread, cmd);
2427 			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
2428 				     "%d:%d BR_TRANSACTION_COMPLETE\n",
2429 				     proc->pid, thread->pid);
2430 
2431 			list_del(&w->entry);
2432 			kfree(w);
2433 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2434 		} break;
2435 		case BINDER_WORK_NODE: {
2436 			struct binder_node *node = container_of(w, struct binder_node, work);
2437 			int strong, weak;
2438 			binder_uintptr_t node_ptr = node->ptr;
2439 			binder_uintptr_t node_cookie = node->cookie;
2440 			int node_debug_id = node->debug_id;
2441 			int has_weak_ref;
2442 			int has_strong_ref;
2443 			void __user *orig_ptr = ptr;
2444 
2445 			BUG_ON(proc != node->proc);
2446 			strong = node->internal_strong_refs ||
2447 					node->local_strong_refs;
2448 			weak = !hlist_empty(&node->refs) ||
2449 					node->local_weak_refs || strong;
2450 			has_strong_ref = node->has_strong_ref;
2451 			has_weak_ref = node->has_weak_ref;
2452 
2453 			if (weak && !has_weak_ref) {
2454 				node->has_weak_ref = 1;
2455 				node->pending_weak_ref = 1;
2456 				node->local_weak_refs++;
2457 			}
2458 			if (strong && !has_strong_ref) {
2459 				node->has_strong_ref = 1;
2460 				node->pending_strong_ref = 1;
2461 				node->local_strong_refs++;
2462 			}
2463 			if (!strong && has_strong_ref)
2464 				node->has_strong_ref = 0;
2465 			if (!weak && has_weak_ref)
2466 				node->has_weak_ref = 0;
2467 			list_del(&w->entry);
2468 
2469 			if (!weak && !strong) {
2470 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2471 					     "%d:%d node %d u%016llx c%016llx deleted\n",
2472 					     proc->pid, thread->pid,
2473 					     node_debug_id,
2474 					     (u64)node_ptr,
2475 					     (u64)node_cookie);
2476 				rb_erase(&node->rb_node, &proc->nodes);
2477 				kfree(node);
2478 				binder_stats_deleted(BINDER_STAT_NODE);
2479 			}
2480 			if (weak && !has_weak_ref)
2481 				ret = binder_put_node_cmd(
2482 						proc, thread, &ptr, node_ptr,
2483 						node_cookie, node_debug_id,
2484 						BR_INCREFS, "BR_INCREFS");
2485 			if (!ret && strong && !has_strong_ref)
2486 				ret = binder_put_node_cmd(
2487 						proc, thread, &ptr, node_ptr,
2488 						node_cookie, node_debug_id,
2489 						BR_ACQUIRE, "BR_ACQUIRE");
2490 			if (!ret && !strong && has_strong_ref)
2491 				ret = binder_put_node_cmd(
2492 						proc, thread, &ptr, node_ptr,
2493 						node_cookie, node_debug_id,
2494 						BR_RELEASE, "BR_RELEASE");
2495 			if (!ret && !weak && has_weak_ref)
2496 				ret = binder_put_node_cmd(
2497 						proc, thread, &ptr, node_ptr,
2498 						node_cookie, node_debug_id,
2499 						BR_DECREFS, "BR_DECREFS");
2500 			if (orig_ptr == ptr)
2501 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2502 					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
2503 					     proc->pid, thread->pid,
2504 					     node_debug_id,
2505 					     (u64)node_ptr,
2506 					     (u64)node_cookie);
2507 			if (ret)
2508 				return ret;
2509 		} break;
2510 		case BINDER_WORK_DEAD_BINDER:
2511 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2512 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2513 			struct binder_ref_death *death;
2514 			uint32_t cmd;
2515 
2516 			death = container_of(w, struct binder_ref_death, work);
2517 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
2518 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
2519 			else
2520 				cmd = BR_DEAD_BINDER;
2521 			if (put_user(cmd, (uint32_t __user *)ptr))
2522 				return -EFAULT;
2523 			ptr += sizeof(uint32_t);
2524 			if (put_user(death->cookie,
2525 				     (binder_uintptr_t __user *)ptr))
2526 				return -EFAULT;
2527 			ptr += sizeof(binder_uintptr_t);
2528 			binder_stat_br(proc, thread, cmd);
2529 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2530 				     "%d:%d %s %016llx\n",
2531 				      proc->pid, thread->pid,
2532 				      cmd == BR_DEAD_BINDER ?
2533 				      "BR_DEAD_BINDER" :
2534 				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
2535 				      (u64)death->cookie);
2536 
2537 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
2538 				list_del(&w->entry);
2539 				kfree(death);
2540 				binder_stats_deleted(BINDER_STAT_DEATH);
2541 			} else
2542 				list_move(&w->entry, &proc->delivered_death);
2543 			if (cmd == BR_DEAD_BINDER)
2544 				goto done; /* DEAD_BINDER notifications can cause transactions */
2545 		} break;
2546 		}
2547 
2548 		if (!t)
2549 			continue;
2550 
2551 		BUG_ON(t->buffer == NULL);
2552 		if (t->buffer->target_node) {
2553 			struct binder_node *target_node = t->buffer->target_node;
2554 
2555 			tr.target.ptr = target_node->ptr;
2556 			tr.cookie =  target_node->cookie;
2557 			t->saved_priority = task_nice(current);
2558 			if (t->priority < target_node->min_priority &&
2559 			    !(t->flags & TF_ONE_WAY))
2560 				binder_set_nice(t->priority);
2561 			else if (!(t->flags & TF_ONE_WAY) ||
2562 				 t->saved_priority > target_node->min_priority)
2563 				binder_set_nice(target_node->min_priority);
2564 			cmd = BR_TRANSACTION;
2565 		} else {
2566 			tr.target.ptr = 0;
2567 			tr.cookie = 0;
2568 			cmd = BR_REPLY;
2569 		}
2570 		tr.code = t->code;
2571 		tr.flags = t->flags;
2572 		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
2573 
2574 		if (t->from) {
2575 			struct task_struct *sender = t->from->proc->tsk;
2576 
2577 			tr.sender_pid = task_tgid_nr_ns(sender,
2578 							task_active_pid_ns(current));
2579 		} else {
2580 			tr.sender_pid = 0;
2581 		}
2582 
2583 		tr.data_size = t->buffer->data_size;
2584 		tr.offsets_size = t->buffer->offsets_size;
2585 		tr.data.ptr.buffer = (binder_uintptr_t)
2586 			((uintptr_t)t->buffer->data +
2587 			binder_alloc_get_user_buffer_offset(&proc->alloc));
2588 		tr.data.ptr.offsets = tr.data.ptr.buffer +
2589 					ALIGN(t->buffer->data_size,
2590 					    sizeof(void *));
2591 
2592 		if (put_user(cmd, (uint32_t __user *)ptr))
2593 			return -EFAULT;
2594 		ptr += sizeof(uint32_t);
2595 		if (copy_to_user(ptr, &tr, sizeof(tr)))
2596 			return -EFAULT;
2597 		ptr += sizeof(tr);
2598 
2599 		trace_binder_transaction_received(t);
2600 		binder_stat_br(proc, thread, cmd);
2601 		binder_debug(BINDER_DEBUG_TRANSACTION,
2602 			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
2603 			     proc->pid, thread->pid,
2604 			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
2605 			     "BR_REPLY",
2606 			     t->debug_id, t->from ? t->from->proc->pid : 0,
2607 			     t->from ? t->from->pid : 0, cmd,
2608 			     t->buffer->data_size, t->buffer->offsets_size,
2609 			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
2610 
2611 		list_del(&t->work.entry);
2612 		t->buffer->allow_user_free = 1;
2613 		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
2614 			t->to_parent = thread->transaction_stack;
2615 			t->to_thread = thread;
2616 			thread->transaction_stack = t;
2617 		} else {
2618 			t->buffer->transaction = NULL;
2619 			kfree(t);
2620 			binder_stats_deleted(BINDER_STAT_TRANSACTION);
2621 		}
2622 		break;
2623 	}
2624 
2625 done:
2626 
2627 	*consumed = ptr - buffer;
2628 	if (proc->requested_threads + proc->ready_threads == 0 &&
2629 	    proc->requested_threads_started < proc->max_threads &&
2630 	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2631 	     BINDER_LOOPER_STATE_ENTERED))
2632 	     /* the user-space code fails to spawn a new thread if we leave this out */) {
2633 		proc->requested_threads++;
2634 		binder_debug(BINDER_DEBUG_THREADS,
2635 			     "%d:%d BR_SPAWN_LOOPER\n",
2636 			     proc->pid, thread->pid);
2637 		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
2638 			return -EFAULT;
2639 		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
2640 	}
2641 	return 0;
2642 }
2643 
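/*
 * binder_release_work() - drain a todo list during teardown. Undelivered
 * transactions that still expect a reply are failed with BR_DEAD_REPLY;
 * everything else is logged (when enabled) and freed.
 */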
2644 static void binder_release_work(struct list_head *list)
2645 {
2646 	struct binder_work *w;
2647 
2648 	while (!list_empty(list)) {
2649 		w = list_first_entry(list, struct binder_work, entry);
2650 		list_del_init(&w->entry);
2651 		switch (w->type) {
2652 		case BINDER_WORK_TRANSACTION: {
2653 			struct binder_transaction *t;
2654 
2655 			t = container_of(w, struct binder_transaction, work);
2656 			if (t->buffer->target_node &&
2657 			    !(t->flags & TF_ONE_WAY)) {
2658 				binder_send_failed_reply(t, BR_DEAD_REPLY);
2659 			} else {
2660 				binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2661 					"undelivered transaction %d\n",
2662 					t->debug_id);
2663 				t->buffer->transaction = NULL;
2664 				kfree(t);
2665 				binder_stats_deleted(BINDER_STAT_TRANSACTION);
2666 			}
2667 		} break;
2668 		case BINDER_WORK_TRANSACTION_COMPLETE: {
2669 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2670 				"undelivered TRANSACTION_COMPLETE\n");
2671 			kfree(w);
2672 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2673 		} break;
2674 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2675 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2676 			struct binder_ref_death *death;
2677 
2678 			death = container_of(w, struct binder_ref_death, work);
2679 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2680 				"undelivered death notification, %016llx\n",
2681 				(u64)death->cookie);
2682 			kfree(death);
2683 			binder_stats_deleted(BINDER_STAT_DEATH);
2684 		} break;
2685 		default:
2686 			pr_err("unexpected work type, %d, not freed\n",
2687 			       w->type);
2688 			break;
2689 		}
2690 	}
2691 
2692 }
2693 
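/*
 * binder_get_thread() - look up the calling thread's binder state in the
 * per-process rbtree, keyed by PID, creating and inserting a new entry
 * on first use. Returns NULL only if that allocation fails.
 */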
2694 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
2695 {
2696 	struct binder_thread *thread = NULL;
2697 	struct rb_node *parent = NULL;
2698 	struct rb_node **p = &proc->threads.rb_node;
2699 
2700 	while (*p) {
2701 		parent = *p;
2702 		thread = rb_entry(parent, struct binder_thread, rb_node);
2703 
2704 		if (current->pid < thread->pid)
2705 			p = &(*p)->rb_left;
2706 		else if (current->pid > thread->pid)
2707 			p = &(*p)->rb_right;
2708 		else
2709 			break;
2710 	}
2711 	if (*p == NULL) {
2712 		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
2713 		if (thread == NULL)
2714 			return NULL;
2715 		binder_stats_created(BINDER_STAT_THREAD);
2716 		thread->proc = proc;
2717 		thread->pid = current->pid;
2718 		init_waitqueue_head(&thread->wait);
2719 		INIT_LIST_HEAD(&thread->todo);
2720 		rb_link_node(&thread->rb_node, parent, p);
2721 		rb_insert_color(&thread->rb_node, &proc->threads);
2722 		thread->looper_need_return = true;
2723 		thread->return_error = BR_OK;
2724 		thread->return_error2 = BR_OK;
2725 	}
2726 	return thread;
2727 }
2728 
2729 static int binder_free_thread(struct binder_proc *proc,
2730 			      struct binder_thread *thread)
2731 {
2732 	struct binder_transaction *t;
2733 	struct binder_transaction *send_reply = NULL;
2734 	int active_transactions = 0;
2735 
2736 	rb_erase(&thread->rb_node, &proc->threads);
2737 	t = thread->transaction_stack;
2738 	if (t && t->to_thread == thread)
2739 		send_reply = t;
2740 	while (t) {
2741 		active_transactions++;
2742 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2743 			     "release %d:%d transaction %d %s, still active\n",
2744 			      proc->pid, thread->pid,
2745 			     t->debug_id,
2746 			     (t->to_thread == thread) ? "in" : "out");
2747 
2748 		if (t->to_thread == thread) {
2749 			t->to_proc = NULL;
2750 			t->to_thread = NULL;
2751 			if (t->buffer) {
2752 				t->buffer->transaction = NULL;
2753 				t->buffer = NULL;
2754 			}
2755 			t = t->to_parent;
2756 		} else if (t->from == thread) {
2757 			t->from = NULL;
2758 			t = t->from_parent;
2759 		} else
2760 			BUG();
2761 	}
2762 	if (send_reply)
2763 		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
2764 	binder_release_work(&thread->todo);
2765 	kfree(thread);
2766 	binder_stats_deleted(BINDER_STAT_THREAD);
2767 	return active_transactions;
2768 }
2769 
2770 static unsigned int binder_poll(struct file *filp,
2771 				struct poll_table_struct *wait)
2772 {
2773 	struct binder_proc *proc = filp->private_data;
2774 	struct binder_thread *thread = NULL;
2775 	int wait_for_proc_work;
2776 
2777 	binder_lock(__func__);
2778 
2779 	thread = binder_get_thread(proc);
	if (thread == NULL) {
		/* thread allocation failed; nothing to poll on */
		binder_unlock(__func__);
		return POLLERR;
	}
2780 
2781 	wait_for_proc_work = thread->transaction_stack == NULL &&
2782 		list_empty(&thread->todo) && thread->return_error == BR_OK;
2783 
2784 	binder_unlock(__func__);
2785 
2786 	if (wait_for_proc_work) {
2787 		if (binder_has_proc_work(proc, thread))
2788 			return POLLIN;
2789 		poll_wait(filp, &proc->wait, wait);
2790 		if (binder_has_proc_work(proc, thread))
2791 			return POLLIN;
2792 	} else {
2793 		if (binder_has_thread_work(thread))
2794 			return POLLIN;
2795 		poll_wait(filp, &thread->wait, wait);
2796 		if (binder_has_thread_work(thread))
2797 			return POLLIN;
2798 	}
2799 	return 0;
2800 }
2801 
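/*
 * binder_ioctl_write_read() - handle BINDER_WRITE_READ, the main data
 * path: first consume the write buffer, then fill the read buffer.
 *
 * A hedged sketch of the userspace side (illustrative only, not part of
 * this driver): a looper thread typically issues
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)out,
 *		.write_size   = out_len,
 *		.read_buffer  = (binder_uintptr_t)in,
 *		.read_size    = sizeof(in),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 * and then parses bwr.read_consumed bytes of BR_* commands from "in".
 */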
2802 static int binder_ioctl_write_read(struct file *filp,
2803 				unsigned int cmd, unsigned long arg,
2804 				struct binder_thread *thread)
2805 {
2806 	int ret = 0;
2807 	struct binder_proc *proc = filp->private_data;
2808 	unsigned int size = _IOC_SIZE(cmd);
2809 	void __user *ubuf = (void __user *)arg;
2810 	struct binder_write_read bwr;
2811 
2812 	if (size != sizeof(struct binder_write_read)) {
2813 		ret = -EINVAL;
2814 		goto out;
2815 	}
2816 	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
2817 		ret = -EFAULT;
2818 		goto out;
2819 	}
2820 	binder_debug(BINDER_DEBUG_READ_WRITE,
2821 		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
2822 		     proc->pid, thread->pid,
2823 		     (u64)bwr.write_size, (u64)bwr.write_buffer,
2824 		     (u64)bwr.read_size, (u64)bwr.read_buffer);
2825 
2826 	if (bwr.write_size > 0) {
2827 		ret = binder_thread_write(proc, thread,
2828 					  bwr.write_buffer,
2829 					  bwr.write_size,
2830 					  &bwr.write_consumed);
2831 		trace_binder_write_done(ret);
2832 		if (ret < 0) {
2833 			bwr.read_consumed = 0;
2834 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2835 				ret = -EFAULT;
2836 			goto out;
2837 		}
2838 	}
2839 	if (bwr.read_size > 0) {
2840 		ret = binder_thread_read(proc, thread, bwr.read_buffer,
2841 					 bwr.read_size,
2842 					 &bwr.read_consumed,
2843 					 filp->f_flags & O_NONBLOCK);
2844 		trace_binder_read_done(ret);
2845 		if (!list_empty(&proc->todo))
2846 			wake_up_interruptible(&proc->wait);
2847 		if (ret < 0) {
2848 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2849 				ret = -EFAULT;
2850 			goto out;
2851 		}
2852 	}
2853 	binder_debug(BINDER_DEBUG_READ_WRITE,
2854 		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
2855 		     proc->pid, thread->pid,
2856 		     (u64)bwr.write_consumed, (u64)bwr.write_size,
2857 		     (u64)bwr.read_consumed, (u64)bwr.read_size);
2858 	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
2859 		ret = -EFAULT;
2860 		goto out;
2861 	}
2862 out:
2863 	return ret;
2864 }
2865 
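/*
 * binder_ioctl_set_ctx_mgr() - register the caller as the context
 * manager, i.e. the node that handle 0 resolves to. Only one such node
 * may exist per context (a second registration fails with -EBUSY), and
 * once an euid has claimed the role, only that same euid may register
 * again after the node is gone.
 */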
2866 static int binder_ioctl_set_ctx_mgr(struct file *filp)
2867 {
2868 	int ret = 0;
2869 	struct binder_proc *proc = filp->private_data;
2870 	struct binder_context *context = proc->context;
2871 	struct binder_node *new_node;
2872 	kuid_t curr_euid = current_euid();
2873 
2874 	mutex_lock(&context->context_mgr_node_lock);
2875 	if (context->binder_context_mgr_node) {
2876 		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
2877 		ret = -EBUSY;
2878 		goto out;
2879 	}
2880 	ret = security_binder_set_context_mgr(proc->tsk);
2881 	if (ret < 0)
2882 		goto out;
2883 	if (uid_valid(context->binder_context_mgr_uid)) {
2884 		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
2885 			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
2886 			       from_kuid(&init_user_ns, curr_euid),
2887 			       from_kuid(&init_user_ns,
2888 					 context->binder_context_mgr_uid));
2889 			ret = -EPERM;
2890 			goto out;
2891 		}
2892 	} else {
2893 		context->binder_context_mgr_uid = curr_euid;
2894 	}
2895 	new_node = binder_new_node(proc, 0, 0);
2896 	if (!new_node) {
2897 		ret = -ENOMEM;
2898 		goto out;
2899 	}
2900 	new_node->local_weak_refs++;
2901 	new_node->local_strong_refs++;
2902 	new_node->has_strong_ref = 1;
2903 	new_node->has_weak_ref = 1;
2904 	context->binder_context_mgr_node = new_node;
2905 out:
2906 	mutex_unlock(&context->context_mgr_node_lock);
2907 	return ret;
2908 }
2909 
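/*
 * binder_ioctl() - top-level dispatch. Besides BINDER_WRITE_READ this
 * handles BINDER_SET_MAX_THREADS, BINDER_SET_CONTEXT_MGR,
 * BINDER_THREAD_EXIT and BINDER_VERSION; anything else is -EINVAL.
 */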
2910 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2911 {
2912 	int ret;
2913 	struct binder_proc *proc = filp->private_data;
2914 	struct binder_thread *thread;
2915 	unsigned int size = _IOC_SIZE(cmd);
2916 	void __user *ubuf = (void __user *)arg;
2917 
2918 	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
2919 			proc->pid, current->pid, cmd, arg);*/
2920 
2921 	trace_binder_ioctl(cmd, arg);
2922 
2923 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2924 	if (ret)
2925 		goto err_unlocked;
2926 
2927 	binder_lock(__func__);
2928 	thread = binder_get_thread(proc);
2929 	if (thread == NULL) {
2930 		ret = -ENOMEM;
2931 		goto err;
2932 	}
2933 
2934 	switch (cmd) {
2935 	case BINDER_WRITE_READ:
2936 		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
2937 		if (ret)
2938 			goto err;
2939 		break;
2940 	case BINDER_SET_MAX_THREADS:
2941 		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
2942 			ret = -EFAULT;
2943 			goto err;
2944 		}
2945 		break;
2946 	case BINDER_SET_CONTEXT_MGR:
2947 		ret = binder_ioctl_set_ctx_mgr(filp);
2948 		if (ret)
2949 			goto err;
2950 		break;
2951 	case BINDER_THREAD_EXIT:
2952 		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
2953 			     proc->pid, thread->pid);
2954 		binder_free_thread(proc, thread);
2955 		thread = NULL;
2956 		break;
2957 	case BINDER_VERSION: {
2958 		struct binder_version __user *ver = ubuf;
2959 
2960 		if (size != sizeof(struct binder_version)) {
2961 			ret = -EINVAL;
2962 			goto err;
2963 		}
2964 		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
2965 			     &ver->protocol_version)) {
2966 			ret = -EFAULT;
2967 			goto err;
2968 		}
2969 		break;
2970 	}
2971 	default:
2972 		ret = -EINVAL;
2973 		goto err;
2974 	}
2975 	ret = 0;
2976 err:
2977 	if (thread)
2978 		thread->looper_need_return = false;
2979 	binder_unlock(__func__);
2980 	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2981 	if (ret && ret != -ERESTARTSYS)
2982 		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
2983 err_unlocked:
2984 	trace_binder_ioctl_done(ret);
2985 	return ret;
2986 }
2987 
2988 static void binder_vma_open(struct vm_area_struct *vma)
2989 {
2990 	struct binder_proc *proc = vma->vm_private_data;
2991 
2992 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2993 		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2994 		     proc->pid, vma->vm_start, vma->vm_end,
2995 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2996 		     (unsigned long)pgprot_val(vma->vm_page_prot));
2997 }
2998 
2999 static void binder_vma_close(struct vm_area_struct *vma)
3000 {
3001 	struct binder_proc *proc = vma->vm_private_data;
3002 
3003 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3004 		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
3005 		     proc->pid, vma->vm_start, vma->vm_end,
3006 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3007 		     (unsigned long)pgprot_val(vma->vm_page_prot));
3008 	binder_alloc_vma_close(&proc->alloc);
3009 	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
3010 }
3011 
3012 static int binder_vm_fault(struct vm_fault *vmf)
3013 {
3014 	return VM_FAULT_SIGBUS;
3015 }
3016 
3017 static const struct vm_operations_struct binder_vm_ops = {
3018 	.open = binder_vma_open,
3019 	.close = binder_vma_close,
3020 	.fault = binder_vm_fault,
3021 };
3022 
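/*
 * binder_mmap() - set up the per-process buffer area, capped at 4MB.
 * The mapping must not be writable from userspace: VM_WRITE is rejected
 * and VM_MAYWRITE is cleared so mprotect() cannot add it back later.
 * Userspace typically maps the region PROT_READ and only reads
 * transaction data out of it; all writes are done by the kernel.
 */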
3023 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
3024 {
3025 	int ret;
3026 	struct binder_proc *proc = filp->private_data;
3027 	const char *failure_string;
3028 
3029 	if (proc->tsk != current->group_leader)
3030 		return -EINVAL;
3031 
3032 	if ((vma->vm_end - vma->vm_start) > SZ_4M)
3033 		vma->vm_end = vma->vm_start + SZ_4M;
3034 
3035 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3036 		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
3037 		     __func__, proc->pid, vma->vm_start, vma->vm_end,
3038 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3039 		     (unsigned long)pgprot_val(vma->vm_page_prot));
3040 
3041 	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
3042 		ret = -EPERM;
3043 		failure_string = "bad vm_flags";
3044 		goto err_bad_arg;
3045 	}
3046 	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
3047 	vma->vm_ops = &binder_vm_ops;
3048 	vma->vm_private_data = proc;
3049 
3050 	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
3051 	if (ret)
3052 		return ret;
3053 	proc->files = get_files_struct(current);
3054 	return 0;
3055 
3056 err_bad_arg:
3057 	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
3058 	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
3059 	return ret;
3060 }
3061 
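/*
 * binder_open() - allocate per-process state on open of a binder device
 * node. proc->tsk is pinned to the thread group leader so that later
 * mmap() calls from other threads of the same process are accepted by
 * the proc->tsk check in binder_mmap().
 */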
3062 static int binder_open(struct inode *nodp, struct file *filp)
3063 {
3064 	struct binder_proc *proc;
3065 	struct binder_device *binder_dev;
3066 
3067 	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
3068 		     current->group_leader->pid, current->pid);
3069 
3070 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
3071 	if (proc == NULL)
3072 		return -ENOMEM;
3073 	get_task_struct(current->group_leader);
3074 	proc->tsk = current->group_leader;
3075 	INIT_LIST_HEAD(&proc->todo);
3076 	init_waitqueue_head(&proc->wait);
3077 	proc->default_priority = task_nice(current);
3078 	binder_dev = container_of(filp->private_data, struct binder_device,
3079 				  miscdev);
3080 	proc->context = &binder_dev->context;
3081 	binder_alloc_init(&proc->alloc);
3082 
3083 	binder_lock(__func__);
3084 
3085 	binder_stats_created(BINDER_STAT_PROC);
3086 	proc->pid = current->group_leader->pid;
3087 	INIT_LIST_HEAD(&proc->delivered_death);
3088 	filp->private_data = proc;
3089 
3090 	binder_unlock(__func__);
3091 
3092 	mutex_lock(&binder_procs_lock);
3093 	hlist_add_head(&proc->proc_node, &binder_procs);
3094 	mutex_unlock(&binder_procs_lock);
3095 
3096 	if (binder_debugfs_dir_entry_proc) {
3097 		char strbuf[11];
3098 
3099 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
3100 		/*
3101 		 * proc debug entries are shared between contexts, so
3102 		 * this will fail if the process tries to open the driver
3103 		 * again with a different context. The printing code will
3104 		 * print all contexts that a given PID has anyway, so this
3105 		 * is not a problem.
3106 		 */
3107 		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
3108 			binder_debugfs_dir_entry_proc,
3109 			(void *)(unsigned long)proc->pid,
3110 			&binder_proc_fops);
3111 	}
3112 
3113 	return 0;
3114 }
3115 
3116 static int binder_flush(struct file *filp, fl_owner_t id)
3117 {
3118 	struct binder_proc *proc = filp->private_data;
3119 
3120 	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
3121 
3122 	return 0;
3123 }
3124 
3125 static void binder_deferred_flush(struct binder_proc *proc)
3126 {
3127 	struct rb_node *n;
3128 	int wake_count = 0;
3129 
3130 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
3131 		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
3132 
3133 		thread->looper_need_return = true;
3134 		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
3135 			wake_up_interruptible(&thread->wait);
3136 			wake_count++;
3137 		}
3138 	}
3139 	wake_up_interruptible_all(&proc->wait);
3140 
3141 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3142 		     "binder_flush: %d woke %d threads\n", proc->pid,
3143 		     wake_count);
3144 }
3145 
3146 static int binder_release(struct inode *nodp, struct file *filp)
3147 {
3148 	struct binder_proc *proc = filp->private_data;
3149 
3150 	debugfs_remove(proc->debugfs_entry);
3151 	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
3152 
3153 	return 0;
3154 }
3155 
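/*
 * binder_node_release() - retire a node whose owning process is exiting.
 * Unreferenced nodes are freed outright; otherwise the node joins the
 * global dead-nodes list and a BINDER_WORK_DEAD_BINDER item is queued
 * for every ref that requested a death notification. Returns the
 * running count of incoming refs for the caller's statistics.
 */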
3156 static int binder_node_release(struct binder_node *node, int refs)
3157 {
3158 	struct binder_ref *ref;
3159 	int death = 0;
3160 
3161 	list_del_init(&node->work.entry);
3162 	binder_release_work(&node->async_todo);
3163 
3164 	if (hlist_empty(&node->refs)) {
3165 		kfree(node);
3166 		binder_stats_deleted(BINDER_STAT_NODE);
3167 
3168 		return refs;
3169 	}
3170 
3171 	node->proc = NULL;
3172 	node->local_strong_refs = 0;
3173 	node->local_weak_refs = 0;
3174 
3175 	spin_lock(&binder_dead_nodes_lock);
3176 	hlist_add_head(&node->dead_node, &binder_dead_nodes);
3177 	spin_unlock(&binder_dead_nodes_lock);
3178 
3179 	hlist_for_each_entry(ref, &node->refs, node_entry) {
3180 		refs++;
3181 
3182 		if (!ref->death)
3183 			continue;
3184 
3185 		death++;
3186 
3187 		if (list_empty(&ref->death->work.entry)) {
3188 			ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3189 			list_add_tail(&ref->death->work.entry,
3190 				      &ref->proc->todo);
3191 			wake_up_interruptible(&ref->proc->wait);
3192 		} else
3193 			BUG();
3194 	}
3195 
3196 	binder_debug(BINDER_DEBUG_DEAD_BINDER,
3197 		     "node %d now dead, refs %d, death %d\n",
3198 		     node->debug_id, refs, death);
3199 
3200 	return refs;
3201 }
3202 
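/*
 * binder_deferred_release() - final teardown of a binder_proc, run from
 * the deferred workqueue. Order matters here: threads are reaped first
 * (failing their pending replies), then nodes, then outgoing refs, then
 * any work left on the process lists, and finally the allocator state.
 */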
3203 static void binder_deferred_release(struct binder_proc *proc)
3204 {
3205 	struct binder_context *context = proc->context;
3206 	struct rb_node *n;
3207 	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
3208 
3209 	BUG_ON(proc->files);
3210 
3211 	mutex_lock(&binder_procs_lock);
3212 	hlist_del(&proc->proc_node);
3213 	mutex_unlock(&binder_procs_lock);
3214 
3215 	mutex_lock(&context->context_mgr_node_lock);
3216 	if (context->binder_context_mgr_node &&
3217 	    context->binder_context_mgr_node->proc == proc) {
3218 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
3219 			     "%s: %d context_mgr_node gone\n",
3220 			     __func__, proc->pid);
3221 		context->binder_context_mgr_node = NULL;
3222 	}
3223 	mutex_unlock(&context->context_mgr_node_lock);
3224 
3225 	threads = 0;
3226 	active_transactions = 0;
3227 	while ((n = rb_first(&proc->threads))) {
3228 		struct binder_thread *thread;
3229 
3230 		thread = rb_entry(n, struct binder_thread, rb_node);
3231 		threads++;
3232 		active_transactions += binder_free_thread(proc, thread);
3233 	}
3234 
3235 	nodes = 0;
3236 	incoming_refs = 0;
3237 	while ((n = rb_first(&proc->nodes))) {
3238 		struct binder_node *node;
3239 
3240 		node = rb_entry(n, struct binder_node, rb_node);
3241 		nodes++;
3242 		rb_erase(&node->rb_node, &proc->nodes);
3243 		incoming_refs = binder_node_release(node, incoming_refs);
3244 	}
3245 
3246 	outgoing_refs = 0;
3247 	while ((n = rb_first(&proc->refs_by_desc))) {
3248 		struct binder_ref *ref;
3249 
3250 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
3251 		outgoing_refs++;
3252 		binder_delete_ref(ref);
3253 	}
3254 
3255 	binder_release_work(&proc->todo);
3256 	binder_release_work(&proc->delivered_death);
3257 
3258 	binder_alloc_deferred_release(&proc->alloc);
3259 	binder_stats_deleted(BINDER_STAT_PROC);
3260 
3261 	put_task_struct(proc->tsk);
3262 
3263 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3264 		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
3265 		     __func__, proc->pid, threads, nodes, incoming_refs,
3266 		     outgoing_refs, active_transactions);
3267 
3268 	kfree(proc);
3269 }
3270 
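/*
 * binder_deferred_func() - workqueue handler that drains
 * binder_deferred_list one proc at a time, snapshotting and clearing
 * each proc's pending work bits under binder_deferred_lock before
 * acting on them.
 */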
3271 static void binder_deferred_func(struct work_struct *work)
3272 {
3273 	struct binder_proc *proc;
3274 	struct files_struct *files;
3275 
3276 	int defer;
3277 
3278 	do {
3279 		binder_lock(__func__);
3280 		mutex_lock(&binder_deferred_lock);
3281 		if (!hlist_empty(&binder_deferred_list)) {
3282 			proc = hlist_entry(binder_deferred_list.first,
3283 					struct binder_proc, deferred_work_node);
3284 			hlist_del_init(&proc->deferred_work_node);
3285 			defer = proc->deferred_work;
3286 			proc->deferred_work = 0;
3287 		} else {
3288 			proc = NULL;
3289 			defer = 0;
3290 		}
3291 		mutex_unlock(&binder_deferred_lock);
3292 
3293 		files = NULL;
3294 		if (defer & BINDER_DEFERRED_PUT_FILES) {
3295 			files = proc->files;
3296 			if (files)
3297 				proc->files = NULL;
3298 		}
3299 
3300 		if (defer & BINDER_DEFERRED_FLUSH)
3301 			binder_deferred_flush(proc);
3302 
3303 		if (defer & BINDER_DEFERRED_RELEASE)
3304 			binder_deferred_release(proc); /* frees proc */
3305 
3306 		binder_unlock(__func__);
3307 		if (files)
3308 			put_files_struct(files);
3309 	} while (proc);
3310 }
3311 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
3312 
3313 static void
3314 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
3315 {
3316 	mutex_lock(&binder_deferred_lock);
3317 	proc->deferred_work |= defer;
3318 	if (hlist_unhashed(&proc->deferred_work_node)) {
3319 		hlist_add_head(&proc->deferred_work_node,
3320 				&binder_deferred_list);
3321 		schedule_work(&binder_deferred_work);
3322 	}
3323 	mutex_unlock(&binder_deferred_lock);
3324 }
3325 
3326 static void print_binder_transaction(struct seq_file *m, const char *prefix,
3327 				     struct binder_transaction *t)
3328 {
3329 	seq_printf(m,
3330 		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
3331 		   prefix, t->debug_id, t,
3332 		   t->from ? t->from->proc->pid : 0,
3333 		   t->from ? t->from->pid : 0,
3334 		   t->to_proc ? t->to_proc->pid : 0,
3335 		   t->to_thread ? t->to_thread->pid : 0,
3336 		   t->code, t->flags, t->priority, t->need_reply);
3337 	if (t->buffer == NULL) {
3338 		seq_puts(m, " buffer free\n");
3339 		return;
3340 	}
3341 	if (t->buffer->target_node)
3342 		seq_printf(m, " node %d",
3343 			   t->buffer->target_node->debug_id);
3344 	seq_printf(m, " size %zd:%zd data %p\n",
3345 		   t->buffer->data_size, t->buffer->offsets_size,
3346 		   t->buffer->data);
3347 }
3348 
3349 static void print_binder_work(struct seq_file *m, const char *prefix,
3350 			      const char *transaction_prefix,
3351 			      struct binder_work *w)
3352 {
3353 	struct binder_node *node;
3354 	struct binder_transaction *t;
3355 
3356 	switch (w->type) {
3357 	case BINDER_WORK_TRANSACTION:
3358 		t = container_of(w, struct binder_transaction, work);
3359 		print_binder_transaction(m, transaction_prefix, t);
3360 		break;
3361 	case BINDER_WORK_TRANSACTION_COMPLETE:
3362 		seq_printf(m, "%stransaction complete\n", prefix);
3363 		break;
3364 	case BINDER_WORK_NODE:
3365 		node = container_of(w, struct binder_node, work);
3366 		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
3367 			   prefix, node->debug_id,
3368 			   (u64)node->ptr, (u64)node->cookie);
3369 		break;
3370 	case BINDER_WORK_DEAD_BINDER:
3371 		seq_printf(m, "%shas dead binder\n", prefix);
3372 		break;
3373 	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3374 		seq_printf(m, "%shas cleared dead binder\n", prefix);
3375 		break;
3376 	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
3377 		seq_printf(m, "%shas cleared death notification\n", prefix);
3378 		break;
3379 	default:
3380 		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
3381 		break;
3382 	}
3383 }
3384 
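/*
 * The seq_file rewind trick used below (and in print_binder_proc):
 * record m->count before and after printing the header; if nothing was
 * printed after the header, roll m->count back to start_pos so the
 * uninteresting entry vanishes from the output entirely.
 */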
3385 static void print_binder_thread(struct seq_file *m,
3386 				struct binder_thread *thread,
3387 				int print_always)
3388 {
3389 	struct binder_transaction *t;
3390 	struct binder_work *w;
3391 	size_t start_pos = m->count;
3392 	size_t header_pos;
3393 
3394 	seq_printf(m, "  thread %d: l %02x need_return %d\n",
3395 			thread->pid, thread->looper,
3396 			thread->looper_need_return);
3397 	header_pos = m->count;
3398 	t = thread->transaction_stack;
3399 	while (t) {
3400 		if (t->from == thread) {
3401 			print_binder_transaction(m,
3402 						 "    outgoing transaction", t);
3403 			t = t->from_parent;
3404 		} else if (t->to_thread == thread) {
3405 			print_binder_transaction(m,
3406 						 "    incoming transaction", t);
3407 			t = t->to_parent;
3408 		} else {
3409 			print_binder_transaction(m, "    bad transaction", t);
3410 			t = NULL;
3411 		}
3412 	}
3413 	list_for_each_entry(w, &thread->todo, entry) {
3414 		print_binder_work(m, "    ", "    pending transaction", w);
3415 	}
3416 	if (!print_always && m->count == header_pos)
3417 		m->count = start_pos;
3418 }
3419 
3420 static void print_binder_node(struct seq_file *m, struct binder_node *node)
3421 {
3422 	struct binder_ref *ref;
3423 	struct binder_work *w;
3424 	int count;
3425 
3426 	count = 0;
3427 	hlist_for_each_entry(ref, &node->refs, node_entry)
3428 		count++;
3429 
3430 	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
3431 		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
3432 		   node->has_strong_ref, node->has_weak_ref,
3433 		   node->local_strong_refs, node->local_weak_refs,
3434 		   node->internal_strong_refs, count);
3435 	if (count) {
3436 		seq_puts(m, " proc");
3437 		hlist_for_each_entry(ref, &node->refs, node_entry)
3438 			seq_printf(m, " %d", ref->proc->pid);
3439 	}
3440 	seq_puts(m, "\n");
3441 	list_for_each_entry(w, &node->async_todo, entry)
3442 		print_binder_work(m, "    ",
3443 				  "    pending async transaction", w);
3444 }
3445 
3446 static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
3447 {
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
3449 		   ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
3450 		   ref->node->debug_id, ref->strong, ref->weak, ref->death);
3451 }
3452 
3453 static void print_binder_proc(struct seq_file *m,
3454 			      struct binder_proc *proc, int print_all)
3455 {
3456 	struct binder_work *w;
3457 	struct rb_node *n;
3458 	size_t start_pos = m->count;
3459 	size_t header_pos;
3460 
3461 	seq_printf(m, "proc %d\n", proc->pid);
3462 	seq_printf(m, "context %s\n", proc->context->name);
3463 	header_pos = m->count;
3464 
3465 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3466 		print_binder_thread(m, rb_entry(n, struct binder_thread,
3467 						rb_node), print_all);
3468 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
3469 		struct binder_node *node = rb_entry(n, struct binder_node,
3470 						    rb_node);
3471 		if (print_all || node->has_async_transaction)
3472 			print_binder_node(m, node);
3473 	}
3474 	if (print_all) {
3475 		for (n = rb_first(&proc->refs_by_desc);
3476 		     n != NULL;
3477 		     n = rb_next(n))
3478 			print_binder_ref(m, rb_entry(n, struct binder_ref,
3479 						     rb_node_desc));
3480 	}
3481 	binder_alloc_print_allocated(m, &proc->alloc);
3482 	list_for_each_entry(w, &proc->todo, entry)
3483 		print_binder_work(m, "  ", "  pending transaction", w);
3484 	list_for_each_entry(w, &proc->delivered_death, entry) {
3485 		seq_puts(m, "  has delivered dead binder\n");
3486 		break;
3487 	}
3488 	if (!print_all && m->count == header_pos)
3489 		m->count = start_pos;
3490 }
3491 
3492 static const char * const binder_return_strings[] = {
3493 	"BR_ERROR",
3494 	"BR_OK",
3495 	"BR_TRANSACTION",
3496 	"BR_REPLY",
3497 	"BR_ACQUIRE_RESULT",
3498 	"BR_DEAD_REPLY",
3499 	"BR_TRANSACTION_COMPLETE",
3500 	"BR_INCREFS",
3501 	"BR_ACQUIRE",
3502 	"BR_RELEASE",
3503 	"BR_DECREFS",
3504 	"BR_ATTEMPT_ACQUIRE",
3505 	"BR_NOOP",
3506 	"BR_SPAWN_LOOPER",
3507 	"BR_FINISHED",
3508 	"BR_DEAD_BINDER",
3509 	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
3510 	"BR_FAILED_REPLY"
3511 };
3512 
3513 static const char * const binder_command_strings[] = {
3514 	"BC_TRANSACTION",
3515 	"BC_REPLY",
3516 	"BC_ACQUIRE_RESULT",
3517 	"BC_FREE_BUFFER",
3518 	"BC_INCREFS",
3519 	"BC_ACQUIRE",
3520 	"BC_RELEASE",
3521 	"BC_DECREFS",
3522 	"BC_INCREFS_DONE",
3523 	"BC_ACQUIRE_DONE",
3524 	"BC_ATTEMPT_ACQUIRE",
3525 	"BC_REGISTER_LOOPER",
3526 	"BC_ENTER_LOOPER",
3527 	"BC_EXIT_LOOPER",
3528 	"BC_REQUEST_DEATH_NOTIFICATION",
3529 	"BC_CLEAR_DEATH_NOTIFICATION",
3530 	"BC_DEAD_BINDER_DONE",
3531 	"BC_TRANSACTION_SG",
3532 	"BC_REPLY_SG",
3533 };
3534 
3535 static const char * const binder_objstat_strings[] = {
3536 	"proc",
3537 	"thread",
3538 	"node",
3539 	"ref",
3540 	"death",
3541 	"transaction",
3542 	"transaction_complete"
3543 };
3544 
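/*
 * Print the non-zero command (bc) and return (br) counters followed by
 * the object statistics.  The BUILD_BUG_ON()s keep the string tables
 * above in lockstep with the counter arrays in struct binder_stats;
 * "active" is created - deleted, while "total" is the cumulative
 * creation count.
 */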
3545 static void print_binder_stats(struct seq_file *m, const char *prefix,
3546 			       struct binder_stats *stats)
3547 {
3548 	int i;
3549 
3550 	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
3551 		     ARRAY_SIZE(binder_command_strings));
3552 	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
3553 		int temp = atomic_read(&stats->bc[i]);
3554 
3555 		if (temp)
3556 			seq_printf(m, "%s%s: %d\n", prefix,
3557 				   binder_command_strings[i], temp);
3558 	}
3559 
3560 	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
3561 		     ARRAY_SIZE(binder_return_strings));
3562 	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
3563 		int temp = atomic_read(&stats->br[i]);
3564 
3565 		if (temp)
3566 			seq_printf(m, "%s%s: %d\n", prefix,
3567 				   binder_return_strings[i], temp);
3568 	}
3569 
3570 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3571 		     ARRAY_SIZE(binder_objstat_strings));
3572 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3573 		     ARRAY_SIZE(stats->obj_deleted));
3574 	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
3575 		int created = atomic_read(&stats->obj_created[i]);
3576 		int deleted = atomic_read(&stats->obj_deleted[i]);
3577 
3578 		if (created || deleted)
3579 			seq_printf(m, "%s%s: active %d total %d\n",
3580 				prefix,
3581 				binder_objstat_strings[i],
3582 				created - deleted,
3583 				created);
3584 	}
3585 }
3586 
3587 static void print_binder_proc_stats(struct seq_file *m,
3588 				    struct binder_proc *proc)
3589 {
3590 	struct binder_work *w;
3591 	struct rb_node *n;
3592 	int count, strong, weak;
3593 
3594 	seq_printf(m, "proc %d\n", proc->pid);
3595 	seq_printf(m, "context %s\n", proc->context->name);
3596 	count = 0;
3597 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3598 		count++;
3599 	seq_printf(m, "  threads: %d\n", count);
3600 	seq_printf(m, "  requested threads: %d+%d/%d\n"
3601 			"  ready threads %d\n"
3602 			"  free async space %zd\n", proc->requested_threads,
3603 			proc->requested_threads_started, proc->max_threads,
3604 			proc->ready_threads,
3605 			binder_alloc_get_free_async_space(&proc->alloc));
3606 	count = 0;
3607 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
3608 		count++;
3609 	seq_printf(m, "  nodes: %d\n", count);
3610 	count = 0;
3611 	strong = 0;
3612 	weak = 0;
3613 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
3614 		struct binder_ref *ref = rb_entry(n, struct binder_ref,
3615 						  rb_node_desc);
3616 		count++;
3617 		strong += ref->strong;
3618 		weak += ref->weak;
3619 	}
3620 	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
3621 
3622 	count = binder_alloc_get_allocated_count(&proc->alloc);
3623 	seq_printf(m, "  buffers: %d\n", count);
3624 
3625 	count = 0;
3626 	list_for_each_entry(w, &proc->todo, entry) {
3627 		switch (w->type) {
3628 		case BINDER_WORK_TRANSACTION:
3629 			count++;
3630 			break;
3631 		default:
3632 			break;
3633 		}
3634 	}
3635 	seq_printf(m, "  pending transactions: %d\n", count);
3636 
3637 	print_binder_stats(m, "  ", &proc->stats);
3638 }
3639 
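/*
 * The _show() functions below back the debugfs files created in
 * binder_init().  With debugfs mounted in the usual place, a quick
 * look at the global state is just (illustrative):
 *
 *	cat /sys/kernel/debug/binder/state
 *	cat /sys/kernel/debug/binder/stats
 *
 * binder_state_show() itself dumps any dead nodes first, then every
 * registered proc in full (print_all == 1).
 */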
3641 static int binder_state_show(struct seq_file *m, void *unused)
3642 {
3643 	struct binder_proc *proc;
3644 	struct binder_node *node;
3645 
3646 	binder_lock(__func__);
3647 
3648 	seq_puts(m, "binder state:\n");
3649 
3650 	spin_lock(&binder_dead_nodes_lock);
3651 	if (!hlist_empty(&binder_dead_nodes))
3652 		seq_puts(m, "dead nodes:\n");
3653 	hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
3654 		print_binder_node(m, node);
3655 	spin_unlock(&binder_dead_nodes_lock);
3656 
3657 	mutex_lock(&binder_procs_lock);
3658 	hlist_for_each_entry(proc, &binder_procs, proc_node)
3659 		print_binder_proc(m, proc, 1);
3660 	mutex_unlock(&binder_procs_lock);
3661 	binder_unlock(__func__);
3662 	return 0;
3663 }
3664 
3665 static int binder_stats_show(struct seq_file *m, void *unused)
3666 {
3667 	struct binder_proc *proc;
3668 
3669 	binder_lock(__func__);
3670 
3671 	seq_puts(m, "binder stats:\n");
3672 
3673 	print_binder_stats(m, "", &binder_stats);
3674 
3675 	mutex_lock(&binder_procs_lock);
3676 	hlist_for_each_entry(proc, &binder_procs, proc_node)
3677 		print_binder_proc_stats(m, proc);
3678 	mutex_unlock(&binder_procs_lock);
3679 	binder_unlock(__func__);
3680 	return 0;
3681 }
3682 
3683 static int binder_transactions_show(struct seq_file *m, void *unused)
3684 {
3685 	struct binder_proc *proc;
3686 
3687 	binder_lock(__func__);
3688 
3689 	seq_puts(m, "binder transactions:\n");
3690 	mutex_lock(&binder_procs_lock);
3691 	hlist_for_each_entry(proc, &binder_procs, proc_node)
3692 		print_binder_proc(m, proc, 0);
3693 	mutex_unlock(&binder_procs_lock);
3694 	binder_unlock(__func__);
3695 	return 0;
3696 }
3697 
3698 static int binder_proc_show(struct seq_file *m, void *unused)
3699 {
3700 	struct binder_proc *itr;
3701 	int pid = (unsigned long)m->private;
3702 
3703 	binder_lock(__func__);
3704 
3705 	mutex_lock(&binder_procs_lock);
3706 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
3707 		if (itr->pid == pid) {
3708 			seq_puts(m, "binder proc state:\n");
3709 			print_binder_proc(m, itr, 1);
3710 		}
3711 	}
3712 	mutex_unlock(&binder_procs_lock);
3713 
3714 	binder_unlock(__func__);
3715 	return 0;
3716 }
3717 
3718 static void print_binder_transaction_log_entry(struct seq_file *m,
3719 					struct binder_transaction_log_entry *e)
3720 {
3721 	seq_printf(m,
3722 		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d\n",
3723 		   e->debug_id, (e->call_type == 2) ? "reply" :
3724 		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
3725 		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
3726 		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
3727 		   e->return_error, e->return_error_param,
3728 		   e->return_error_line);
3729 }
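/*
 * The transaction log is a fixed-size ring: log->next is the slot that
 * will be overwritten next.  Once the ring has wrapped (log->full),
 * the entries from next..end are the oldest and are printed first,
 * then 0..next-1, yielding oldest-to-newest order.
 */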
3730 
3731 static int binder_transaction_log_show(struct seq_file *m, void *unused)
3732 {
3733 	struct binder_transaction_log *log = m->private;
3734 	int i;
3735 
3736 	if (log->full) {
3737 		for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
3738 			print_binder_transaction_log_entry(m, &log->entry[i]);
3739 	}
3740 	for (i = 0; i < log->next; i++)
3741 		print_binder_transaction_log_entry(m, &log->entry[i]);
3742 	return 0;
3743 }
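/*
 * Entry points for /dev/<binder-device> file descriptors: userspace
 * drives the protocol via ioctl() (BINDER_WRITE_READ and friends),
 * maps the receive buffer with mmap(), and triggers deferred cleanup
 * through flush()/release().
 */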
3744 
3745 static const struct file_operations binder_fops = {
3746 	.owner = THIS_MODULE,
3747 	.poll = binder_poll,
3748 	.unlocked_ioctl = binder_ioctl,
3749 	.compat_ioctl = binder_ioctl,
3750 	.mmap = binder_mmap,
3751 	.open = binder_open,
3752 	.flush = binder_flush,
3753 	.release = binder_release,
3754 };
3755 
3756 BINDER_DEBUG_ENTRY(state);
3757 BINDER_DEBUG_ENTRY(stats);
3758 BINDER_DEBUG_ENTRY(transactions);
3759 BINDER_DEBUG_ENTRY(transaction_log);
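/*
 * Register one binder device for @name: allocate a struct
 * binder_device, register it as a dynamic-minor misc device (which
 * makes /dev/<name> appear via devtmpfs/udev), initialize its context,
 * and link it into the global binder_devices list.  On misc_register()
 * failure the allocation is freed here and the error returned.
 */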
3760 
3761 static int __init init_binder_device(const char *name)
3762 {
3763 	int ret;
3764 	struct binder_device *binder_device;
3765 
3766 	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
3767 	if (!binder_device)
3768 		return -ENOMEM;
3769 
3770 	binder_device->miscdev.fops = &binder_fops;
3771 	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
3772 	binder_device->miscdev.name = name;
3773 
3774 	binder_device->context.binder_context_mgr_uid = INVALID_UID;
3775 	binder_device->context.name = name;
3776 	mutex_init(&binder_device->context.context_mgr_node_lock);
3777 
3778 	ret = misc_register(&binder_device->miscdev);
3779 	if (ret < 0) {
3780 		kfree(binder_device);
3781 		return ret;
3782 	}
3783 
3784 	hlist_add_head(&binder_device->hlist, &binder_devices);
3785 
3786 	return ret;
3787 }
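/*
 * Module init: create the debugfs hierarchy, then register one device
 * per comma-separated name in the binder_devices_param string (a
 * module parameter; typically just "binder", plus "hwbinder" and
 * "vndbinder" on configurations that split the HAL and vendor IPC
 * domains).  Note that device_names is intentionally not freed on
 * success: each miscdevice's ->name points into it for the lifetime
 * of the module.
 */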
3788 
3789 static int __init binder_init(void)
3790 {
3791 	int ret;
	char *device_name, *device_names, *device_tmp;
3793 	struct binder_device *device;
3794 	struct hlist_node *tmp;
3795 
	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root) {
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);
		debugfs_create_file("state",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}
3828 
	/*
	 * Copy the module_parameter string, because we don't want to
	 * tokenize it in-place.
	 */
	device_names = kstrdup(binder_devices_param, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}
3839 
	/*
	 * strsep() advances its first argument, so tokenize via a
	 * separate cursor; device_names must keep pointing at the
	 * allocation so it can be freed in the error path below.
	 */
	device_tmp = device_names;
	while ((device_name = strsep(&device_tmp, ","))) {
3841 		ret = init_binder_device(device_name);
3842 		if (ret)
3843 			goto err_init_binder_device_failed;
3844 	}
3845 
3846 	return ret;
3847 
3848 err_init_binder_device_failed:
3849 	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
3850 		misc_deregister(&device->miscdev);
3851 		hlist_del(&device->hlist);
3852 		kfree(device);
	}
	kfree(device_names);
err_alloc_device_names_failed:
3855 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
3856 
3857 	return ret;
3858 }
3859 
3860 device_initcall(binder_init);
3861 
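/*
 * Illustrative userspace sketch (not part of the driver): the smallest
 * sanity check against a device registered above is the version
 * handshake.  The device path assumes the default "binder" name.
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/android/binder.h>
 *
 *	int main(void)
 *	{
 *		struct binder_version vers;
 *		int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *
 *		if (fd < 0 || ioctl(fd, BINDER_VERSION, &vers) < 0)
 *			return 1;
 *		printf("protocol version %d\n", vers.protocol_version);
 *		return 0;
 *	}
 *
 * BINDER_VERSION and struct binder_version come from
 * <uapi/linux/android/binder.h>, installed for userspace as
 * <linux/android/binder.h>.
 */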
3862 #define CREATE_TRACE_POINTS
3863 #include "binder_trace.h"
3864 
3865 MODULE_LICENSE("GPL v2");
3866