xref: /openbmc/linux/drivers/android/binder.c (revision 7a4408c6)
1 /* binder.c
2  *
3  * Android IPC Subsystem
4  *
5  * Copyright (C) 2007-2008 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17 
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 
20 #include <asm/cacheflush.h>
21 #include <linux/fdtable.h>
22 #include <linux/file.h>
23 #include <linux/freezer.h>
24 #include <linux/fs.h>
25 #include <linux/list.h>
26 #include <linux/miscdevice.h>
27 #include <linux/module.h>
28 #include <linux/mutex.h>
29 #include <linux/nsproxy.h>
30 #include <linux/poll.h>
31 #include <linux/debugfs.h>
32 #include <linux/rbtree.h>
33 #include <linux/sched/signal.h>
34 #include <linux/sched/mm.h>
35 #include <linux/seq_file.h>
36 #include <linux/uaccess.h>
37 #include <linux/pid_namespace.h>
38 #include <linux/security.h>
39 
40 #ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
41 #define BINDER_IPC_32BIT 1
42 #endif
43 
44 #include <uapi/linux/android/binder.h>
45 #include "binder_alloc.h"
46 #include "binder_trace.h"
47 
48 static DEFINE_MUTEX(binder_main_lock);
49 
50 static HLIST_HEAD(binder_deferred_list);
51 static DEFINE_MUTEX(binder_deferred_lock);
52 
53 static HLIST_HEAD(binder_devices);
54 static HLIST_HEAD(binder_procs);
55 static DEFINE_MUTEX(binder_procs_lock);
56 
57 static HLIST_HEAD(binder_dead_nodes);
58 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
59 
60 static struct dentry *binder_debugfs_dir_entry_root;
61 static struct dentry *binder_debugfs_dir_entry_proc;
62 static atomic_t binder_last_id;
63 
64 #define BINDER_DEBUG_ENTRY(name) \
65 static int binder_##name##_open(struct inode *inode, struct file *file) \
66 { \
67 	return single_open(file, binder_##name##_show, inode->i_private); \
68 } \
69 \
70 static const struct file_operations binder_##name##_fops = { \
71 	.owner = THIS_MODULE, \
72 	.open = binder_##name##_open, \
73 	.read = seq_read, \
74 	.llseek = seq_lseek, \
75 	.release = single_release, \
76 }
77 
78 static int binder_proc_show(struct seq_file *m, void *unused);
79 BINDER_DEBUG_ENTRY(proc);
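/*
 * For reference, the invocation above expands mechanically to the
 * following (sketch, not compiled here):
 */
#if 0
static int binder_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, binder_proc_show, inode->i_private);
}

static const struct file_operations binder_proc_fops = {
	.owner = THIS_MODULE,
	.open = binder_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#endif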
80 
81 /* This is only defined in include/asm-arm/sizes.h */
82 #ifndef SZ_1K
83 #define SZ_1K                               0x400
84 #endif
85 
86 #ifndef SZ_4M
87 #define SZ_4M                               0x400000
88 #endif
89 
90 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
91 
92 #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
93 
94 enum {
95 	BINDER_DEBUG_USER_ERROR             = 1U << 0,
96 	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
97 	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
98 	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
99 	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
100 	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
101 	BINDER_DEBUG_READ_WRITE             = 1U << 6,
102 	BINDER_DEBUG_USER_REFS              = 1U << 7,
103 	BINDER_DEBUG_THREADS                = 1U << 8,
104 	BINDER_DEBUG_TRANSACTION            = 1U << 9,
105 	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
106 	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
107 	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
108 	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
109 };
110 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
111 	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
112 module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
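/*
 * Since the parameter is declared S_IWUSR | S_IRUGO, the mask can be
 * changed at runtime through sysfs, e.g. (illustrative):
 *
 *	echo 0x210 > /sys/module/binder/parameters/debug_mask
 *
 * would enable BINDER_DEBUG_TRANSACTION (1 << 9) and
 * BINDER_DEBUG_DEAD_BINDER (1 << 4).
 */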
113 
114 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
115 module_param_named(devices, binder_devices_param, charp, 0444);
116 
117 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
118 static int binder_stop_on_user_error;
119 
120 static int binder_set_stop_on_user_error(const char *val,
121 					 struct kernel_param *kp)
122 {
123 	int ret;
124 
125 	ret = param_set_int(val, kp);
126 	if (binder_stop_on_user_error < 2)
127 		wake_up(&binder_user_error_wait);
128 	return ret;
129 }
130 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
131 	param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
132 
133 #define binder_debug(mask, x...) \
134 	do { \
135 		if (binder_debug_mask & mask) \
136 			pr_info(x); \
137 	} while (0)
138 
139 #define binder_user_error(x...) \
140 	do { \
141 		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
142 			pr_info(x); \
143 		if (binder_stop_on_user_error) \
144 			binder_stop_on_user_error = 2; \
145 	} while (0)
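/*
 * Typical call, mirroring the uses later in this file. Besides logging,
 * binder_user_error() bumps binder_stop_on_user_error to 2 when stopping
 * is enabled, which (in the read path, outside this excerpt) parks binder
 * threads on binder_user_error_wait until the parameter is lowered again:
 */
#if 0
	binder_user_error("%d:%d got transaction with invalid handle, %d\n",
			  proc->pid, thread->pid, fp->handle);
#endif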
146 
147 #define to_flat_binder_object(hdr) \
148 	container_of(hdr, struct flat_binder_object, hdr)
149 
150 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
151 
152 #define to_binder_buffer_object(hdr) \
153 	container_of(hdr, struct binder_buffer_object, hdr)
154 
155 #define to_binder_fd_array_object(hdr) \
156 	container_of(hdr, struct binder_fd_array_object, hdr)
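/*
 * These wrappers recover the enclosing object from an embedded header;
 * a sketch of the pattern used by the translate/release code below
 * (@buffer and @offp assumed in scope):
 */
#if 0
	struct binder_object_header *hdr =
		(struct binder_object_header *)(buffer->data + *offp);
	struct flat_binder_object *fp;

	if (hdr->type == BINDER_TYPE_BINDER)
		fp = to_flat_binder_object(hdr);
#endif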
157 
158 enum binder_stat_types {
159 	BINDER_STAT_PROC,
160 	BINDER_STAT_THREAD,
161 	BINDER_STAT_NODE,
162 	BINDER_STAT_REF,
163 	BINDER_STAT_DEATH,
164 	BINDER_STAT_TRANSACTION,
165 	BINDER_STAT_TRANSACTION_COMPLETE,
166 	BINDER_STAT_COUNT
167 };
168 
169 struct binder_stats {
170 	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
171 	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
172 	atomic_t obj_created[BINDER_STAT_COUNT];
173 	atomic_t obj_deleted[BINDER_STAT_COUNT];
174 };
175 
176 static struct binder_stats binder_stats;
177 
178 static inline void binder_stats_deleted(enum binder_stat_types type)
179 {
180 	atomic_inc(&binder_stats.obj_deleted[type]);
181 }
182 
183 static inline void binder_stats_created(enum binder_stat_types type)
184 {
185 	atomic_inc(&binder_stats.obj_created[type]);
186 }
187 
188 struct binder_transaction_log_entry {
189 	int debug_id;
190 	int debug_id_done;
191 	int call_type;
192 	int from_proc;
193 	int from_thread;
194 	int target_handle;
195 	int to_proc;
196 	int to_thread;
197 	int to_node;
198 	int data_size;
199 	int offsets_size;
200 	int return_error_line;
201 	uint32_t return_error;
202 	uint32_t return_error_param;
203 	const char *context_name;
204 };
205 struct binder_transaction_log {
206 	atomic_t cur;
207 	bool full;
208 	struct binder_transaction_log_entry entry[32];
209 };
210 static struct binder_transaction_log binder_transaction_log;
211 static struct binder_transaction_log binder_transaction_log_failed;
212 
213 static struct binder_transaction_log_entry *binder_transaction_log_add(
214 	struct binder_transaction_log *log)
215 {
216 	struct binder_transaction_log_entry *e;
217 	unsigned int cur = atomic_inc_return(&log->cur);
218 
219 	if (cur >= ARRAY_SIZE(log->entry))
220 		log->full = true;
221 	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
222 	WRITE_ONCE(e->debug_id_done, 0);
223 	/*
224 	 * write-barrier to synchronize access to e->debug_id_done.
225 	 * We make sure the initialized 0 value is seen before
226 	 * the other fields are zeroed by the memset() below.
227 	 */
228 	smp_wmb();
229 	memset(e, 0, sizeof(*e));
230 	return e;
231 }
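/*
 * Reader-side sketch (the real readers are the debugfs show functions
 * later in this file): debug_id_done, the smp_wmb() above and a pairing
 * smp_rmb() let a reader detect an entry that was reused while it was
 * being read. With 32 slots, e.g. the entry for cur == 34 lands in slot
 * 34 % 32 == 2, and log->full has been true since cur first reached 32.
 */
#if 0
	unsigned int i = atomic_read(&log->cur) % ARRAY_SIZE(log->entry);
	struct binder_transaction_log_entry *e = &log->entry[i];
	int done = READ_ONCE(e->debug_id_done);

	smp_rmb();	/* pairs with smp_wmb() in binder_transaction_log_add() */
	/* ... copy out the entry fields here ... */
	if (!done || done != READ_ONCE(e->debug_id_done))
		pr_info("log entry %u is incomplete\n", i);
#endif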
232 
233 struct binder_context {
234 	struct binder_node *binder_context_mgr_node;
235 	struct mutex context_mgr_node_lock;
236 
237 	kuid_t binder_context_mgr_uid;
238 	const char *name;
239 };
240 
241 struct binder_device {
242 	struct hlist_node hlist;
243 	struct miscdevice miscdev;
244 	struct binder_context context;
245 };
246 
247 struct binder_work {
248 	struct list_head entry;
249 	enum {
250 		BINDER_WORK_TRANSACTION = 1,
251 		BINDER_WORK_TRANSACTION_COMPLETE,
252 		BINDER_WORK_RETURN_ERROR,
253 		BINDER_WORK_NODE,
254 		BINDER_WORK_DEAD_BINDER,
255 		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
256 		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
257 	} type;
258 };
259 
260 struct binder_error {
261 	struct binder_work work;
262 	uint32_t cmd;
263 };
264 
265 struct binder_node {
266 	int debug_id;
267 	struct binder_work work;
268 	union {
269 		struct rb_node rb_node;
270 		struct hlist_node dead_node;
271 	};
272 	struct binder_proc *proc;
273 	struct hlist_head refs;
274 	int internal_strong_refs;
275 	int local_weak_refs;
276 	int local_strong_refs;
277 	binder_uintptr_t ptr;
278 	binder_uintptr_t cookie;
279 	unsigned has_strong_ref:1;
280 	unsigned pending_strong_ref:1;
281 	unsigned has_weak_ref:1;
282 	unsigned pending_weak_ref:1;
283 	unsigned has_async_transaction:1;
284 	unsigned accept_fds:1;
285 	unsigned min_priority:8;
286 	struct list_head async_todo;
287 };
288 
289 struct binder_ref_death {
290 	struct binder_work work;
291 	binder_uintptr_t cookie;
292 };
293 
294 struct binder_ref {
295 	/* Lookups needed: */
296 	/*   node + proc => ref (transaction) */
297 	/*   desc + proc => ref (transaction, inc/dec ref) */
298 	/*   node => refs + procs (proc exit) */
299 	int debug_id;
300 	struct rb_node rb_node_desc;
301 	struct rb_node rb_node_node;
302 	struct hlist_node node_entry;
303 	struct binder_proc *proc;
304 	struct binder_node *node;
305 	uint32_t desc;
306 	int strong;
307 	int weak;
308 	struct binder_ref_death *death;
309 };
310 
311 enum binder_deferred_state {
312 	BINDER_DEFERRED_PUT_FILES    = 0x01,
313 	BINDER_DEFERRED_FLUSH        = 0x02,
314 	BINDER_DEFERRED_RELEASE      = 0x04,
315 };
316 
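/*
 * A rough key for the rbtrees below, as implied by the lookup helpers in
 * this file: @threads is keyed by thread pid, @nodes by binder_node.ptr,
 * @refs_by_desc by binder_ref.desc, and @refs_by_node by the node's
 * kernel address.
 */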
317 struct binder_proc {
318 	struct hlist_node proc_node;
319 	struct rb_root threads;
320 	struct rb_root nodes;
321 	struct rb_root refs_by_desc;
322 	struct rb_root refs_by_node;
323 	int pid;
324 	struct task_struct *tsk;
325 	struct files_struct *files;
326 	struct hlist_node deferred_work_node;
327 	int deferred_work;
328 	bool is_dead;
329 
330 	struct list_head todo;
331 	wait_queue_head_t wait;
332 	struct binder_stats stats;
333 	struct list_head delivered_death;
334 	int max_threads;
335 	int requested_threads;
336 	int requested_threads_started;
337 	int ready_threads;
338 	int tmp_ref;
339 	long default_priority;
340 	struct dentry *debugfs_entry;
341 	struct binder_alloc alloc;
342 	struct binder_context *context;
343 };
344 
345 enum {
346 	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
347 	BINDER_LOOPER_STATE_ENTERED     = 0x02,
348 	BINDER_LOOPER_STATE_EXITED      = 0x04,
349 	BINDER_LOOPER_STATE_INVALID     = 0x08,
350 	BINDER_LOOPER_STATE_WAITING     = 0x10,
351 };
352 
353 struct binder_thread {
354 	struct binder_proc *proc;
355 	struct rb_node rb_node;
356 	int pid;
357 	int looper;              /* only modified by this thread */
358 	bool looper_need_return; /* can be written by other thread */
359 	struct binder_transaction *transaction_stack;
360 	struct list_head todo;
361 	struct binder_error return_error;
362 	struct binder_error reply_error;
363 	wait_queue_head_t wait;
364 	struct binder_stats stats;
365 	atomic_t tmp_ref;
366 	bool is_dead;
367 };
368 
369 struct binder_transaction {
370 	int debug_id;
371 	struct binder_work work;
372 	struct binder_thread *from;
373 	struct binder_transaction *from_parent;
374 	struct binder_proc *to_proc;
375 	struct binder_thread *to_thread;
376 	struct binder_transaction *to_parent;
377 	unsigned need_reply:1;
378 	/* unsigned is_dead:1; */	/* not used at the moment */
379 
380 	struct binder_buffer *buffer;
381 	unsigned int	code;
382 	unsigned int	flags;
383 	long	priority;
384 	long	saved_priority;
385 	kuid_t	sender_euid;
386 	/**
387 	 * @lock:  protects @from, @to_proc, and @to_thread
388 	 *
389 	 * @from, @to_proc, and @to_thread can be set to NULL
390 	 * during thread teardown
391 	 */
392 	spinlock_t lock;
393 };
394 
395 static void
396 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
397 static void binder_free_thread(struct binder_thread *thread);
398 static void binder_free_proc(struct binder_proc *proc);
399 
400 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
401 {
402 	struct files_struct *files = proc->files;
403 	unsigned long rlim_cur;
404 	unsigned long irqs;
405 
406 	if (files == NULL)
407 		return -ESRCH;
408 
409 	if (!lock_task_sighand(proc->tsk, &irqs))
410 		return -EMFILE;
411 
412 	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
413 	unlock_task_sighand(proc->tsk, &irqs);
414 
415 	return __alloc_fd(files, 0, rlim_cur, flags);
416 }
417 
418 /*
419  * copied from fd_install
420  */
421 static void task_fd_install(
422 	struct binder_proc *proc, unsigned int fd, struct file *file)
423 {
424 	if (proc->files)
425 		__fd_install(proc->files, fd, file);
426 }
427 
428 /*
429  * copied from sys_close
430  */
431 static long task_close_fd(struct binder_proc *proc, unsigned int fd)
432 {
433 	int retval;
434 
435 	if (proc->files == NULL)
436 		return -ESRCH;
437 
438 	retval = __close_fd(proc->files, fd);
439 	/* can't restart close syscall because file table entry was cleared */
440 	if (unlikely(retval == -ERESTARTSYS ||
441 		     retval == -ERESTARTNOINTR ||
442 		     retval == -ERESTARTNOHAND ||
443 		     retval == -ERESTART_RESTARTBLOCK))
444 		retval = -EINTR;
445 
446 	return retval;
447 }
448 
449 static inline void binder_lock(const char *tag)
450 {
451 	trace_binder_lock(tag);
452 	mutex_lock(&binder_main_lock);
453 	trace_binder_locked(tag);
454 }
455 
456 static inline void binder_unlock(const char *tag)
457 {
458 	trace_binder_unlock(tag);
459 	mutex_unlock(&binder_main_lock);
460 }
461 
462 static void binder_set_nice(long nice)
463 {
464 	long min_nice;
465 
466 	if (can_nice(current, nice)) {
467 		set_user_nice(current, nice);
468 		return;
469 	}
470 	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
471 	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
472 		     "%d: nice value %ld not allowed use %ld instead\n",
473 		      current->pid, nice, min_nice);
474 	set_user_nice(current, min_nice);
475 	if (min_nice <= MAX_NICE)
476 		return;
477 	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
478 }
479 
480 static struct binder_node *binder_get_node(struct binder_proc *proc,
481 					   binder_uintptr_t ptr)
482 {
483 	struct rb_node *n = proc->nodes.rb_node;
484 	struct binder_node *node;
485 
486 	while (n) {
487 		node = rb_entry(n, struct binder_node, rb_node);
488 
489 		if (ptr < node->ptr)
490 			n = n->rb_left;
491 		else if (ptr > node->ptr)
492 			n = n->rb_right;
493 		else
494 			return node;
495 	}
496 	return NULL;
497 }
498 
499 static struct binder_node *binder_new_node(struct binder_proc *proc,
500 					   binder_uintptr_t ptr,
501 					   binder_uintptr_t cookie)
502 {
503 	struct rb_node **p = &proc->nodes.rb_node;
504 	struct rb_node *parent = NULL;
505 	struct binder_node *node;
506 
507 	while (*p) {
508 		parent = *p;
509 		node = rb_entry(parent, struct binder_node, rb_node);
510 
511 		if (ptr < node->ptr)
512 			p = &(*p)->rb_left;
513 		else if (ptr > node->ptr)
514 			p = &(*p)->rb_right;
515 		else
516 			return NULL;
517 	}
518 
519 	node = kzalloc(sizeof(*node), GFP_KERNEL);
520 	if (node == NULL)
521 		return NULL;
522 	binder_stats_created(BINDER_STAT_NODE);
523 	rb_link_node(&node->rb_node, parent, p);
524 	rb_insert_color(&node->rb_node, &proc->nodes);
525 	node->debug_id = atomic_inc_return(&binder_last_id);
526 	node->proc = proc;
527 	node->ptr = ptr;
528 	node->cookie = cookie;
529 	node->work.type = BINDER_WORK_NODE;
530 	INIT_LIST_HEAD(&node->work.entry);
531 	INIT_LIST_HEAD(&node->async_todo);
532 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
533 		     "%d:%d node %d u%016llx c%016llx created\n",
534 		     proc->pid, current->pid, node->debug_id,
535 		     (u64)node->ptr, (u64)node->cookie);
536 	return node;
537 }
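/*
 * The two helpers above are typically paired as a lookup-or-create, as
 * in binder_translate_binder() later in this file (sketch):
 */
#if 0
	struct binder_node *node = binder_get_node(proc, fp->binder);

	if (!node) {
		node = binder_new_node(proc, fp->binder, fp->cookie);
		if (!node)
			return -ENOMEM;
	}
#endif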
538 
539 static int binder_inc_node(struct binder_node *node, int strong, int internal,
540 			   struct list_head *target_list)
541 {
542 	if (strong) {
543 		if (internal) {
544 			if (target_list == NULL &&
545 			    node->internal_strong_refs == 0 &&
546 			    !(node->proc &&
547 			      node == node->proc->context->binder_context_mgr_node &&
548 			      node->has_strong_ref)) {
549 				pr_err("invalid inc strong node for %d\n",
550 					node->debug_id);
551 				return -EINVAL;
552 			}
553 			node->internal_strong_refs++;
554 		} else
555 			node->local_strong_refs++;
556 		if (!node->has_strong_ref && target_list) {
557 			list_del_init(&node->work.entry);
558 			list_add_tail(&node->work.entry, target_list);
559 		}
560 	} else {
561 		if (!internal)
562 			node->local_weak_refs++;
563 		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
564 			if (target_list == NULL) {
565 				pr_err("invalid inc weak node for %d\n",
566 					node->debug_id);
567 				return -EINVAL;
568 			}
569 			list_add_tail(&node->work.entry, target_list);
570 		}
571 	}
572 	return 0;
573 }
574 
575 static int binder_dec_node(struct binder_node *node, int strong, int internal)
576 {
577 	if (strong) {
578 		if (internal)
579 			node->internal_strong_refs--;
580 		else
581 			node->local_strong_refs--;
582 		if (node->local_strong_refs || node->internal_strong_refs)
583 			return 0;
584 	} else {
585 		if (!internal)
586 			node->local_weak_refs--;
587 		if (node->local_weak_refs || !hlist_empty(&node->refs))
588 			return 0;
589 	}
590 	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
591 		if (list_empty(&node->work.entry)) {
592 			list_add_tail(&node->work.entry, &node->proc->todo);
593 			wake_up_interruptible(&node->proc->wait);
594 		}
595 	} else {
596 		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
597 		    !node->local_weak_refs) {
598 			list_del_init(&node->work.entry);
599 			if (node->proc) {
600 				rb_erase(&node->rb_node, &node->proc->nodes);
601 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
602 					     "refless node %d deleted\n",
603 					     node->debug_id);
604 			} else {
605 				spin_lock(&binder_dead_nodes_lock);
606 				hlist_del(&node->dead_node);
607 				spin_unlock(&binder_dead_nodes_lock);
608 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
609 					     "dead node %d deleted\n",
610 					     node->debug_id);
611 			}
612 			kfree(node);
613 			binder_stats_deleted(BINDER_STAT_NODE);
614 		}
615 	}
616 
617 	return 0;
618 }
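/*
 * Counter roles, as a reading of the two helpers above (not authoritative
 * documentation): internal_strong_refs counts strong refs taken on behalf
 * of other processes' binder_refs; local_strong_refs counts strong refs
 * the driver itself holds, e.g. for in-flight transaction buffers that
 * target the node; local_weak_refs is the weak analogue; and every entry
 * on node->refs acts as an implicit weak reference.
 */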
619 
620 
621 static struct binder_ref *binder_get_ref(struct binder_proc *proc,
622 					 u32 desc, bool need_strong_ref)
623 {
624 	struct rb_node *n = proc->refs_by_desc.rb_node;
625 	struct binder_ref *ref;
626 
627 	while (n) {
628 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
629 
630 		if (desc < ref->desc) {
631 			n = n->rb_left;
632 		} else if (desc > ref->desc) {
633 			n = n->rb_right;
634 		} else if (need_strong_ref && !ref->strong) {
635 			binder_user_error("tried to use weak ref as strong ref\n");
636 			return NULL;
637 		} else {
638 			return ref;
639 		}
640 	}
641 	return NULL;
642 }
643 
644 static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
645 						  struct binder_node *node)
646 {
647 	struct rb_node *n;
648 	struct rb_node **p = &proc->refs_by_node.rb_node;
649 	struct rb_node *parent = NULL;
650 	struct binder_ref *ref, *new_ref;
651 	struct binder_context *context = proc->context;
652 
653 	while (*p) {
654 		parent = *p;
655 		ref = rb_entry(parent, struct binder_ref, rb_node_node);
656 
657 		if (node < ref->node)
658 			p = &(*p)->rb_left;
659 		else if (node > ref->node)
660 			p = &(*p)->rb_right;
661 		else
662 			return ref;
663 	}
664 	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
665 	if (new_ref == NULL)
666 		return NULL;
667 	binder_stats_created(BINDER_STAT_REF);
668 	new_ref->debug_id = atomic_inc_return(&binder_last_id);
669 	new_ref->proc = proc;
670 	new_ref->node = node;
671 	rb_link_node(&new_ref->rb_node_node, parent, p);
672 	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
673 
674 	new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
675 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
676 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
677 		if (ref->desc > new_ref->desc)
678 			break;
679 		new_ref->desc = ref->desc + 1;
680 	}
681 
682 	p = &proc->refs_by_desc.rb_node;
683 	while (*p) {
684 		parent = *p;
685 		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
686 
687 		if (new_ref->desc < ref->desc)
688 			p = &(*p)->rb_left;
689 		else if (new_ref->desc > ref->desc)
690 			p = &(*p)->rb_right;
691 		else
692 			BUG();
693 	}
694 	rb_link_node(&new_ref->rb_node_desc, parent, p);
695 	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
696 	hlist_add_head(&new_ref->node_entry, &node->refs);
697 
698 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
699 		     "%d new ref %d desc %d for node %d\n",
700 		      proc->pid, new_ref->debug_id, new_ref->desc,
701 		      node->debug_id);
702 	return new_ref;
703 }
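/*
 * Worked example of the descriptor scan above: desc 0 is reserved for
 * the context manager's node, so a new ref starts at 1. With existing
 * descriptors {1, 2, 4}, the scan advances 1 -> 2 -> 3, stops at 4
 * (4 > 3), and the new ref gets desc 3, the lowest free slot.
 */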
704 
705 static void binder_delete_ref(struct binder_ref *ref)
706 {
707 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
708 		     "%d delete ref %d desc %d for node %d\n",
709 		      ref->proc->pid, ref->debug_id, ref->desc,
710 		      ref->node->debug_id);
711 
712 	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
713 	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
714 	if (ref->strong)
715 		binder_dec_node(ref->node, 1, 1);
716 	hlist_del(&ref->node_entry);
717 	binder_dec_node(ref->node, 0, 1);
718 	if (ref->death) {
719 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
720 			     "%d delete ref %d desc %d has death notification\n",
721 			      ref->proc->pid, ref->debug_id, ref->desc);
722 		list_del(&ref->death->work.entry);
723 		kfree(ref->death);
724 		binder_stats_deleted(BINDER_STAT_DEATH);
725 	}
726 	kfree(ref);
727 	binder_stats_deleted(BINDER_STAT_REF);
728 }
729 
730 static int binder_inc_ref(struct binder_ref *ref, int strong,
731 			  struct list_head *target_list)
732 {
733 	int ret;
734 
735 	if (strong) {
736 		if (ref->strong == 0) {
737 			ret = binder_inc_node(ref->node, 1, 1, target_list);
738 			if (ret)
739 				return ret;
740 		}
741 		ref->strong++;
742 	} else {
743 		if (ref->weak == 0) {
744 			ret = binder_inc_node(ref->node, 0, 1, target_list);
745 			if (ret)
746 				return ret;
747 		}
748 		ref->weak++;
749 	}
750 	return 0;
751 }
752 
753 
754 static int binder_dec_ref(struct binder_ref *ref, int strong)
755 {
756 	if (strong) {
757 		if (ref->strong == 0) {
758 			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
759 					  ref->proc->pid, ref->debug_id,
760 					  ref->desc, ref->strong, ref->weak);
761 			return -EINVAL;
762 		}
763 		ref->strong--;
764 		if (ref->strong == 0) {
765 			int ret;
766 
767 			ret = binder_dec_node(ref->node, strong, 1);
768 			if (ret)
769 				return ret;
770 		}
771 	} else {
772 		if (ref->weak == 0) {
773 			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
774 					  ref->proc->pid, ref->debug_id,
775 					  ref->desc, ref->strong, ref->weak);
776 			return -EINVAL;
777 		}
778 		ref->weak--;
779 	}
780 	if (ref->strong == 0 && ref->weak == 0)
781 		binder_delete_ref(ref);
782 	return 0;
783 }
784 
785 static void binder_pop_transaction(struct binder_thread *target_thread,
786 				   struct binder_transaction *t)
787 {
788 	BUG_ON(!target_thread);
789 	BUG_ON(target_thread->transaction_stack != t);
790 	BUG_ON(target_thread->transaction_stack->from != target_thread);
791 	target_thread->transaction_stack =
792 		target_thread->transaction_stack->from_parent;
793 	t->from = NULL;
794 }
795 
796 /**
797  * binder_thread_dec_tmpref() - decrement thread->tmp_ref
798  * @thread:	thread to decrement
799  *
800  * A thread needs to be kept alive while being used to create or
801  * handle a transaction. binder_get_txn_from() is used to safely
802  * extract t->from from a binder_transaction and keep the thread
803  * indicated by t->from from being freed. When done with that
804  * binder_thread, this function is called to decrement the
805  * tmp_ref and free the thread if appropriate (the thread has been
806  * released and no transaction is being processed by the driver).
807  */
808 static void binder_thread_dec_tmpref(struct binder_thread *thread)
809 {
810 	/*
811 	 * an atomic is used to protect the counter value; the thread
812 	 * is only freed once it is dead and tmp_ref has reached zero
813 	 *
814 	 * TODO: future patch adds locking to ensure that the
815 	 * check of tmp_ref and is_dead is done with a lock held
816 	 */
817 	atomic_dec(&thread->tmp_ref);
818 	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
819 		binder_free_thread(thread);
820 		return;
821 	}
822 }
823 
824 /**
825  * binder_proc_dec_tmpref() - decrement proc->tmp_ref
826  * @proc:	proc to decrement
827  *
828  * A binder_proc needs to be kept alive while being used to create or
829  * handle a transaction. proc->tmp_ref is incremented when
830  * creating a new transaction or the binder_proc is currently in-use
831  * by threads that are being released. When done with the binder_proc,
832  * this function is called to decrement the counter and free the
833  * proc if appropriate (proc has been released, all threads have
834  * been released and it is not currently in use to process a transaction).
835  */
836 static void binder_proc_dec_tmpref(struct binder_proc *proc)
837 {
838 	proc->tmp_ref--;
839 	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
840 			!proc->tmp_ref) {
841 		binder_free_proc(proc);
842 		return;
843 	}
844 }
845 
846 /**
847  * binder_get_txn_from() - safely extract the "from" thread in transaction
848  * @t:	binder transaction for t->from
849  *
850  * Atomically return the "from" thread and increment the tmp_ref
851  * count for the thread to ensure it stays alive until
852  * binder_thread_dec_tmpref() is called.
853  *
854  * Return: the value of t->from
855  */
856 static struct binder_thread *binder_get_txn_from(
857 		struct binder_transaction *t)
858 {
859 	struct binder_thread *from;
860 
861 	spin_lock(&t->lock);
862 	from = t->from;
863 	if (from)
864 		atomic_inc(&from->tmp_ref);
865 	spin_unlock(&t->lock);
866 	return from;
867 }
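/*
 * Typical pairing with binder_thread_dec_tmpref(), as in
 * binder_send_failed_reply() below (sketch):
 */
#if 0
	struct binder_thread *target_thread = binder_get_txn_from(t);

	if (target_thread) {
		/* target_thread cannot be freed while we use it here */
		binder_thread_dec_tmpref(target_thread);
	}
#endif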
868 
869 static void binder_free_transaction(struct binder_transaction *t)
870 {
871 	if (t->buffer)
872 		t->buffer->transaction = NULL;
873 	kfree(t);
874 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
875 }
876 
877 static void binder_send_failed_reply(struct binder_transaction *t,
878 				     uint32_t error_code)
879 {
880 	struct binder_thread *target_thread;
881 	struct binder_transaction *next;
882 
883 	BUG_ON(t->flags & TF_ONE_WAY);
884 	while (1) {
885 		target_thread = binder_get_txn_from(t);
886 		if (target_thread) {
887 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
888 				     "send failed reply for transaction %d to %d:%d\n",
889 				      t->debug_id,
890 				      target_thread->proc->pid,
891 				      target_thread->pid);
892 
893 			binder_pop_transaction(target_thread, t);
894 			if (target_thread->reply_error.cmd == BR_OK) {
895 				target_thread->reply_error.cmd = error_code;
896 				list_add_tail(
897 					&target_thread->reply_error.work.entry,
898 					&target_thread->todo);
899 				wake_up_interruptible(&target_thread->wait);
900 			} else {
901 				WARN(1, "Unexpected reply error: %u\n",
902 						target_thread->reply_error.cmd);
903 			}
904 			binder_thread_dec_tmpref(target_thread);
905 			binder_free_transaction(t);
906 			return;
907 		}
908 		next = t->from_parent;
909 
910 		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
911 			     "send failed reply for transaction %d, target dead\n",
912 			     t->debug_id);
913 
914 		binder_free_transaction(t);
915 		if (next == NULL) {
916 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
917 				     "reply failed, no target thread at root\n");
918 			return;
919 		}
920 		t = next;
921 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
922 			     "reply failed, no target thread -- retry %d\n",
923 			      t->debug_id);
924 	}
925 }
926 
927 /**
928  * binder_validate_object() - checks for a valid metadata object in a buffer.
929  * @buffer:	binder_buffer that we're parsing.
930  * @offset:	offset in the buffer at which to validate an object.
931  *
932  * Return:	If there's a valid metadata object at @offset in @buffer, the
933  *		size of that object. Otherwise, it returns zero.
934  */
935 static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
936 {
937 	/* Check if we can read a header first */
938 	struct binder_object_header *hdr;
939 	size_t object_size = 0;
940 
941 	if (offset > buffer->data_size - sizeof(*hdr) ||
942 	    buffer->data_size < sizeof(*hdr) ||
943 	    !IS_ALIGNED(offset, sizeof(u32)))
944 		return 0;
945 
946 	/* Ok, now see if we can read a complete object. */
947 	hdr = (struct binder_object_header *)(buffer->data + offset);
948 	switch (hdr->type) {
949 	case BINDER_TYPE_BINDER:
950 	case BINDER_TYPE_WEAK_BINDER:
951 	case BINDER_TYPE_HANDLE:
952 	case BINDER_TYPE_WEAK_HANDLE:
953 		object_size = sizeof(struct flat_binder_object);
954 		break;
955 	case BINDER_TYPE_FD:
956 		object_size = sizeof(struct binder_fd_object);
957 		break;
958 	case BINDER_TYPE_PTR:
959 		object_size = sizeof(struct binder_buffer_object);
960 		break;
961 	case BINDER_TYPE_FDA:
962 		object_size = sizeof(struct binder_fd_array_object);
963 		break;
964 	default:
965 		return 0;
966 	}
967 	if (offset <= buffer->data_size - object_size &&
968 	    buffer->data_size >= object_size)
969 		return object_size;
970 	else
971 		return 0;
972 }
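/*
 * Both consumers in this file call it from a loop over the offsets
 * array, treating a zero return as a malformed object (sketch; @buffer,
 * @off_start and @off_end assumed in scope):
 */
#if 0
	for (offp = off_start; offp < off_end; offp++) {
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0)
			continue;	/* or fail the transaction */
	}
#endif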
973 
974 /**
975  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
976  * @b:		binder_buffer containing the object
977  * @index:	index in offset array at which the binder_buffer_object is
978  *		located
979  * @start:	points to the start of the offset array
980  * @num_valid:	the number of valid offsets in the offset array
981  *
982  * Return:	If @index is within the valid range of the offset array
983  *		described by @start and @num_valid, and if there's a valid
984  *		binder_buffer_object at the offset found in index @index
985  *		of the offset array, that object is returned. Otherwise,
986  *		%NULL is returned.
987  *		Note that the offset found in index @index itself is not
988  *		verified; this function assumes that @num_valid elements
989  *		from @start were previously verified to have valid offsets.
990  */
991 static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
992 							binder_size_t index,
993 							binder_size_t *start,
994 							binder_size_t num_valid)
995 {
996 	struct binder_buffer_object *buffer_obj;
997 	binder_size_t *offp;
998 
999 	if (index >= num_valid)
1000 		return NULL;
1001 
1002 	offp = start + index;
1003 	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
1004 	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
1005 		return NULL;
1006 
1007 	return buffer_obj;
1008 }
1009 
1010 /**
1011  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1012  * @b:			transaction buffer
1013  * @objects_start:	start of objects buffer
1014  * @buffer:		binder_buffer_object in which to fix up
1015  * @offset:		start offset in @buffer to fix up
1016  * @last_obj:		last binder_buffer_object that we fixed up in
1017  * @last_min_offset:	minimum fixup offset in @last_obj
1018  *
1019  * Return:		%true if a fixup in buffer @buffer at offset @offset is
1020  *			allowed.
1021  *
1022  * For safety reasons, we only allow fixups inside a buffer to happen
1023  * at increasing offsets; additionally, we only allow fixup on the last
1024  * buffer object that was verified, or one of its parents.
1025  *
1026  * Example of what is allowed:
1027  *
1028  * A
1029  *   B (parent = A, offset = 0)
1030  *   C (parent = A, offset = 16)
1031  *     D (parent = C, offset = 0)
1032  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1033  *
1034  * Examples of what is not allowed:
1035  *
1036  * Decreasing offsets within the same parent:
1037  * A
1038  *   C (parent = A, offset = 16)
1039  *   B (parent = A, offset = 0) // decreasing offset within A
1040  *
1041  * Referring to a parent that wasn't the last object or any of its parents:
1042  * A
1043  *   B (parent = A, offset = 0)
1044  *   C (parent = A, offset = 16)
1045  *     D (parent = B, offset = 0) // B is neither C, the last object
1046  *                                // fixed up in, nor one of C's parents
1047  */
1048 static bool binder_validate_fixup(struct binder_buffer *b,
1049 				  binder_size_t *objects_start,
1050 				  struct binder_buffer_object *buffer,
1051 				  binder_size_t fixup_offset,
1052 				  struct binder_buffer_object *last_obj,
1053 				  binder_size_t last_min_offset)
1054 {
1055 	if (!last_obj) {
1056 		/* No previous object, so there is nothing to fix up in */
1057 		return false;
1058 	}
1059 
1060 	while (last_obj != buffer) {
1061 		/*
1062 		 * Safe to retrieve the parent of last_obj, since it
1063 		 * was already previously verified by the driver.
1064 		 */
1065 		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1066 			return false;
1067 		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
1068 		last_obj = (struct binder_buffer_object *)
1069 			(b->data + *(objects_start + last_obj->parent));
1070 	}
1071 	return (fixup_offset >= last_min_offset);
1072 }
1073 
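/*
 * For reference, the transaction buffer layout assumed below and in
 * binder_transaction(): data first, then the offsets array aligned to a
 * pointer boundary, then any extra (scatter-gather) buffers:
 *
 *	data                   offset 0
 *	offsets[]              ALIGN(data_size, sizeof(void *))
 *	extra buffers (sg)     ... + offsets_size, pointer-aligned
 */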
1074 static void binder_transaction_buffer_release(struct binder_proc *proc,
1075 					      struct binder_buffer *buffer,
1076 					      binder_size_t *failed_at)
1077 {
1078 	binder_size_t *offp, *off_start, *off_end;
1079 	int debug_id = buffer->debug_id;
1080 
1081 	binder_debug(BINDER_DEBUG_TRANSACTION,
1082 		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
1083 		     proc->pid, buffer->debug_id,
1084 		     buffer->data_size, buffer->offsets_size, failed_at);
1085 
1086 	if (buffer->target_node)
1087 		binder_dec_node(buffer->target_node, 1, 0);
1088 
1089 	off_start = (binder_size_t *)(buffer->data +
1090 				      ALIGN(buffer->data_size, sizeof(void *)));
1091 	if (failed_at)
1092 		off_end = failed_at;
1093 	else
1094 		off_end = (void *)off_start + buffer->offsets_size;
1095 	for (offp = off_start; offp < off_end; offp++) {
1096 		struct binder_object_header *hdr;
1097 		size_t object_size = binder_validate_object(buffer, *offp);
1098 
1099 		if (object_size == 0) {
1100 			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
1101 			       debug_id, (u64)*offp, buffer->data_size);
1102 			continue;
1103 		}
1104 		hdr = (struct binder_object_header *)(buffer->data + *offp);
1105 		switch (hdr->type) {
1106 		case BINDER_TYPE_BINDER:
1107 		case BINDER_TYPE_WEAK_BINDER: {
1108 			struct flat_binder_object *fp;
1109 			struct binder_node *node;
1110 
1111 			fp = to_flat_binder_object(hdr);
1112 			node = binder_get_node(proc, fp->binder);
1113 			if (node == NULL) {
1114 				pr_err("transaction release %d bad node %016llx\n",
1115 				       debug_id, (u64)fp->binder);
1116 				break;
1117 			}
1118 			binder_debug(BINDER_DEBUG_TRANSACTION,
1119 				     "        node %d u%016llx\n",
1120 				     node->debug_id, (u64)node->ptr);
1121 			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
1122 					0);
1123 		} break;
1124 		case BINDER_TYPE_HANDLE:
1125 		case BINDER_TYPE_WEAK_HANDLE: {
1126 			struct flat_binder_object *fp;
1127 			struct binder_ref *ref;
1128 
1129 			fp = to_flat_binder_object(hdr);
1130 			ref = binder_get_ref(proc, fp->handle,
1131 					     hdr->type == BINDER_TYPE_HANDLE);
1132 			if (ref == NULL) {
1133 				pr_err("transaction release %d bad handle %d\n",
1134 				 debug_id, fp->handle);
1135 				break;
1136 			}
1137 			binder_debug(BINDER_DEBUG_TRANSACTION,
1138 				     "        ref %d desc %d (node %d)\n",
1139 				     ref->debug_id, ref->desc, ref->node->debug_id);
1140 			binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
1141 		} break;
1142 
1143 		case BINDER_TYPE_FD: {
1144 			struct binder_fd_object *fp = to_binder_fd_object(hdr);
1145 
1146 			binder_debug(BINDER_DEBUG_TRANSACTION,
1147 				     "        fd %d\n", fp->fd);
1148 			if (failed_at)
1149 				task_close_fd(proc, fp->fd);
1150 		} break;
1151 		case BINDER_TYPE_PTR:
1152 			/*
1153 			 * Nothing to do here, this will get cleaned up when the
1154 			 * transaction buffer gets freed
1155 			 */
1156 			break;
1157 		case BINDER_TYPE_FDA: {
1158 			struct binder_fd_array_object *fda;
1159 			struct binder_buffer_object *parent;
1160 			uintptr_t parent_buffer;
1161 			u32 *fd_array;
1162 			size_t fd_index;
1163 			binder_size_t fd_buf_size;
1164 
1165 			fda = to_binder_fd_array_object(hdr);
1166 			parent = binder_validate_ptr(buffer, fda->parent,
1167 						     off_start,
1168 						     offp - off_start);
1169 			if (!parent) {
1170 				pr_err("transaction release %d bad parent offset",
1171 				       debug_id);
1172 				continue;
1173 			}
1174 			/*
1175 			 * Since the parent was already fixed up, convert it
1176 			 * back to kernel address space to access it
1177 			 */
1178 			parent_buffer = parent->buffer -
1179 				binder_alloc_get_user_buffer_offset(
1180 						&proc->alloc);
1181 
1182 			fd_buf_size = sizeof(u32) * fda->num_fds;
1183 			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
1184 				pr_err("transaction release %d invalid number of fds (%lld)\n",
1185 				       debug_id, (u64)fda->num_fds);
1186 				continue;
1187 			}
1188 			if (fd_buf_size > parent->length ||
1189 			    fda->parent_offset > parent->length - fd_buf_size) {
1190 				/* No space for all file descriptors here. */
1191 				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
1192 				       debug_id, (u64)fda->num_fds);
1193 				continue;
1194 			}
1195 			fd_array = (u32 *)(parent_buffer + fda->parent_offset);
1196 			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
1197 				task_close_fd(proc, fd_array[fd_index]);
1198 		} break;
1199 		default:
1200 			pr_err("transaction release %d bad object type %x\n",
1201 				debug_id, hdr->type);
1202 			break;
1203 		}
1204 	}
1205 }
1206 
1207 static int binder_translate_binder(struct flat_binder_object *fp,
1208 				   struct binder_transaction *t,
1209 				   struct binder_thread *thread)
1210 {
1211 	struct binder_node *node;
1212 	struct binder_ref *ref;
1213 	struct binder_proc *proc = thread->proc;
1214 	struct binder_proc *target_proc = t->to_proc;
1215 
1216 	node = binder_get_node(proc, fp->binder);
1217 	if (!node) {
1218 		node = binder_new_node(proc, fp->binder, fp->cookie);
1219 		if (!node)
1220 			return -ENOMEM;
1221 
1222 		node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1223 		node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1224 	}
1225 	if (fp->cookie != node->cookie) {
1226 		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
1227 				  proc->pid, thread->pid, (u64)fp->binder,
1228 				  node->debug_id, (u64)fp->cookie,
1229 				  (u64)node->cookie);
1230 		return -EINVAL;
1231 	}
1232 	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
1233 		return -EPERM;
1234 
1235 	ref = binder_get_ref_for_node(target_proc, node);
1236 	if (!ref)
1237 		return -ENOMEM;
1238 
1239 	if (fp->hdr.type == BINDER_TYPE_BINDER)
1240 		fp->hdr.type = BINDER_TYPE_HANDLE;
1241 	else
1242 		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
1243 	fp->binder = 0;
1244 	fp->handle = ref->desc;
1245 	fp->cookie = 0;
1246 	binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);
1247 
1248 	trace_binder_transaction_node_to_ref(t, node, ref);
1249 	binder_debug(BINDER_DEBUG_TRANSACTION,
1250 		     "        node %d u%016llx -> ref %d desc %d\n",
1251 		     node->debug_id, (u64)node->ptr,
1252 		     ref->debug_id, ref->desc);
1253 
1254 	return 0;
1255 }
1256 
1257 static int binder_translate_handle(struct flat_binder_object *fp,
1258 				   struct binder_transaction *t,
1259 				   struct binder_thread *thread)
1260 {
1261 	struct binder_ref *ref;
1262 	struct binder_proc *proc = thread->proc;
1263 	struct binder_proc *target_proc = t->to_proc;
1264 
1265 	ref = binder_get_ref(proc, fp->handle,
1266 			     fp->hdr.type == BINDER_TYPE_HANDLE);
1267 	if (!ref) {
1268 		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
1269 				  proc->pid, thread->pid, fp->handle);
1270 		return -EINVAL;
1271 	}
1272 	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
1273 		return -EPERM;
1274 
1275 	if (ref->node->proc == target_proc) {
1276 		if (fp->hdr.type == BINDER_TYPE_HANDLE)
1277 			fp->hdr.type = BINDER_TYPE_BINDER;
1278 		else
1279 			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
1280 		fp->binder = ref->node->ptr;
1281 		fp->cookie = ref->node->cookie;
1282 		binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
1283 				0, NULL);
1284 		trace_binder_transaction_ref_to_node(t, ref);
1285 		binder_debug(BINDER_DEBUG_TRANSACTION,
1286 			     "        ref %d desc %d -> node %d u%016llx\n",
1287 			     ref->debug_id, ref->desc, ref->node->debug_id,
1288 			     (u64)ref->node->ptr);
1289 	} else {
1290 		struct binder_ref *new_ref;
1291 
1292 		new_ref = binder_get_ref_for_node(target_proc, ref->node);
1293 		if (!new_ref)
1294 			return -ENOMEM;
1295 
1296 		fp->binder = 0;
1297 		fp->handle = new_ref->desc;
1298 		fp->cookie = 0;
1299 		binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
1300 			       NULL);
1301 		trace_binder_transaction_ref_to_ref(t, ref, new_ref);
1302 		binder_debug(BINDER_DEBUG_TRANSACTION,
1303 			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
1304 			     ref->debug_id, ref->desc, new_ref->debug_id,
1305 			     new_ref->desc, ref->node->debug_id);
1306 	}
1307 	return 0;
1308 }
1309 
1310 static int binder_translate_fd(int fd,
1311 			       struct binder_transaction *t,
1312 			       struct binder_thread *thread,
1313 			       struct binder_transaction *in_reply_to)
1314 {
1315 	struct binder_proc *proc = thread->proc;
1316 	struct binder_proc *target_proc = t->to_proc;
1317 	int target_fd;
1318 	struct file *file;
1319 	int ret;
1320 	bool target_allows_fd;
1321 
1322 	if (in_reply_to)
1323 		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
1324 	else
1325 		target_allows_fd = t->buffer->target_node->accept_fds;
1326 	if (!target_allows_fd) {
1327 		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
1328 				  proc->pid, thread->pid,
1329 				  in_reply_to ? "reply" : "transaction",
1330 				  fd);
1331 		ret = -EPERM;
1332 		goto err_fd_not_accepted;
1333 	}
1334 
1335 	file = fget(fd);
1336 	if (!file) {
1337 		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
1338 				  proc->pid, thread->pid, fd);
1339 		ret = -EBADF;
1340 		goto err_fget;
1341 	}
1342 	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
1343 	if (ret < 0) {
1344 		ret = -EPERM;
1345 		goto err_security;
1346 	}
1347 
1348 	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
1349 	if (target_fd < 0) {
1350 		ret = -ENOMEM;
1351 		goto err_get_unused_fd;
1352 	}
1353 	task_fd_install(target_proc, target_fd, file);
1354 	trace_binder_transaction_fd(t, fd, target_fd);
1355 	binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
1356 		     fd, target_fd);
1357 
1358 	return target_fd;
1359 
1360 err_get_unused_fd:
1361 err_security:
1362 	fput(file);
1363 err_fget:
1364 err_fd_not_accepted:
1365 	return ret;
1366 }
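/*
 * Net effect (a summary of the function above): the sender's fd is
 * resolved to a struct file, a free descriptor is allocated in the
 * *target* process with O_CLOEXEC, and the file is installed there, so
 * the number written back into the object is only meaningful in the
 * target.
 */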
1367 
1368 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
1369 				     struct binder_buffer_object *parent,
1370 				     struct binder_transaction *t,
1371 				     struct binder_thread *thread,
1372 				     struct binder_transaction *in_reply_to)
1373 {
1374 	binder_size_t fdi, fd_buf_size, num_installed_fds;
1375 	int target_fd;
1376 	uintptr_t parent_buffer;
1377 	u32 *fd_array;
1378 	struct binder_proc *proc = thread->proc;
1379 	struct binder_proc *target_proc = t->to_proc;
1380 
1381 	fd_buf_size = sizeof(u32) * fda->num_fds;
1382 	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
1383 		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
1384 				  proc->pid, thread->pid, (u64)fda->num_fds);
1385 		return -EINVAL;
1386 	}
1387 	if (fd_buf_size > parent->length ||
1388 	    fda->parent_offset > parent->length - fd_buf_size) {
1389 		/* No space for all file descriptors here. */
1390 		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
1391 				  proc->pid, thread->pid, (u64)fda->num_fds);
1392 		return -EINVAL;
1393 	}
1394 	/*
1395 	 * Since the parent was already fixed up, convert it
1396 	 * back to the kernel address space to access it
1397 	 */
1398 	parent_buffer = parent->buffer -
1399 		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
1400 	fd_array = (u32 *)(parent_buffer + fda->parent_offset);
1401 	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
1402 		binder_user_error("%d:%d parent offset not aligned correctly.\n",
1403 				  proc->pid, thread->pid);
1404 		return -EINVAL;
1405 	}
1406 	for (fdi = 0; fdi < fda->num_fds; fdi++) {
1407 		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
1408 						in_reply_to);
1409 		if (target_fd < 0)
1410 			goto err_translate_fd_failed;
1411 		fd_array[fdi] = target_fd;
1412 	}
1413 	return 0;
1414 
1415 err_translate_fd_failed:
1416 	/*
1417 	 * Failed to allocate fd or security error, free fds
1418 	 * installed so far.
1419 	 */
1420 	num_installed_fds = fdi;
1421 	for (fdi = 0; fdi < num_installed_fds; fdi++)
1422 		task_close_fd(target_proc, fd_array[fdi]);
1423 	return target_fd;
1424 }
1425 
1426 static int binder_fixup_parent(struct binder_transaction *t,
1427 			       struct binder_thread *thread,
1428 			       struct binder_buffer_object *bp,
1429 			       binder_size_t *off_start,
1430 			       binder_size_t num_valid,
1431 			       struct binder_buffer_object *last_fixup_obj,
1432 			       binder_size_t last_fixup_min_off)
1433 {
1434 	struct binder_buffer_object *parent;
1435 	u8 *parent_buffer;
1436 	struct binder_buffer *b = t->buffer;
1437 	struct binder_proc *proc = thread->proc;
1438 	struct binder_proc *target_proc = t->to_proc;
1439 
1440 	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
1441 		return 0;
1442 
1443 	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
1444 	if (!parent) {
1445 		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
1446 				  proc->pid, thread->pid);
1447 		return -EINVAL;
1448 	}
1449 
1450 	if (!binder_validate_fixup(b, off_start,
1451 				   parent, bp->parent_offset,
1452 				   last_fixup_obj,
1453 				   last_fixup_min_off)) {
1454 		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
1455 				  proc->pid, thread->pid);
1456 		return -EINVAL;
1457 	}
1458 
1459 	if (parent->length < sizeof(binder_uintptr_t) ||
1460 	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
1461 		/* No space for a pointer here! */
1462 		binder_user_error("%d:%d got transaction with invalid parent offset\n",
1463 				  proc->pid, thread->pid);
1464 		return -EINVAL;
1465 	}
1466 	parent_buffer = (u8 *)(parent->buffer -
1467 			binder_alloc_get_user_buffer_offset(
1468 				&target_proc->alloc));
1469 	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
1470 
1471 	return 0;
1472 }
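/*
 * Worked example of the final store above (illustrative numbers): if
 * @bp has parent_offset == 16, the sender placed a pointer to @bp's
 * payload at byte 16 of the parent's payload; since the parent has
 * already been copied into the target, that slot is rewritten with
 * @bp->buffer, the payload's new user-space address in the target.
 */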
1473 
1474 static void binder_transaction(struct binder_proc *proc,
1475 			       struct binder_thread *thread,
1476 			       struct binder_transaction_data *tr, int reply,
1477 			       binder_size_t extra_buffers_size)
1478 {
1479 	int ret;
1480 	struct binder_transaction *t;
1481 	struct binder_work *tcomplete;
1482 	binder_size_t *offp, *off_end, *off_start;
1483 	binder_size_t off_min;
1484 	u8 *sg_bufp, *sg_buf_end;
1485 	struct binder_proc *target_proc = NULL;
1486 	struct binder_thread *target_thread = NULL;
1487 	struct binder_node *target_node = NULL;
1488 	struct list_head *target_list;
1489 	wait_queue_head_t *target_wait;
1490 	struct binder_transaction *in_reply_to = NULL;
1491 	struct binder_transaction_log_entry *e;
1492 	uint32_t return_error = 0;
1493 	uint32_t return_error_param = 0;
1494 	uint32_t return_error_line = 0;
1495 	struct binder_buffer_object *last_fixup_obj = NULL;
1496 	binder_size_t last_fixup_min_off = 0;
1497 	struct binder_context *context = proc->context;
1498 	int t_debug_id = atomic_inc_return(&binder_last_id);
1499 
1500 	e = binder_transaction_log_add(&binder_transaction_log);
1501 	e->debug_id = t_debug_id;
1502 	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
1503 	e->from_proc = proc->pid;
1504 	e->from_thread = thread->pid;
1505 	e->target_handle = tr->target.handle;
1506 	e->data_size = tr->data_size;
1507 	e->offsets_size = tr->offsets_size;
1508 	e->context_name = proc->context->name;
1509 
1510 	if (reply) {
1511 		in_reply_to = thread->transaction_stack;
1512 		if (in_reply_to == NULL) {
1513 			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
1514 					  proc->pid, thread->pid);
1515 			return_error = BR_FAILED_REPLY;
1516 			return_error_param = -EPROTO;
1517 			return_error_line = __LINE__;
1518 			goto err_empty_call_stack;
1519 		}
1520 		binder_set_nice(in_reply_to->saved_priority);
1521 		if (in_reply_to->to_thread != thread) {
1522 			spin_lock(&in_reply_to->lock);
1523 			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
1524 				proc->pid, thread->pid, in_reply_to->debug_id,
1525 				in_reply_to->to_proc ?
1526 				in_reply_to->to_proc->pid : 0,
1527 				in_reply_to->to_thread ?
1528 				in_reply_to->to_thread->pid : 0);
1529 			spin_unlock(&in_reply_to->lock);
1530 			return_error = BR_FAILED_REPLY;
1531 			return_error_param = -EPROTO;
1532 			return_error_line = __LINE__;
1533 			in_reply_to = NULL;
1534 			goto err_bad_call_stack;
1535 		}
1536 		thread->transaction_stack = in_reply_to->to_parent;
1537 		target_thread = binder_get_txn_from(in_reply_to);
1538 		if (target_thread == NULL) {
1539 			return_error = BR_DEAD_REPLY;
1540 			return_error_line = __LINE__;
1541 			goto err_dead_binder;
1542 		}
1543 		if (target_thread->transaction_stack != in_reply_to) {
1544 			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
1545 				proc->pid, thread->pid,
1546 				target_thread->transaction_stack ?
1547 				target_thread->transaction_stack->debug_id : 0,
1548 				in_reply_to->debug_id);
1549 			return_error = BR_FAILED_REPLY;
1550 			return_error_param = -EPROTO;
1551 			return_error_line = __LINE__;
1552 			in_reply_to = NULL;
1553 			target_thread = NULL;
1554 			goto err_dead_binder;
1555 		}
1556 		target_proc = target_thread->proc;
1557 		target_proc->tmp_ref++;
1558 	} else {
1559 		if (tr->target.handle) {
1560 			struct binder_ref *ref;
1561 
1562 			/*
1563 			 * There must already be a strong ref
1564 			 * on this node. If so, do a strong
1565 			 * increment on the node to ensure it
1566 			 * stays alive until the transaction is
1567 			 * done.
1568 			 */
1569 			ref = binder_get_ref(proc, tr->target.handle, true);
1570 			if (ref) {
1571 				binder_inc_node(ref->node, 1, 0, NULL);
1572 				target_node = ref->node;
1573 			}
1574 			if (target_node == NULL) {
1575 				binder_user_error("%d:%d got transaction to invalid handle\n",
1576 					proc->pid, thread->pid);
1577 				return_error = BR_FAILED_REPLY;
1578 				return_error_param = -EINVAL;
1579 				return_error_line = __LINE__;
1580 				goto err_invalid_target_handle;
1581 			}
1582 		} else {
1583 			mutex_lock(&context->context_mgr_node_lock);
1584 			target_node = context->binder_context_mgr_node;
1585 			if (target_node == NULL) {
1586 				return_error = BR_DEAD_REPLY;
1587 				mutex_unlock(&context->context_mgr_node_lock);
1588 				return_error_line = __LINE__;
1589 				goto err_no_context_mgr_node;
1590 			}
1591 			binder_inc_node(target_node, 1, 0, NULL);
1592 			mutex_unlock(&context->context_mgr_node_lock);
1593 		}
1594 		e->to_node = target_node->debug_id;
1595 		target_proc = target_node->proc;
1596 		if (target_proc == NULL) {
1597 			return_error = BR_DEAD_REPLY;
1598 			return_error_line = __LINE__;
1599 			goto err_dead_binder;
1600 		}
1601 		target_proc->tmp_ref++;
1602 		if (security_binder_transaction(proc->tsk,
1603 						target_proc->tsk) < 0) {
1604 			return_error = BR_FAILED_REPLY;
1605 			return_error_param = -EPERM;
1606 			return_error_line = __LINE__;
1607 			goto err_invalid_target_handle;
1608 		}
1609 		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
1610 			struct binder_transaction *tmp;
1611 
1612 			tmp = thread->transaction_stack;
1613 			if (tmp->to_thread != thread) {
1614 				spin_lock(&tmp->lock);
1615 				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
1616 					proc->pid, thread->pid, tmp->debug_id,
1617 					tmp->to_proc ? tmp->to_proc->pid : 0,
1618 					tmp->to_thread ?
1619 					tmp->to_thread->pid : 0);
1620 				spin_unlock(&tmp->lock);
1621 				return_error = BR_FAILED_REPLY;
1622 				return_error_param = -EPROTO;
1623 				return_error_line = __LINE__;
1624 				goto err_bad_call_stack;
1625 			}
1626 			while (tmp) {
1627 				struct binder_thread *from;
1628 
1629 				spin_lock(&tmp->lock);
1630 				from = tmp->from;
1631 				if (from && from->proc == target_proc) {
1632 					atomic_inc(&from->tmp_ref);
1633 					target_thread = from;
1634 					spin_unlock(&tmp->lock);
1635 					break;
1636 				}
1637 				spin_unlock(&tmp->lock);
1638 				tmp = tmp->from_parent;
1639 			}
1640 		}
1641 	}
1642 	if (target_thread) {
1643 		e->to_thread = target_thread->pid;
1644 		target_list = &target_thread->todo;
1645 		target_wait = &target_thread->wait;
1646 	} else {
1647 		target_list = &target_proc->todo;
1648 		target_wait = &target_proc->wait;
1649 	}
1650 	e->to_proc = target_proc->pid;
1651 
1652 	/* TODO: reuse incoming transaction for reply */
1653 	t = kzalloc(sizeof(*t), GFP_KERNEL);
1654 	if (t == NULL) {
1655 		return_error = BR_FAILED_REPLY;
1656 		return_error_param = -ENOMEM;
1657 		return_error_line = __LINE__;
1658 		goto err_alloc_t_failed;
1659 	}
1660 	binder_stats_created(BINDER_STAT_TRANSACTION);
1661 	spin_lock_init(&t->lock);
1662 
1663 	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
1664 	if (tcomplete == NULL) {
1665 		return_error = BR_FAILED_REPLY;
1666 		return_error_param = -ENOMEM;
1667 		return_error_line = __LINE__;
1668 		goto err_alloc_tcomplete_failed;
1669 	}
1670 	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
1671 
1672 	t->debug_id = t_debug_id;
1673 
1674 	if (reply)
1675 		binder_debug(BINDER_DEBUG_TRANSACTION,
1676 			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
1677 			     proc->pid, thread->pid, t->debug_id,
1678 			     target_proc->pid, target_thread->pid,
1679 			     (u64)tr->data.ptr.buffer,
1680 			     (u64)tr->data.ptr.offsets,
1681 			     (u64)tr->data_size, (u64)tr->offsets_size,
1682 			     (u64)extra_buffers_size);
1683 	else
1684 		binder_debug(BINDER_DEBUG_TRANSACTION,
1685 			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
1686 			     proc->pid, thread->pid, t->debug_id,
1687 			     target_proc->pid, target_node->debug_id,
1688 			     (u64)tr->data.ptr.buffer,
1689 			     (u64)tr->data.ptr.offsets,
1690 			     (u64)tr->data_size, (u64)tr->offsets_size,
1691 			     (u64)extra_buffers_size);
1692 
1693 	if (!reply && !(tr->flags & TF_ONE_WAY))
1694 		t->from = thread;
1695 	else
1696 		t->from = NULL;
1697 	t->sender_euid = task_euid(proc->tsk);
1698 	t->to_proc = target_proc;
1699 	t->to_thread = target_thread;
1700 	t->code = tr->code;
1701 	t->flags = tr->flags;
1702 	t->priority = task_nice(current);
1703 
1704 	trace_binder_transaction(reply, t, target_node);
1705 
1706 	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
1707 		tr->offsets_size, extra_buffers_size,
1708 		!reply && (t->flags & TF_ONE_WAY));
1709 	if (IS_ERR(t->buffer)) {
1710 		/*
1711 		 * -ESRCH indicates VMA cleared. The target is dying.
1712 		 */
1713 		return_error_param = PTR_ERR(t->buffer);
1714 		return_error = return_error_param == -ESRCH ?
1715 			BR_DEAD_REPLY : BR_FAILED_REPLY;
1716 		return_error_line = __LINE__;
1717 		t->buffer = NULL;
1718 		goto err_binder_alloc_buf_failed;
1719 	}
1720 	t->buffer->allow_user_free = 0;
1721 	t->buffer->debug_id = t->debug_id;
1722 	t->buffer->transaction = t;
1723 	t->buffer->target_node = target_node;
1724 	trace_binder_transaction_alloc_buf(t->buffer);
1725 	off_start = (binder_size_t *)(t->buffer->data +
1726 				      ALIGN(tr->data_size, sizeof(void *)));
1727 	offp = off_start;
1728 
1729 	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
1730 			   tr->data.ptr.buffer, tr->data_size)) {
1731 		binder_user_error("%d:%d got transaction with invalid data ptr\n",
1732 				proc->pid, thread->pid);
1733 		return_error = BR_FAILED_REPLY;
1734 		return_error_param = -EFAULT;
1735 		return_error_line = __LINE__;
1736 		goto err_copy_data_failed;
1737 	}
1738 	if (copy_from_user(offp, (const void __user *)(uintptr_t)
1739 			   tr->data.ptr.offsets, tr->offsets_size)) {
1740 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
1741 				proc->pid, thread->pid);
1742 		return_error = BR_FAILED_REPLY;
1743 		return_error_param = -EFAULT;
1744 		return_error_line = __LINE__;
1745 		goto err_copy_data_failed;
1746 	}
1747 	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
1748 		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
1749 				proc->pid, thread->pid, (u64)tr->offsets_size);
1750 		return_error = BR_FAILED_REPLY;
1751 		return_error_param = -EINVAL;
1752 		return_error_line = __LINE__;
1753 		goto err_bad_offset;
1754 	}
1755 	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
1756 		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
1757 				  proc->pid, thread->pid,
1758 				  (u64)extra_buffers_size);
1759 		return_error = BR_FAILED_REPLY;
1760 		return_error_param = -EINVAL;
1761 		return_error_line = __LINE__;
1762 		goto err_bad_offset;
1763 	}
1764 	off_end = (void *)off_start + tr->offsets_size;
1765 	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
1766 	sg_buf_end = sg_bufp + extra_buffers_size;
1767 	off_min = 0;
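	/*
	 * Validate and translate every object named by the offsets array.
	 * off_min is advanced past each validated object, so overlapping
	 * and out-of-order offsets are rejected by a single comparison.
	 */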
1768 	for (; offp < off_end; offp++) {
1769 		struct binder_object_header *hdr;
1770 		size_t object_size = binder_validate_object(t->buffer, *offp);
1771 
1772 		if (object_size == 0 || *offp < off_min) {
1773 			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
1774 					  proc->pid, thread->pid, (u64)*offp,
1775 					  (u64)off_min,
1776 					  (u64)t->buffer->data_size);
1777 			return_error = BR_FAILED_REPLY;
1778 			return_error_param = -EINVAL;
1779 			return_error_line = __LINE__;
1780 			goto err_bad_offset;
1781 		}
1782 
1783 		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
1784 		off_min = *offp + object_size;
1785 		switch (hdr->type) {
1786 		case BINDER_TYPE_BINDER:
1787 		case BINDER_TYPE_WEAK_BINDER: {
1788 			struct flat_binder_object *fp;
1789 
1790 			fp = to_flat_binder_object(hdr);
1791 			ret = binder_translate_binder(fp, t, thread);
1792 			if (ret < 0) {
1793 				return_error = BR_FAILED_REPLY;
1794 				return_error_param = ret;
1795 				return_error_line = __LINE__;
1796 				goto err_translate_failed;
1797 			}
1798 		} break;
1799 		case BINDER_TYPE_HANDLE:
1800 		case BINDER_TYPE_WEAK_HANDLE: {
1801 			struct flat_binder_object *fp;
1802 
1803 			fp = to_flat_binder_object(hdr);
1804 			ret = binder_translate_handle(fp, t, thread);
1805 			if (ret < 0) {
1806 				return_error = BR_FAILED_REPLY;
1807 				return_error_param = ret;
1808 				return_error_line = __LINE__;
1809 				goto err_translate_failed;
1810 			}
1811 		} break;
1812 
1813 		case BINDER_TYPE_FD: {
1814 			struct binder_fd_object *fp = to_binder_fd_object(hdr);
1815 			int target_fd = binder_translate_fd(fp->fd, t, thread,
1816 							    in_reply_to);
1817 
1818 			if (target_fd < 0) {
1819 				return_error = BR_FAILED_REPLY;
1820 				return_error_param = target_fd;
1821 				return_error_line = __LINE__;
1822 				goto err_translate_failed;
1823 			}
1824 			fp->pad_binder = 0;
1825 			fp->fd = target_fd;
1826 		} break;
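		/*
		 * BINDER_TYPE_FDA describes an array of file descriptors
		 * embedded in a parent BINDER_TYPE_PTR buffer. The parent
		 * and the fixup ordering are validated before any fd is
		 * translated into the target process.
		 */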
1827 		case BINDER_TYPE_FDA: {
1828 			struct binder_fd_array_object *fda =
1829 				to_binder_fd_array_object(hdr);
1830 			struct binder_buffer_object *parent =
1831 				binder_validate_ptr(t->buffer, fda->parent,
1832 						    off_start,
1833 						    offp - off_start);
1834 			if (!parent) {
1835 				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
1836 						  proc->pid, thread->pid);
1837 				return_error = BR_FAILED_REPLY;
1838 				return_error_param = -EINVAL;
1839 				return_error_line = __LINE__;
1840 				goto err_bad_parent;
1841 			}
1842 			if (!binder_validate_fixup(t->buffer, off_start,
1843 						   parent, fda->parent_offset,
1844 						   last_fixup_obj,
1845 						   last_fixup_min_off)) {
1846 				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
1847 						  proc->pid, thread->pid);
1848 				return_error = BR_FAILED_REPLY;
1849 				return_error_param = -EINVAL;
1850 				return_error_line = __LINE__;
1851 				goto err_bad_parent;
1852 			}
1853 			ret = binder_translate_fd_array(fda, parent, t, thread,
1854 							in_reply_to);
1855 			if (ret < 0) {
1856 				return_error = BR_FAILED_REPLY;
1857 				return_error_param = ret;
1858 				return_error_line = __LINE__;
1859 				goto err_translate_failed;
1860 			}
1861 			last_fixup_obj = parent;
1862 			last_fixup_min_off =
1863 				fda->parent_offset + sizeof(u32) * fda->num_fds;
1864 		} break;
1865 		case BINDER_TYPE_PTR: {
1866 			struct binder_buffer_object *bp =
1867 				to_binder_buffer_object(hdr);
1868 			size_t buf_left = sg_buf_end - sg_bufp;
1869 
1870 			if (bp->length > buf_left) {
1871 				binder_user_error("%d:%d got transaction with too large buffer\n",
1872 						  proc->pid, thread->pid);
1873 				return_error = BR_FAILED_REPLY;
1874 				return_error_param = -EINVAL;
1875 				return_error_line = __LINE__;
1876 				goto err_bad_offset;
1877 			}
1878 			if (copy_from_user(sg_bufp,
1879 					   (const void __user *)(uintptr_t)
1880 					   bp->buffer, bp->length)) {
1881 				binder_user_error("%d:%d got transaction with invalid sg buffer ptr\n",
1882 						  proc->pid, thread->pid);
1883 				return_error = BR_FAILED_REPLY;
1884 				return_error_param = -EFAULT;
1885 				return_error_line = __LINE__;
1886 				goto err_copy_data_failed;
1887 			}
1888 			/* Fixup buffer pointer to target proc address space */
1889 			bp->buffer = (uintptr_t)sg_bufp +
1890 				binder_alloc_get_user_buffer_offset(
1891 						&target_proc->alloc);
1892 			sg_bufp += ALIGN(bp->length, sizeof(u64));
1893 
1894 			ret = binder_fixup_parent(t, thread, bp, off_start,
1895 						  offp - off_start,
1896 						  last_fixup_obj,
1897 						  last_fixup_min_off);
1898 			if (ret < 0) {
1899 				return_error = BR_FAILED_REPLY;
1900 				return_error_param = ret;
1901 				return_error_line = __LINE__;
1902 				goto err_translate_failed;
1903 			}
1904 			last_fixup_obj = bp;
1905 			last_fixup_min_off = 0;
1906 		} break;
1907 		default:
1908 			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
1909 				proc->pid, thread->pid, hdr->type);
1910 			return_error = BR_FAILED_REPLY;
1911 			return_error_param = -EINVAL;
1912 			return_error_line = __LINE__;
1913 			goto err_bad_object_type;
1914 		}
1915 	}
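	/*
	 * All objects translated; queue the work. The sender always gets
	 * a TRANSACTION_COMPLETE on its own todo list. Replies go to the
	 * originating thread, two-way transactions are stacked on the
	 * sending thread, and one-way transactions are parked on the
	 * node's async_todo when one is already in flight.
	 */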
1916 	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
1917 	list_add_tail(&tcomplete->entry, &thread->todo);
1918 
1919 	if (reply) {
1920 		if (target_thread->is_dead)
1921 			goto err_dead_proc_or_thread;
1922 		BUG_ON(t->buffer->async_transaction != 0);
1923 		binder_pop_transaction(target_thread, in_reply_to);
1924 		binder_free_transaction(in_reply_to);
1925 	} else if (!(t->flags & TF_ONE_WAY)) {
1926 		BUG_ON(t->buffer->async_transaction != 0);
1927 		t->need_reply = 1;
1928 		t->from_parent = thread->transaction_stack;
1929 		thread->transaction_stack = t;
1930 		if (target_proc->is_dead ||
1931 				(target_thread && target_thread->is_dead)) {
1932 			binder_pop_transaction(thread, t);
1933 			goto err_dead_proc_or_thread;
1934 		}
1935 	} else {
1936 		BUG_ON(target_node == NULL);
1937 		BUG_ON(t->buffer->async_transaction != 1);
1938 		if (target_node->has_async_transaction) {
1939 			target_list = &target_node->async_todo;
1940 			target_wait = NULL;
1941 		} else
1942 			target_node->has_async_transaction = 1;
1943 		if (target_proc->is_dead ||
1944 				(target_thread && target_thread->is_dead))
1945 			goto err_dead_proc_or_thread;
1946 	}
1947 	t->work.type = BINDER_WORK_TRANSACTION;
1948 	list_add_tail(&t->work.entry, target_list);
1949 	if (target_wait) {
1950 		if (reply || !(tr->flags & TF_ONE_WAY))
1951 			wake_up_interruptible_sync(target_wait);
1952 		else
1953 			wake_up_interruptible(target_wait);
1954 	}
1955 	if (target_thread)
1956 		binder_thread_dec_tmpref(target_thread);
1957 	binder_proc_dec_tmpref(target_proc);
1958 	/*
1959 	 * write barrier to synchronize with initialization
1960 	 * of log entry
1961 	 */
1962 	smp_wmb();
1963 	WRITE_ONCE(e->debug_id_done, t_debug_id);
1964 	return;
1965 
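	/*
	 * The error labels below unwind in reverse order of acquisition:
	 * buffer release first, then the tcomplete and t allocations,
	 * then the temporary references on the target thread, process
	 * and node.
	 */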
1966 err_dead_proc_or_thread:
1967 	return_error = BR_DEAD_REPLY;
1968 	return_error_line = __LINE__;
1969 err_translate_failed:
1970 err_bad_object_type:
1971 err_bad_offset:
1972 err_bad_parent:
1973 err_copy_data_failed:
1974 	trace_binder_transaction_failed_buffer_release(t->buffer);
1975 	binder_transaction_buffer_release(target_proc, t->buffer, offp);
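	/*
	 * buffer_release above already dropped the reference held via
	 * t->buffer->target_node, so clear target_node to avoid a second
	 * binder_dec_node() below (assumption based on the release path).
	 */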
1976 	target_node = NULL;
1977 	t->buffer->transaction = NULL;
1978 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
1979 err_binder_alloc_buf_failed:
1980 	kfree(tcomplete);
1981 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
1982 err_alloc_tcomplete_failed:
1983 	kfree(t);
1984 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
1985 err_alloc_t_failed:
1986 err_bad_call_stack:
1987 err_empty_call_stack:
1988 err_dead_binder:
1989 err_invalid_target_handle:
1990 err_no_context_mgr_node:
1991 	if (target_thread)
1992 		binder_thread_dec_tmpref(target_thread);
1993 	if (target_proc)
1994 		binder_proc_dec_tmpref(target_proc);
1995 	if (target_node)
1996 		binder_dec_node(target_node, 1, 0);
1997 
1998 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1999 		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
2000 		     proc->pid, thread->pid, return_error, return_error_param,
2001 		     (u64)tr->data_size, (u64)tr->offsets_size,
2002 		     return_error_line);
2003 
2004 	{
2005 		struct binder_transaction_log_entry *fe;
2006 
2007 		e->return_error = return_error;
2008 		e->return_error_param = return_error_param;
2009 		e->return_error_line = return_error_line;
2010 		fe = binder_transaction_log_add(&binder_transaction_log_failed);
2011 		*fe = *e;
2012 		/*
2013 		 * write barrier to synchronize with initialization
2014 		 * of log entry
2015 		 */
2016 		smp_wmb();
2017 		WRITE_ONCE(e->debug_id_done, t_debug_id);
2018 		WRITE_ONCE(fe->debug_id_done, t_debug_id);
2019 	}
2020 
2021 	BUG_ON(thread->return_error.cmd != BR_OK);
2022 	if (in_reply_to) {
2023 		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
2024 		list_add_tail(&thread->return_error.work.entry,
2025 			      &thread->todo);
2026 		binder_send_failed_reply(in_reply_to, return_error);
2027 	} else {
2028 		thread->return_error.cmd = return_error;
2029 		list_add_tail(&thread->return_error.work.entry,
2030 			      &thread->todo);
2031 	}
2032 }
2033 
2034 static int binder_thread_write(struct binder_proc *proc,
2035 			struct binder_thread *thread,
2036 			binder_uintptr_t binder_buffer, size_t size,
2037 			binder_size_t *consumed)
2038 {
2039 	uint32_t cmd;
2040 	struct binder_context *context = proc->context;
2041 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
2042 	void __user *ptr = buffer + *consumed;
2043 	void __user *end = buffer + size;
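	/*
	 * The write buffer is a packed stream of commands, each a u32
	 * BC_* code followed by a command-specific payload, e.g.:
	 *
	 *   BC_TRANSACTION | struct binder_transaction_data
	 *   BC_FREE_BUFFER | binder_uintptr_t
	 *
	 * Commands are consumed until the buffer is exhausted or a
	 * pending return error stops processing.
	 */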
2044 
2045 	while (ptr < end && thread->return_error.cmd == BR_OK) {
2046 		if (get_user(cmd, (uint32_t __user *)ptr))
2047 			return -EFAULT;
2048 		ptr += sizeof(uint32_t);
2049 		trace_binder_command(cmd);
2050 		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
2051 			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
2052 			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
2053 			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
2054 		}
2055 		switch (cmd) {
2056 		case BC_INCREFS:
2057 		case BC_ACQUIRE:
2058 		case BC_RELEASE:
2059 		case BC_DECREFS: {
2060 			uint32_t target;
2061 			struct binder_ref *ref = NULL;
2062 			const char *debug_string;
2063 
2064 			if (get_user(target, (uint32_t __user *)ptr))
2065 				return -EFAULT;
2066 
2067 			ptr += sizeof(uint32_t);
2068 			if (target == 0 &&
2069 			    (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
2070 				struct binder_node *ctx_mgr_node;
2071 
2072 				mutex_lock(&context->context_mgr_node_lock);
2073 				ctx_mgr_node = context->binder_context_mgr_node;
2074 				if (ctx_mgr_node) {
2075 					ref = binder_get_ref_for_node(proc,
2076 							ctx_mgr_node);
2077 					if (ref && ref->desc != target) {
2078 						binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
2079 							proc->pid, thread->pid,
2080 							ref->desc);
2081 					}
2082 				}
2083 				mutex_unlock(&context->context_mgr_node_lock);
2084 			}
2085 			if (ref == NULL)
2086 				ref = binder_get_ref(proc, target,
2087 						     cmd == BC_ACQUIRE ||
2088 						     cmd == BC_RELEASE);
2089 			if (ref == NULL) {
2090 				binder_user_error("%d:%d refcount change on invalid ref %d\n",
2091 					proc->pid, thread->pid, target);
2092 				break;
2093 			}
2094 			switch (cmd) {
2095 			case BC_INCREFS:
2096 				debug_string = "IncRefs";
2097 				binder_inc_ref(ref, 0, NULL);
2098 				break;
2099 			case BC_ACQUIRE:
2100 				debug_string = "Acquire";
2101 				binder_inc_ref(ref, 1, NULL);
2102 				break;
2103 			case BC_RELEASE:
2104 				debug_string = "Release";
2105 				binder_dec_ref(ref, 1);
2106 				break;
2107 			case BC_DECREFS:
2108 			default:
2109 				debug_string = "DecRefs";
2110 				binder_dec_ref(ref, 0);
2111 				break;
2112 			}
2113 			binder_debug(BINDER_DEBUG_USER_REFS,
2114 				     "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
2115 				     proc->pid, thread->pid, debug_string, ref->debug_id,
2116 				     ref->desc, ref->strong, ref->weak, ref->node->debug_id);
2117 			break;
2118 		}
2119 		case BC_INCREFS_DONE:
2120 		case BC_ACQUIRE_DONE: {
2121 			binder_uintptr_t node_ptr;
2122 			binder_uintptr_t cookie;
2123 			struct binder_node *node;
2124 
2125 			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
2126 				return -EFAULT;
2127 			ptr += sizeof(binder_uintptr_t);
2128 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2129 				return -EFAULT;
2130 			ptr += sizeof(binder_uintptr_t);
2131 			node = binder_get_node(proc, node_ptr);
2132 			if (node == NULL) {
2133 				binder_user_error("%d:%d %s u%016llx no match\n",
2134 					proc->pid, thread->pid,
2135 					cmd == BC_INCREFS_DONE ?
2136 					"BC_INCREFS_DONE" :
2137 					"BC_ACQUIRE_DONE",
2138 					(u64)node_ptr);
2139 				break;
2140 			}
2141 			if (cookie != node->cookie) {
2142 				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
2143 					proc->pid, thread->pid,
2144 					cmd == BC_INCREFS_DONE ?
2145 					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
2146 					(u64)node_ptr, node->debug_id,
2147 					(u64)cookie, (u64)node->cookie);
2148 				break;
2149 			}
2150 			if (cmd == BC_ACQUIRE_DONE) {
2151 				if (node->pending_strong_ref == 0) {
2152 					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
2153 						proc->pid, thread->pid,
2154 						node->debug_id);
2155 					break;
2156 				}
2157 				node->pending_strong_ref = 0;
2158 			} else {
2159 				if (node->pending_weak_ref == 0) {
2160 					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
2161 						proc->pid, thread->pid,
2162 						node->debug_id);
2163 					break;
2164 				}
2165 				node->pending_weak_ref = 0;
2166 			}
2167 			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
2168 			binder_debug(BINDER_DEBUG_USER_REFS,
2169 				     "%d:%d %s node %d ls %d lw %d\n",
2170 				     proc->pid, thread->pid,
2171 				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
2172 				     node->debug_id, node->local_strong_refs, node->local_weak_refs);
2173 			break;
2174 		}
2175 		case BC_ATTEMPT_ACQUIRE:
2176 			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
2177 			return -EINVAL;
2178 		case BC_ACQUIRE_RESULT:
2179 			pr_err("BC_ACQUIRE_RESULT not supported\n");
2180 			return -EINVAL;
2181 
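		/*
		 * BC_FREE_BUFFER hands a transaction buffer back to the
		 * driver once userspace has consumed it. If the buffer
		 * carried an async transaction, the next queued async
		 * work for the same node is released to this thread.
		 */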
2182 		case BC_FREE_BUFFER: {
2183 			binder_uintptr_t data_ptr;
2184 			struct binder_buffer *buffer;
2185 
2186 			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
2187 				return -EFAULT;
2188 			ptr += sizeof(binder_uintptr_t);
2189 
2190 			buffer = binder_alloc_prepare_to_free(&proc->alloc,
2191 							      data_ptr);
2192 			if (buffer == NULL) {
2193 				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
2194 					proc->pid, thread->pid, (u64)data_ptr);
2195 				break;
2196 			}
2197 			if (!buffer->allow_user_free) {
2198 				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
2199 					proc->pid, thread->pid, (u64)data_ptr);
2200 				break;
2201 			}
2202 			binder_debug(BINDER_DEBUG_FREE_BUFFER,
2203 				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
2204 				     proc->pid, thread->pid, (u64)data_ptr,
2205 				     buffer->debug_id,
2206 				     buffer->transaction ? "active" : "finished");
2207 
2208 			if (buffer->transaction) {
2209 				buffer->transaction->buffer = NULL;
2210 				buffer->transaction = NULL;
2211 			}
2212 			if (buffer->async_transaction && buffer->target_node) {
2213 				BUG_ON(!buffer->target_node->has_async_transaction);
2214 				if (list_empty(&buffer->target_node->async_todo))
2215 					buffer->target_node->has_async_transaction = 0;
2216 				else
2217 					list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
2218 			}
2219 			trace_binder_transaction_buffer_release(buffer);
2220 			binder_transaction_buffer_release(proc, buffer, NULL);
2221 			binder_alloc_free_buf(&proc->alloc, buffer);
2222 			break;
2223 		}
2224 
2225 		case BC_TRANSACTION_SG:
2226 		case BC_REPLY_SG: {
2227 			struct binder_transaction_data_sg tr;
2228 
2229 			if (copy_from_user(&tr, ptr, sizeof(tr)))
2230 				return -EFAULT;
2231 			ptr += sizeof(tr);
2232 			binder_transaction(proc, thread, &tr.transaction_data,
2233 					   cmd == BC_REPLY_SG, tr.buffers_size);
2234 			break;
2235 		}
2236 		case BC_TRANSACTION:
2237 		case BC_REPLY: {
2238 			struct binder_transaction_data tr;
2239 
2240 			if (copy_from_user(&tr, ptr, sizeof(tr)))
2241 				return -EFAULT;
2242 			ptr += sizeof(tr);
2243 			binder_transaction(proc, thread, &tr,
2244 					   cmd == BC_REPLY, 0);
2245 			break;
2246 		}
2247 
2248 		case BC_REGISTER_LOOPER:
2249 			binder_debug(BINDER_DEBUG_THREADS,
2250 				     "%d:%d BC_REGISTER_LOOPER\n",
2251 				     proc->pid, thread->pid);
2252 			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
2253 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
2254 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
2255 					proc->pid, thread->pid);
2256 			} else if (proc->requested_threads == 0) {
2257 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
2258 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
2259 					proc->pid, thread->pid);
2260 			} else {
2261 				proc->requested_threads--;
2262 				proc->requested_threads_started++;
2263 			}
2264 			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
2265 			break;
2266 		case BC_ENTER_LOOPER:
2267 			binder_debug(BINDER_DEBUG_THREADS,
2268 				     "%d:%d BC_ENTER_LOOPER\n",
2269 				     proc->pid, thread->pid);
2270 			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
2271 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
2272 				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
2273 					proc->pid, thread->pid);
2274 			}
2275 			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
2276 			break;
2277 		case BC_EXIT_LOOPER:
2278 			binder_debug(BINDER_DEBUG_THREADS,
2279 				     "%d:%d BC_EXIT_LOOPER\n",
2280 				     proc->pid, thread->pid);
2281 			thread->looper |= BINDER_LOOPER_STATE_EXITED;
2282 			break;
2283 
2284 		case BC_REQUEST_DEATH_NOTIFICATION:
2285 		case BC_CLEAR_DEATH_NOTIFICATION: {
2286 			uint32_t target;
2287 			binder_uintptr_t cookie;
2288 			struct binder_ref *ref;
2289 			struct binder_ref_death *death;
2290 
2291 			if (get_user(target, (uint32_t __user *)ptr))
2292 				return -EFAULT;
2293 			ptr += sizeof(uint32_t);
2294 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2295 				return -EFAULT;
2296 			ptr += sizeof(binder_uintptr_t);
2297 			ref = binder_get_ref(proc, target, false);
2298 			if (ref == NULL) {
2299 				binder_user_error("%d:%d %s invalid ref %d\n",
2300 					proc->pid, thread->pid,
2301 					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2302 					"BC_REQUEST_DEATH_NOTIFICATION" :
2303 					"BC_CLEAR_DEATH_NOTIFICATION",
2304 					target);
2305 				break;
2306 			}
2307 
2308 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2309 				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
2310 				     proc->pid, thread->pid,
2311 				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2312 				     "BC_REQUEST_DEATH_NOTIFICATION" :
2313 				     "BC_CLEAR_DEATH_NOTIFICATION",
2314 				     (u64)cookie, ref->debug_id, ref->desc,
2315 				     ref->strong, ref->weak, ref->node->debug_id);
2316 
2317 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
2318 				if (ref->death) {
2319 					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
2320 						proc->pid, thread->pid);
2321 					break;
2322 				}
2323 				death = kzalloc(sizeof(*death), GFP_KERNEL);
2324 				if (death == NULL) {
2325 					WARN_ON(thread->return_error.cmd !=
2326 						BR_OK);
2327 					thread->return_error.cmd = BR_ERROR;
2328 					list_add_tail(
2329 					    &thread->return_error.work.entry,
2330 					    &thread->todo);
2331 					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2332 						     "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
2333 						     proc->pid, thread->pid);
2334 					break;
2335 				}
2336 				binder_stats_created(BINDER_STAT_DEATH);
2337 				INIT_LIST_HEAD(&death->work.entry);
2338 				death->cookie = cookie;
2339 				ref->death = death;
2340 				if (ref->node->proc == NULL) {
2341 					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2342 					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2343 						list_add_tail(&ref->death->work.entry, &thread->todo);
2344 					} else {
2345 						list_add_tail(&ref->death->work.entry, &proc->todo);
2346 						wake_up_interruptible(&proc->wait);
2347 					}
2348 				}
2349 			} else {
2350 				if (ref->death == NULL) {
2351 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
2352 						proc->pid, thread->pid);
2353 					break;
2354 				}
2355 				death = ref->death;
2356 				if (death->cookie != cookie) {
2357 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
2358 						proc->pid, thread->pid,
2359 						(u64)death->cookie,
2360 						(u64)cookie);
2361 					break;
2362 				}
2363 				ref->death = NULL;
2364 				if (list_empty(&death->work.entry)) {
2365 					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2366 					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2367 						list_add_tail(&death->work.entry, &thread->todo);
2368 					} else {
2369 						list_add_tail(&death->work.entry, &proc->todo);
2370 						wake_up_interruptible(&proc->wait);
2371 					}
2372 				} else {
2373 					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2374 					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2375 				}
2376 			}
2377 		} break;
2378 		case BC_DEAD_BINDER_DONE: {
2379 			struct binder_work *w;
2380 			binder_uintptr_t cookie;
2381 			struct binder_ref_death *death = NULL;
2382 
2383 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2384 				return -EFAULT;
2385 
2386 			ptr += sizeof(cookie);
2387 			list_for_each_entry(w, &proc->delivered_death, entry) {
2388 				struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
2389 
2390 				if (tmp_death->cookie == cookie) {
2391 					death = tmp_death;
2392 					break;
2393 				}
2394 			}
2395 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
2396 				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
2397 				     proc->pid, thread->pid, (u64)cookie,
2398 				     death);
2399 			if (death == NULL) {
2400 				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
2401 					proc->pid, thread->pid, (u64)cookie);
2402 				break;
2403 			}
2404 
2405 			list_del_init(&death->work.entry);
2406 			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2407 				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2408 				if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2409 					list_add_tail(&death->work.entry, &thread->todo);
2410 				} else {
2411 					list_add_tail(&death->work.entry, &proc->todo);
2412 					wake_up_interruptible(&proc->wait);
2413 				}
2414 			}
2415 		} break;
2416 
2417 		default:
2418 			pr_err("%d:%d unknown command %d\n",
2419 			       proc->pid, thread->pid, cmd);
2420 			return -EINVAL;
2421 		}
2422 		*consumed = ptr - buffer;
2423 	}
2424 	return 0;
2425 }
2426 
2427 static void binder_stat_br(struct binder_proc *proc,
2428 			   struct binder_thread *thread, uint32_t cmd)
2429 {
2430 	trace_binder_return(cmd);
2431 	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
2432 		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
2433 		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
2434 		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
2435 	}
2436 }
2437 
2438 static int binder_has_proc_work(struct binder_proc *proc,
2439 				struct binder_thread *thread)
2440 {
2441 	return !list_empty(&proc->todo) || thread->looper_need_return;
2442 }
2443 
2444 static int binder_has_thread_work(struct binder_thread *thread)
2445 {
2446 	return !list_empty(&thread->todo) || thread->looper_need_return;
2447 }
2448 
2449 static int binder_put_node_cmd(struct binder_proc *proc,
2450 			       struct binder_thread *thread,
2451 			       void __user **ptrp,
2452 			       binder_uintptr_t node_ptr,
2453 			       binder_uintptr_t node_cookie,
2454 			       int node_debug_id,
2455 			       uint32_t cmd, const char *cmd_name)
2456 {
2457 	void __user *ptr = *ptrp;
2458 
2459 	if (put_user(cmd, (uint32_t __user *)ptr))
2460 		return -EFAULT;
2461 	ptr += sizeof(uint32_t);
2462 
2463 	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
2464 		return -EFAULT;
2465 	ptr += sizeof(binder_uintptr_t);
2466 
2467 	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
2468 		return -EFAULT;
2469 	ptr += sizeof(binder_uintptr_t);
2470 
2471 	binder_stat_br(proc, thread, cmd);
2472 	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
2473 		     proc->pid, thread->pid, cmd_name, node_debug_id,
2474 		     (u64)node_ptr, (u64)node_cookie);
2475 
2476 	*ptrp = ptr;
2477 	return 0;
2478 }
2479 
2480 static int binder_thread_read(struct binder_proc *proc,
2481 			      struct binder_thread *thread,
2482 			      binder_uintptr_t binder_buffer, size_t size,
2483 			      binder_size_t *consumed, int non_block)
2484 {
2485 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
2486 	void __user *ptr = buffer + *consumed;
2487 	void __user *end = buffer + size;
2488 
2489 	int ret = 0;
2490 	int wait_for_proc_work;
2491 
2492 	if (*consumed == 0) {
2493 		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
2494 			return -EFAULT;
2495 		ptr += sizeof(uint32_t);
2496 	}
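	/*
	 * A fresh read always begins with BR_NOOP, so userspace can rely
	 * on receiving at least one command word; the "ptr - buffer == 4"
	 * test in the loop below uses this to detect that nothing else
	 * was added.
	 */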
2497 
2498 retry:
2499 	wait_for_proc_work = thread->transaction_stack == NULL &&
2500 				list_empty(&thread->todo);
2501 
2502 	thread->looper |= BINDER_LOOPER_STATE_WAITING;
2503 	if (wait_for_proc_work)
2504 		proc->ready_threads++;
2505 
2506 	binder_unlock(__func__);
2507 
2508 	trace_binder_wait_for_work(wait_for_proc_work,
2509 				   !!thread->transaction_stack,
2510 				   !list_empty(&thread->todo));
2511 	if (wait_for_proc_work) {
2512 		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2513 					BINDER_LOOPER_STATE_ENTERED))) {
2514 			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
2515 				proc->pid, thread->pid, thread->looper);
2516 			wait_event_interruptible(binder_user_error_wait,
2517 						 binder_stop_on_user_error < 2);
2518 		}
2519 		binder_set_nice(proc->default_priority);
2520 		if (non_block) {
2521 			if (!binder_has_proc_work(proc, thread))
2522 				ret = -EAGAIN;
2523 		} else
2524 			ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
2525 	} else {
2526 		if (non_block) {
2527 			if (!binder_has_thread_work(thread))
2528 				ret = -EAGAIN;
2529 		} else
2530 			ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
2531 	}
2532 
2533 	binder_lock(__func__);
2534 
2535 	if (wait_for_proc_work)
2536 		proc->ready_threads--;
2537 	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
2538 
2539 	if (ret)
2540 		return ret;
2541 
2542 	while (1) {
2543 		uint32_t cmd;
2544 		struct binder_transaction_data tr;
2545 		struct binder_work *w;
2546 		struct binder_transaction *t = NULL;
2547 		struct binder_thread *t_from;
2548 
2549 		if (!list_empty(&thread->todo)) {
2550 			w = list_first_entry(&thread->todo, struct binder_work,
2551 					     entry);
2552 		} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
2553 			w = list_first_entry(&proc->todo, struct binder_work,
2554 					     entry);
2555 		} else {
2556 			/* no data added */
2557 			if (ptr - buffer == 4 && !thread->looper_need_return)
2558 				goto retry;
2559 			break;
2560 		}
2561 
2562 		if (end - ptr < sizeof(tr) + 4)
2563 			break;
2564 
2565 		switch (w->type) {
2566 		case BINDER_WORK_TRANSACTION: {
2567 			t = container_of(w, struct binder_transaction, work);
2568 		} break;
2569 		case BINDER_WORK_RETURN_ERROR: {
2570 			struct binder_error *e = container_of(
2571 					w, struct binder_error, work);
2572 
2573 			WARN_ON(e->cmd == BR_OK);
2574 			cmd = e->cmd;
2575 			if (put_user(cmd, (uint32_t __user *)ptr))
2576 				return -EFAULT;
2577 			e->cmd = BR_OK;
2578 			ptr += sizeof(uint32_t);
2579 			binder_stat_br(proc, thread, cmd);
2580 			list_del(&w->entry);
2581 		} break;
2582 		case BINDER_WORK_TRANSACTION_COMPLETE: {
2583 			cmd = BR_TRANSACTION_COMPLETE;
2584 			if (put_user(cmd, (uint32_t __user *)ptr))
2585 				return -EFAULT;
2586 			ptr += sizeof(uint32_t);
2587 
2588 			binder_stat_br(proc, thread, cmd);
2589 			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
2590 				     "%d:%d BR_TRANSACTION_COMPLETE\n",
2591 				     proc->pid, thread->pid);
2592 
2593 			list_del(&w->entry);
2594 			kfree(w);
2595 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2596 		} break;
2597 		case BINDER_WORK_NODE: {
2598 			struct binder_node *node = container_of(w, struct binder_node, work);
2599 			int strong, weak;
2600 			binder_uintptr_t node_ptr = node->ptr;
2601 			binder_uintptr_t node_cookie = node->cookie;
2602 			int node_debug_id = node->debug_id;
2603 			int has_weak_ref;
2604 			int has_strong_ref;
2605 			void __user *orig_ptr = ptr;
2606 
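			/*
			 * Compare the refcount state the node should have
			 * against what userspace currently holds
			 * (has_strong_ref/has_weak_ref) and emit the
			 * matching BR_INCREFS/BR_ACQUIRE/BR_RELEASE/
			 * BR_DECREFS deltas below.
			 */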
2607 			BUG_ON(proc != node->proc);
2608 			strong = node->internal_strong_refs ||
2609 					node->local_strong_refs;
2610 			weak = !hlist_empty(&node->refs) ||
2611 					node->local_weak_refs || strong;
2612 			has_strong_ref = node->has_strong_ref;
2613 			has_weak_ref = node->has_weak_ref;
2614 
2615 			if (weak && !has_weak_ref) {
2616 				node->has_weak_ref = 1;
2617 				node->pending_weak_ref = 1;
2618 				node->local_weak_refs++;
2619 			}
2620 			if (strong && !has_strong_ref) {
2621 				node->has_strong_ref = 1;
2622 				node->pending_strong_ref = 1;
2623 				node->local_strong_refs++;
2624 			}
2625 			if (!strong && has_strong_ref)
2626 				node->has_strong_ref = 0;
2627 			if (!weak && has_weak_ref)
2628 				node->has_weak_ref = 0;
2629 			list_del(&w->entry);
2630 
2631 			if (!weak && !strong) {
2632 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2633 					     "%d:%d node %d u%016llx c%016llx deleted\n",
2634 					     proc->pid, thread->pid,
2635 					     node_debug_id,
2636 					     (u64)node_ptr,
2637 					     (u64)node_cookie);
2638 				rb_erase(&node->rb_node, &proc->nodes);
2639 				kfree(node);
2640 				binder_stats_deleted(BINDER_STAT_NODE);
2641 			}
2642 			if (weak && !has_weak_ref)
2643 				ret = binder_put_node_cmd(
2644 						proc, thread, &ptr, node_ptr,
2645 						node_cookie, node_debug_id,
2646 						BR_INCREFS, "BR_INCREFS");
2647 			if (!ret && strong && !has_strong_ref)
2648 				ret = binder_put_node_cmd(
2649 						proc, thread, &ptr, node_ptr,
2650 						node_cookie, node_debug_id,
2651 						BR_ACQUIRE, "BR_ACQUIRE");
2652 			if (!ret && !strong && has_strong_ref)
2653 				ret = binder_put_node_cmd(
2654 						proc, thread, &ptr, node_ptr,
2655 						node_cookie, node_debug_id,
2656 						BR_RELEASE, "BR_RELEASE");
2657 			if (!ret && !weak && has_weak_ref)
2658 				ret = binder_put_node_cmd(
2659 						proc, thread, &ptr, node_ptr,
2660 						node_cookie, node_debug_id,
2661 						BR_DECREFS, "BR_DECREFS");
2662 			if (orig_ptr == ptr)
2663 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2664 					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
2665 					     proc->pid, thread->pid,
2666 					     node_debug_id,
2667 					     (u64)node_ptr,
2668 					     (u64)node_cookie);
2669 			if (ret)
2670 				return ret;
2671 		} break;
2672 		case BINDER_WORK_DEAD_BINDER:
2673 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2674 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2675 			struct binder_ref_death *death;
2676 			uint32_t cmd;
2677 
2678 			death = container_of(w, struct binder_ref_death, work);
2679 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
2680 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
2681 			else
2682 				cmd = BR_DEAD_BINDER;
2683 			if (put_user(cmd, (uint32_t __user *)ptr))
2684 				return -EFAULT;
2685 			ptr += sizeof(uint32_t);
2686 			if (put_user(death->cookie,
2687 				     (binder_uintptr_t __user *)ptr))
2688 				return -EFAULT;
2689 			ptr += sizeof(binder_uintptr_t);
2690 			binder_stat_br(proc, thread, cmd);
2691 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2692 				     "%d:%d %s %016llx\n",
2693 				     proc->pid, thread->pid,
2694 				     cmd == BR_DEAD_BINDER ?
2695 				     "BR_DEAD_BINDER" :
2696 				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
2697 				     (u64)death->cookie);
2698 
2699 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
2700 				list_del(&w->entry);
2701 				kfree(death);
2702 				binder_stats_deleted(BINDER_STAT_DEATH);
2703 			} else
2704 				list_move(&w->entry, &proc->delivered_death);
2705 			if (cmd == BR_DEAD_BINDER)
2706 				goto done; /* DEAD_BINDER notifications can cause transactions */
2707 		} break;
2708 		}
2709 
2710 		if (!t)
2711 			continue;
2712 
2713 		BUG_ON(t->buffer == NULL);
2714 		if (t->buffer->target_node) {
2715 			struct binder_node *target_node = t->buffer->target_node;
2716 
2717 			tr.target.ptr = target_node->ptr;
2718 			tr.cookie = target_node->cookie;
2719 			t->saved_priority = task_nice(current);
2720 			if (t->priority < target_node->min_priority &&
2721 			    !(t->flags & TF_ONE_WAY))
2722 				binder_set_nice(t->priority);
2723 			else if (!(t->flags & TF_ONE_WAY) ||
2724 				 t->saved_priority > target_node->min_priority)
2725 				binder_set_nice(target_node->min_priority);
2726 			cmd = BR_TRANSACTION;
2727 		} else {
2728 			tr.target.ptr = 0;
2729 			tr.cookie = 0;
2730 			cmd = BR_REPLY;
2731 		}
2732 		tr.code = t->code;
2733 		tr.flags = t->flags;
2734 		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
2735 
2736 		t_from = binder_get_txn_from(t);
2737 		if (t_from) {
2738 			struct task_struct *sender = t_from->proc->tsk;
2739 
2740 			tr.sender_pid = task_tgid_nr_ns(sender,
2741 							task_active_pid_ns(current));
2742 		} else {
2743 			tr.sender_pid = 0;
2744 		}
2745 
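		/*
		 * The transaction data was copied directly into the
		 * receiver's buffer; binder_alloc_get_user_buffer_offset()
		 * supplies the fixed delta between the kernel mapping and
		 * the receiver's mmap'ed view of the same pages, so no
		 * second copy is needed here.
		 */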
2746 		tr.data_size = t->buffer->data_size;
2747 		tr.offsets_size = t->buffer->offsets_size;
2748 		tr.data.ptr.buffer = (binder_uintptr_t)
2749 			((uintptr_t)t->buffer->data +
2750 			binder_alloc_get_user_buffer_offset(&proc->alloc));
2751 		tr.data.ptr.offsets = tr.data.ptr.buffer +
2752 					ALIGN(t->buffer->data_size,
2753 					    sizeof(void *));
2754 
2755 		if (put_user(cmd, (uint32_t __user *)ptr)) {
2756 			if (t_from)
2757 				binder_thread_dec_tmpref(t_from);
2758 			return -EFAULT;
2759 		}
2760 		ptr += sizeof(uint32_t);
2761 		if (copy_to_user(ptr, &tr, sizeof(tr))) {
2762 			if (t_from)
2763 				binder_thread_dec_tmpref(t_from);
2764 			return -EFAULT;
2765 		}
2766 		ptr += sizeof(tr);
2767 
2768 		trace_binder_transaction_received(t);
2769 		binder_stat_br(proc, thread, cmd);
2770 		binder_debug(BINDER_DEBUG_TRANSACTION,
2771 			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
2772 			     proc->pid, thread->pid,
2773 			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
2774 			     "BR_REPLY",
2775 			     t->debug_id, t_from ? t_from->proc->pid : 0,
2776 			     t_from ? t_from->pid : 0, cmd,
2777 			     t->buffer->data_size, t->buffer->offsets_size,
2778 			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
2779 
2780 		if (t_from)
2781 			binder_thread_dec_tmpref(t_from);
2782 		list_del(&t->work.entry);
2783 		t->buffer->allow_user_free = 1;
2784 		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
2785 			t->to_parent = thread->transaction_stack;
2786 			t->to_thread = thread;
2787 			thread->transaction_stack = t;
2788 		} else {
2789 			binder_free_transaction(t);
2790 		}
2791 		break;
2792 	}
2793 
2794 done:
2795 
2796 	*consumed = ptr - buffer;
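	/*
	 * If no spare looper threads remain and more may be started, ask
	 * userspace to spawn one. BR_SPAWN_LOOPER is written at the very
	 * start of the read buffer (over the BR_NOOP normally placed
	 * there), not at the current position.
	 */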
2797 	if (proc->requested_threads + proc->ready_threads == 0 &&
2798 	    proc->requested_threads_started < proc->max_threads &&
2799 	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2800 	     BINDER_LOOPER_STATE_ENTERED))
2801 	    /* user-space fails to spawn a new thread if we leave this out */) {
2802 		proc->requested_threads++;
2803 		binder_debug(BINDER_DEBUG_THREADS,
2804 			     "%d:%d BR_SPAWN_LOOPER\n",
2805 			     proc->pid, thread->pid);
2806 		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
2807 			return -EFAULT;
2808 		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
2809 	}
2810 	return 0;
2811 }
2812 
2813 static void binder_release_work(struct list_head *list)
2814 {
2815 	struct binder_work *w;
2816 
2817 	while (!list_empty(list)) {
2818 		w = list_first_entry(list, struct binder_work, entry);
2819 		list_del_init(&w->entry);
2820 		switch (w->type) {
2821 		case BINDER_WORK_TRANSACTION: {
2822 			struct binder_transaction *t;
2823 
2824 			t = container_of(w, struct binder_transaction, work);
2825 			if (t->buffer->target_node &&
2826 			    !(t->flags & TF_ONE_WAY)) {
2827 				binder_send_failed_reply(t, BR_DEAD_REPLY);
2828 			} else {
2829 				binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2830 					"undelivered transaction %d\n",
2831 					t->debug_id);
2832 				binder_free_transaction(t);
2833 			}
2834 		} break;
2835 		case BINDER_WORK_RETURN_ERROR: {
2836 			struct binder_error *e = container_of(
2837 					w, struct binder_error, work);
2838 
2839 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2840 				"undelivered TRANSACTION_ERROR: %u\n",
2841 				e->cmd);
2842 		} break;
2843 		case BINDER_WORK_TRANSACTION_COMPLETE: {
2844 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2845 				"undelivered TRANSACTION_COMPLETE\n");
2846 			kfree(w);
2847 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2848 		} break;
2849 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2850 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2851 			struct binder_ref_death *death;
2852 
2853 			death = container_of(w, struct binder_ref_death, work);
2854 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2855 				"undelivered death notification, %016llx\n",
2856 				(u64)death->cookie);
2857 			kfree(death);
2858 			binder_stats_deleted(BINDER_STAT_DEATH);
2859 		} break;
2860 		default:
2861 			pr_err("unexpected work type, %d, not freed\n",
2862 			       w->type);
2863 			break;
2864 		}
2865 	}
2866 
2867 }
2868 
2869 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
2870 {
2871 	struct binder_thread *thread = NULL;
2872 	struct rb_node *parent = NULL;
2873 	struct rb_node **p = &proc->threads.rb_node;
2874 
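	/*
	 * Threads live in a per-proc rbtree keyed by pid; if the walk
	 * finds no entry for the calling thread, a new one is allocated
	 * and linked in at the leaf position the walk ended on.
	 */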
2875 	while (*p) {
2876 		parent = *p;
2877 		thread = rb_entry(parent, struct binder_thread, rb_node);
2878 
2879 		if (current->pid < thread->pid)
2880 			p = &(*p)->rb_left;
2881 		else if (current->pid > thread->pid)
2882 			p = &(*p)->rb_right;
2883 		else
2884 			break;
2885 	}
2886 	if (*p == NULL) {
2887 		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
2888 		if (thread == NULL)
2889 			return NULL;
2890 		binder_stats_created(BINDER_STAT_THREAD);
2891 		thread->proc = proc;
2892 		thread->pid = current->pid;
2893 		atomic_set(&thread->tmp_ref, 0);
2894 		init_waitqueue_head(&thread->wait);
2895 		INIT_LIST_HEAD(&thread->todo);
2896 		rb_link_node(&thread->rb_node, parent, p);
2897 		rb_insert_color(&thread->rb_node, &proc->threads);
2898 		thread->looper_need_return = true;
2899 		thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
2900 		thread->return_error.cmd = BR_OK;
2901 		thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
2902 		thread->reply_error.cmd = BR_OK;
2903 	}
2904 	return thread;
2905 }
2906 
2907 static void binder_free_proc(struct binder_proc *proc)
2908 {
2909 	BUG_ON(!list_empty(&proc->todo));
2910 	BUG_ON(!list_empty(&proc->delivered_death));
2911 	binder_alloc_deferred_release(&proc->alloc);
2912 	put_task_struct(proc->tsk);
2913 	binder_stats_deleted(BINDER_STAT_PROC);
2914 	kfree(proc);
2915 }
2916 
2917 static void binder_free_thread(struct binder_thread *thread)
2918 {
2919 	BUG_ON(!list_empty(&thread->todo));
2920 	binder_stats_deleted(BINDER_STAT_THREAD);
2921 	binder_proc_dec_tmpref(thread->proc);
2922 	kfree(thread);
2923 }
2924 
2925 static int binder_thread_release(struct binder_proc *proc,
2926 				 struct binder_thread *thread)
2927 {
2928 	struct binder_transaction *t;
2929 	struct binder_transaction *send_reply = NULL;
2930 	int active_transactions = 0;
2931 	struct binder_transaction *last_t = NULL;
2932 
2933 	/*
2934 	 * take a ref on the proc so it survives
2935 	 * after we remove this thread from proc->threads.
2936 	 * The corresponding decrement happens when the thread
2937 	 * is actually freed in binder_free_thread().
2938 	 */
2939 	proc->tmp_ref++;
2940 	/*
2941 	 * take a ref on this thread to ensure it
2942 	 * survives while we are releasing it
2943 	 */
2944 	atomic_inc(&thread->tmp_ref);
2945 	rb_erase(&thread->rb_node, &proc->threads);
2946 	t = thread->transaction_stack;
2947 	if (t) {
2948 		spin_lock(&t->lock);
2949 		if (t->to_thread == thread)
2950 			send_reply = t;
2951 	}
2952 	thread->is_dead = true;
2953 
2954 	while (t) {
2955 		last_t = t;
2956 		active_transactions++;
2957 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2958 			     "release %d:%d transaction %d %s, still active\n",
2959 			     proc->pid, thread->pid,
2960 			     t->debug_id,
2961 			     (t->to_thread == thread) ? "in" : "out");
2962 
2963 		if (t->to_thread == thread) {
2964 			t->to_proc = NULL;
2965 			t->to_thread = NULL;
2966 			if (t->buffer) {
2967 				t->buffer->transaction = NULL;
2968 				t->buffer = NULL;
2969 			}
2970 			t = t->to_parent;
2971 		} else if (t->from == thread) {
2972 			t->from = NULL;
2973 			t = t->from_parent;
2974 		} else
2975 			BUG();
2976 		spin_unlock(&last_t->lock);
2977 		if (t)
2978 			spin_lock(&t->lock);
2979 	}
2980 
2981 	if (send_reply)
2982 		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
2983 	binder_release_work(&thread->todo);
2984 	binder_thread_dec_tmpref(thread);
2985 	return active_transactions;
2986 }
2987 
2988 static unsigned int binder_poll(struct file *filp,
2989 				struct poll_table_struct *wait)
2990 {
2991 	struct binder_proc *proc = filp->private_data;
2992 	struct binder_thread *thread = NULL;
2993 	int wait_for_proc_work;
2994 
2995 	binder_lock(__func__);
2996 
2997 	thread = binder_get_thread(proc);
	/* thread may be NULL if allocation failed in binder_get_thread() */
	if (thread == NULL) {
		binder_unlock(__func__);
		return POLLERR;
	}
2998 
2999 	wait_for_proc_work = thread->transaction_stack == NULL &&
3000 		list_empty(&thread->todo);
3001 
3002 	binder_unlock(__func__);
3003 
3004 	if (wait_for_proc_work) {
3005 		if (binder_has_proc_work(proc, thread))
3006 			return POLLIN;
3007 		poll_wait(filp, &proc->wait, wait);
3008 		if (binder_has_proc_work(proc, thread))
3009 			return POLLIN;
3010 	} else {
3011 		if (binder_has_thread_work(thread))
3012 			return POLLIN;
3013 		poll_wait(filp, &thread->wait, wait);
3014 		if (binder_has_thread_work(thread))
3015 			return POLLIN;
3016 	}
3017 	return 0;
3018 }
3019 
3020 static int binder_ioctl_write_read(struct file *filp,
3021 				unsigned int cmd, unsigned long arg,
3022 				struct binder_thread *thread)
3023 {
3024 	int ret = 0;
3025 	struct binder_proc *proc = filp->private_data;
3026 	unsigned int size = _IOC_SIZE(cmd);
3027 	void __user *ubuf = (void __user *)arg;
3028 	struct binder_write_read bwr;
3029 
3030 	if (size != sizeof(struct binder_write_read)) {
3031 		ret = -EINVAL;
3032 		goto out;
3033 	}
3034 	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
3035 		ret = -EFAULT;
3036 		goto out;
3037 	}
3038 	binder_debug(BINDER_DEBUG_READ_WRITE,
3039 		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
3040 		     proc->pid, thread->pid,
3041 		     (u64)bwr.write_size, (u64)bwr.write_buffer,
3042 		     (u64)bwr.read_size, (u64)bwr.read_buffer);
3043 
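	/*
	 * A single BINDER_WRITE_READ ioctl both flushes outgoing commands
	 * and fills the read buffer. The consumed counts are copied back
	 * to userspace even on failure, so the caller can tell how far
	 * processing got.
	 */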
3044 	if (bwr.write_size > 0) {
3045 		ret = binder_thread_write(proc, thread,
3046 					  bwr.write_buffer,
3047 					  bwr.write_size,
3048 					  &bwr.write_consumed);
3049 		trace_binder_write_done(ret);
3050 		if (ret < 0) {
3051 			bwr.read_consumed = 0;
3052 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
3053 				ret = -EFAULT;
3054 			goto out;
3055 		}
3056 	}
3057 	if (bwr.read_size > 0) {
3058 		ret = binder_thread_read(proc, thread, bwr.read_buffer,
3059 					 bwr.read_size,
3060 					 &bwr.read_consumed,
3061 					 filp->f_flags & O_NONBLOCK);
3062 		trace_binder_read_done(ret);
3063 		if (!list_empty(&proc->todo))
3064 			wake_up_interruptible(&proc->wait);
3065 		if (ret < 0) {
3066 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
3067 				ret = -EFAULT;
3068 			goto out;
3069 		}
3070 	}
3071 	binder_debug(BINDER_DEBUG_READ_WRITE,
3072 		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
3073 		     proc->pid, thread->pid,
3074 		     (u64)bwr.write_consumed, (u64)bwr.write_size,
3075 		     (u64)bwr.read_consumed, (u64)bwr.read_size);
3076 	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
3077 		ret = -EFAULT;
3078 		goto out;
3079 	}
3080 out:
3081 	return ret;
3082 }
3083 
3084 static int binder_ioctl_set_ctx_mgr(struct file *filp)
3085 {
3086 	int ret = 0;
3087 	struct binder_proc *proc = filp->private_data;
3088 	struct binder_context *context = proc->context;
3089 	struct binder_node *new_node;
3090 	kuid_t curr_euid = current_euid();
3091 
3092 	mutex_lock(&context->context_mgr_node_lock);
3093 	if (context->binder_context_mgr_node) {
3094 		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
3095 		ret = -EBUSY;
3096 		goto out;
3097 	}
3098 	ret = security_binder_set_context_mgr(proc->tsk);
3099 	if (ret < 0)
3100 		goto out;
3101 	if (uid_valid(context->binder_context_mgr_uid)) {
3102 		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
3103 			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
3104 			       from_kuid(&init_user_ns, curr_euid),
3105 			       from_kuid(&init_user_ns,
3106 					 context->binder_context_mgr_uid));
3107 			ret = -EPERM;
3108 			goto out;
3109 		}
3110 	} else {
3111 		context->binder_context_mgr_uid = curr_euid;
3112 	}
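	/*
	 * The context manager node is created holding strong and weak
	 * references, effectively pinning it for as long as it stays
	 * registered.
	 */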
3113 	new_node = binder_new_node(proc, 0, 0);
3114 	if (!new_node) {
3115 		ret = -ENOMEM;
3116 		goto out;
3117 	}
3118 	new_node->local_weak_refs++;
3119 	new_node->local_strong_refs++;
3120 	new_node->has_strong_ref = 1;
3121 	new_node->has_weak_ref = 1;
3122 	context->binder_context_mgr_node = new_node;
3123 out:
3124 	mutex_unlock(&context->context_mgr_node_lock);
3125 	return ret;
3126 }
3127 
3128 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3129 {
3130 	int ret;
3131 	struct binder_proc *proc = filp->private_data;
3132 	struct binder_thread *thread;
3133 	unsigned int size = _IOC_SIZE(cmd);
3134 	void __user *ubuf = (void __user *)arg;
3135 
3136 	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
3137 			proc->pid, current->pid, cmd, arg);*/
3138 
3139 	trace_binder_ioctl(cmd, arg);
3140 
3141 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
3142 	if (ret)
3143 		goto err_unlocked;
3144 
3145 	binder_lock(__func__);
3146 	thread = binder_get_thread(proc);
3147 	if (thread == NULL) {
3148 		ret = -ENOMEM;
3149 		goto err;
3150 	}
3151 
3152 	switch (cmd) {
3153 	case BINDER_WRITE_READ:
3154 		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
3155 		if (ret)
3156 			goto err;
3157 		break;
3158 	case BINDER_SET_MAX_THREADS:
3159 		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
3160 			ret = -EINVAL;
3161 			goto err;
3162 		}
3163 		break;
3164 	case BINDER_SET_CONTEXT_MGR:
3165 		ret = binder_ioctl_set_ctx_mgr(filp);
3166 		if (ret)
3167 			goto err;
3168 		break;
3169 	case BINDER_THREAD_EXIT:
3170 		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
3171 			     proc->pid, thread->pid);
3172 		binder_thread_release(proc, thread);
3173 		thread = NULL;
3174 		break;
3175 	case BINDER_VERSION: {
3176 		struct binder_version __user *ver = ubuf;
3177 
3178 		if (size != sizeof(struct binder_version)) {
3179 			ret = -EINVAL;
3180 			goto err;
3181 		}
3182 		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
3183 			     &ver->protocol_version)) {
3184 			ret = -EINVAL;
3185 			goto err;
3186 		}
3187 		break;
3188 	}
3189 	default:
3190 		ret = -EINVAL;
3191 		goto err;
3192 	}
3193 	ret = 0;
3194 err:
3195 	if (thread)
3196 		thread->looper_need_return = false;
3197 	binder_unlock(__func__);
3198 	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
3199 	if (ret && ret != -ERESTARTSYS)
3200 		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
3201 err_unlocked:
3202 	trace_binder_ioctl_done(ret);
3203 	return ret;
3204 }
3205 
3206 static void binder_vma_open(struct vm_area_struct *vma)
3207 {
3208 	struct binder_proc *proc = vma->vm_private_data;
3209 
3210 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3211 		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
3212 		     proc->pid, vma->vm_start, vma->vm_end,
3213 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3214 		     (unsigned long)pgprot_val(vma->vm_page_prot));
3215 }
3216 
3217 static void binder_vma_close(struct vm_area_struct *vma)
3218 {
3219 	struct binder_proc *proc = vma->vm_private_data;
3220 
3221 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3222 		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
3223 		     proc->pid, vma->vm_start, vma->vm_end,
3224 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3225 		     (unsigned long)pgprot_val(vma->vm_page_prot));
3226 	binder_alloc_vma_close(&proc->alloc);
3227 	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
3228 }
3229 
3230 static int binder_vm_fault(struct vm_fault *vmf)
3231 {
3232 	return VM_FAULT_SIGBUS;
3233 }
3234 
3235 static const struct vm_operations_struct binder_vm_ops = {
3236 	.open = binder_vma_open,
3237 	.close = binder_vma_close,
3238 	.fault = binder_vm_fault,
3239 };
3240 
3241 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
3242 {
3243 	int ret;
3244 	struct binder_proc *proc = filp->private_data;
3245 	const char *failure_string;
3246 
3247 	if (proc->tsk != current->group_leader)
3248 		return -EINVAL;
3249 
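	/*
	 * The mapping is silently clamped to 4MB, the largest buffer
	 * area the driver supports per process.
	 */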
3250 	if ((vma->vm_end - vma->vm_start) > SZ_4M)
3251 		vma->vm_end = vma->vm_start + SZ_4M;
3252 
3253 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3254 		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
3255 		     __func__, proc->pid, vma->vm_start, vma->vm_end,
3256 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3257 		     (unsigned long)pgprot_val(vma->vm_page_prot));
3258 
3259 	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
3260 		ret = -EPERM;
3261 		failure_string = "bad vm_flags";
3262 		goto err_bad_arg;
3263 	}
3264 	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
3265 	vma->vm_ops = &binder_vm_ops;
3266 	vma->vm_private_data = proc;
3267 
3268 	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
3269 	if (ret)
3270 		return ret;
3271 	proc->files = get_files_struct(current);
3272 	return 0;
3273 
3274 err_bad_arg:
3275 	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
3276 	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
3277 	return ret;
3278 }
3279 
3280 static int binder_open(struct inode *nodp, struct file *filp)
3281 {
3282 	struct binder_proc *proc;
3283 	struct binder_device *binder_dev;
3284 
3285 	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
3286 		     current->group_leader->pid, current->pid);
3287 
3288 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
3289 	if (proc == NULL)
3290 		return -ENOMEM;
3291 	get_task_struct(current->group_leader);
3292 	proc->tsk = current->group_leader;
3293 	INIT_LIST_HEAD(&proc->todo);
3294 	init_waitqueue_head(&proc->wait);
3295 	proc->default_priority = task_nice(current);
3296 	binder_dev = container_of(filp->private_data, struct binder_device,
3297 				  miscdev);
3298 	proc->context = &binder_dev->context;
3299 	binder_alloc_init(&proc->alloc);
3300 
3301 	binder_lock(__func__);
3302 
3303 	binder_stats_created(BINDER_STAT_PROC);
3304 	proc->pid = current->group_leader->pid;
3305 	INIT_LIST_HEAD(&proc->delivered_death);
3306 	filp->private_data = proc;
3307 
3308 	binder_unlock(__func__);
3309 
3310 	mutex_lock(&binder_procs_lock);
3311 	hlist_add_head(&proc->proc_node, &binder_procs);
3312 	mutex_unlock(&binder_procs_lock);
3313 
3314 	if (binder_debugfs_dir_entry_proc) {
3315 		char strbuf[11];
3316 
3317 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
3318 		/*
3319 		 * proc debug entries are shared between contexts, so
3320 		 * this will fail if the process tries to open the driver
3321 		 * again with a different context. The printing code will
3322 		 * print all contexts that a given PID has anyway, so this
3323 		 * is not a problem.
3324 		 */
3325 		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
3326 			binder_debugfs_dir_entry_proc,
3327 			(void *)(unsigned long)proc->pid,
3328 			&binder_proc_fops);
3329 	}
3330 
3331 	return 0;
3332 }
3333 
3334 static int binder_flush(struct file *filp, fl_owner_t id)
3335 {
3336 	struct binder_proc *proc = filp->private_data;
3337 
3338 	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
3339 
3340 	return 0;
3341 }
3342 
3343 static void binder_deferred_flush(struct binder_proc *proc)
3344 {
3345 	struct rb_node *n;
3346 	int wake_count = 0;
3347 
3348 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
3349 		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
3350 
3351 		thread->looper_need_return = true;
3352 		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
3353 			wake_up_interruptible(&thread->wait);
3354 			wake_count++;
3355 		}
3356 	}
3357 	wake_up_interruptible_all(&proc->wait);
3358 
3359 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3360 		     "binder_flush: %d woke %d threads\n", proc->pid,
3361 		     wake_count);
3362 }
3363 
3364 static int binder_release(struct inode *nodp, struct file *filp)
3365 {
3366 	struct binder_proc *proc = filp->private_data;
3367 
3368 	debugfs_remove(proc->debugfs_entry);
3369 	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
3370 
3371 	return 0;
3372 }
3373 
3374 static int binder_node_release(struct binder_node *node, int refs)
3375 {
3376 	struct binder_ref *ref;
3377 	int death = 0;
3378 
3379 	list_del_init(&node->work.entry);
3380 	binder_release_work(&node->async_todo);
3381 
3382 	if (hlist_empty(&node->refs)) {
3383 		kfree(node);
3384 		binder_stats_deleted(BINDER_STAT_NODE);
3385 
3386 		return refs;
3387 	}
3388 
3389 	node->proc = NULL;
3390 	node->local_strong_refs = 0;
3391 	node->local_weak_refs = 0;
3392 
3393 	spin_lock(&binder_dead_nodes_lock);
3394 	hlist_add_head(&node->dead_node, &binder_dead_nodes);
3395 	spin_unlock(&binder_dead_nodes_lock);
3396 
3397 	hlist_for_each_entry(ref, &node->refs, node_entry) {
3398 		refs++;
3399 
3400 		if (!ref->death)
3401 			continue;
3402 
3403 		death++;
3404 
3405 		if (list_empty(&ref->death->work.entry)) {
3406 			ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3407 			list_add_tail(&ref->death->work.entry,
3408 				      &ref->proc->todo);
3409 			wake_up_interruptible(&ref->proc->wait);
3410 		} else
3411 			BUG();
3412 	}
3413 
3414 	binder_debug(BINDER_DEBUG_DEAD_BINDER,
3415 		     "node %d now dead, refs %d, death %d\n",
3416 		     node->debug_id, refs, death);
3417 
3418 	return refs;
3419 }
3420 
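/*
 * Editorial summary: final teardown of a binder_proc, run from the
 * deferred workqueue after binder_release(). It unhooks the proc from
 * binder_procs (clearing the context manager node if this process
 * owned it), releases all threads, nodes, refs and queued work, and
 * finally drops the temporary reference that kept the proc alive
 * during the walk.
 */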
3421 static void binder_deferred_release(struct binder_proc *proc)
3422 {
3423 	struct binder_context *context = proc->context;
3424 	struct rb_node *n;
3425 	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
3426 
3427 	BUG_ON(proc->files);
3428 
3429 	mutex_lock(&binder_procs_lock);
3430 	hlist_del(&proc->proc_node);
3431 	mutex_unlock(&binder_procs_lock);
3432 
3433 	mutex_lock(&context->context_mgr_node_lock);
3434 	if (context->binder_context_mgr_node &&
3435 	    context->binder_context_mgr_node->proc == proc) {
3436 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
3437 			     "%s: %d context_mgr_node gone\n",
3438 			     __func__, proc->pid);
3439 		context->binder_context_mgr_node = NULL;
3440 	}
3441 	mutex_unlock(&context->context_mgr_node_lock);
3442 	/*
3443 	 * Make sure proc stays alive after we
3444 	 * remove all the threads
3445 	 */
3446 	proc->tmp_ref++;
3447 
3448 	proc->is_dead = true;
3449 	threads = 0;
3450 	active_transactions = 0;
3451 	while ((n = rb_first(&proc->threads))) {
3452 		struct binder_thread *thread;
3453 
3454 		thread = rb_entry(n, struct binder_thread, rb_node);
3455 		threads++;
3456 		active_transactions += binder_thread_release(proc, thread);
3457 	}
3458 
3459 	nodes = 0;
3460 	incoming_refs = 0;
3461 	while ((n = rb_first(&proc->nodes))) {
3462 		struct binder_node *node;
3463 
3464 		node = rb_entry(n, struct binder_node, rb_node);
3465 		nodes++;
3466 		rb_erase(&node->rb_node, &proc->nodes);
3467 		incoming_refs = binder_node_release(node, incoming_refs);
3468 	}
3469 
3470 	outgoing_refs = 0;
3471 	while ((n = rb_first(&proc->refs_by_desc))) {
3472 		struct binder_ref *ref;
3473 
3474 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
3475 		outgoing_refs++;
3476 		binder_delete_ref(ref);
3477 	}
3478 
3479 	binder_release_work(&proc->todo);
3480 	binder_release_work(&proc->delivered_death);
3481 
3482 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3483 		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
3484 		     __func__, proc->pid, threads, nodes, incoming_refs,
3485 		     outgoing_refs, active_transactions);
3486 
3487 	binder_proc_dec_tmpref(proc);
3488 }
3489 
3490 static void binder_deferred_func(struct work_struct *work)
3491 {
3492 	struct binder_proc *proc;
3493 	struct files_struct *files;
3494 
3495 	int defer;
3496 
3497 	do {
3498 		binder_lock(__func__);
3499 		mutex_lock(&binder_deferred_lock);
3500 		if (!hlist_empty(&binder_deferred_list)) {
3501 			proc = hlist_entry(binder_deferred_list.first,
3502 					struct binder_proc, deferred_work_node);
3503 			hlist_del_init(&proc->deferred_work_node);
3504 			defer = proc->deferred_work;
3505 			proc->deferred_work = 0;
3506 		} else {
3507 			proc = NULL;
3508 			defer = 0;
3509 		}
3510 		mutex_unlock(&binder_deferred_lock);
3511 
3512 		files = NULL;
3513 		if (defer & BINDER_DEFERRED_PUT_FILES) {
3514 			files = proc->files;
3515 			if (files)
3516 				proc->files = NULL;
3517 		}
3518 
3519 		if (defer & BINDER_DEFERRED_FLUSH)
3520 			binder_deferred_flush(proc);
3521 
3522 		if (defer & BINDER_DEFERRED_RELEASE)
3523 			binder_deferred_release(proc); /* frees proc */
3524 
3525 		binder_unlock(__func__);
3526 		if (files)
3527 			put_files_struct(files);
3528 	} while (proc);
3529 }
3530 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
3531 
3532 static void
3533 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
3534 {
3535 	mutex_lock(&binder_deferred_lock);
3536 	proc->deferred_work |= defer;
3537 	if (hlist_unhashed(&proc->deferred_work_node)) {
3538 		hlist_add_head(&proc->deferred_work_node,
3539 				&binder_deferred_list);
3540 		schedule_work(&binder_deferred_work);
3541 	}
3542 	mutex_unlock(&binder_deferred_lock);
3543 }
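
/*
 * Editorial note on the deferred-work pattern above: callers OR their
 * request into proc->deferred_work under binder_deferred_lock and
 * hang the proc on binder_deferred_list at most once; the single
 * global work item then drains the list. For example:
 *
 *	binder_flush()
 *	  -> binder_defer_work(proc, BINDER_DEFERRED_FLUSH)
 *	       -> schedule_work(&binder_deferred_work)
 *	            -> binder_deferred_func() -> binder_deferred_flush()
 */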
3544 
3545 static void print_binder_transaction(struct seq_file *m, const char *prefix,
3546 				     struct binder_transaction *t)
3547 {
3548 	spin_lock(&t->lock);
3549 	seq_printf(m,
3550 		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
3551 		   prefix, t->debug_id, t,
3552 		   t->from ? t->from->proc->pid : 0,
3553 		   t->from ? t->from->pid : 0,
3554 		   t->to_proc ? t->to_proc->pid : 0,
3555 		   t->to_thread ? t->to_thread->pid : 0,
3556 		   t->code, t->flags, t->priority, t->need_reply);
3557 	spin_unlock(&t->lock);
3558 
3559 	if (t->buffer == NULL) {
3560 		seq_puts(m, " buffer free\n");
3561 		return;
3562 	}
3563 	if (t->buffer->target_node)
3564 		seq_printf(m, " node %d",
3565 			   t->buffer->target_node->debug_id);
3566 	seq_printf(m, " size %zd:%zd data %p\n",
3567 		   t->buffer->data_size, t->buffer->offsets_size,
3568 		   t->buffer->data);
3569 }
3570 
3571 static void print_binder_work(struct seq_file *m, const char *prefix,
3572 			      const char *transaction_prefix,
3573 			      struct binder_work *w)
3574 {
3575 	struct binder_node *node;
3576 	struct binder_transaction *t;
3577 
3578 	switch (w->type) {
3579 	case BINDER_WORK_TRANSACTION:
3580 		t = container_of(w, struct binder_transaction, work);
3581 		print_binder_transaction(m, transaction_prefix, t);
3582 		break;
3583 	case BINDER_WORK_RETURN_ERROR: {
3584 		struct binder_error *e = container_of(
3585 				w, struct binder_error, work);
3586 
3587 		seq_printf(m, "%stransaction error: %u\n",
3588 			   prefix, e->cmd);
3589 	} break;
3590 	case BINDER_WORK_TRANSACTION_COMPLETE:
3591 		seq_printf(m, "%stransaction complete\n", prefix);
3592 		break;
3593 	case BINDER_WORK_NODE:
3594 		node = container_of(w, struct binder_node, work);
3595 		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
3596 			   prefix, node->debug_id,
3597 			   (u64)node->ptr, (u64)node->cookie);
3598 		break;
3599 	case BINDER_WORK_DEAD_BINDER:
3600 		seq_printf(m, "%shas dead binder\n", prefix);
3601 		break;
3602 	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3603 		seq_printf(m, "%shas cleared dead binder\n", prefix);
3604 		break;
3605 	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
3606 		seq_printf(m, "%shas cleared death notification\n", prefix);
3607 		break;
3608 	default:
3609 		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
3610 		break;
3611 	}
3612 }
3613 
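/*
 * Editorial note: print_binder_thread() and print_binder_proc() use a
 * small seq_file trick: they record m->count before (start_pos) and
 * after (header_pos) emitting their header. If nothing was printed
 * beyond the header and print_always/print_all is not set, m->count
 * is rewound to start_pos, silently dropping the header for objects
 * with nothing interesting to report.
 */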
3614 static void print_binder_thread(struct seq_file *m,
3615 				struct binder_thread *thread,
3616 				int print_always)
3617 {
3618 	struct binder_transaction *t;
3619 	struct binder_work *w;
3620 	size_t start_pos = m->count;
3621 	size_t header_pos;
3622 
3623 	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
3624 			thread->pid, thread->looper,
3625 			thread->looper_need_return,
3626 			atomic_read(&thread->tmp_ref));
3627 	header_pos = m->count;
3628 	t = thread->transaction_stack;
3629 	while (t) {
3630 		if (t->from == thread) {
3631 			print_binder_transaction(m,
3632 						 "    outgoing transaction", t);
3633 			t = t->from_parent;
3634 		} else if (t->to_thread == thread) {
3635 			print_binder_transaction(m,
3636 						 "    incoming transaction", t);
3637 			t = t->to_parent;
3638 		} else {
3639 			print_binder_transaction(m, "    bad transaction", t);
3640 			t = NULL;
3641 		}
3642 	}
3643 	list_for_each_entry(w, &thread->todo, entry) {
3644 		print_binder_work(m, "    ", "    pending transaction", w);
3645 	}
3646 	if (!print_always && m->count == header_pos)
3647 		m->count = start_pos;
3648 }
3649 
3650 static void print_binder_node(struct seq_file *m, struct binder_node *node)
3651 {
3652 	struct binder_ref *ref;
3653 	struct binder_work *w;
3654 	int count;
3655 
3656 	count = 0;
3657 	hlist_for_each_entry(ref, &node->refs, node_entry)
3658 		count++;
3659 
3660 	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
3661 		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
3662 		   node->has_strong_ref, node->has_weak_ref,
3663 		   node->local_strong_refs, node->local_weak_refs,
3664 		   node->internal_strong_refs, count);
3665 	if (count) {
3666 		seq_puts(m, " proc");
3667 		hlist_for_each_entry(ref, &node->refs, node_entry)
3668 			seq_printf(m, " %d", ref->proc->pid);
3669 	}
3670 	seq_puts(m, "\n");
3671 	list_for_each_entry(w, &node->async_todo, entry)
3672 		print_binder_work(m, "    ",
3673 				  "    pending async transaction", w);
3674 }
3675 
3676 static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
3677 {
3678 	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %p\n",
3679 		   ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
3680 		   ref->node->debug_id, ref->strong, ref->weak, ref->death);
3681 }
3682 
3683 static void print_binder_proc(struct seq_file *m,
3684 			      struct binder_proc *proc, int print_all)
3685 {
3686 	struct binder_work *w;
3687 	struct rb_node *n;
3688 	size_t start_pos = m->count;
3689 	size_t header_pos;
3690 
3691 	seq_printf(m, "proc %d\n", proc->pid);
3692 	seq_printf(m, "context %s\n", proc->context->name);
3693 	header_pos = m->count;
3694 
3695 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3696 		print_binder_thread(m, rb_entry(n, struct binder_thread,
3697 						rb_node), print_all);
3698 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
3699 		struct binder_node *node = rb_entry(n, struct binder_node,
3700 						    rb_node);
3701 		if (print_all || node->has_async_transaction)
3702 			print_binder_node(m, node);
3703 	}
3704 	if (print_all) {
3705 		for (n = rb_first(&proc->refs_by_desc);
3706 		     n != NULL;
3707 		     n = rb_next(n))
3708 			print_binder_ref(m, rb_entry(n, struct binder_ref,
3709 						     rb_node_desc));
3710 	}
3711 	binder_alloc_print_allocated(m, &proc->alloc);
3712 	list_for_each_entry(w, &proc->todo, entry)
3713 		print_binder_work(m, "  ", "  pending transaction", w);
3714 	list_for_each_entry(w, &proc->delivered_death, entry) {
3715 		seq_puts(m, "  has delivered dead binder\n");
3716 		break;
3717 	}
3718 	if (!print_all && m->count == header_pos)
3719 		m->count = start_pos;
3720 }
3721 
3722 static const char * const binder_return_strings[] = {
3723 	"BR_ERROR",
3724 	"BR_OK",
3725 	"BR_TRANSACTION",
3726 	"BR_REPLY",
3727 	"BR_ACQUIRE_RESULT",
3728 	"BR_DEAD_REPLY",
3729 	"BR_TRANSACTION_COMPLETE",
3730 	"BR_INCREFS",
3731 	"BR_ACQUIRE",
3732 	"BR_RELEASE",
3733 	"BR_DECREFS",
3734 	"BR_ATTEMPT_ACQUIRE",
3735 	"BR_NOOP",
3736 	"BR_SPAWN_LOOPER",
3737 	"BR_FINISHED",
3738 	"BR_DEAD_BINDER",
3739 	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
3740 	"BR_FAILED_REPLY"
3741 };
3742 
3743 static const char * const binder_command_strings[] = {
3744 	"BC_TRANSACTION",
3745 	"BC_REPLY",
3746 	"BC_ACQUIRE_RESULT",
3747 	"BC_FREE_BUFFER",
3748 	"BC_INCREFS",
3749 	"BC_ACQUIRE",
3750 	"BC_RELEASE",
3751 	"BC_DECREFS",
3752 	"BC_INCREFS_DONE",
3753 	"BC_ACQUIRE_DONE",
3754 	"BC_ATTEMPT_ACQUIRE",
3755 	"BC_REGISTER_LOOPER",
3756 	"BC_ENTER_LOOPER",
3757 	"BC_EXIT_LOOPER",
3758 	"BC_REQUEST_DEATH_NOTIFICATION",
3759 	"BC_CLEAR_DEATH_NOTIFICATION",
3760 	"BC_DEAD_BINDER_DONE",
3761 	"BC_TRANSACTION_SG",
3762 	"BC_REPLY_SG",
3763 };
3764 
3765 static const char * const binder_objstat_strings[] = {
3766 	"proc",
3767 	"thread",
3768 	"node",
3769 	"ref",
3770 	"death",
3771 	"transaction",
3772 	"transaction_complete"
3773 };
3774 
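/*
 * Editorial note: the BUILD_BUG_ON()s below tie the string tables to
 * the sizes of the bc (command), br (return) and object stat arrays,
 * so adding a BC_ or BR_ code or a new object type without updating
 * the matching table fails at compile time. For objects, "active" is
 * reported as created minus deleted.
 */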
3775 static void print_binder_stats(struct seq_file *m, const char *prefix,
3776 			       struct binder_stats *stats)
3777 {
3778 	int i;
3779 
3780 	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
3781 		     ARRAY_SIZE(binder_command_strings));
3782 	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
3783 		int temp = atomic_read(&stats->bc[i]);
3784 
3785 		if (temp)
3786 			seq_printf(m, "%s%s: %d\n", prefix,
3787 				   binder_command_strings[i], temp);
3788 	}
3789 
3790 	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
3791 		     ARRAY_SIZE(binder_return_strings));
3792 	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
3793 		int temp = atomic_read(&stats->br[i]);
3794 
3795 		if (temp)
3796 			seq_printf(m, "%s%s: %d\n", prefix,
3797 				   binder_return_strings[i], temp);
3798 	}
3799 
3800 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3801 		     ARRAY_SIZE(binder_objstat_strings));
3802 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3803 		     ARRAY_SIZE(stats->obj_deleted));
3804 	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
3805 		int created = atomic_read(&stats->obj_created[i]);
3806 		int deleted = atomic_read(&stats->obj_deleted[i]);
3807 
3808 		if (created || deleted)
3809 			seq_printf(m, "%s%s: active %d total %d\n",
3810 				prefix,
3811 				binder_objstat_strings[i],
3812 				created - deleted,
3813 				created);
3814 	}
3815 }
3816 
3817 static void print_binder_proc_stats(struct seq_file *m,
3818 				    struct binder_proc *proc)
3819 {
3820 	struct binder_work *w;
3821 	struct rb_node *n;
3822 	int count, strong, weak;
3823 
3824 	seq_printf(m, "proc %d\n", proc->pid);
3825 	seq_printf(m, "context %s\n", proc->context->name);
3826 	count = 0;
3827 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3828 		count++;
3829 	seq_printf(m, "  threads: %d\n", count);
3830 	seq_printf(m, "  requested threads: %d+%d/%d\n"
3831 			"  ready threads %d\n"
3832 			"  free async space %zd\n", proc->requested_threads,
3833 			proc->requested_threads_started, proc->max_threads,
3834 			proc->ready_threads,
3835 			binder_alloc_get_free_async_space(&proc->alloc));
3836 	count = 0;
3837 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
3838 		count++;
3839 	seq_printf(m, "  nodes: %d\n", count);
3840 	count = 0;
3841 	strong = 0;
3842 	weak = 0;
3843 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
3844 		struct binder_ref *ref = rb_entry(n, struct binder_ref,
3845 						  rb_node_desc);
3846 		count++;
3847 		strong += ref->strong;
3848 		weak += ref->weak;
3849 	}
3850 	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
3851 
3852 	count = binder_alloc_get_allocated_count(&proc->alloc);
3853 	seq_printf(m, "  buffers: %d\n", count);
3854 
3855 	count = 0;
3856 	list_for_each_entry(w, &proc->todo, entry) {
3857 		switch (w->type) {
3858 		case BINDER_WORK_TRANSACTION:
3859 			count++;
3860 			break;
3861 		default:
3862 			break;
3863 		}
3864 	}
3865 	seq_printf(m, "  pending transactions: %d\n", count);
3866 
3867 	print_binder_stats(m, "  ", &proc->stats);
3868 }
3869 
3870 
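/*
 * Editorial note: the debugfs show functions below share one locking
 * pattern: take the global binder lock via binder_lock() first, then
 * nest binder_procs_lock while walking the binder_procs list.
 */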
3871 static int binder_state_show(struct seq_file *m, void *unused)
3872 {
3873 	struct binder_proc *proc;
3874 	struct binder_node *node;
3875 
3876 	binder_lock(__func__);
3877 
3878 	seq_puts(m, "binder state:\n");
3879 
3880 	spin_lock(&binder_dead_nodes_lock);
3881 	if (!hlist_empty(&binder_dead_nodes))
3882 		seq_puts(m, "dead nodes:\n");
3883 	hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
3884 		print_binder_node(m, node);
3885 	spin_unlock(&binder_dead_nodes_lock);
3886 
3887 	mutex_lock(&binder_procs_lock);
3888 	hlist_for_each_entry(proc, &binder_procs, proc_node)
3889 		print_binder_proc(m, proc, 1);
3890 	mutex_unlock(&binder_procs_lock);
3891 	binder_unlock(__func__);
3892 	return 0;
3893 }
3894 
3895 static int binder_stats_show(struct seq_file *m, void *unused)
3896 {
3897 	struct binder_proc *proc;
3898 
3899 	binder_lock(__func__);
3900 
3901 	seq_puts(m, "binder stats:\n");
3902 
3903 	print_binder_stats(m, "", &binder_stats);
3904 
3905 	mutex_lock(&binder_procs_lock);
3906 	hlist_for_each_entry(proc, &binder_procs, proc_node)
3907 		print_binder_proc_stats(m, proc);
3908 	mutex_unlock(&binder_procs_lock);
3909 	binder_unlock(__func__);
3910 	return 0;
3911 }
3912 
3913 static int binder_transactions_show(struct seq_file *m, void *unused)
3914 {
3915 	struct binder_proc *proc;
3916 
3917 	binder_lock(__func__);
3918 
3919 	seq_puts(m, "binder transactions:\n");
3920 	mutex_lock(&binder_procs_lock);
3921 	hlist_for_each_entry(proc, &binder_procs, proc_node)
3922 		print_binder_proc(m, proc, 0);
3923 	mutex_unlock(&binder_procs_lock);
3924 	binder_unlock(__func__);
3925 	return 0;
3926 }
3927 
3928 static int binder_proc_show(struct seq_file *m, void *unused)
3929 {
3930 	struct binder_proc *itr;
3931 	int pid = (unsigned long)m->private;
3932 
3933 	binder_lock(__func__);
3934 
3935 	mutex_lock(&binder_procs_lock);
3936 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
3937 		if (itr->pid == pid) {
3938 			seq_puts(m, "binder proc state:\n");
3939 			print_binder_proc(m, itr, 1);
3940 		}
3941 	}
3942 	mutex_unlock(&binder_procs_lock);
3943 
3944 	binder_unlock(__func__);
3945 	return 0;
3946 }
3947 
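/*
 * Editorial note: the consistency check below assumes a writer that
 * zeroes debug_id_done before filling an entry and stores the final
 * debug_id once done. Reading debug_id_done before and after the
 * fields, with smp_rmb() in between, therefore detects entries torn
 * by a concurrent writer; those are flagged "(incomplete)".
 */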
3948 static void print_binder_transaction_log_entry(struct seq_file *m,
3949 					struct binder_transaction_log_entry *e)
3950 {
3951 	int debug_id = READ_ONCE(e->debug_id_done);
3952 	/*
3953 	 * read barrier to guarantee that debug_id_done is read before
3954 	 * we print the log values
3955 	 */
3956 	smp_rmb();
3957 	seq_printf(m,
3958 		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
3959 		   e->debug_id, (e->call_type == 2) ? "reply" :
3960 		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
3961 		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
3962 		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
3963 		   e->return_error, e->return_error_param,
3964 		   e->return_error_line);
3965 	/*
3966 	 * read barrier to guarantee that debug_id_done is read again
3967 	 * only after we are done printing the fields of the entry
3968 	 */
3969 	smp_rmb();
3970 	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
3971 			"\n" : " (incomplete)\n");
3972 }
3973 
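/*
 * Worked example (editorial), assuming a 32-entry ring that has
 * already wrapped (log->full set) with log_cur == 40: count starts at
 * 41, cur becomes 41 % 32 == 9 and count is clamped to 32, so the
 * loop prints indices 9..31 and then 0..8, oldest entry first. Before
 * the first wrap (log->full clear), cur is 0 and only the first count
 * entries are printed.
 */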
3974 static int binder_transaction_log_show(struct seq_file *m, void *unused)
3975 {
3976 	struct binder_transaction_log *log = m->private;
3977 	unsigned int log_cur = atomic_read(&log->cur);
3978 	unsigned int count;
3979 	unsigned int cur;
3980 	int i;
3981 
3982 	count = log_cur + 1;
3983 	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
3984 		0 : count % ARRAY_SIZE(log->entry);
3985 	if (count > ARRAY_SIZE(log->entry) || log->full)
3986 		count = ARRAY_SIZE(log->entry);
3987 	for (i = 0; i < count; i++) {
3988 		unsigned int index = cur++ % ARRAY_SIZE(log->entry);
3989 
3990 		print_binder_transaction_log_entry(m, &log->entry[index]);
3991 	}
3992 	return 0;
3993 }
3994 
3995 static const struct file_operations binder_fops = {
3996 	.owner = THIS_MODULE,
3997 	.poll = binder_poll,
3998 	.unlocked_ioctl = binder_ioctl,
3999 	.compat_ioctl = binder_ioctl,
4000 	.mmap = binder_mmap,
4001 	.open = binder_open,
4002 	.flush = binder_flush,
4003 	.release = binder_release,
4004 };
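
/*
 * Editorial note: .compat_ioctl can point at binder_ioctl itself
 * because the binder UAPI structures are declared with fixed-width
 * types (binder_uintptr_t, binder_size_t), so 32-bit and 64-bit
 * callers pass the same layout in the default (non-BINDER_IPC_32BIT)
 * configuration.
 */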
4005 
4006 BINDER_DEBUG_ENTRY(state);
4007 BINDER_DEBUG_ENTRY(stats);
4008 BINDER_DEBUG_ENTRY(transactions);
4009 BINDER_DEBUG_ENTRY(transaction_log);
4010 
4011 static int __init init_binder_device(const char *name)
4012 {
4013 	int ret;
4014 	struct binder_device *binder_device;
4015 
4016 	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
4017 	if (!binder_device)
4018 		return -ENOMEM;
4019 
4020 	binder_device->miscdev.fops = &binder_fops;
4021 	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
4022 	binder_device->miscdev.name = name;
4023 
4024 	binder_device->context.binder_context_mgr_uid = INVALID_UID;
4025 	binder_device->context.name = name;
4026 	mutex_init(&binder_device->context.context_mgr_node_lock);
4027 
4028 	ret = misc_register(&binder_device->miscdev);
4029 	if (ret < 0) {
4030 		kfree(binder_device);
4031 		return ret;
4032 	}
4033 
4034 	hlist_add_head(&binder_device->hlist, &binder_devices);
4035 
4036 	return ret;
4037 }
4038 
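/*
 * Editorial note: binder_devices_param is a comma-separated list of
 * device names (from the ANDROID_BINDER_DEVICES Kconfig default or a
 * module parameter). A hypothetical command line, assuming the
 * parameter is named "devices":
 *
 *	binder.devices=binder,hwbinder,vndbinder
 *
 * binder_init() below splits the list with strsep() and registers one
 * misc device per name.
 */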
4039 static int __init binder_init(void)
4040 {
4041 	int ret;
4042 	char *device_name, *device_names;
4043 	struct binder_device *device;
4044 	struct hlist_node *tmp;
4045 
4046 	atomic_set(&binder_transaction_log.cur, ~0U);
4047 	atomic_set(&binder_transaction_log_failed.cur, ~0U);
4048 
4049 	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
4050 	if (binder_debugfs_dir_entry_root)
4051 		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
4052 						 binder_debugfs_dir_entry_root);
4053 
4054 	if (binder_debugfs_dir_entry_root) {
4055 		debugfs_create_file("state",
4056 				    S_IRUGO,
4057 				    binder_debugfs_dir_entry_root,
4058 				    NULL,
4059 				    &binder_state_fops);
4060 		debugfs_create_file("stats",
4061 				    S_IRUGO,
4062 				    binder_debugfs_dir_entry_root,
4063 				    NULL,
4064 				    &binder_stats_fops);
4065 		debugfs_create_file("transactions",
4066 				    S_IRUGO,
4067 				    binder_debugfs_dir_entry_root,
4068 				    NULL,
4069 				    &binder_transactions_fops);
4070 		debugfs_create_file("transaction_log",
4071 				    S_IRUGO,
4072 				    binder_debugfs_dir_entry_root,
4073 				    &binder_transaction_log,
4074 				    &binder_transaction_log_fops);
4075 		debugfs_create_file("failed_transaction_log",
4076 				    S_IRUGO,
4077 				    binder_debugfs_dir_entry_root,
4078 				    &binder_transaction_log_failed,
4079 				    &binder_transaction_log_fops);
4080 	}
4081 
4082 	/*
4083 	 * Copy the module parameter string, because we don't want to
4084 	 * tokenize it in-place.
4085 	 */
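	/*
	 * (Editorial) On success this copy is deliberately never freed:
	 * each misc device registered by init_binder_device() keeps its
	 * ->name pointing into this buffer for the driver's lifetime.
	 */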
4086 	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
4087 	if (!device_names) {
4088 		ret = -ENOMEM;
4089 		goto err_alloc_device_names_failed;
4090 	}
4091 	strcpy(device_names, binder_devices_param);
4092 
4093 	while ((device_name = strsep(&device_names, ","))) {
4094 		ret = init_binder_device(device_name);
4095 		if (ret)
4096 			goto err_init_binder_device_failed;
4097 	}
4098 
4099 	return ret;
4100 
4101 err_init_binder_device_failed:
4102 	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
4103 		misc_deregister(&device->miscdev);
4104 		hlist_del(&device->hlist);
4105 		kfree(device);
4106 	}
4107 err_alloc_device_names_failed:
4108 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
4109 
4110 	return ret;
4111 }
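
/*
 * Editorial note: with debugfs mounted at /sys/kernel/debug, the code
 * above creates binder/state, binder/stats, binder/transactions,
 * binder/transaction_log and binder/failed_transaction_log, plus a
 * binder/proc/<pid> entry for each process that has a binder device
 * open.
 */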
4112 
4113 device_initcall(binder_init);
4114 
4115 #define CREATE_TRACE_POINTS
4116 #include "binder_trace.h"
4117 
4118 MODULE_LICENSE("GPL v2");
4119