1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder.c
3  *
4  * Android IPC Subsystem
5  *
6  * Copyright (C) 2007-2008 Google, Inc.
7  */
8 
9 /*
10  * Locking overview
11  *
12  * There are 3 main spinlocks which must be acquired in the
13  * order shown:
14  *
15  * 1) proc->outer_lock : protects binder_ref
16  *    binder_proc_lock() and binder_proc_unlock() are
17  *    used to acq/rel.
18  * 2) node->lock : protects most fields of binder_node.
19  *    binder_node_lock() and binder_node_unlock() are
20  *    used to acq/rel.
21  * 3) proc->inner_lock : protects the thread and node lists
22  *    (proc->threads, proc->waiting_threads, proc->nodes)
23  *    and all todo lists associated with the binder_proc
24  *    (proc->todo, thread->todo, proc->delivered_death and
25  *    node->async_todo), as well as thread->transaction_stack
26  *    binder_inner_proc_lock() and binder_inner_proc_unlock()
27  *    are used to acq/rel.
28  *
29  * Any lock under procA must never be nested under any lock at the same
30  * level or below on procB.
31  *
32  * Functions that require a lock held on entry indicate the required
33  * lock in the suffix of the function name:
34  *
35  * foo_olocked() : requires proc->outer_lock
36  * foo_nlocked() : requires node->lock
37  * foo_ilocked() : requires proc->inner_lock
38  * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39  * foo_nilocked(): requires node->lock and proc->inner_lock
40  * ...
41  */
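
/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): a caller that needs both a node's lock and its owning proc's
 * inner lock takes them in the order documented above. The helpers
 * defined later in this file encapsulate exactly this pattern:
 *
 *	binder_node_inner_lock(node);	// node->lock, then
 *					// node->proc->inner_lock
 *	...update the node and its proc's todo lists...
 *	binder_node_inner_unlock(node);	// released in reverse order
 */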
42 
43 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44 
45 #include <linux/fdtable.h>
46 #include <linux/file.h>
47 #include <linux/freezer.h>
48 #include <linux/fs.h>
49 #include <linux/list.h>
50 #include <linux/miscdevice.h>
51 #include <linux/module.h>
52 #include <linux/mutex.h>
53 #include <linux/nsproxy.h>
54 #include <linux/poll.h>
55 #include <linux/debugfs.h>
56 #include <linux/rbtree.h>
57 #include <linux/sched/signal.h>
58 #include <linux/sched/mm.h>
59 #include <linux/seq_file.h>
60 #include <linux/string.h>
61 #include <linux/uaccess.h>
62 #include <linux/pid_namespace.h>
63 #include <linux/security.h>
64 #include <linux/spinlock.h>
65 #include <linux/ratelimit.h>
66 #include <linux/syscalls.h>
67 #include <linux/task_work.h>
68 #include <linux/sizes.h>
69 
70 #include <uapi/linux/android/binder.h>
71 
72 #include <asm/cacheflush.h>
73 
74 #include "binder_internal.h"
75 #include "binder_trace.h"
76 
77 static HLIST_HEAD(binder_deferred_list);
78 static DEFINE_MUTEX(binder_deferred_lock);
79 
80 static HLIST_HEAD(binder_devices);
81 static HLIST_HEAD(binder_procs);
82 static DEFINE_MUTEX(binder_procs_lock);
83 
84 static HLIST_HEAD(binder_dead_nodes);
85 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
86 
87 static struct dentry *binder_debugfs_dir_entry_root;
88 static struct dentry *binder_debugfs_dir_entry_proc;
89 static atomic_t binder_last_id;
90 
91 static int proc_show(struct seq_file *m, void *unused);
92 DEFINE_SHOW_ATTRIBUTE(proc);
93 
94 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
95 
96 enum {
97 	BINDER_DEBUG_USER_ERROR             = 1U << 0,
98 	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
99 	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
100 	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
101 	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
102 	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
103 	BINDER_DEBUG_READ_WRITE             = 1U << 6,
104 	BINDER_DEBUG_USER_REFS              = 1U << 7,
105 	BINDER_DEBUG_THREADS                = 1U << 8,
106 	BINDER_DEBUG_TRANSACTION            = 1U << 9,
107 	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
108 	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
109 	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
110 	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
111 	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
112 };
113 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
114 	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
115 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
116 
117 char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
118 module_param_named(devices, binder_devices_param, charp, 0444);
119 
120 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
121 static int binder_stop_on_user_error;
122 
123 static int binder_set_stop_on_user_error(const char *val,
124 					 const struct kernel_param *kp)
125 {
126 	int ret;
127 
128 	ret = param_set_int(val, kp);
129 	if (binder_stop_on_user_error < 2)
130 		wake_up(&binder_user_error_wait);
131 	return ret;
132 }
133 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
134 	param_get_int, &binder_stop_on_user_error, 0644);
135 
136 #define binder_debug(mask, x...) \
137 	do { \
138 		if (binder_debug_mask & mask) \
139 			pr_info_ratelimited(x); \
140 	} while (0)
141 
142 #define binder_user_error(x...) \
143 	do { \
144 		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
145 			pr_info_ratelimited(x); \
146 		if (binder_stop_on_user_error) \
147 			binder_stop_on_user_error = 2; \
148 	} while (0)
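
/*
 * Example usage of the two macros above (an illustrative sketch; both
 * patterns appear throughout this file). Output is ratelimited and
 * gated on the debug_mask module parameter; user errors additionally
 * trip the stop_on_user_error machinery:
 *
 *	binder_debug(BINDER_DEBUG_THREADS,
 *		     "%d:%d exit\n", proc->pid, thread->pid);
 *	binder_user_error("%d:%d got bad object type %x\n",
 *			  proc->pid, thread->pid, hdr->type);
 */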
149 
150 #define to_flat_binder_object(hdr) \
151 	container_of(hdr, struct flat_binder_object, hdr)
152 
153 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
154 
155 #define to_binder_buffer_object(hdr) \
156 	container_of(hdr, struct binder_buffer_object, hdr)
157 
158 #define to_binder_fd_array_object(hdr) \
159 	container_of(hdr, struct binder_fd_array_object, hdr)
160 
161 static struct binder_stats binder_stats;
162 
163 static inline void binder_stats_deleted(enum binder_stat_types type)
164 {
165 	atomic_inc(&binder_stats.obj_deleted[type]);
166 }
167 
168 static inline void binder_stats_created(enum binder_stat_types type)
169 {
170 	atomic_inc(&binder_stats.obj_created[type]);
171 }
172 
173 struct binder_transaction_log binder_transaction_log;
174 struct binder_transaction_log binder_transaction_log_failed;
175 
176 static struct binder_transaction_log_entry *binder_transaction_log_add(
177 	struct binder_transaction_log *log)
178 {
179 	struct binder_transaction_log_entry *e;
180 	unsigned int cur = atomic_inc_return(&log->cur);
181 
182 	if (cur >= ARRAY_SIZE(log->entry))
183 		log->full = true;
184 	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
185 	WRITE_ONCE(e->debug_id_done, 0);
186 	/*
187 	 * write-barrier to synchronize access to e->debug_id_done.
188 	 * We make sure the initialized 0 value is seen before
189 	 * the other fields are zeroed by memset().
190 	 */
191 	smp_wmb();
192 	memset(e, 0, sizeof(*e));
193 	return e;
194 }
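
/*
 * The smp_wmb() above pairs with a read barrier in the debugfs consumer
 * of the log (not shown in this excerpt). A sketch of the assumed
 * reader-side pattern: sample debug_id_done, issue smp_rmb(), read the
 * entry, then re-check debug_id_done to detect an entry that was
 * recycled mid-read:
 *
 *	int debug_id = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();
 *	...format the entry fields...
 *	if (debug_id != READ_ONCE(e->debug_id_done))
 *		...mark the output as incomplete...
 */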
195 
196 enum binder_deferred_state {
197 	BINDER_DEFERRED_FLUSH        = 0x01,
198 	BINDER_DEFERRED_RELEASE      = 0x02,
199 };
200 
201 enum {
202 	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
203 	BINDER_LOOPER_STATE_ENTERED     = 0x02,
204 	BINDER_LOOPER_STATE_EXITED      = 0x04,
205 	BINDER_LOOPER_STATE_INVALID     = 0x08,
206 	BINDER_LOOPER_STATE_WAITING     = 0x10,
207 	BINDER_LOOPER_STATE_POLL        = 0x20,
208 };
209 
210 /**
211  * binder_proc_lock() - Acquire outer lock for given binder_proc
212  * @proc:         struct binder_proc to acquire
213  *
214  * Acquires proc->outer_lock. Used to protect binder_ref
215  * structures associated with the given proc.
216  */
217 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
218 static void
219 _binder_proc_lock(struct binder_proc *proc, int line)
220 	__acquires(&proc->outer_lock)
221 {
222 	binder_debug(BINDER_DEBUG_SPINLOCKS,
223 		     "%s: line=%d\n", __func__, line);
224 	spin_lock(&proc->outer_lock);
225 }
226 
227 /**
228  * binder_proc_unlock() - Release spinlock for given binder_proc
229  * @proc:         struct binder_proc to release
230  *
231  * Release lock acquired via binder_proc_lock()
232  */
233 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
234 static void
235 _binder_proc_unlock(struct binder_proc *proc, int line)
236 	__releases(&proc->outer_lock)
237 {
238 	binder_debug(BINDER_DEBUG_SPINLOCKS,
239 		     "%s: line=%d\n", __func__, line);
240 	spin_unlock(&proc->outer_lock);
241 }
242 
243 /**
244  * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
245  * @proc:         struct binder_proc to acquire
246  *
247  * Acquires proc->inner_lock. Used to protect todo lists
248  */
249 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
250 static void
251 _binder_inner_proc_lock(struct binder_proc *proc, int line)
252 	__acquires(&proc->inner_lock)
253 {
254 	binder_debug(BINDER_DEBUG_SPINLOCKS,
255 		     "%s: line=%d\n", __func__, line);
256 	spin_lock(&proc->inner_lock);
257 }
258 
259 /**
260  * binder_inner_proc_unlock() - Release inner lock for given binder_proc
261  * @proc:         struct binder_proc to release
262  *
263  * Release lock acquired via binder_inner_proc_lock()
264  */
265 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
266 static void
267 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
268 	__releases(&proc->inner_lock)
269 {
270 	binder_debug(BINDER_DEBUG_SPINLOCKS,
271 		     "%s: line=%d\n", __func__, line);
272 	spin_unlock(&proc->inner_lock);
273 }
274 
275 /**
276  * binder_node_lock() - Acquire spinlock for given binder_node
277  * @node:         struct binder_node to acquire
278  *
279  * Acquires node->lock. Used to protect binder_node fields
280  */
281 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
282 static void
283 _binder_node_lock(struct binder_node *node, int line)
284 	__acquires(&node->lock)
285 {
286 	binder_debug(BINDER_DEBUG_SPINLOCKS,
287 		     "%s: line=%d\n", __func__, line);
288 	spin_lock(&node->lock);
289 }
290 
291 /**
292  * binder_node_unlock() - Release spinlock for given binder_node
293  * @node:         struct binder_node to release
294  *
295  * Release lock acquired via binder_node_lock()
296  */
297 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
298 static void
299 _binder_node_unlock(struct binder_node *node, int line)
300 	__releases(&node->lock)
301 {
302 	binder_debug(BINDER_DEBUG_SPINLOCKS,
303 		     "%s: line=%d\n", __func__, line);
304 	spin_unlock(&node->lock);
305 }
306 
307 /**
308  * binder_node_inner_lock() - Acquire node and inner locks
309  * @node:         struct binder_node to acquire
310  *
311  * Acquires node->lock. If node->proc is non-NULL, also acquires
312  * node->proc->inner_lock. Used to protect binder_node fields.
313  */
314 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
315 static void
316 _binder_node_inner_lock(struct binder_node *node, int line)
317 	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
318 {
319 	binder_debug(BINDER_DEBUG_SPINLOCKS,
320 		     "%s: line=%d\n", __func__, line);
321 	spin_lock(&node->lock);
322 	if (node->proc)
323 		binder_inner_proc_lock(node->proc);
324 	else
325 		/* annotation for sparse */
326 		__acquire(&node->proc->inner_lock);
327 }
328 
329 /**
330  * binder_node_inner_unlock() - Release node and inner locks
331  * @node:         struct binder_node to release
332  *
333  * Release locks acquired via binder_node_inner_lock()
334  */
335 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
336 static void
337 _binder_node_inner_unlock(struct binder_node *node, int line)
338 	__releases(&node->lock) __releases(&node->proc->inner_lock)
339 {
340 	struct binder_proc *proc = node->proc;
341 
342 	binder_debug(BINDER_DEBUG_SPINLOCKS,
343 		     "%s: line=%d\n", __func__, line);
344 	if (proc)
345 		binder_inner_proc_unlock(proc);
346 	else
347 		/* annotation for sparse */
348 		__release(&node->proc->inner_lock);
349 	spin_unlock(&node->lock);
350 }
351 
352 static bool binder_worklist_empty_ilocked(struct list_head *list)
353 {
354 	return list_empty(list);
355 }
356 
357 /**
358  * binder_worklist_empty() - Check if no items on the work list
359  * @proc:       binder_proc associated with list
360  * @list:	list to check
361  *
362  * Return: true if there are no items on list, else false
363  */
364 static bool binder_worklist_empty(struct binder_proc *proc,
365 				  struct list_head *list)
366 {
367 	bool ret;
368 
369 	binder_inner_proc_lock(proc);
370 	ret = binder_worklist_empty_ilocked(list);
371 	binder_inner_proc_unlock(proc);
372 	return ret;
373 }
374 
375 /**
376  * binder_enqueue_work_ilocked() - Add an item to the work list
377  * @work:         struct binder_work to add to list
378  * @target_list:  list to add work to
379  *
380  * Adds the work to the specified list. Asserts that work
381  * is not already on a list.
382  *
383  * Requires the proc->inner_lock to be held.
384  */
385 static void
386 binder_enqueue_work_ilocked(struct binder_work *work,
387 			   struct list_head *target_list)
388 {
389 	BUG_ON(target_list == NULL);
390 	BUG_ON(work->entry.next && !list_empty(&work->entry));
391 	list_add_tail(&work->entry, target_list);
392 }
393 
394 /**
395  * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
396  * @thread:       thread to queue work to
397  * @work:         struct binder_work to add to list
398  *
399  * Adds the work to the todo list of the thread. Doesn't set the process_todo
400  * flag, which means that (if it wasn't already set) the thread will go to
401  * sleep without handling this work when it calls read.
402  *
403  * Requires the proc->inner_lock to be held.
404  */
405 static void
406 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
407 					    struct binder_work *work)
408 {
409 	WARN_ON(!list_empty(&thread->waiting_thread_node));
410 	binder_enqueue_work_ilocked(work, &thread->todo);
411 }
412 
413 /**
414  * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
415  * @thread:       thread to queue work to
416  * @work:         struct binder_work to add to list
417  *
418  * Adds the work to the todo list of the thread, and enables processing
419  * of the todo queue.
420  *
421  * Requires the proc->inner_lock to be held.
422  */
423 static void
424 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
425 				   struct binder_work *work)
426 {
427 	WARN_ON(!list_empty(&thread->waiting_thread_node));
428 	binder_enqueue_work_ilocked(work, &thread->todo);
429 	thread->process_todo = true;
430 }
431 
432 /**
433  * binder_enqueue_thread_work() - Add an item to the thread work list
434  * @thread:       thread to queue work to
435  * @work:         struct binder_work to add to list
436  *
437  * Adds the work to the todo list of the thread, and enables processing
438  * of the todo queue.
439  */
440 static void
441 binder_enqueue_thread_work(struct binder_thread *thread,
442 			   struct binder_work *work)
443 {
444 	binder_inner_proc_lock(thread->proc);
445 	binder_enqueue_thread_work_ilocked(thread, work);
446 	binder_inner_proc_unlock(thread->proc);
447 }
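
/*
 * Illustrative call site (this pattern is used later in the file, e.g.
 * to queue a BINDER_WORK_TRANSACTION_COMPLETE item on the issuing
 * thread so it is handled on the next read):
 *
 *	binder_enqueue_thread_work(thread, &tcomplete->work);
 */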
448 
449 static void
450 binder_dequeue_work_ilocked(struct binder_work *work)
451 {
452 	list_del_init(&work->entry);
453 }
454 
455 /**
456  * binder_dequeue_work() - Removes an item from the work list
457  * @proc:         binder_proc associated with list
458  * @work:         struct binder_work to remove from list
459  *
460  * Removes the specified work item from whatever list it is on.
461  * Can safely be called if work is not on any list.
462  */
463 static void
464 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
465 {
466 	binder_inner_proc_lock(proc);
467 	binder_dequeue_work_ilocked(work);
468 	binder_inner_proc_unlock(proc);
469 }
470 
471 static struct binder_work *binder_dequeue_work_head_ilocked(
472 					struct list_head *list)
473 {
474 	struct binder_work *w;
475 
476 	w = list_first_entry_or_null(list, struct binder_work, entry);
477 	if (w)
478 		list_del_init(&w->entry);
479 	return w;
480 }
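
/*
 * Sketch of the usual consumer of the helper above (the release path in
 * this file drains worklists this way): pop one entry at a time under
 * the inner lock, dropping the lock while each item is handled.
 *
 *	struct binder_work *w;
 *
 *	binder_inner_proc_lock(proc);
 *	while ((w = binder_dequeue_work_head_ilocked(list))) {
 *		binder_inner_proc_unlock(proc);
 *		...handle or free w according to w->type...
 *		binder_inner_proc_lock(proc);
 *	}
 *	binder_inner_proc_unlock(proc);
 */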
481 
482 static void
483 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
484 static void binder_free_thread(struct binder_thread *thread);
485 static void binder_free_proc(struct binder_proc *proc);
486 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
487 
488 static bool binder_has_work_ilocked(struct binder_thread *thread,
489 				    bool do_proc_work)
490 {
491 	return thread->process_todo ||
492 		thread->looper_need_return ||
493 		(do_proc_work &&
494 		 !binder_worklist_empty_ilocked(&thread->proc->todo));
495 }
496 
497 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
498 {
499 	bool has_work;
500 
501 	binder_inner_proc_lock(thread->proc);
502 	has_work = binder_has_work_ilocked(thread, do_proc_work);
503 	binder_inner_proc_unlock(thread->proc);
504 
505 	return has_work;
506 }
507 
508 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
509 {
510 	return !thread->transaction_stack &&
511 		binder_worklist_empty_ilocked(&thread->todo) &&
512 		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
513 				   BINDER_LOOPER_STATE_REGISTERED));
514 }
515 
516 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
517 					       bool sync)
518 {
519 	struct rb_node *n;
520 	struct binder_thread *thread;
521 
522 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
523 		thread = rb_entry(n, struct binder_thread, rb_node);
524 		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
525 		    binder_available_for_proc_work_ilocked(thread)) {
526 			if (sync)
527 				wake_up_interruptible_sync(&thread->wait);
528 			else
529 				wake_up_interruptible(&thread->wait);
530 		}
531 	}
532 }
533 
534 /**
535  * binder_select_thread_ilocked() - selects a thread for doing proc work.
536  * @proc:	process to select a thread from
537  *
538  * Note that calling this function moves the thread off the waiting_threads
539  * list, so it can only be woken up by the caller of this function, or a
540  * signal. Therefore, callers *should* always wake up the thread this function
541  * returns.
542  *
543  * Return:	If there's a thread currently waiting for process work,
544  *		returns that thread. Otherwise returns NULL.
545  */
546 static struct binder_thread *
547 binder_select_thread_ilocked(struct binder_proc *proc)
548 {
549 	struct binder_thread *thread;
550 
551 	assert_spin_locked(&proc->inner_lock);
552 	thread = list_first_entry_or_null(&proc->waiting_threads,
553 					  struct binder_thread,
554 					  waiting_thread_node);
555 
556 	if (thread)
557 		list_del_init(&thread->waiting_thread_node);
558 
559 	return thread;
560 }
561 
562 /**
563  * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
564  * @proc:	process to wake up a thread in
565  * @thread:	specific thread to wake-up (may be NULL)
566  * @sync:	whether to do a synchronous wake-up
567  *
568  * This function wakes up a thread in the @proc process.
569  * The caller may provide a specific thread to wake-up in
570  * the @thread parameter. If @thread is NULL, this function
571  * will wake up threads that have called poll().
572  *
573  * Note that for this function to work as expected, callers
574  * should first call binder_select_thread() to find a thread
575  * to handle the work (if they don't have a thread already),
576  * and pass the result into the @thread parameter.
577  */
578 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
579 					 struct binder_thread *thread,
580 					 bool sync)
581 {
582 	assert_spin_locked(&proc->inner_lock);
583 
584 	if (thread) {
585 		if (sync)
586 			wake_up_interruptible_sync(&thread->wait);
587 		else
588 			wake_up_interruptible(&thread->wait);
589 		return;
590 	}
591 
592 	/* Didn't find a thread waiting for proc work; this can happen
593 	 * in two scenarios:
594 	 * 1. All threads are busy handling transactions
595 	 *    In that case, one of those threads should call back into
596 	 *    the kernel driver soon and pick up this work.
597 	 * 2. Threads are using the (e)poll interface, in which case
598 	 *    they may be blocked on the waitqueue without having been
599 	 *    added to waiting_threads. For this case, we just iterate
600 	 *    over all threads not handling transaction work, and
601 	 *    wake them all up. We wake all because we don't know whether
602 	 *    a thread that called into (e)poll is handling non-binder
603 	 *    work currently.
604 	 */
605 	binder_wakeup_poll_threads_ilocked(proc, sync);
606 }
607 
608 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
609 {
610 	struct binder_thread *thread = binder_select_thread_ilocked(proc);
611 
612 	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
613 }
614 
615 static void binder_set_nice(long nice)
616 {
617 	long min_nice;
618 
619 	if (can_nice(current, nice)) {
620 		set_user_nice(current, nice);
621 		return;
622 	}
623 	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
624 	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
625 		     "%d: nice value %ld not allowed use %ld instead\n",
626 		      current->pid, nice, min_nice);
627 	set_user_nice(current, min_nice);
628 	if (min_nice <= MAX_NICE)
629 		return;
630 	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
631 }
632 
633 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
634 						   binder_uintptr_t ptr)
635 {
636 	struct rb_node *n = proc->nodes.rb_node;
637 	struct binder_node *node;
638 
639 	assert_spin_locked(&proc->inner_lock);
640 
641 	while (n) {
642 		node = rb_entry(n, struct binder_node, rb_node);
643 
644 		if (ptr < node->ptr)
645 			n = n->rb_left;
646 		else if (ptr > node->ptr)
647 			n = n->rb_right;
648 		else {
649 			/*
650 			 * take an implicit weak reference
651 			 * to ensure node stays alive until
652 			 * call to binder_put_node()
653 			 */
654 			binder_inc_node_tmpref_ilocked(node);
655 			return node;
656 		}
657 	}
658 	return NULL;
659 }
660 
661 static struct binder_node *binder_get_node(struct binder_proc *proc,
662 					   binder_uintptr_t ptr)
663 {
664 	struct binder_node *node;
665 
666 	binder_inner_proc_lock(proc);
667 	node = binder_get_node_ilocked(proc, ptr);
668 	binder_inner_proc_unlock(proc);
669 	return node;
670 }
671 
672 static struct binder_node *binder_init_node_ilocked(
673 						struct binder_proc *proc,
674 						struct binder_node *new_node,
675 						struct flat_binder_object *fp)
676 {
677 	struct rb_node **p = &proc->nodes.rb_node;
678 	struct rb_node *parent = NULL;
679 	struct binder_node *node;
680 	binder_uintptr_t ptr = fp ? fp->binder : 0;
681 	binder_uintptr_t cookie = fp ? fp->cookie : 0;
682 	__u32 flags = fp ? fp->flags : 0;
683 
684 	assert_spin_locked(&proc->inner_lock);
685 
686 	while (*p) {
687 
688 		parent = *p;
689 		node = rb_entry(parent, struct binder_node, rb_node);
690 
691 		if (ptr < node->ptr)
692 			p = &(*p)->rb_left;
693 		else if (ptr > node->ptr)
694 			p = &(*p)->rb_right;
695 		else {
696 			/*
697 			 * A matching node is already in
698 			 * the rb tree. Abandon the init
699 			 * and return it.
700 			 */
701 			binder_inc_node_tmpref_ilocked(node);
702 			return node;
703 		}
704 	}
705 	node = new_node;
706 	binder_stats_created(BINDER_STAT_NODE);
707 	node->tmp_refs++;
708 	rb_link_node(&node->rb_node, parent, p);
709 	rb_insert_color(&node->rb_node, &proc->nodes);
710 	node->debug_id = atomic_inc_return(&binder_last_id);
711 	node->proc = proc;
712 	node->ptr = ptr;
713 	node->cookie = cookie;
714 	node->work.type = BINDER_WORK_NODE;
715 	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
716 	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
717 	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
718 	spin_lock_init(&node->lock);
719 	INIT_LIST_HEAD(&node->work.entry);
720 	INIT_LIST_HEAD(&node->async_todo);
721 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
722 		     "%d:%d node %d u%016llx c%016llx created\n",
723 		     proc->pid, current->pid, node->debug_id,
724 		     (u64)node->ptr, (u64)node->cookie);
725 
726 	return node;
727 }
728 
729 static struct binder_node *binder_new_node(struct binder_proc *proc,
730 					   struct flat_binder_object *fp)
731 {
732 	struct binder_node *node;
733 	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
734 
735 	if (!new_node)
736 		return NULL;
737 	binder_inner_proc_lock(proc);
738 	node = binder_init_node_ilocked(proc, new_node, fp);
739 	binder_inner_proc_unlock(proc);
740 	if (node != new_node)
741 		/*
742 		 * The node was already added by another thread
743 		 */
744 		kfree(new_node);
745 
746 	return node;
747 }
748 
749 static void binder_free_node(struct binder_node *node)
750 {
751 	kfree(node);
752 	binder_stats_deleted(BINDER_STAT_NODE);
753 }
754 
755 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
756 				    int internal,
757 				    struct list_head *target_list)
758 {
759 	struct binder_proc *proc = node->proc;
760 
761 	assert_spin_locked(&node->lock);
762 	if (proc)
763 		assert_spin_locked(&proc->inner_lock);
764 	if (strong) {
765 		if (internal) {
766 			if (target_list == NULL &&
767 			    node->internal_strong_refs == 0 &&
768 			    !(node->proc &&
769 			      node == node->proc->context->binder_context_mgr_node &&
770 			      node->has_strong_ref)) {
771 				pr_err("invalid inc strong node for %d\n",
772 					node->debug_id);
773 				return -EINVAL;
774 			}
775 			node->internal_strong_refs++;
776 		} else
777 			node->local_strong_refs++;
778 		if (!node->has_strong_ref && target_list) {
779 			struct binder_thread *thread = container_of(target_list,
780 						    struct binder_thread, todo);
781 			binder_dequeue_work_ilocked(&node->work);
782 			BUG_ON(&thread->todo != target_list);
783 			binder_enqueue_deferred_thread_work_ilocked(thread,
784 								   &node->work);
785 		}
786 	} else {
787 		if (!internal)
788 			node->local_weak_refs++;
789 		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
790 			if (target_list == NULL) {
791 				pr_err("invalid inc weak node for %d\n",
792 					node->debug_id);
793 				return -EINVAL;
794 			}
795 			/*
796 			 * See comment above
797 			 */
798 			binder_enqueue_work_ilocked(&node->work, target_list);
799 		}
800 	}
801 	return 0;
802 }
803 
804 static int binder_inc_node(struct binder_node *node, int strong, int internal,
805 			   struct list_head *target_list)
806 {
807 	int ret;
808 
809 	binder_node_inner_lock(node);
810 	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
811 	binder_node_inner_unlock(node);
812 
813 	return ret;
814 }
815 
816 static bool binder_dec_node_nilocked(struct binder_node *node,
817 				     int strong, int internal)
818 {
819 	struct binder_proc *proc = node->proc;
820 
821 	assert_spin_locked(&node->lock);
822 	if (proc)
823 		assert_spin_locked(&proc->inner_lock);
824 	if (strong) {
825 		if (internal)
826 			node->internal_strong_refs--;
827 		else
828 			node->local_strong_refs--;
829 		if (node->local_strong_refs || node->internal_strong_refs)
830 			return false;
831 	} else {
832 		if (!internal)
833 			node->local_weak_refs--;
834 		if (node->local_weak_refs || node->tmp_refs ||
835 				!hlist_empty(&node->refs))
836 			return false;
837 	}
838 
839 	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
840 		if (list_empty(&node->work.entry)) {
841 			binder_enqueue_work_ilocked(&node->work, &proc->todo);
842 			binder_wakeup_proc_ilocked(proc);
843 		}
844 	} else {
845 		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
846 		    !node->local_weak_refs && !node->tmp_refs) {
847 			if (proc) {
848 				binder_dequeue_work_ilocked(&node->work);
849 				rb_erase(&node->rb_node, &proc->nodes);
850 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
851 					     "refless node %d deleted\n",
852 					     node->debug_id);
853 			} else {
854 				BUG_ON(!list_empty(&node->work.entry));
855 				spin_lock(&binder_dead_nodes_lock);
856 				/*
857 				 * tmp_refs could have changed so
858 				 * check it again
859 				 */
860 				if (node->tmp_refs) {
861 					spin_unlock(&binder_dead_nodes_lock);
862 					return false;
863 				}
864 				hlist_del(&node->dead_node);
865 				spin_unlock(&binder_dead_nodes_lock);
866 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
867 					     "dead node %d deleted\n",
868 					     node->debug_id);
869 			}
870 			return true;
871 		}
872 	}
873 	return false;
874 }
875 
876 static void binder_dec_node(struct binder_node *node, int strong, int internal)
877 {
878 	bool free_node;
879 
880 	binder_node_inner_lock(node);
881 	free_node = binder_dec_node_nilocked(node, strong, internal);
882 	binder_node_inner_unlock(node);
883 	if (free_node)
884 		binder_free_node(node);
885 }
886 
887 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
888 {
889 	/*
890 	 * No call to binder_inc_node() is needed since we
891 	 * don't need to inform userspace of any changes to
892 	 * tmp_refs
893 	 */
894 	node->tmp_refs++;
895 }
896 
897 /**
898  * binder_inc_node_tmpref() - take a temporary reference on node
899  * @node:	node to reference
900  *
901  * Take reference on node to prevent the node from being freed
902  * while referenced only by a local variable. The inner lock is
903  * needed to serialize with the node work on the queue (which
904  * isn't needed after the node is dead). If the node is dead
905  * (node->proc is NULL), use binder_dead_nodes_lock to protect
906  * node->tmp_refs against dead-node-only cases where the node
907  * lock cannot be acquired (e.g. traversing the dead node list to
908  * print nodes).
909  */
910 static void binder_inc_node_tmpref(struct binder_node *node)
911 {
912 	binder_node_lock(node);
913 	if (node->proc)
914 		binder_inner_proc_lock(node->proc);
915 	else
916 		spin_lock(&binder_dead_nodes_lock);
917 	binder_inc_node_tmpref_ilocked(node);
918 	if (node->proc)
919 		binder_inner_proc_unlock(node->proc);
920 	else
921 		spin_unlock(&binder_dead_nodes_lock);
922 	binder_node_unlock(node);
923 }
924 
925 /**
926  * binder_dec_node_tmpref() - remove a temporary reference on node
927  * @node:	node to reference
928  *
929  * Release temporary reference on node taken via binder_inc_node_tmpref()
930  */
931 static void binder_dec_node_tmpref(struct binder_node *node)
932 {
933 	bool free_node;
934 
935 	binder_node_inner_lock(node);
936 	if (!node->proc)
937 		spin_lock(&binder_dead_nodes_lock);
938 	else
939 		__acquire(&binder_dead_nodes_lock);
940 	node->tmp_refs--;
941 	BUG_ON(node->tmp_refs < 0);
942 	if (!node->proc)
943 		spin_unlock(&binder_dead_nodes_lock);
944 	else
945 		__release(&binder_dead_nodes_lock);
946 	/*
947 	 * Call binder_dec_node() to check if all refcounts are 0
948 	 * and cleanup is needed. Calling with strong=0 and internal=1
949 	 * causes no actual reference to be released in binder_dec_node().
950 	 * If that changes, a change is needed here too.
951 	 */
952 	free_node = binder_dec_node_nilocked(node, 0, 1);
953 	binder_node_inner_unlock(node);
954 	if (free_node)
955 		binder_free_node(node);
956 }
957 
958 static void binder_put_node(struct binder_node *node)
959 {
960 	binder_dec_node_tmpref(node);
961 }
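
/*
 * Typical temporary-reference pattern built from the helpers above
 * (illustrative; binder_get_node() takes the tmpref implicitly):
 *
 *	struct binder_node *node = binder_get_node(proc, ptr);
 *
 *	if (node) {
 *		...node cannot be freed while the tmpref is held...
 *		binder_put_node(node);
 *	}
 */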
962 
963 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
964 						 u32 desc, bool need_strong_ref)
965 {
966 	struct rb_node *n = proc->refs_by_desc.rb_node;
967 	struct binder_ref *ref;
968 
969 	while (n) {
970 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
971 
972 		if (desc < ref->data.desc) {
973 			n = n->rb_left;
974 		} else if (desc > ref->data.desc) {
975 			n = n->rb_right;
976 		} else if (need_strong_ref && !ref->data.strong) {
977 			binder_user_error("tried to use weak ref as strong ref\n");
978 			return NULL;
979 		} else {
980 			return ref;
981 		}
982 	}
983 	return NULL;
984 }
985 
986 /**
987  * binder_get_ref_for_node_olocked() - get the ref associated with given node
988  * @proc:	binder_proc that owns the ref
989  * @node:	binder_node of target
990  * @new_ref:	newly allocated binder_ref to be initialized or %NULL
991  *
992  * Look up the ref for the given node and return it if it exists
993  *
994  * If it doesn't exist and the caller provides a newly allocated
995  * ref, initialize the fields of the newly allocated ref and insert
996  * into the given proc rb_trees and node refs list.
997  *
998  * Return:	the ref for node. It is possible that another thread
999  *		allocated/initialized the ref first in which case the
1000  *		returned ref would be different than the passed-in
1001  *		new_ref. new_ref must be kfree'd by the caller in
1002  *		this case.
1003  */
1004 static struct binder_ref *binder_get_ref_for_node_olocked(
1005 					struct binder_proc *proc,
1006 					struct binder_node *node,
1007 					struct binder_ref *new_ref)
1008 {
1009 	struct binder_context *context = proc->context;
1010 	struct rb_node **p = &proc->refs_by_node.rb_node;
1011 	struct rb_node *parent = NULL;
1012 	struct binder_ref *ref;
1013 	struct rb_node *n;
1014 
1015 	while (*p) {
1016 		parent = *p;
1017 		ref = rb_entry(parent, struct binder_ref, rb_node_node);
1018 
1019 		if (node < ref->node)
1020 			p = &(*p)->rb_left;
1021 		else if (node > ref->node)
1022 			p = &(*p)->rb_right;
1023 		else
1024 			return ref;
1025 	}
1026 	if (!new_ref)
1027 		return NULL;
1028 
1029 	binder_stats_created(BINDER_STAT_REF);
1030 	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1031 	new_ref->proc = proc;
1032 	new_ref->node = node;
1033 	rb_link_node(&new_ref->rb_node_node, parent, p);
1034 	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1035 
1036 	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1037 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1038 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1039 		if (ref->data.desc > new_ref->data.desc)
1040 			break;
1041 		new_ref->data.desc = ref->data.desc + 1;
1042 	}
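
	/*
	 * Worked example of the scan above: with existing descriptors
	 * {1, 2, 4}, the loop advances the candidate to 2, then 3, then
	 * sees 4 > 3 and breaks, so the lowest unused descriptor (3) is
	 * chosen. Descriptor 0 is reserved for the context manager node.
	 */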
1043 
1044 	p = &proc->refs_by_desc.rb_node;
1045 	while (*p) {
1046 		parent = *p;
1047 		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1048 
1049 		if (new_ref->data.desc < ref->data.desc)
1050 			p = &(*p)->rb_left;
1051 		else if (new_ref->data.desc > ref->data.desc)
1052 			p = &(*p)->rb_right;
1053 		else
1054 			BUG();
1055 	}
1056 	rb_link_node(&new_ref->rb_node_desc, parent, p);
1057 	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1058 
1059 	binder_node_lock(node);
1060 	hlist_add_head(&new_ref->node_entry, &node->refs);
1061 
1062 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1063 		     "%d new ref %d desc %d for node %d\n",
1064 		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1065 		      node->debug_id);
1066 	binder_node_unlock(node);
1067 	return new_ref;
1068 }
1069 
1070 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1071 {
1072 	bool delete_node = false;
1073 
1074 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1075 		     "%d delete ref %d desc %d for node %d\n",
1076 		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
1077 		      ref->node->debug_id);
1078 
1079 	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1080 	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1081 
1082 	binder_node_inner_lock(ref->node);
1083 	if (ref->data.strong)
1084 		binder_dec_node_nilocked(ref->node, 1, 1);
1085 
1086 	hlist_del(&ref->node_entry);
1087 	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1088 	binder_node_inner_unlock(ref->node);
1089 	/*
1090 	 * Clear ref->node unless we want the caller to free the node
1091 	 */
1092 	if (!delete_node) {
1093 		/*
1094 		 * The caller uses ref->node to determine
1095 		 * whether the node needs to be freed. Clear
1096 		 * it since the node is still alive.
1097 		 */
1098 		ref->node = NULL;
1099 	}
1100 
1101 	if (ref->death) {
1102 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1103 			     "%d delete ref %d desc %d has death notification\n",
1104 			      ref->proc->pid, ref->data.debug_id,
1105 			      ref->data.desc);
1106 		binder_dequeue_work(ref->proc, &ref->death->work);
1107 		binder_stats_deleted(BINDER_STAT_DEATH);
1108 	}
1109 	binder_stats_deleted(BINDER_STAT_REF);
1110 }
1111 
1112 /**
1113  * binder_inc_ref_olocked() - increment the ref for given handle
1114  * @ref:         ref to be incremented
1115  * @strong:      if true, strong increment, else weak
1116  * @target_list: list to queue node work on
1117  *
1118  * Increment the ref. @ref->proc->outer_lock must be held on entry
1119  *
1120  * Return: 0, if successful, else errno
1121  */
1122 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1123 				  struct list_head *target_list)
1124 {
1125 	int ret;
1126 
1127 	if (strong) {
1128 		if (ref->data.strong == 0) {
1129 			ret = binder_inc_node(ref->node, 1, 1, target_list);
1130 			if (ret)
1131 				return ret;
1132 		}
1133 		ref->data.strong++;
1134 	} else {
1135 		if (ref->data.weak == 0) {
1136 			ret = binder_inc_node(ref->node, 0, 1, target_list);
1137 			if (ret)
1138 				return ret;
1139 		}
1140 		ref->data.weak++;
1141 	}
1142 	return 0;
1143 }
1144 
1145 /**
1146  * binder_dec_ref_olocked() - dec the ref for given handle
1147  * @ref:	ref to be decremented
1148  * @strong:	if true, strong decrement, else weak
1149  *
1150  * Decrement the ref. @ref->proc->outer_lock must be held on entry.
1151  *
1152  * Return: true if ref is cleaned up and ready to be freed
1153  */
1154 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1155 {
1156 	if (strong) {
1157 		if (ref->data.strong == 0) {
1158 			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1159 					  ref->proc->pid, ref->data.debug_id,
1160 					  ref->data.desc, ref->data.strong,
1161 					  ref->data.weak);
1162 			return false;
1163 		}
1164 		ref->data.strong--;
1165 		if (ref->data.strong == 0)
1166 			binder_dec_node(ref->node, strong, 1);
1167 	} else {
1168 		if (ref->data.weak == 0) {
1169 			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1170 					  ref->proc->pid, ref->data.debug_id,
1171 					  ref->data.desc, ref->data.strong,
1172 					  ref->data.weak);
1173 			return false;
1174 		}
1175 		ref->data.weak--;
1176 	}
1177 	if (ref->data.strong == 0 && ref->data.weak == 0) {
1178 		binder_cleanup_ref_olocked(ref);
1179 		return true;
1180 	}
1181 	return false;
1182 }
1183 
1184 /**
1185  * binder_get_node_from_ref() - get the node from the given proc/desc
1186  * @proc:	proc containing the ref
1187  * @desc:	the handle associated with the ref
1188  * @need_strong_ref: if true, only return node if ref is strong
1189  * @rdata:	the id/refcount data for the ref
1190  *
1191  * Given a proc and ref handle, return the associated binder_node
1192  *
1193  * Return: a binder_node or NULL if not found or not strong when strong required
1194  */
1195 static struct binder_node *binder_get_node_from_ref(
1196 		struct binder_proc *proc,
1197 		u32 desc, bool need_strong_ref,
1198 		struct binder_ref_data *rdata)
1199 {
1200 	struct binder_node *node;
1201 	struct binder_ref *ref;
1202 
1203 	binder_proc_lock(proc);
1204 	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1205 	if (!ref)
1206 		goto err_no_ref;
1207 	node = ref->node;
1208 	/*
1209 	 * Take an implicit reference on the node to ensure
1210 	 * it stays alive until the call to binder_put_node()
1211 	 */
1212 	binder_inc_node_tmpref(node);
1213 	if (rdata)
1214 		*rdata = ref->data;
1215 	binder_proc_unlock(proc);
1216 
1217 	return node;
1218 
1219 err_no_ref:
1220 	binder_proc_unlock(proc);
1221 	return NULL;
1222 }
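
/*
 * Illustrative use of the lookup above (this mirrors the object
 * translation paths later in the file): resolve a handle to its node,
 * use it, then drop the temporary reference.
 *
 *	struct binder_ref_data rdata;
 *	struct binder_node *node;
 *
 *	node = binder_get_node_from_ref(proc, fp->handle, true, &rdata);
 *	if (!node)
 *		return -EINVAL;
 *	...translate the object...
 *	binder_put_node(node);
 */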
1223 
1224 /**
1225  * binder_free_ref() - free the binder_ref
1226  * @ref:	ref to free
1227  *
1228  * Free the binder_ref. Free the binder_node indicated by ref->node
1229  * (if non-NULL) and the binder_ref_death indicated by ref->death.
1230  */
1231 static void binder_free_ref(struct binder_ref *ref)
1232 {
1233 	if (ref->node)
1234 		binder_free_node(ref->node);
1235 	kfree(ref->death);
1236 	kfree(ref);
1237 }
1238 
1239 /**
1240  * binder_update_ref_for_handle() - inc/dec the ref for given handle
1241  * @proc:	proc containing the ref
1242  * @desc:	the handle associated with the ref
1243  * @increment:	true=inc reference, false=dec reference
1244  * @strong:	true=strong reference, false=weak reference
1245  * @rdata:	the id/refcount data for the ref
1246  *
1247  * Given a proc and ref handle, increment or decrement the ref
1248  * according to "increment" arg.
1249  *
1250  * Return: 0 if successful, else errno
1251  */
1252 static int binder_update_ref_for_handle(struct binder_proc *proc,
1253 		uint32_t desc, bool increment, bool strong,
1254 		struct binder_ref_data *rdata)
1255 {
1256 	int ret = 0;
1257 	struct binder_ref *ref;
1258 	bool delete_ref = false;
1259 
1260 	binder_proc_lock(proc);
1261 	ref = binder_get_ref_olocked(proc, desc, strong);
1262 	if (!ref) {
1263 		ret = -EINVAL;
1264 		goto err_no_ref;
1265 	}
1266 	if (increment)
1267 		ret = binder_inc_ref_olocked(ref, strong, NULL);
1268 	else
1269 		delete_ref = binder_dec_ref_olocked(ref, strong);
1270 
1271 	if (rdata)
1272 		*rdata = ref->data;
1273 	binder_proc_unlock(proc);
1274 
1275 	if (delete_ref)
1276 		binder_free_ref(ref);
1277 	return ret;
1278 
1279 err_no_ref:
1280 	binder_proc_unlock(proc);
1281 	return ret;
1282 }
1283 
1284 /**
1285  * binder_dec_ref_for_handle() - dec the ref for given handle
1286  * @proc:	proc containing the ref
1287  * @desc:	the handle associated with the ref
1288  * @strong:	true=strong reference, false=weak reference
1289  * @rdata:	the id/refcount data for the ref
1290  *
1291  * Just calls binder_update_ref_for_handle() to decrement the ref.
1292  *
1293  * Return: 0 if successful, else errno
1294  */
1295 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1296 		uint32_t desc, bool strong, struct binder_ref_data *rdata)
1297 {
1298 	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1299 }
1300 
1301 
1302 /**
1303  * binder_inc_ref_for_node() - increment the ref for given proc/node
1304  * @proc:	 proc containing the ref
1305  * @node:	 target node
1306  * @strong:	 true=strong reference, false=weak reference
1307  * @target_list: worklist to use if node is incremented
1308  * @rdata:	 the id/refcount data for the ref
1309  *
1310  * Given a proc and node, increment the ref. Create the ref if it
1311  * doesn't already exist
1312  *
1313  * Return: 0 if successful, else errno
1314  */
1315 static int binder_inc_ref_for_node(struct binder_proc *proc,
1316 			struct binder_node *node,
1317 			bool strong,
1318 			struct list_head *target_list,
1319 			struct binder_ref_data *rdata)
1320 {
1321 	struct binder_ref *ref;
1322 	struct binder_ref *new_ref = NULL;
1323 	int ret = 0;
1324 
1325 	binder_proc_lock(proc);
1326 	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1327 	if (!ref) {
1328 		binder_proc_unlock(proc);
1329 		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1330 		if (!new_ref)
1331 			return -ENOMEM;
1332 		binder_proc_lock(proc);
1333 		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1334 	}
1335 	ret = binder_inc_ref_olocked(ref, strong, target_list);
1336 	*rdata = ref->data;
1337 	binder_proc_unlock(proc);
1338 	if (new_ref && ref != new_ref)
1339 		/*
1340 		 * Another thread created the ref first so
1341 		 * free the one we allocated
1342 		 */
1343 		kfree(new_ref);
1344 	return ret;
1345 }
1346 
1347 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1348 					   struct binder_transaction *t)
1349 {
1350 	BUG_ON(!target_thread);
1351 	assert_spin_locked(&target_thread->proc->inner_lock);
1352 	BUG_ON(target_thread->transaction_stack != t);
1353 	BUG_ON(target_thread->transaction_stack->from != target_thread);
1354 	target_thread->transaction_stack =
1355 		target_thread->transaction_stack->from_parent;
1356 	t->from = NULL;
1357 }
1358 
1359 /**
1360  * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1361  * @thread:	thread to decrement
1362  *
1363  * A thread needs to be kept alive while being used to create or
1364  * handle a transaction. binder_get_txn_from() is used to safely
1365  * extract t->from from a binder_transaction and keep the thread
1366  * indicated by t->from from being freed. When done with that
1367  * binder_thread, this function is called to decrement the
1368  * tmp_ref and free if appropriate (thread has been released
1369  * and no transaction being processed by the driver)
1370  */
1371 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1372 {
1373 	/*
1374 	 * tmp_ref is atomic, so it can be incremented without the inner
1375 	 * lock; it is held here so the is_dead check cannot race with release
1376 	 */
1377 	binder_inner_proc_lock(thread->proc);
1378 	atomic_dec(&thread->tmp_ref);
1379 	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1380 		binder_inner_proc_unlock(thread->proc);
1381 		binder_free_thread(thread);
1382 		return;
1383 	}
1384 	binder_inner_proc_unlock(thread->proc);
1385 }
1386 
1387 /**
1388  * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1389  * @proc:	proc to decrement
1390  *
1391  * A binder_proc needs to be kept alive while being used to create or
1392  * handle a transaction. proc->tmp_ref is incremented when
1393  * creating a new transaction or the binder_proc is currently in-use
1394  * by threads that are being released. When done with the binder_proc,
1395  * this function is called to decrement the counter and free the
1396  * proc if appropriate (proc has been released, all threads have
1397  * been released and not currenly in-use to process a transaction).
1398  */
1399 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1400 {
1401 	binder_inner_proc_lock(proc);
1402 	proc->tmp_ref--;
1403 	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1404 			!proc->tmp_ref) {
1405 		binder_inner_proc_unlock(proc);
1406 		binder_free_proc(proc);
1407 		return;
1408 	}
1409 	binder_inner_proc_unlock(proc);
1410 }
1411 
1412 /**
1413  * binder_get_txn_from() - safely extract the "from" thread in transaction
1414  * @t:	binder transaction for t->from
1415  *
1416  * Atomically return the "from" thread and increment the tmp_ref
1417  * count for the thread to ensure it stays alive until
1418  * binder_thread_dec_tmpref() is called.
1419  *
1420  * Return: the value of t->from
1421  */
1422 static struct binder_thread *binder_get_txn_from(
1423 		struct binder_transaction *t)
1424 {
1425 	struct binder_thread *from;
1426 
1427 	spin_lock(&t->lock);
1428 	from = t->from;
1429 	if (from)
1430 		atomic_inc(&from->tmp_ref);
1431 	spin_unlock(&t->lock);
1432 	return from;
1433 }
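
/*
 * Sketch of the pairing described above: every successful
 * binder_get_txn_from() must be balanced by binder_thread_dec_tmpref()
 * once the caller is done with the thread.
 *
 *	struct binder_thread *from = binder_get_txn_from(t);
 *
 *	if (from) {
 *		...from cannot be freed while the tmp_ref is held...
 *		binder_thread_dec_tmpref(from);
 *	}
 */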
1434 
1435 /**
1436  * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1437  * @t:	binder transaction for t->from
1438  *
1439  * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1440  * to guarantee that the thread cannot be released while operating on it.
1441  * The caller must call binder_inner_proc_unlock() to release the inner lock
1442  * as well as call binder_thread_dec_tmpref() to release the reference.
1443  *
1444  * Return: the value of t->from
1445  */
1446 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1447 		struct binder_transaction *t)
1448 	__acquires(&t->from->proc->inner_lock)
1449 {
1450 	struct binder_thread *from;
1451 
1452 	from = binder_get_txn_from(t);
1453 	if (!from) {
1454 		__acquire(&from->proc->inner_lock);
1455 		return NULL;
1456 	}
1457 	binder_inner_proc_lock(from->proc);
1458 	if (t->from) {
1459 		BUG_ON(from != t->from);
1460 		return from;
1461 	}
1462 	binder_inner_proc_unlock(from->proc);
1463 	__acquire(&from->proc->inner_lock);
1464 	binder_thread_dec_tmpref(from);
1465 	return NULL;
1466 }
1467 
1468 /**
1469  * binder_free_txn_fixups() - free unprocessed fd fixups
1470  * @t:	binder transaction whose fd fixups are to be freed
1471  *
1472  * If the transaction is being torn down prior to being
1473  * processed by the target process, free all of the
1474  * fd fixups and fput the file structs. It is safe to
1475  * call this function after the fixups have been
1476  * processed -- in that case, the list will be empty.
1477  */
1478 static void binder_free_txn_fixups(struct binder_transaction *t)
1479 {
1480 	struct binder_txn_fd_fixup *fixup, *tmp;
1481 
1482 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1483 		fput(fixup->file);
1484 		list_del(&fixup->fixup_entry);
1485 		kfree(fixup);
1486 	}
1487 }
1488 
1489 static void binder_txn_latency_free(struct binder_transaction *t)
1490 {
1491 	int from_proc, from_thread, to_proc, to_thread;
1492 
1493 	spin_lock(&t->lock);
1494 	from_proc = t->from ? t->from->proc->pid : 0;
1495 	from_thread = t->from ? t->from->pid : 0;
1496 	to_proc = t->to_proc ? t->to_proc->pid : 0;
1497 	to_thread = t->to_thread ? t->to_thread->pid : 0;
1498 	spin_unlock(&t->lock);
1499 
1500 	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
1501 }
1502 
1503 static void binder_free_transaction(struct binder_transaction *t)
1504 {
1505 	struct binder_proc *target_proc = t->to_proc;
1506 
1507 	if (target_proc) {
1508 		binder_inner_proc_lock(target_proc);
1509 		target_proc->outstanding_txns--;
1510 		if (target_proc->outstanding_txns < 0)
1511 			pr_warn("%s: Unexpected outstanding_txns %d\n",
1512 				__func__, target_proc->outstanding_txns);
1513 		if (!target_proc->outstanding_txns && target_proc->is_frozen)
1514 			wake_up_interruptible_all(&target_proc->freeze_wait);
1515 		if (t->buffer)
1516 			t->buffer->transaction = NULL;
1517 		binder_inner_proc_unlock(target_proc);
1518 	}
1519 	if (trace_binder_txn_latency_free_enabled())
1520 		binder_txn_latency_free(t);
1521 	/*
1522 	 * If the transaction has no target_proc, then
1523 	 * t->buffer->transaction has already been cleared.
1524 	 */
1525 	binder_free_txn_fixups(t);
1526 	kfree(t);
1527 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
1528 }
1529 
1530 static void binder_send_failed_reply(struct binder_transaction *t,
1531 				     uint32_t error_code)
1532 {
1533 	struct binder_thread *target_thread;
1534 	struct binder_transaction *next;
1535 
1536 	BUG_ON(t->flags & TF_ONE_WAY);
1537 	while (1) {
1538 		target_thread = binder_get_txn_from_and_acq_inner(t);
1539 		if (target_thread) {
1540 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1541 				     "send failed reply for transaction %d to %d:%d\n",
1542 				      t->debug_id,
1543 				      target_thread->proc->pid,
1544 				      target_thread->pid);
1545 
1546 			binder_pop_transaction_ilocked(target_thread, t);
1547 			if (target_thread->reply_error.cmd == BR_OK) {
1548 				target_thread->reply_error.cmd = error_code;
1549 				binder_enqueue_thread_work_ilocked(
1550 					target_thread,
1551 					&target_thread->reply_error.work);
1552 				wake_up_interruptible(&target_thread->wait);
1553 			} else {
1554 				/*
1555 				 * Cannot get here for normal operation, but
1556 				 * we can if multiple synchronous transactions
1557 				 * are sent without blocking for responses.
1558 				 * Just ignore the 2nd error in this case.
1559 				 */
1560 				pr_warn("Unexpected reply error: %u\n",
1561 					target_thread->reply_error.cmd);
1562 			}
1563 			binder_inner_proc_unlock(target_thread->proc);
1564 			binder_thread_dec_tmpref(target_thread);
1565 			binder_free_transaction(t);
1566 			return;
1567 		}
1568 		__release(&target_thread->proc->inner_lock);
1569 		next = t->from_parent;
1570 
1571 		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1572 			     "send failed reply for transaction %d, target dead\n",
1573 			     t->debug_id);
1574 
1575 		binder_free_transaction(t);
1576 		if (next == NULL) {
1577 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
1578 				     "reply failed, no target thread at root\n");
1579 			return;
1580 		}
1581 		t = next;
1582 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1583 			     "reply failed, no target thread -- retry %d\n",
1584 			      t->debug_id);
1585 	}
1586 }
1587 
1588 /**
1589  * binder_cleanup_transaction() - cleans up undelivered transaction
1590  * @t:		transaction that needs to be cleaned up
1591  * @reason:	reason the transaction wasn't delivered
1592  * @error_code:	error to return to caller (if synchronous call)
1593  */
1594 static void binder_cleanup_transaction(struct binder_transaction *t,
1595 				       const char *reason,
1596 				       uint32_t error_code)
1597 {
1598 	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1599 		binder_send_failed_reply(t, error_code);
1600 	} else {
1601 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1602 			"undelivered transaction %d, %s\n",
1603 			t->debug_id, reason);
1604 		binder_free_transaction(t);
1605 	}
1606 }
1607 
1608 /**
1609  * binder_get_object() - gets object and checks for valid metadata
1610  * @proc:	binder_proc owning the buffer
1611  * @buffer:	binder_buffer that we're parsing.
1612  * @offset:	offset in the @buffer at which to validate an object.
1613  * @object:	struct binder_object to read into
1614  *
1615  * Return:	If there's a valid metadata object at @offset in @buffer, the
1616  *		size of that object. Otherwise, it returns zero. The object
1617  *		is read into the struct binder_object pointed to by @object.
1618  */
1619 static size_t binder_get_object(struct binder_proc *proc,
1620 				struct binder_buffer *buffer,
1621 				unsigned long offset,
1622 				struct binder_object *object)
1623 {
1624 	size_t read_size;
1625 	struct binder_object_header *hdr;
1626 	size_t object_size = 0;
1627 
1628 	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1629 	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
1630 	    binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1631 					  offset, read_size))
1632 		return 0;
1633 
1634 	/* Ok, now see if we read a complete object. */
1635 	hdr = &object->hdr;
1636 	switch (hdr->type) {
1637 	case BINDER_TYPE_BINDER:
1638 	case BINDER_TYPE_WEAK_BINDER:
1639 	case BINDER_TYPE_HANDLE:
1640 	case BINDER_TYPE_WEAK_HANDLE:
1641 		object_size = sizeof(struct flat_binder_object);
1642 		break;
1643 	case BINDER_TYPE_FD:
1644 		object_size = sizeof(struct binder_fd_object);
1645 		break;
1646 	case BINDER_TYPE_PTR:
1647 		object_size = sizeof(struct binder_buffer_object);
1648 		break;
1649 	case BINDER_TYPE_FDA:
1650 		object_size = sizeof(struct binder_fd_array_object);
1651 		break;
1652 	default:
1653 		return 0;
1654 	}
1655 	if (offset <= buffer->data_size - object_size &&
1656 	    buffer->data_size >= object_size)
1657 		return object_size;
1658 	else
1659 		return 0;
1660 }
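
/*
 * Illustrative caller pattern (the transaction path in this file walks
 * the offsets array this way): copy one offset out of the buffer, then
 * let binder_get_object() validate and read the object it names.
 *
 *	struct binder_object object;
 *	binder_size_t object_offset;
 *	size_t object_size;
 *
 *	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
 *					  buffer, buffer_offset,
 *					  sizeof(object_offset)))
 *		return -EINVAL;
 *	object_size = binder_get_object(proc, buffer, object_offset,
 *					&object);
 *	if (!object_size)
 *		return -EINVAL;	// no valid object at that offset
 */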
1661 
1662 /**
1663  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1664  * @proc:	binder_proc owning the buffer
1665  * @b:		binder_buffer containing the object
1666  * @object:	struct binder_object to read into
1667  * @index:	index in offset array at which the binder_buffer_object is
1668  *		located
1669  * @start_offset: points to the start of the offset array
1670  * @object_offsetp: offset of @object read from @b
1671  * @num_valid:	the number of valid offsets in the offset array
1672  *
1673  * Return:	If @index is within the valid range of the offset array
1674  *		described by @start and @num_valid, and if there's a valid
1675  *		binder_buffer_object at the offset found in index @index
1676  *		of the offset array, that object is returned. Otherwise,
1677  *		%NULL is returned.
1678  *		Note that the offset found in index @index itself is not
1679  *		verified; this function assumes that @num_valid elements
1680  *		from @start were previously verified to have valid offsets.
1681  *		If @object_offsetp is non-NULL, then the offset within
1682  *		@b is written to it.
1683  */
1684 static struct binder_buffer_object *binder_validate_ptr(
1685 						struct binder_proc *proc,
1686 						struct binder_buffer *b,
1687 						struct binder_object *object,
1688 						binder_size_t index,
1689 						binder_size_t start_offset,
1690 						binder_size_t *object_offsetp,
1691 						binder_size_t num_valid)
1692 {
1693 	size_t object_size;
1694 	binder_size_t object_offset;
1695 	unsigned long buffer_offset;
1696 
1697 	if (index >= num_valid)
1698 		return NULL;
1699 
1700 	buffer_offset = start_offset + sizeof(binder_size_t) * index;
1701 	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1702 					  b, buffer_offset,
1703 					  sizeof(object_offset)))
1704 		return NULL;
1705 	object_size = binder_get_object(proc, b, object_offset, object);
1706 	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1707 		return NULL;
1708 	if (object_offsetp)
1709 		*object_offsetp = object_offset;
1710 
1711 	return &object->bbo;
1712 }
1713 
1714 /**
1715  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1716  * @proc:		binder_proc owning the buffer
1717  * @b:			transaction buffer
1718  * @objects_start_offset: offset to start of objects buffer
1719  * @buffer_obj_offset:	offset to the binder_buffer_object being fixed up
1720  * @fixup_offset:	start offset in @b to fix up
1721  * @last_obj_offset:	offset to last binder_buffer_object that we fixed up
1722  * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
1723  *
1724  * Return:		%true if a fixup in buffer @b at offset @fixup_offset
1725  *			is allowed.
1726  *
1727  * For safety reasons, we only allow fixups inside a buffer to happen
1728  * at increasing offsets; additionally, we only allow fixup on the last
1729  * buffer object that was verified, or one of its parents.
1730  *
1731  * Example of what is allowed:
1732  *
1733  * A
1734  *   B (parent = A, offset = 0)
1735  *   C (parent = A, offset = 16)
1736  *     D (parent = C, offset = 0)
1737  *   E (parent = A, offset = 32) // min_offset is 24 (C.parent_offset + sizeof(uintptr_t))
1738  *
1739  * Examples of what is not allowed:
1740  *
1741  * Decreasing offsets within the same parent:
1742  * A
1743  *   C (parent = A, offset = 16)
1744  *   B (parent = A, offset = 0) // decreasing offset within A
1745  *
1746  * Referring to a parent that wasn't the last object or any of its parents:
1747  * A
1748  *   B (parent = A, offset = 0)
1749  *   C (parent = A, offset = 0)
1750  *   D (parent = A, offset = 16)
1751  *     E (parent = B, offset = 0) // B is not D or any of D's parents
1752  */
1753 static bool binder_validate_fixup(struct binder_proc *proc,
1754 				  struct binder_buffer *b,
1755 				  binder_size_t objects_start_offset,
1756 				  binder_size_t buffer_obj_offset,
1757 				  binder_size_t fixup_offset,
1758 				  binder_size_t last_obj_offset,
1759 				  binder_size_t last_min_offset)
1760 {
1761 	if (!last_obj_offset) {
1762 		/* No objects have been verified yet, so no fixup is allowed */
1763 		return false;
1764 	}
1765 
1766 	while (last_obj_offset != buffer_obj_offset) {
1767 		unsigned long buffer_offset;
1768 		struct binder_object last_object;
1769 		struct binder_buffer_object *last_bbo;
1770 		size_t object_size = binder_get_object(proc, b, last_obj_offset,
1771 						       &last_object);
1772 		if (object_size != sizeof(*last_bbo))
1773 			return false;
1774 
1775 		last_bbo = &last_object.bbo;
1776 		/*
1777 		 * Safe to retrieve the parent of last_obj, since it
1778 		 * was already previously verified by the driver.
1779 		 */
1780 		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1781 			return false;
1782 		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1783 		buffer_offset = objects_start_offset +
1784 			sizeof(binder_size_t) * last_bbo->parent;
1785 		if (binder_alloc_copy_from_buffer(&proc->alloc,
1786 						  &last_obj_offset,
1787 						  b, buffer_offset,
1788 						  sizeof(last_obj_offset)))
1789 			return false;
1790 	}
1791 	return (fixup_offset >= last_min_offset);
1792 }
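/*
 * Walk-through of the "allowed" example in the comment above, assuming
 * a 64-bit kernel where sizeof(uintptr_t) == 8: validating E's fixup
 * into A with last_obj_offset at D, the first loop iteration reads D
 * (last_min_offset becomes D.parent_offset + 8 == 8) and climbs to C;
 * the second reads C (last_min_offset becomes 16 + 8 == 24) and climbs
 * to A, which matches buffer_obj_offset. E's fixup_offset of 32 is
 * >= 24, so the fixup is accepted; anything below 24 would overlap the
 * pointer slot already patched for C and be rejected.
 */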
1793 
1794 /**
1795  * struct binder_task_work_cb - for deferred close
1796  *
1797  * @twork:                callback_head for task work
1798  * @file:                 file to close via deferred fput()
1799  *
1800  * Structure to pass task work to be handled after
1801  * returning from binder_ioctl() via task_work_add().
1802  */
1803 struct binder_task_work_cb {
1804 	struct callback_head twork;
1805 	struct file *file;
1806 };
1807 
1808 /**
1809  * binder_do_fd_close() - close the file scheduled for deferred close
1810  * @twork:	callback head for task work
1811  *
1812  * It is not safe to close a file descriptor during the binder_ioctl()
1813  * function if there is a chance that binder's own file descriptor
1814  * might be closed. This is to meet the requirements for using
1815  * fdget() (see comments for __fget_light()). Therefore use
1816  * task_work_add() to schedule the close operation once we have
1817  * returned from binder_ioctl(). This function is the callback
1818  * for that mechanism and drops the final reference on the
1819  * file with fput().
1820  */
1821 static void binder_do_fd_close(struct callback_head *twork)
1822 {
1823 	struct binder_task_work_cb *twcb = container_of(twork,
1824 			struct binder_task_work_cb, twork);
1825 
1826 	fput(twcb->file);
1827 	kfree(twcb);
1828 }
1829 
1830 /**
1831  * binder_deferred_fd_close() - schedule a close for the given file-descriptor
1832  * @fd:		file-descriptor to close
1833  *
1834  * See comments in binder_do_fd_close(). This function is used to schedule
1835  * a file-descriptor to be closed after returning from binder_ioctl().
1836  */
1837 static void binder_deferred_fd_close(int fd)
1838 {
1839 	struct binder_task_work_cb *twcb;
1840 
1841 	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
1842 	if (!twcb)
1843 		return;
1844 	init_task_work(&twcb->twork, binder_do_fd_close);
1845 	close_fd_get_file(fd, &twcb->file);
1846 	if (twcb->file) {
1847 		filp_close(twcb->file, current->files);
1848 		task_work_add(current, &twcb->twork, TWA_RESUME);
1849 	} else {
1850 		kfree(twcb);
1851 	}
1852 }
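/*
 * Note on the sequence above: close_fd_get_file() detaches @fd from the
 * fd table and returns the file with an extra reference held,
 * filp_close() runs the flush operation and drops the fd-table
 * reference synchronously, and the remaining reference is dropped by
 * the deferred fput() in binder_do_fd_close() once binder_ioctl() has
 * returned.
 */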
1853 
1854 static void binder_transaction_buffer_release(struct binder_proc *proc,
1855 					      struct binder_thread *thread,
1856 					      struct binder_buffer *buffer,
1857 					      binder_size_t failed_at,
1858 					      bool is_failure)
1859 {
1860 	int debug_id = buffer->debug_id;
1861 	binder_size_t off_start_offset, buffer_offset, off_end_offset;
1862 
1863 	binder_debug(BINDER_DEBUG_TRANSACTION,
1864 		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
1865 		     proc->pid, buffer->debug_id,
1866 		     buffer->data_size, buffer->offsets_size,
1867 		     (unsigned long long)failed_at);
1868 
1869 	if (buffer->target_node)
1870 		binder_dec_node(buffer->target_node, 1, 0);
1871 
1872 	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
1873 	off_end_offset = is_failure && failed_at ? failed_at :
1874 				off_start_offset + buffer->offsets_size;
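	/*
	 * For a failed transaction, a non-zero failed_at caps the walk
	 * below at the last object that was actually processed, so only
	 * translations that really happened are undone; a normal release
	 * walks the entire offsets array.
	 */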
1875 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
1876 	     buffer_offset += sizeof(binder_size_t)) {
1877 		struct binder_object_header *hdr;
1878 		size_t object_size = 0;
1879 		struct binder_object object;
1880 		binder_size_t object_offset;
1881 
1882 		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1883 						   buffer, buffer_offset,
1884 						   sizeof(object_offset)))
1885 			object_size = binder_get_object(proc, buffer,
1886 							object_offset, &object);
1887 		if (object_size == 0) {
1888 			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
1889 			       debug_id, (u64)object_offset, buffer->data_size);
1890 			continue;
1891 		}
1892 		hdr = &object.hdr;
1893 		switch (hdr->type) {
1894 		case BINDER_TYPE_BINDER:
1895 		case BINDER_TYPE_WEAK_BINDER: {
1896 			struct flat_binder_object *fp;
1897 			struct binder_node *node;
1898 
1899 			fp = to_flat_binder_object(hdr);
1900 			node = binder_get_node(proc, fp->binder);
1901 			if (node == NULL) {
1902 				pr_err("transaction release %d bad node %016llx\n",
1903 				       debug_id, (u64)fp->binder);
1904 				break;
1905 			}
1906 			binder_debug(BINDER_DEBUG_TRANSACTION,
1907 				     "        node %d u%016llx\n",
1908 				     node->debug_id, (u64)node->ptr);
1909 			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
1910 					0);
1911 			binder_put_node(node);
1912 		} break;
1913 		case BINDER_TYPE_HANDLE:
1914 		case BINDER_TYPE_WEAK_HANDLE: {
1915 			struct flat_binder_object *fp;
1916 			struct binder_ref_data rdata;
1917 			int ret;
1918 
1919 			fp = to_flat_binder_object(hdr);
1920 			ret = binder_dec_ref_for_handle(proc, fp->handle,
1921 				hdr->type == BINDER_TYPE_HANDLE, &rdata);
1922 
1923 			if (ret) {
1924 				pr_err("transaction release %d bad handle %d, ret = %d\n",
1925 				       debug_id, fp->handle, ret);
1926 				break;
1927 			}
1928 			binder_debug(BINDER_DEBUG_TRANSACTION,
1929 				     "        ref %d desc %d\n",
1930 				     rdata.debug_id, rdata.desc);
1931 		} break;
1932 
1933 		case BINDER_TYPE_FD: {
1934 			/*
1935 			 * No need to close the file here since user-space
1936 			 * closes it for successfully delivered
1937 			 * transactions. For transactions that weren't
1938 			 * delivered, the new fd was never allocated so
1939 			 * there is no need to close it here; the fput on
1940 			 * the file is done when the transaction is torn
1941 			 * down.
1942 			 */
1943 		} break;
1944 		case BINDER_TYPE_PTR:
1945 			/*
1946 			 * Nothing to do here, this will get cleaned up when the
1947 			 * transaction buffer gets freed
1948 			 */
1949 			break;
1950 		case BINDER_TYPE_FDA: {
1951 			struct binder_fd_array_object *fda;
1952 			struct binder_buffer_object *parent;
1953 			struct binder_object ptr_object;
1954 			binder_size_t fda_offset;
1955 			size_t fd_index;
1956 			binder_size_t fd_buf_size;
1957 			binder_size_t num_valid;
1958 
1959 			if (is_failure) {
1960 				/*
1961 				 * The fd fixups have not been applied so no
1962 				 * fds need to be closed.
1963 				 */
1964 				continue;
1965 			}
1966 
1967 			num_valid = (buffer_offset - off_start_offset) /
1968 						sizeof(binder_size_t);
1969 			fda = to_binder_fd_array_object(hdr);
1970 			parent = binder_validate_ptr(proc, buffer, &ptr_object,
1971 						     fda->parent,
1972 						     off_start_offset,
1973 						     NULL,
1974 						     num_valid);
1975 			if (!parent) {
1976 				pr_err("transaction release %d bad parent offset\n",
1977 				       debug_id);
1978 				continue;
1979 			}
1980 			fd_buf_size = sizeof(u32) * fda->num_fds;
1981 			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
1982 				pr_err("transaction release %d invalid number of fds (%lld)\n",
1983 				       debug_id, (u64)fda->num_fds);
1984 				continue;
1985 			}
1986 			if (fd_buf_size > parent->length ||
1987 			    fda->parent_offset > parent->length - fd_buf_size) {
1988 				/* No space for all file descriptors here. */
1989 				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
1990 				       debug_id, (u64)fda->num_fds);
1991 				continue;
1992 			}
1993 			/*
1994 			 * the source data for binder_buffer_object is visible
1995 			 * to user-space and the @buffer element is the user
1996 			 * pointer to the buffer_object containing the fd_array.
1997 			 * Convert the address to an offset relative to
1998 			 * the base of the transaction buffer.
1999 			 */
2000 			fda_offset =
2001 			    (parent->buffer - (uintptr_t)buffer->user_data) +
2002 			    fda->parent_offset;
2003 			for (fd_index = 0; fd_index < fda->num_fds;
2004 			     fd_index++) {
2005 				u32 fd;
2006 				int err;
2007 				binder_size_t offset = fda_offset +
2008 					fd_index * sizeof(fd);
2009 
2010 				err = binder_alloc_copy_from_buffer(
2011 						&proc->alloc, &fd, buffer,
2012 						offset, sizeof(fd));
2013 				WARN_ON(err);
2014 				if (!err) {
2015 					binder_deferred_fd_close(fd);
2016 					/*
2017 					 * Need to make sure the thread goes
2018 					 * back to userspace to complete the
2019 					 * deferred close
2020 					 */
2021 					if (thread)
2022 						thread->looper_need_return = true;
2023 				}
2024 			}
2025 		} break;
2026 		default:
2027 			pr_err("transaction release %d bad object type %x\n",
2028 				debug_id, hdr->type);
2029 			break;
2030 		}
2031 	}
2032 }
2033 
2034 static int binder_translate_binder(struct flat_binder_object *fp,
2035 				   struct binder_transaction *t,
2036 				   struct binder_thread *thread)
2037 {
2038 	struct binder_node *node;
2039 	struct binder_proc *proc = thread->proc;
2040 	struct binder_proc *target_proc = t->to_proc;
2041 	struct binder_ref_data rdata;
2042 	int ret = 0;
2043 
2044 	node = binder_get_node(proc, fp->binder);
2045 	if (!node) {
2046 		node = binder_new_node(proc, fp);
2047 		if (!node)
2048 			return -ENOMEM;
2049 	}
2050 	if (fp->cookie != node->cookie) {
2051 		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2052 				  proc->pid, thread->pid, (u64)fp->binder,
2053 				  node->debug_id, (u64)fp->cookie,
2054 				  (u64)node->cookie);
2055 		ret = -EINVAL;
2056 		goto done;
2057 	}
2058 	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2059 		ret = -EPERM;
2060 		goto done;
2061 	}
2062 
2063 	ret = binder_inc_ref_for_node(target_proc, node,
2064 			fp->hdr.type == BINDER_TYPE_BINDER,
2065 			&thread->todo, &rdata);
2066 	if (ret)
2067 		goto done;
2068 
2069 	if (fp->hdr.type == BINDER_TYPE_BINDER)
2070 		fp->hdr.type = BINDER_TYPE_HANDLE;
2071 	else
2072 		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2073 	fp->binder = 0;
2074 	fp->handle = rdata.desc;
2075 	fp->cookie = 0;
2076 
2077 	trace_binder_transaction_node_to_ref(t, node, &rdata);
2078 	binder_debug(BINDER_DEBUG_TRANSACTION,
2079 		     "        node %d u%016llx -> ref %d desc %d\n",
2080 		     node->debug_id, (u64)node->ptr,
2081 		     rdata.debug_id, rdata.desc);
2082 done:
2083 	binder_put_node(node);
2084 	return ret;
2085 }
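/*
 * Illustration of the translation above (field values hypothetical): a
 * sender passing { .hdr.type = BINDER_TYPE_BINDER, .binder = 0xb000,
 * .cookie = 0xc0de } has its local node looked up (or created) and a
 * ref taken on it in the target, and the receiver instead sees
 * { .hdr.type = BINDER_TYPE_HANDLE, .handle = rdata.desc, .binder = 0,
 * .cookie = 0 }.
 */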
2086 
2087 static int binder_translate_handle(struct flat_binder_object *fp,
2088 				   struct binder_transaction *t,
2089 				   struct binder_thread *thread)
2090 {
2091 	struct binder_proc *proc = thread->proc;
2092 	struct binder_proc *target_proc = t->to_proc;
2093 	struct binder_node *node;
2094 	struct binder_ref_data src_rdata;
2095 	int ret = 0;
2096 
2097 	node = binder_get_node_from_ref(proc, fp->handle,
2098 			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2099 	if (!node) {
2100 		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2101 				  proc->pid, thread->pid, fp->handle);
2102 		return -EINVAL;
2103 	}
2104 	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2105 		ret = -EPERM;
2106 		goto done;
2107 	}
2108 
2109 	binder_node_lock(node);
2110 	if (node->proc == target_proc) {
2111 		if (fp->hdr.type == BINDER_TYPE_HANDLE)
2112 			fp->hdr.type = BINDER_TYPE_BINDER;
2113 		else
2114 			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2115 		fp->binder = node->ptr;
2116 		fp->cookie = node->cookie;
2117 		if (node->proc)
2118 			binder_inner_proc_lock(node->proc);
2119 		else
2120 			__acquire(&node->proc->inner_lock);
2121 		binder_inc_node_nilocked(node,
2122 					 fp->hdr.type == BINDER_TYPE_BINDER,
2123 					 0, NULL);
2124 		if (node->proc)
2125 			binder_inner_proc_unlock(node->proc);
2126 		else
2127 			__release(&node->proc->inner_lock);
2128 		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2129 		binder_debug(BINDER_DEBUG_TRANSACTION,
2130 			     "        ref %d desc %d -> node %d u%016llx\n",
2131 			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
2132 			     (u64)node->ptr);
2133 		binder_node_unlock(node);
2134 	} else {
2135 		struct binder_ref_data dest_rdata;
2136 
2137 		binder_node_unlock(node);
2138 		ret = binder_inc_ref_for_node(target_proc, node,
2139 				fp->hdr.type == BINDER_TYPE_HANDLE,
2140 				NULL, &dest_rdata);
2141 		if (ret)
2142 			goto done;
2143 
2144 		fp->binder = 0;
2145 		fp->handle = dest_rdata.desc;
2146 		fp->cookie = 0;
2147 		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2148 						    &dest_rdata);
2149 		binder_debug(BINDER_DEBUG_TRANSACTION,
2150 			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2151 			     src_rdata.debug_id, src_rdata.desc,
2152 			     dest_rdata.debug_id, dest_rdata.desc,
2153 			     node->debug_id);
2154 	}
2155 done:
2156 	binder_put_node(node);
2157 	return ret;
2158 }
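/*
 * The two branches above cover both directions of handle translation:
 * if the node lives in the target process, the handle collapses back
 * into a local BINDER_TYPE_BINDER object with ptr/cookie restored;
 * otherwise the target gets its own ref and a descriptor valid in its
 * own handle namespace.
 */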
2159 
2160 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2161 			       struct binder_transaction *t,
2162 			       struct binder_thread *thread,
2163 			       struct binder_transaction *in_reply_to)
2164 {
2165 	struct binder_proc *proc = thread->proc;
2166 	struct binder_proc *target_proc = t->to_proc;
2167 	struct binder_txn_fd_fixup *fixup;
2168 	struct file *file;
2169 	int ret = 0;
2170 	bool target_allows_fd;
2171 
2172 	if (in_reply_to)
2173 		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2174 	else
2175 		target_allows_fd = t->buffer->target_node->accept_fds;
2176 	if (!target_allows_fd) {
2177 		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2178 				  proc->pid, thread->pid,
2179 				  in_reply_to ? "reply" : "transaction",
2180 				  fd);
2181 		ret = -EPERM;
2182 		goto err_fd_not_accepted;
2183 	}
2184 
2185 	file = fget(fd);
2186 	if (!file) {
2187 		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2188 				  proc->pid, thread->pid, fd);
2189 		ret = -EBADF;
2190 		goto err_fget;
2191 	}
2192 	ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2193 	if (ret < 0) {
2194 		ret = -EPERM;
2195 		goto err_security;
2196 	}
2197 
2198 	/*
2199 	 * Add fixup record for this transaction. The allocation
2200 	 * of the fd in the target needs to be done from a
2201 	 * target thread.
2202 	 */
2203 	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2204 	if (!fixup) {
2205 		ret = -ENOMEM;
2206 		goto err_alloc;
2207 	}
2208 	fixup->file = file;
2209 	fixup->offset = fd_offset;
2210 	trace_binder_transaction_fd_send(t, fd, fixup->offset);
2211 	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2212 
2213 	return ret;
2214 
2215 err_alloc:
2216 err_security:
2217 	fput(file);
2218 err_fget:
2219 err_fd_not_accepted:
2220 	return ret;
2221 }
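/*
 * Note: no fd is allocated in the target here. The fixup record queued
 * above is consumed later from a thread of the target process, where
 * the new fd number can be installed in the correct files_struct (see
 * binder_apply_fd_fixups() elsewhere in this file).
 */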
2222 
2223 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2224 				     struct binder_buffer_object *parent,
2225 				     struct binder_transaction *t,
2226 				     struct binder_thread *thread,
2227 				     struct binder_transaction *in_reply_to)
2228 {
2229 	binder_size_t fdi, fd_buf_size;
2230 	binder_size_t fda_offset;
2231 	struct binder_proc *proc = thread->proc;
2232 	struct binder_proc *target_proc = t->to_proc;
2233 
2234 	fd_buf_size = sizeof(u32) * fda->num_fds;
2235 	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2236 		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2237 				  proc->pid, thread->pid, (u64)fda->num_fds);
2238 		return -EINVAL;
2239 	}
2240 	if (fd_buf_size > parent->length ||
2241 	    fda->parent_offset > parent->length - fd_buf_size) {
2242 		/* No space for all file descriptors here. */
2243 		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2244 				  proc->pid, thread->pid, (u64)fda->num_fds);
2245 		return -EINVAL;
2246 	}
2247 	/*
2248 	 * the source data for binder_buffer_object is visible
2249 	 * to user-space and the @buffer element is the user
2250 	 * pointer to the buffer_object containing the fd_array.
2251 	 * Convert the address to an offset relative to
2252 	 * the base of the transaction buffer.
2253 	 */
2254 	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2255 		fda->parent_offset;
2256 	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
2257 		binder_user_error("%d:%d parent offset not aligned correctly.\n",
2258 				  proc->pid, thread->pid);
2259 		return -EINVAL;
2260 	}
2261 	for (fdi = 0; fdi < fda->num_fds; fdi++) {
2262 		u32 fd;
2263 		int ret;
2264 		binder_size_t offset = fda_offset + fdi * sizeof(fd);
2265 
2266 		ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
2267 						    &fd, t->buffer,
2268 						    offset, sizeof(fd));
2269 		if (!ret)
2270 			ret = binder_translate_fd(fd, offset, t, thread,
2271 						  in_reply_to);
2272 		if (ret < 0)
2273 			return ret;
2274 	}
2275 	return 0;
2276 }
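/*
 * Worked example for the fda_offset arithmetic above (addresses are
 * hypothetical): if parent->buffer, already fixed up to the target's
 * view of the buffer, equals t->buffer->user_data + 0x80 and
 * fda->parent_offset is 0x10, then fda_offset is 0x90 and fd number i
 * is read from the kernel copy at byte offset 0x90 + i * sizeof(u32).
 */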
2277 
2278 static int binder_fixup_parent(struct binder_transaction *t,
2279 			       struct binder_thread *thread,
2280 			       struct binder_buffer_object *bp,
2281 			       binder_size_t off_start_offset,
2282 			       binder_size_t num_valid,
2283 			       binder_size_t last_fixup_obj_off,
2284 			       binder_size_t last_fixup_min_off)
2285 {
2286 	struct binder_buffer_object *parent;
2287 	struct binder_buffer *b = t->buffer;
2288 	struct binder_proc *proc = thread->proc;
2289 	struct binder_proc *target_proc = t->to_proc;
2290 	struct binder_object object;
2291 	binder_size_t buffer_offset;
2292 	binder_size_t parent_offset;
2293 
2294 	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2295 		return 0;
2296 
2297 	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2298 				     off_start_offset, &parent_offset,
2299 				     num_valid);
2300 	if (!parent) {
2301 		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2302 				  proc->pid, thread->pid);
2303 		return -EINVAL;
2304 	}
2305 
2306 	if (!binder_validate_fixup(target_proc, b, off_start_offset,
2307 				   parent_offset, bp->parent_offset,
2308 				   last_fixup_obj_off,
2309 				   last_fixup_min_off)) {
2310 		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2311 				  proc->pid, thread->pid);
2312 		return -EINVAL;
2313 	}
2314 
2315 	if (parent->length < sizeof(binder_uintptr_t) ||
2316 	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2317 		/* No space for a pointer here! */
2318 		binder_user_error("%d:%d got transaction with invalid parent offset\n",
2319 				  proc->pid, thread->pid);
2320 		return -EINVAL;
2321 	}
2322 	buffer_offset = bp->parent_offset +
2323 			(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2324 	if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
2325 					&bp->buffer, sizeof(bp->buffer))) {
2326 		binder_user_error("%d:%d got transaction with invalid parent offset\n",
2327 				  proc->pid, thread->pid);
2328 		return -EINVAL;
2329 	}
2330 
2331 	return 0;
2332 }
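/*
 * Net effect of the fixup above (illustrative): the pointer slot at
 * bp->parent_offset inside the parent buffer is overwritten with
 * bp->buffer, which at this point already holds the target-space
 * address of this sg copy, so the receiver can follow the
 * parent-to-child pointer directly.
 */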
2333 
2334 /**
2335  * binder_proc_transaction() - sends a transaction to a process and wakes it up
2336  * @t:		transaction to send
2337  * @proc:	process to send the transaction to
2338  * @thread:	thread in @proc to send the transaction to (may be NULL)
2339  *
2340  * This function queues a transaction to the specified process. It will try
2341  * to find a thread in the target process to handle the transaction and
2342  * wake it up. If no thread is found, the work is queued to the proc
2343  * todo list.
2344  *
2345  * If the @thread parameter is not NULL, the transaction is always queued
2346  * to the todo list of that specific thread.
2347  *
2348  * Return:	0 if the transaction was successfully queued
2349  *		BR_DEAD_REPLY if the target process or thread is dead
2350  *		BR_FROZEN_REPLY if the target process or thread is frozen
2351  */
2352 static int binder_proc_transaction(struct binder_transaction *t,
2353 				    struct binder_proc *proc,
2354 				    struct binder_thread *thread)
2355 {
2356 	struct binder_node *node = t->buffer->target_node;
2357 	bool oneway = !!(t->flags & TF_ONE_WAY);
2358 	bool pending_async = false;
2359 
2360 	BUG_ON(!node);
2361 	binder_node_lock(node);
2362 	if (oneway) {
2363 		BUG_ON(thread);
2364 		if (node->has_async_transaction)
2365 			pending_async = true;
2366 		else
2367 			node->has_async_transaction = true;
2368 	}
2369 
2370 	binder_inner_proc_lock(proc);
2371 	if (proc->is_frozen) {
2372 		proc->sync_recv |= !oneway;
2373 		proc->async_recv |= oneway;
2374 	}
2375 
2376 	if ((proc->is_frozen && !oneway) || proc->is_dead ||
2377 			(thread && thread->is_dead)) {
2378 		binder_inner_proc_unlock(proc);
2379 		binder_node_unlock(node);
2380 		return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2381 	}
2382 
2383 	if (!thread && !pending_async)
2384 		thread = binder_select_thread_ilocked(proc);
2385 
2386 	if (thread)
2387 		binder_enqueue_thread_work_ilocked(thread, &t->work);
2388 	else if (!pending_async)
2389 		binder_enqueue_work_ilocked(&t->work, &proc->todo);
2390 	else
2391 		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2392 
2393 	if (!pending_async)
2394 		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2395 
2396 	proc->outstanding_txns++;
2397 	binder_inner_proc_unlock(proc);
2398 	binder_node_unlock(node);
2399 
2400 	return 0;
2401 }
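/*
 * Queueing summary for the function above: an explicit @thread always
 * receives the work on its own todo list; otherwise a sync transaction
 * (or the first async one on the node) picks a waiting thread or falls
 * back to proc->todo, while an async transaction behind another pending
 * one parks on node->async_todo and triggers no wakeup until the
 * earlier buffer is freed.
 */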
2402 
2403 /**
2404  * binder_get_node_refs_for_txn() - Get required refs on node for txn
2405  * @node:         struct binder_node for which to get refs
2406  * @procp:        returns @node->proc if valid
2407  * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
2408  *
2409  * User-space normally keeps the node alive when creating a transaction
2410  * since it has a reference to the target. The local strong ref keeps it
2411  * alive if the sending process dies before the target process processes
2412  * the transaction. If the source process is malicious or has a reference
2413  * counting bug, relying on the local strong ref can fail.
2414  *
2415  * Since user-space can cause the local strong ref to go away, we also take
2416  * a tmpref on the node to ensure it survives while we are constructing
2417  * the transaction. We also need a tmpref on the proc while we are
2418  * constructing the transaction, so we take that here as well.
2419  *
2420  * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2421  * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2422  * target proc has died, @error is set to BR_DEAD_REPLY.
2423  */
2424 static struct binder_node *binder_get_node_refs_for_txn(
2425 		struct binder_node *node,
2426 		struct binder_proc **procp,
2427 		uint32_t *error)
2428 {
2429 	struct binder_node *target_node = NULL;
2430 
2431 	binder_node_inner_lock(node);
2432 	if (node->proc) {
2433 		target_node = node;
2434 		binder_inc_node_nilocked(node, 1, 0, NULL);
2435 		binder_inc_node_tmpref_ilocked(node);
2436 		node->proc->tmp_ref++;
2437 		*procp = node->proc;
2438 	} else
2439 		*error = BR_DEAD_REPLY;
2440 	binder_node_inner_unlock(node);
2441 
2442 	return target_node;
2443 }
2444 
2445 static void binder_transaction(struct binder_proc *proc,
2446 			       struct binder_thread *thread,
2447 			       struct binder_transaction_data *tr, int reply,
2448 			       binder_size_t extra_buffers_size)
2449 {
2450 	int ret;
2451 	struct binder_transaction *t;
2452 	struct binder_work *w;
2453 	struct binder_work *tcomplete;
2454 	binder_size_t buffer_offset = 0;
2455 	binder_size_t off_start_offset, off_end_offset;
2456 	binder_size_t off_min;
2457 	binder_size_t sg_buf_offset, sg_buf_end_offset;
2458 	struct binder_proc *target_proc = NULL;
2459 	struct binder_thread *target_thread = NULL;
2460 	struct binder_node *target_node = NULL;
2461 	struct binder_transaction *in_reply_to = NULL;
2462 	struct binder_transaction_log_entry *e;
2463 	uint32_t return_error = 0;
2464 	uint32_t return_error_param = 0;
2465 	uint32_t return_error_line = 0;
2466 	binder_size_t last_fixup_obj_off = 0;
2467 	binder_size_t last_fixup_min_off = 0;
2468 	struct binder_context *context = proc->context;
2469 	int t_debug_id = atomic_inc_return(&binder_last_id);
2470 	char *secctx = NULL;
2471 	u32 secctx_sz = 0;
2472 
2473 	e = binder_transaction_log_add(&binder_transaction_log);
2474 	e->debug_id = t_debug_id;
2475 	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2476 	e->from_proc = proc->pid;
2477 	e->from_thread = thread->pid;
2478 	e->target_handle = tr->target.handle;
2479 	e->data_size = tr->data_size;
2480 	e->offsets_size = tr->offsets_size;
2481 	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2482 
2483 	if (reply) {
2484 		binder_inner_proc_lock(proc);
2485 		in_reply_to = thread->transaction_stack;
2486 		if (in_reply_to == NULL) {
2487 			binder_inner_proc_unlock(proc);
2488 			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2489 					  proc->pid, thread->pid);
2490 			return_error = BR_FAILED_REPLY;
2491 			return_error_param = -EPROTO;
2492 			return_error_line = __LINE__;
2493 			goto err_empty_call_stack;
2494 		}
2495 		if (in_reply_to->to_thread != thread) {
2496 			spin_lock(&in_reply_to->lock);
2497 			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2498 				proc->pid, thread->pid, in_reply_to->debug_id,
2499 				in_reply_to->to_proc ?
2500 				in_reply_to->to_proc->pid : 0,
2501 				in_reply_to->to_thread ?
2502 				in_reply_to->to_thread->pid : 0);
2503 			spin_unlock(&in_reply_to->lock);
2504 			binder_inner_proc_unlock(proc);
2505 			return_error = BR_FAILED_REPLY;
2506 			return_error_param = -EPROTO;
2507 			return_error_line = __LINE__;
2508 			in_reply_to = NULL;
2509 			goto err_bad_call_stack;
2510 		}
2511 		thread->transaction_stack = in_reply_to->to_parent;
2512 		binder_inner_proc_unlock(proc);
2513 		binder_set_nice(in_reply_to->saved_priority);
2514 		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2515 		if (target_thread == NULL) {
2516 			/* annotation for sparse */
2517 			__release(&target_thread->proc->inner_lock);
2518 			return_error = BR_DEAD_REPLY;
2519 			return_error_line = __LINE__;
2520 			goto err_dead_binder;
2521 		}
2522 		if (target_thread->transaction_stack != in_reply_to) {
2523 			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2524 				proc->pid, thread->pid,
2525 				target_thread->transaction_stack ?
2526 				target_thread->transaction_stack->debug_id : 0,
2527 				in_reply_to->debug_id);
2528 			binder_inner_proc_unlock(target_thread->proc);
2529 			return_error = BR_FAILED_REPLY;
2530 			return_error_param = -EPROTO;
2531 			return_error_line = __LINE__;
2532 			in_reply_to = NULL;
2533 			target_thread = NULL;
2534 			goto err_dead_binder;
2535 		}
2536 		target_proc = target_thread->proc;
2537 		target_proc->tmp_ref++;
2538 		binder_inner_proc_unlock(target_thread->proc);
2539 	} else {
2540 		if (tr->target.handle) {
2541 			struct binder_ref *ref;
2542 
2543 			/*
2544 			 * There must already be a strong ref
2545 			 * on this node; do a strong
2546 			 * increment on the node to ensure it
2547 			 * stays alive until the transaction is
2548 			 * done.
2549 			 */
2550 			binder_proc_lock(proc);
2551 			ref = binder_get_ref_olocked(proc, tr->target.handle,
2552 						     true);
2553 			if (ref) {
2554 				target_node = binder_get_node_refs_for_txn(
2555 						ref->node, &target_proc,
2556 						&return_error);
2557 			} else {
2558 				binder_user_error("%d:%d got transaction to invalid handle, %u\n",
2559 						  proc->pid, thread->pid, tr->target.handle);
2560 				return_error = BR_FAILED_REPLY;
2561 			}
2562 			binder_proc_unlock(proc);
2563 		} else {
2564 			mutex_lock(&context->context_mgr_node_lock);
2565 			target_node = context->binder_context_mgr_node;
2566 			if (target_node)
2567 				target_node = binder_get_node_refs_for_txn(
2568 						target_node, &target_proc,
2569 						&return_error);
2570 			else
2571 				return_error = BR_DEAD_REPLY;
2572 			mutex_unlock(&context->context_mgr_node_lock);
2573 			if (target_node && target_proc->pid == proc->pid) {
2574 				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2575 						  proc->pid, thread->pid);
2576 				return_error = BR_FAILED_REPLY;
2577 				return_error_param = -EINVAL;
2578 				return_error_line = __LINE__;
2579 				goto err_invalid_target_handle;
2580 			}
2581 		}
2582 		if (!target_node) {
2583 			/*
2584 			 * return_error is set above
2585 			 */
2586 			return_error_param = -EINVAL;
2587 			return_error_line = __LINE__;
2588 			goto err_dead_binder;
2589 		}
2590 		e->to_node = target_node->debug_id;
2591 		if (WARN_ON(proc == target_proc)) {
2592 			return_error = BR_FAILED_REPLY;
2593 			return_error_param = -EINVAL;
2594 			return_error_line = __LINE__;
2595 			goto err_invalid_target_handle;
2596 		}
2597 		if (security_binder_transaction(proc->cred,
2598 						target_proc->cred) < 0) {
2599 			return_error = BR_FAILED_REPLY;
2600 			return_error_param = -EPERM;
2601 			return_error_line = __LINE__;
2602 			goto err_invalid_target_handle;
2603 		}
2604 		binder_inner_proc_lock(proc);
2605 
2606 		w = list_first_entry_or_null(&thread->todo,
2607 					     struct binder_work, entry);
2608 		if (!(tr->flags & TF_ONE_WAY) && w &&
2609 		    w->type == BINDER_WORK_TRANSACTION) {
2610 			/*
2611 			 * Do not allow new outgoing transaction from a
2612 			 * thread that has a transaction at the head of
2613 			 * its todo list. Only need to check the head
2614 			 * because binder_select_thread_ilocked picks a
2615 			 * thread from proc->waiting_threads to enqueue
2616 			 * the transaction, and nothing is queued to the
2617 			 * todo list while the thread is on waiting_threads.
2618 			 */
2619 			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
2620 					  proc->pid, thread->pid);
2621 			binder_inner_proc_unlock(proc);
2622 			return_error = BR_FAILED_REPLY;
2623 			return_error_param = -EPROTO;
2624 			return_error_line = __LINE__;
2625 			goto err_bad_todo_list;
2626 		}
2627 
2628 		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2629 			struct binder_transaction *tmp;
2630 
2631 			tmp = thread->transaction_stack;
2632 			if (tmp->to_thread != thread) {
2633 				spin_lock(&tmp->lock);
2634 				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2635 					proc->pid, thread->pid, tmp->debug_id,
2636 					tmp->to_proc ? tmp->to_proc->pid : 0,
2637 					tmp->to_thread ?
2638 					tmp->to_thread->pid : 0);
2639 				spin_unlock(&tmp->lock);
2640 				binder_inner_proc_unlock(proc);
2641 				return_error = BR_FAILED_REPLY;
2642 				return_error_param = -EPROTO;
2643 				return_error_line = __LINE__;
2644 				goto err_bad_call_stack;
2645 			}
2646 			while (tmp) {
2647 				struct binder_thread *from;
2648 
2649 				spin_lock(&tmp->lock);
2650 				from = tmp->from;
2651 				if (from && from->proc == target_proc) {
2652 					atomic_inc(&from->tmp_ref);
2653 					target_thread = from;
2654 					spin_unlock(&tmp->lock);
2655 					break;
2656 				}
2657 				spin_unlock(&tmp->lock);
2658 				tmp = tmp->from_parent;
2659 			}
2660 		}
2661 		binder_inner_proc_unlock(proc);
2662 	}
2663 	if (target_thread)
2664 		e->to_thread = target_thread->pid;
2665 	e->to_proc = target_proc->pid;
2666 
2667 	/* TODO: reuse incoming transaction for reply */
2668 	t = kzalloc(sizeof(*t), GFP_KERNEL);
2669 	if (t == NULL) {
2670 		return_error = BR_FAILED_REPLY;
2671 		return_error_param = -ENOMEM;
2672 		return_error_line = __LINE__;
2673 		goto err_alloc_t_failed;
2674 	}
2675 	INIT_LIST_HEAD(&t->fd_fixups);
2676 	binder_stats_created(BINDER_STAT_TRANSACTION);
2677 	spin_lock_init(&t->lock);
2678 
2679 	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2680 	if (tcomplete == NULL) {
2681 		return_error = BR_FAILED_REPLY;
2682 		return_error_param = -ENOMEM;
2683 		return_error_line = __LINE__;
2684 		goto err_alloc_tcomplete_failed;
2685 	}
2686 	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2687 
2688 	t->debug_id = t_debug_id;
2689 
2690 	if (reply)
2691 		binder_debug(BINDER_DEBUG_TRANSACTION,
2692 			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2693 			     proc->pid, thread->pid, t->debug_id,
2694 			     target_proc->pid, target_thread->pid,
2695 			     (u64)tr->data.ptr.buffer,
2696 			     (u64)tr->data.ptr.offsets,
2697 			     (u64)tr->data_size, (u64)tr->offsets_size,
2698 			     (u64)extra_buffers_size);
2699 	else
2700 		binder_debug(BINDER_DEBUG_TRANSACTION,
2701 			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2702 			     proc->pid, thread->pid, t->debug_id,
2703 			     target_proc->pid, target_node->debug_id,
2704 			     (u64)tr->data.ptr.buffer,
2705 			     (u64)tr->data.ptr.offsets,
2706 			     (u64)tr->data_size, (u64)tr->offsets_size,
2707 			     (u64)extra_buffers_size);
2708 
2709 	if (!reply && !(tr->flags & TF_ONE_WAY))
2710 		t->from = thread;
2711 	else
2712 		t->from = NULL;
2713 	t->sender_euid = proc->cred->euid;
2714 	t->to_proc = target_proc;
2715 	t->to_thread = target_thread;
2716 	t->code = tr->code;
2717 	t->flags = tr->flags;
2718 	t->priority = task_nice(current);
2719 
2720 	if (target_node && target_node->txn_security_ctx) {
2721 		u32 secid;
2722 		size_t added_size;
2723 
2724 		security_cred_getsecid(proc->cred, &secid);
2725 		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
2726 		if (ret) {
2727 			return_error = BR_FAILED_REPLY;
2728 			return_error_param = ret;
2729 			return_error_line = __LINE__;
2730 			goto err_get_secctx_failed;
2731 		}
2732 		added_size = ALIGN(secctx_sz, sizeof(u64));
2733 		extra_buffers_size += added_size;
2734 		if (extra_buffers_size < added_size) {
2735 			/* integer overflow of extra_buffers_size */
2736 			return_error = BR_FAILED_REPLY;
2737 			return_error_param = -EINVAL;
2738 			return_error_line = __LINE__;
2739 			goto err_bad_extra_size;
2740 		}
2741 	}
2742 
2743 	trace_binder_transaction(reply, t, target_node);
2744 
2745 	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
2746 		tr->offsets_size, extra_buffers_size,
2747 		!reply && (t->flags & TF_ONE_WAY), current->tgid);
2748 	if (IS_ERR(t->buffer)) {
2749 		/*
2750 		 * -ESRCH indicates VMA cleared. The target is dying.
2751 		 */
2752 		return_error_param = PTR_ERR(t->buffer);
2753 		return_error = return_error_param == -ESRCH ?
2754 			BR_DEAD_REPLY : BR_FAILED_REPLY;
2755 		return_error_line = __LINE__;
2756 		t->buffer = NULL;
2757 		goto err_binder_alloc_buf_failed;
2758 	}
2759 	if (secctx) {
2760 		int err;
2761 		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
2762 				    ALIGN(tr->offsets_size, sizeof(void *)) +
2763 				    ALIGN(extra_buffers_size, sizeof(void *)) -
2764 				    ALIGN(secctx_sz, sizeof(u64));
2765 
2766 		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
2767 		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
2768 						  t->buffer, buf_offset,
2769 						  secctx, secctx_sz);
2770 		if (err) {
2771 			t->security_ctx = 0;
2772 			WARN_ON(1);
2773 		}
2774 		security_release_secctx(secctx, secctx_sz);
2775 		secctx = NULL;
2776 	}
2777 	t->buffer->debug_id = t->debug_id;
2778 	t->buffer->transaction = t;
2779 	t->buffer->target_node = target_node;
2780 	t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
2781 	trace_binder_transaction_alloc_buf(t->buffer);
2782 
2783 	if (binder_alloc_copy_user_to_buffer(
2784 				&target_proc->alloc,
2785 				t->buffer, 0,
2786 				(const void __user *)
2787 					(uintptr_t)tr->data.ptr.buffer,
2788 				tr->data_size)) {
2789 		binder_user_error("%d:%d got transaction with invalid data ptr\n",
2790 				proc->pid, thread->pid);
2791 		return_error = BR_FAILED_REPLY;
2792 		return_error_param = -EFAULT;
2793 		return_error_line = __LINE__;
2794 		goto err_copy_data_failed;
2795 	}
2796 	if (binder_alloc_copy_user_to_buffer(
2797 				&target_proc->alloc,
2798 				t->buffer,
2799 				ALIGN(tr->data_size, sizeof(void *)),
2800 				(const void __user *)
2801 					(uintptr_t)tr->data.ptr.offsets,
2802 				tr->offsets_size)) {
2803 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2804 				proc->pid, thread->pid);
2805 		return_error = BR_FAILED_REPLY;
2806 		return_error_param = -EFAULT;
2807 		return_error_line = __LINE__;
2808 		goto err_copy_data_failed;
2809 	}
2810 	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2811 		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2812 				proc->pid, thread->pid, (u64)tr->offsets_size);
2813 		return_error = BR_FAILED_REPLY;
2814 		return_error_param = -EINVAL;
2815 		return_error_line = __LINE__;
2816 		goto err_bad_offset;
2817 	}
2818 	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
2819 		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2820 				  proc->pid, thread->pid,
2821 				  (u64)extra_buffers_size);
2822 		return_error = BR_FAILED_REPLY;
2823 		return_error_param = -EINVAL;
2824 		return_error_line = __LINE__;
2825 		goto err_bad_offset;
2826 	}
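	/*
	 * Sketch of the kernel copy of the transaction buffer as carved up
	 * by the offsets computed below (the secctx region exists only
	 * when the target node requested a security context):
	 *
	 *   | data            | offsets array   | sg buffers...  | secctx |
	 *   0                 ^off_start_offset ^sg_buf_offset   ^sg_buf_end_offset
	 *
	 * off_end_offset marks the end of the offsets array, and each
	 * region boundary is aligned as computed below.
	 */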
2827 	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
2828 	buffer_offset = off_start_offset;
2829 	off_end_offset = off_start_offset + tr->offsets_size;
2830 	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
2831 	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
2832 		ALIGN(secctx_sz, sizeof(u64));
2833 	off_min = 0;
2834 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2835 	     buffer_offset += sizeof(binder_size_t)) {
2836 		struct binder_object_header *hdr;
2837 		size_t object_size;
2838 		struct binder_object object;
2839 		binder_size_t object_offset;
2840 
2841 		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
2842 						  &object_offset,
2843 						  t->buffer,
2844 						  buffer_offset,
2845 						  sizeof(object_offset))) {
2846 			return_error = BR_FAILED_REPLY;
2847 			return_error_param = -EINVAL;
2848 			return_error_line = __LINE__;
2849 			goto err_bad_offset;
2850 		}
2851 		object_size = binder_get_object(target_proc, t->buffer,
2852 						object_offset, &object);
2853 		if (object_size == 0 || object_offset < off_min) {
2854 			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
2855 					  proc->pid, thread->pid,
2856 					  (u64)object_offset,
2857 					  (u64)off_min,
2858 					  (u64)t->buffer->data_size);
2859 			return_error = BR_FAILED_REPLY;
2860 			return_error_param = -EINVAL;
2861 			return_error_line = __LINE__;
2862 			goto err_bad_offset;
2863 		}
2864 
2865 		hdr = &object.hdr;
2866 		off_min = object_offset + object_size;
2867 		switch (hdr->type) {
2868 		case BINDER_TYPE_BINDER:
2869 		case BINDER_TYPE_WEAK_BINDER: {
2870 			struct flat_binder_object *fp;
2871 
2872 			fp = to_flat_binder_object(hdr);
2873 			ret = binder_translate_binder(fp, t, thread);
2874 
2875 			if (ret < 0 ||
2876 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
2877 							t->buffer,
2878 							object_offset,
2879 							fp, sizeof(*fp))) {
2880 				return_error = BR_FAILED_REPLY;
2881 				return_error_param = ret;
2882 				return_error_line = __LINE__;
2883 				goto err_translate_failed;
2884 			}
2885 		} break;
2886 		case BINDER_TYPE_HANDLE:
2887 		case BINDER_TYPE_WEAK_HANDLE: {
2888 			struct flat_binder_object *fp;
2889 
2890 			fp = to_flat_binder_object(hdr);
2891 			ret = binder_translate_handle(fp, t, thread);
2892 			if (ret < 0 ||
2893 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
2894 							t->buffer,
2895 							object_offset,
2896 							fp, sizeof(*fp))) {
2897 				return_error = BR_FAILED_REPLY;
2898 				return_error_param = ret;
2899 				return_error_line = __LINE__;
2900 				goto err_translate_failed;
2901 			}
2902 		} break;
2903 
2904 		case BINDER_TYPE_FD: {
2905 			struct binder_fd_object *fp = to_binder_fd_object(hdr);
2906 			binder_size_t fd_offset = object_offset +
2907 				(uintptr_t)&fp->fd - (uintptr_t)fp;
2908 			int ret = binder_translate_fd(fp->fd, fd_offset, t,
2909 						      thread, in_reply_to);
2910 
2911 			fp->pad_binder = 0;
2912 			if (ret < 0 ||
2913 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
2914 							t->buffer,
2915 							object_offset,
2916 							fp, sizeof(*fp))) {
2917 				return_error = BR_FAILED_REPLY;
2918 				return_error_param = ret;
2919 				return_error_line = __LINE__;
2920 				goto err_translate_failed;
2921 			}
2922 		} break;
2923 		case BINDER_TYPE_FDA: {
2924 			struct binder_object ptr_object;
2925 			binder_size_t parent_offset;
2926 			struct binder_fd_array_object *fda =
2927 				to_binder_fd_array_object(hdr);
2928 			size_t num_valid = (buffer_offset - off_start_offset) /
2929 						sizeof(binder_size_t);
2930 			struct binder_buffer_object *parent =
2931 				binder_validate_ptr(target_proc, t->buffer,
2932 						    &ptr_object, fda->parent,
2933 						    off_start_offset,
2934 						    &parent_offset,
2935 						    num_valid);
2936 			if (!parent) {
2937 				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2938 						  proc->pid, thread->pid);
2939 				return_error = BR_FAILED_REPLY;
2940 				return_error_param = -EINVAL;
2941 				return_error_line = __LINE__;
2942 				goto err_bad_parent;
2943 			}
2944 			if (!binder_validate_fixup(target_proc, t->buffer,
2945 						   off_start_offset,
2946 						   parent_offset,
2947 						   fda->parent_offset,
2948 						   last_fixup_obj_off,
2949 						   last_fixup_min_off)) {
2950 				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2951 						  proc->pid, thread->pid);
2952 				return_error = BR_FAILED_REPLY;
2953 				return_error_param = -EINVAL;
2954 				return_error_line = __LINE__;
2955 				goto err_bad_parent;
2956 			}
2957 			ret = binder_translate_fd_array(fda, parent, t, thread,
2958 							in_reply_to);
2959 			if (ret < 0) {
2960 				return_error = BR_FAILED_REPLY;
2961 				return_error_param = ret;
2962 				return_error_line = __LINE__;
2963 				goto err_translate_failed;
2964 			}
2965 			last_fixup_obj_off = parent_offset;
2966 			last_fixup_min_off =
2967 				fda->parent_offset + sizeof(u32) * fda->num_fds;
2968 		} break;
2969 		case BINDER_TYPE_PTR: {
2970 			struct binder_buffer_object *bp =
2971 				to_binder_buffer_object(hdr);
2972 			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
2973 			size_t num_valid;
2974 
2975 			if (bp->length > buf_left) {
2976 				binder_user_error("%d:%d got transaction with too large buffer\n",
2977 						  proc->pid, thread->pid);
2978 				return_error = BR_FAILED_REPLY;
2979 				return_error_param = -EINVAL;
2980 				return_error_line = __LINE__;
2981 				goto err_bad_offset;
2982 			}
2983 			if (binder_alloc_copy_user_to_buffer(
2984 						&target_proc->alloc,
2985 						t->buffer,
2986 						sg_buf_offset,
2987 						(const void __user *)
2988 							(uintptr_t)bp->buffer,
2989 						bp->length)) {
2990 				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2991 						  proc->pid, thread->pid);
2992 				return_error_param = -EFAULT;
2993 				return_error = BR_FAILED_REPLY;
2994 				return_error_line = __LINE__;
2995 				goto err_copy_data_failed;
2996 			}
2997 			/* Fixup buffer pointer to target proc address space */
2998 			bp->buffer = (uintptr_t)
2999 				t->buffer->user_data + sg_buf_offset;
3000 			sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3001 
3002 			num_valid = (buffer_offset - off_start_offset) /
3003 					sizeof(binder_size_t);
3004 			ret = binder_fixup_parent(t, thread, bp,
3005 						  off_start_offset,
3006 						  num_valid,
3007 						  last_fixup_obj_off,
3008 						  last_fixup_min_off);
3009 			if (ret < 0 ||
3010 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3011 							t->buffer,
3012 							object_offset,
3013 							bp, sizeof(*bp))) {
3014 				return_error = BR_FAILED_REPLY;
3015 				return_error_param = ret;
3016 				return_error_line = __LINE__;
3017 				goto err_translate_failed;
3018 			}
3019 			last_fixup_obj_off = object_offset;
3020 			last_fixup_min_off = 0;
3021 		} break;
3022 		default:
3023 			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3024 				proc->pid, thread->pid, hdr->type);
3025 			return_error = BR_FAILED_REPLY;
3026 			return_error_param = -EINVAL;
3027 			return_error_line = __LINE__;
3028 			goto err_bad_object_type;
3029 		}
3030 	}
3031 	if (t->buffer->oneway_spam_suspect)
3032 		tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3033 	else
3034 		tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3035 	t->work.type = BINDER_WORK_TRANSACTION;
3036 
3037 	if (reply) {
3038 		binder_enqueue_thread_work(thread, tcomplete);
3039 		binder_inner_proc_lock(target_proc);
3040 		if (target_thread->is_dead) {
3041 			return_error = BR_DEAD_REPLY;
3042 			binder_inner_proc_unlock(target_proc);
3043 			goto err_dead_proc_or_thread;
3044 		}
3045 		BUG_ON(t->buffer->async_transaction != 0);
3046 		binder_pop_transaction_ilocked(target_thread, in_reply_to);
3047 		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3048 		target_proc->outstanding_txns++;
3049 		binder_inner_proc_unlock(target_proc);
3050 		wake_up_interruptible_sync(&target_thread->wait);
3051 		binder_free_transaction(in_reply_to);
3052 	} else if (!(t->flags & TF_ONE_WAY)) {
3053 		BUG_ON(t->buffer->async_transaction != 0);
3054 		binder_inner_proc_lock(proc);
3055 		/*
3056 		 * Defer the TRANSACTION_COMPLETE, so we don't return to
3057 		 * userspace immediately; this allows the target process to
3058 		 * immediately start processing this transaction, reducing
3059 		 * latency. We will then return the TRANSACTION_COMPLETE when
3060 		 * the target replies (or there is an error).
3061 		 */
3062 		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3063 		t->need_reply = 1;
3064 		t->from_parent = thread->transaction_stack;
3065 		thread->transaction_stack = t;
3066 		binder_inner_proc_unlock(proc);
3067 		return_error = binder_proc_transaction(t,
3068 				target_proc, target_thread);
3069 		if (return_error) {
3070 			binder_inner_proc_lock(proc);
3071 			binder_pop_transaction_ilocked(thread, t);
3072 			binder_inner_proc_unlock(proc);
3073 			goto err_dead_proc_or_thread;
3074 		}
3075 	} else {
3076 		BUG_ON(target_node == NULL);
3077 		BUG_ON(t->buffer->async_transaction != 1);
3078 		binder_enqueue_thread_work(thread, tcomplete);
3079 		return_error = binder_proc_transaction(t, target_proc, NULL);
3080 		if (return_error)
3081 			goto err_dead_proc_or_thread;
3082 	}
3083 	if (target_thread)
3084 		binder_thread_dec_tmpref(target_thread);
3085 	binder_proc_dec_tmpref(target_proc);
3086 	if (target_node)
3087 		binder_dec_node_tmpref(target_node);
3088 	/*
3089 	 * write barrier to synchronize with initialization
3090 	 * of log entry
3091 	 */
3092 	smp_wmb();
3093 	WRITE_ONCE(e->debug_id_done, t_debug_id);
3094 	return;
3095 
3096 err_dead_proc_or_thread:
3097 	return_error_line = __LINE__;
3098 	binder_dequeue_work(proc, tcomplete);
3099 err_translate_failed:
3100 err_bad_object_type:
3101 err_bad_offset:
3102 err_bad_parent:
3103 err_copy_data_failed:
3104 	binder_free_txn_fixups(t);
3105 	trace_binder_transaction_failed_buffer_release(t->buffer);
3106 	binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3107 					  buffer_offset, true);
3108 	if (target_node)
3109 		binder_dec_node_tmpref(target_node);
3110 	target_node = NULL;
3111 	t->buffer->transaction = NULL;
3112 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3113 err_binder_alloc_buf_failed:
3114 err_bad_extra_size:
3115 	if (secctx)
3116 		security_release_secctx(secctx, secctx_sz);
3117 err_get_secctx_failed:
3118 	kfree(tcomplete);
3119 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3120 err_alloc_tcomplete_failed:
3121 	if (trace_binder_txn_latency_free_enabled())
3122 		binder_txn_latency_free(t);
3123 	kfree(t);
3124 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
3125 err_alloc_t_failed:
3126 err_bad_todo_list:
3127 err_bad_call_stack:
3128 err_empty_call_stack:
3129 err_dead_binder:
3130 err_invalid_target_handle:
3131 	if (target_thread)
3132 		binder_thread_dec_tmpref(target_thread);
3133 	if (target_proc)
3134 		binder_proc_dec_tmpref(target_proc);
3135 	if (target_node) {
3136 		binder_dec_node(target_node, 1, 0);
3137 		binder_dec_node_tmpref(target_node);
3138 	}
3139 
3140 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3141 		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3142 		     proc->pid, thread->pid, return_error, return_error_param,
3143 		     (u64)tr->data_size, (u64)tr->offsets_size,
3144 		     return_error_line);
3145 
3146 	{
3147 		struct binder_transaction_log_entry *fe;
3148 
3149 		e->return_error = return_error;
3150 		e->return_error_param = return_error_param;
3151 		e->return_error_line = return_error_line;
3152 		fe = binder_transaction_log_add(&binder_transaction_log_failed);
3153 		*fe = *e;
3154 		/*
3155 		 * write barrier to synchronize with initialization
3156 		 * of log entry
3157 		 */
3158 		smp_wmb();
3159 		WRITE_ONCE(e->debug_id_done, t_debug_id);
3160 		WRITE_ONCE(fe->debug_id_done, t_debug_id);
3161 	}
3162 
3163 	BUG_ON(thread->return_error.cmd != BR_OK);
3164 	if (in_reply_to) {
3165 		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3166 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3167 		binder_send_failed_reply(in_reply_to, return_error);
3168 	} else {
3169 		thread->return_error.cmd = return_error;
3170 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3171 	}
3172 }
3173 
3174 /**
3175  * binder_free_buf() - free the specified buffer
3176  * @proc:	binder proc that owns buffer
3177  * @thread:	binder thread performing the buffer release
3178  * @buffer:	buffer to be freed
3179  * @is_failure:	failed to send transaction
3180  *
3181  * If the buffer is for an async transaction, enqueue the next async
3182  * transaction from the node.
3183  * Then clean up the buffer and free it.
3184  */
3185 static void
3186 binder_free_buf(struct binder_proc *proc,
3187 		struct binder_thread *thread,
3188 		struct binder_buffer *buffer, bool is_failure)
3189 {
3190 	binder_inner_proc_lock(proc);
3191 	if (buffer->transaction) {
3192 		buffer->transaction->buffer = NULL;
3193 		buffer->transaction = NULL;
3194 	}
3195 	binder_inner_proc_unlock(proc);
3196 	if (buffer->async_transaction && buffer->target_node) {
3197 		struct binder_node *buf_node;
3198 		struct binder_work *w;
3199 
3200 		buf_node = buffer->target_node;
3201 		binder_node_inner_lock(buf_node);
3202 		BUG_ON(!buf_node->has_async_transaction);
3203 		BUG_ON(buf_node->proc != proc);
3204 		w = binder_dequeue_work_head_ilocked(
3205 				&buf_node->async_todo);
3206 		if (!w) {
3207 			buf_node->has_async_transaction = false;
3208 		} else {
3209 			binder_enqueue_work_ilocked(
3210 					w, &proc->todo);
3211 			binder_wakeup_proc_ilocked(proc);
3212 		}
3213 		binder_node_inner_unlock(buf_node);
3214 	}
3215 	trace_binder_transaction_buffer_release(buffer);
3216 	binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
3217 	binder_alloc_free_buf(&proc->alloc, buffer);
3218 }
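/*
 * Side effect worth noting: freeing an async transaction buffer is what
 * pumps node->async_todo above, so only one async transaction per node
 * is delivered to the target at a time; the next one is queued when
 * userspace frees the previous buffer with BC_FREE_BUFFER.
 */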
3219 
3220 static int binder_thread_write(struct binder_proc *proc,
3221 			struct binder_thread *thread,
3222 			binder_uintptr_t binder_buffer, size_t size,
3223 			binder_size_t *consumed)
3224 {
3225 	uint32_t cmd;
3226 	struct binder_context *context = proc->context;
3227 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3228 	void __user *ptr = buffer + *consumed;
3229 	void __user *end = buffer + size;
3230 
3231 	while (ptr < end && thread->return_error.cmd == BR_OK) {
3232 		int ret;
3233 
3234 		if (get_user(cmd, (uint32_t __user *)ptr))
3235 			return -EFAULT;
3236 		ptr += sizeof(uint32_t);
3237 		trace_binder_command(cmd);
3238 		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3239 			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3240 			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3241 			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3242 		}
3243 		switch (cmd) {
3244 		case BC_INCREFS:
3245 		case BC_ACQUIRE:
3246 		case BC_RELEASE:
3247 		case BC_DECREFS: {
3248 			uint32_t target;
3249 			const char *debug_string;
3250 			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3251 			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3252 			struct binder_ref_data rdata;
3253 
3254 			if (get_user(target, (uint32_t __user *)ptr))
3255 				return -EFAULT;
3256 
3257 			ptr += sizeof(uint32_t);
3258 			ret = -1;
3259 			if (increment && !target) {
3260 				struct binder_node *ctx_mgr_node;
3261 
3262 				mutex_lock(&context->context_mgr_node_lock);
3263 				ctx_mgr_node = context->binder_context_mgr_node;
3264 				if (ctx_mgr_node) {
3265 					if (ctx_mgr_node->proc == proc) {
3266 						binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3267 								  proc->pid, thread->pid);
3268 						mutex_unlock(&context->context_mgr_node_lock);
3269 						return -EINVAL;
3270 					}
3271 					ret = binder_inc_ref_for_node(
3272 							proc, ctx_mgr_node,
3273 							strong, NULL, &rdata);
3274 				}
3275 				mutex_unlock(&context->context_mgr_node_lock);
3276 			}
3277 			if (ret)
3278 				ret = binder_update_ref_for_handle(
3279 						proc, target, increment, strong,
3280 						&rdata);
3281 			if (!ret && rdata.desc != target) {
3282 				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3283 					proc->pid, thread->pid,
3284 					target, rdata.desc);
3285 			}
3286 			switch (cmd) {
3287 			case BC_INCREFS:
3288 				debug_string = "IncRefs";
3289 				break;
3290 			case BC_ACQUIRE:
3291 				debug_string = "Acquire";
3292 				break;
3293 			case BC_RELEASE:
3294 				debug_string = "Release";
3295 				break;
3296 			case BC_DECREFS:
3297 			default:
3298 				debug_string = "DecRefs";
3299 				break;
3300 			}
3301 			if (ret) {
3302 				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3303 					proc->pid, thread->pid, debug_string,
3304 					strong, target, ret);
3305 				break;
3306 			}
3307 			binder_debug(BINDER_DEBUG_USER_REFS,
3308 				     "%d:%d %s ref %d desc %d s %d w %d\n",
3309 				     proc->pid, thread->pid, debug_string,
3310 				     rdata.debug_id, rdata.desc, rdata.strong,
3311 				     rdata.weak);
3312 			break;
3313 		}
3314 		case BC_INCREFS_DONE:
3315 		case BC_ACQUIRE_DONE: {
3316 			binder_uintptr_t node_ptr;
3317 			binder_uintptr_t cookie;
3318 			struct binder_node *node;
3319 			bool free_node;
3320 
3321 			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3322 				return -EFAULT;
3323 			ptr += sizeof(binder_uintptr_t);
3324 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3325 				return -EFAULT;
3326 			ptr += sizeof(binder_uintptr_t);
3327 			node = binder_get_node(proc, node_ptr);
3328 			if (node == NULL) {
3329 				binder_user_error("%d:%d %s u%016llx no match\n",
3330 					proc->pid, thread->pid,
3331 					cmd == BC_INCREFS_DONE ?
3332 					"BC_INCREFS_DONE" :
3333 					"BC_ACQUIRE_DONE",
3334 					(u64)node_ptr);
3335 				break;
3336 			}
3337 			if (cookie != node->cookie) {
3338 				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3339 					proc->pid, thread->pid,
3340 					cmd == BC_INCREFS_DONE ?
3341 					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3342 					(u64)node_ptr, node->debug_id,
3343 					(u64)cookie, (u64)node->cookie);
3344 				binder_put_node(node);
3345 				break;
3346 			}
3347 			binder_node_inner_lock(node);
3348 			if (cmd == BC_ACQUIRE_DONE) {
3349 				if (node->pending_strong_ref == 0) {
3350 					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3351 						proc->pid, thread->pid,
3352 						node->debug_id);
3353 					binder_node_inner_unlock(node);
3354 					binder_put_node(node);
3355 					break;
3356 				}
3357 				node->pending_strong_ref = 0;
3358 			} else {
3359 				if (node->pending_weak_ref == 0) {
3360 					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3361 						proc->pid, thread->pid,
3362 						node->debug_id);
3363 					binder_node_inner_unlock(node);
3364 					binder_put_node(node);
3365 					break;
3366 				}
3367 				node->pending_weak_ref = 0;
3368 			}
3369 			free_node = binder_dec_node_nilocked(node,
3370 					cmd == BC_ACQUIRE_DONE, 0);
3371 			WARN_ON(free_node);
3372 			binder_debug(BINDER_DEBUG_USER_REFS,
3373 				     "%d:%d %s node %d ls %d lw %d tr %d\n",
3374 				     proc->pid, thread->pid,
3375 				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3376 				     node->debug_id, node->local_strong_refs,
3377 				     node->local_weak_refs, node->tmp_refs);
3378 			binder_node_inner_unlock(node);
3379 			binder_put_node(node);
3380 			break;
3381 		}
3382 		case BC_ATTEMPT_ACQUIRE:
3383 			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3384 			return -EINVAL;
3385 		case BC_ACQUIRE_RESULT:
3386 			pr_err("BC_ACQUIRE_RESULT not supported\n");
3387 			return -EINVAL;
3388 
3389 		case BC_FREE_BUFFER: {
3390 			binder_uintptr_t data_ptr;
3391 			struct binder_buffer *buffer;
3392 
3393 			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3394 				return -EFAULT;
3395 			ptr += sizeof(binder_uintptr_t);
3396 
3397 			buffer = binder_alloc_prepare_to_free(&proc->alloc,
3398 							      data_ptr);
3399 			if (IS_ERR_OR_NULL(buffer)) {
3400 				if (PTR_ERR(buffer) == -EPERM) {
3401 					binder_user_error(
3402 						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3403 						proc->pid, thread->pid,
3404 						(u64)data_ptr);
3405 				} else {
3406 					binder_user_error(
3407 						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
3408 						proc->pid, thread->pid,
3409 						(u64)data_ptr);
3410 				}
3411 				break;
3412 			}
3413 			binder_debug(BINDER_DEBUG_FREE_BUFFER,
3414 				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3415 				     proc->pid, thread->pid, (u64)data_ptr,
3416 				     buffer->debug_id,
3417 				     buffer->transaction ? "active" : "finished");
3418 			binder_free_buf(proc, thread, buffer, false);
3419 			break;
3420 		}
3421 
3422 		case BC_TRANSACTION_SG:
3423 		case BC_REPLY_SG: {
3424 			struct binder_transaction_data_sg tr;
3425 
3426 			if (copy_from_user(&tr, ptr, sizeof(tr)))
3427 				return -EFAULT;
3428 			ptr += sizeof(tr);
3429 			binder_transaction(proc, thread, &tr.transaction_data,
3430 					   cmd == BC_REPLY_SG, tr.buffers_size);
3431 			break;
3432 		}
3433 		case BC_TRANSACTION:
3434 		case BC_REPLY: {
3435 			struct binder_transaction_data tr;
3436 
3437 			if (copy_from_user(&tr, ptr, sizeof(tr)))
3438 				return -EFAULT;
3439 			ptr += sizeof(tr);
3440 			binder_transaction(proc, thread, &tr,
3441 					   cmd == BC_REPLY, 0);
3442 			break;
3443 		}
3444 
3445 		case BC_REGISTER_LOOPER:
3446 			binder_debug(BINDER_DEBUG_THREADS,
3447 				     "%d:%d BC_REGISTER_LOOPER\n",
3448 				     proc->pid, thread->pid);
3449 			binder_inner_proc_lock(proc);
3450 			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3451 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3452 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3453 					proc->pid, thread->pid);
3454 			} else if (proc->requested_threads == 0) {
3455 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3456 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3457 					proc->pid, thread->pid);
3458 			} else {
3459 				proc->requested_threads--;
3460 				proc->requested_threads_started++;
3461 			}
3462 			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3463 			binder_inner_proc_unlock(proc);
3464 			break;
3465 		case BC_ENTER_LOOPER:
3466 			binder_debug(BINDER_DEBUG_THREADS,
3467 				     "%d:%d BC_ENTER_LOOPER\n",
3468 				     proc->pid, thread->pid);
3469 			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3470 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3471 				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3472 					proc->pid, thread->pid);
3473 			}
3474 			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3475 			break;
3476 		case BC_EXIT_LOOPER:
3477 			binder_debug(BINDER_DEBUG_THREADS,
3478 				     "%d:%d BC_EXIT_LOOPER\n",
3479 				     proc->pid, thread->pid);
3480 			thread->looper |= BINDER_LOOPER_STATE_EXITED;
3481 			break;
3482 
3483 		case BC_REQUEST_DEATH_NOTIFICATION:
3484 		case BC_CLEAR_DEATH_NOTIFICATION: {
3485 			uint32_t target;
3486 			binder_uintptr_t cookie;
3487 			struct binder_ref *ref;
3488 			struct binder_ref_death *death = NULL;
3489 
3490 			if (get_user(target, (uint32_t __user *)ptr))
3491 				return -EFAULT;
3492 			ptr += sizeof(uint32_t);
3493 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3494 				return -EFAULT;
3495 			ptr += sizeof(binder_uintptr_t);
3496 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3497 				/*
3498 				 * Allocate memory for death notification
3499 				 * before taking lock
3500 				 */
3501 				death = kzalloc(sizeof(*death), GFP_KERNEL);
3502 				if (death == NULL) {
3503 					WARN_ON(thread->return_error.cmd !=
3504 						BR_OK);
3505 					thread->return_error.cmd = BR_ERROR;
3506 					binder_enqueue_thread_work(
3507 						thread,
3508 						&thread->return_error.work);
3509 					binder_debug(
3510 						BINDER_DEBUG_FAILED_TRANSACTION,
3511 						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3512 						proc->pid, thread->pid);
3513 					break;
3514 				}
3515 			}
3516 			binder_proc_lock(proc);
3517 			ref = binder_get_ref_olocked(proc, target, false);
3518 			if (ref == NULL) {
3519 				binder_user_error("%d:%d %s invalid ref %d\n",
3520 					proc->pid, thread->pid,
3521 					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3522 					"BC_REQUEST_DEATH_NOTIFICATION" :
3523 					"BC_CLEAR_DEATH_NOTIFICATION",
3524 					target);
3525 				binder_proc_unlock(proc);
3526 				kfree(death);
3527 				break;
3528 			}
3529 
3530 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3531 				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3532 				     proc->pid, thread->pid,
3533 				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3534 				     "BC_REQUEST_DEATH_NOTIFICATION" :
3535 				     "BC_CLEAR_DEATH_NOTIFICATION",
3536 				     (u64)cookie, ref->data.debug_id,
3537 				     ref->data.desc, ref->data.strong,
3538 				     ref->data.weak, ref->node->debug_id);
3539 
3540 			binder_node_lock(ref->node);
3541 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3542 				if (ref->death) {
3543 					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3544 						proc->pid, thread->pid);
3545 					binder_node_unlock(ref->node);
3546 					binder_proc_unlock(proc);
3547 					kfree(death);
3548 					break;
3549 				}
3550 				binder_stats_created(BINDER_STAT_DEATH);
3551 				INIT_LIST_HEAD(&death->work.entry);
3552 				death->cookie = cookie;
3553 				ref->death = death;
3554 				if (ref->node->proc == NULL) {
3555 					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3556 
3557 					binder_inner_proc_lock(proc);
3558 					binder_enqueue_work_ilocked(
3559 						&ref->death->work, &proc->todo);
3560 					binder_wakeup_proc_ilocked(proc);
3561 					binder_inner_proc_unlock(proc);
3562 				}
3563 			} else {
3564 				if (ref->death == NULL) {
3565 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3566 						proc->pid, thread->pid);
3567 					binder_node_unlock(ref->node);
3568 					binder_proc_unlock(proc);
3569 					break;
3570 				}
3571 				death = ref->death;
3572 				if (death->cookie != cookie) {
3573 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3574 						proc->pid, thread->pid,
3575 						(u64)death->cookie,
3576 						(u64)cookie);
3577 					binder_node_unlock(ref->node);
3578 					binder_proc_unlock(proc);
3579 					break;
3580 				}
3581 				ref->death = NULL;
3582 				binder_inner_proc_lock(proc);
3583 				if (list_empty(&death->work.entry)) {
3584 					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3585 					if (thread->looper &
3586 					    (BINDER_LOOPER_STATE_REGISTERED |
3587 					     BINDER_LOOPER_STATE_ENTERED))
3588 						binder_enqueue_thread_work_ilocked(
3589 								thread,
3590 								&death->work);
3591 					else {
3592 						binder_enqueue_work_ilocked(
3593 								&death->work,
3594 								&proc->todo);
3595 						binder_wakeup_proc_ilocked(
3596 								proc);
3597 					}
3598 				} else {
3599 					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3600 					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3601 				}
3602 				binder_inner_proc_unlock(proc);
3603 			}
3604 			binder_node_unlock(ref->node);
3605 			binder_proc_unlock(proc);
3606 		} break;
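		/*
		 * Illustrative sketch (not part of this driver): user space
		 * requests the notification handled in the
		 * BC_REQUEST_DEATH_NOTIFICATION case above by pairing the
		 * command with a handle and a caller-chosen cookie; the same
		 * cookie comes back in BR_DEAD_BINDER and is acknowledged
		 * with BC_DEAD_BINDER_DONE ("h" and "my_cookie" are assumed):
		 *
		 *	struct {
		 *		uint32_t cmd;
		 *		uint32_t handle;
		 *		binder_uintptr_t cookie;
		 *	} __attribute__((packed)) req = {
		 *		.cmd = BC_REQUEST_DEATH_NOTIFICATION,
		 *		.handle = h,
		 *		.cookie = (binder_uintptr_t)my_cookie,
		 *	};
		 *
		 * written through bwr.write_buffer like any other BC_ command.
		 */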
3607 		case BC_DEAD_BINDER_DONE: {
3608 			struct binder_work *w;
3609 			binder_uintptr_t cookie;
3610 			struct binder_ref_death *death = NULL;
3611 
3612 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3613 				return -EFAULT;
3614 
3615 			ptr += sizeof(cookie);
3616 			binder_inner_proc_lock(proc);
3617 			list_for_each_entry(w, &proc->delivered_death,
3618 					    entry) {
3619 				struct binder_ref_death *tmp_death =
3620 					container_of(w,
3621 						     struct binder_ref_death,
3622 						     work);
3623 
3624 				if (tmp_death->cookie == cookie) {
3625 					death = tmp_death;
3626 					break;
3627 				}
3628 			}
3629 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
3630 				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3631 				     proc->pid, thread->pid, (u64)cookie,
3632 				     death);
3633 			if (death == NULL) {
3634 				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3635 					proc->pid, thread->pid, (u64)cookie);
3636 				binder_inner_proc_unlock(proc);
3637 				break;
3638 			}
3639 			binder_dequeue_work_ilocked(&death->work);
3640 			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3641 				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3642 				if (thread->looper &
3643 					(BINDER_LOOPER_STATE_REGISTERED |
3644 					 BINDER_LOOPER_STATE_ENTERED))
3645 					binder_enqueue_thread_work_ilocked(
3646 						thread, &death->work);
3647 				else {
3648 					binder_enqueue_work_ilocked(
3649 							&death->work,
3650 							&proc->todo);
3651 					binder_wakeup_proc_ilocked(proc);
3652 				}
3653 			}
3654 			binder_inner_proc_unlock(proc);
3655 		} break;
3656 
3657 		default:
3658 			pr_err("%d:%d unknown command %d\n",
3659 			       proc->pid, thread->pid, cmd);
3660 			return -EINVAL;
3661 		}
3662 		*consumed = ptr - buffer;
3663 	}
3664 	return 0;
3665 }
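
/*
 * Illustrative sketch (not part of this driver): the BC_* stream parsed by
 * binder_thread_write() above is produced in user space. Registering the
 * main looper thread, for example, is a single command written through the
 * BINDER_WRITE_READ ioctl ("fd" is assumed to be an open binder device,
 * error handling elided):
 *
 *	uint32_t cmd = BC_ENTER_LOOPER;
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)&cmd,
 *		.write_size = sizeof(cmd),
 *	};
 *
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 *
 * Threads spawned later in response to BR_SPAWN_LOOPER write
 * BC_REGISTER_LOOPER instead; issuing both from one thread trips the
 * BINDER_LOOPER_STATE_INVALID checks above.
 */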
3666 
3667 static void binder_stat_br(struct binder_proc *proc,
3668 			   struct binder_thread *thread, uint32_t cmd)
3669 {
3670 	trace_binder_return(cmd);
3671 	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3672 		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3673 		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3674 		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3675 	}
3676 }
3677 
3678 static int binder_put_node_cmd(struct binder_proc *proc,
3679 			       struct binder_thread *thread,
3680 			       void __user **ptrp,
3681 			       binder_uintptr_t node_ptr,
3682 			       binder_uintptr_t node_cookie,
3683 			       int node_debug_id,
3684 			       uint32_t cmd, const char *cmd_name)
3685 {
3686 	void __user *ptr = *ptrp;
3687 
3688 	if (put_user(cmd, (uint32_t __user *)ptr))
3689 		return -EFAULT;
3690 	ptr += sizeof(uint32_t);
3691 
3692 	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3693 		return -EFAULT;
3694 	ptr += sizeof(binder_uintptr_t);
3695 
3696 	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3697 		return -EFAULT;
3698 	ptr += sizeof(binder_uintptr_t);
3699 
3700 	binder_stat_br(proc, thread, cmd);
3701 	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3702 		     proc->pid, thread->pid, cmd_name, node_debug_id,
3703 		     (u64)node_ptr, (u64)node_cookie);
3704 
3705 	*ptrp = ptr;
3706 	return 0;
3707 }
3708 
3709 static int binder_wait_for_work(struct binder_thread *thread,
3710 				bool do_proc_work)
3711 {
3712 	DEFINE_WAIT(wait);
3713 	struct binder_proc *proc = thread->proc;
3714 	int ret = 0;
3715 
3716 	freezer_do_not_count();
3717 	binder_inner_proc_lock(proc);
3718 	for (;;) {
3719 		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3720 		if (binder_has_work_ilocked(thread, do_proc_work))
3721 			break;
3722 		if (do_proc_work)
3723 			list_add(&thread->waiting_thread_node,
3724 				 &proc->waiting_threads);
3725 		binder_inner_proc_unlock(proc);
3726 		schedule();
3727 		binder_inner_proc_lock(proc);
3728 		list_del_init(&thread->waiting_thread_node);
3729 		if (signal_pending(current)) {
3730 			ret = -EINTR;
3731 			break;
3732 		}
3733 	}
3734 	finish_wait(&thread->wait, &wait);
3735 	binder_inner_proc_unlock(proc);
3736 	freezer_count();
3737 
3738 	return ret;
3739 }
3740 
3741 /**
3742  * binder_apply_fd_fixups() - finish fd translation
3743  * @proc:         binder_proc associated with @t->buffer
3744  * @t:	binder transaction with list of fd fixups
3745  *
3746  * Now that we are in the context of the transaction target
3747  * process, we can allocate and install fds. Process the
3748  * list of fds to translate and fix up the buffer with the
3749  * new fds.
3750  *
3751  * If we fail to allocate an fd, then free the resources by
3752  * fput'ing files that have not been processed and, via
3753  * binder_deferred_fd_close(), closing any fds already allocated.
3754  */
3755 static int binder_apply_fd_fixups(struct binder_proc *proc,
3756 				  struct binder_transaction *t)
3757 {
3758 	struct binder_txn_fd_fixup *fixup, *tmp;
3759 	int ret = 0;
3760 
3761 	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
3762 		int fd = get_unused_fd_flags(O_CLOEXEC);
3763 
3764 		if (fd < 0) {
3765 			binder_debug(BINDER_DEBUG_TRANSACTION,
3766 				     "failed fd fixup txn %d fd %d\n",
3767 				     t->debug_id, fd);
3768 			ret = -ENOMEM;
3769 			break;
3770 		}
3771 		binder_debug(BINDER_DEBUG_TRANSACTION,
3772 			     "fd fixup txn %d fd %d\n",
3773 			     t->debug_id, fd);
3774 		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
3775 		fd_install(fd, fixup->file);
3776 		fixup->file = NULL;
3777 		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
3778 						fixup->offset, &fd,
3779 						sizeof(u32))) {
3780 			ret = -EINVAL;
3781 			break;
3782 		}
3783 	}
3784 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
3785 		if (fixup->file) {
3786 			fput(fixup->file);
3787 		} else if (ret) {
3788 			u32 fd;
3789 			int err;
3790 
3791 			err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
3792 							    t->buffer,
3793 							    fixup->offset,
3794 							    sizeof(fd));
3795 			WARN_ON(err);
3796 			if (!err)
3797 				binder_deferred_fd_close(fd);
3798 		}
3799 		list_del(&fixup->fixup_entry);
3800 		kfree(fixup);
3801 	}
3802 
3803 	return ret;
3804 }
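
/*
 * Illustrative sketch (not part of this driver): once the fixups above have
 * run in the target process, each translated descriptor sits in the
 * delivered buffer as a flat object. Assuming "tr" is the
 * binder_transaction_data read back via BINDER_WRITE_READ and "off" is an
 * entry of its offsets array (use_fd() is a placeholder for the consumer):
 *
 *	struct binder_fd_object *fdo;
 *
 *	fdo = (struct binder_fd_object *)
 *		((uintptr_t)tr.data.ptr.buffer + off);
 *	if (fdo->hdr.type == BINDER_TYPE_FD)
 *		use_fd(fdo->fd);	// fd is already valid in this process
 */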
3805 
3806 static int binder_thread_read(struct binder_proc *proc,
3807 			      struct binder_thread *thread,
3808 			      binder_uintptr_t binder_buffer, size_t size,
3809 			      binder_size_t *consumed, int non_block)
3810 {
3811 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3812 	void __user *ptr = buffer + *consumed;
3813 	void __user *end = buffer + size;
3814 
3815 	int ret = 0;
3816 	int wait_for_proc_work;
3817 
3818 	if (*consumed == 0) {
3819 		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3820 			return -EFAULT;
3821 		ptr += sizeof(uint32_t);
3822 	}
3823 
3824 retry:
3825 	binder_inner_proc_lock(proc);
3826 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
3827 	binder_inner_proc_unlock(proc);
3828 
3829 	thread->looper |= BINDER_LOOPER_STATE_WAITING;
3830 
3831 	trace_binder_wait_for_work(wait_for_proc_work,
3832 				   !!thread->transaction_stack,
3833 				   !binder_worklist_empty(proc, &thread->todo));
3834 	if (wait_for_proc_work) {
3835 		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3836 					BINDER_LOOPER_STATE_ENTERED))) {
3837 			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
3838 				proc->pid, thread->pid, thread->looper);
3839 			wait_event_interruptible(binder_user_error_wait,
3840 						 binder_stop_on_user_error < 2);
3841 		}
3842 		binder_set_nice(proc->default_priority);
3843 	}
3844 
3845 	if (non_block) {
3846 		if (!binder_has_work(thread, wait_for_proc_work))
3847 			ret = -EAGAIN;
3848 	} else {
3849 		ret = binder_wait_for_work(thread, wait_for_proc_work);
3850 	}
3851 
3852 	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3853 
3854 	if (ret)
3855 		return ret;
3856 
3857 	while (1) {
3858 		uint32_t cmd;
3859 		struct binder_transaction_data_secctx tr;
3860 		struct binder_transaction_data *trd = &tr.transaction_data;
3861 		struct binder_work *w = NULL;
3862 		struct list_head *list = NULL;
3863 		struct binder_transaction *t = NULL;
3864 		struct binder_thread *t_from;
3865 		size_t trsize = sizeof(*trd);
3866 
3867 		binder_inner_proc_lock(proc);
3868 		if (!binder_worklist_empty_ilocked(&thread->todo))
3869 			list = &thread->todo;
3870 		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
3871 			   wait_for_proc_work)
3872 			list = &proc->todo;
3873 		else {
3874 			binder_inner_proc_unlock(proc);
3875 
3876 			/* no data added beyond a possible BR_NOOP */
3877 			if (ptr - buffer == 4 && !thread->looper_need_return)
3878 				goto retry;
3879 			break;
3880 		}
3881 
3882 		if (end - ptr < sizeof(tr) + 4) { /* cmd word + payload */
3883 			binder_inner_proc_unlock(proc);
3884 			break;
3885 		}
3886 		w = binder_dequeue_work_head_ilocked(list);
3887 		if (binder_worklist_empty_ilocked(&thread->todo))
3888 			thread->process_todo = false;
3889 
3890 		switch (w->type) {
3891 		case BINDER_WORK_TRANSACTION: {
3892 			binder_inner_proc_unlock(proc);
3893 			t = container_of(w, struct binder_transaction, work);
3894 		} break;
3895 		case BINDER_WORK_RETURN_ERROR: {
3896 			struct binder_error *e = container_of(
3897 					w, struct binder_error, work);
3898 
3899 			WARN_ON(e->cmd == BR_OK);
3900 			binder_inner_proc_unlock(proc);
3901 			if (put_user(e->cmd, (uint32_t __user *)ptr))
3902 				return -EFAULT;
3903 			cmd = e->cmd;
3904 			e->cmd = BR_OK;
3905 			ptr += sizeof(uint32_t);
3906 
3907 			binder_stat_br(proc, thread, cmd);
3908 		} break;
3909 		case BINDER_WORK_TRANSACTION_COMPLETE:
3910 		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
3911 			if (proc->oneway_spam_detection_enabled &&
3912 				   w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
3913 				cmd = BR_ONEWAY_SPAM_SUSPECT;
3914 			else
3915 				cmd = BR_TRANSACTION_COMPLETE;
3916 			binder_inner_proc_unlock(proc);
3917 			kfree(w);
3918 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3919 			if (put_user(cmd, (uint32_t __user *)ptr))
3920 				return -EFAULT;
3921 			ptr += sizeof(uint32_t);
3922 
3923 			binder_stat_br(proc, thread, cmd);
3924 			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
3925 				     "%d:%d BR_TRANSACTION_COMPLETE\n",
3926 				     proc->pid, thread->pid);
3927 		} break;
3928 		case BINDER_WORK_NODE: {
3929 			struct binder_node *node = container_of(w, struct binder_node, work);
3930 			int strong, weak;
3931 			binder_uintptr_t node_ptr = node->ptr;
3932 			binder_uintptr_t node_cookie = node->cookie;
3933 			int node_debug_id = node->debug_id;
3934 			int has_weak_ref;
3935 			int has_strong_ref;
3936 			void __user *orig_ptr = ptr;
3937 
3938 			BUG_ON(proc != node->proc);
3939 			strong = node->internal_strong_refs ||
3940 					node->local_strong_refs;
3941 			weak = !hlist_empty(&node->refs) ||
3942 					node->local_weak_refs ||
3943 					node->tmp_refs || strong;
3944 			has_strong_ref = node->has_strong_ref;
3945 			has_weak_ref = node->has_weak_ref;
3946 
3947 			if (weak && !has_weak_ref) {
3948 				node->has_weak_ref = 1;
3949 				node->pending_weak_ref = 1;
3950 				node->local_weak_refs++;
3951 			}
3952 			if (strong && !has_strong_ref) {
3953 				node->has_strong_ref = 1;
3954 				node->pending_strong_ref = 1;
3955 				node->local_strong_refs++;
3956 			}
3957 			if (!strong && has_strong_ref)
3958 				node->has_strong_ref = 0;
3959 			if (!weak && has_weak_ref)
3960 				node->has_weak_ref = 0;
3961 			if (!weak && !strong) {
3962 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3963 					     "%d:%d node %d u%016llx c%016llx deleted\n",
3964 					     proc->pid, thread->pid,
3965 					     node_debug_id,
3966 					     (u64)node_ptr,
3967 					     (u64)node_cookie);
3968 				rb_erase(&node->rb_node, &proc->nodes);
3969 				binder_inner_proc_unlock(proc);
3970 				binder_node_lock(node);
3971 				/*
3972 				 * Acquire the node lock before freeing the
3973 				 * node to serialize with other threads that
3974 				 * may have been holding the node lock while
3975 				 * decrementing this node (avoids race where
3976 				 * this thread frees while the other thread
3977 				 * is unlocking the node after the final
3978 				 * decrement)
3979 				 */
3980 				binder_node_unlock(node);
3981 				binder_free_node(node);
3982 			} else
3983 				binder_inner_proc_unlock(proc);
3984 
3985 			if (weak && !has_weak_ref)
3986 				ret = binder_put_node_cmd(
3987 						proc, thread, &ptr, node_ptr,
3988 						node_cookie, node_debug_id,
3989 						BR_INCREFS, "BR_INCREFS");
3990 			if (!ret && strong && !has_strong_ref)
3991 				ret = binder_put_node_cmd(
3992 						proc, thread, &ptr, node_ptr,
3993 						node_cookie, node_debug_id,
3994 						BR_ACQUIRE, "BR_ACQUIRE");
3995 			if (!ret && !strong && has_strong_ref)
3996 				ret = binder_put_node_cmd(
3997 						proc, thread, &ptr, node_ptr,
3998 						node_cookie, node_debug_id,
3999 						BR_RELEASE, "BR_RELEASE");
4000 			if (!ret && !weak && has_weak_ref)
4001 				ret = binder_put_node_cmd(
4002 						proc, thread, &ptr, node_ptr,
4003 						node_cookie, node_debug_id,
4004 						BR_DECREFS, "BR_DECREFS");
4005 			if (orig_ptr == ptr)
4006 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4007 					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
4008 					     proc->pid, thread->pid,
4009 					     node_debug_id,
4010 					     (u64)node_ptr,
4011 					     (u64)node_cookie);
4012 			if (ret)
4013 				return ret;
4014 		} break;
4015 		case BINDER_WORK_DEAD_BINDER:
4016 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4017 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4018 			struct binder_ref_death *death;
4019 			uint32_t cmd;
4020 			binder_uintptr_t cookie;
4021 
4022 			death = container_of(w, struct binder_ref_death, work);
4023 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4024 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4025 			else
4026 				cmd = BR_DEAD_BINDER;
4027 			cookie = death->cookie;
4028 
4029 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4030 				     "%d:%d %s %016llx\n",
4031 				      proc->pid, thread->pid,
4032 				      cmd == BR_DEAD_BINDER ?
4033 				      "BR_DEAD_BINDER" :
4034 				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4035 				      (u64)cookie);
4036 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4037 				binder_inner_proc_unlock(proc);
4038 				kfree(death);
4039 				binder_stats_deleted(BINDER_STAT_DEATH);
4040 			} else {
4041 				binder_enqueue_work_ilocked(
4042 						w, &proc->delivered_death);
4043 				binder_inner_proc_unlock(proc);
4044 			}
4045 			if (put_user(cmd, (uint32_t __user *)ptr))
4046 				return -EFAULT;
4047 			ptr += sizeof(uint32_t);
4048 			if (put_user(cookie,
4049 				     (binder_uintptr_t __user *)ptr))
4050 				return -EFAULT;
4051 			ptr += sizeof(binder_uintptr_t);
4052 			binder_stat_br(proc, thread, cmd);
4053 			if (cmd == BR_DEAD_BINDER)
4054 				goto done; /* DEAD_BINDER notifications can cause transactions */
4055 		} break;
4056 		default:
4057 			binder_inner_proc_unlock(proc);
4058 			pr_err("%d:%d: bad work type %d\n",
4059 			       proc->pid, thread->pid, w->type);
4060 			break;
4061 		}
4062 
4063 		if (!t)
4064 			continue;
4065 
4066 		BUG_ON(t->buffer == NULL);
4067 		if (t->buffer->target_node) {
4068 			struct binder_node *target_node = t->buffer->target_node;
4069 
4070 			trd->target.ptr = target_node->ptr;
4071 			trd->cookie =  target_node->cookie;
4072 			t->saved_priority = task_nice(current);
4073 			if (t->priority < target_node->min_priority &&
4074 			    !(t->flags & TF_ONE_WAY))
4075 				binder_set_nice(t->priority);
4076 			else if (!(t->flags & TF_ONE_WAY) ||
4077 				 t->saved_priority > target_node->min_priority)
4078 				binder_set_nice(target_node->min_priority);
4079 			cmd = BR_TRANSACTION;
4080 		} else {
4081 			trd->target.ptr = 0;
4082 			trd->cookie = 0;
4083 			cmd = BR_REPLY;
4084 		}
4085 		trd->code = t->code;
4086 		trd->flags = t->flags;
4087 		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4088 
4089 		t_from = binder_get_txn_from(t);
4090 		if (t_from) {
4091 			struct task_struct *sender = t_from->proc->tsk;
4092 
4093 			trd->sender_pid =
4094 				task_tgid_nr_ns(sender,
4095 						task_active_pid_ns(current));
4096 		} else {
4097 			trd->sender_pid = 0;
4098 		}
4099 
4100 		ret = binder_apply_fd_fixups(proc, t);
4101 		if (ret) {
4102 			struct binder_buffer *buffer = t->buffer;
4103 			bool oneway = !!(t->flags & TF_ONE_WAY);
4104 			int tid = t->debug_id;
4105 
4106 			if (t_from)
4107 				binder_thread_dec_tmpref(t_from);
4108 			buffer->transaction = NULL;
4109 			binder_cleanup_transaction(t, "fd fixups failed",
4110 						   BR_FAILED_REPLY);
4111 			binder_free_buf(proc, thread, buffer, true);
4112 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4113 				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4114 				     proc->pid, thread->pid,
4115 				     oneway ? "async " :
4116 					(cmd == BR_REPLY ? "reply " : ""),
4117 				     tid, BR_FAILED_REPLY, ret, __LINE__);
4118 			if (cmd == BR_REPLY) {
4119 				cmd = BR_FAILED_REPLY;
4120 				if (put_user(cmd, (uint32_t __user *)ptr))
4121 					return -EFAULT;
4122 				ptr += sizeof(uint32_t);
4123 				binder_stat_br(proc, thread, cmd);
4124 				break;
4125 			}
4126 			continue;
4127 		}
4128 		trd->data_size = t->buffer->data_size;
4129 		trd->offsets_size = t->buffer->offsets_size;
4130 		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4131 		trd->data.ptr.offsets = trd->data.ptr.buffer +
4132 					ALIGN(t->buffer->data_size,
4133 					    sizeof(void *));
4134 
4135 		tr.secctx = t->security_ctx;
4136 		if (t->security_ctx) {
4137 			cmd = BR_TRANSACTION_SEC_CTX;
4138 			trsize = sizeof(tr);
4139 		}
4140 		if (put_user(cmd, (uint32_t __user *)ptr)) {
4141 			if (t_from)
4142 				binder_thread_dec_tmpref(t_from);
4143 
4144 			binder_cleanup_transaction(t, "put_user failed",
4145 						   BR_FAILED_REPLY);
4146 
4147 			return -EFAULT;
4148 		}
4149 		ptr += sizeof(uint32_t);
4150 		if (copy_to_user(ptr, &tr, trsize)) {
4151 			if (t_from)
4152 				binder_thread_dec_tmpref(t_from);
4153 
4154 			binder_cleanup_transaction(t, "copy_to_user failed",
4155 						   BR_FAILED_REPLY);
4156 
4157 			return -EFAULT;
4158 		}
4159 		ptr += trsize;
4160 
4161 		trace_binder_transaction_received(t);
4162 		binder_stat_br(proc, thread, cmd);
4163 		binder_debug(BINDER_DEBUG_TRANSACTION,
4164 			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4165 			     proc->pid, thread->pid,
4166 			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4167 				(cmd == BR_TRANSACTION_SEC_CTX) ?
4168 				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4169 			     t->debug_id, t_from ? t_from->proc->pid : 0,
4170 			     t_from ? t_from->pid : 0, cmd,
4171 			     t->buffer->data_size, t->buffer->offsets_size,
4172 			     (u64)trd->data.ptr.buffer,
4173 			     (u64)trd->data.ptr.offsets);
4174 
4175 		if (t_from)
4176 			binder_thread_dec_tmpref(t_from);
4177 		t->buffer->allow_user_free = 1;
4178 		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4179 			binder_inner_proc_lock(thread->proc);
4180 			t->to_parent = thread->transaction_stack;
4181 			t->to_thread = thread;
4182 			thread->transaction_stack = t;
4183 			binder_inner_proc_unlock(thread->proc);
4184 		} else {
4185 			binder_free_transaction(t);
4186 		}
4187 		break;
4188 	}
4189 
4190 done:
4191 
4192 	*consumed = ptr - buffer;
4193 	binder_inner_proc_lock(proc);
4194 	if (proc->requested_threads == 0 &&
4195 	    list_empty(&thread->proc->waiting_threads) &&
4196 	    proc->requested_threads_started < proc->max_threads &&
4197 	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4198 	     BINDER_LOOPER_STATE_ENTERED))
4199 	    /* user space fails to spawn a new thread if we leave this out */) {
4200 		proc->requested_threads++;
4201 		binder_inner_proc_unlock(proc);
4202 		binder_debug(BINDER_DEBUG_THREADS,
4203 			     "%d:%d BR_SPAWN_LOOPER\n",
4204 			     proc->pid, thread->pid);
4205 		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4206 			return -EFAULT;
4207 		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4208 	} else
4209 		binder_inner_proc_unlock(proc);
4210 	return 0;
4211 }
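
/*
 * Illustrative sketch (not part of this driver): draining the BR_* stream
 * that binder_thread_read() above produces. Commands the sketch does not
 * care about are skipped by the payload size encoded in the command word
 * itself, via _IOC_SIZE() (handle_txn() is a placeholder, error handling
 * elided):
 *
 *	uint32_t rbuf[128];
 *	struct binder_write_read bwr = {
 *		.read_buffer = (binder_uintptr_t)rbuf,
 *		.read_size = sizeof(rbuf),
 *	};
 *	char *p, *end;
 *
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 *	p = (char *)rbuf;
 *	end = p + bwr.read_consumed;
 *	while (p < end) {
 *		uint32_t cmd = *(uint32_t *)p;
 *
 *		p += sizeof(uint32_t);
 *		if (cmd == BR_TRANSACTION)
 *			handle_txn((struct binder_transaction_data *)p);
 *		p += _IOC_SIZE(cmd);	// BR_NOOP etc. carry no payload
 *	}
 */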
4212 
4213 static void binder_release_work(struct binder_proc *proc,
4214 				struct list_head *list)
4215 {
4216 	struct binder_work *w;
4217 	enum binder_work_type wtype;
4218 
4219 	while (1) {
4220 		binder_inner_proc_lock(proc);
4221 		w = binder_dequeue_work_head_ilocked(list);
4222 		wtype = w ? w->type : 0;
4223 		binder_inner_proc_unlock(proc);
4224 		if (!w)
4225 			return;
4226 
4227 		switch (wtype) {
4228 		case BINDER_WORK_TRANSACTION: {
4229 			struct binder_transaction *t;
4230 
4231 			t = container_of(w, struct binder_transaction, work);
4232 
4233 			binder_cleanup_transaction(t, "process died.",
4234 						   BR_DEAD_REPLY);
4235 		} break;
4236 		case BINDER_WORK_RETURN_ERROR: {
4237 			struct binder_error *e = container_of(
4238 					w, struct binder_error, work);
4239 
4240 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4241 				"undelivered TRANSACTION_ERROR: %u\n",
4242 				e->cmd);
4243 		} break;
4244 		case BINDER_WORK_TRANSACTION_COMPLETE: {
4245 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4246 				"undelivered TRANSACTION_COMPLETE\n");
4247 			kfree(w);
4248 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4249 		} break;
4250 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4251 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4252 			struct binder_ref_death *death;
4253 
4254 			death = container_of(w, struct binder_ref_death, work);
4255 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4256 				"undelivered death notification, %016llx\n",
4257 				(u64)death->cookie);
4258 			kfree(death);
4259 			binder_stats_deleted(BINDER_STAT_DEATH);
4260 		} break;
4261 		case BINDER_WORK_NODE:
4262 			break;
4263 		default:
4264 			pr_err("unexpected work type, %d, not freed\n",
4265 			       wtype);
4266 			break;
4267 		}
4268 	}
4269 
4270 }
4271 
4272 static struct binder_thread *binder_get_thread_ilocked(
4273 		struct binder_proc *proc, struct binder_thread *new_thread)
4274 {
4275 	struct binder_thread *thread = NULL;
4276 	struct rb_node *parent = NULL;
4277 	struct rb_node **p = &proc->threads.rb_node;
4278 
4279 	while (*p) {
4280 		parent = *p;
4281 		thread = rb_entry(parent, struct binder_thread, rb_node);
4282 
4283 		if (current->pid < thread->pid)
4284 			p = &(*p)->rb_left;
4285 		else if (current->pid > thread->pid)
4286 			p = &(*p)->rb_right;
4287 		else
4288 			return thread;
4289 	}
4290 	if (!new_thread)
4291 		return NULL;
4292 	thread = new_thread;
4293 	binder_stats_created(BINDER_STAT_THREAD);
4294 	thread->proc = proc;
4295 	thread->pid = current->pid;
4296 	atomic_set(&thread->tmp_ref, 0);
4297 	init_waitqueue_head(&thread->wait);
4298 	INIT_LIST_HEAD(&thread->todo);
4299 	rb_link_node(&thread->rb_node, parent, p);
4300 	rb_insert_color(&thread->rb_node, &proc->threads);
4301 	thread->looper_need_return = true;
4302 	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4303 	thread->return_error.cmd = BR_OK;
4304 	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4305 	thread->reply_error.cmd = BR_OK;
4306 	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4307 	return thread;
4308 }
4309 
4310 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4311 {
4312 	struct binder_thread *thread;
4313 	struct binder_thread *new_thread;
4314 
4315 	binder_inner_proc_lock(proc);
4316 	thread = binder_get_thread_ilocked(proc, NULL);
4317 	binder_inner_proc_unlock(proc);
4318 	if (!thread) {
4319 		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4320 		if (new_thread == NULL)
4321 			return NULL;
4322 		binder_inner_proc_lock(proc);
4323 		thread = binder_get_thread_ilocked(proc, new_thread);
4324 		binder_inner_proc_unlock(proc);
4325 		if (thread != new_thread)
4326 			kfree(new_thread);
4327 	}
4328 	return thread;
4329 }
4330 
4331 static void binder_free_proc(struct binder_proc *proc)
4332 {
4333 	struct binder_device *device;
4334 
4335 	BUG_ON(!list_empty(&proc->todo));
4336 	BUG_ON(!list_empty(&proc->delivered_death));
4337 	if (proc->outstanding_txns)
4338 		pr_warn("%s: Unexpected outstanding_txns %d\n",
4339 			__func__, proc->outstanding_txns);
4340 	device = container_of(proc->context, struct binder_device, context);
4341 	if (refcount_dec_and_test(&device->ref)) {
4342 		kfree(proc->context->name);
4343 		kfree(device);
4344 	}
4345 	binder_alloc_deferred_release(&proc->alloc);
4346 	put_task_struct(proc->tsk);
4347 	put_cred(proc->cred);
4348 	binder_stats_deleted(BINDER_STAT_PROC);
4349 	kfree(proc);
4350 }
4351 
4352 static void binder_free_thread(struct binder_thread *thread)
4353 {
4354 	BUG_ON(!list_empty(&thread->todo));
4355 	binder_stats_deleted(BINDER_STAT_THREAD);
4356 	binder_proc_dec_tmpref(thread->proc);
4357 	kfree(thread);
4358 }
4359 
4360 static int binder_thread_release(struct binder_proc *proc,
4361 				 struct binder_thread *thread)
4362 {
4363 	struct binder_transaction *t;
4364 	struct binder_transaction *send_reply = NULL;
4365 	int active_transactions = 0;
4366 	struct binder_transaction *last_t = NULL;
4367 
4368 	binder_inner_proc_lock(thread->proc);
4369 	/*
4370 	 * take a ref on the proc so it survives
4371 	 * after we remove this thread from proc->threads.
4372 	 * The corresponding dec is when we actually
4373 	 * free the thread in binder_free_thread()
4374 	 */
4375 	proc->tmp_ref++;
4376 	/*
4377 	 * take a ref on this thread to ensure it
4378 	 * survives while we are releasing it
4379 	 */
4380 	atomic_inc(&thread->tmp_ref);
4381 	rb_erase(&thread->rb_node, &proc->threads);
4382 	t = thread->transaction_stack;
4383 	if (t) {
4384 		spin_lock(&t->lock);
4385 		if (t->to_thread == thread)
4386 			send_reply = t;
4387 	} else {
4388 		__acquire(&t->lock);
4389 	}
4390 	thread->is_dead = true;
4391 
4392 	while (t) {
4393 		last_t = t;
4394 		active_transactions++;
4395 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4396 			     "release %d:%d transaction %d %s, still active\n",
4397 			      proc->pid, thread->pid,
4398 			     t->debug_id,
4399 			     (t->to_thread == thread) ? "in" : "out");
4400 
4401 		if (t->to_thread == thread) {
4402 			thread->proc->outstanding_txns--;
4403 			t->to_proc = NULL;
4404 			t->to_thread = NULL;
4405 			if (t->buffer) {
4406 				t->buffer->transaction = NULL;
4407 				t->buffer = NULL;
4408 			}
4409 			t = t->to_parent;
4410 		} else if (t->from == thread) {
4411 			t->from = NULL;
4412 			t = t->from_parent;
4413 		} else
4414 			BUG();
4415 		spin_unlock(&last_t->lock);
4416 		if (t)
4417 			spin_lock(&t->lock);
4418 		else
4419 			__acquire(&t->lock);
4420 	}
4421 	/* annotation for sparse, lock not acquired in last iteration above */
4422 	__release(&t->lock);
4423 
4424 	/*
4425 	 * If this thread used poll, make sure we remove the waitqueue
4426 	 * from any epoll data structures holding it with POLLFREE.
4427 	 * waitqueue_active() is safe to use here because we're holding
4428 	 * the inner lock.
4429 	 */
4430 	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4431 	    waitqueue_active(&thread->wait)) {
4432 		wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
4433 	}
4434 
4435 	binder_inner_proc_unlock(thread->proc);
4436 
4437 	/*
4438 	 * This is needed to avoid races between wake_up_poll() above and
4439 	 * ep_remove_waitqueue() called for other reasons (e.g. the epoll file
4440 	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4441 	 * lock, so we can be sure it's done after calling synchronize_rcu().
4442 	 */
4443 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
4444 		synchronize_rcu();
4445 
4446 	if (send_reply)
4447 		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4448 	binder_release_work(proc, &thread->todo);
4449 	binder_thread_dec_tmpref(thread);
4450 	return active_transactions;
4451 }
4452 
4453 static __poll_t binder_poll(struct file *filp,
4454 				struct poll_table_struct *wait)
4455 {
4456 	struct binder_proc *proc = filp->private_data;
4457 	struct binder_thread *thread = NULL;
4458 	bool wait_for_proc_work;
4459 
4460 	thread = binder_get_thread(proc);
4461 	if (!thread)
4462 		return POLLERR;
4463 
4464 	binder_inner_proc_lock(thread->proc);
4465 	thread->looper |= BINDER_LOOPER_STATE_POLL;
4466 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4467 
4468 	binder_inner_proc_unlock(thread->proc);
4469 
4470 	poll_wait(filp, &thread->wait, wait);
4471 
4472 	if (binder_has_work(thread, wait_for_proc_work))
4473 		return EPOLLIN;
4474 
4475 	return 0;
4476 }
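
/*
 * Illustrative sketch (not part of this driver): pairing binder_poll() above
 * with a non-blocking drain. "fd" is assumed to have been opened with
 * O_NONBLOCK so the read side of BINDER_WRITE_READ returns -EAGAIN instead
 * of sleeping; drain_binder_work() is a placeholder for a read loop like the
 * one sketched after binder_thread_read():
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		drain_binder_work(fd);
 */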
4477 
4478 static int binder_ioctl_write_read(struct file *filp,
4479 				unsigned int cmd, unsigned long arg,
4480 				struct binder_thread *thread)
4481 {
4482 	int ret = 0;
4483 	struct binder_proc *proc = filp->private_data;
4484 	unsigned int size = _IOC_SIZE(cmd);
4485 	void __user *ubuf = (void __user *)arg;
4486 	struct binder_write_read bwr;
4487 
4488 	if (size != sizeof(struct binder_write_read)) {
4489 		ret = -EINVAL;
4490 		goto out;
4491 	}
4492 	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4493 		ret = -EFAULT;
4494 		goto out;
4495 	}
4496 	binder_debug(BINDER_DEBUG_READ_WRITE,
4497 		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4498 		     proc->pid, thread->pid,
4499 		     (u64)bwr.write_size, (u64)bwr.write_buffer,
4500 		     (u64)bwr.read_size, (u64)bwr.read_buffer);
4501 
4502 	if (bwr.write_size > 0) {
4503 		ret = binder_thread_write(proc, thread,
4504 					  bwr.write_buffer,
4505 					  bwr.write_size,
4506 					  &bwr.write_consumed);
4507 		trace_binder_write_done(ret);
4508 		if (ret < 0) {
4509 			bwr.read_consumed = 0;
4510 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4511 				ret = -EFAULT;
4512 			goto out;
4513 		}
4514 	}
4515 	if (bwr.read_size > 0) {
4516 		ret = binder_thread_read(proc, thread, bwr.read_buffer,
4517 					 bwr.read_size,
4518 					 &bwr.read_consumed,
4519 					 filp->f_flags & O_NONBLOCK);
4520 		trace_binder_read_done(ret);
4521 		binder_inner_proc_lock(proc);
4522 		if (!binder_worklist_empty_ilocked(&proc->todo))
4523 			binder_wakeup_proc_ilocked(proc);
4524 		binder_inner_proc_unlock(proc);
4525 		if (ret < 0) {
4526 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4527 				ret = -EFAULT;
4528 			goto out;
4529 		}
4530 	}
4531 	binder_debug(BINDER_DEBUG_READ_WRITE,
4532 		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4533 		     proc->pid, thread->pid,
4534 		     (u64)bwr.write_consumed, (u64)bwr.write_size,
4535 		     (u64)bwr.read_consumed, (u64)bwr.read_size);
4536 	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4537 		ret = -EFAULT;
4538 		goto out;
4539 	}
4540 out:
4541 	return ret;
4542 }
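
/*
 * Illustrative sketch (not part of this driver): a single BINDER_WRITE_READ
 * can carry both directions at once, which is how binder_ioctl_write_read()
 * above reports progress back through the consumed counters. "wbuf" is
 * assumed to hold a prepared BC_* stream of wlen bytes:
 *
 *	uint32_t rbuf[128];
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)wbuf,
 *		.write_size = wlen,
 *		.read_buffer = (binder_uintptr_t)rbuf,
 *		.read_size = sizeof(rbuf),
 *	};
 *
 *	if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
 *		perror("BINDER_WRITE_READ");
 *	// consumed counters are copied back even if the write or read failed
 */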
4543 
4544 static int binder_ioctl_set_ctx_mgr(struct file *filp,
4545 				    struct flat_binder_object *fbo)
4546 {
4547 	int ret = 0;
4548 	struct binder_proc *proc = filp->private_data;
4549 	struct binder_context *context = proc->context;
4550 	struct binder_node *new_node;
4551 	kuid_t curr_euid = current_euid();
4552 
4553 	mutex_lock(&context->context_mgr_node_lock);
4554 	if (context->binder_context_mgr_node) {
4555 		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4556 		ret = -EBUSY;
4557 		goto out;
4558 	}
4559 	ret = security_binder_set_context_mgr(proc->cred);
4560 	if (ret < 0)
4561 		goto out;
4562 	if (uid_valid(context->binder_context_mgr_uid)) {
4563 		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4564 			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4565 			       from_kuid(&init_user_ns, curr_euid),
4566 			       from_kuid(&init_user_ns,
4567 					 context->binder_context_mgr_uid));
4568 			ret = -EPERM;
4569 			goto out;
4570 		}
4571 	} else {
4572 		context->binder_context_mgr_uid = curr_euid;
4573 	}
4574 	new_node = binder_new_node(proc, fbo);
4575 	if (!new_node) {
4576 		ret = -ENOMEM;
4577 		goto out;
4578 	}
4579 	binder_node_lock(new_node);
4580 	new_node->local_weak_refs++;
4581 	new_node->local_strong_refs++;
4582 	new_node->has_strong_ref = 1;
4583 	new_node->has_weak_ref = 1;
4584 	context->binder_context_mgr_node = new_node;
4585 	binder_node_unlock(new_node);
4586 	binder_put_node(new_node);
4587 out:
4588 	mutex_unlock(&context->context_mgr_node_lock);
4589 	return ret;
4590 }
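
/*
 * Illustrative sketch (not part of this driver): a service manager claims
 * the context with the classic ioctl below; every other process then
 * reaches it through handle 0. The plain variant passes no
 * flat_binder_object, so binder_ioctl_set_ctx_mgr() above sees fbo == NULL:
 *
 *	if (ioctl(fd, BINDER_SET_CONTEXT_MGR, 0) < 0)
 *		perror("BINDER_SET_CONTEXT_MGR");
 */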
4591 
4592 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4593 		struct binder_node_info_for_ref *info)
4594 {
4595 	struct binder_node *node;
4596 	struct binder_context *context = proc->context;
4597 	__u32 handle = info->handle;
4598 
4599 	if (info->strong_count || info->weak_count || info->reserved1 ||
4600 	    info->reserved2 || info->reserved3) {
4601 		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.\n",
4602 				  proc->pid);
4603 		return -EINVAL;
4604 	}
4605 
4606 	/* This ioctl may only be used by the context manager */
4607 	mutex_lock(&context->context_mgr_node_lock);
4608 	if (!context->binder_context_mgr_node ||
4609 		context->binder_context_mgr_node->proc != proc) {
4610 		mutex_unlock(&context->context_mgr_node_lock);
4611 		return -EPERM;
4612 	}
4613 	mutex_unlock(&context->context_mgr_node_lock);
4614 
4615 	node = binder_get_node_from_ref(proc, handle, true, NULL);
4616 	if (!node)
4617 		return -EINVAL;
4618 
4619 	info->strong_count = node->local_strong_refs +
4620 		node->internal_strong_refs;
4621 	info->weak_count = node->local_weak_refs;
4622 
4623 	binder_put_node(node);
4624 
4625 	return 0;
4626 }
4627 
4628 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4629 				struct binder_node_debug_info *info)
4630 {
4631 	struct rb_node *n;
4632 	binder_uintptr_t ptr = info->ptr;
4633 
4634 	memset(info, 0, sizeof(*info));
4635 
4636 	binder_inner_proc_lock(proc);
4637 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4638 		struct binder_node *node = rb_entry(n, struct binder_node,
4639 						    rb_node);
4640 		if (node->ptr > ptr) {
4641 			info->ptr = node->ptr;
4642 			info->cookie = node->cookie;
4643 			info->has_strong_ref = node->has_strong_ref;
4644 			info->has_weak_ref = node->has_weak_ref;
4645 			break;
4646 		}
4647 	}
4648 	binder_inner_proc_unlock(proc);
4649 
4650 	return 0;
4651 }
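
/*
 * Illustrative sketch (not part of this driver): because the lookup above
 * returns the first node with ptr strictly greater than the one passed in,
 * user space can walk every node of its own process by feeding each result
 * back until ptr comes back as zero:
 *
 *	struct binder_node_debug_info info;
 *
 *	memset(&info, 0, sizeof(info));
 *	do {
 *		if (ioctl(fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
 *			break;
 *		if (info.ptr)
 *			printf("node %llx cookie %llx\n",
 *			       (unsigned long long)info.ptr,
 *			       (unsigned long long)info.cookie);
 *	} while (info.ptr);
 */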
4652 
4653 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
4654 {
4655 	struct rb_node *n;
4656 	struct binder_thread *thread;
4657 
4658 	if (proc->outstanding_txns > 0)
4659 		return true;
4660 
4661 	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
4662 		thread = rb_entry(n, struct binder_thread, rb_node);
4663 		if (thread->transaction_stack)
4664 			return true;
4665 	}
4666 	return false;
4667 }
4668 
4669 static int binder_ioctl_freeze(struct binder_freeze_info *info,
4670 			       struct binder_proc *target_proc)
4671 {
4672 	int ret = 0;
4673 
4674 	if (!info->enable) {
4675 		binder_inner_proc_lock(target_proc);
4676 		target_proc->sync_recv = false;
4677 		target_proc->async_recv = false;
4678 		target_proc->is_frozen = false;
4679 		binder_inner_proc_unlock(target_proc);
4680 		return 0;
4681 	}
4682 
4683 	/*
4684 	 * Freezing the target. Prevent new transactions by
4685 	 * setting frozen state. If timeout specified, wait
4686 	 * for transactions to drain.
4687 	 */
4688 	binder_inner_proc_lock(target_proc);
4689 	target_proc->sync_recv = false;
4690 	target_proc->async_recv = false;
4691 	target_proc->is_frozen = true;
4692 	binder_inner_proc_unlock(target_proc);
4693 
4694 	if (info->timeout_ms > 0)
4695 		ret = wait_event_interruptible_timeout(
4696 			target_proc->freeze_wait,
4697 			(!target_proc->outstanding_txns),
4698 			msecs_to_jiffies(info->timeout_ms));
4699 
4700 	/* Check pending transactions that wait for reply */
4701 	if (ret >= 0) {
4702 		binder_inner_proc_lock(target_proc);
4703 		if (binder_txns_pending_ilocked(target_proc))
4704 			ret = -EAGAIN;
4705 		binder_inner_proc_unlock(target_proc);
4706 	}
4707 
4708 	if (ret < 0) {
4709 		binder_inner_proc_lock(target_proc);
4710 		target_proc->is_frozen = false;
4711 		binder_inner_proc_unlock(target_proc);
4712 	}
4713 
4714 	return ret;
4715 }
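
/*
 * Illustrative sketch (not part of this driver): freezing a target process
 * with the logic above. A drain that does not complete surfaces as -EAGAIN,
 * in which case the caller can retry or thaw; "target_pid" is assumed
 * known:
 *
 *	struct binder_freeze_info info = {
 *		.pid = target_pid,
 *		.enable = 1,
 *		.timeout_ms = 100,
 *	};
 *
 *	if (ioctl(fd, BINDER_FREEZE, &info) < 0 && errno == EAGAIN) {
 *		info.enable = 0;
 *		ioctl(fd, BINDER_FREEZE, &info);	// thaw again
 *	}
 */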
4716 
4717 static int binder_ioctl_get_freezer_info(
4718 				struct binder_frozen_status_info *info)
4719 {
4720 	struct binder_proc *target_proc;
4721 	bool found = false;
4722 	__u32 txns_pending;
4723 
4724 	info->sync_recv = 0;
4725 	info->async_recv = 0;
4726 
4727 	mutex_lock(&binder_procs_lock);
4728 	hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
4729 		if (target_proc->pid == info->pid) {
4730 			found = true;
4731 			binder_inner_proc_lock(target_proc);
4732 			txns_pending = binder_txns_pending_ilocked(target_proc);
4733 			info->sync_recv |= target_proc->sync_recv |
4734 					(txns_pending << 1);
4735 			info->async_recv |= target_proc->async_recv;
4736 			binder_inner_proc_unlock(target_proc);
4737 		}
4738 	}
4739 	mutex_unlock(&binder_procs_lock);
4740 
4741 	if (!found)
4742 		return -EINVAL;
4743 
4744 	return 0;
4745 }
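
/*
 * Illustrative sketch (not part of this driver): decoding the status bits
 * assembled above. Bit 0 of sync_recv reports that a synchronous
 * transaction was attempted while frozen, bit 1 that transactions are
 * still awaiting a reply; async_recv reports buffered async work:
 *
 *	struct binder_frozen_status_info info = { .pid = target_pid };
 *
 *	if (ioctl(fd, BINDER_GET_FROZEN_INFO, &info) == 0) {
 *		bool got_sync = info.sync_recv & 1;
 *		bool txns_pending = info.sync_recv & 2;
 *		bool got_async = info.async_recv & 1;
 *	}
 */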
4746 
4747 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4748 {
4749 	int ret;
4750 	struct binder_proc *proc = filp->private_data;
4751 	struct binder_thread *thread;
4752 	unsigned int size = _IOC_SIZE(cmd);
4753 	void __user *ubuf = (void __user *)arg;
4754 
4755 	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
4756 			proc->pid, current->pid, cmd, arg);*/
4757 
4758 	binder_selftest_alloc(&proc->alloc);
4759 
4760 	trace_binder_ioctl(cmd, arg);
4761 
4762 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4763 	if (ret)
4764 		goto err_unlocked;
4765 
4766 	thread = binder_get_thread(proc);
4767 	if (thread == NULL) {
4768 		ret = -ENOMEM;
4769 		goto err;
4770 	}
4771 
4772 	switch (cmd) {
4773 	case BINDER_WRITE_READ:
4774 		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4775 		if (ret)
4776 			goto err;
4777 		break;
4778 	case BINDER_SET_MAX_THREADS: {
4779 		int max_threads;
4780 
4781 		if (copy_from_user(&max_threads, ubuf,
4782 				   sizeof(max_threads))) {
4783 			ret = -EINVAL;
4784 			goto err;
4785 		}
4786 		binder_inner_proc_lock(proc);
4787 		proc->max_threads = max_threads;
4788 		binder_inner_proc_unlock(proc);
4789 		break;
4790 	}
4791 	case BINDER_SET_CONTEXT_MGR_EXT: {
4792 		struct flat_binder_object fbo;
4793 
4794 		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
4795 			ret = -EINVAL;
4796 			goto err;
4797 		}
4798 		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
4799 		if (ret)
4800 			goto err;
4801 		break;
4802 	}
4803 	case BINDER_SET_CONTEXT_MGR:
4804 		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
4805 		if (ret)
4806 			goto err;
4807 		break;
4808 	case BINDER_THREAD_EXIT:
4809 		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
4810 			     proc->pid, thread->pid);
4811 		binder_thread_release(proc, thread);
4812 		thread = NULL;
4813 		break;
4814 	case BINDER_VERSION: {
4815 		struct binder_version __user *ver = ubuf;
4816 
4817 		if (size != sizeof(struct binder_version)) {
4818 			ret = -EINVAL;
4819 			goto err;
4820 		}
4821 		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4822 			     &ver->protocol_version)) {
4823 			ret = -EINVAL;
4824 			goto err;
4825 		}
4826 		break;
4827 	}
4828 	case BINDER_GET_NODE_INFO_FOR_REF: {
4829 		struct binder_node_info_for_ref info;
4830 
4831 		if (copy_from_user(&info, ubuf, sizeof(info))) {
4832 			ret = -EFAULT;
4833 			goto err;
4834 		}
4835 
4836 		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
4837 		if (ret < 0)
4838 			goto err;
4839 
4840 		if (copy_to_user(ubuf, &info, sizeof(info))) {
4841 			ret = -EFAULT;
4842 			goto err;
4843 		}
4844 
4845 		break;
4846 	}
4847 	case BINDER_GET_NODE_DEBUG_INFO: {
4848 		struct binder_node_debug_info info;
4849 
4850 		if (copy_from_user(&info, ubuf, sizeof(info))) {
4851 			ret = -EFAULT;
4852 			goto err;
4853 		}
4854 
4855 		ret = binder_ioctl_get_node_debug_info(proc, &info);
4856 		if (ret < 0)
4857 			goto err;
4858 
4859 		if (copy_to_user(ubuf, &info, sizeof(info))) {
4860 			ret = -EFAULT;
4861 			goto err;
4862 		}
4863 		break;
4864 	}
4865 	case BINDER_FREEZE: {
4866 		struct binder_freeze_info info;
4867 		struct binder_proc **target_procs = NULL, *target_proc;
4868 		int target_procs_count = 0, i = 0;
4869 
4870 		ret = 0;
4871 
4872 		if (copy_from_user(&info, ubuf, sizeof(info))) {
4873 			ret = -EFAULT;
4874 			goto err;
4875 		}
4876 
4877 		mutex_lock(&binder_procs_lock);
4878 		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
4879 			if (target_proc->pid == info.pid)
4880 				target_procs_count++;
4881 		}
4882 
4883 		if (target_procs_count == 0) {
4884 			mutex_unlock(&binder_procs_lock);
4885 			ret = -EINVAL;
4886 			goto err;
4887 		}
4888 
4889 		target_procs = kcalloc(target_procs_count,
4890 				       sizeof(struct binder_proc *),
4891 				       GFP_KERNEL);
4892 
4893 		if (!target_procs) {
4894 			mutex_unlock(&binder_procs_lock);
4895 			ret = -ENOMEM;
4896 			goto err;
4897 		}
4898 
4899 		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
4900 			if (target_proc->pid != info.pid)
4901 				continue;
4902 
4903 			binder_inner_proc_lock(target_proc);
4904 			target_proc->tmp_ref++;
4905 			binder_inner_proc_unlock(target_proc);
4906 
4907 			target_procs[i++] = target_proc;
4908 		}
4909 		mutex_unlock(&binder_procs_lock);
4910 
4911 		for (i = 0; i < target_procs_count; i++) {
4912 			if (ret >= 0)
4913 				ret = binder_ioctl_freeze(&info,
4914 							  target_procs[i]);
4915 
4916 			binder_proc_dec_tmpref(target_procs[i]);
4917 		}
4918 
4919 		kfree(target_procs);
4920 
4921 		if (ret < 0)
4922 			goto err;
4923 		break;
4924 	}
4925 	case BINDER_GET_FROZEN_INFO: {
4926 		struct binder_frozen_status_info info;
4927 
4928 		if (copy_from_user(&info, ubuf, sizeof(info))) {
4929 			ret = -EFAULT;
4930 			goto err;
4931 		}
4932 
4933 		ret = binder_ioctl_get_freezer_info(&info);
4934 		if (ret < 0)
4935 			goto err;
4936 
4937 		if (copy_to_user(ubuf, &info, sizeof(info))) {
4938 			ret = -EFAULT;
4939 			goto err;
4940 		}
4941 		break;
4942 	}
4943 	case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
4944 		uint32_t enable;
4945 
4946 		if (copy_from_user(&enable, ubuf, sizeof(enable))) {
4947 			ret = -EFAULT;
4948 			goto err;
4949 		}
4950 		binder_inner_proc_lock(proc);
4951 		proc->oneway_spam_detection_enabled = (bool)enable;
4952 		binder_inner_proc_unlock(proc);
4953 		break;
4954 	}
4955 	default:
4956 		ret = -EINVAL;
4957 		goto err;
4958 	}
4959 	ret = 0;
4960 err:
4961 	if (thread)
4962 		thread->looper_need_return = false;
4963 	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4964 	if (ret && ret != -EINTR)
4965 		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
4966 err_unlocked:
4967 	trace_binder_ioctl_done(ret);
4968 	return ret;
4969 }
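
/*
 * Illustrative sketch (not part of this driver): typical start-up against
 * binder_ioctl() above checks the protocol version, then caps the pool of
 * loopers the kernel may ask for via BR_SPAWN_LOOPER:
 *
 *	struct binder_version vers;
 *	uint32_t max_threads = 15;
 *
 *	if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		exit(1);	// incompatible kernel driver
 *	ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads);
 */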
4970 
4971 static void binder_vma_open(struct vm_area_struct *vma)
4972 {
4973 	struct binder_proc *proc = vma->vm_private_data;
4974 
4975 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4976 		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4977 		     proc->pid, vma->vm_start, vma->vm_end,
4978 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4979 		     (unsigned long)pgprot_val(vma->vm_page_prot));
4980 }
4981 
4982 static void binder_vma_close(struct vm_area_struct *vma)
4983 {
4984 	struct binder_proc *proc = vma->vm_private_data;
4985 
4986 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4987 		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4988 		     proc->pid, vma->vm_start, vma->vm_end,
4989 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4990 		     (unsigned long)pgprot_val(vma->vm_page_prot));
4991 	binder_alloc_vma_close(&proc->alloc);
4992 }
4993 
4994 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
4995 {
4996 	return VM_FAULT_SIGBUS;
4997 }
4998 
4999 static const struct vm_operations_struct binder_vm_ops = {
5000 	.open = binder_vma_open,
5001 	.close = binder_vma_close,
5002 	.fault = binder_vm_fault,
5003 };
5004 
5005 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5006 {
5007 	struct binder_proc *proc = filp->private_data;
5008 
5009 	if (proc->tsk != current->group_leader)
5010 		return -EINVAL;
5011 
5012 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5013 		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5014 		     __func__, proc->pid, vma->vm_start, vma->vm_end,
5015 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5016 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5017 
5018 	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5019 		pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5020 		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5021 		return -EPERM;
5022 	}
5023 	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5024 	vma->vm_flags &= ~VM_MAYWRITE;
5025 
5026 	vma->vm_ops = &binder_vm_ops;
5027 	vma->vm_private_data = proc;
5028 
5029 	return binder_alloc_mmap_handler(&proc->alloc, vma);
5030 }
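
/*
 * Illustrative sketch (not part of this driver): user space maps the
 * transaction buffer read-only, once, right after open();
 * FORBIDDEN_MMAP_FLAGS above rejects writable mappings. The 1 MiB size is
 * just a common choice, not a requirement:
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ,
 *			 MAP_PRIVATE, fd, 0);
 *
 *	if (fd < 0 || map == MAP_FAILED)
 *		exit(1);
 */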
5031 
5032 static int binder_open(struct inode *nodp, struct file *filp)
5033 {
5034 	struct binder_proc *proc, *itr;
5035 	struct binder_device *binder_dev;
5036 	struct binderfs_info *info;
5037 	struct dentry *binder_binderfs_dir_entry_proc = NULL;
5038 	bool existing_pid = false;
5039 
5040 	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5041 		     current->group_leader->pid, current->pid);
5042 
5043 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5044 	if (proc == NULL)
5045 		return -ENOMEM;
5046 	spin_lock_init(&proc->inner_lock);
5047 	spin_lock_init(&proc->outer_lock);
5048 	get_task_struct(current->group_leader);
5049 	proc->tsk = current->group_leader;
5050 	proc->cred = get_cred(filp->f_cred);
5051 	INIT_LIST_HEAD(&proc->todo);
5052 	init_waitqueue_head(&proc->freeze_wait);
5053 	proc->default_priority = task_nice(current);
5054 	/* binderfs stashes devices in i_private */
5055 	if (is_binderfs_device(nodp)) {
5056 		binder_dev = nodp->i_private;
5057 		info = nodp->i_sb->s_fs_info;
5058 		binder_binderfs_dir_entry_proc = info->proc_log_dir;
5059 	} else {
5060 		binder_dev = container_of(filp->private_data,
5061 					  struct binder_device, miscdev);
5062 	}
5063 	refcount_inc(&binder_dev->ref);
5064 	proc->context = &binder_dev->context;
5065 	binder_alloc_init(&proc->alloc);
5066 
5067 	binder_stats_created(BINDER_STAT_PROC);
5068 	proc->pid = current->group_leader->pid;
5069 	INIT_LIST_HEAD(&proc->delivered_death);
5070 	INIT_LIST_HEAD(&proc->waiting_threads);
5071 	filp->private_data = proc;
5072 
5073 	mutex_lock(&binder_procs_lock);
5074 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
5075 		if (itr->pid == proc->pid) {
5076 			existing_pid = true;
5077 			break;
5078 		}
5079 	}
5080 	hlist_add_head(&proc->proc_node, &binder_procs);
5081 	mutex_unlock(&binder_procs_lock);
5082 
5083 	if (binder_debugfs_dir_entry_proc && !existing_pid) {
5084 		char strbuf[11];
5085 
5086 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5087 		/*
5088 		 * proc debug entries are shared between contexts.
5089 		 * Only create for the first PID to avoid debugfs log spamming.
5090 		 * The printing code will print all contexts for a given
5091 		 * PID anyway, so this is not a problem.
5092 		 */
5093 		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5094 			binder_debugfs_dir_entry_proc,
5095 			(void *)(unsigned long)proc->pid,
5096 			&proc_fops);
5097 	}
5098 
5099 	if (binder_binderfs_dir_entry_proc && !existing_pid) {
5100 		char strbuf[11];
5101 		struct dentry *binderfs_entry;
5102 
5103 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5104 		/*
5105 		 * Similar to debugfs, the process-specific log file is shared
5106 		 * between contexts. Only create it for the first PID.
5107 		 * This is OK since, as with debugfs, the log file will contain
5108 		 * information on all contexts of a given PID.
5109 		 */
5110 		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5111 			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5112 		if (!IS_ERR(binderfs_entry)) {
5113 			proc->binderfs_entry = binderfs_entry;
5114 		} else {
5115 			int error;
5116 
5117 			error = PTR_ERR(binderfs_entry);
5118 			pr_warn("Unable to create file %s in binderfs (error %d)\n",
5119 				strbuf, error);
5120 		}
5121 	}
5122 
5123 	return 0;
5124 }
5125 
5126 static int binder_flush(struct file *filp, fl_owner_t id)
5127 {
5128 	struct binder_proc *proc = filp->private_data;
5129 
5130 	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5131 
5132 	return 0;
5133 }
5134 
5135 static void binder_deferred_flush(struct binder_proc *proc)
5136 {
5137 	struct rb_node *n;
5138 	int wake_count = 0;
5139 
5140 	binder_inner_proc_lock(proc);
5141 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5142 		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5143 
5144 		thread->looper_need_return = true;
5145 		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5146 			wake_up_interruptible(&thread->wait);
5147 			wake_count++;
5148 		}
5149 	}
5150 	binder_inner_proc_unlock(proc);
5151 
5152 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5153 		     "binder_flush: %d woke %d threads\n", proc->pid,
5154 		     wake_count);
5155 }
5156 
5157 static int binder_release(struct inode *nodp, struct file *filp)
5158 {
5159 	struct binder_proc *proc = filp->private_data;
5160 
5161 	debugfs_remove(proc->debugfs_entry);
5162 
5163 	if (proc->binderfs_entry) {
5164 		binderfs_remove_file(proc->binderfs_entry);
5165 		proc->binderfs_entry = NULL;
5166 	}
5167 
5168 	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5169 
5170 	return 0;
5171 }
5172 
5173 static int binder_node_release(struct binder_node *node, int refs)
5174 {
5175 	struct binder_ref *ref;
5176 	int death = 0;
5177 	struct binder_proc *proc = node->proc;
5178 
5179 	binder_release_work(proc, &node->async_todo);
5180 
5181 	binder_node_lock(node);
5182 	binder_inner_proc_lock(proc);
5183 	binder_dequeue_work_ilocked(&node->work);
5184 	/*
5185 	 * The caller must have taken a temporary ref on the node.
5186 	 */
5187 	BUG_ON(!node->tmp_refs);
5188 	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5189 		binder_inner_proc_unlock(proc);
5190 		binder_node_unlock(node);
5191 		binder_free_node(node);
5192 
5193 		return refs;
5194 	}
5195 
5196 	node->proc = NULL;
5197 	node->local_strong_refs = 0;
5198 	node->local_weak_refs = 0;
5199 	binder_inner_proc_unlock(proc);
5200 
5201 	spin_lock(&binder_dead_nodes_lock);
5202 	hlist_add_head(&node->dead_node, &binder_dead_nodes);
5203 	spin_unlock(&binder_dead_nodes_lock);
5204 
5205 	hlist_for_each_entry(ref, &node->refs, node_entry) {
5206 		refs++;
5207 		/*
5208 		 * Need the node lock to synchronize
5209 		 * with new notification requests and the
5210 		 * inner lock to synchronize with queued
5211 		 * death notifications.
5212 		 */
5213 		binder_inner_proc_lock(ref->proc);
5214 		if (!ref->death) {
5215 			binder_inner_proc_unlock(ref->proc);
5216 			continue;
5217 		}
5218 
5219 		death++;
5220 
5221 		BUG_ON(!list_empty(&ref->death->work.entry));
5222 		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5223 		binder_enqueue_work_ilocked(&ref->death->work,
5224 					    &ref->proc->todo);
5225 		binder_wakeup_proc_ilocked(ref->proc);
5226 		binder_inner_proc_unlock(ref->proc);
5227 	}
5228 
5229 	binder_debug(BINDER_DEBUG_DEAD_BINDER,
5230 		     "node %d now dead, refs %d, death %d\n",
5231 		     node->debug_id, refs, death);
5232 	binder_node_unlock(node);
5233 	binder_put_node(node);
5234 
5235 	return refs;
5236 }
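
/*
 * Each BINDER_WORK_DEAD_BINDER item queued above is later consumed by the
 * remote process's read path and translated into a BR_DEAD_BINDER command,
 * which is how a process that registered a death notification learns that
 * this node's owner has died.
 */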
5237 
5238 static void binder_deferred_release(struct binder_proc *proc)
5239 {
5240 	struct binder_context *context = proc->context;
5241 	struct rb_node *n;
5242 	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5243 
5244 	mutex_lock(&binder_procs_lock);
5245 	hlist_del(&proc->proc_node);
5246 	mutex_unlock(&binder_procs_lock);
5247 
5248 	mutex_lock(&context->context_mgr_node_lock);
5249 	if (context->binder_context_mgr_node &&
5250 	    context->binder_context_mgr_node->proc == proc) {
5251 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
5252 			     "%s: %d context_mgr_node gone\n",
5253 			     __func__, proc->pid);
5254 		context->binder_context_mgr_node = NULL;
5255 	}
5256 	mutex_unlock(&context->context_mgr_node_lock);
5257 	binder_inner_proc_lock(proc);
5258 	/*
5259 	 * Make sure proc stays alive after we
5260 	 * remove all the threads
5261 	 */
5262 	proc->tmp_ref++;
5263 
5264 	proc->is_dead = true;
5265 	proc->is_frozen = false;
5266 	proc->sync_recv = false;
5267 	proc->async_recv = false;
5268 	threads = 0;
5269 	active_transactions = 0;
5270 	while ((n = rb_first(&proc->threads))) {
5271 		struct binder_thread *thread;
5272 
5273 		thread = rb_entry(n, struct binder_thread, rb_node);
5274 		binder_inner_proc_unlock(proc);
5275 		threads++;
5276 		active_transactions += binder_thread_release(proc, thread);
5277 		binder_inner_proc_lock(proc);
5278 	}
5279 
5280 	nodes = 0;
5281 	incoming_refs = 0;
5282 	while ((n = rb_first(&proc->nodes))) {
5283 		struct binder_node *node;
5284 
5285 		node = rb_entry(n, struct binder_node, rb_node);
5286 		nodes++;
5287 		/*
5288 		 * take a temporary ref on the node before
5289 		 * calling binder_node_release() which will either
5290 		 * kfree() the node or call binder_put_node()
5291 		 */
5292 		binder_inc_node_tmpref_ilocked(node);
5293 		rb_erase(&node->rb_node, &proc->nodes);
5294 		binder_inner_proc_unlock(proc);
5295 		incoming_refs = binder_node_release(node, incoming_refs);
5296 		binder_inner_proc_lock(proc);
5297 	}
5298 	binder_inner_proc_unlock(proc);
5299 
5300 	outgoing_refs = 0;
5301 	binder_proc_lock(proc);
5302 	while ((n = rb_first(&proc->refs_by_desc))) {
5303 		struct binder_ref *ref;
5304 
5305 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
5306 		outgoing_refs++;
5307 		binder_cleanup_ref_olocked(ref);
5308 		binder_proc_unlock(proc);
5309 		binder_free_ref(ref);
5310 		binder_proc_lock(proc);
5311 	}
5312 	binder_proc_unlock(proc);
5313 
5314 	binder_release_work(proc, &proc->todo);
5315 	binder_release_work(proc, &proc->delivered_death);
5316 
5317 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5318 		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5319 		     __func__, proc->pid, threads, nodes, incoming_refs,
5320 		     outgoing_refs, active_transactions);
5321 
5322 	binder_proc_dec_tmpref(proc);
5323 }
5324 
5325 static void binder_deferred_func(struct work_struct *work)
5326 {
5327 	struct binder_proc *proc;
5328 
5329 	int defer;
5330 
5331 	do {
5332 		mutex_lock(&binder_deferred_lock);
5333 		if (!hlist_empty(&binder_deferred_list)) {
5334 			proc = hlist_entry(binder_deferred_list.first,
5335 					struct binder_proc, deferred_work_node);
5336 			hlist_del_init(&proc->deferred_work_node);
5337 			defer = proc->deferred_work;
5338 			proc->deferred_work = 0;
5339 		} else {
5340 			proc = NULL;
5341 			defer = 0;
5342 		}
5343 		mutex_unlock(&binder_deferred_lock);
5344 
5345 		if (defer & BINDER_DEFERRED_FLUSH)
5346 			binder_deferred_flush(proc);
5347 
5348 		if (defer & BINDER_DEFERRED_RELEASE)
5349 			binder_deferred_release(proc); /* frees proc */
5350 	} while (proc);
5351 }
5352 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5353 
5354 static void
5355 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5356 {
5357 	mutex_lock(&binder_deferred_lock);
5358 	proc->deferred_work |= defer;
5359 	if (hlist_unhashed(&proc->deferred_work_node)) {
5360 		hlist_add_head(&proc->deferred_work_node,
5361 				&binder_deferred_list);
5362 		schedule_work(&binder_deferred_work);
5363 	}
5364 	mutex_unlock(&binder_deferred_lock);
5365 }
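
/*
 * Deferred work is coalesced per proc as a bitmask under
 * binder_deferred_lock. For example, the (hypothetical) sequence
 *
 *	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
 *	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
 *
 * issued before the worker runs results in a single binder_deferred_func()
 * pass that sees both bits set, flushes first, and then releases (which
 * frees the proc).
 */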
5366 
5367 static void print_binder_transaction_ilocked(struct seq_file *m,
5368 					     struct binder_proc *proc,
5369 					     const char *prefix,
5370 					     struct binder_transaction *t)
5371 {
5372 	struct binder_proc *to_proc;
5373 	struct binder_buffer *buffer = t->buffer;
5374 
5375 	spin_lock(&t->lock);
5376 	to_proc = t->to_proc;
5377 	seq_printf(m,
5378 		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5379 		   prefix, t->debug_id, t,
5380 		   t->from ? t->from->proc->pid : 0,
5381 		   t->from ? t->from->pid : 0,
5382 		   to_proc ? to_proc->pid : 0,
5383 		   t->to_thread ? t->to_thread->pid : 0,
5384 		   t->code, t->flags, t->priority, t->need_reply);
5385 	spin_unlock(&t->lock);
5386 
5387 	if (proc != to_proc) {
5388 		/*
5389 		 * Can only safely deref buffer if we are holding the
5390 		 * correct proc inner lock for this node
5391 		 */
5392 		seq_puts(m, "\n");
5393 		return;
5394 	}
5395 
5396 	if (buffer == NULL) {
5397 		seq_puts(m, " buffer free\n");
5398 		return;
5399 	}
5400 	if (buffer->target_node)
5401 		seq_printf(m, " node %d", buffer->target_node->debug_id);
5402 	seq_printf(m, " size %zd:%zd data %pK\n",
5403 		   buffer->data_size, buffer->offsets_size,
5404 		   buffer->user_data);
5405 }
5406 
5407 static void print_binder_work_ilocked(struct seq_file *m,
5408 				     struct binder_proc *proc,
5409 				     const char *prefix,
5410 				     const char *transaction_prefix,
5411 				     struct binder_work *w)
5412 {
5413 	struct binder_node *node;
5414 	struct binder_transaction *t;
5415 
5416 	switch (w->type) {
5417 	case BINDER_WORK_TRANSACTION:
5418 		t = container_of(w, struct binder_transaction, work);
5419 		print_binder_transaction_ilocked(
5420 				m, proc, transaction_prefix, t);
5421 		break;
5422 	case BINDER_WORK_RETURN_ERROR: {
5423 		struct binder_error *e = container_of(
5424 				w, struct binder_error, work);
5425 
5426 		seq_printf(m, "%stransaction error: %u\n",
5427 			   prefix, e->cmd);
5428 	} break;
5429 	case BINDER_WORK_TRANSACTION_COMPLETE:
5430 		seq_printf(m, "%stransaction complete\n", prefix);
5431 		break;
5432 	case BINDER_WORK_NODE:
5433 		node = container_of(w, struct binder_node, work);
5434 		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5435 			   prefix, node->debug_id,
5436 			   (u64)node->ptr, (u64)node->cookie);
5437 		break;
5438 	case BINDER_WORK_DEAD_BINDER:
5439 		seq_printf(m, "%shas dead binder\n", prefix);
5440 		break;
5441 	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5442 		seq_printf(m, "%shas cleared dead binder\n", prefix);
5443 		break;
5444 	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5445 		seq_printf(m, "%shas cleared death notification\n", prefix);
5446 		break;
5447 	default:
5448 		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5449 		break;
5450 	}
5451 }
5452 
5453 static void print_binder_thread_ilocked(struct seq_file *m,
5454 					struct binder_thread *thread,
5455 					int print_always)
5456 {
5457 	struct binder_transaction *t;
5458 	struct binder_work *w;
5459 	size_t start_pos = m->count;
5460 	size_t header_pos;
5461 
5462 	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
5463 			thread->pid, thread->looper,
5464 			thread->looper_need_return,
5465 			atomic_read(&thread->tmp_ref));
5466 	header_pos = m->count;
5467 	t = thread->transaction_stack;
5468 	while (t) {
5469 		if (t->from == thread) {
5470 			print_binder_transaction_ilocked(m, thread->proc,
5471 					"    outgoing transaction", t);
5472 			t = t->from_parent;
5473 		} else if (t->to_thread == thread) {
5474 			print_binder_transaction_ilocked(m, thread->proc,
5475 						 "    incoming transaction", t);
5476 			t = t->to_parent;
5477 		} else {
5478 			print_binder_transaction_ilocked(m, thread->proc,
5479 					"    bad transaction", t);
5480 			t = NULL;
5481 		}
5482 	}
5483 	list_for_each_entry(w, &thread->todo, entry) {
5484 		print_binder_work_ilocked(m, thread->proc, "    ",
5485 					  "    pending transaction", w);
5486 	}
5487 	if (!print_always && m->count == header_pos)
5488 		m->count = start_pos;
5489 }
5490 
5491 static void print_binder_node_nilocked(struct seq_file *m,
5492 				       struct binder_node *node)
5493 {
5494 	struct binder_ref *ref;
5495 	struct binder_work *w;
5496 	int count;
5497 
5498 	count = 0;
5499 	hlist_for_each_entry(ref, &node->refs, node_entry)
5500 		count++;
5501 
5502 	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5503 		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
5504 		   node->has_strong_ref, node->has_weak_ref,
5505 		   node->local_strong_refs, node->local_weak_refs,
5506 		   node->internal_strong_refs, count, node->tmp_refs);
5507 	if (count) {
5508 		seq_puts(m, " proc");
5509 		hlist_for_each_entry(ref, &node->refs, node_entry)
5510 			seq_printf(m, " %d", ref->proc->pid);
5511 	}
5512 	seq_puts(m, "\n");
5513 	if (node->proc) {
5514 		list_for_each_entry(w, &node->async_todo, entry)
5515 			print_binder_work_ilocked(m, node->proc, "    ",
5516 					  "    pending async transaction", w);
5517 	}
5518 }
5519 
5520 static void print_binder_ref_olocked(struct seq_file *m,
5521 				     struct binder_ref *ref)
5522 {
5523 	binder_node_lock(ref->node);
5524 	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
5525 		   ref->data.debug_id, ref->data.desc,
5526 		   ref->node->proc ? "" : "dead ",
5527 		   ref->node->debug_id, ref->data.strong,
5528 		   ref->data.weak, ref->death);
5529 	binder_node_unlock(ref->node);
5530 }
5531 
5532 static void print_binder_proc(struct seq_file *m,
5533 			      struct binder_proc *proc, int print_all)
5534 {
5535 	struct binder_work *w;
5536 	struct rb_node *n;
5537 	size_t start_pos = m->count;
5538 	size_t header_pos;
5539 	struct binder_node *last_node = NULL;
5540 
5541 	seq_printf(m, "proc %d\n", proc->pid);
5542 	seq_printf(m, "context %s\n", proc->context->name);
5543 	header_pos = m->count;
5544 
5545 	binder_inner_proc_lock(proc);
5546 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5547 		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5548 						rb_node), print_all);
5549 
5550 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5551 		struct binder_node *node = rb_entry(n, struct binder_node,
5552 						    rb_node);
5553 		if (!print_all && !node->has_async_transaction)
5554 			continue;
5555 
5556 		/*
5557 		 * take a temporary reference on the node so it
5558 		 * survives and isn't removed from the tree
5559 		 * while we print it.
5560 		 */
5561 		binder_inc_node_tmpref_ilocked(node);
5562 		/* Need to drop inner lock to take node lock */
5563 		binder_inner_proc_unlock(proc);
5564 		if (last_node)
5565 			binder_put_node(last_node);
5566 		binder_node_inner_lock(node);
5567 		print_binder_node_nilocked(m, node);
5568 		binder_node_inner_unlock(node);
5569 		last_node = node;
5570 		binder_inner_proc_lock(proc);
5571 	}
5572 	binder_inner_proc_unlock(proc);
5573 	if (last_node)
5574 		binder_put_node(last_node);
5575 
5576 	if (print_all) {
5577 		binder_proc_lock(proc);
5578 		for (n = rb_first(&proc->refs_by_desc);
5579 		     n != NULL;
5580 		     n = rb_next(n))
5581 			print_binder_ref_olocked(m, rb_entry(n,
5582 							    struct binder_ref,
5583 							    rb_node_desc));
5584 		binder_proc_unlock(proc);
5585 	}
5586 	binder_alloc_print_allocated(m, &proc->alloc);
5587 	binder_inner_proc_lock(proc);
5588 	list_for_each_entry(w, &proc->todo, entry)
5589 		print_binder_work_ilocked(m, proc, "  ",
5590 					  "  pending transaction", w);
5591 	list_for_each_entry(w, &proc->delivered_death, entry) {
5592 		seq_puts(m, "  has delivered dead binder\n");
5593 		break;
5594 	}
5595 	binder_inner_proc_unlock(proc);
5596 	if (!print_all && m->count == header_pos)
5597 		m->count = start_pos;
5598 }
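
/*
 * The last_node dance above keeps one temporary node reference alive
 * across the window where the inner proc lock is dropped, so the node
 * being printed can neither be freed nor unlinked while unlocked; the
 * reference is only dropped once the next node is pinned (or the loop
 * ends).
 */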
5599 
5600 static const char * const binder_return_strings[] = {
5601 	"BR_ERROR",
5602 	"BR_OK",
5603 	"BR_TRANSACTION",
5604 	"BR_REPLY",
5605 	"BR_ACQUIRE_RESULT",
5606 	"BR_DEAD_REPLY",
5607 	"BR_TRANSACTION_COMPLETE",
5608 	"BR_INCREFS",
5609 	"BR_ACQUIRE",
5610 	"BR_RELEASE",
5611 	"BR_DECREFS",
5612 	"BR_ATTEMPT_ACQUIRE",
5613 	"BR_NOOP",
5614 	"BR_SPAWN_LOOPER",
5615 	"BR_FINISHED",
5616 	"BR_DEAD_BINDER",
5617 	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
5618 	"BR_FAILED_REPLY",
5619 	"BR_FROZEN_REPLY",
5620 	"BR_ONEWAY_SPAM_SUSPECT",
5621 };
5622 
5623 static const char * const binder_command_strings[] = {
5624 	"BC_TRANSACTION",
5625 	"BC_REPLY",
5626 	"BC_ACQUIRE_RESULT",
5627 	"BC_FREE_BUFFER",
5628 	"BC_INCREFS",
5629 	"BC_ACQUIRE",
5630 	"BC_RELEASE",
5631 	"BC_DECREFS",
5632 	"BC_INCREFS_DONE",
5633 	"BC_ACQUIRE_DONE",
5634 	"BC_ATTEMPT_ACQUIRE",
5635 	"BC_REGISTER_LOOPER",
5636 	"BC_ENTER_LOOPER",
5637 	"BC_EXIT_LOOPER",
5638 	"BC_REQUEST_DEATH_NOTIFICATION",
5639 	"BC_CLEAR_DEATH_NOTIFICATION",
5640 	"BC_DEAD_BINDER_DONE",
5641 	"BC_TRANSACTION_SG",
5642 	"BC_REPLY_SG",
5643 };
5644 
5645 static const char * const binder_objstat_strings[] = {
5646 	"proc",
5647 	"thread",
5648 	"node",
5649 	"ref",
5650 	"death",
5651 	"transaction",
5652 	"transaction_complete"
5653 };
5654 
5655 static void print_binder_stats(struct seq_file *m, const char *prefix,
5656 			       struct binder_stats *stats)
5657 {
5658 	int i;
5659 
5660 	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5661 		     ARRAY_SIZE(binder_command_strings));
5662 	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5663 		int temp = atomic_read(&stats->bc[i]);
5664 
5665 		if (temp)
5666 			seq_printf(m, "%s%s: %d\n", prefix,
5667 				   binder_command_strings[i], temp);
5668 	}
5669 
5670 	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5671 		     ARRAY_SIZE(binder_return_strings));
5672 	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5673 		int temp = atomic_read(&stats->br[i]);
5674 
5675 		if (temp)
5676 			seq_printf(m, "%s%s: %d\n", prefix,
5677 				   binder_return_strings[i], temp);
5678 	}
5679 
5680 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5681 		     ARRAY_SIZE(binder_objstat_strings));
5682 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5683 		     ARRAY_SIZE(stats->obj_deleted));
5684 	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5685 		int created = atomic_read(&stats->obj_created[i]);
5686 		int deleted = atomic_read(&stats->obj_deleted[i]);
5687 
5688 		if (created || deleted)
5689 			seq_printf(m, "%s%s: active %d total %d\n",
5690 				prefix,
5691 				binder_objstat_strings[i],
5692 				created - deleted,
5693 				created);
5694 	}
5695 }
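
/*
 * Sample of the output produced above (illustrative counts, empty prefix
 * as used by binder_stats_show()):
 *
 *	BC_TRANSACTION: 1234
 *	BR_TRANSACTION_COMPLETE: 1230
 *	proc: active 5 total 12
 */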
5696 
5697 static void print_binder_proc_stats(struct seq_file *m,
5698 				    struct binder_proc *proc)
5699 {
5700 	struct binder_work *w;
5701 	struct binder_thread *thread;
5702 	struct rb_node *n;
5703 	int count, strong, weak, ready_threads;
5704 	size_t free_async_space =
5705 		binder_alloc_get_free_async_space(&proc->alloc);
5706 
5707 	seq_printf(m, "proc %d\n", proc->pid);
5708 	seq_printf(m, "context %s\n", proc->context->name);
5709 	count = 0;
5710 	ready_threads = 0;
5711 	binder_inner_proc_lock(proc);
5712 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5713 		count++;
5714 
5715 	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5716 		ready_threads++;
5717 
5718 	seq_printf(m, "  threads: %d\n", count);
5719 	seq_printf(m, "  requested threads: %d+%d/%d\n"
5720 			"  ready threads %d\n"
5721 			"  free async space %zd\n", proc->requested_threads,
5722 			proc->requested_threads_started, proc->max_threads,
5723 			ready_threads,
5724 			free_async_space);
5725 	count = 0;
5726 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5727 		count++;
5728 	binder_inner_proc_unlock(proc);
5729 	seq_printf(m, "  nodes: %d\n", count);
5730 	count = 0;
5731 	strong = 0;
5732 	weak = 0;
5733 	binder_proc_lock(proc);
5734 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5735 		struct binder_ref *ref = rb_entry(n, struct binder_ref,
5736 						  rb_node_desc);
5737 		count++;
5738 		strong += ref->data.strong;
5739 		weak += ref->data.weak;
5740 	}
5741 	binder_proc_unlock(proc);
5742 	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
5743 
5744 	count = binder_alloc_get_allocated_count(&proc->alloc);
5745 	seq_printf(m, "  buffers: %d\n", count);
5746 
5747 	binder_alloc_print_pages(m, &proc->alloc);
5748 
5749 	count = 0;
5750 	binder_inner_proc_lock(proc);
5751 	list_for_each_entry(w, &proc->todo, entry) {
5752 		if (w->type == BINDER_WORK_TRANSACTION)
5753 			count++;
5754 	}
5755 	binder_inner_proc_unlock(proc);
5756 	seq_printf(m, "  pending transactions: %d\n", count);
5757 
5758 	print_binder_stats(m, "  ", &proc->stats);
5759 }
5760 
5761 
5762 int binder_state_show(struct seq_file *m, void *unused)
5763 {
5764 	struct binder_proc *proc;
5765 	struct binder_node *node;
5766 	struct binder_node *last_node = NULL;
5767 
5768 	seq_puts(m, "binder state:\n");
5769 
5770 	spin_lock(&binder_dead_nodes_lock);
5771 	if (!hlist_empty(&binder_dead_nodes))
5772 		seq_puts(m, "dead nodes:\n");
5773 	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5774 		/*
5775 		 * take a temporary reference on the node so it
5776 		 * survives and isn't removed from the list
5777 		 * while we print it.
5778 		 */
5779 		node->tmp_refs++;
5780 		spin_unlock(&binder_dead_nodes_lock);
5781 		if (last_node)
5782 			binder_put_node(last_node);
5783 		binder_node_lock(node);
5784 		print_binder_node_nilocked(m, node);
5785 		binder_node_unlock(node);
5786 		last_node = node;
5787 		spin_lock(&binder_dead_nodes_lock);
5788 	}
5789 	spin_unlock(&binder_dead_nodes_lock);
5790 	if (last_node)
5791 		binder_put_node(last_node);
5792 
5793 	mutex_lock(&binder_procs_lock);
5794 	hlist_for_each_entry(proc, &binder_procs, proc_node)
5795 		print_binder_proc(m, proc, 1);
5796 	mutex_unlock(&binder_procs_lock);
5797 
5798 	return 0;
5799 }
5800 
5801 int binder_stats_show(struct seq_file *m, void *unused)
5802 {
5803 	struct binder_proc *proc;
5804 
5805 	seq_puts(m, "binder stats:\n");
5806 
5807 	print_binder_stats(m, "", &binder_stats);
5808 
5809 	mutex_lock(&binder_procs_lock);
5810 	hlist_for_each_entry(proc, &binder_procs, proc_node)
5811 		print_binder_proc_stats(m, proc);
5812 	mutex_unlock(&binder_procs_lock);
5813 
5814 	return 0;
5815 }
5816 
5817 int binder_transactions_show(struct seq_file *m, void *unused)
5818 {
5819 	struct binder_proc *proc;
5820 
5821 	seq_puts(m, "binder transactions:\n");
5822 	mutex_lock(&binder_procs_lock);
5823 	hlist_for_each_entry(proc, &binder_procs, proc_node)
5824 		print_binder_proc(m, proc, 0);
5825 	mutex_unlock(&binder_procs_lock);
5826 
5827 	return 0;
5828 }
5829 
5830 static int proc_show(struct seq_file *m, void *unused)
5831 {
5832 	struct binder_proc *itr;
5833 	int pid = (unsigned long)m->private;
5834 
5835 	mutex_lock(&binder_procs_lock);
5836 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
5837 		if (itr->pid == pid) {
5838 			seq_puts(m, "binder proc state:\n");
5839 			print_binder_proc(m, itr, 1);
5840 		}
5841 	}
5842 	mutex_unlock(&binder_procs_lock);
5843 
5844 	return 0;
5845 }
5846 
5847 static void print_binder_transaction_log_entry(struct seq_file *m,
5848 					struct binder_transaction_log_entry *e)
5849 {
5850 	int debug_id = READ_ONCE(e->debug_id_done);
5851 	/*
5852 	 * read barrier to guarantee debug_id_done is read before
5853 	 * we print the log values
5854 	 */
5855 	smp_rmb();
5856 	seq_printf(m,
5857 		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5858 		   e->debug_id, (e->call_type == 2) ? "reply" :
5859 		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5860 		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
5861 		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
5862 		   e->return_error, e->return_error_param,
5863 		   e->return_error_line);
5864 	/*
5865 	 * read barrier to guarantee the read of debug_id_done happens
5866 	 * after we are done printing the fields of the entry
5867 	 */
5868 	smp_rmb();
5869 	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5870 			"\n" : " (incomplete)\n");
5871 }
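
/*
 * The two smp_rmb()s above pair with write barriers on the logging side:
 * the writer clears debug_id_done, fills in the entry, and only then
 * stores the final debug id behind a write barrier. A hedged sketch of
 * that writer-side protocol:
 *
 *	WRITE_ONCE(e->debug_id_done, 0);
 *	// ... fill in e->from_proc, e->to_proc and the other fields ...
 *	smp_wmb();
 *	WRITE_ONCE(e->debug_id_done, t_debug_id);
 *
 * A reader that sees the same non-zero debug_id_done before and after
 * printing knows the entry was stable; otherwise " (incomplete)" is
 * appended.
 */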
5872 
5873 int binder_transaction_log_show(struct seq_file *m, void *unused)
5874 {
5875 	struct binder_transaction_log *log = m->private;
5876 	unsigned int log_cur = atomic_read(&log->cur);
5877 	unsigned int count;
5878 	unsigned int cur;
5879 	int i;
5880 
5881 	count = log_cur + 1;
5882 	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5883 		0 : count % ARRAY_SIZE(log->entry);
5884 	if (count > ARRAY_SIZE(log->entry) || log->full)
5885 		count = ARRAY_SIZE(log->entry);
5886 	for (i = 0; i < count; i++) {
5887 		unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5888 
5889 		print_binder_transaction_log_entry(m, &log->entry[index]);
5890 	}
5891 	return 0;
5892 }
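
/*
 * Worked example of the ring walk above, assuming the 32-entry log
 * (illustrative numbers): with log->full set and log->cur == 37, count
 * starts at 38, cur becomes 38 % 32 == 6, count is clamped to 32, and the
 * entries print as 6, 7, ..., 31, 0, ..., 5 -- oldest first.
 */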
5893 
5894 const struct file_operations binder_fops = {
5895 	.owner = THIS_MODULE,
5896 	.poll = binder_poll,
5897 	.unlocked_ioctl = binder_ioctl,
5898 	.compat_ioctl = compat_ptr_ioctl,
5899 	.mmap = binder_mmap,
5900 	.open = binder_open,
5901 	.flush = binder_flush,
5902 	.release = binder_release,
5903 };
5904 
5905 static int __init init_binder_device(const char *name)
5906 {
5907 	int ret;
5908 	struct binder_device *binder_device;
5909 
5910 	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5911 	if (!binder_device)
5912 		return -ENOMEM;
5913 
5914 	binder_device->miscdev.fops = &binder_fops;
5915 	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5916 	binder_device->miscdev.name = name;
5917 
5918 	refcount_set(&binder_device->ref, 1);
5919 	binder_device->context.binder_context_mgr_uid = INVALID_UID;
5920 	binder_device->context.name = name;
5921 	mutex_init(&binder_device->context.context_mgr_node_lock);
5922 
5923 	ret = misc_register(&binder_device->miscdev);
5924 	if (ret < 0) {
5925 		kfree(binder_device);
5926 		return ret;
5927 	}
5928 
5929 	hlist_add_head(&binder_device->hlist, &binder_devices);
5930 
5931 	return ret;
5932 }
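
/*
 * With MISC_DYNAMIC_MINOR the misc core picks the minor number at
 * registration time, so each device registered here appears as
 * /dev/<name> (misc devices share major 10) once devtmpfs/udev creates
 * the node.
 */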
5933 
5934 static int __init binder_init(void)
5935 {
5936 	int ret;
5937 	char *device_name, *device_tmp;
5938 	struct binder_device *device;
5939 	struct hlist_node *tmp;
5940 	char *device_names = NULL;
5941 
5942 	ret = binder_alloc_shrinker_init();
5943 	if (ret)
5944 		return ret;
5945 
5946 	atomic_set(&binder_transaction_log.cur, ~0U);
5947 	atomic_set(&binder_transaction_log_failed.cur, ~0U);
5948 
5949 	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5950 	if (binder_debugfs_dir_entry_root)
5951 		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5952 						 binder_debugfs_dir_entry_root);
5953 
5954 	if (binder_debugfs_dir_entry_root) {
5955 		debugfs_create_file("state",
5956 				    0444,
5957 				    binder_debugfs_dir_entry_root,
5958 				    NULL,
5959 				    &binder_state_fops);
5960 		debugfs_create_file("stats",
5961 				    0444,
5962 				    binder_debugfs_dir_entry_root,
5963 				    NULL,
5964 				    &binder_stats_fops);
5965 		debugfs_create_file("transactions",
5966 				    0444,
5967 				    binder_debugfs_dir_entry_root,
5968 				    NULL,
5969 				    &binder_transactions_fops);
5970 		debugfs_create_file("transaction_log",
5971 				    0444,
5972 				    binder_debugfs_dir_entry_root,
5973 				    &binder_transaction_log,
5974 				    &binder_transaction_log_fops);
5975 		debugfs_create_file("failed_transaction_log",
5976 				    0444,
5977 				    binder_debugfs_dir_entry_root,
5978 				    &binder_transaction_log_failed,
5979 				    &binder_transaction_log_fops);
5980 	}
5981 
5982 	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
5983 	    strcmp(binder_devices_param, "") != 0) {
5984 		/*
5985 		 * Copy the module parameter string, because we don't want to
5986 		 * tokenize it in-place.
5987 		 */
5988 		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
5989 		if (!device_names) {
5990 			ret = -ENOMEM;
5991 			goto err_alloc_device_names_failed;
5992 		}
5993 
5994 		device_tmp = device_names;
5995 		while ((device_name = strsep(&device_tmp, ","))) {
5996 			ret = init_binder_device(device_name);
5997 			if (ret)
5998 				goto err_init_binder_device_failed;
5999 		}
6000 	}
6001 
6002 	ret = init_binderfs();
6003 	if (ret)
6004 		goto err_init_binder_device_failed;
6005 
6006 	return ret;
6007 
6008 err_init_binder_device_failed:
6009 	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6010 		misc_deregister(&device->miscdev);
6011 		hlist_del(&device->hlist);
6012 		kfree(device);
6013 	}
6014 
6015 	kfree(device_names);
6016 
6017 err_alloc_device_names_failed:
6018 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6019 
6020 	return ret;
6021 }
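
/*
 * Example: with binder_devices_param set to "binder,hwbinder,vndbinder"
 * (the usual Android default for CONFIG_ANDROID_BINDER_DEVICES), the
 * strsep() loop above registers three misc devices, yielding /dev/binder,
 * /dev/hwbinder and /dev/vndbinder.
 */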
6022 
6023 device_initcall(binder_init);
6024 
6025 #define CREATE_TRACE_POINTS
6026 #include "binder_trace.h"
6027 
6028 MODULE_LICENSE("GPL v2");
6029