xref: /openbmc/linux/drivers/android/binder.c (revision d003c346bf75f01d240c80000baf2fbf28e53782)
1 /* binder.c
2  *
3  * Android IPC Subsystem
4  *
5  * Copyright (C) 2007-2008 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17 
18 /*
19  * Locking overview
20  *
21  * There are 3 main spinlocks which must be acquired in the
22  * order shown:
23  *
24  * 1) proc->outer_lock : protects binder_ref
25  *    binder_proc_lock() and binder_proc_unlock() are
26  *    used to acq/rel.
27  * 2) node->lock : protects most fields of binder_node.
28  *    binder_node_lock() and binder_node_unlock() are
29  *    used to acq/rel
30  * 3) proc->inner_lock : protects the thread and node lists
31  *    (proc->threads, proc->waiting_threads, proc->nodes)
32  *    and all todo lists associated with the binder_proc
33  *    (proc->todo, thread->todo, proc->delivered_death and
34  *    node->async_todo), as well as thread->transaction_stack
35  *    binder_inner_proc_lock() and binder_inner_proc_unlock()
36  *    are used to acq/rel
37  *
38  * Any lock under procA must never be nested under any lock at the same
39  * level or below on procB.
40  *
41  * Functions that require a lock held on entry indicate the
42  * required lock in the suffix of the function name:
43  *
44  * foo_olocked() : requires proc->outer_lock
45  * foo_nlocked() : requires node->lock
46  * foo_ilocked() : requires proc->inner_lock
47  * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
48  * foo_nilocked(): requires node->lock and proc->inner_lock
49  * ...
50  */
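/*
 * Illustrative sketch (not part of the driver): a hypothetical
 * function needing all three locks would take them in the order
 * above, using the helpers defined later in this file, and release
 * them in reverse:
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_node_lock(node);		// 2) node->lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	// ... touch refs, node fields, todo lists ...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */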
51 
52 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
53 
54 #include <linux/fdtable.h>
55 #include <linux/file.h>
56 #include <linux/freezer.h>
57 #include <linux/fs.h>
58 #include <linux/list.h>
59 #include <linux/miscdevice.h>
60 #include <linux/module.h>
61 #include <linux/mutex.h>
62 #include <linux/nsproxy.h>
63 #include <linux/poll.h>
64 #include <linux/debugfs.h>
65 #include <linux/rbtree.h>
66 #include <linux/sched/signal.h>
67 #include <linux/sched/mm.h>
68 #include <linux/seq_file.h>
69 #include <linux/uaccess.h>
70 #include <linux/pid_namespace.h>
71 #include <linux/security.h>
72 #include <linux/spinlock.h>
73 #include <linux/ratelimit.h>
74 #include <linux/syscalls.h>
75 
76 #include <uapi/linux/android/binder.h>
77 
78 #include <asm/cacheflush.h>
79 
80 #include "binder_alloc.h"
81 #include "binder_trace.h"
82 
83 static HLIST_HEAD(binder_deferred_list);
84 static DEFINE_MUTEX(binder_deferred_lock);
85 
86 static HLIST_HEAD(binder_devices);
87 static HLIST_HEAD(binder_procs);
88 static DEFINE_MUTEX(binder_procs_lock);
89 
90 static HLIST_HEAD(binder_dead_nodes);
91 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
92 
93 static struct dentry *binder_debugfs_dir_entry_root;
94 static struct dentry *binder_debugfs_dir_entry_proc;
95 static atomic_t binder_last_id;
96 
97 static int proc_show(struct seq_file *m, void *unused);
98 DEFINE_SHOW_ATTRIBUTE(proc);
99 
100 /* These are normally defined in include/linux/sizes.h */
101 #ifndef SZ_1K
102 #define SZ_1K                               0x400
103 #endif
104 
105 #ifndef SZ_4M
106 #define SZ_4M                               0x400000
107 #endif
108 
109 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
110 
111 enum {
112 	BINDER_DEBUG_USER_ERROR             = 1U << 0,
113 	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
114 	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
115 	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
116 	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
117 	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
118 	BINDER_DEBUG_READ_WRITE             = 1U << 6,
119 	BINDER_DEBUG_USER_REFS              = 1U << 7,
120 	BINDER_DEBUG_THREADS                = 1U << 8,
121 	BINDER_DEBUG_TRANSACTION            = 1U << 9,
122 	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
123 	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
124 	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
125 	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
126 	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
127 };
128 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
129 	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
130 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
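/*
 * With perm 0644 the mask can also be changed on a running system,
 * e.g. (assuming the usual sysfs layout):
 *	echo 0x3f > /sys/module/binder/parameters/debug_mask
 */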
131 
132 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
133 module_param_named(devices, binder_devices_param, charp, 0444);
134 
135 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
136 static int binder_stop_on_user_error;
137 
138 static int binder_set_stop_on_user_error(const char *val,
139 					 const struct kernel_param *kp)
140 {
141 	int ret;
142 
143 	ret = param_set_int(val, kp);
144 	if (binder_stop_on_user_error < 2)
145 		wake_up(&binder_user_error_wait);
146 	return ret;
147 }
148 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
149 	param_get_int, &binder_stop_on_user_error, 0644);
150 
151 #define binder_debug(mask, x...) \
152 	do { \
153 		if (binder_debug_mask & mask) \
154 			pr_info_ratelimited(x); \
155 	} while (0)
156 
157 #define binder_user_error(x...) \
158 	do { \
159 		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
160 			pr_info_ratelimited(x); \
161 		if (binder_stop_on_user_error) \
162 			binder_stop_on_user_error = 2; \
163 	} while (0)
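/*
 * Usage sketch (hypothetical call sites): binder_debug() only emits
 * when its mask bit is set in debug_mask; binder_user_error()
 * additionally latches binder_stop_on_user_error to 2 when
 * stop_on_user_error is enabled:
 *
 *	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
 *		     "%d: open\n", current->pid);
 *	binder_user_error("%d: sent bad handle\n", current->pid);
 */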
164 
165 #define to_flat_binder_object(hdr) \
166 	container_of(hdr, struct flat_binder_object, hdr)
167 
168 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
169 
170 #define to_binder_buffer_object(hdr) \
171 	container_of(hdr, struct binder_buffer_object, hdr)
172 
173 #define to_binder_fd_array_object(hdr) \
174 	container_of(hdr, struct binder_fd_array_object, hdr)
175 
176 enum binder_stat_types {
177 	BINDER_STAT_PROC,
178 	BINDER_STAT_THREAD,
179 	BINDER_STAT_NODE,
180 	BINDER_STAT_REF,
181 	BINDER_STAT_DEATH,
182 	BINDER_STAT_TRANSACTION,
183 	BINDER_STAT_TRANSACTION_COMPLETE,
184 	BINDER_STAT_COUNT
185 };
186 
187 struct binder_stats {
188 	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
189 	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
190 	atomic_t obj_created[BINDER_STAT_COUNT];
191 	atomic_t obj_deleted[BINDER_STAT_COUNT];
192 };
193 
194 static struct binder_stats binder_stats;
195 
196 static inline void binder_stats_deleted(enum binder_stat_types type)
197 {
198 	atomic_inc(&binder_stats.obj_deleted[type]);
199 }
200 
201 static inline void binder_stats_created(enum binder_stat_types type)
202 {
203 	atomic_inc(&binder_stats.obj_created[type]);
204 }
205 
206 struct binder_transaction_log_entry {
207 	int debug_id;
208 	int debug_id_done;
209 	int call_type;
210 	int from_proc;
211 	int from_thread;
212 	int target_handle;
213 	int to_proc;
214 	int to_thread;
215 	int to_node;
216 	int data_size;
217 	int offsets_size;
218 	int return_error_line;
219 	uint32_t return_error;
220 	uint32_t return_error_param;
221 	const char *context_name;
222 };
223 struct binder_transaction_log {
224 	atomic_t cur;
225 	bool full;
226 	struct binder_transaction_log_entry entry[32];
227 };
228 static struct binder_transaction_log binder_transaction_log;
229 static struct binder_transaction_log binder_transaction_log_failed;
230 
231 static struct binder_transaction_log_entry *binder_transaction_log_add(
232 	struct binder_transaction_log *log)
233 {
234 	struct binder_transaction_log_entry *e;
235 	unsigned int cur = atomic_inc_return(&log->cur);
236 
237 	if (cur >= ARRAY_SIZE(log->entry))
238 		log->full = true;
239 	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
240 	WRITE_ONCE(e->debug_id_done, 0);
241 	/*
242 	 * write-barrier to synchronize access to e->debug_id_done.
243 	 * We make sure the initialized 0 value is seen before
244 	 * the other fields are zeroed by the memset() below.
245 	 */
246 	smp_wmb();
247 	memset(e, 0, sizeof(*e));
248 	return e;
249 }
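/*
 * A reader pairs with the smp_wmb() above: sample e->debug_id_done,
 * issue a read barrier, copy the entry, then treat the copy as
 * stable only if debug_id_done was non-zero and unchanged. Sketch
 * of the consuming side (the debugfs log printer follows this
 * pattern):
 *
 *	int done = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();
 *	// ... copy or print *e ...
 *	// stable iff done && done == READ_ONCE(e->debug_id_done)
 */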
250 
251 struct binder_context {
252 	struct binder_node *binder_context_mgr_node;
253 	struct mutex context_mgr_node_lock;
254 
255 	kuid_t binder_context_mgr_uid;
256 	const char *name;
257 };
258 
259 struct binder_device {
260 	struct hlist_node hlist;
261 	struct miscdevice miscdev;
262 	struct binder_context context;
263 };
264 
265 /**
266  * struct binder_work - work enqueued on a worklist
267  * @entry:             node enqueued on list
268  * @type:              type of work to be performed
269  *
270  * There are separate work lists for proc, thread, and node (async).
271  */
272 struct binder_work {
273 	struct list_head entry;
274 
275 	enum {
276 		BINDER_WORK_TRANSACTION = 1,
277 		BINDER_WORK_TRANSACTION_COMPLETE,
278 		BINDER_WORK_RETURN_ERROR,
279 		BINDER_WORK_NODE,
280 		BINDER_WORK_DEAD_BINDER,
281 		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
282 		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
283 	} type;
284 };
285 
286 struct binder_error {
287 	struct binder_work work;
288 	uint32_t cmd;
289 };
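/*
 * Consumers take a struct binder_work off a todo list, switch on
 * work->type and recover the enclosing object with container_of().
 * A minimal sketch for the BINDER_WORK_RETURN_ERROR case, which is
 * how struct binder_error above is reached from its embedded work:
 *
 *	struct binder_error *e;
 *
 *	e = container_of(w, struct binder_error, work);
 *	// e->cmd holds the error command to report
 */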
290 
291 /**
292  * struct binder_node - binder node bookkeeping
293  * @debug_id:             unique ID for debugging
294  *                        (invariant after initialized)
295  * @lock:                 lock for node fields
296  * @work:                 worklist element for node work
297  *                        (protected by @proc->inner_lock)
298  * @rb_node:              element for proc->nodes tree
299  *                        (protected by @proc->inner_lock)
300  * @dead_node:            element for binder_dead_nodes list
301  *                        (protected by binder_dead_nodes_lock)
302  * @proc:                 binder_proc that owns this node
303  *                        (invariant after initialized)
304  * @refs:                 list of references on this node
305  *                        (protected by @lock)
306  * @internal_strong_refs: used to take strong references when
307  *                        initiating a transaction
308  *                        (protected by @proc->inner_lock if @proc
309  *                        and by @lock)
310  * @local_weak_refs:      weak user refs from local process
311  *                        (protected by @proc->inner_lock if @proc
312  *                        and by @lock)
313  * @local_strong_refs:    strong user refs from local process
314  *                        (protected by @proc->inner_lock if @proc
315  *                        and by @lock)
316  * @tmp_refs:             temporary kernel refs
317  *                        (protected by @proc->inner_lock while @proc
318  *                        is valid, and by binder_dead_nodes_lock
319  *                        if @proc is NULL. During inc/dec and node release
320  *                        it is also protected by @lock to provide safety
321  *                        as the node dies and @proc becomes NULL)
322  * @ptr:                  userspace pointer for node
323  *                        (invariant, no lock needed)
324  * @cookie:               userspace cookie for node
325  *                        (invariant, no lock needed)
326  * @has_strong_ref:       userspace notified of strong ref
327  *                        (protected by @proc->inner_lock if @proc
328  *                        and by @lock)
329  * @pending_strong_ref:   userspace has acked notification of strong ref
330  *                        (protected by @proc->inner_lock if @proc
331  *                        and by @lock)
332  * @has_weak_ref:         userspace notified of weak ref
333  *                        (protected by @proc->inner_lock if @proc
334  *                        and by @lock)
335  * @pending_weak_ref:     userspace has acked notification of weak ref
336  *                        (protected by @proc->inner_lock if @proc
337  *                        and by @lock)
338  * @has_async_transaction: async transaction to node in progress
339  *                        (protected by @lock)
340  * @accept_fds:           file descriptor operations supported for node
341  *                        (invariant after initialized)
342  * @min_priority:         minimum scheduling priority
343  *                        (invariant after initialized)
344  * @async_todo:           list of async work items
345  *                        (protected by @proc->inner_lock)
346  *
347  * Bookkeeping structure for binder nodes.
348  */
349 struct binder_node {
350 	int debug_id;
351 	spinlock_t lock;
352 	struct binder_work work;
353 	union {
354 		struct rb_node rb_node;
355 		struct hlist_node dead_node;
356 	};
357 	struct binder_proc *proc;
358 	struct hlist_head refs;
359 	int internal_strong_refs;
360 	int local_weak_refs;
361 	int local_strong_refs;
362 	int tmp_refs;
363 	binder_uintptr_t ptr;
364 	binder_uintptr_t cookie;
365 	struct {
366 		/*
367 		 * bitfield elements protected by
368 		 * proc inner_lock
369 		 */
370 		u8 has_strong_ref:1;
371 		u8 pending_strong_ref:1;
372 		u8 has_weak_ref:1;
373 		u8 pending_weak_ref:1;
374 	};
375 	struct {
376 		/*
377 		 * invariant after initialization
378 		 */
379 		u8 accept_fds:1;
380 		u8 min_priority;
381 	};
382 	bool has_async_transaction;
383 	struct list_head async_todo;
384 };
385 
386 struct binder_ref_death {
387 	/**
388 	 * @work: worklist element for death notifications
389 	 *        (protected by inner_lock of the proc that
390 	 *        this ref belongs to)
391 	 */
392 	struct binder_work work;
393 	binder_uintptr_t cookie;
394 };
395 
396 /**
397  * struct binder_ref_data - binder_ref counts and id
398  * @debug_id:        unique ID for the ref
399  * @desc:            unique userspace handle for ref
400  * @strong:          strong ref count (debugging only if not locked)
401  * @weak:            weak ref count (debugging only if not locked)
402  *
403  * Structure to hold ref count and ref id information. Since
404  * the actual ref can only be accessed with a lock, this structure
405  * is used to return information about the ref to callers of
406  * ref inc/dec functions.
407  */
408 struct binder_ref_data {
409 	int debug_id;
410 	uint32_t desc;
411 	int strong;
412 	int weak;
413 };
414 
415 /**
416  * struct binder_ref - struct to track references on nodes
417  * @data:        binder_ref_data containing id, handle, and current refcounts
418  * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
419  * @rb_node_node: node for lookup by @node in proc's rb_tree
420  * @node_entry:  list entry for node->refs list in target node
421  *               (protected by @node->lock)
422  * @proc:        binder_proc containing ref
423  * @node:        binder_node of target node. When cleaning up a
424  *               ref for deletion in binder_cleanup_ref, a non-NULL
425  *               @node indicates the node must be freed
426  * @death:       pointer to death notification (ref_death) if requested
427  *               (protected by @node->lock)
428  *
429  * Structure to track references from procA to target node (on procB). This
430  * structure is unsafe to access without holding @proc->outer_lock.
431  */
432 struct binder_ref {
433 	/* Lookups needed: */
434 	/*   node + proc => ref (transaction) */
435 	/*   desc + proc => ref (transaction, inc/dec ref) */
436 	/*   node => refs + procs (proc exit) */
437 	struct binder_ref_data data;
438 	struct rb_node rb_node_desc;
439 	struct rb_node rb_node_node;
440 	struct hlist_node node_entry;
441 	struct binder_proc *proc;
442 	struct binder_node *node;
443 	struct binder_ref_death *death;
444 };
445 
446 enum binder_deferred_state {
447 	BINDER_DEFERRED_FLUSH        = 0x01,
448 	BINDER_DEFERRED_RELEASE      = 0x02,
449 };
450 
451 /**
452  * struct binder_proc - binder process bookkeeping
453  * @proc_node:            element for binder_procs list
454  * @threads:              rbtree of binder_threads in this proc
455  *                        (protected by @inner_lock)
456  * @nodes:                rbtree of binder nodes associated with
457  *                        this proc ordered by node->ptr
458  *                        (protected by @inner_lock)
459  * @refs_by_desc:         rbtree of refs ordered by ref->desc
460  *                        (protected by @outer_lock)
461  * @refs_by_node:         rbtree of refs ordered by ref->node
462  *                        (protected by @outer_lock)
463  * @waiting_threads:      threads currently waiting for proc work
464  *                        (protected by @inner_lock)
465  * @pid:                  PID of group_leader of process
466  *                        (invariant after initialized)
467  * @tsk:                  task_struct for group_leader of process
468  *                        (invariant after initialized)
469  * @deferred_work_node:   element for binder_deferred_list
470  *                        (protected by binder_deferred_lock)
471  * @deferred_work:        bitmap of deferred work to perform
472  *                        (protected by binder_deferred_lock)
473  * @is_dead:              process is dead and awaiting free
474  *                        when outstanding transactions are cleaned up
475  *                        (protected by @inner_lock)
476  * @todo:                 list of work for this process
477  *                        (protected by @inner_lock)
478  * @stats:                per-process binder statistics
479  *                        (atomics, no lock needed)
480  * @delivered_death:      list of delivered death notifications
481  *                        (protected by @inner_lock)
482  * @max_threads:          cap on number of binder threads
483  *                        (protected by @inner_lock)
484  * @requested_threads:    number of binder threads requested but not
485  *                        yet started. In current implementation, can
486  *                        only be 0 or 1.
487  *                        (protected by @inner_lock)
488  * @requested_threads_started: number of binder threads started
489  *                        (protected by @inner_lock)
490  * @tmp_ref:              temporary reference to indicate proc is in use
491  *                        (protected by @inner_lock)
492  * @default_priority:     default scheduler priority
493  *                        (invariant after initialized)
494  * @debugfs_entry:        debugfs node
495  * @alloc:                binder allocator bookkeeping
496  * @context:              binder_context for this proc
497  *                        (invariant after initialized)
498  * @inner_lock:           can nest under outer_lock and/or node lock
499  * @outer_lock:           no nesting under inner or node lock
500  *                        Lock order: 1) outer, 2) node, 3) inner
501  *
502  * Bookkeeping structure for binder processes
503  */
504 struct binder_proc {
505 	struct hlist_node proc_node;
506 	struct rb_root threads;
507 	struct rb_root nodes;
508 	struct rb_root refs_by_desc;
509 	struct rb_root refs_by_node;
510 	struct list_head waiting_threads;
511 	int pid;
512 	struct task_struct *tsk;
513 	struct hlist_node deferred_work_node;
514 	int deferred_work;
515 	bool is_dead;
516 
517 	struct list_head todo;
518 	struct binder_stats stats;
519 	struct list_head delivered_death;
520 	int max_threads;
521 	int requested_threads;
522 	int requested_threads_started;
523 	int tmp_ref;
524 	long default_priority;
525 	struct dentry *debugfs_entry;
526 	struct binder_alloc alloc;
527 	struct binder_context *context;
528 	spinlock_t inner_lock;
529 	spinlock_t outer_lock;
530 };
531 
532 enum {
533 	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
534 	BINDER_LOOPER_STATE_ENTERED     = 0x02,
535 	BINDER_LOOPER_STATE_EXITED      = 0x04,
536 	BINDER_LOOPER_STATE_INVALID     = 0x08,
537 	BINDER_LOOPER_STATE_WAITING     = 0x10,
538 	BINDER_LOOPER_STATE_POLL        = 0x20,
539 };
540 
541 /**
542  * struct binder_thread - binder thread bookkeeping
543  * @proc:                 binder process for this thread
544  *                        (invariant after initialization)
545  * @rb_node:              element for proc->threads rbtree
546  *                        (protected by @proc->inner_lock)
547  * @waiting_thread_node:  element for @proc->waiting_threads list
548  *                        (protected by @proc->inner_lock)
549  * @pid:                  PID for this thread
550  *                        (invariant after initialization)
551  * @looper:               bitmap of looping state
552  *                        (only accessed by this thread)
553  * @looper_need_return:   looping thread needs to exit driver
554  *                        (no lock needed)
555  * @transaction_stack:    stack of in-progress transactions for this thread
556  *                        (protected by @proc->inner_lock)
557  * @todo:                 list of work to do for this thread
558  *                        (protected by @proc->inner_lock)
559  * @process_todo:         whether work in @todo should be processed
560  *                        (protected by @proc->inner_lock)
561  * @return_error:         transaction errors reported by this thread
562  *                        (only accessed by this thread)
563  * @reply_error:          transaction errors reported by target thread
564  *                        (protected by @proc->inner_lock)
565  * @wait:                 wait queue for thread work
566  * @stats:                per-thread statistics
567  *                        (atomics, no lock needed)
568  * @tmp_ref:              temporary reference to indicate thread is in use
569  *                        (atomic since @proc->inner_lock cannot
570  *                        always be acquired)
571  * @is_dead:              thread is dead and awaiting free
572  *                        when outstanding transactions are cleaned up
573  *                        (protected by @proc->inner_lock)
574  *
575  * Bookkeeping structure for binder threads.
576  */
577 struct binder_thread {
578 	struct binder_proc *proc;
579 	struct rb_node rb_node;
580 	struct list_head waiting_thread_node;
581 	int pid;
582 	int looper;              /* only modified by this thread */
583 	bool looper_need_return; /* can be written by other thread */
584 	struct binder_transaction *transaction_stack;
585 	struct list_head todo;
586 	bool process_todo;
587 	struct binder_error return_error;
588 	struct binder_error reply_error;
589 	wait_queue_head_t wait;
590 	struct binder_stats stats;
591 	atomic_t tmp_ref;
592 	bool is_dead;
593 };
594 
595 /**
596  * struct binder_txn_fd_fixup - transaction fd fixup list element
597  * @fixup_entry:          list entry
598  * @file:                 struct file to be associated with new fd
599  * @offset:               offset in buffer data to this fixup
600  *
601  * List element for fd fixups in a transaction. Since file
602  * descriptors need to be allocated in the context of the
603  * target process, we pass each fd to be processed in this
604  * struct.
605  */
606 struct binder_txn_fd_fixup {
607 	struct list_head fixup_entry;
608 	struct file *file;
609 	size_t offset;
610 };
611 
612 struct binder_transaction {
613 	int debug_id;
614 	struct binder_work work;
615 	struct binder_thread *from;
616 	struct binder_transaction *from_parent;
617 	struct binder_proc *to_proc;
618 	struct binder_thread *to_thread;
619 	struct binder_transaction *to_parent;
620 	unsigned need_reply:1;
621 	/* unsigned is_dead:1; */	/* not used at the moment */
622 
623 	struct binder_buffer *buffer;
624 	unsigned int	code;
625 	unsigned int	flags;
626 	long	priority;
627 	long	saved_priority;
628 	kuid_t	sender_euid;
629 	struct list_head fd_fixups;
630 	/**
631 	 * @lock:  protects @from, @to_proc, and @to_thread
632 	 *
633 	 * @from, @to_proc, and @to_thread can be set to NULL
634 	 * during thread teardown
635 	 */
636 	spinlock_t lock;
637 };
638 
639 /**
640  * binder_proc_lock() - Acquire outer lock for given binder_proc
641  * @proc:         struct binder_proc to acquire
642  *
643  * Acquires proc->outer_lock. Used to protect binder_ref
644  * structures associated with the given proc.
645  */
646 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
647 static void
648 _binder_proc_lock(struct binder_proc *proc, int line)
649 	__acquires(&proc->outer_lock)
650 {
651 	binder_debug(BINDER_DEBUG_SPINLOCKS,
652 		     "%s: line=%d\n", __func__, line);
653 	spin_lock(&proc->outer_lock);
654 }
655 
656 /**
657  * binder_proc_unlock() - Release spinlock for given binder_proc
658  * @proc:         struct binder_proc to release
659  *
660  * Release lock acquired via binder_proc_lock()
661  */
662 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
663 static void
664 _binder_proc_unlock(struct binder_proc *proc, int line)
665 	__releases(&proc->outer_lock)
666 {
667 	binder_debug(BINDER_DEBUG_SPINLOCKS,
668 		     "%s: line=%d\n", __func__, line);
669 	spin_unlock(&proc->outer_lock);
670 }
671 
672 /**
673  * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
674  * @proc:         struct binder_proc to acquire
675  *
676  * Acquires proc->inner_lock. Used to protect todo lists
677  */
678 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
679 static void
680 _binder_inner_proc_lock(struct binder_proc *proc, int line)
681 	__acquires(&proc->inner_lock)
682 {
683 	binder_debug(BINDER_DEBUG_SPINLOCKS,
684 		     "%s: line=%d\n", __func__, line);
685 	spin_lock(&proc->inner_lock);
686 }
687 
688 /**
689  * binder_inner_proc_unlock() - Release inner lock for given binder_proc
690  * @proc:         struct binder_proc to release
691  *
692  * Release lock acquired via binder_inner_proc_lock()
693  */
694 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
695 static void
696 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
697 	__releases(&proc->inner_lock)
698 {
699 	binder_debug(BINDER_DEBUG_SPINLOCKS,
700 		     "%s: line=%d\n", __func__, line);
701 	spin_unlock(&proc->inner_lock);
702 }
703 
704 /**
705  * binder_node_lock() - Acquire spinlock for given binder_node
706  * @node:         struct binder_node to acquire
707  *
708  * Acquires node->lock. Used to protect binder_node fields
709  */
710 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
711 static void
712 _binder_node_lock(struct binder_node *node, int line)
713 	__acquires(&node->lock)
714 {
715 	binder_debug(BINDER_DEBUG_SPINLOCKS,
716 		     "%s: line=%d\n", __func__, line);
717 	spin_lock(&node->lock);
718 }
719 
720 /**
721  * binder_node_unlock() - Release spinlock for given binder_node
722  * @node:         struct binder_node to release
723  *
724  * Release lock acquired via binder_node_lock()
725  */
726 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
727 static void
728 _binder_node_unlock(struct binder_node *node, int line)
729 	__releases(&node->lock)
730 {
731 	binder_debug(BINDER_DEBUG_SPINLOCKS,
732 		     "%s: line=%d\n", __func__, line);
733 	spin_unlock(&node->lock);
734 }
735 
736 /**
737  * binder_node_inner_lock() - Acquire node and inner locks
738  * @node:         struct binder_node to acquire
739  *
740  * Acquires node->lock. If node->proc is non-NULL, also acquires
741  * node->proc->inner_lock. Used to protect binder_node fields
742  */
743 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
744 static void
745 _binder_node_inner_lock(struct binder_node *node, int line)
746 	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
747 {
748 	binder_debug(BINDER_DEBUG_SPINLOCKS,
749 		     "%s: line=%d\n", __func__, line);
750 	spin_lock(&node->lock);
751 	if (node->proc)
752 		binder_inner_proc_lock(node->proc);
753 	else
754 		/* annotation for sparse */
755 		__acquire(&node->proc->inner_lock);
756 }
757 
758 /**
759  * binder_node_inner_unlock() - Release node and inner locks
760  * @node:         struct binder_node to release
761  *
762  * Release locks acquired via binder_node_inner_lock()
763  */
764 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
765 static void
766 _binder_node_inner_unlock(struct binder_node *node, int line)
767 	__releases(&node->lock) __releases(&node->proc->inner_lock)
768 {
769 	struct binder_proc *proc = node->proc;
770 
771 	binder_debug(BINDER_DEBUG_SPINLOCKS,
772 		     "%s: line=%d\n", __func__, line);
773 	if (proc)
774 		binder_inner_proc_unlock(proc);
775 	else
776 		/* annotation for sparse */
777 		__release(&node->proc->inner_lock);
778 	spin_unlock(&node->lock);
779 }
780 
781 static bool binder_worklist_empty_ilocked(struct list_head *list)
782 {
783 	return list_empty(list);
784 }
785 
786 /**
787  * binder_worklist_empty() - Check if no items on the work list
788  * @proc:       binder_proc associated with list
789  * @list:	list to check
790  *
791  * Return: true if there are no items on list, else false
792  */
793 static bool binder_worklist_empty(struct binder_proc *proc,
794 				  struct list_head *list)
795 {
796 	bool ret;
797 
798 	binder_inner_proc_lock(proc);
799 	ret = binder_worklist_empty_ilocked(list);
800 	binder_inner_proc_unlock(proc);
801 	return ret;
802 }
803 
804 /**
805  * binder_enqueue_work_ilocked() - Add an item to the work list
806  * @work:         struct binder_work to add to list
807  * @target_list:  list to add work to
808  *
809  * Adds the work to the specified list. Asserts that work
810  * is not already on a list.
811  *
812  * Requires the proc->inner_lock to be held.
813  */
814 static void
815 binder_enqueue_work_ilocked(struct binder_work *work,
816 			   struct list_head *target_list)
817 {
818 	BUG_ON(target_list == NULL);
819 	BUG_ON(work->entry.next && !list_empty(&work->entry));
820 	list_add_tail(&work->entry, target_list);
821 }
822 
823 /**
824  * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
825  * @thread:       thread to queue work to
826  * @work:         struct binder_work to add to list
827  *
828  * Adds the work to the todo list of the thread. Doesn't set the process_todo
829  * flag, which means that (if it wasn't already set) the thread will go to
830  * sleep without handling this work when it calls read.
831  *
832  * Requires the proc->inner_lock to be held.
833  */
834 static void
835 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
836 					    struct binder_work *work)
837 {
838 	WARN_ON(!list_empty(&thread->waiting_thread_node));
839 	binder_enqueue_work_ilocked(work, &thread->todo);
840 }
841 
842 /**
843  * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
844  * @thread:       thread to queue work to
845  * @work:         struct binder_work to add to list
846  *
847  * Adds the work to the todo list of the thread, and enables processing
848  * of the todo queue.
849  *
850  * Requires the proc->inner_lock to be held.
851  */
852 static void
853 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
854 				   struct binder_work *work)
855 {
856 	WARN_ON(!list_empty(&thread->waiting_thread_node));
857 	binder_enqueue_work_ilocked(work, &thread->todo);
858 	thread->process_todo = true;
859 }
860 
861 /**
862  * binder_enqueue_thread_work() - Add an item to the thread work list
863  * @thread:       thread to queue work to
864  * @work:         struct binder_work to add to list
865  *
866  * Adds the work to the todo list of the thread, and enables processing
867  * of the todo queue.
868  */
869 static void
870 binder_enqueue_thread_work(struct binder_thread *thread,
871 			   struct binder_work *work)
872 {
873 	binder_inner_proc_lock(thread->proc);
874 	binder_enqueue_thread_work_ilocked(thread, work);
875 	binder_inner_proc_unlock(thread->proc);
876 }
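/*
 * Usage sketch (hypothetical callers): the plain variant makes the
 * work visible to the thread's next read; the deferred variant
 * queues it without setting process_todo, so it is only handled
 * alongside other, non-deferred work:
 *
 *	binder_enqueue_thread_work(thread, &t->work);
 *
 *	binder_inner_proc_lock(thread->proc);
 *	binder_enqueue_deferred_thread_work_ilocked(thread, &node->work);
 *	binder_inner_proc_unlock(thread->proc);
 */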
877 
878 static void
879 binder_dequeue_work_ilocked(struct binder_work *work)
880 {
881 	list_del_init(&work->entry);
882 }
883 
884 /**
885  * binder_dequeue_work() - Removes an item from the work list
886  * @proc:         binder_proc associated with list
887  * @work:         struct binder_work to remove from list
888  *
889  * Removes the specified work item from whatever list it is on.
890  * Can safely be called if work is not on any list.
891  */
892 static void
893 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
894 {
895 	binder_inner_proc_lock(proc);
896 	binder_dequeue_work_ilocked(work);
897 	binder_inner_proc_unlock(proc);
898 }
899 
900 static struct binder_work *binder_dequeue_work_head_ilocked(
901 					struct list_head *list)
902 {
903 	struct binder_work *w;
904 
905 	w = list_first_entry_or_null(list, struct binder_work, entry);
906 	if (w)
907 		list_del_init(&w->entry);
908 	return w;
909 }
910 
911 /**
912  * binder_dequeue_work_head() - Dequeues the item at head of list
913  * @proc:         binder_proc associated with list
914  * @list:         list to dequeue head
915  *
916  * Removes the head of the list if there are items on the list
917  *
918  * Return: pointer to the dequeued binder_work, or NULL if list was empty
919  */
920 static struct binder_work *binder_dequeue_work_head(
921 					struct binder_proc *proc,
922 					struct list_head *list)
923 {
924 	struct binder_work *w;
925 
926 	binder_inner_proc_lock(proc);
927 	w = binder_dequeue_work_head_ilocked(list);
928 	binder_inner_proc_unlock(proc);
929 	return w;
930 }
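/*
 * Since binder_dequeue_work_head() returns NULL once the list is
 * empty, a worklist can be drained with a simple loop (a sketch of
 * the cleanup pattern):
 *
 *	struct binder_work *w;
 *
 *	while ((w = binder_dequeue_work_head(proc, list))) {
 *		// handle or free the dequeued work item
 *	}
 */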
931 
932 static void
933 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
934 static void binder_free_thread(struct binder_thread *thread);
935 static void binder_free_proc(struct binder_proc *proc);
936 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
937 
938 static bool binder_has_work_ilocked(struct binder_thread *thread,
939 				    bool do_proc_work)
940 {
941 	return thread->process_todo ||
942 		thread->looper_need_return ||
943 		(do_proc_work &&
944 		 !binder_worklist_empty_ilocked(&thread->proc->todo));
945 }
946 
947 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
948 {
949 	bool has_work;
950 
951 	binder_inner_proc_lock(thread->proc);
952 	has_work = binder_has_work_ilocked(thread, do_proc_work);
953 	binder_inner_proc_unlock(thread->proc);
954 
955 	return has_work;
956 }
957 
958 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
959 {
960 	return !thread->transaction_stack &&
961 		binder_worklist_empty_ilocked(&thread->todo) &&
962 		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
963 				   BINDER_LOOPER_STATE_REGISTERED));
964 }
965 
966 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
967 					       bool sync)
968 {
969 	struct rb_node *n;
970 	struct binder_thread *thread;
971 
972 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
973 		thread = rb_entry(n, struct binder_thread, rb_node);
974 		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
975 		    binder_available_for_proc_work_ilocked(thread)) {
976 			if (sync)
977 				wake_up_interruptible_sync(&thread->wait);
978 			else
979 				wake_up_interruptible(&thread->wait);
980 		}
981 	}
982 }
983 
984 /**
985  * binder_select_thread_ilocked() - selects a thread for doing proc work.
986  * @proc:	process to select a thread from
987  *
988  * Note that calling this function moves the thread off the waiting_threads
989  * list, so it can only be woken up by the caller of this function, or a
990  * signal. Therefore, callers *should* always wake up the thread this function
991  * returns.
992  *
993  * Return:	If there's a thread currently waiting for process work,
994  *		returns that thread. Otherwise returns NULL.
995  */
996 static struct binder_thread *
997 binder_select_thread_ilocked(struct binder_proc *proc)
998 {
999 	struct binder_thread *thread;
1000 
1001 	assert_spin_locked(&proc->inner_lock);
1002 	thread = list_first_entry_or_null(&proc->waiting_threads,
1003 					  struct binder_thread,
1004 					  waiting_thread_node);
1005 
1006 	if (thread)
1007 		list_del_init(&thread->waiting_thread_node);
1008 
1009 	return thread;
1010 }
1011 
1012 /**
1013  * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
1014  * @proc:	process to wake up a thread in
1015  * @thread:	specific thread to wake up (may be NULL)
1016  * @sync:	whether to do a synchronous wake-up
1017  *
1018  * This function wakes up a thread in the @proc process.
1019  * The caller may provide a specific thread to wake up in
1020  * the @thread parameter. If @thread is NULL, this function
1021  * will wake up threads that have called poll().
1022  *
1023  * Note that for this function to work as expected, callers
1024  * should first call binder_select_thread() to find a thread
1025  * to handle the work (if they don't have a thread already),
1026  * and pass the result into the @thread parameter.
1027  */
1028 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
1029 					 struct binder_thread *thread,
1030 					 bool sync)
1031 {
1032 	assert_spin_locked(&proc->inner_lock);
1033 
1034 	if (thread) {
1035 		if (sync)
1036 			wake_up_interruptible_sync(&thread->wait);
1037 		else
1038 			wake_up_interruptible(&thread->wait);
1039 		return;
1040 	}
1041 
1042 	/* Didn't find a thread waiting for proc work; this can happen
1043 	 * in two scenarios:
1044 	 * 1. All threads are busy handling transactions
1045 	 *    In that case, one of those threads should call back into
1046 	 *    the kernel driver soon and pick up this work.
1047 	 * 2. Threads are using the (e)poll interface, in which case
1048 	 *    they may be blocked on the waitqueue without having been
1049 	 *    added to waiting_threads. For this case, we just iterate
1050 	 *    over all threads not handling transaction work, and
1051 	 *    wake them all up. We wake all because we don't know whether
1052 	 *    a thread that called into (e)poll is handling non-binder
1053 	 *    work currently.
1054 	 */
1055 	binder_wakeup_poll_threads_ilocked(proc, sync);
1056 }
1057 
1058 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1059 {
1060 	struct binder_thread *thread = binder_select_thread_ilocked(proc);
1061 
1062 	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1063 }
1064 
1065 static void binder_set_nice(long nice)
1066 {
1067 	long min_nice;
1068 
1069 	if (can_nice(current, nice)) {
1070 		set_user_nice(current, nice);
1071 		return;
1072 	}
1073 	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
1074 	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1075 		     "%d: nice value %ld not allowed use %ld instead\n",
1076 		      current->pid, nice, min_nice);
1077 	set_user_nice(current, min_nice);
1078 	if (min_nice <= MAX_NICE)
1079 		return;
1080 	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
1081 }
1082 
1083 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1084 						   binder_uintptr_t ptr)
1085 {
1086 	struct rb_node *n = proc->nodes.rb_node;
1087 	struct binder_node *node;
1088 
1089 	assert_spin_locked(&proc->inner_lock);
1090 
1091 	while (n) {
1092 		node = rb_entry(n, struct binder_node, rb_node);
1093 
1094 		if (ptr < node->ptr)
1095 			n = n->rb_left;
1096 		else if (ptr > node->ptr)
1097 			n = n->rb_right;
1098 		else {
1099 			/*
1100 			 * take an implicit weak reference
1101 			 * to ensure node stays alive until
1102 			 * call to binder_put_node()
1103 			 */
1104 			binder_inc_node_tmpref_ilocked(node);
1105 			return node;
1106 		}
1107 	}
1108 	return NULL;
1109 }
1110 
1111 static struct binder_node *binder_get_node(struct binder_proc *proc,
1112 					   binder_uintptr_t ptr)
1113 {
1114 	struct binder_node *node;
1115 
1116 	binder_inner_proc_lock(proc);
1117 	node = binder_get_node_ilocked(proc, ptr);
1118 	binder_inner_proc_unlock(proc);
1119 	return node;
1120 }
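/*
 * binder_get_node() returns the node with a temporary reference
 * held, so every successful lookup must be paired with
 * binder_put_node() (defined later in this file). Sketch:
 *
 *	struct binder_node *node = binder_get_node(proc, ptr);
 *
 *	if (node) {
 *		// node cannot be freed while the tmp ref is held
 *		binder_put_node(node);
 *	}
 */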
1121 
1122 static struct binder_node *binder_init_node_ilocked(
1123 						struct binder_proc *proc,
1124 						struct binder_node *new_node,
1125 						struct flat_binder_object *fp)
1126 {
1127 	struct rb_node **p = &proc->nodes.rb_node;
1128 	struct rb_node *parent = NULL;
1129 	struct binder_node *node;
1130 	binder_uintptr_t ptr = fp ? fp->binder : 0;
1131 	binder_uintptr_t cookie = fp ? fp->cookie : 0;
1132 	__u32 flags = fp ? fp->flags : 0;
1133 
1134 	assert_spin_locked(&proc->inner_lock);
1135 
1136 	while (*p) {
1137 
1138 		parent = *p;
1139 		node = rb_entry(parent, struct binder_node, rb_node);
1140 
1141 		if (ptr < node->ptr)
1142 			p = &(*p)->rb_left;
1143 		else if (ptr > node->ptr)
1144 			p = &(*p)->rb_right;
1145 		else {
1146 			/*
1147 			 * A matching node is already in
1148 			 * the rb tree. Abandon the init
1149 			 * and return it.
1150 			 */
1151 			binder_inc_node_tmpref_ilocked(node);
1152 			return node;
1153 		}
1154 	}
1155 	node = new_node;
1156 	binder_stats_created(BINDER_STAT_NODE);
1157 	node->tmp_refs++;
1158 	rb_link_node(&node->rb_node, parent, p);
1159 	rb_insert_color(&node->rb_node, &proc->nodes);
1160 	node->debug_id = atomic_inc_return(&binder_last_id);
1161 	node->proc = proc;
1162 	node->ptr = ptr;
1163 	node->cookie = cookie;
1164 	node->work.type = BINDER_WORK_NODE;
1165 	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1166 	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1167 	spin_lock_init(&node->lock);
1168 	INIT_LIST_HEAD(&node->work.entry);
1169 	INIT_LIST_HEAD(&node->async_todo);
1170 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1171 		     "%d:%d node %d u%016llx c%016llx created\n",
1172 		     proc->pid, current->pid, node->debug_id,
1173 		     (u64)node->ptr, (u64)node->cookie);
1174 
1175 	return node;
1176 }
1177 
1178 static struct binder_node *binder_new_node(struct binder_proc *proc,
1179 					   struct flat_binder_object *fp)
1180 {
1181 	struct binder_node *node;
1182 	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1183 
1184 	if (!new_node)
1185 		return NULL;
1186 	binder_inner_proc_lock(proc);
1187 	node = binder_init_node_ilocked(proc, new_node, fp);
1188 	binder_inner_proc_unlock(proc);
1189 	if (node != new_node)
1190 		/*
1191 		 * The node was already added by another thread
1192 		 */
1193 		kfree(new_node);
1194 
1195 	return node;
1196 }
1197 
1198 static void binder_free_node(struct binder_node *node)
1199 {
1200 	kfree(node);
1201 	binder_stats_deleted(BINDER_STAT_NODE);
1202 }
1203 
1204 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1205 				    int internal,
1206 				    struct list_head *target_list)
1207 {
1208 	struct binder_proc *proc = node->proc;
1209 
1210 	assert_spin_locked(&node->lock);
1211 	if (proc)
1212 		assert_spin_locked(&proc->inner_lock);
1213 	if (strong) {
1214 		if (internal) {
1215 			if (target_list == NULL &&
1216 			    node->internal_strong_refs == 0 &&
1217 			    !(node->proc &&
1218 			      node == node->proc->context->binder_context_mgr_node &&
1219 			      node->has_strong_ref)) {
1220 				pr_err("invalid inc strong node for %d\n",
1221 					node->debug_id);
1222 				return -EINVAL;
1223 			}
1224 			node->internal_strong_refs++;
1225 		} else
1226 			node->local_strong_refs++;
1227 		if (!node->has_strong_ref && target_list) {
1228 			struct binder_thread *thread = container_of(target_list,
1229 						    struct binder_thread, todo);
1230 			binder_dequeue_work_ilocked(&node->work);
1231 			BUG_ON(&thread->todo != target_list);
1232 			binder_enqueue_deferred_thread_work_ilocked(thread,
1233 								   &node->work);
1234 		}
1235 	} else {
1236 		if (!internal)
1237 			node->local_weak_refs++;
1238 		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1239 			if (target_list == NULL) {
1240 				pr_err("invalid inc weak node for %d\n",
1241 					node->debug_id);
1242 				return -EINVAL;
1243 			}
1244 			/*
1245 			 * See comment above
1246 			 */
1247 			binder_enqueue_work_ilocked(&node->work, target_list);
1248 		}
1249 	}
1250 	return 0;
1251 }
1252 
1253 static int binder_inc_node(struct binder_node *node, int strong, int internal,
1254 			   struct list_head *target_list)
1255 {
1256 	int ret;
1257 
1258 	binder_node_inner_lock(node);
1259 	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1260 	binder_node_inner_unlock(node);
1261 
1262 	return ret;
1263 }
1264 
1265 static bool binder_dec_node_nilocked(struct binder_node *node,
1266 				     int strong, int internal)
1267 {
1268 	struct binder_proc *proc = node->proc;
1269 
1270 	assert_spin_locked(&node->lock);
1271 	if (proc)
1272 		assert_spin_locked(&proc->inner_lock);
1273 	if (strong) {
1274 		if (internal)
1275 			node->internal_strong_refs--;
1276 		else
1277 			node->local_strong_refs--;
1278 		if (node->local_strong_refs || node->internal_strong_refs)
1279 			return false;
1280 	} else {
1281 		if (!internal)
1282 			node->local_weak_refs--;
1283 		if (node->local_weak_refs || node->tmp_refs ||
1284 				!hlist_empty(&node->refs))
1285 			return false;
1286 	}
1287 
1288 	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1289 		if (list_empty(&node->work.entry)) {
1290 			binder_enqueue_work_ilocked(&node->work, &proc->todo);
1291 			binder_wakeup_proc_ilocked(proc);
1292 		}
1293 	} else {
1294 		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1295 		    !node->local_weak_refs && !node->tmp_refs) {
1296 			if (proc) {
1297 				binder_dequeue_work_ilocked(&node->work);
1298 				rb_erase(&node->rb_node, &proc->nodes);
1299 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1300 					     "refless node %d deleted\n",
1301 					     node->debug_id);
1302 			} else {
1303 				BUG_ON(!list_empty(&node->work.entry));
1304 				spin_lock(&binder_dead_nodes_lock);
1305 				/*
1306 				 * tmp_refs could have changed so
1307 				 * check it again
1308 				 */
1309 				if (node->tmp_refs) {
1310 					spin_unlock(&binder_dead_nodes_lock);
1311 					return false;
1312 				}
1313 				hlist_del(&node->dead_node);
1314 				spin_unlock(&binder_dead_nodes_lock);
1315 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1316 					     "dead node %d deleted\n",
1317 					     node->debug_id);
1318 			}
1319 			return true;
1320 		}
1321 	}
1322 	return false;
1323 }
1324 
1325 static void binder_dec_node(struct binder_node *node, int strong, int internal)
1326 {
1327 	bool free_node;
1328 
1329 	binder_node_inner_lock(node);
1330 	free_node = binder_dec_node_nilocked(node, strong, internal);
1331 	binder_node_inner_unlock(node);
1332 	if (free_node)
1333 		binder_free_node(node);
1334 }
1335 
1336 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1337 {
1338 	/*
1339 	 * No call to binder_inc_node() is needed since we
1340 	 * don't need to inform userspace of any changes to
1341 	 * tmp_refs
1342 	 */
1343 	node->tmp_refs++;
1344 }
1345 
1346 /**
1347  * binder_inc_node_tmpref() - take a temporary reference on node
1348  * @node:	node to reference
1349  *
1350  * Take reference on node to prevent the node from being freed
1351  * while referenced only by a local variable. The inner lock is
1352  * needed to serialize with the node work on the queue (which
1353  * isn't needed after the node is dead). If the node is dead
1354  * (node->proc is NULL), use binder_dead_nodes_lock to protect
1355  * node->tmp_refs against dead-node-only cases where the node
1356  * lock cannot be acquired (e.g. traversing the dead node list to
1357  * print nodes)
1358  */
1359 static void binder_inc_node_tmpref(struct binder_node *node)
1360 {
1361 	binder_node_lock(node);
1362 	if (node->proc)
1363 		binder_inner_proc_lock(node->proc);
1364 	else
1365 		spin_lock(&binder_dead_nodes_lock);
1366 	binder_inc_node_tmpref_ilocked(node);
1367 	if (node->proc)
1368 		binder_inner_proc_unlock(node->proc);
1369 	else
1370 		spin_unlock(&binder_dead_nodes_lock);
1371 	binder_node_unlock(node);
1372 }
1373 
1374 /**
1375  * binder_dec_node_tmpref() - remove a temporary reference on node
1376  * @node:	node to reference
1377  *
1378  * Release temporary reference on node taken via binder_inc_node_tmpref()
1379  */
1380 static void binder_dec_node_tmpref(struct binder_node *node)
1381 {
1382 	bool free_node;
1383 
1384 	binder_node_inner_lock(node);
1385 	if (!node->proc)
1386 		spin_lock(&binder_dead_nodes_lock);
1387 	else
1388 		__acquire(&binder_dead_nodes_lock);
1389 	node->tmp_refs--;
1390 	BUG_ON(node->tmp_refs < 0);
1391 	if (!node->proc)
1392 		spin_unlock(&binder_dead_nodes_lock);
1393 	else
1394 		__release(&binder_dead_nodes_lock);
1395 	/*
1396 	 * Call binder_dec_node() to check if all refcounts are 0
1397 	 * and cleanup is needed. Calling with strong=0 and internal=1
1398 	 * causes no actual reference to be released in binder_dec_node().
1399 	 * If that changes, a change is needed here too.
1400 	 */
1401 	free_node = binder_dec_node_nilocked(node, 0, 1);
1402 	binder_node_inner_unlock(node);
1403 	if (free_node)
1404 		binder_free_node(node);
1405 }
1406 
1407 static void binder_put_node(struct binder_node *node)
1408 {
1409 	binder_dec_node_tmpref(node);
1410 }
1411 
1412 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1413 						 u32 desc, bool need_strong_ref)
1414 {
1415 	struct rb_node *n = proc->refs_by_desc.rb_node;
1416 	struct binder_ref *ref;
1417 
1418 	while (n) {
1419 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1420 
1421 		if (desc < ref->data.desc) {
1422 			n = n->rb_left;
1423 		} else if (desc > ref->data.desc) {
1424 			n = n->rb_right;
1425 		} else if (need_strong_ref && !ref->data.strong) {
1426 			binder_user_error("tried to use weak ref as strong ref\n");
1427 			return NULL;
1428 		} else {
1429 			return ref;
1430 		}
1431 	}
1432 	return NULL;
1433 }
1434 
1435 /**
1436  * binder_get_ref_for_node_olocked() - get the ref associated with given node
1437  * @proc:	binder_proc that owns the ref
1438  * @node:	binder_node of target
1439  * @new_ref:	newly allocated binder_ref to be initialized or %NULL
1440  *
1441  * Look up the ref for the given node and return it if it exists
1442  *
1443  * If it doesn't exist and the caller provides a newly allocated
1444  * ref, initialize the fields of the newly allocated ref and insert
1445  * into the given proc rb_trees and node refs list.
1446  *
1447  * Return:	the ref for node. It is possible that another thread
1448  *		allocated/initialized the ref first in which case the
1449  *		returned ref would be different than the passed-in
1450  *		new_ref. new_ref must be kfree'd by the caller in
1451  *		this case.
1452  */
1453 static struct binder_ref *binder_get_ref_for_node_olocked(
1454 					struct binder_proc *proc,
1455 					struct binder_node *node,
1456 					struct binder_ref *new_ref)
1457 {
1458 	struct binder_context *context = proc->context;
1459 	struct rb_node **p = &proc->refs_by_node.rb_node;
1460 	struct rb_node *parent = NULL;
1461 	struct binder_ref *ref;
1462 	struct rb_node *n;
1463 
1464 	while (*p) {
1465 		parent = *p;
1466 		ref = rb_entry(parent, struct binder_ref, rb_node_node);
1467 
1468 		if (node < ref->node)
1469 			p = &(*p)->rb_left;
1470 		else if (node > ref->node)
1471 			p = &(*p)->rb_right;
1472 		else
1473 			return ref;
1474 	}
1475 	if (!new_ref)
1476 		return NULL;
1477 
1478 	binder_stats_created(BINDER_STAT_REF);
1479 	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1480 	new_ref->proc = proc;
1481 	new_ref->node = node;
1482 	rb_link_node(&new_ref->rb_node_node, parent, p);
1483 	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1484 
1485 	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1486 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1487 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1488 		if (ref->data.desc > new_ref->data.desc)
1489 			break;
1490 		new_ref->data.desc = ref->data.desc + 1;
1491 	}
1492 
1493 	p = &proc->refs_by_desc.rb_node;
1494 	while (*p) {
1495 		parent = *p;
1496 		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1497 
1498 		if (new_ref->data.desc < ref->data.desc)
1499 			p = &(*p)->rb_left;
1500 		else if (new_ref->data.desc > ref->data.desc)
1501 			p = &(*p)->rb_right;
1502 		else
1503 			BUG();
1504 	}
1505 	rb_link_node(&new_ref->rb_node_desc, parent, p);
1506 	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1507 
1508 	binder_node_lock(node);
1509 	hlist_add_head(&new_ref->node_entry, &node->refs);
1510 
1511 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1512 		     "%d new ref %d desc %d for node %d\n",
1513 		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1514 		      node->debug_id);
1515 	binder_node_unlock(node);
1516 	return new_ref;
1517 }
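/*
 * Because allocating under the outer spinlock is not allowed, callers
 * use a two-step pattern: look up without a new_ref first, then drop
 * the lock, allocate, retake the lock and retry, freeing the
 * allocation if another thread won the race (a sketch of the pattern;
 * binder_inc_ref_for_node() uses it):
 *
 *	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
 *	if (!ref) {
 *		binder_proc_unlock(proc);
 *		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
 *		// handle allocation failure ...
 *		binder_proc_lock(proc);
 *		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
 *	}
 *	...
 *	binder_proc_unlock(proc);
 *	if (new_ref && ref != new_ref)
 *		kfree(new_ref);	// lost the race; ref already existed
 */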
1518 
1519 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1520 {
1521 	bool delete_node = false;
1522 
1523 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1524 		     "%d delete ref %d desc %d for node %d\n",
1525 		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
1526 		      ref->node->debug_id);
1527 
1528 	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1529 	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1530 
1531 	binder_node_inner_lock(ref->node);
1532 	if (ref->data.strong)
1533 		binder_dec_node_nilocked(ref->node, 1, 1);
1534 
1535 	hlist_del(&ref->node_entry);
1536 	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1537 	binder_node_inner_unlock(ref->node);
1538 	/*
1539 	 * Clear ref->node unless we want the caller to free the node
1540 	 */
1541 	if (!delete_node) {
1542 		/*
1543 		 * The caller uses ref->node to determine
1544 		 * whether the node needs to be freed. Clear
1545 		 * it since the node is still alive.
1546 		 */
1547 		ref->node = NULL;
1548 	}
1549 
1550 	if (ref->death) {
1551 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1552 			     "%d delete ref %d desc %d has death notification\n",
1553 			      ref->proc->pid, ref->data.debug_id,
1554 			      ref->data.desc);
1555 		binder_dequeue_work(ref->proc, &ref->death->work);
1556 		binder_stats_deleted(BINDER_STAT_DEATH);
1557 	}
1558 	binder_stats_deleted(BINDER_STAT_REF);
1559 }
1560 
1561 /**
1562  * binder_inc_ref_olocked() - increment the ref for given handle
1563  * @ref:         ref to be incremented
1564  * @strong:      if true, strong increment, else weak
1565  * @target_list: list to queue node work on
1566  *
1567  * Increment the ref. @ref->proc->outer_lock must be held on entry
1568  *
1569  * Return: 0, if successful, else errno
1570  */
1571 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1572 				  struct list_head *target_list)
1573 {
1574 	int ret;
1575 
1576 	if (strong) {
1577 		if (ref->data.strong == 0) {
1578 			ret = binder_inc_node(ref->node, 1, 1, target_list);
1579 			if (ret)
1580 				return ret;
1581 		}
1582 		ref->data.strong++;
1583 	} else {
1584 		if (ref->data.weak == 0) {
1585 			ret = binder_inc_node(ref->node, 0, 1, target_list);
1586 			if (ret)
1587 				return ret;
1588 		}
1589 		ref->data.weak++;
1590 	}
1591 	return 0;
1592 }
1593 
1594 /**
1595  * binder_dec_ref_olocked() - dec the ref for given handle
1596  * @ref:	ref to be decremented
1597  * @strong:	if true, strong decrement, else weak
1598  *
1599  * Decrement the ref. @ref->proc->outer_lock must be held on entry.
1600  *
1601  * Return: true if ref is cleaned up and ready to be freed
1602  */
1603 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1604 {
1605 	if (strong) {
1606 		if (ref->data.strong == 0) {
1607 			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1608 					  ref->proc->pid, ref->data.debug_id,
1609 					  ref->data.desc, ref->data.strong,
1610 					  ref->data.weak);
1611 			return false;
1612 		}
1613 		ref->data.strong--;
1614 		if (ref->data.strong == 0)
1615 			binder_dec_node(ref->node, strong, 1);
1616 	} else {
1617 		if (ref->data.weak == 0) {
1618 			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1619 					  ref->proc->pid, ref->data.debug_id,
1620 					  ref->data.desc, ref->data.strong,
1621 					  ref->data.weak);
1622 			return false;
1623 		}
1624 		ref->data.weak--;
1625 	}
1626 	if (ref->data.strong == 0 && ref->data.weak == 0) {
1627 		binder_cleanup_ref_olocked(ref);
1628 		return true;
1629 	}
1630 	return false;
1631 }
1632 
1633 /**
1634  * binder_get_node_from_ref() - get the node from the given proc/desc
1635  * @proc:	proc containing the ref
1636  * @desc:	the handle associated with the ref
1637  * @need_strong_ref: if true, only return node if ref is strong
1638  * @rdata:	the id/refcount data for the ref
1639  *
1640  * Given a proc and ref handle, return the associated binder_node
1641  *
1642  * Return: a binder_node, or NULL if the ref was not found or holds
1643  * only a weak reference when a strong one is required
1643  */
1644 static struct binder_node *binder_get_node_from_ref(
1645 		struct binder_proc *proc,
1646 		u32 desc, bool need_strong_ref,
1647 		struct binder_ref_data *rdata)
1648 {
1649 	struct binder_node *node;
1650 	struct binder_ref *ref;
1651 
1652 	binder_proc_lock(proc);
1653 	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1654 	if (!ref)
1655 		goto err_no_ref;
1656 	node = ref->node;
1657 	/*
1658 	 * Take an implicit reference on the node to ensure
1659 	 * it stays alive until the call to binder_put_node()
1660 	 */
1661 	binder_inc_node_tmpref(node);
1662 	if (rdata)
1663 		*rdata = ref->data;
1664 	binder_proc_unlock(proc);
1665 
1666 	return node;
1667 
1668 err_no_ref:
1669 	binder_proc_unlock(proc);
1670 	return NULL;
1671 }
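
/*
 * Illustrative pairing (a sketch): every node returned here carries a
 * temporary reference, so callers must balance the lookup with
 * binder_put_node() when done, as binder_translate_handle() does:
 *
 *	node = binder_get_node_from_ref(proc, handle, true, &rdata);
 *	if (node) {
 *		... use node ...
 *		binder_put_node(node);
 *	}
 */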
1672 
1673 /**
1674  * binder_free_ref() - free the binder_ref
1675  * @ref:	ref to free
1676  *
1677  * Free the binder_ref. Free the binder_node indicated by ref->node
1678  * (if non-NULL) and the binder_ref_death indicated by ref->death.
1679  */
1680 static void binder_free_ref(struct binder_ref *ref)
1681 {
1682 	if (ref->node)
1683 		binder_free_node(ref->node);
1684 	kfree(ref->death);
1685 	kfree(ref);
1686 }
1687 
1688 /**
1689  * binder_update_ref_for_handle() - inc/dec the ref for given handle
1690  * @proc:	proc containing the ref
1691  * @desc:	the handle associated with the ref
1692  * @increment:	true=inc reference, false=dec reference
1693  * @strong:	true=strong reference, false=weak reference
1694  * @rdata:	the id/refcount data for the ref
1695  *
1696  * Given a proc and ref handle, increment or decrement the ref
1697  * according to "increment" arg.
1698  *
1699  * Return: 0 if successful, else errno
1700  */
1701 static int binder_update_ref_for_handle(struct binder_proc *proc,
1702 		uint32_t desc, bool increment, bool strong,
1703 		struct binder_ref_data *rdata)
1704 {
1705 	int ret = 0;
1706 	struct binder_ref *ref;
1707 	bool delete_ref = false;
1708 
1709 	binder_proc_lock(proc);
1710 	ref = binder_get_ref_olocked(proc, desc, strong);
1711 	if (!ref) {
1712 		ret = -EINVAL;
1713 		goto err_no_ref;
1714 	}
1715 	if (increment)
1716 		ret = binder_inc_ref_olocked(ref, strong, NULL);
1717 	else
1718 		delete_ref = binder_dec_ref_olocked(ref, strong);
1719 
1720 	if (rdata)
1721 		*rdata = ref->data;
1722 	binder_proc_unlock(proc);
1723 
1724 	if (delete_ref)
1725 		binder_free_ref(ref);
1726 	return ret;
1727 
1728 err_no_ref:
1729 	binder_proc_unlock(proc);
1730 	return ret;
1731 }
1732 
1733 /**
1734  * binder_dec_ref_for_handle() - dec the ref for given handle
1735  * @proc:	proc containing the ref
1736  * @desc:	the handle associated with the ref
1737  * @strong:	true=strong reference, false=weak reference
1738  * @rdata:	the id/refcount data for the ref
1739  *
1740  * Just calls binder_update_ref_for_handle() to decrement the ref.
1741  *
1742  * Return: 0 if successful, else errno
1743  */
1744 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1745 		uint32_t desc, bool strong, struct binder_ref_data *rdata)
1746 {
1747 	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1748 }
1749 
1750 
1751 /**
1752  * binder_inc_ref_for_node() - increment the ref for given proc/node
1753  * @proc:	 proc containing the ref
1754  * @node:	 target node
1755  * @strong:	 true=strong reference, false=weak reference
1756  * @target_list: worklist to use if node is incremented
1757  * @rdata:	 the id/refcount data for the ref
1758  *
1759  * Given a proc and node, increment the ref. Create the ref if it
1760  * doesn't already exist
1761  *
1762  * Return: 0 if successful, else errno
1763  */
1764 static int binder_inc_ref_for_node(struct binder_proc *proc,
1765 			struct binder_node *node,
1766 			bool strong,
1767 			struct list_head *target_list,
1768 			struct binder_ref_data *rdata)
1769 {
1770 	struct binder_ref *ref;
1771 	struct binder_ref *new_ref = NULL;
1772 	int ret = 0;
1773 
1774 	binder_proc_lock(proc);
1775 	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1776 	if (!ref) {
1777 		binder_proc_unlock(proc);
1778 		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1779 		if (!new_ref)
1780 			return -ENOMEM;
1781 		binder_proc_lock(proc);
1782 		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1783 	}
1784 	ret = binder_inc_ref_olocked(ref, strong, target_list);
1785 	*rdata = ref->data;
1786 	binder_proc_unlock(proc);
1787 	if (new_ref && ref != new_ref)
1788 		/*
1789 		 * Another thread created the ref first so
1790 		 * free the one we allocated
1791 		 */
1792 		kfree(new_ref);
1793 	return ret;
1794 }
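
/*
 * The unlock/allocate/relock sequence above is the usual pattern for
 * allocating under a spinlock: kzalloc(GFP_KERNEL) may sleep, so the
 * lock is dropped for the allocation and the lookup is redone after
 * reacquiring it, in case another thread won the race. A generic
 * sketch (lookup()/lookup_or_insert() are placeholder names):
 *
 *	lock();
 *	obj = lookup();
 *	if (!obj) {
 *		unlock();
 *		new = kzalloc(sizeof(*new), GFP_KERNEL);  // may sleep
 *		lock();
 *		obj = lookup_or_insert(new);  // recheck under the lock
 *	}
 *	...
 *	unlock();
 *	if (new && obj != new)
 *		kfree(new);  // lost the race; free our copy
 */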
1795 
1796 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1797 					   struct binder_transaction *t)
1798 {
1799 	BUG_ON(!target_thread);
1800 	assert_spin_locked(&target_thread->proc->inner_lock);
1801 	BUG_ON(target_thread->transaction_stack != t);
1802 	BUG_ON(target_thread->transaction_stack->from != target_thread);
1803 	target_thread->transaction_stack =
1804 		target_thread->transaction_stack->from_parent;
1805 	t->from = NULL;
1806 }
1807 
1808 /**
1809  * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1810  * @thread:	thread to decrement
1811  *
1812  * A thread needs to be kept alive while being used to create or
1813  * handle a transaction. binder_get_txn_from() is used to safely
1814  * extract t->from from a binder_transaction and keep the thread
1815  * indicated by t->from from being freed. When done with that
1816  * binder_thread, this function is called to decrement the
1817  * tmp_ref and free the thread if appropriate (it has been released
1818  * and no transaction is being processed by the driver).
1819  */
1820 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1821 {
1822 	/*
1823 	 * tmp_ref is atomic so it can be incremented without the inner
1824 	 * lock; the lock keeps the is_dead/zero test and free consistent
1825 	 */
1826 	binder_inner_proc_lock(thread->proc);
1827 	atomic_dec(&thread->tmp_ref);
1828 	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1829 		binder_inner_proc_unlock(thread->proc);
1830 		binder_free_thread(thread);
1831 		return;
1832 	}
1833 	binder_inner_proc_unlock(thread->proc);
1834 }
1835 
1836 /**
1837  * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1838  * @proc:	proc to decrement
1839  *
1840  * A binder_proc needs to be kept alive while being used to create or
1841  * handle a transaction. proc->tmp_ref is incremented when
1842  * creating a new transaction or the binder_proc is currently in-use
1843  * by threads that are being released. When done with the binder_proc,
1844  * this function is called to decrement the counter and free the
1845  * proc if appropriate (proc has been released, all threads have
1846  * been released and not currenly in-use to process a transaction).
1847  */
1848 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1849 {
1850 	binder_inner_proc_lock(proc);
1851 	proc->tmp_ref--;
1852 	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1853 			!proc->tmp_ref) {
1854 		binder_inner_proc_unlock(proc);
1855 		binder_free_proc(proc);
1856 		return;
1857 	}
1858 	binder_inner_proc_unlock(proc);
1859 }
1860 
1861 /**
1862  * binder_get_txn_from() - safely extract the "from" thread in transaction
1863  * @t:	binder transaction for t->from
1864  *
1865  * Atomically return the "from" thread and increment the tmp_ref
1866  * count for the thread to ensure it stays alive until
1867  * binder_thread_dec_tmpref() is called.
1868  *
1869  * Return: the value of t->from
1870  */
1871 static struct binder_thread *binder_get_txn_from(
1872 		struct binder_transaction *t)
1873 {
1874 	struct binder_thread *from;
1875 
1876 	spin_lock(&t->lock);
1877 	from = t->from;
1878 	if (from)
1879 		atomic_inc(&from->tmp_ref);
1880 	spin_unlock(&t->lock);
1881 	return from;
1882 }
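
/*
 * Illustrative pairing (a sketch): the temporary reference taken here
 * must be released with binder_thread_dec_tmpref():
 *
 *	target_thread = binder_get_txn_from(t);
 *	if (target_thread) {
 *		... use target_thread ...
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 */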
1883 
1884 /**
1885  * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1886  * @t:	binder transaction for t->from
1887  *
1888  * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1889  * to guarantee that the thread cannot be released while operating on it.
1890  * The caller must call binder_inner_proc_unlock() to release the inner lock
1891  * as well as call binder_thread_dec_tmpref() to release the reference.
1892  *
1893  * Return: the value of t->from
1894  */
1895 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1896 		struct binder_transaction *t)
1897 	__acquires(&t->from->proc->inner_lock)
1898 {
1899 	struct binder_thread *from;
1900 
1901 	from = binder_get_txn_from(t);
1902 	if (!from) {
1903 		__acquire(&from->proc->inner_lock);
1904 		return NULL;
1905 	}
1906 	binder_inner_proc_lock(from->proc);
1907 	if (t->from) {
1908 		BUG_ON(from != t->from);
1909 		return from;
1910 	}
1911 	binder_inner_proc_unlock(from->proc);
1912 	__acquire(&from->proc->inner_lock);
1913 	binder_thread_dec_tmpref(from);
1914 	return NULL;
1915 }
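
/*
 * Illustrative caller (a sketch mirroring binder_send_failed_reply()):
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		... operate on the thread under the inner lock ...
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 */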
1916 
1917 /**
1918  * binder_free_txn_fixups() - free unprocessed fd fixups
1919  * @t:	binder transaction whose fd fixups are to be freed
1920  *
1921  * If the transaction is being torn down prior to being
1922  * processed by the target process, free all of the
1923  * fd fixups and fput the file structs. It is safe to
1924  * call this function after the fixups have been
1925  * processed -- in that case, the list will be empty.
1926  */
1927 static void binder_free_txn_fixups(struct binder_transaction *t)
1928 {
1929 	struct binder_txn_fd_fixup *fixup, *tmp;
1930 
1931 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1932 		fput(fixup->file);
1933 		list_del(&fixup->fixup_entry);
1934 		kfree(fixup);
1935 	}
1936 }
1937 
1938 static void binder_free_transaction(struct binder_transaction *t)
1939 {
1940 	if (t->buffer)
1941 		t->buffer->transaction = NULL;
1942 	binder_free_txn_fixups(t);
1943 	kfree(t);
1944 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
1945 }
1946 
1947 static void binder_send_failed_reply(struct binder_transaction *t,
1948 				     uint32_t error_code)
1949 {
1950 	struct binder_thread *target_thread;
1951 	struct binder_transaction *next;
1952 
1953 	BUG_ON(t->flags & TF_ONE_WAY);
1954 	while (1) {
1955 		target_thread = binder_get_txn_from_and_acq_inner(t);
1956 		if (target_thread) {
1957 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1958 				     "send failed reply for transaction %d to %d:%d\n",
1959 				      t->debug_id,
1960 				      target_thread->proc->pid,
1961 				      target_thread->pid);
1962 
1963 			binder_pop_transaction_ilocked(target_thread, t);
1964 			if (target_thread->reply_error.cmd == BR_OK) {
1965 				target_thread->reply_error.cmd = error_code;
1966 				binder_enqueue_thread_work_ilocked(
1967 					target_thread,
1968 					&target_thread->reply_error.work);
1969 				wake_up_interruptible(&target_thread->wait);
1970 			} else {
1971 				/*
1972 				 * Cannot get here for normal operation, but
1973 				 * we can if multiple synchronous transactions
1974 				 * are sent without blocking for responses.
1975 				 * Just ignore the 2nd error in this case.
1976 				 */
1977 				pr_warn("Unexpected reply error: %u\n",
1978 					target_thread->reply_error.cmd);
1979 			}
1980 			binder_inner_proc_unlock(target_thread->proc);
1981 			binder_thread_dec_tmpref(target_thread);
1982 			binder_free_transaction(t);
1983 			return;
1984 		} else {
1985 			__release(&target_thread->proc->inner_lock);
1986 		}
1987 		next = t->from_parent;
1988 
1989 		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1990 			     "send failed reply for transaction %d, target dead\n",
1991 			     t->debug_id);
1992 
1993 		binder_free_transaction(t);
1994 		if (next == NULL) {
1995 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
1996 				     "reply failed, no target thread at root\n");
1997 			return;
1998 		}
1999 		t = next;
2000 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
2001 			     "reply failed, no target thread -- retry %d\n",
2002 			      t->debug_id);
2003 	}
2004 }
2005 
2006 /**
2007  * binder_cleanup_transaction() - cleans up undelivered transaction
2008  * @t:		transaction that needs to be cleaned up
2009  * @reason:	reason the transaction wasn't delivered
2010  * @error_code:	error to return to caller (if synchronous call)
2011  */
2012 static void binder_cleanup_transaction(struct binder_transaction *t,
2013 				       const char *reason,
2014 				       uint32_t error_code)
2015 {
2016 	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2017 		binder_send_failed_reply(t, error_code);
2018 	} else {
2019 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2020 			"undelivered transaction %d, %s\n",
2021 			t->debug_id, reason);
2022 		binder_free_transaction(t);
2023 	}
2024 }
2025 
2026 /**
2027  * binder_validate_object() - checks for a valid metadata object in a buffer.
2028  * @buffer:	binder_buffer that we're parsing.
2029  * @offset:	offset in the buffer at which to validate an object.
2030  *
2031  * Return:	If there's a valid metadata object at @offset in @buffer, the
2032  *		size of that object. Otherwise, it returns zero.
2033  */
2034 static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
2035 {
2036 	/* Check if we can read a header first */
2037 	struct binder_object_header *hdr;
2038 	size_t object_size = 0;
2039 
2040 	if (buffer->data_size < sizeof(*hdr) ||
2041 	    offset > buffer->data_size - sizeof(*hdr) ||
2042 	    !IS_ALIGNED(offset, sizeof(u32)))
2043 		return 0;
2044 
2045 	/* Ok, now see if we can read a complete object. */
2046 	hdr = (struct binder_object_header *)(buffer->data + offset);
2047 	switch (hdr->type) {
2048 	case BINDER_TYPE_BINDER:
2049 	case BINDER_TYPE_WEAK_BINDER:
2050 	case BINDER_TYPE_HANDLE:
2051 	case BINDER_TYPE_WEAK_HANDLE:
2052 		object_size = sizeof(struct flat_binder_object);
2053 		break;
2054 	case BINDER_TYPE_FD:
2055 		object_size = sizeof(struct binder_fd_object);
2056 		break;
2057 	case BINDER_TYPE_PTR:
2058 		object_size = sizeof(struct binder_buffer_object);
2059 		break;
2060 	case BINDER_TYPE_FDA:
2061 		object_size = sizeof(struct binder_fd_array_object);
2062 		break;
2063 	default:
2064 		return 0;
2065 	}
2066 	if (offset <= buffer->data_size - object_size &&
2067 	    buffer->data_size >= object_size)
2068 		return object_size;
2069 	else
2070 		return 0;
2071 }
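
/*
 * Worked example (illustrative, assuming a 64-bit kernel where
 * sizeof(struct flat_binder_object) == 24): with data_size == 64, a
 * flat_binder_object is accepted at any 4-byte-aligned offset up to
 * 40 (64 - 24); offset 44 passes the header check but leaves only 20
 * bytes for the object, so the final bounds check returns 0.
 */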
2072 
2073 /**
2074  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2075  * @b:		binder_buffer containing the object
2076  * @index:	index in offset array at which the binder_buffer_object is
2077  *		located
2078  * @start:	points to the start of the offset array
2079  * @num_valid:	the number of valid offsets in the offset array
2080  *
2081  * Return:	If @index is within the valid range of the offset array
2082  *		described by @start and @num_valid, and if there's a valid
2083  *		binder_buffer_object at the offset found in index @index
2084  *		of the offset array, that object is returned. Otherwise,
2085  *		%NULL is returned.
2086  *		Note that the offset found in index @index itself is not
2087  *		verified; this function assumes that @num_valid elements
2088  *		from @start were previously verified to have valid offsets.
2089  */
2090 static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
2091 							binder_size_t index,
2092 							binder_size_t *start,
2093 							binder_size_t num_valid)
2094 {
2095 	struct binder_buffer_object *buffer_obj;
2096 	binder_size_t *offp;
2097 
2098 	if (index >= num_valid)
2099 		return NULL;
2100 
2101 	offp = start + index;
2102 	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
2103 	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
2104 		return NULL;
2105 
2106 	return buffer_obj;
2107 }
2108 
2109 /**
2110  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2111  * @b:			transaction buffer
2112  * @objects_start:	start of objects buffer
2113  * @buffer:		binder_buffer_object in which to fix up
2114  * @fixup_offset:	start offset in @buffer to fix up
2115  * @last_obj:		last binder_buffer_object that we fixed up in
2116  * @last_min_offset:	minimum fixup offset in @last_obj
2117  *
2118  * Return:		%true if a fixup in buffer @buffer at offset
2119  *			@fixup_offset is allowed.
2120  *
2121  * For safety reasons, we only allow fixups inside a buffer to happen
2122  * at increasing offsets; additionally, we only allow fixup on the last
2123  * buffer object that was verified, or one of its parents.
2124  *
2125  * Example of what is allowed:
2126  *
2127  * A
2128  *   B (parent = A, offset = 0)
2129  *   C (parent = A, offset = 16)
2130  *     D (parent = C, offset = 0)
2131  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2132  *
2133  * Examples of what is not allowed:
2134  *
2135  * Decreasing offsets within the same parent:
2136  * A
2137  *   C (parent = A, offset = 16)
2138  *   B (parent = A, offset = 0) // decreasing offset within A
2139  *
2140  * Referring to a parent that wasn't the last object or any of its parents:
2141  * A
2142  *   B (parent = A, offset = 0)
2143  *   C (parent = A, offset = 8)
2144  *   D (parent = A, offset = 16)
2145  *     E (parent = C, offset = 0) // C is not D or any of D's parents
2146  */
2147 static bool binder_validate_fixup(struct binder_buffer *b,
2148 				  binder_size_t *objects_start,
2149 				  struct binder_buffer_object *buffer,
2150 				  binder_size_t fixup_offset,
2151 				  struct binder_buffer_object *last_obj,
2152 				  binder_size_t last_min_offset)
2153 {
2154 	if (!last_obj) {
2155 		/* No previous object, so nothing to fix up into */
2156 		return false;
2157 	}
2158 
2159 	while (last_obj != buffer) {
2160 		/*
2161 		 * Safe to retrieve the parent of last_obj, since it
2162 		 * was previously verified by the driver.
2163 		 */
2164 		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2165 			return false;
2166 		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
2167 		last_obj = (struct binder_buffer_object *)
2168 			(b->data + *(objects_start + last_obj->parent));
2169 	}
2170 	return (fixup_offset >= last_min_offset);
2171 }
2172 
2173 static void binder_transaction_buffer_release(struct binder_proc *proc,
2174 					      struct binder_buffer *buffer,
2175 					      binder_size_t *failed_at)
2176 {
2177 	binder_size_t *offp, *off_start, *off_end;
2178 	int debug_id = buffer->debug_id;
2179 
2180 	binder_debug(BINDER_DEBUG_TRANSACTION,
2181 		     "%d buffer release %d, size %zd-%zd, failed at %pK\n",
2182 		     proc->pid, buffer->debug_id,
2183 		     buffer->data_size, buffer->offsets_size, failed_at);
2184 
2185 	if (buffer->target_node)
2186 		binder_dec_node(buffer->target_node, 1, 0);
2187 
2188 	off_start = (binder_size_t *)(buffer->data +
2189 				      ALIGN(buffer->data_size, sizeof(void *)));
2190 	if (failed_at)
2191 		off_end = failed_at;
2192 	else
2193 		off_end = (void *)off_start + buffer->offsets_size;
2194 	for (offp = off_start; offp < off_end; offp++) {
2195 		struct binder_object_header *hdr;
2196 		size_t object_size = binder_validate_object(buffer, *offp);
2197 
2198 		if (object_size == 0) {
2199 			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2200 			       debug_id, (u64)*offp, buffer->data_size);
2201 			continue;
2202 		}
2203 		hdr = (struct binder_object_header *)(buffer->data + *offp);
2204 		switch (hdr->type) {
2205 		case BINDER_TYPE_BINDER:
2206 		case BINDER_TYPE_WEAK_BINDER: {
2207 			struct flat_binder_object *fp;
2208 			struct binder_node *node;
2209 
2210 			fp = to_flat_binder_object(hdr);
2211 			node = binder_get_node(proc, fp->binder);
2212 			if (node == NULL) {
2213 				pr_err("transaction release %d bad node %016llx\n",
2214 				       debug_id, (u64)fp->binder);
2215 				break;
2216 			}
2217 			binder_debug(BINDER_DEBUG_TRANSACTION,
2218 				     "        node %d u%016llx\n",
2219 				     node->debug_id, (u64)node->ptr);
2220 			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2221 					0);
2222 			binder_put_node(node);
2223 		} break;
2224 		case BINDER_TYPE_HANDLE:
2225 		case BINDER_TYPE_WEAK_HANDLE: {
2226 			struct flat_binder_object *fp;
2227 			struct binder_ref_data rdata;
2228 			int ret;
2229 
2230 			fp = to_flat_binder_object(hdr);
2231 			ret = binder_dec_ref_for_handle(proc, fp->handle,
2232 				hdr->type == BINDER_TYPE_HANDLE, &rdata);
2233 
2234 			if (ret) {
2235 				pr_err("transaction release %d bad handle %d, ret = %d\n",
2236 				 debug_id, fp->handle, ret);
2237 				break;
2238 			}
2239 			binder_debug(BINDER_DEBUG_TRANSACTION,
2240 				     "        ref %d desc %d\n",
2241 				     rdata.debug_id, rdata.desc);
2242 		} break;
2243 
2244 		case BINDER_TYPE_FD: {
2245 			/*
2246 			 * No need to close the file here since user-space
2247 			 * closes it for successfully delivered
2248 			 * transactions. For transactions that weren't
2249 			 * delivered, the new fd was never allocated so
2250 			 * there is no need to close and the fput on the
2251 			 * file is done when the transaction is torn
2252 			 * down.
2253 			 */
2254 			WARN_ON(failed_at &&
2255 				proc->tsk == current->group_leader);
2256 		} break;
2257 		case BINDER_TYPE_PTR:
2258 			/*
2259 			 * Nothing to do here, this will get cleaned up when the
2260 			 * transaction buffer gets freed
2261 			 */
2262 			break;
2263 		case BINDER_TYPE_FDA: {
2264 			struct binder_fd_array_object *fda;
2265 			struct binder_buffer_object *parent;
2266 			uintptr_t parent_buffer;
2267 			u32 *fd_array;
2268 			size_t fd_index;
2269 			binder_size_t fd_buf_size;
2270 
2271 			if (proc->tsk != current->group_leader) {
2272 				/*
2273 				 * Nothing to do if running in sender context.
2274 				 * The fd fixups have not been applied, so no
2275 				 * fds need to be closed.
2276 				 */
2277 				continue;
2278 			}
2279 
2280 			fda = to_binder_fd_array_object(hdr);
2281 			parent = binder_validate_ptr(buffer, fda->parent,
2282 						     off_start,
2283 						     offp - off_start);
2284 			if (!parent) {
2285 				pr_err("transaction release %d bad parent offset\n",
2286 				       debug_id);
2287 				continue;
2288 			}
2289 			/*
2290 			 * Since the parent was already fixed up, convert it
2291 			 * back to kernel address space to access it
2292 			 */
2293 			parent_buffer = parent->buffer -
2294 				binder_alloc_get_user_buffer_offset(
2295 						&proc->alloc);
2296 
2297 			fd_buf_size = sizeof(u32) * fda->num_fds;
2298 			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2299 				pr_err("transaction release %d invalid number of fds (%lld)\n",
2300 				       debug_id, (u64)fda->num_fds);
2301 				continue;
2302 			}
2303 			if (fd_buf_size > parent->length ||
2304 			    fda->parent_offset > parent->length - fd_buf_size) {
2305 				/* No space for all file descriptors here. */
2306 				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2307 				       debug_id, (u64)fda->num_fds);
2308 				continue;
2309 			}
2310 			fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2311 			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2312 				ksys_close(fd_array[fd_index]);
2313 		} break;
2314 		default:
2315 			pr_err("transaction release %d bad object type %x\n",
2316 				debug_id, hdr->type);
2317 			break;
2318 		}
2319 	}
2320 }
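
/*
 * Illustrative call (a sketch): on a failed binder_transaction(), the
 * error path passes the offset pointer it stopped at as @failed_at so
 * that only objects that were already translated get released:
 *
 *	binder_transaction_buffer_release(target_proc, t->buffer, offp);
 */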
2321 
2322 static int binder_translate_binder(struct flat_binder_object *fp,
2323 				   struct binder_transaction *t,
2324 				   struct binder_thread *thread)
2325 {
2326 	struct binder_node *node;
2327 	struct binder_proc *proc = thread->proc;
2328 	struct binder_proc *target_proc = t->to_proc;
2329 	struct binder_ref_data rdata;
2330 	int ret = 0;
2331 
2332 	node = binder_get_node(proc, fp->binder);
2333 	if (!node) {
2334 		node = binder_new_node(proc, fp);
2335 		if (!node)
2336 			return -ENOMEM;
2337 	}
2338 	if (fp->cookie != node->cookie) {
2339 		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2340 				  proc->pid, thread->pid, (u64)fp->binder,
2341 				  node->debug_id, (u64)fp->cookie,
2342 				  (u64)node->cookie);
2343 		ret = -EINVAL;
2344 		goto done;
2345 	}
2346 	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2347 		ret = -EPERM;
2348 		goto done;
2349 	}
2350 
2351 	ret = binder_inc_ref_for_node(target_proc, node,
2352 			fp->hdr.type == BINDER_TYPE_BINDER,
2353 			&thread->todo, &rdata);
2354 	if (ret)
2355 		goto done;
2356 
2357 	if (fp->hdr.type == BINDER_TYPE_BINDER)
2358 		fp->hdr.type = BINDER_TYPE_HANDLE;
2359 	else
2360 		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2361 	fp->binder = 0;
2362 	fp->handle = rdata.desc;
2363 	fp->cookie = 0;
2364 
2365 	trace_binder_transaction_node_to_ref(t, node, &rdata);
2366 	binder_debug(BINDER_DEBUG_TRANSACTION,
2367 		     "        node %d u%016llx -> ref %d desc %d\n",
2368 		     node->debug_id, (u64)node->ptr,
2369 		     rdata.debug_id, rdata.desc);
2370 done:
2371 	binder_put_node(node);
2372 	return ret;
2373 }
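
/*
 * Net effect (illustrative): a BINDER_TYPE_BINDER sent by its owning
 * process arrives in the target as a BINDER_TYPE_HANDLE whose handle
 * is the target's descriptor (rdata.desc); binder and cookie are
 * zeroed so the sender's raw pointers never leak to the other process.
 */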
2374 
2375 static int binder_translate_handle(struct flat_binder_object *fp,
2376 				   struct binder_transaction *t,
2377 				   struct binder_thread *thread)
2378 {
2379 	struct binder_proc *proc = thread->proc;
2380 	struct binder_proc *target_proc = t->to_proc;
2381 	struct binder_node *node;
2382 	struct binder_ref_data src_rdata;
2383 	int ret = 0;
2384 
2385 	node = binder_get_node_from_ref(proc, fp->handle,
2386 			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2387 	if (!node) {
2388 		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2389 				  proc->pid, thread->pid, fp->handle);
2390 		return -EINVAL;
2391 	}
2392 	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2393 		ret = -EPERM;
2394 		goto done;
2395 	}
2396 
2397 	binder_node_lock(node);
2398 	if (node->proc == target_proc) {
2399 		if (fp->hdr.type == BINDER_TYPE_HANDLE)
2400 			fp->hdr.type = BINDER_TYPE_BINDER;
2401 		else
2402 			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2403 		fp->binder = node->ptr;
2404 		fp->cookie = node->cookie;
2405 		if (node->proc)
2406 			binder_inner_proc_lock(node->proc);
2407 		else
2408 			__acquire(&node->proc->inner_lock);
2409 		binder_inc_node_nilocked(node,
2410 					 fp->hdr.type == BINDER_TYPE_BINDER,
2411 					 0, NULL);
2412 		if (node->proc)
2413 			binder_inner_proc_unlock(node->proc);
2414 		else
2415 			__release(&node->proc->inner_lock);
2416 		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2417 		binder_debug(BINDER_DEBUG_TRANSACTION,
2418 			     "        ref %d desc %d -> node %d u%016llx\n",
2419 			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
2420 			     (u64)node->ptr);
2421 		binder_node_unlock(node);
2422 	} else {
2423 		struct binder_ref_data dest_rdata;
2424 
2425 		binder_node_unlock(node);
2426 		ret = binder_inc_ref_for_node(target_proc, node,
2427 				fp->hdr.type == BINDER_TYPE_HANDLE,
2428 				NULL, &dest_rdata);
2429 		if (ret)
2430 			goto done;
2431 
2432 		fp->binder = 0;
2433 		fp->handle = dest_rdata.desc;
2434 		fp->cookie = 0;
2435 		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2436 						    &dest_rdata);
2437 		binder_debug(BINDER_DEBUG_TRANSACTION,
2438 			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2439 			     src_rdata.debug_id, src_rdata.desc,
2440 			     dest_rdata.debug_id, dest_rdata.desc,
2441 			     node->debug_id);
2442 	}
2443 done:
2444 	binder_put_node(node);
2445 	return ret;
2446 }
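
/*
 * Translation summary (illustrative):
 *
 *	node->proc == target_proc:	the handle is converted back into
 *					a local binder object (ptr/cookie
 *					restored from the node)
 *	node->proc != target_proc:	a ref is created/incremented in
 *					the target and the handle is
 *					rewritten to the target's desc
 */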
2447 
2448 static int binder_translate_fd(u32 *fdp,
2449 			       struct binder_transaction *t,
2450 			       struct binder_thread *thread,
2451 			       struct binder_transaction *in_reply_to)
2452 {
2453 	struct binder_proc *proc = thread->proc;
2454 	struct binder_proc *target_proc = t->to_proc;
2455 	struct binder_txn_fd_fixup *fixup;
2456 	struct file *file;
2457 	int ret = 0;
2458 	bool target_allows_fd;
2459 	int fd = *fdp;
2460 
2461 	if (in_reply_to)
2462 		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2463 	else
2464 		target_allows_fd = t->buffer->target_node->accept_fds;
2465 	if (!target_allows_fd) {
2466 		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2467 				  proc->pid, thread->pid,
2468 				  in_reply_to ? "reply" : "transaction",
2469 				  fd);
2470 		ret = -EPERM;
2471 		goto err_fd_not_accepted;
2472 	}
2473 
2474 	file = fget(fd);
2475 	if (!file) {
2476 		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2477 				  proc->pid, thread->pid, fd);
2478 		ret = -EBADF;
2479 		goto err_fget;
2480 	}
2481 	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2482 	if (ret < 0) {
2483 		ret = -EPERM;
2484 		goto err_security;
2485 	}
2486 
2487 	/*
2488 	 * Add fixup record for this transaction. The allocation
2489 	 * of the fd in the target needs to be done from a
2490 	 * target thread.
2491 	 */
2492 	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2493 	if (!fixup) {
2494 		ret = -ENOMEM;
2495 		goto err_alloc;
2496 	}
2497 	fixup->file = file;
2498 	fixup->offset = (uintptr_t)fdp - (uintptr_t)t->buffer->data;
2499 	trace_binder_transaction_fd_send(t, fd, fixup->offset);
2500 	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2501 
2502 	return ret;
2503 
2504 err_alloc:
2505 err_security:
2506 	fput(file);
2507 err_fget:
2508 err_fd_not_accepted:
2509 	return ret;
2510 }
2511 
2512 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2513 				     struct binder_buffer_object *parent,
2514 				     struct binder_transaction *t,
2515 				     struct binder_thread *thread,
2516 				     struct binder_transaction *in_reply_to)
2517 {
2518 	binder_size_t fdi, fd_buf_size;
2519 	uintptr_t parent_buffer;
2520 	u32 *fd_array;
2521 	struct binder_proc *proc = thread->proc;
2522 	struct binder_proc *target_proc = t->to_proc;
2523 
2524 	fd_buf_size = sizeof(u32) * fda->num_fds;
2525 	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2526 		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2527 				  proc->pid, thread->pid, (u64)fda->num_fds);
2528 		return -EINVAL;
2529 	}
2530 	if (fd_buf_size > parent->length ||
2531 	    fda->parent_offset > parent->length - fd_buf_size) {
2532 		/* No space for all file descriptors here. */
2533 		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2534 				  proc->pid, thread->pid, (u64)fda->num_fds);
2535 		return -EINVAL;
2536 	}
2537 	/*
2538 	 * Since the parent was already fixed up, convert it
2539 	 * back to the kernel address space to access it
2540 	 */
2541 	parent_buffer = parent->buffer -
2542 		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
2543 	fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2544 	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2545 		binder_user_error("%d:%d parent offset not aligned correctly.\n",
2546 				  proc->pid, thread->pid);
2547 		return -EINVAL;
2548 	}
2549 	for (fdi = 0; fdi < fda->num_fds; fdi++) {
2550 		int ret = binder_translate_fd(&fd_array[fdi], t, thread,
2551 						in_reply_to);
2552 		if (ret < 0)
2553 			return ret;
2554 	}
2555 	return 0;
2556 }
2557 
2558 static int binder_fixup_parent(struct binder_transaction *t,
2559 			       struct binder_thread *thread,
2560 			       struct binder_buffer_object *bp,
2561 			       binder_size_t *off_start,
2562 			       binder_size_t num_valid,
2563 			       struct binder_buffer_object *last_fixup_obj,
2564 			       binder_size_t last_fixup_min_off)
2565 {
2566 	struct binder_buffer_object *parent;
2567 	u8 *parent_buffer;
2568 	struct binder_buffer *b = t->buffer;
2569 	struct binder_proc *proc = thread->proc;
2570 	struct binder_proc *target_proc = t->to_proc;
2571 
2572 	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2573 		return 0;
2574 
2575 	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2576 	if (!parent) {
2577 		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2578 				  proc->pid, thread->pid);
2579 		return -EINVAL;
2580 	}
2581 
2582 	if (!binder_validate_fixup(b, off_start,
2583 				   parent, bp->parent_offset,
2584 				   last_fixup_obj,
2585 				   last_fixup_min_off)) {
2586 		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2587 				  proc->pid, thread->pid);
2588 		return -EINVAL;
2589 	}
2590 
2591 	if (parent->length < sizeof(binder_uintptr_t) ||
2592 	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2593 		/* No space for a pointer here! */
2594 		binder_user_error("%d:%d got transaction with invalid parent offset\n",
2595 				  proc->pid, thread->pid);
2596 		return -EINVAL;
2597 	}
2598 	parent_buffer = (u8 *)((uintptr_t)parent->buffer -
2599 			binder_alloc_get_user_buffer_offset(
2600 				&target_proc->alloc));
2601 	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2602 
2603 	return 0;
2604 }
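
/*
 * Worked example (illustrative): if object B was sent with
 * BINDER_BUFFER_FLAG_HAS_PARENT, parent = A and parent_offset = 16,
 * this writes B's already-relocated target-space address into bytes
 * 16..23 of A's copy in the target buffer (8-byte binder_uintptr_t).
 */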
2605 
2606 /**
2607  * binder_proc_transaction() - sends a transaction to a process and wakes it up
2608  * @t:		transaction to send
2609  * @proc:	process to send the transaction to
2610  * @thread:	thread in @proc to send the transaction to (may be NULL)
2611  *
2612  * This function queues a transaction to the specified process. It will try
2613  * to find a thread in the target process to handle the transaction and
2614  * wake it up. If no thread is found, the work is queued to the
2615  * proc->todo list.
2616  *
2617  * If the @thread parameter is not NULL, the transaction is always queued
2618  * to the todo list of that specific thread.
2619  *
2620  * Return:	true if the transaction was successfully queued
2621  *		false if the target process or thread is dead
2622  */
2623 static bool binder_proc_transaction(struct binder_transaction *t,
2624 				    struct binder_proc *proc,
2625 				    struct binder_thread *thread)
2626 {
2627 	struct binder_node *node = t->buffer->target_node;
2628 	bool oneway = !!(t->flags & TF_ONE_WAY);
2629 	bool pending_async = false;
2630 
2631 	BUG_ON(!node);
2632 	binder_node_lock(node);
2633 	if (oneway) {
2634 		BUG_ON(thread);
2635 		if (node->has_async_transaction) {
2636 			pending_async = true;
2637 		} else {
2638 			node->has_async_transaction = true;
2639 		}
2640 	}
2641 
2642 	binder_inner_proc_lock(proc);
2643 
2644 	if (proc->is_dead || (thread && thread->is_dead)) {
2645 		binder_inner_proc_unlock(proc);
2646 		binder_node_unlock(node);
2647 		return false;
2648 	}
2649 
2650 	if (!thread && !pending_async)
2651 		thread = binder_select_thread_ilocked(proc);
2652 
2653 	if (thread)
2654 		binder_enqueue_thread_work_ilocked(thread, &t->work);
2655 	else if (!pending_async)
2656 		binder_enqueue_work_ilocked(&t->work, &proc->todo);
2657 	else
2658 		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2659 
2660 	if (!pending_async)
2661 		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2662 
2663 	binder_inner_proc_unlock(proc);
2664 	binder_node_unlock(node);
2665 
2666 	return true;
2667 }
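
/*
 * Queueing summary (illustrative):
 *
 *	oneway with an async txn in flight -> node->async_todo, no wakeup
 *	a thread was given or selected     -> thread->todo, wakeup
 *	otherwise                          -> proc->todo, wakeup
 */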
2668 
2669 /**
2670  * binder_get_node_refs_for_txn() - Get required refs on node for txn
2671  * @node:         struct binder_node for which to get refs
2672  * @procp:        set to @node->proc if it is valid
2673  * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
2674  *
2675  * User-space normally keeps the node alive when creating a transaction
2676  * since it has a reference to the target. The local strong ref keeps it
2677  * alive if the sending process dies before the target process processes
2678  * the transaction. If the source process is malicious or has a reference
2679  * counting bug, relying on the local strong ref can fail.
2680  *
2681  * Since user-space can cause the local strong ref to go away, we also take
2682  * a tmpref on the node to ensure it survives while we are constructing
2683  * the transaction. We also need a tmpref on the proc while we are
2684  * constructing the transaction, so we take that here as well.
2685  *
2686  * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2687  * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2688  * target proc has died, @error is set to BR_DEAD_REPLY.
2689  */
2690 static struct binder_node *binder_get_node_refs_for_txn(
2691 		struct binder_node *node,
2692 		struct binder_proc **procp,
2693 		uint32_t *error)
2694 {
2695 	struct binder_node *target_node = NULL;
2696 
2697 	binder_node_inner_lock(node);
2698 	if (node->proc) {
2699 		target_node = node;
2700 		binder_inc_node_nilocked(node, 1, 0, NULL);
2701 		binder_inc_node_tmpref_ilocked(node);
2702 		node->proc->tmp_ref++;
2703 		*procp = node->proc;
2704 	} else
2705 		*error = BR_DEAD_REPLY;
2706 	binder_node_inner_unlock(node);
2707 
2708 	return target_node;
2709 }
2710 
2711 static void binder_transaction(struct binder_proc *proc,
2712 			       struct binder_thread *thread,
2713 			       struct binder_transaction_data *tr, int reply,
2714 			       binder_size_t extra_buffers_size)
2715 {
2716 	int ret;
2717 	struct binder_transaction *t;
2718 	struct binder_work *w;
2719 	struct binder_work *tcomplete;
2720 	binder_size_t *offp, *off_end, *off_start;
2721 	binder_size_t off_min;
2722 	u8 *sg_bufp, *sg_buf_end;
2723 	struct binder_proc *target_proc = NULL;
2724 	struct binder_thread *target_thread = NULL;
2725 	struct binder_node *target_node = NULL;
2726 	struct binder_transaction *in_reply_to = NULL;
2727 	struct binder_transaction_log_entry *e;
2728 	uint32_t return_error = 0;
2729 	uint32_t return_error_param = 0;
2730 	uint32_t return_error_line = 0;
2731 	struct binder_buffer_object *last_fixup_obj = NULL;
2732 	binder_size_t last_fixup_min_off = 0;
2733 	struct binder_context *context = proc->context;
2734 	int t_debug_id = atomic_inc_return(&binder_last_id);
2735 
2736 	e = binder_transaction_log_add(&binder_transaction_log);
2737 	e->debug_id = t_debug_id;
2738 	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2739 	e->from_proc = proc->pid;
2740 	e->from_thread = thread->pid;
2741 	e->target_handle = tr->target.handle;
2742 	e->data_size = tr->data_size;
2743 	e->offsets_size = tr->offsets_size;
2744 	e->context_name = proc->context->name;
2745 
2746 	if (reply) {
2747 		binder_inner_proc_lock(proc);
2748 		in_reply_to = thread->transaction_stack;
2749 		if (in_reply_to == NULL) {
2750 			binder_inner_proc_unlock(proc);
2751 			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2752 					  proc->pid, thread->pid);
2753 			return_error = BR_FAILED_REPLY;
2754 			return_error_param = -EPROTO;
2755 			return_error_line = __LINE__;
2756 			goto err_empty_call_stack;
2757 		}
2758 		if (in_reply_to->to_thread != thread) {
2759 			spin_lock(&in_reply_to->lock);
2760 			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2761 				proc->pid, thread->pid, in_reply_to->debug_id,
2762 				in_reply_to->to_proc ?
2763 				in_reply_to->to_proc->pid : 0,
2764 				in_reply_to->to_thread ?
2765 				in_reply_to->to_thread->pid : 0);
2766 			spin_unlock(&in_reply_to->lock);
2767 			binder_inner_proc_unlock(proc);
2768 			return_error = BR_FAILED_REPLY;
2769 			return_error_param = -EPROTO;
2770 			return_error_line = __LINE__;
2771 			in_reply_to = NULL;
2772 			goto err_bad_call_stack;
2773 		}
2774 		thread->transaction_stack = in_reply_to->to_parent;
2775 		binder_inner_proc_unlock(proc);
2776 		binder_set_nice(in_reply_to->saved_priority);
2777 		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2778 		if (target_thread == NULL) {
2779 			/* annotation for sparse */
2780 			__release(&target_thread->proc->inner_lock);
2781 			return_error = BR_DEAD_REPLY;
2782 			return_error_line = __LINE__;
2783 			goto err_dead_binder;
2784 		}
2785 		if (target_thread->transaction_stack != in_reply_to) {
2786 			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2787 				proc->pid, thread->pid,
2788 				target_thread->transaction_stack ?
2789 				target_thread->transaction_stack->debug_id : 0,
2790 				in_reply_to->debug_id);
2791 			binder_inner_proc_unlock(target_thread->proc);
2792 			return_error = BR_FAILED_REPLY;
2793 			return_error_param = -EPROTO;
2794 			return_error_line = __LINE__;
2795 			in_reply_to = NULL;
2796 			target_thread = NULL;
2797 			goto err_dead_binder;
2798 		}
2799 		target_proc = target_thread->proc;
2800 		target_proc->tmp_ref++;
2801 		binder_inner_proc_unlock(target_thread->proc);
2802 	} else {
2803 		if (tr->target.handle) {
2804 			struct binder_ref *ref;
2805 
2806 			/*
2807 			 * There must already be a strong ref
2808 			 * on this node. If so, do a strong
2809 			 * increment on the node to ensure it
2810 			 * stays alive until the transaction is
2811 			 * done.
2812 			 */
2813 			binder_proc_lock(proc);
2814 			ref = binder_get_ref_olocked(proc, tr->target.handle,
2815 						     true);
2816 			if (ref) {
2817 				target_node = binder_get_node_refs_for_txn(
2818 						ref->node, &target_proc,
2819 						&return_error);
2820 			} else {
2821 				binder_user_error("%d:%d got transaction to invalid handle\n",
2822 						  proc->pid, thread->pid);
2823 				return_error = BR_FAILED_REPLY;
2824 			}
2825 			binder_proc_unlock(proc);
2826 		} else {
2827 			mutex_lock(&context->context_mgr_node_lock);
2828 			target_node = context->binder_context_mgr_node;
2829 			if (target_node)
2830 				target_node = binder_get_node_refs_for_txn(
2831 						target_node, &target_proc,
2832 						&return_error);
2833 			else
2834 				return_error = BR_DEAD_REPLY;
2835 			mutex_unlock(&context->context_mgr_node_lock);
2836 			if (target_node && target_proc == proc) {
2837 				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2838 						  proc->pid, thread->pid);
2839 				return_error = BR_FAILED_REPLY;
2840 				return_error_param = -EINVAL;
2841 				return_error_line = __LINE__;
2842 				goto err_invalid_target_handle;
2843 			}
2844 		}
2845 		if (!target_node) {
2846 			/*
2847 			 * return_error is set above
2848 			 */
2849 			return_error_param = -EINVAL;
2850 			return_error_line = __LINE__;
2851 			goto err_dead_binder;
2852 		}
2853 		e->to_node = target_node->debug_id;
2854 		if (security_binder_transaction(proc->tsk,
2855 						target_proc->tsk) < 0) {
2856 			return_error = BR_FAILED_REPLY;
2857 			return_error_param = -EPERM;
2858 			return_error_line = __LINE__;
2859 			goto err_invalid_target_handle;
2860 		}
2861 		binder_inner_proc_lock(proc);
2862 
2863 		w = list_first_entry_or_null(&thread->todo,
2864 					     struct binder_work, entry);
2865 		if (!(tr->flags & TF_ONE_WAY) && w &&
2866 		    w->type == BINDER_WORK_TRANSACTION) {
2867 			/*
2868 			 * Do not allow new outgoing transaction from a
2869 			 * thread that has a transaction at the head of
2870 			 * its todo list. Only need to check the head
2871 			 * because binder_select_thread_ilocked picks a
2872 			 * thread from proc->waiting_threads to enqueue
2873 			 * the transaction, and nothing is queued to the
2874 			 * todo list while the thread is on waiting_threads.
2875 			 */
2876 			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
2877 					  proc->pid, thread->pid);
2878 			binder_inner_proc_unlock(proc);
2879 			return_error = BR_FAILED_REPLY;
2880 			return_error_param = -EPROTO;
2881 			return_error_line = __LINE__;
2882 			goto err_bad_todo_list;
2883 		}
2884 
2885 		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2886 			struct binder_transaction *tmp;
2887 
2888 			tmp = thread->transaction_stack;
2889 			if (tmp->to_thread != thread) {
2890 				spin_lock(&tmp->lock);
2891 				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2892 					proc->pid, thread->pid, tmp->debug_id,
2893 					tmp->to_proc ? tmp->to_proc->pid : 0,
2894 					tmp->to_thread ?
2895 					tmp->to_thread->pid : 0);
2896 				spin_unlock(&tmp->lock);
2897 				binder_inner_proc_unlock(proc);
2898 				return_error = BR_FAILED_REPLY;
2899 				return_error_param = -EPROTO;
2900 				return_error_line = __LINE__;
2901 				goto err_bad_call_stack;
2902 			}
2903 			while (tmp) {
2904 				struct binder_thread *from;
2905 
2906 				spin_lock(&tmp->lock);
2907 				from = tmp->from;
2908 				if (from && from->proc == target_proc) {
2909 					atomic_inc(&from->tmp_ref);
2910 					target_thread = from;
2911 					spin_unlock(&tmp->lock);
2912 					break;
2913 				}
2914 				spin_unlock(&tmp->lock);
2915 				tmp = tmp->from_parent;
2916 			}
2917 		}
2918 		binder_inner_proc_unlock(proc);
2919 	}
2920 	if (target_thread)
2921 		e->to_thread = target_thread->pid;
2922 	e->to_proc = target_proc->pid;
2923 
2924 	/* TODO: reuse incoming transaction for reply */
2925 	t = kzalloc(sizeof(*t), GFP_KERNEL);
2926 	if (t == NULL) {
2927 		return_error = BR_FAILED_REPLY;
2928 		return_error_param = -ENOMEM;
2929 		return_error_line = __LINE__;
2930 		goto err_alloc_t_failed;
2931 	}
2932 	INIT_LIST_HEAD(&t->fd_fixups);
2933 	binder_stats_created(BINDER_STAT_TRANSACTION);
2934 	spin_lock_init(&t->lock);
2935 
2936 	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2937 	if (tcomplete == NULL) {
2938 		return_error = BR_FAILED_REPLY;
2939 		return_error_param = -ENOMEM;
2940 		return_error_line = __LINE__;
2941 		goto err_alloc_tcomplete_failed;
2942 	}
2943 	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2944 
2945 	t->debug_id = t_debug_id;
2946 
2947 	if (reply)
2948 		binder_debug(BINDER_DEBUG_TRANSACTION,
2949 			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2950 			     proc->pid, thread->pid, t->debug_id,
2951 			     target_proc->pid, target_thread->pid,
2952 			     (u64)tr->data.ptr.buffer,
2953 			     (u64)tr->data.ptr.offsets,
2954 			     (u64)tr->data_size, (u64)tr->offsets_size,
2955 			     (u64)extra_buffers_size);
2956 	else
2957 		binder_debug(BINDER_DEBUG_TRANSACTION,
2958 			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2959 			     proc->pid, thread->pid, t->debug_id,
2960 			     target_proc->pid, target_node->debug_id,
2961 			     (u64)tr->data.ptr.buffer,
2962 			     (u64)tr->data.ptr.offsets,
2963 			     (u64)tr->data_size, (u64)tr->offsets_size,
2964 			     (u64)extra_buffers_size);
2965 
2966 	if (!reply && !(tr->flags & TF_ONE_WAY))
2967 		t->from = thread;
2968 	else
2969 		t->from = NULL;
2970 	t->sender_euid = task_euid(proc->tsk);
2971 	t->to_proc = target_proc;
2972 	t->to_thread = target_thread;
2973 	t->code = tr->code;
2974 	t->flags = tr->flags;
2975 	t->priority = task_nice(current);
2976 
2977 	trace_binder_transaction(reply, t, target_node);
2978 
2979 	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
2980 		tr->offsets_size, extra_buffers_size,
2981 		!reply && (t->flags & TF_ONE_WAY));
2982 	if (IS_ERR(t->buffer)) {
2983 		/*
2984 		 * -ESRCH indicates VMA cleared. The target is dying.
2985 		 */
2986 		return_error_param = PTR_ERR(t->buffer);
2987 		return_error = return_error_param == -ESRCH ?
2988 			BR_DEAD_REPLY : BR_FAILED_REPLY;
2989 		return_error_line = __LINE__;
2990 		t->buffer = NULL;
2991 		goto err_binder_alloc_buf_failed;
2992 	}
2993 	t->buffer->debug_id = t->debug_id;
2994 	t->buffer->transaction = t;
2995 	t->buffer->target_node = target_node;
2996 	trace_binder_transaction_alloc_buf(t->buffer);
2997 	off_start = (binder_size_t *)(t->buffer->data +
2998 				      ALIGN(tr->data_size, sizeof(void *)));
2999 	offp = off_start;
3000 
3001 	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
3002 			   tr->data.ptr.buffer, tr->data_size)) {
3003 		binder_user_error("%d:%d got transaction with invalid data ptr\n",
3004 				proc->pid, thread->pid);
3005 		return_error = BR_FAILED_REPLY;
3006 		return_error_param = -EFAULT;
3007 		return_error_line = __LINE__;
3008 		goto err_copy_data_failed;
3009 	}
3010 	if (copy_from_user(offp, (const void __user *)(uintptr_t)
3011 			   tr->data.ptr.offsets, tr->offsets_size)) {
3012 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3013 				proc->pid, thread->pid);
3014 		return_error = BR_FAILED_REPLY;
3015 		return_error_param = -EFAULT;
3016 		return_error_line = __LINE__;
3017 		goto err_copy_data_failed;
3018 	}
3019 	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3020 		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3021 				proc->pid, thread->pid, (u64)tr->offsets_size);
3022 		return_error = BR_FAILED_REPLY;
3023 		return_error_param = -EINVAL;
3024 		return_error_line = __LINE__;
3025 		goto err_bad_offset;
3026 	}
3027 	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3028 		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3029 				  proc->pid, thread->pid,
3030 				  (u64)extra_buffers_size);
3031 		return_error = BR_FAILED_REPLY;
3032 		return_error_param = -EINVAL;
3033 		return_error_line = __LINE__;
3034 		goto err_bad_offset;
3035 	}
3036 	off_end = (void *)off_start + tr->offsets_size;
3037 	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
3038 	sg_buf_end = sg_bufp + extra_buffers_size;
3039 	off_min = 0;
3040 	for (; offp < off_end; offp++) {
3041 		struct binder_object_header *hdr;
3042 		size_t object_size = binder_validate_object(t->buffer, *offp);
3043 
3044 		if (object_size == 0 || *offp < off_min) {
3045 			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3046 					  proc->pid, thread->pid, (u64)*offp,
3047 					  (u64)off_min,
3048 					  (u64)t->buffer->data_size);
3049 			return_error = BR_FAILED_REPLY;
3050 			return_error_param = -EINVAL;
3051 			return_error_line = __LINE__;
3052 			goto err_bad_offset;
3053 		}
3054 
3055 		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
3056 		off_min = *offp + object_size;
3057 		switch (hdr->type) {
3058 		case BINDER_TYPE_BINDER:
3059 		case BINDER_TYPE_WEAK_BINDER: {
3060 			struct flat_binder_object *fp;
3061 
3062 			fp = to_flat_binder_object(hdr);
3063 			ret = binder_translate_binder(fp, t, thread);
3064 			if (ret < 0) {
3065 				return_error = BR_FAILED_REPLY;
3066 				return_error_param = ret;
3067 				return_error_line = __LINE__;
3068 				goto err_translate_failed;
3069 			}
3070 		} break;
3071 		case BINDER_TYPE_HANDLE:
3072 		case BINDER_TYPE_WEAK_HANDLE: {
3073 			struct flat_binder_object *fp;
3074 
3075 			fp = to_flat_binder_object(hdr);
3076 			ret = binder_translate_handle(fp, t, thread);
3077 			if (ret < 0) {
3078 				return_error = BR_FAILED_REPLY;
3079 				return_error_param = ret;
3080 				return_error_line = __LINE__;
3081 				goto err_translate_failed;
3082 			}
3083 		} break;
3084 
3085 		case BINDER_TYPE_FD: {
3086 			struct binder_fd_object *fp = to_binder_fd_object(hdr);
3087 			int ret = binder_translate_fd(&fp->fd, t, thread,
3088 						      in_reply_to);
3089 
3090 			if (ret < 0) {
3091 				return_error = BR_FAILED_REPLY;
3092 				return_error_param = ret;
3093 				return_error_line = __LINE__;
3094 				goto err_translate_failed;
3095 			}
3096 			fp->pad_binder = 0;
3097 		} break;
3098 		case BINDER_TYPE_FDA: {
3099 			struct binder_fd_array_object *fda =
3100 				to_binder_fd_array_object(hdr);
3101 			struct binder_buffer_object *parent =
3102 				binder_validate_ptr(t->buffer, fda->parent,
3103 						    off_start,
3104 						    offp - off_start);
3105 			if (!parent) {
3106 				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3107 						  proc->pid, thread->pid);
3108 				return_error = BR_FAILED_REPLY;
3109 				return_error_param = -EINVAL;
3110 				return_error_line = __LINE__;
3111 				goto err_bad_parent;
3112 			}
3113 			if (!binder_validate_fixup(t->buffer, off_start,
3114 						   parent, fda->parent_offset,
3115 						   last_fixup_obj,
3116 						   last_fixup_min_off)) {
3117 				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3118 						  proc->pid, thread->pid);
3119 				return_error = BR_FAILED_REPLY;
3120 				return_error_param = -EINVAL;
3121 				return_error_line = __LINE__;
3122 				goto err_bad_parent;
3123 			}
3124 			ret = binder_translate_fd_array(fda, parent, t, thread,
3125 							in_reply_to);
3126 			if (ret < 0) {
3127 				return_error = BR_FAILED_REPLY;
3128 				return_error_param = ret;
3129 				return_error_line = __LINE__;
3130 				goto err_translate_failed;
3131 			}
3132 			last_fixup_obj = parent;
3133 			last_fixup_min_off =
3134 				fda->parent_offset + sizeof(u32) * fda->num_fds;
3135 		} break;
3136 		case BINDER_TYPE_PTR: {
3137 			struct binder_buffer_object *bp =
3138 				to_binder_buffer_object(hdr);
3139 			size_t buf_left = sg_buf_end - sg_bufp;
3140 
3141 			if (bp->length > buf_left) {
3142 				binder_user_error("%d:%d got transaction with too large buffer\n",
3143 						  proc->pid, thread->pid);
3144 				return_error = BR_FAILED_REPLY;
3145 				return_error_param = -EINVAL;
3146 				return_error_line = __LINE__;
3147 				goto err_bad_offset;
3148 			}
3149 			if (copy_from_user(sg_bufp,
3150 					   (const void __user *)(uintptr_t)
3151 					   bp->buffer, bp->length)) {
3152 				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3153 						  proc->pid, thread->pid);
3154 				return_error_param = -EFAULT;
3155 				return_error = BR_FAILED_REPLY;
3156 				return_error_line = __LINE__;
3157 				goto err_copy_data_failed;
3158 			}
3159 			/* Fixup buffer pointer to target proc address space */
3160 			bp->buffer = (uintptr_t)sg_bufp +
3161 				binder_alloc_get_user_buffer_offset(
3162 						&target_proc->alloc);
3163 			sg_bufp += ALIGN(bp->length, sizeof(u64));
3164 
3165 			ret = binder_fixup_parent(t, thread, bp, off_start,
3166 						  offp - off_start,
3167 						  last_fixup_obj,
3168 						  last_fixup_min_off);
3169 			if (ret < 0) {
3170 				return_error = BR_FAILED_REPLY;
3171 				return_error_param = ret;
3172 				return_error_line = __LINE__;
3173 				goto err_translate_failed;
3174 			}
3175 			last_fixup_obj = bp;
3176 			last_fixup_min_off = 0;
3177 		} break;
3178 		default:
3179 			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3180 				proc->pid, thread->pid, hdr->type);
3181 			return_error = BR_FAILED_REPLY;
3182 			return_error_param = -EINVAL;
3183 			return_error_line = __LINE__;
3184 			goto err_bad_object_type;
3185 		}
3186 	}
3187 	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3188 	t->work.type = BINDER_WORK_TRANSACTION;
3189 
3190 	if (reply) {
3191 		binder_enqueue_thread_work(thread, tcomplete);
3192 		binder_inner_proc_lock(target_proc);
3193 		if (target_thread->is_dead) {
3194 			binder_inner_proc_unlock(target_proc);
3195 			goto err_dead_proc_or_thread;
3196 		}
3197 		BUG_ON(t->buffer->async_transaction != 0);
3198 		binder_pop_transaction_ilocked(target_thread, in_reply_to);
3199 		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3200 		binder_inner_proc_unlock(target_proc);
3201 		wake_up_interruptible_sync(&target_thread->wait);
3202 		binder_free_transaction(in_reply_to);
3203 	} else if (!(t->flags & TF_ONE_WAY)) {
3204 		BUG_ON(t->buffer->async_transaction != 0);
3205 		binder_inner_proc_lock(proc);
3206 		/*
3207 		 * Defer the TRANSACTION_COMPLETE so we don't return to
3208 		 * userspace immediately; this allows the target process to
3209 		 * start processing this transaction right away, reducing
3210 		 * latency. We will then return the TRANSACTION_COMPLETE when
3211 		 * the target replies (or there is an error).
3212 		 */
3213 		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3214 		t->need_reply = 1;
3215 		t->from_parent = thread->transaction_stack;
3216 		thread->transaction_stack = t;
3217 		binder_inner_proc_unlock(proc);
3218 		if (!binder_proc_transaction(t, target_proc, target_thread)) {
3219 			binder_inner_proc_lock(proc);
3220 			binder_pop_transaction_ilocked(thread, t);
3221 			binder_inner_proc_unlock(proc);
3222 			goto err_dead_proc_or_thread;
3223 		}
3224 	} else {
3225 		BUG_ON(target_node == NULL);
3226 		BUG_ON(t->buffer->async_transaction != 1);
3227 		binder_enqueue_thread_work(thread, tcomplete);
3228 		if (!binder_proc_transaction(t, target_proc, NULL))
3229 			goto err_dead_proc_or_thread;
3230 	}
3231 	if (target_thread)
3232 		binder_thread_dec_tmpref(target_thread);
3233 	binder_proc_dec_tmpref(target_proc);
3234 	if (target_node)
3235 		binder_dec_node_tmpref(target_node);
3236 	/*
3237 	 * write barrier to synchronize with initialization
3238 	 * of log entry
3239 	 */
3240 	smp_wmb();
3241 	WRITE_ONCE(e->debug_id_done, t_debug_id);
3242 	return;
3243 
3244 err_dead_proc_or_thread:
3245 	return_error = BR_DEAD_REPLY;
3246 	return_error_line = __LINE__;
3247 	binder_dequeue_work(proc, tcomplete);
3248 err_translate_failed:
3249 err_bad_object_type:
3250 err_bad_offset:
3251 err_bad_parent:
3252 err_copy_data_failed:
3253 	binder_free_txn_fixups(t);
3254 	trace_binder_transaction_failed_buffer_release(t->buffer);
3255 	binder_transaction_buffer_release(target_proc, t->buffer, offp);
3256 	if (target_node)
3257 		binder_dec_node_tmpref(target_node);
3258 	target_node = NULL;
3259 	t->buffer->transaction = NULL;
3260 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3261 err_binder_alloc_buf_failed:
3262 	kfree(tcomplete);
3263 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3264 err_alloc_tcomplete_failed:
3265 	kfree(t);
3266 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
3267 err_alloc_t_failed:
3268 err_bad_todo_list:
3269 err_bad_call_stack:
3270 err_empty_call_stack:
3271 err_dead_binder:
3272 err_invalid_target_handle:
3273 	if (target_thread)
3274 		binder_thread_dec_tmpref(target_thread);
3275 	if (target_proc)
3276 		binder_proc_dec_tmpref(target_proc);
3277 	if (target_node) {
3278 		binder_dec_node(target_node, 1, 0);
3279 		binder_dec_node_tmpref(target_node);
3280 	}
3281 
3282 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3283 		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3284 		     proc->pid, thread->pid, return_error, return_error_param,
3285 		     (u64)tr->data_size, (u64)tr->offsets_size,
3286 		     return_error_line);
3287 
3288 	{
3289 		struct binder_transaction_log_entry *fe;
3290 
3291 		e->return_error = return_error;
3292 		e->return_error_param = return_error_param;
3293 		e->return_error_line = return_error_line;
3294 		fe = binder_transaction_log_add(&binder_transaction_log_failed);
3295 		*fe = *e;
3296 		/*
3297 		 * write barrier to synchronize with initialization
3298 		 * of log entry
3299 		 */
3300 		smp_wmb();
3301 		WRITE_ONCE(e->debug_id_done, t_debug_id);
3302 		WRITE_ONCE(fe->debug_id_done, t_debug_id);
3303 	}
3304 
3305 	BUG_ON(thread->return_error.cmd != BR_OK);
3306 	if (in_reply_to) {
3307 		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3308 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3309 		binder_send_failed_reply(in_reply_to, return_error);
3310 	} else {
3311 		thread->return_error.cmd = return_error;
3312 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3313 	}
3314 }
3315 
3316 /**
3317  * binder_free_buf() - free the specified buffer
3318  * @proc:	binder proc that owns buffer
3319  * @buffer:	buffer to be freed
3320  *
3321  * If the buffer is for an async transaction, enqueue the next async
3322  * transaction from the node.
3323  *
3324  * Cleanup buffer and free it.
3325  */
3326 static void
3327 binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
3328 {
3329 	if (buffer->transaction) {
3330 		buffer->transaction->buffer = NULL;
3331 		buffer->transaction = NULL;
3332 	}
3333 	if (buffer->async_transaction && buffer->target_node) {
3334 		struct binder_node *buf_node;
3335 		struct binder_work *w;
3336 
3337 		buf_node = buffer->target_node;
3338 		binder_node_inner_lock(buf_node);
3339 		BUG_ON(!buf_node->has_async_transaction);
3340 		BUG_ON(buf_node->proc != proc);
3341 		w = binder_dequeue_work_head_ilocked(
3342 				&buf_node->async_todo);
3343 		if (!w) {
3344 			buf_node->has_async_transaction = false;
3345 		} else {
3346 			binder_enqueue_work_ilocked(
3347 					w, &proc->todo);
3348 			binder_wakeup_proc_ilocked(proc);
3349 		}
3350 		binder_node_inner_unlock(buf_node);
3351 	}
3352 	trace_binder_transaction_buffer_release(buffer);
3353 	binder_transaction_buffer_release(proc, buffer, NULL);
3354 	binder_alloc_free_buf(&proc->alloc, buffer);
3355 }
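
/*
 * Example (editor's sketch of the userspace side; not part of this
 * driver): binder_free_buf() above runs when a client returns a
 * transaction buffer with BC_FREE_BUFFER. The pointer passed back is
 * the tr.data.ptr.buffer value the client received with BR_TRANSACTION
 * or BR_REPLY. Only the exported UAPI in <linux/android/binder.h> is
 * assumed.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/android/binder.h>
 *
 *	static int free_received_buffer(int binder_fd,
 *					binder_uintptr_t data_ptr)
 *	{
 *		uint8_t wbuf[sizeof(uint32_t) + sizeof(binder_uintptr_t)];
 *		struct binder_write_read bwr;
 *		uint32_t cmd = BC_FREE_BUFFER;
 *
 *		memcpy(wbuf, &cmd, sizeof(cmd));
 *		memcpy(wbuf + sizeof(cmd), &data_ptr, sizeof(data_ptr));
 *		memset(&bwr, 0, sizeof(bwr));
 *		bwr.write_buffer = (binder_uintptr_t)(uintptr_t)wbuf;
 *		bwr.write_size = sizeof(wbuf);
 *		return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *	}
 */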
3356 
3357 static int binder_thread_write(struct binder_proc *proc,
3358 			struct binder_thread *thread,
3359 			binder_uintptr_t binder_buffer, size_t size,
3360 			binder_size_t *consumed)
3361 {
3362 	uint32_t cmd;
3363 	struct binder_context *context = proc->context;
3364 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3365 	void __user *ptr = buffer + *consumed;
3366 	void __user *end = buffer + size;
3367 
3368 	while (ptr < end && thread->return_error.cmd == BR_OK) {
3369 		int ret;
3370 
3371 		if (get_user(cmd, (uint32_t __user *)ptr))
3372 			return -EFAULT;
3373 		ptr += sizeof(uint32_t);
3374 		trace_binder_command(cmd);
3375 		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3376 			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3377 			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3378 			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3379 		}
3380 		switch (cmd) {
3381 		case BC_INCREFS:
3382 		case BC_ACQUIRE:
3383 		case BC_RELEASE:
3384 		case BC_DECREFS: {
3385 			uint32_t target;
3386 			const char *debug_string;
3387 			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3388 			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3389 			struct binder_ref_data rdata;
3390 
3391 			if (get_user(target, (uint32_t __user *)ptr))
3392 				return -EFAULT;
3393 
3394 			ptr += sizeof(uint32_t);
3395 			ret = -1;
3396 			if (increment && !target) {
3397 				struct binder_node *ctx_mgr_node;
3398 				mutex_lock(&context->context_mgr_node_lock);
3399 				ctx_mgr_node = context->binder_context_mgr_node;
3400 				if (ctx_mgr_node)
3401 					ret = binder_inc_ref_for_node(
3402 							proc, ctx_mgr_node,
3403 							strong, NULL, &rdata);
3404 				mutex_unlock(&context->context_mgr_node_lock);
3405 			}
3406 			if (ret)
3407 				ret = binder_update_ref_for_handle(
3408 						proc, target, increment, strong,
3409 						&rdata);
3410 			if (!ret && rdata.desc != target) {
3411 				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3412 					proc->pid, thread->pid,
3413 					target, rdata.desc);
3414 			}
3415 			switch (cmd) {
3416 			case BC_INCREFS:
3417 				debug_string = "IncRefs";
3418 				break;
3419 			case BC_ACQUIRE:
3420 				debug_string = "Acquire";
3421 				break;
3422 			case BC_RELEASE:
3423 				debug_string = "Release";
3424 				break;
3425 			case BC_DECREFS:
3426 			default:
3427 				debug_string = "DecRefs";
3428 				break;
3429 			}
3430 			if (ret) {
3431 				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3432 					proc->pid, thread->pid, debug_string,
3433 					strong, target, ret);
3434 				break;
3435 			}
3436 			binder_debug(BINDER_DEBUG_USER_REFS,
3437 				     "%d:%d %s ref %d desc %d s %d w %d\n",
3438 				     proc->pid, thread->pid, debug_string,
3439 				     rdata.debug_id, rdata.desc, rdata.strong,
3440 				     rdata.weak);
3441 			break;
3442 		}
3443 		case BC_INCREFS_DONE:
3444 		case BC_ACQUIRE_DONE: {
3445 			binder_uintptr_t node_ptr;
3446 			binder_uintptr_t cookie;
3447 			struct binder_node *node;
3448 			bool free_node;
3449 
3450 			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3451 				return -EFAULT;
3452 			ptr += sizeof(binder_uintptr_t);
3453 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3454 				return -EFAULT;
3455 			ptr += sizeof(binder_uintptr_t);
3456 			node = binder_get_node(proc, node_ptr);
3457 			if (node == NULL) {
3458 				binder_user_error("%d:%d %s u%016llx no match\n",
3459 					proc->pid, thread->pid,
3460 					cmd == BC_INCREFS_DONE ?
3461 					"BC_INCREFS_DONE" :
3462 					"BC_ACQUIRE_DONE",
3463 					(u64)node_ptr);
3464 				break;
3465 			}
3466 			if (cookie != node->cookie) {
3467 				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3468 					proc->pid, thread->pid,
3469 					cmd == BC_INCREFS_DONE ?
3470 					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3471 					(u64)node_ptr, node->debug_id,
3472 					(u64)cookie, (u64)node->cookie);
3473 				binder_put_node(node);
3474 				break;
3475 			}
3476 			binder_node_inner_lock(node);
3477 			if (cmd == BC_ACQUIRE_DONE) {
3478 				if (node->pending_strong_ref == 0) {
3479 					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3480 						proc->pid, thread->pid,
3481 						node->debug_id);
3482 					binder_node_inner_unlock(node);
3483 					binder_put_node(node);
3484 					break;
3485 				}
3486 				node->pending_strong_ref = 0;
3487 			} else {
3488 				if (node->pending_weak_ref == 0) {
3489 					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3490 						proc->pid, thread->pid,
3491 						node->debug_id);
3492 					binder_node_inner_unlock(node);
3493 					binder_put_node(node);
3494 					break;
3495 				}
3496 				node->pending_weak_ref = 0;
3497 			}
3498 			free_node = binder_dec_node_nilocked(node,
3499 					cmd == BC_ACQUIRE_DONE, 0);
3500 			WARN_ON(free_node);
3501 			binder_debug(BINDER_DEBUG_USER_REFS,
3502 				     "%d:%d %s node %d ls %d lw %d tr %d\n",
3503 				     proc->pid, thread->pid,
3504 				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3505 				     node->debug_id, node->local_strong_refs,
3506 				     node->local_weak_refs, node->tmp_refs);
3507 			binder_node_inner_unlock(node);
3508 			binder_put_node(node);
3509 			break;
3510 		}
3511 		case BC_ATTEMPT_ACQUIRE:
3512 			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3513 			return -EINVAL;
3514 		case BC_ACQUIRE_RESULT:
3515 			pr_err("BC_ACQUIRE_RESULT not supported\n");
3516 			return -EINVAL;
3517 
3518 		case BC_FREE_BUFFER: {
3519 			binder_uintptr_t data_ptr;
3520 			struct binder_buffer *buffer;
3521 
3522 			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3523 				return -EFAULT;
3524 			ptr += sizeof(binder_uintptr_t);
3525 
3526 			buffer = binder_alloc_prepare_to_free(&proc->alloc,
3527 							      data_ptr);
3528 			if (IS_ERR_OR_NULL(buffer)) {
3529 				if (PTR_ERR(buffer) == -EPERM) {
3530 					binder_user_error(
3531 						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3532 						proc->pid, thread->pid,
3533 						(u64)data_ptr);
3534 				} else {
3535 					binder_user_error(
3536 						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
3537 						proc->pid, thread->pid,
3538 						(u64)data_ptr);
3539 				}
3540 				break;
3541 			}
3542 			binder_debug(BINDER_DEBUG_FREE_BUFFER,
3543 				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3544 				     proc->pid, thread->pid, (u64)data_ptr,
3545 				     buffer->debug_id,
3546 				     buffer->transaction ? "active" : "finished");
3547 			binder_free_buf(proc, buffer);
3548 			break;
3549 		}
3550 
3551 		case BC_TRANSACTION_SG:
3552 		case BC_REPLY_SG: {
3553 			struct binder_transaction_data_sg tr;
3554 
3555 			if (copy_from_user(&tr, ptr, sizeof(tr)))
3556 				return -EFAULT;
3557 			ptr += sizeof(tr);
3558 			binder_transaction(proc, thread, &tr.transaction_data,
3559 					   cmd == BC_REPLY_SG, tr.buffers_size);
3560 			break;
3561 		}
3562 		case BC_TRANSACTION:
3563 		case BC_REPLY: {
3564 			struct binder_transaction_data tr;
3565 
3566 			if (copy_from_user(&tr, ptr, sizeof(tr)))
3567 				return -EFAULT;
3568 			ptr += sizeof(tr);
3569 			binder_transaction(proc, thread, &tr,
3570 					   cmd == BC_REPLY, 0);
3571 			break;
3572 		}
3573 
3574 		case BC_REGISTER_LOOPER:
3575 			binder_debug(BINDER_DEBUG_THREADS,
3576 				     "%d:%d BC_REGISTER_LOOPER\n",
3577 				     proc->pid, thread->pid);
3578 			binder_inner_proc_lock(proc);
3579 			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3580 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3581 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3582 					proc->pid, thread->pid);
3583 			} else if (proc->requested_threads == 0) {
3584 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3585 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3586 					proc->pid, thread->pid);
3587 			} else {
3588 				proc->requested_threads--;
3589 				proc->requested_threads_started++;
3590 			}
3591 			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3592 			binder_inner_proc_unlock(proc);
3593 			break;
3594 		case BC_ENTER_LOOPER:
3595 			binder_debug(BINDER_DEBUG_THREADS,
3596 				     "%d:%d BC_ENTER_LOOPER\n",
3597 				     proc->pid, thread->pid);
3598 			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3599 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3600 				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3601 					proc->pid, thread->pid);
3602 			}
3603 			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3604 			break;
3605 		case BC_EXIT_LOOPER:
3606 			binder_debug(BINDER_DEBUG_THREADS,
3607 				     "%d:%d BC_EXIT_LOOPER\n",
3608 				     proc->pid, thread->pid);
3609 			thread->looper |= BINDER_LOOPER_STATE_EXITED;
3610 			break;
3611 
3612 		case BC_REQUEST_DEATH_NOTIFICATION:
3613 		case BC_CLEAR_DEATH_NOTIFICATION: {
3614 			uint32_t target;
3615 			binder_uintptr_t cookie;
3616 			struct binder_ref *ref;
3617 			struct binder_ref_death *death = NULL;
3618 
3619 			if (get_user(target, (uint32_t __user *)ptr))
3620 				return -EFAULT;
3621 			ptr += sizeof(uint32_t);
3622 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3623 				return -EFAULT;
3624 			ptr += sizeof(binder_uintptr_t);
3625 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3626 				/*
3627 				 * Allocate memory for death notification
3628 				 * before taking lock
3629 				 */
3630 				death = kzalloc(sizeof(*death), GFP_KERNEL);
3631 				if (death == NULL) {
3632 					WARN_ON(thread->return_error.cmd !=
3633 						BR_OK);
3634 					thread->return_error.cmd = BR_ERROR;
3635 					binder_enqueue_thread_work(
3636 						thread,
3637 						&thread->return_error.work);
3638 					binder_debug(
3639 						BINDER_DEBUG_FAILED_TRANSACTION,
3640 						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3641 						proc->pid, thread->pid);
3642 					break;
3643 				}
3644 			}
3645 			binder_proc_lock(proc);
3646 			ref = binder_get_ref_olocked(proc, target, false);
3647 			if (ref == NULL) {
3648 				binder_user_error("%d:%d %s invalid ref %d\n",
3649 					proc->pid, thread->pid,
3650 					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3651 					"BC_REQUEST_DEATH_NOTIFICATION" :
3652 					"BC_CLEAR_DEATH_NOTIFICATION",
3653 					target);
3654 				binder_proc_unlock(proc);
3655 				kfree(death);
3656 				break;
3657 			}
3658 
3659 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3660 				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3661 				     proc->pid, thread->pid,
3662 				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3663 				     "BC_REQUEST_DEATH_NOTIFICATION" :
3664 				     "BC_CLEAR_DEATH_NOTIFICATION",
3665 				     (u64)cookie, ref->data.debug_id,
3666 				     ref->data.desc, ref->data.strong,
3667 				     ref->data.weak, ref->node->debug_id);
3668 
3669 			binder_node_lock(ref->node);
3670 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3671 				if (ref->death) {
3672 					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3673 						proc->pid, thread->pid);
3674 					binder_node_unlock(ref->node);
3675 					binder_proc_unlock(proc);
3676 					kfree(death);
3677 					break;
3678 				}
3679 				binder_stats_created(BINDER_STAT_DEATH);
3680 				INIT_LIST_HEAD(&death->work.entry);
3681 				death->cookie = cookie;
3682 				ref->death = death;
3683 				if (ref->node->proc == NULL) {
3684 					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3685 
3686 					binder_inner_proc_lock(proc);
3687 					binder_enqueue_work_ilocked(
3688 						&ref->death->work, &proc->todo);
3689 					binder_wakeup_proc_ilocked(proc);
3690 					binder_inner_proc_unlock(proc);
3691 				}
3692 			} else {
3693 				if (ref->death == NULL) {
3694 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3695 						proc->pid, thread->pid);
3696 					binder_node_unlock(ref->node);
3697 					binder_proc_unlock(proc);
3698 					break;
3699 				}
3700 				death = ref->death;
3701 				if (death->cookie != cookie) {
3702 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3703 						proc->pid, thread->pid,
3704 						(u64)death->cookie,
3705 						(u64)cookie);
3706 					binder_node_unlock(ref->node);
3707 					binder_proc_unlock(proc);
3708 					break;
3709 				}
3710 				ref->death = NULL;
3711 				binder_inner_proc_lock(proc);
3712 				if (list_empty(&death->work.entry)) {
3713 					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3714 					if (thread->looper &
3715 					    (BINDER_LOOPER_STATE_REGISTERED |
3716 					     BINDER_LOOPER_STATE_ENTERED))
3717 						binder_enqueue_thread_work_ilocked(
3718 								thread,
3719 								&death->work);
3720 					else {
3721 						binder_enqueue_work_ilocked(
3722 								&death->work,
3723 								&proc->todo);
3724 						binder_wakeup_proc_ilocked(
3725 								proc);
3726 					}
3727 				} else {
3728 					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3729 					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3730 				}
3731 				binder_inner_proc_unlock(proc);
3732 			}
3733 			binder_node_unlock(ref->node);
3734 			binder_proc_unlock(proc);
3735 		} break;
3736 		case BC_DEAD_BINDER_DONE: {
3737 			struct binder_work *w;
3738 			binder_uintptr_t cookie;
3739 			struct binder_ref_death *death = NULL;
3740 
3741 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3742 				return -EFAULT;
3743 
3744 			ptr += sizeof(cookie);
3745 			binder_inner_proc_lock(proc);
3746 			list_for_each_entry(w, &proc->delivered_death,
3747 					    entry) {
3748 				struct binder_ref_death *tmp_death =
3749 					container_of(w,
3750 						     struct binder_ref_death,
3751 						     work);
3752 
3753 				if (tmp_death->cookie == cookie) {
3754 					death = tmp_death;
3755 					break;
3756 				}
3757 			}
3758 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
3759 				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3760 				     proc->pid, thread->pid, (u64)cookie,
3761 				     death);
3762 			if (death == NULL) {
3763 				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3764 					proc->pid, thread->pid, (u64)cookie);
3765 				binder_inner_proc_unlock(proc);
3766 				break;
3767 			}
3768 			binder_dequeue_work_ilocked(&death->work);
3769 			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3770 				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3771 				if (thread->looper &
3772 					(BINDER_LOOPER_STATE_REGISTERED |
3773 					 BINDER_LOOPER_STATE_ENTERED))
3774 					binder_enqueue_thread_work_ilocked(
3775 						thread, &death->work);
3776 				else {
3777 					binder_enqueue_work_ilocked(
3778 							&death->work,
3779 							&proc->todo);
3780 					binder_wakeup_proc_ilocked(proc);
3781 				}
3782 			}
3783 			binder_inner_proc_unlock(proc);
3784 		} break;
3785 
3786 		default:
3787 			pr_err("%d:%d unknown command %d\n",
3788 			       proc->pid, thread->pid, cmd);
3789 			return -EINVAL;
3790 		}
3791 		*consumed = ptr - buffer;
3792 	}
3793 	return 0;
3794 }
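
/*
 * Example (editor's sketch of the userspace side; not part of this
 * driver): the write stream parsed by binder_thread_write() above is a
 * packed sequence of 32-bit BC_* opcodes, each followed immediately by
 * its payload. A minimal one-way transaction to handle 0 (the context
 * manager) could be queued like this; PING_CODE is a hypothetical
 * protocol code, not something the driver defines.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/android/binder.h>
 *
 *	#define PING_CODE 1	// hypothetical; meaning is up to userspace
 *
 *	static int send_oneway_ping(int binder_fd)
 *	{
 *		uint8_t wbuf[2 * sizeof(uint32_t) +
 *			     sizeof(struct binder_transaction_data)];
 *		struct binder_transaction_data txn;
 *		struct binder_write_read bwr;
 *		uint32_t cmd;
 *		size_t off = 0;
 *
 *		cmd = BC_ENTER_LOOPER;			// no payload
 *		memcpy(wbuf + off, &cmd, sizeof(cmd));
 *		off += sizeof(cmd);
 *
 *		memset(&txn, 0, sizeof(txn));
 *		txn.target.handle = 0;			// context manager
 *		txn.code = PING_CODE;
 *		txn.flags = TF_ONE_WAY;			// no reply expected
 *		cmd = BC_TRANSACTION;
 *		memcpy(wbuf + off, &cmd, sizeof(cmd));
 *		off += sizeof(cmd);
 *		memcpy(wbuf + off, &txn, sizeof(txn));
 *		off += sizeof(txn);
 *
 *		memset(&bwr, 0, sizeof(bwr));
 *		bwr.write_buffer = (binder_uintptr_t)(uintptr_t)wbuf;
 *		bwr.write_size = off;
 *		return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *	}
 */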
3795 
3796 static void binder_stat_br(struct binder_proc *proc,
3797 			   struct binder_thread *thread, uint32_t cmd)
3798 {
3799 	trace_binder_return(cmd);
3800 	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3801 		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3802 		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3803 		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3804 	}
3805 }
3806 
3807 static int binder_put_node_cmd(struct binder_proc *proc,
3808 			       struct binder_thread *thread,
3809 			       void __user **ptrp,
3810 			       binder_uintptr_t node_ptr,
3811 			       binder_uintptr_t node_cookie,
3812 			       int node_debug_id,
3813 			       uint32_t cmd, const char *cmd_name)
3814 {
3815 	void __user *ptr = *ptrp;
3816 
3817 	if (put_user(cmd, (uint32_t __user *)ptr))
3818 		return -EFAULT;
3819 	ptr += sizeof(uint32_t);
3820 
3821 	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3822 		return -EFAULT;
3823 	ptr += sizeof(binder_uintptr_t);
3824 
3825 	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3826 		return -EFAULT;
3827 	ptr += sizeof(binder_uintptr_t);
3828 
3829 	binder_stat_br(proc, thread, cmd);
3830 	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3831 		     proc->pid, thread->pid, cmd_name, node_debug_id,
3832 		     (u64)node_ptr, (u64)node_cookie);
3833 
3834 	*ptrp = ptr;
3835 	return 0;
3836 }
3837 
3838 static int binder_wait_for_work(struct binder_thread *thread,
3839 				bool do_proc_work)
3840 {
3841 	DEFINE_WAIT(wait);
3842 	struct binder_proc *proc = thread->proc;
3843 	int ret = 0;
3844 
3845 	freezer_do_not_count();
3846 	binder_inner_proc_lock(proc);
3847 	for (;;) {
3848 		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3849 		if (binder_has_work_ilocked(thread, do_proc_work))
3850 			break;
3851 		if (do_proc_work)
3852 			list_add(&thread->waiting_thread_node,
3853 				 &proc->waiting_threads);
3854 		binder_inner_proc_unlock(proc);
3855 		schedule();
3856 		binder_inner_proc_lock(proc);
3857 		list_del_init(&thread->waiting_thread_node);
3858 		if (signal_pending(current)) {
3859 			ret = -ERESTARTSYS;
3860 			break;
3861 		}
3862 	}
3863 	finish_wait(&thread->wait, &wait);
3864 	binder_inner_proc_unlock(proc);
3865 	freezer_count();
3866 
3867 	return ret;
3868 }
3869 
3870 /**
3871  * binder_apply_fd_fixups() - finish fd translation
3872  * @t:	binder transaction with list of fd fixups
3873  *
3874  * Now that we are in the context of the transaction target
3875  * process, we can allocate and install fds. Process the
3876  * list of fds to translate and fix up the buffer with the
3877  * new fds.
3878  *
3879  * If we fail to allocate an fd, then free the resources by
3880  * fput'ing files that have not been processed and ksys_close'ing
3881  * any fds that have already been allocated.
3882  */
3883 static int binder_apply_fd_fixups(struct binder_transaction *t)
3884 {
3885 	struct binder_txn_fd_fixup *fixup, *tmp;
3886 	int ret = 0;
3887 
3888 	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
3889 		int fd = get_unused_fd_flags(O_CLOEXEC);
3890 		u32 *fdp;
3891 
3892 		if (fd < 0) {
3893 			binder_debug(BINDER_DEBUG_TRANSACTION,
3894 				     "failed fd fixup txn %d fd %d\n",
3895 				     t->debug_id, fd);
3896 			ret = -ENOMEM;
3897 			break;
3898 		}
3899 		binder_debug(BINDER_DEBUG_TRANSACTION,
3900 			     "fd fixup txn %d fd %d\n",
3901 			     t->debug_id, fd);
3902 		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
3903 		fd_install(fd, fixup->file);
3904 		fixup->file = NULL;
3905 		fdp = (u32 *)(t->buffer->data + fixup->offset);
3906 		/*
3907 		 * This store can cause problems for CPUs with a
3908 		 * VIVT cache (eg ARMv5) since the cache cannot
3909 		 * detect virtual aliases to the same physical cacheline.
3910 		 * To support VIVT, this address and the user-space VA
3911 		 * would both need to be flushed. Since this kernel
3912 		 * VA is not constructed via page_to_virt(), we can't
3913 		 * use flush_dcache_page() on it, so we'd have to use
3914 		 * an internal function. If devices with VIVT ever
3915 		 * need to run Android, we'll either need to go back
3916 		 * to patching the translated fd from the sender side
3917 		 * (using the non-standard kernel functions), or rework
3918 		 * how the kernel uses the buffer to use page_to_virt()
3919 		 * addresses instead of allocating in our own vm area.
3920 		 *
3921 		 * For now, we disable compilation if CONFIG_CPU_CACHE_VIVT.
3922 		 */
3923 		*fdp = fd;
3924 	}
3925 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
3926 		if (fixup->file) {
3927 			fput(fixup->file);
3928 		} else if (ret) {
3929 			u32 *fdp = (u32 *)(t->buffer->data + fixup->offset);
3930 
3931 			ksys_close(*fdp);
3932 		}
3933 		list_del(&fixup->fixup_entry);
3934 		kfree(fixup);
3935 	}
3936 
3937 	return ret;
3938 }
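
/*
 * Example (editor's sketch of the sending side in userspace; not part
 * of this driver): the fixups applied above originate from
 * BINDER_TYPE_FD objects that the sender embeds in the transaction
 * data, with the object's offset recorded in the offsets array.
 * some_local_fd is a hypothetical open descriptor to pass.
 *
 *	#include <linux/android/binder.h>
 *
 *	struct binder_fd_object obj = {
 *		.hdr.type = BINDER_TYPE_FD,
 *		.fd = some_local_fd,
 *	};
 *	binder_size_t offsets[1] = { 0 };	// obj sits at offset 0
 *
 *	// In the struct binder_transaction_data being prepared:
 *	//	tr.data.ptr.buffer  -> buffer containing obj
 *	//	tr.data.ptr.offsets -> offsets
 *	//	tr.data_size = sizeof(obj);
 *	//	tr.offsets_size = sizeof(offsets);
 *
 * On delivery, binder_apply_fd_fixups() overwrites the fd field in the
 * receiver's copy with a descriptor newly installed in the receiving
 * process.
 */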
3939 
3940 static int binder_thread_read(struct binder_proc *proc,
3941 			      struct binder_thread *thread,
3942 			      binder_uintptr_t binder_buffer, size_t size,
3943 			      binder_size_t *consumed, int non_block)
3944 {
3945 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3946 	void __user *ptr = buffer + *consumed;
3947 	void __user *end = buffer + size;
3948 
3949 	int ret = 0;
3950 	int wait_for_proc_work;
3951 
3952 	if (*consumed == 0) {
3953 		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3954 			return -EFAULT;
3955 		ptr += sizeof(uint32_t);
3956 	}
3957 
3958 retry:
3959 	binder_inner_proc_lock(proc);
3960 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
3961 	binder_inner_proc_unlock(proc);
3962 
3963 	thread->looper |= BINDER_LOOPER_STATE_WAITING;
3964 
3965 	trace_binder_wait_for_work(wait_for_proc_work,
3966 				   !!thread->transaction_stack,
3967 				   !binder_worklist_empty(proc, &thread->todo));
3968 	if (wait_for_proc_work) {
3969 		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3970 					BINDER_LOOPER_STATE_ENTERED))) {
3971 			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
3972 				proc->pid, thread->pid, thread->looper);
3973 			wait_event_interruptible(binder_user_error_wait,
3974 						 binder_stop_on_user_error < 2);
3975 		}
3976 		binder_set_nice(proc->default_priority);
3977 	}
3978 
3979 	if (non_block) {
3980 		if (!binder_has_work(thread, wait_for_proc_work))
3981 			ret = -EAGAIN;
3982 	} else {
3983 		ret = binder_wait_for_work(thread, wait_for_proc_work);
3984 	}
3985 
3986 	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3987 
3988 	if (ret)
3989 		return ret;
3990 
3991 	while (1) {
3992 		uint32_t cmd;
3993 		struct binder_transaction_data tr;
3994 		struct binder_work *w = NULL;
3995 		struct list_head *list = NULL;
3996 		struct binder_transaction *t = NULL;
3997 		struct binder_thread *t_from;
3998 
3999 		binder_inner_proc_lock(proc);
4000 		if (!binder_worklist_empty_ilocked(&thread->todo))
4001 			list = &thread->todo;
4002 		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4003 			   wait_for_proc_work)
4004 			list = &proc->todo;
4005 		else {
4006 			binder_inner_proc_unlock(proc);
4007 
4008 			/* no data added (only the initial 4-byte BR_NOOP) */
4009 			if (ptr - buffer == 4 && !thread->looper_need_return)
4010 				goto retry;
4011 			break;
4012 		}
4013 
4014 		if (end - ptr < sizeof(tr) + 4) {
4015 			binder_inner_proc_unlock(proc);
4016 			break;
4017 		}
4018 		w = binder_dequeue_work_head_ilocked(list);
4019 		if (binder_worklist_empty_ilocked(&thread->todo))
4020 			thread->process_todo = false;
4021 
4022 		switch (w->type) {
4023 		case BINDER_WORK_TRANSACTION: {
4024 			binder_inner_proc_unlock(proc);
4025 			t = container_of(w, struct binder_transaction, work);
4026 		} break;
4027 		case BINDER_WORK_RETURN_ERROR: {
4028 			struct binder_error *e = container_of(
4029 					w, struct binder_error, work);
4030 
4031 			WARN_ON(e->cmd == BR_OK);
4032 			binder_inner_proc_unlock(proc);
4033 			if (put_user(e->cmd, (uint32_t __user *)ptr))
4034 				return -EFAULT;
4035 			cmd = e->cmd;
4036 			e->cmd = BR_OK;
4037 			ptr += sizeof(uint32_t);
4038 
4039 			binder_stat_br(proc, thread, cmd);
4040 		} break;
4041 		case BINDER_WORK_TRANSACTION_COMPLETE: {
4042 			binder_inner_proc_unlock(proc);
4043 			cmd = BR_TRANSACTION_COMPLETE;
4044 			if (put_user(cmd, (uint32_t __user *)ptr))
4045 				return -EFAULT;
4046 			ptr += sizeof(uint32_t);
4047 
4048 			binder_stat_br(proc, thread, cmd);
4049 			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4050 				     "%d:%d BR_TRANSACTION_COMPLETE\n",
4051 				     proc->pid, thread->pid);
4052 			kfree(w);
4053 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4054 		} break;
4055 		case BINDER_WORK_NODE: {
4056 			struct binder_node *node = container_of(w, struct binder_node, work);
4057 			int strong, weak;
4058 			binder_uintptr_t node_ptr = node->ptr;
4059 			binder_uintptr_t node_cookie = node->cookie;
4060 			int node_debug_id = node->debug_id;
4061 			int has_weak_ref;
4062 			int has_strong_ref;
4063 			void __user *orig_ptr = ptr;
4064 
4065 			BUG_ON(proc != node->proc);
4066 			strong = node->internal_strong_refs ||
4067 					node->local_strong_refs;
4068 			weak = !hlist_empty(&node->refs) ||
4069 					node->local_weak_refs ||
4070 					node->tmp_refs || strong;
4071 			has_strong_ref = node->has_strong_ref;
4072 			has_weak_ref = node->has_weak_ref;
4073 
4074 			if (weak && !has_weak_ref) {
4075 				node->has_weak_ref = 1;
4076 				node->pending_weak_ref = 1;
4077 				node->local_weak_refs++;
4078 			}
4079 			if (strong && !has_strong_ref) {
4080 				node->has_strong_ref = 1;
4081 				node->pending_strong_ref = 1;
4082 				node->local_strong_refs++;
4083 			}
4084 			if (!strong && has_strong_ref)
4085 				node->has_strong_ref = 0;
4086 			if (!weak && has_weak_ref)
4087 				node->has_weak_ref = 0;
4088 			if (!weak && !strong) {
4089 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4090 					     "%d:%d node %d u%016llx c%016llx deleted\n",
4091 					     proc->pid, thread->pid,
4092 					     node_debug_id,
4093 					     (u64)node_ptr,
4094 					     (u64)node_cookie);
4095 				rb_erase(&node->rb_node, &proc->nodes);
4096 				binder_inner_proc_unlock(proc);
4097 				binder_node_lock(node);
4098 				/*
4099 				 * Acquire the node lock before freeing the
4100 				 * node to serialize with other threads that
4101 				 * may have been holding the node lock while
4102 				 * decrementing this node (avoids race where
4103 				 * this thread frees while the other thread
4104 				 * is unlocking the node after the final
4105 				 * decrement)
4106 				 */
4107 				binder_node_unlock(node);
4108 				binder_free_node(node);
4109 			} else
4110 				binder_inner_proc_unlock(proc);
4111 
4112 			if (weak && !has_weak_ref)
4113 				ret = binder_put_node_cmd(
4114 						proc, thread, &ptr, node_ptr,
4115 						node_cookie, node_debug_id,
4116 						BR_INCREFS, "BR_INCREFS");
4117 			if (!ret && strong && !has_strong_ref)
4118 				ret = binder_put_node_cmd(
4119 						proc, thread, &ptr, node_ptr,
4120 						node_cookie, node_debug_id,
4121 						BR_ACQUIRE, "BR_ACQUIRE");
4122 			if (!ret && !strong && has_strong_ref)
4123 				ret = binder_put_node_cmd(
4124 						proc, thread, &ptr, node_ptr,
4125 						node_cookie, node_debug_id,
4126 						BR_RELEASE, "BR_RELEASE");
4127 			if (!ret && !weak && has_weak_ref)
4128 				ret = binder_put_node_cmd(
4129 						proc, thread, &ptr, node_ptr,
4130 						node_cookie, node_debug_id,
4131 						BR_DECREFS, "BR_DECREFS");
4132 			if (orig_ptr == ptr)
4133 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4134 					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
4135 					     proc->pid, thread->pid,
4136 					     node_debug_id,
4137 					     (u64)node_ptr,
4138 					     (u64)node_cookie);
4139 			if (ret)
4140 				return ret;
4141 		} break;
4142 		case BINDER_WORK_DEAD_BINDER:
4143 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4144 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4145 			struct binder_ref_death *death;
4146 			uint32_t cmd;
4147 			binder_uintptr_t cookie;
4148 
4149 			death = container_of(w, struct binder_ref_death, work);
4150 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4151 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4152 			else
4153 				cmd = BR_DEAD_BINDER;
4154 			cookie = death->cookie;
4155 
4156 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4157 				     "%d:%d %s %016llx\n",
4158 				      proc->pid, thread->pid,
4159 				      cmd == BR_DEAD_BINDER ?
4160 				      "BR_DEAD_BINDER" :
4161 				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4162 				      (u64)cookie);
4163 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4164 				binder_inner_proc_unlock(proc);
4165 				kfree(death);
4166 				binder_stats_deleted(BINDER_STAT_DEATH);
4167 			} else {
4168 				binder_enqueue_work_ilocked(
4169 						w, &proc->delivered_death);
4170 				binder_inner_proc_unlock(proc);
4171 			}
4172 			if (put_user(cmd, (uint32_t __user *)ptr))
4173 				return -EFAULT;
4174 			ptr += sizeof(uint32_t);
4175 			if (put_user(cookie,
4176 				     (binder_uintptr_t __user *)ptr))
4177 				return -EFAULT;
4178 			ptr += sizeof(binder_uintptr_t);
4179 			binder_stat_br(proc, thread, cmd);
4180 			if (cmd == BR_DEAD_BINDER)
4181 				goto done; /* DEAD_BINDER notifications can cause transactions */
4182 		} break;
4183 		default:
4184 			binder_inner_proc_unlock(proc);
4185 			pr_err("%d:%d: bad work type %d\n",
4186 			       proc->pid, thread->pid, w->type);
4187 			break;
4188 		}
4189 
4190 		if (!t)
4191 			continue;
4192 
4193 		BUG_ON(t->buffer == NULL);
4194 		if (t->buffer->target_node) {
4195 			struct binder_node *target_node = t->buffer->target_node;
4196 
4197 			tr.target.ptr = target_node->ptr;
4198 			tr.cookie =  target_node->cookie;
4199 			t->saved_priority = task_nice(current);
4200 			if (t->priority < target_node->min_priority &&
4201 			    !(t->flags & TF_ONE_WAY))
4202 				binder_set_nice(t->priority);
4203 			else if (!(t->flags & TF_ONE_WAY) ||
4204 				 t->saved_priority > target_node->min_priority)
4205 				binder_set_nice(target_node->min_priority);
4206 			cmd = BR_TRANSACTION;
4207 		} else {
4208 			tr.target.ptr = 0;
4209 			tr.cookie = 0;
4210 			cmd = BR_REPLY;
4211 		}
4212 		tr.code = t->code;
4213 		tr.flags = t->flags;
4214 		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4215 
4216 		t_from = binder_get_txn_from(t);
4217 		if (t_from) {
4218 			struct task_struct *sender = t_from->proc->tsk;
4219 
4220 			tr.sender_pid = task_tgid_nr_ns(sender,
4221 							task_active_pid_ns(current));
4222 		} else {
4223 			tr.sender_pid = 0;
4224 		}
4225 
4226 		ret = binder_apply_fd_fixups(t);
4227 		if (ret) {
4228 			struct binder_buffer *buffer = t->buffer;
4229 			bool oneway = !!(t->flags & TF_ONE_WAY);
4230 			int tid = t->debug_id;
4231 
4232 			if (t_from)
4233 				binder_thread_dec_tmpref(t_from);
4234 			buffer->transaction = NULL;
4235 			binder_cleanup_transaction(t, "fd fixups failed",
4236 						   BR_FAILED_REPLY);
4237 			binder_free_buf(proc, buffer);
4238 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4239 				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4240 				     proc->pid, thread->pid,
4241 				     oneway ? "async " :
4242 					(cmd == BR_REPLY ? "reply " : ""),
4243 				     tid, BR_FAILED_REPLY, ret, __LINE__);
4244 			if (cmd == BR_REPLY) {
4245 				cmd = BR_FAILED_REPLY;
4246 				if (put_user(cmd, (uint32_t __user *)ptr))
4247 					return -EFAULT;
4248 				ptr += sizeof(uint32_t);
4249 				binder_stat_br(proc, thread, cmd);
4250 				break;
4251 			}
4252 			continue;
4253 		}
4254 		tr.data_size = t->buffer->data_size;
4255 		tr.offsets_size = t->buffer->offsets_size;
4256 		tr.data.ptr.buffer = (binder_uintptr_t)
4257 			((uintptr_t)t->buffer->data +
4258 			binder_alloc_get_user_buffer_offset(&proc->alloc));
4259 		tr.data.ptr.offsets = tr.data.ptr.buffer +
4260 					ALIGN(t->buffer->data_size,
4261 					    sizeof(void *));
4262 
4263 		if (put_user(cmd, (uint32_t __user *)ptr)) {
4264 			if (t_from)
4265 				binder_thread_dec_tmpref(t_from);
4266 
4267 			binder_cleanup_transaction(t, "put_user failed",
4268 						   BR_FAILED_REPLY);
4269 
4270 			return -EFAULT;
4271 		}
4272 		ptr += sizeof(uint32_t);
4273 		if (copy_to_user(ptr, &tr, sizeof(tr))) {
4274 			if (t_from)
4275 				binder_thread_dec_tmpref(t_from);
4276 
4277 			binder_cleanup_transaction(t, "copy_to_user failed",
4278 						   BR_FAILED_REPLY);
4279 
4280 			return -EFAULT;
4281 		}
4282 		ptr += sizeof(tr);
4283 
4284 		trace_binder_transaction_received(t);
4285 		binder_stat_br(proc, thread, cmd);
4286 		binder_debug(BINDER_DEBUG_TRANSACTION,
4287 			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4288 			     proc->pid, thread->pid,
4289 			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4290 			     "BR_REPLY",
4291 			     t->debug_id, t_from ? t_from->proc->pid : 0,
4292 			     t_from ? t_from->pid : 0, cmd,
4293 			     t->buffer->data_size, t->buffer->offsets_size,
4294 			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
4295 
4296 		if (t_from)
4297 			binder_thread_dec_tmpref(t_from);
4298 		t->buffer->allow_user_free = 1;
4299 		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
4300 			binder_inner_proc_lock(thread->proc);
4301 			t->to_parent = thread->transaction_stack;
4302 			t->to_thread = thread;
4303 			thread->transaction_stack = t;
4304 			binder_inner_proc_unlock(thread->proc);
4305 		} else {
4306 			binder_free_transaction(t);
4307 		}
4308 		break;
4309 	}
4310 
4311 done:
4312 
4313 	*consumed = ptr - buffer;
4314 	binder_inner_proc_lock(proc);
4315 	if (proc->requested_threads == 0 &&
4316 	    list_empty(&thread->proc->waiting_threads) &&
4317 	    proc->requested_threads_started < proc->max_threads &&
4318 	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4319 	     BINDER_LOOPER_STATE_ENTERED))
4320 	    /* user-space fails to spawn a new thread if we leave this out */) {
4321 		proc->requested_threads++;
4322 		binder_inner_proc_unlock(proc);
4323 		binder_debug(BINDER_DEBUG_THREADS,
4324 			     "%d:%d BR_SPAWN_LOOPER\n",
4325 			     proc->pid, thread->pid);
4326 		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4327 			return -EFAULT;
4328 		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4329 	} else
4330 		binder_inner_proc_unlock(proc);
4331 	return 0;
4332 }
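
/*
 * Example (editor's sketch of the userspace side; not part of this
 * driver): the read stream produced by binder_thread_read() above
 * mirrors the write stream: packed 32-bit BR_* codes, each followed by
 * its payload. A minimal consumer that drains one read:
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/android/binder.h>
 *
 *	static int drain_one_read(int binder_fd)
 *	{
 *		uint8_t rbuf[256];
 *		struct binder_write_read bwr;
 *		size_t off = 0;
 *
 *		memset(&bwr, 0, sizeof(bwr));
 *		bwr.read_buffer = (binder_uintptr_t)(uintptr_t)rbuf;
 *		bwr.read_size = sizeof(rbuf);
 *		if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)
 *			return -1;
 *
 *		while (off + sizeof(uint32_t) <= bwr.read_consumed) {
 *			uint32_t cmd;
 *
 *			memcpy(&cmd, rbuf + off, sizeof(cmd));
 *			off += sizeof(cmd);
 *			switch (cmd) {
 *			case BR_NOOP:
 *			case BR_SPAWN_LOOPER:
 *			case BR_TRANSACTION_COMPLETE:
 *				break;	// no payload
 *			case BR_TRANSACTION:
 *			case BR_REPLY:
 *				// payload: struct binder_transaction_data;
 *				// real code would dispatch it, then send
 *				// BC_FREE_BUFFER for tr.data.ptr.buffer
 *				off += sizeof(struct binder_transaction_data);
 *				break;
 *			default:
 *				return -1;	// not handled in this sketch
 *			}
 *		}
 *		return 0;
 *	}
 */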
4333 
4334 static void binder_release_work(struct binder_proc *proc,
4335 				struct list_head *list)
4336 {
4337 	struct binder_work *w;
4338 
4339 	while (1) {
4340 		w = binder_dequeue_work_head(proc, list);
4341 		if (!w)
4342 			return;
4343 
4344 		switch (w->type) {
4345 		case BINDER_WORK_TRANSACTION: {
4346 			struct binder_transaction *t;
4347 
4348 			t = container_of(w, struct binder_transaction, work);
4349 
4350 			binder_cleanup_transaction(t, "process died.",
4351 						   BR_DEAD_REPLY);
4352 		} break;
4353 		case BINDER_WORK_RETURN_ERROR: {
4354 			struct binder_error *e = container_of(
4355 					w, struct binder_error, work);
4356 
4357 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4358 				"undelivered TRANSACTION_ERROR: %u\n",
4359 				e->cmd);
4360 		} break;
4361 		case BINDER_WORK_TRANSACTION_COMPLETE: {
4362 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4363 				"undelivered TRANSACTION_COMPLETE\n");
4364 			kfree(w);
4365 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4366 		} break;
4367 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4368 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4369 			struct binder_ref_death *death;
4370 
4371 			death = container_of(w, struct binder_ref_death, work);
4372 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4373 				"undelivered death notification, %016llx\n",
4374 				(u64)death->cookie);
4375 			kfree(death);
4376 			binder_stats_deleted(BINDER_STAT_DEATH);
4377 		} break;
4378 		default:
4379 			pr_err("unexpected work type, %d, not freed\n",
4380 			       w->type);
4381 			break;
4382 		}
4383 	}
4384 
4385 }
4386 
4387 static struct binder_thread *binder_get_thread_ilocked(
4388 		struct binder_proc *proc, struct binder_thread *new_thread)
4389 {
4390 	struct binder_thread *thread = NULL;
4391 	struct rb_node *parent = NULL;
4392 	struct rb_node **p = &proc->threads.rb_node;
4393 
4394 	while (*p) {
4395 		parent = *p;
4396 		thread = rb_entry(parent, struct binder_thread, rb_node);
4397 
4398 		if (current->pid < thread->pid)
4399 			p = &(*p)->rb_left;
4400 		else if (current->pid > thread->pid)
4401 			p = &(*p)->rb_right;
4402 		else
4403 			return thread;
4404 	}
4405 	if (!new_thread)
4406 		return NULL;
4407 	thread = new_thread;
4408 	binder_stats_created(BINDER_STAT_THREAD);
4409 	thread->proc = proc;
4410 	thread->pid = current->pid;
4411 	atomic_set(&thread->tmp_ref, 0);
4412 	init_waitqueue_head(&thread->wait);
4413 	INIT_LIST_HEAD(&thread->todo);
4414 	rb_link_node(&thread->rb_node, parent, p);
4415 	rb_insert_color(&thread->rb_node, &proc->threads);
4416 	thread->looper_need_return = true;
4417 	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4418 	thread->return_error.cmd = BR_OK;
4419 	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4420 	thread->reply_error.cmd = BR_OK;
4421 	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4422 	return thread;
4423 }
4424 
4425 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4426 {
4427 	struct binder_thread *thread;
4428 	struct binder_thread *new_thread;
4429 
4430 	binder_inner_proc_lock(proc);
4431 	thread = binder_get_thread_ilocked(proc, NULL);
4432 	binder_inner_proc_unlock(proc);
4433 	if (!thread) {
4434 		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4435 		if (new_thread == NULL)
4436 			return NULL;
4437 		binder_inner_proc_lock(proc);
4438 		thread = binder_get_thread_ilocked(proc, new_thread);
4439 		binder_inner_proc_unlock(proc);
4440 		if (thread != new_thread)
4441 			kfree(new_thread);
4442 	}
4443 	return thread;
4444 }
4445 
4446 static void binder_free_proc(struct binder_proc *proc)
4447 {
4448 	BUG_ON(!list_empty(&proc->todo));
4449 	BUG_ON(!list_empty(&proc->delivered_death));
4450 	binder_alloc_deferred_release(&proc->alloc);
4451 	put_task_struct(proc->tsk);
4452 	binder_stats_deleted(BINDER_STAT_PROC);
4453 	kfree(proc);
4454 }
4455 
4456 static void binder_free_thread(struct binder_thread *thread)
4457 {
4458 	BUG_ON(!list_empty(&thread->todo));
4459 	binder_stats_deleted(BINDER_STAT_THREAD);
4460 	binder_proc_dec_tmpref(thread->proc);
4461 	kfree(thread);
4462 }
4463 
4464 static int binder_thread_release(struct binder_proc *proc,
4465 				 struct binder_thread *thread)
4466 {
4467 	struct binder_transaction *t;
4468 	struct binder_transaction *send_reply = NULL;
4469 	int active_transactions = 0;
4470 	struct binder_transaction *last_t = NULL;
4471 
4472 	binder_inner_proc_lock(thread->proc);
4473 	/*
4474 	 * take a ref on the proc so it survives
4475 	 * after we remove this thread from proc->threads.
4476 	 * The corresponding decrement happens when the thread
4477 	 * is actually freed in binder_free_thread()
4478 	 */
4479 	proc->tmp_ref++;
4480 	/*
4481 	 * take a ref on this thread to ensure it
4482 	 * survives while we are releasing it
4483 	 */
4484 	atomic_inc(&thread->tmp_ref);
4485 	rb_erase(&thread->rb_node, &proc->threads);
4486 	t = thread->transaction_stack;
4487 	if (t) {
4488 		spin_lock(&t->lock);
4489 		if (t->to_thread == thread)
4490 			send_reply = t;
4491 	} else {
4492 		__acquire(&t->lock);
4493 	}
4494 	thread->is_dead = true;
4495 
4496 	while (t) {
4497 		last_t = t;
4498 		active_transactions++;
4499 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4500 			     "release %d:%d transaction %d %s, still active\n",
4501 			      proc->pid, thread->pid,
4502 			     t->debug_id,
4503 			     (t->to_thread == thread) ? "in" : "out");
4504 
4505 		if (t->to_thread == thread) {
4506 			t->to_proc = NULL;
4507 			t->to_thread = NULL;
4508 			if (t->buffer) {
4509 				t->buffer->transaction = NULL;
4510 				t->buffer = NULL;
4511 			}
4512 			t = t->to_parent;
4513 		} else if (t->from == thread) {
4514 			t->from = NULL;
4515 			t = t->from_parent;
4516 		} else
4517 			BUG();
4518 		spin_unlock(&last_t->lock);
4519 		if (t)
4520 			spin_lock(&t->lock);
4521 		else
4522 			__acquire(&t->lock);
4523 	}
4524 	/* annotation for sparse, lock not acquired in last iteration above */
4525 	__release(&t->lock);
4526 
4527 	/*
4528 	 * If this thread used poll, make sure we remove the waitqueue
4529 	 * from any epoll data structures holding it with POLLFREE.
4530 	 * waitqueue_active() is safe to use here because we're holding
4531 	 * the inner lock.
4532 	 */
4533 	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4534 	    waitqueue_active(&thread->wait)) {
4535 		wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
4536 	}
4537 
4538 	binder_inner_proc_unlock(thread->proc);
4539 
4540 	/*
4541 	 * This is needed to avoid races between wake_up_poll() above and
4542 	 * ep_remove_waitqueue() called for other reasons (eg the epoll file
4543 	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4544 	 * lock, so we can be sure it's done after calling synchronize_rcu().
4545 	 */
4546 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
4547 		synchronize_rcu();
4548 
4549 	if (send_reply)
4550 		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4551 	binder_release_work(proc, &thread->todo);
4552 	binder_thread_dec_tmpref(thread);
4553 	return active_transactions;
4554 }
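
/*
 * Example (editor's sketch of the userspace side; not part of this
 * driver): binder_thread_release() also runs when a looper thread
 * detaches itself before terminating:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/android/binder.h>
 *
 *	// issued from the exiting thread itself; the argument is unused
 *	ioctl(binder_fd, BINDER_THREAD_EXIT, 0);
 */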
4555 
4556 static __poll_t binder_poll(struct file *filp,
4557 				struct poll_table_struct *wait)
4558 {
4559 	struct binder_proc *proc = filp->private_data;
4560 	struct binder_thread *thread = NULL;
4561 	bool wait_for_proc_work;
4562 
4563 	thread = binder_get_thread(proc);
4564 	if (!thread)
4565 		return POLLERR;
4566 
4567 	binder_inner_proc_lock(thread->proc);
4568 	thread->looper |= BINDER_LOOPER_STATE_POLL;
4569 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4570 
4571 	binder_inner_proc_unlock(thread->proc);
4572 
4573 	poll_wait(filp, &thread->wait, wait);
4574 
4575 	if (binder_has_work(thread, wait_for_proc_work))
4576 		return EPOLLIN;
4577 
4578 	return 0;
4579 }
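
/*
 * Example (editor's sketch of the userspace side; not part of this
 * driver): a client can multiplex binder work with other descriptors;
 * EPOLLIN from the handler above means a BINDER_WRITE_READ read will
 * find work without blocking:
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = binder_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		// work pending: issue a BINDER_WRITE_READ read
 *	}
 */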
4580 
4581 static int binder_ioctl_write_read(struct file *filp,
4582 				unsigned int cmd, unsigned long arg,
4583 				struct binder_thread *thread)
4584 {
4585 	int ret = 0;
4586 	struct binder_proc *proc = filp->private_data;
4587 	unsigned int size = _IOC_SIZE(cmd);
4588 	void __user *ubuf = (void __user *)arg;
4589 	struct binder_write_read bwr;
4590 
4591 	if (size != sizeof(struct binder_write_read)) {
4592 		ret = -EINVAL;
4593 		goto out;
4594 	}
4595 	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4596 		ret = -EFAULT;
4597 		goto out;
4598 	}
4599 	binder_debug(BINDER_DEBUG_READ_WRITE,
4600 		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4601 		     proc->pid, thread->pid,
4602 		     (u64)bwr.write_size, (u64)bwr.write_buffer,
4603 		     (u64)bwr.read_size, (u64)bwr.read_buffer);
4604 
4605 	if (bwr.write_size > 0) {
4606 		ret = binder_thread_write(proc, thread,
4607 					  bwr.write_buffer,
4608 					  bwr.write_size,
4609 					  &bwr.write_consumed);
4610 		trace_binder_write_done(ret);
4611 		if (ret < 0) {
4612 			bwr.read_consumed = 0;
4613 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4614 				ret = -EFAULT;
4615 			goto out;
4616 		}
4617 	}
4618 	if (bwr.read_size > 0) {
4619 		ret = binder_thread_read(proc, thread, bwr.read_buffer,
4620 					 bwr.read_size,
4621 					 &bwr.read_consumed,
4622 					 filp->f_flags & O_NONBLOCK);
4623 		trace_binder_read_done(ret);
4624 		binder_inner_proc_lock(proc);
4625 		if (!binder_worklist_empty_ilocked(&proc->todo))
4626 			binder_wakeup_proc_ilocked(proc);
4627 		binder_inner_proc_unlock(proc);
4628 		if (ret < 0) {
4629 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4630 				ret = -EFAULT;
4631 			goto out;
4632 		}
4633 	}
4634 	binder_debug(BINDER_DEBUG_READ_WRITE,
4635 		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4636 		     proc->pid, thread->pid,
4637 		     (u64)bwr.write_consumed, (u64)bwr.write_size,
4638 		     (u64)bwr.read_consumed, (u64)bwr.read_size);
4639 	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4640 		ret = -EFAULT;
4641 		goto out;
4642 	}
4643 out:
4644 	return ret;
4645 }
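
/*
 * Example (editor's sketch of the userspace side; not part of this
 * driver): a single BINDER_WRITE_READ can carry both directions; the
 * kernel reports progress through the *_consumed fields, which callers
 * should check rather than assume the whole buffer was handled. wbuf,
 * wlen and rbuf are assumed to come from earlier setup.
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)(uintptr_t)wbuf,
 *		.write_size   = wlen,
 *		.read_buffer  = (binder_uintptr_t)(uintptr_t)rbuf,
 *		.read_size    = sizeof(rbuf),
 *	};
 *
 *	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) == 0) {
 *		// bwr.write_consumed: bytes of BC_* commands accepted
 *		// bwr.read_consumed:  bytes of BR_* stream returned
 *	}
 */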
4646 
4647 static int binder_ioctl_set_ctx_mgr(struct file *filp)
4648 {
4649 	int ret = 0;
4650 	struct binder_proc *proc = filp->private_data;
4651 	struct binder_context *context = proc->context;
4652 	struct binder_node *new_node;
4653 	kuid_t curr_euid = current_euid();
4654 
4655 	mutex_lock(&context->context_mgr_node_lock);
4656 	if (context->binder_context_mgr_node) {
4657 		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4658 		ret = -EBUSY;
4659 		goto out;
4660 	}
4661 	ret = security_binder_set_context_mgr(proc->tsk);
4662 	if (ret < 0)
4663 		goto out;
4664 	if (uid_valid(context->binder_context_mgr_uid)) {
4665 		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4666 			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4667 			       from_kuid(&init_user_ns, curr_euid),
4668 			       from_kuid(&init_user_ns,
4669 					 context->binder_context_mgr_uid));
4670 			ret = -EPERM;
4671 			goto out;
4672 		}
4673 	} else {
4674 		context->binder_context_mgr_uid = curr_euid;
4675 	}
4676 	new_node = binder_new_node(proc, NULL);
4677 	if (!new_node) {
4678 		ret = -ENOMEM;
4679 		goto out;
4680 	}
4681 	binder_node_lock(new_node);
4682 	new_node->local_weak_refs++;
4683 	new_node->local_strong_refs++;
4684 	new_node->has_strong_ref = 1;
4685 	new_node->has_weak_ref = 1;
4686 	context->binder_context_mgr_node = new_node;
4687 	binder_node_unlock(new_node);
4688 	binder_put_node(new_node);
4689 out:
4690 	mutex_unlock(&context->context_mgr_node_lock);
4691 	return ret;
4692 }
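
/*
 * Example (editor's sketch of the userspace side; not part of this
 * driver): exactly one process per context (traditionally
 * servicemanager) registers itself as the context manager and becomes
 * handle 0 for every other client:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/android/binder.h>
 *
 *	if (ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0) < 0) {
 *		// EBUSY: a manager is already registered
 *		// EPERM: denied by security_binder_set_context_mgr()
 *	}
 */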
4693 
4694 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4695 		struct binder_node_info_for_ref *info)
4696 {
4697 	struct binder_node *node;
4698 	struct binder_context *context = proc->context;
4699 	__u32 handle = info->handle;
4700 
4701 	if (info->strong_count || info->weak_count || info->reserved1 ||
4702 	    info->reserved2 || info->reserved3) {
4703 		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
4704 				  proc->pid);
4705 		return -EINVAL;
4706 	}
4707 
4708 	/* This ioctl may only be used by the context manager */
4709 	mutex_lock(&context->context_mgr_node_lock);
4710 	if (!context->binder_context_mgr_node ||
4711 		context->binder_context_mgr_node->proc != proc) {
4712 		mutex_unlock(&context->context_mgr_node_lock);
4713 		return -EPERM;
4714 	}
4715 	mutex_unlock(&context->context_mgr_node_lock);
4716 
4717 	node = binder_get_node_from_ref(proc, handle, true, NULL);
4718 	if (!node)
4719 		return -EINVAL;
4720 
4721 	info->strong_count = node->local_strong_refs +
4722 		node->internal_strong_refs;
4723 	info->weak_count = node->local_weak_refs;
4724 
4725 	binder_put_node(node);
4726 
4727 	return 0;
4728 }
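
/*
 * Example (editor's sketch of the userspace side; not part of this
 * driver): only the context manager may query the node behind one of
 * its handles, and every field except handle must be zero on input.
 * some_handle is a hypothetical handle value.
 *
 *	struct binder_node_info_for_ref info = { .handle = some_handle };
 *
 *	if (ioctl(binder_fd, BINDER_GET_NODE_INFO_FOR_REF, &info) == 0) {
 *		// info.strong_count and info.weak_count are now filled in
 *	}
 */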
4729 
4730 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4731 				struct binder_node_debug_info *info)
4732 {
4733 	struct rb_node *n;
4734 	binder_uintptr_t ptr = info->ptr;
4735 
4736 	memset(info, 0, sizeof(*info));
4737 
4738 	binder_inner_proc_lock(proc);
4739 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4740 		struct binder_node *node = rb_entry(n, struct binder_node,
4741 						    rb_node);
4742 		if (node->ptr > ptr) {
4743 			info->ptr = node->ptr;
4744 			info->cookie = node->cookie;
4745 			info->has_strong_ref = node->has_strong_ref;
4746 			info->has_weak_ref = node->has_weak_ref;
4747 			break;
4748 		}
4749 	}
4750 	binder_inner_proc_unlock(proc);
4751 
4752 	return 0;
4753 }
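
/*
 * Example (editor's sketch of the userspace side; not part of this
 * driver): the handler above returns the first node whose ptr is
 * greater than the one passed in, so a debugger can walk all nodes of
 * a process by feeding each result back until ptr comes back as 0:
 *
 *	struct binder_node_debug_info info = { .ptr = 0 };
 *
 *	do {
 *		if (ioctl(binder_fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
 *			break;
 *		// info.ptr == 0 means no more nodes; otherwise inspect
 *		// info.cookie / info.has_strong_ref / info.has_weak_ref
 *	} while (info.ptr != 0);
 */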
4754 
4755 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4756 {
4757 	int ret;
4758 	struct binder_proc *proc = filp->private_data;
4759 	struct binder_thread *thread;
4760 	unsigned int size = _IOC_SIZE(cmd);
4761 	void __user *ubuf = (void __user *)arg;
4762 
4763 	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
4764 			proc->pid, current->pid, cmd, arg);*/
4765 
4766 	binder_selftest_alloc(&proc->alloc);
4767 
4768 	trace_binder_ioctl(cmd, arg);
4769 
4770 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4771 	if (ret)
4772 		goto err_unlocked;
4773 
4774 	thread = binder_get_thread(proc);
4775 	if (thread == NULL) {
4776 		ret = -ENOMEM;
4777 		goto err;
4778 	}
4779 
4780 	switch (cmd) {
4781 	case BINDER_WRITE_READ:
4782 		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4783 		if (ret)
4784 			goto err;
4785 		break;
4786 	case BINDER_SET_MAX_THREADS: {
4787 		int max_threads;
4788 
4789 		if (copy_from_user(&max_threads, ubuf,
4790 				   sizeof(max_threads))) {
4791 			ret = -EINVAL;
4792 			goto err;
4793 		}
4794 		binder_inner_proc_lock(proc);
4795 		proc->max_threads = max_threads;
4796 		binder_inner_proc_unlock(proc);
4797 		break;
4798 	}
4799 	case BINDER_SET_CONTEXT_MGR:
4800 		ret = binder_ioctl_set_ctx_mgr(filp);
4801 		if (ret)
4802 			goto err;
4803 		break;
4804 	case BINDER_THREAD_EXIT:
4805 		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
4806 			     proc->pid, thread->pid);
4807 		binder_thread_release(proc, thread);
4808 		thread = NULL;
4809 		break;
4810 	case BINDER_VERSION: {
4811 		struct binder_version __user *ver = ubuf;
4812 
4813 		if (size != sizeof(struct binder_version)) {
4814 			ret = -EINVAL;
4815 			goto err;
4816 		}
4817 		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4818 			     &ver->protocol_version)) {
4819 			ret = -EINVAL;
4820 			goto err;
4821 		}
4822 		break;
4823 	}
4824 	case BINDER_GET_NODE_INFO_FOR_REF: {
4825 		struct binder_node_info_for_ref info;
4826 
4827 		if (copy_from_user(&info, ubuf, sizeof(info))) {
4828 			ret = -EFAULT;
4829 			goto err;
4830 		}
4831 
4832 		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
4833 		if (ret < 0)
4834 			goto err;
4835 
4836 		if (copy_to_user(ubuf, &info, sizeof(info))) {
4837 			ret = -EFAULT;
4838 			goto err;
4839 		}
4840 
4841 		break;
4842 	}
4843 	case BINDER_GET_NODE_DEBUG_INFO: {
4844 		struct binder_node_debug_info info;
4845 
4846 		if (copy_from_user(&info, ubuf, sizeof(info))) {
4847 			ret = -EFAULT;
4848 			goto err;
4849 		}
4850 
4851 		ret = binder_ioctl_get_node_debug_info(proc, &info);
4852 		if (ret < 0)
4853 			goto err;
4854 
4855 		if (copy_to_user(ubuf, &info, sizeof(info))) {
4856 			ret = -EFAULT;
4857 			goto err;
4858 		}
4859 		break;
4860 	}
4861 	default:
4862 		ret = -EINVAL;
4863 		goto err;
4864 	}
4865 	ret = 0;
4866 err:
4867 	if (thread)
4868 		thread->looper_need_return = false;
4869 	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4870 	if (ret && ret != -ERESTARTSYS)
4871 		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
4872 err_unlocked:
4873 	trace_binder_ioctl_done(ret);
4874 	return ret;
4875 }
4876 
4877 static void binder_vma_open(struct vm_area_struct *vma)
4878 {
4879 	struct binder_proc *proc = vma->vm_private_data;
4880 
4881 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4882 		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4883 		     proc->pid, vma->vm_start, vma->vm_end,
4884 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4885 		     (unsigned long)pgprot_val(vma->vm_page_prot));
4886 }
4887 
4888 static void binder_vma_close(struct vm_area_struct *vma)
4889 {
4890 	struct binder_proc *proc = vma->vm_private_data;
4891 
4892 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4893 		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4894 		     proc->pid, vma->vm_start, vma->vm_end,
4895 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4896 		     (unsigned long)pgprot_val(vma->vm_page_prot));
4897 	binder_alloc_vma_close(&proc->alloc);
4898 }
4899 
4900 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
4901 {
4902 	return VM_FAULT_SIGBUS;
4903 }
4904 
4905 static const struct vm_operations_struct binder_vm_ops = {
4906 	.open = binder_vma_open,
4907 	.close = binder_vma_close,
4908 	.fault = binder_vm_fault,
4909 };
4910 
4911 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4912 {
4913 	int ret;
4914 	struct binder_proc *proc = filp->private_data;
4915 	const char *failure_string;
4916 
4917 	if (proc->tsk != current->group_leader)
4918 		return -EINVAL;
4919 
4920 	if ((vma->vm_end - vma->vm_start) > SZ_4M)
4921 		vma->vm_end = vma->vm_start + SZ_4M;
4922 
4923 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4924 		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4925 		     __func__, proc->pid, vma->vm_start, vma->vm_end,
4926 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4927 		     (unsigned long)pgprot_val(vma->vm_page_prot));
4928 
4929 	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4930 		ret = -EPERM;
4931 		failure_string = "bad vm_flags";
4932 		goto err_bad_arg;
4933 	}
4934 	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
4935 	vma->vm_flags &= ~VM_MAYWRITE;
4936 
4937 	vma->vm_ops = &binder_vm_ops;
4938 	vma->vm_private_data = proc;
4939 
4940 	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
4941 	if (ret)
4942 		return ret;
4943 	return 0;
4944 
4945 err_bad_arg:
4946 	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
4947 	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
4948 	return ret;
4949 }
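
/*
 * Illustrative only, not part of the driver: the userspace side of the
 * mapping set up above.  Since FORBIDDEN_MMAP_FLAGS rejects writable
 * mappings and the size is silently capped at 4MB, a read-only private
 * mapping is the norm (this mirrors what libbinder does):
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ,
 *			 MAP_PRIVATE | MAP_NORESERVE, fd, 0);
 */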
4950 
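/*
 * One binder_proc is created per process (keyed on the thread group
 * leader) and shared by all of its threads; per-thread binder_thread
 * state is created lazily when a thread first uses the fd.
 */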
4951 static int binder_open(struct inode *nodp, struct file *filp)
4952 {
4953 	struct binder_proc *proc;
4954 	struct binder_device *binder_dev;
4955 
4956 	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
4957 		     current->group_leader->pid, current->pid);
4958 
4959 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
4960 	if (proc == NULL)
4961 		return -ENOMEM;
4962 	spin_lock_init(&proc->inner_lock);
4963 	spin_lock_init(&proc->outer_lock);
4964 	get_task_struct(current->group_leader);
4965 	proc->tsk = current->group_leader;
4966 	INIT_LIST_HEAD(&proc->todo);
4967 	proc->default_priority = task_nice(current);
4968 	binder_dev = container_of(filp->private_data, struct binder_device,
4969 				  miscdev);
4970 	proc->context = &binder_dev->context;
4971 	binder_alloc_init(&proc->alloc);
4972 
4973 	binder_stats_created(BINDER_STAT_PROC);
4974 	proc->pid = current->group_leader->pid;
4975 	INIT_LIST_HEAD(&proc->delivered_death);
4976 	INIT_LIST_HEAD(&proc->waiting_threads);
4977 	filp->private_data = proc;
4978 
4979 	mutex_lock(&binder_procs_lock);
4980 	hlist_add_head(&proc->proc_node, &binder_procs);
4981 	mutex_unlock(&binder_procs_lock);
4982 
4983 	if (binder_debugfs_dir_entry_proc) {
4984 		char strbuf[11];
4985 
4986 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
4987 		/*
4988 		 * proc debug entries are shared between contexts, so
4989 		 * this will fail if the process tries to open the driver
4990 		 * again with a different context. The printing code
4991 		 * prints all contexts that a given PID has anyway, so
4992 		 * this is not a problem.
4993 		 */
4994 		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
4995 			binder_debugfs_dir_entry_proc,
4996 			(void *)(unsigned long)proc->pid,
4997 			&proc_fops);
4998 	}
4999 
5000 	return 0;
5001 }
5002 
5003 static int binder_flush(struct file *filp, fl_owner_t id)
5004 {
5005 	struct binder_proc *proc = filp->private_data;
5006 
5007 	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5008 
5009 	return 0;
5010 }
5011 
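/*
 * Runs on the deferred workqueue for BINDER_DEFERRED_FLUSH: mark every
 * thread as needing to return to userspace and wake the ones blocked
 * waiting for work, so a flush unblocks readers promptly.
 */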
5012 static void binder_deferred_flush(struct binder_proc *proc)
5013 {
5014 	struct rb_node *n;
5015 	int wake_count = 0;
5016 
5017 	binder_inner_proc_lock(proc);
5018 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5019 		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5020 
5021 		thread->looper_need_return = true;
5022 		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5023 			wake_up_interruptible(&thread->wait);
5024 			wake_count++;
5025 		}
5026 	}
5027 	binder_inner_proc_unlock(proc);
5028 
5029 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5030 		     "binder_flush: %d woke %d threads\n", proc->pid,
5031 		     wake_count);
5032 }
5033 
5034 static int binder_release(struct inode *nodp, struct file *filp)
5035 {
5036 	struct binder_proc *proc = filp->private_data;
5037 
5038 	debugfs_remove(proc->debugfs_entry);
5039 	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5040 
5041 	return 0;
5042 }
5043 
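/*
 * Called during process teardown for each node owned by the dying
 * process.  If nothing else references the node it is freed outright;
 * otherwise it is moved to binder_dead_nodes and a
 * BINDER_WORK_DEAD_BINDER item is queued for every ref that registered
 * a death notification.  Returns the updated incoming ref count so the
 * caller can accumulate statistics.
 */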
5044 static int binder_node_release(struct binder_node *node, int refs)
5045 {
5046 	struct binder_ref *ref;
5047 	int death = 0;
5048 	struct binder_proc *proc = node->proc;
5049 
5050 	binder_release_work(proc, &node->async_todo);
5051 
5052 	binder_node_lock(node);
5053 	binder_inner_proc_lock(proc);
5054 	binder_dequeue_work_ilocked(&node->work);
5055 	/*
5056 	 * The caller must have taken a temporary ref on the node.
5057 	 */
5058 	BUG_ON(!node->tmp_refs);
5059 	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5060 		binder_inner_proc_unlock(proc);
5061 		binder_node_unlock(node);
5062 		binder_free_node(node);
5063 
5064 		return refs;
5065 	}
5066 
5067 	node->proc = NULL;
5068 	node->local_strong_refs = 0;
5069 	node->local_weak_refs = 0;
5070 	binder_inner_proc_unlock(proc);
5071 
5072 	spin_lock(&binder_dead_nodes_lock);
5073 	hlist_add_head(&node->dead_node, &binder_dead_nodes);
5074 	spin_unlock(&binder_dead_nodes_lock);
5075 
5076 	hlist_for_each_entry(ref, &node->refs, node_entry) {
5077 		refs++;
5078 		/*
5079 		 * Need the node lock to synchronize
5080 		 * with new notification requests and the
5081 		 * inner lock to synchronize with queued
5082 		 * death notifications.
5083 		 */
5084 		binder_inner_proc_lock(ref->proc);
5085 		if (!ref->death) {
5086 			binder_inner_proc_unlock(ref->proc);
5087 			continue;
5088 		}
5089 
5090 		death++;
5091 
5092 		BUG_ON(!list_empty(&ref->death->work.entry));
5093 		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5094 		binder_enqueue_work_ilocked(&ref->death->work,
5095 					    &ref->proc->todo);
5096 		binder_wakeup_proc_ilocked(ref->proc);
5097 		binder_inner_proc_unlock(ref->proc);
5098 	}
5099 
5100 	binder_debug(BINDER_DEBUG_DEAD_BINDER,
5101 		     "node %d now dead, refs %d, death %d\n",
5102 		     node->debug_id, refs, death);
5103 	binder_node_unlock(node);
5104 	binder_put_node(node);
5105 
5106 	return refs;
5107 }
5108 
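/*
 * Final teardown of a binder_proc once userspace has closed the file:
 * unhash the proc, drop the context manager node if this process was
 * serving as context manager, then drain in lock order - threads
 * first, then nodes, then outgoing refs, and finally any work still
 * queued on proc->todo and proc->delivered_death.
 */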
5109 static void binder_deferred_release(struct binder_proc *proc)
5110 {
5111 	struct binder_context *context = proc->context;
5112 	struct rb_node *n;
5113 	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5114 
5115 	mutex_lock(&binder_procs_lock);
5116 	hlist_del(&proc->proc_node);
5117 	mutex_unlock(&binder_procs_lock);
5118 
5119 	mutex_lock(&context->context_mgr_node_lock);
5120 	if (context->binder_context_mgr_node &&
5121 	    context->binder_context_mgr_node->proc == proc) {
5122 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
5123 			     "%s: %d context_mgr_node gone\n",
5124 			     __func__, proc->pid);
5125 		context->binder_context_mgr_node = NULL;
5126 	}
5127 	mutex_unlock(&context->context_mgr_node_lock);
5128 	binder_inner_proc_lock(proc);
5129 	/*
5130 	 * Make sure proc stays alive after we
5131 	 * remove all the threads
5132 	 */
5133 	proc->tmp_ref++;
5134 
5135 	proc->is_dead = true;
5136 	threads = 0;
5137 	active_transactions = 0;
5138 	while ((n = rb_first(&proc->threads))) {
5139 		struct binder_thread *thread;
5140 
5141 		thread = rb_entry(n, struct binder_thread, rb_node);
5142 		binder_inner_proc_unlock(proc);
5143 		threads++;
5144 		active_transactions += binder_thread_release(proc, thread);
5145 		binder_inner_proc_lock(proc);
5146 	}
5147 
5148 	nodes = 0;
5149 	incoming_refs = 0;
5150 	while ((n = rb_first(&proc->nodes))) {
5151 		struct binder_node *node;
5152 
5153 		node = rb_entry(n, struct binder_node, rb_node);
5154 		nodes++;
5155 		/*
5156 		 * take a temporary ref on the node before
5157 		 * calling binder_node_release() which will either
5158 		 * kfree() the node or call binder_put_node()
5159 		 */
5160 		binder_inc_node_tmpref_ilocked(node);
5161 		rb_erase(&node->rb_node, &proc->nodes);
5162 		binder_inner_proc_unlock(proc);
5163 		incoming_refs = binder_node_release(node, incoming_refs);
5164 		binder_inner_proc_lock(proc);
5165 	}
5166 	binder_inner_proc_unlock(proc);
5167 
5168 	outgoing_refs = 0;
5169 	binder_proc_lock(proc);
5170 	while ((n = rb_first(&proc->refs_by_desc))) {
5171 		struct binder_ref *ref;
5172 
5173 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
5174 		outgoing_refs++;
5175 		binder_cleanup_ref_olocked(ref);
5176 		binder_proc_unlock(proc);
5177 		binder_free_ref(ref);
5178 		binder_proc_lock(proc);
5179 	}
5180 	binder_proc_unlock(proc);
5181 
5182 	binder_release_work(proc, &proc->todo);
5183 	binder_release_work(proc, &proc->delivered_death);
5184 
5185 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5186 		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5187 		     __func__, proc->pid, threads, nodes, incoming_refs,
5188 		     outgoing_refs, active_transactions);
5189 
5190 	binder_proc_dec_tmpref(proc);
5191 }
5192 
5193 static void binder_deferred_func(struct work_struct *work)
5194 {
5195 	struct binder_proc *proc;
5196 
5197 	int defer;
5198 
5199 	do {
5200 		mutex_lock(&binder_deferred_lock);
5201 		if (!hlist_empty(&binder_deferred_list)) {
5202 			proc = hlist_entry(binder_deferred_list.first,
5203 					struct binder_proc, deferred_work_node);
5204 			hlist_del_init(&proc->deferred_work_node);
5205 			defer = proc->deferred_work;
5206 			proc->deferred_work = 0;
5207 		} else {
5208 			proc = NULL;
5209 			defer = 0;
5210 		}
5211 		mutex_unlock(&binder_deferred_lock);
5212 
5213 		if (defer & BINDER_DEFERRED_FLUSH)
5214 			binder_deferred_flush(proc);
5215 
5216 		if (defer & BINDER_DEFERRED_RELEASE)
5217 			binder_deferred_release(proc); /* frees proc */
5218 	} while (proc);
5219 }

5220 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5221 
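/*
 * Record @defer for @proc and, if the proc is not already queued on
 * binder_deferred_list, add it and kick the shared work item.  Flags
 * from multiple calls are OR'ed together and handled in a single pass
 * by binder_deferred_func().
 */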
5222 static void
5223 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5224 {
5225 	mutex_lock(&binder_deferred_lock);
5226 	proc->deferred_work |= defer;
5227 	if (hlist_unhashed(&proc->deferred_work_node)) {
5228 		hlist_add_head(&proc->deferred_work_node,
5229 				&binder_deferred_list);
5230 		schedule_work(&binder_deferred_work);
5231 	}
5232 	mutex_unlock(&binder_deferred_lock);
5233 }
5234 
5235 static void print_binder_transaction_ilocked(struct seq_file *m,
5236 					     struct binder_proc *proc,
5237 					     const char *prefix,
5238 					     struct binder_transaction *t)
5239 {
5240 	struct binder_proc *to_proc;
5241 	struct binder_buffer *buffer = t->buffer;
5242 
5243 	spin_lock(&t->lock);
5244 	to_proc = t->to_proc;
5245 	seq_printf(m,
5246 		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5247 		   prefix, t->debug_id, t,
5248 		   t->from ? t->from->proc->pid : 0,
5249 		   t->from ? t->from->pid : 0,
5250 		   to_proc ? to_proc->pid : 0,
5251 		   t->to_thread ? t->to_thread->pid : 0,
5252 		   t->code, t->flags, t->priority, t->need_reply);
5253 	spin_unlock(&t->lock);
5254 
5255 	if (proc != to_proc) {
5256 		/*
5257 		 * Can only safely deref buffer if we are holding the
5258 		 * correct proc inner lock for this node
5259 		 */
5260 		seq_puts(m, "\n");
5261 		return;
5262 	}
5263 
5264 	if (buffer == NULL) {
5265 		seq_puts(m, " buffer free\n");
5266 		return;
5267 	}
5268 	if (buffer->target_node)
5269 		seq_printf(m, " node %d", buffer->target_node->debug_id);
5270 	seq_printf(m, " size %zd:%zd data %pK\n",
5271 		   buffer->data_size, buffer->offsets_size,
5272 		   buffer->data);
5273 }
5274 
5275 static void print_binder_work_ilocked(struct seq_file *m,
5276 				     struct binder_proc *proc,
5277 				     const char *prefix,
5278 				     const char *transaction_prefix,
5279 				     struct binder_work *w)
5280 {
5281 	struct binder_node *node;
5282 	struct binder_transaction *t;
5283 
5284 	switch (w->type) {
5285 	case BINDER_WORK_TRANSACTION:
5286 		t = container_of(w, struct binder_transaction, work);
5287 		print_binder_transaction_ilocked(
5288 				m, proc, transaction_prefix, t);
5289 		break;
5290 	case BINDER_WORK_RETURN_ERROR: {
5291 		struct binder_error *e = container_of(
5292 				w, struct binder_error, work);
5293 
5294 		seq_printf(m, "%stransaction error: %u\n",
5295 			   prefix, e->cmd);
5296 	} break;
5297 	case BINDER_WORK_TRANSACTION_COMPLETE:
5298 		seq_printf(m, "%stransaction complete\n", prefix);
5299 		break;
5300 	case BINDER_WORK_NODE:
5301 		node = container_of(w, struct binder_node, work);
5302 		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5303 			   prefix, node->debug_id,
5304 			   (u64)node->ptr, (u64)node->cookie);
5305 		break;
5306 	case BINDER_WORK_DEAD_BINDER:
5307 		seq_printf(m, "%shas dead binder\n", prefix);
5308 		break;
5309 	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5310 		seq_printf(m, "%shas cleared dead binder\n", prefix);
5311 		break;
5312 	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5313 		seq_printf(m, "%shas cleared death notification\n", prefix);
5314 		break;
5315 	default:
5316 		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5317 		break;
5318 	}
5319 }
5320 
5321 static void print_binder_thread_ilocked(struct seq_file *m,
5322 					struct binder_thread *thread,
5323 					int print_always)
5324 {
5325 	struct binder_transaction *t;
5326 	struct binder_work *w;
5327 	size_t start_pos = m->count;
5328 	size_t header_pos;
5329 
5330 	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
5331 			thread->pid, thread->looper,
5332 			thread->looper_need_return,
5333 			atomic_read(&thread->tmp_ref));
5334 	header_pos = m->count;
5335 	t = thread->transaction_stack;
5336 	while (t) {
5337 		if (t->from == thread) {
5338 			print_binder_transaction_ilocked(m, thread->proc,
5339 					"    outgoing transaction", t);
5340 			t = t->from_parent;
5341 		} else if (t->to_thread == thread) {
5342 			print_binder_transaction_ilocked(m, thread->proc,
5343 						 "    incoming transaction", t);
5344 			t = t->to_parent;
5345 		} else {
5346 			print_binder_transaction_ilocked(m, thread->proc,
5347 					"    bad transaction", t);
5348 			t = NULL;
5349 		}
5350 	}
5351 	list_for_each_entry(w, &thread->todo, entry) {
5352 		print_binder_work_ilocked(m, thread->proc, "    ",
5353 					  "    pending transaction", w);
5354 	}
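	/* if nothing was printed after the header, drop the header too */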
5355 	if (!print_always && m->count == header_pos)
5356 		m->count = start_pos;
5357 }
5358 
5359 static void print_binder_node_nilocked(struct seq_file *m,
5360 				       struct binder_node *node)
5361 {
5362 	struct binder_ref *ref;
5363 	struct binder_work *w;
5364 	int count;
5365 
5366 	count = 0;
5367 	hlist_for_each_entry(ref, &node->refs, node_entry)
5368 		count++;
5369 
5370 	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5371 		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
5372 		   node->has_strong_ref, node->has_weak_ref,
5373 		   node->local_strong_refs, node->local_weak_refs,
5374 		   node->internal_strong_refs, count, node->tmp_refs);
5375 	if (count) {
5376 		seq_puts(m, " proc");
5377 		hlist_for_each_entry(ref, &node->refs, node_entry)
5378 			seq_printf(m, " %d", ref->proc->pid);
5379 	}
5380 	seq_puts(m, "\n");
5381 	if (node->proc) {
5382 		list_for_each_entry(w, &node->async_todo, entry)
5383 			print_binder_work_ilocked(m, node->proc, "    ",
5384 					  "    pending async transaction", w);
5385 	}
5386 }
5387 
5388 static void print_binder_ref_olocked(struct seq_file *m,
5389 				     struct binder_ref *ref)
5390 {
5391 	binder_node_lock(ref->node);
5392 	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
5393 		   ref->data.debug_id, ref->data.desc,
5394 		   ref->node->proc ? "" : "dead ",
5395 		   ref->node->debug_id, ref->data.strong,
5396 		   ref->data.weak, ref->death);
5397 	binder_node_unlock(ref->node);
5398 }
5399 
5400 static void print_binder_proc(struct seq_file *m,
5401 			      struct binder_proc *proc, int print_all)
5402 {
5403 	struct binder_work *w;
5404 	struct rb_node *n;
5405 	size_t start_pos = m->count;
5406 	size_t header_pos;
5407 	struct binder_node *last_node = NULL;
5408 
5409 	seq_printf(m, "proc %d\n", proc->pid);
5410 	seq_printf(m, "context %s\n", proc->context->name);
5411 	header_pos = m->count;
5412 
5413 	binder_inner_proc_lock(proc);
5414 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5415 		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5416 						rb_node), print_all);
5417 
5418 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5419 		struct binder_node *node = rb_entry(n, struct binder_node,
5420 						    rb_node);
5421 		if (!print_all && !node->has_async_transaction)
5422 			continue;
5423 
5424 		/*
5425 		 * take a temporary reference on the node so it
5426 		 * survives and isn't removed from the tree
5427 		 * while we print it.
5428 		 */
5429 		binder_inc_node_tmpref_ilocked(node);
5430 		/* Need to drop inner lock to take node lock */
5431 		binder_inner_proc_unlock(proc);
5432 		if (last_node)
5433 			binder_put_node(last_node);
5434 		binder_node_inner_lock(node);
5435 		print_binder_node_nilocked(m, node);
5436 		binder_node_inner_unlock(node);
5437 		last_node = node;
5438 		binder_inner_proc_lock(proc);
5439 	}
5440 	binder_inner_proc_unlock(proc);
5441 	if (last_node)
5442 		binder_put_node(last_node);
5443 
5444 	if (print_all) {
5445 		binder_proc_lock(proc);
5446 		for (n = rb_first(&proc->refs_by_desc);
5447 		     n != NULL;
5448 		     n = rb_next(n))
5449 			print_binder_ref_olocked(m, rb_entry(n,
5450 							    struct binder_ref,
5451 							    rb_node_desc));
5452 		binder_proc_unlock(proc);
5453 	}
5454 	binder_alloc_print_allocated(m, &proc->alloc);
5455 	binder_inner_proc_lock(proc);
5456 	list_for_each_entry(w, &proc->todo, entry)
5457 		print_binder_work_ilocked(m, proc, "  ",
5458 					  "  pending transaction", w);
5459 	list_for_each_entry(w, &proc->delivered_death, entry) {
5460 		seq_puts(m, "  has delivered dead binder\n");
5461 		break;
5462 	}
5463 	binder_inner_proc_unlock(proc);
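	/* if only the headers were emitted, rewind and print nothing */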
5464 	if (!print_all && m->count == header_pos)
5465 		m->count = start_pos;
5466 }
5467 
5468 static const char * const binder_return_strings[] = {
5469 	"BR_ERROR",
5470 	"BR_OK",
5471 	"BR_TRANSACTION",
5472 	"BR_REPLY",
5473 	"BR_ACQUIRE_RESULT",
5474 	"BR_DEAD_REPLY",
5475 	"BR_TRANSACTION_COMPLETE",
5476 	"BR_INCREFS",
5477 	"BR_ACQUIRE",
5478 	"BR_RELEASE",
5479 	"BR_DECREFS",
5480 	"BR_ATTEMPT_ACQUIRE",
5481 	"BR_NOOP",
5482 	"BR_SPAWN_LOOPER",
5483 	"BR_FINISHED",
5484 	"BR_DEAD_BINDER",
5485 	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
5486 	"BR_FAILED_REPLY"
5487 };
5488 
5489 static const char * const binder_command_strings[] = {
5490 	"BC_TRANSACTION",
5491 	"BC_REPLY",
5492 	"BC_ACQUIRE_RESULT",
5493 	"BC_FREE_BUFFER",
5494 	"BC_INCREFS",
5495 	"BC_ACQUIRE",
5496 	"BC_RELEASE",
5497 	"BC_DECREFS",
5498 	"BC_INCREFS_DONE",
5499 	"BC_ACQUIRE_DONE",
5500 	"BC_ATTEMPT_ACQUIRE",
5501 	"BC_REGISTER_LOOPER",
5502 	"BC_ENTER_LOOPER",
5503 	"BC_EXIT_LOOPER",
5504 	"BC_REQUEST_DEATH_NOTIFICATION",
5505 	"BC_CLEAR_DEATH_NOTIFICATION",
5506 	"BC_DEAD_BINDER_DONE",
5507 	"BC_TRANSACTION_SG",
5508 	"BC_REPLY_SG",
5509 };
5510 
5511 static const char * const binder_objstat_strings[] = {
5512 	"proc",
5513 	"thread",
5514 	"node",
5515 	"ref",
5516 	"death",
5517 	"transaction",
5518 	"transaction_complete"
5519 };
5520 
5521 static void print_binder_stats(struct seq_file *m, const char *prefix,
5522 			       struct binder_stats *stats)
5523 {
5524 	int i;
5525 
5526 	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5527 		     ARRAY_SIZE(binder_command_strings));
5528 	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5529 		int temp = atomic_read(&stats->bc[i]);
5530 
5531 		if (temp)
5532 			seq_printf(m, "%s%s: %d\n", prefix,
5533 				   binder_command_strings[i], temp);
5534 	}
5535 
5536 	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5537 		     ARRAY_SIZE(binder_return_strings));
5538 	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5539 		int temp = atomic_read(&stats->br[i]);
5540 
5541 		if (temp)
5542 			seq_printf(m, "%s%s: %d\n", prefix,
5543 				   binder_return_strings[i], temp);
5544 	}
5545 
5546 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5547 		     ARRAY_SIZE(binder_objstat_strings));
5548 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5549 		     ARRAY_SIZE(stats->obj_deleted));
5550 	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5551 		int created = atomic_read(&stats->obj_created[i]);
5552 		int deleted = atomic_read(&stats->obj_deleted[i]);
5553 
5554 		if (created || deleted)
5555 			seq_printf(m, "%s%s: active %d total %d\n",
5556 				prefix,
5557 				binder_objstat_strings[i],
5558 				created - deleted,
5559 				created);
5560 	}
5561 }
5562 
5563 static void print_binder_proc_stats(struct seq_file *m,
5564 				    struct binder_proc *proc)
5565 {
5566 	struct binder_work *w;
5567 	struct binder_thread *thread;
5568 	struct rb_node *n;
5569 	int count, strong, weak, ready_threads;
5570 	size_t free_async_space =
5571 		binder_alloc_get_free_async_space(&proc->alloc);
5572 
5573 	seq_printf(m, "proc %d\n", proc->pid);
5574 	seq_printf(m, "context %s\n", proc->context->name);
5575 	count = 0;
5576 	ready_threads = 0;
5577 	binder_inner_proc_lock(proc);
5578 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5579 		count++;
5580 
5581 	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5582 		ready_threads++;
5583 
5584 	seq_printf(m, "  threads: %d\n", count);
5585 	seq_printf(m, "  requested threads: %d+%d/%d\n"
5586 			"  ready threads %d\n"
5587 			"  free async space %zd\n", proc->requested_threads,
5588 			proc->requested_threads_started, proc->max_threads,
5589 			ready_threads,
5590 			free_async_space);
5591 	count = 0;
5592 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5593 		count++;
5594 	binder_inner_proc_unlock(proc);
5595 	seq_printf(m, "  nodes: %d\n", count);
5596 	count = 0;
5597 	strong = 0;
5598 	weak = 0;
5599 	binder_proc_lock(proc);
5600 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5601 		struct binder_ref *ref = rb_entry(n, struct binder_ref,
5602 						  rb_node_desc);
5603 		count++;
5604 		strong += ref->data.strong;
5605 		weak += ref->data.weak;
5606 	}
5607 	binder_proc_unlock(proc);
5608 	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
5609 
5610 	count = binder_alloc_get_allocated_count(&proc->alloc);
5611 	seq_printf(m, "  buffers: %d\n", count);
5612 
5613 	binder_alloc_print_pages(m, &proc->alloc);
5614 
5615 	count = 0;
5616 	binder_inner_proc_lock(proc);
5617 	list_for_each_entry(w, &proc->todo, entry) {
5618 		if (w->type == BINDER_WORK_TRANSACTION)
5619 			count++;
5620 	}
5621 	binder_inner_proc_unlock(proc);
5622 	seq_printf(m, "  pending transactions: %d\n", count);
5623 
5624 	print_binder_stats(m, "  ", &proc->stats);
5625 }
5626 
5628 static int state_show(struct seq_file *m, void *unused)
5629 {
5630 	struct binder_proc *proc;
5631 	struct binder_node *node;
5632 	struct binder_node *last_node = NULL;
5633 
5634 	seq_puts(m, "binder state:\n");
5635 
5636 	spin_lock(&binder_dead_nodes_lock);
5637 	if (!hlist_empty(&binder_dead_nodes))
5638 		seq_puts(m, "dead nodes:\n");
5639 	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5640 		/*
5641 		 * take a temporary reference on the node so it
5642 		 * survives and isn't removed from the list
5643 		 * while we print it.
5644 		 */
5645 		node->tmp_refs++;
5646 		spin_unlock(&binder_dead_nodes_lock);
5647 		if (last_node)
5648 			binder_put_node(last_node);
5649 		binder_node_lock(node);
5650 		print_binder_node_nilocked(m, node);
5651 		binder_node_unlock(node);
5652 		last_node = node;
5653 		spin_lock(&binder_dead_nodes_lock);
5654 	}
5655 	spin_unlock(&binder_dead_nodes_lock);
5656 	if (last_node)
5657 		binder_put_node(last_node);
5658 
5659 	mutex_lock(&binder_procs_lock);
5660 	hlist_for_each_entry(proc, &binder_procs, proc_node)
5661 		print_binder_proc(m, proc, 1);
5662 	mutex_unlock(&binder_procs_lock);
5663 
5664 	return 0;
5665 }
5666 
5667 static int stats_show(struct seq_file *m, void *unused)
5668 {
5669 	struct binder_proc *proc;
5670 
5671 	seq_puts(m, "binder stats:\n");
5672 
5673 	print_binder_stats(m, "", &binder_stats);
5674 
5675 	mutex_lock(&binder_procs_lock);
5676 	hlist_for_each_entry(proc, &binder_procs, proc_node)
5677 		print_binder_proc_stats(m, proc);
5678 	mutex_unlock(&binder_procs_lock);
5679 
5680 	return 0;
5681 }
5682 
5683 static int transactions_show(struct seq_file *m, void *unused)
5684 {
5685 	struct binder_proc *proc;
5686 
5687 	seq_puts(m, "binder transactions:\n");
5688 	mutex_lock(&binder_procs_lock);
5689 	hlist_for_each_entry(proc, &binder_procs, proc_node)
5690 		print_binder_proc(m, proc, 0);
5691 	mutex_unlock(&binder_procs_lock);
5692 
5693 	return 0;
5694 }
5695 
5696 static int proc_show(struct seq_file *m, void *unused)
5697 {
5698 	struct binder_proc *itr;
5699 	int pid = (unsigned long)m->private;
5700 
5701 	mutex_lock(&binder_procs_lock);
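	/* a pid can have one binder_proc per context; print every match */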
5702 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
5703 		if (itr->pid == pid) {
5704 			seq_puts(m, "binder proc state:\n");
5705 			print_binder_proc(m, itr, 1);
5706 		}
5707 	}
5708 	mutex_unlock(&binder_procs_lock);
5709 
5710 	return 0;
5711 }
5712 
5713 static void print_binder_transaction_log_entry(struct seq_file *m,
5714 					struct binder_transaction_log_entry *e)
5715 {
5716 	int debug_id = READ_ONCE(e->debug_id_done);
5717 	/*
5718 	 * read barrier to guarantee debug_id_done read before
5719 	 * we print the log values
5720 	 */
5721 	smp_rmb();
5722 	seq_printf(m,
5723 		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5724 		   e->debug_id, (e->call_type == 2) ? "reply" :
5725 		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5726 		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
5727 		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
5728 		   e->return_error, e->return_error_param,
5729 		   e->return_error_line);
5730 	/*
5731 	 * read-barrier to guarantee read of debug_id_done after
5732 	 * done printing the fields of the entry
5733 	 */
5734 	smp_rmb();
5735 	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5736 			"\n" : " (incomplete)\n");
5737 }
5738 
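/*
 * The transaction log is a fixed-size ring buffer: log->cur is the
 * index of the most recently written entry (it starts at ~0U, see
 * binder_init()) and log->full is set once the buffer has wrapped.
 * Print oldest-first: from index 0 if the log never wrapped, otherwise
 * starting just after the most recent entry.
 */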
5739 static int transaction_log_show(struct seq_file *m, void *unused)
5740 {
5741 	struct binder_transaction_log *log = m->private;
5742 	unsigned int log_cur = atomic_read(&log->cur);
5743 	unsigned int count;
5744 	unsigned int cur;
5745 	int i;
5746 
5747 	count = log_cur + 1;
5748 	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5749 		0 : count % ARRAY_SIZE(log->entry);
5750 	if (count > ARRAY_SIZE(log->entry) || log->full)
5751 		count = ARRAY_SIZE(log->entry);
5752 	for (i = 0; i < count; i++) {
5753 		unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5754 
5755 		print_binder_transaction_log_entry(m, &log->entry[index]);
5756 	}
5757 	return 0;
5758 }
5759 
5760 static const struct file_operations binder_fops = {
5761 	.owner = THIS_MODULE,
5762 	.poll = binder_poll,
5763 	.unlocked_ioctl = binder_ioctl,
5764 	.compat_ioctl = binder_ioctl,
5765 	.mmap = binder_mmap,
5766 	.open = binder_open,
5767 	.flush = binder_flush,
5768 	.release = binder_release,
5769 };
5770 
5771 DEFINE_SHOW_ATTRIBUTE(state);
5772 DEFINE_SHOW_ATTRIBUTE(stats);
5773 DEFINE_SHOW_ATTRIBUTE(transactions);
5774 DEFINE_SHOW_ATTRIBUTE(transaction_log);
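
/*
 * With debugfs mounted in its usual location, the attributes above
 * back the files created in binder_init() below, e.g.:
 *
 *	cat /sys/kernel/debug/binder/state
 *	cat /sys/kernel/debug/binder/transaction_log
 */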
5775 
5776 static int __init init_binder_device(const char *name)
5777 {
5778 	int ret;
5779 	struct binder_device *binder_device;
5780 
5781 	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5782 	if (!binder_device)
5783 		return -ENOMEM;
5784 
5785 	binder_device->miscdev.fops = &binder_fops;
5786 	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5787 	binder_device->miscdev.name = name;
5788 
5789 	binder_device->context.binder_context_mgr_uid = INVALID_UID;
5790 	binder_device->context.name = name;
5791 	mutex_init(&binder_device->context.context_mgr_node_lock);
5792 
5793 	ret = misc_register(&binder_device->miscdev);
5794 	if (ret < 0) {
5795 		kfree(binder_device);
5796 		return ret;
5797 	}
5798 
5799 	hlist_add_head(&binder_device->hlist, &binder_devices);
5800 
5801 	return ret;
5802 }
5803 
5804 static int __init binder_init(void)
5805 {
5806 	int ret;
5807 	char *device_name, *device_names, *device_tmp;
5808 	struct binder_device *device;
5809 	struct hlist_node *tmp;
5810 
5811 	ret = binder_alloc_shrinker_init();
5812 	if (ret)
5813 		return ret;
5814 
5815 	atomic_set(&binder_transaction_log.cur, ~0U);
5816 	atomic_set(&binder_transaction_log_failed.cur, ~0U);
5817 
5818 	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5819 	if (binder_debugfs_dir_entry_root)
5820 		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5821 						 binder_debugfs_dir_entry_root);
5822 
5823 	if (binder_debugfs_dir_entry_root) {
5824 		debugfs_create_file("state",
5825 				    0444,
5826 				    binder_debugfs_dir_entry_root,
5827 				    NULL,
5828 				    &state_fops);
5829 		debugfs_create_file("stats",
5830 				    0444,
5831 				    binder_debugfs_dir_entry_root,
5832 				    NULL,
5833 				    &stats_fops);
5834 		debugfs_create_file("transactions",
5835 				    0444,
5836 				    binder_debugfs_dir_entry_root,
5837 				    NULL,
5838 				    &transactions_fops);
5839 		debugfs_create_file("transaction_log",
5840 				    0444,
5841 				    binder_debugfs_dir_entry_root,
5842 				    &binder_transaction_log,
5843 				    &transaction_log_fops);
5844 		debugfs_create_file("failed_transaction_log",
5845 				    0444,
5846 				    binder_debugfs_dir_entry_root,
5847 				    &binder_transaction_log_failed,
5848 				    &transaction_log_fops);
5849 	}
5850 
5851 	/*
5852 	 * Copy the module parameter string, because we don't want to
5853 	 * tokenize it in-place.
5854 	 */
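	/*
	 * binder_devices_param defaults to CONFIG_ANDROID_BINDER_DEVICES
	 * (typically "binder,hwbinder,vndbinder"); init_binder_device()
	 * is called once per comma-separated name.
	 */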
5855 	device_names = kstrdup(binder_devices_param, GFP_KERNEL);
5856 	if (!device_names) {
5857 		ret = -ENOMEM;
5858 		goto err_alloc_device_names_failed;
5859 	}
5860 
5861 	device_tmp = device_names;
5862 	while ((device_name = strsep(&device_tmp, ","))) {
5863 		ret = init_binder_device(device_name);
5864 		if (ret)
5865 			goto err_init_binder_device_failed;
5866 	}
5867 
5868 	return ret;
5869 
5870 err_init_binder_device_failed:
5871 	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
5872 		misc_deregister(&device->miscdev);
5873 		hlist_del(&device->hlist);
5874 		kfree(device);
5875 	}
5876 
5877 	kfree(device_names);
5878 
5879 err_alloc_device_names_failed:
5880 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
5881 
5882 	return ret;
5883 }
5884 
5885 device_initcall(binder_init);
5886 
5887 #define CREATE_TRACE_POINTS
5888 #include "binder_trace.h"
5889 
5890 MODULE_LICENSE("GPL v2");
5891