xref: /openbmc/linux/drivers/android/binder.c (revision 9ccb6456)
1 /* binder.c
2  *
3  * Android IPC Subsystem
4  *
5  * Copyright (C) 2007-2008 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17 
18 /*
19  * Locking overview
20  *
21  * There are 3 main spinlocks which must be acquired in the
22  * order shown:
23  *
24  * 1) proc->outer_lock : protects binder_ref
25  *    binder_proc_lock() and binder_proc_unlock() are
26  *    used to acq/rel.
27  * 2) node->lock : protects most fields of binder_node.
28  *    binder_node_lock() and binder_node_unlock() are
29  *    used to acq/rel
30  * 3) proc->inner_lock : protects the thread and node lists
31  *    (proc->threads, proc->waiting_threads, proc->nodes)
32  *    and all todo lists associated with the binder_proc
33  *    (proc->todo, thread->todo, proc->delivered_death and
34  *    node->async_todo), as well as thread->transaction_stack
35  *    binder_inner_proc_lock() and binder_inner_proc_unlock()
36  *    are used to acq/rel
37  *
38  * Any lock under procA must never be nested under any lock at the same
39  * level or below on procB.
40  *
41  * Functions that require a lock to be held on entry indicate which
42  * lock is required via a suffix on the function name:
43  *
44  * foo_olocked() : requires node->outer_lock
45  * foo_nlocked() : requires node->lock
46  * foo_ilocked() : requires proc->inner_lock
47  * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
48  * foo_nilocked(): requires node->lock and proc->inner_lock
49  * ...
50  */
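/*
 * Illustrative sketch (not used anywhere in the driver): a caller that
 * needed all three locks would have to take them in the order above,
 * using the helpers defined later in this file:
 *
 *	binder_proc_lock(proc);			// 1) proc->outer_lock
 *	binder_node_lock(node);			// 2) node->lock
 *	binder_inner_proc_lock(proc);		// 3) proc->inner_lock
 *	// ... touch refs, node fields and todo lists ...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 *
 * Acquiring in any other order risks an ABBA deadlock against a thread
 * following the documented order.
 */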
51 
52 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
53 
54 #include <linux/fdtable.h>
55 #include <linux/file.h>
56 #include <linux/freezer.h>
57 #include <linux/fs.h>
58 #include <linux/list.h>
59 #include <linux/miscdevice.h>
60 #include <linux/module.h>
61 #include <linux/mutex.h>
62 #include <linux/nsproxy.h>
63 #include <linux/poll.h>
64 #include <linux/debugfs.h>
65 #include <linux/rbtree.h>
66 #include <linux/sched/signal.h>
67 #include <linux/sched/mm.h>
68 #include <linux/seq_file.h>
69 #include <linux/uaccess.h>
70 #include <linux/pid_namespace.h>
71 #include <linux/security.h>
72 #include <linux/spinlock.h>
73 #include <linux/ratelimit.h>
74 #include <linux/syscalls.h>
75 
76 #include <uapi/linux/android/binder.h>
77 
78 #include <asm/cacheflush.h>
79 
80 #include "binder_alloc.h"
81 #include "binder_trace.h"
82 
83 static HLIST_HEAD(binder_deferred_list);
84 static DEFINE_MUTEX(binder_deferred_lock);
85 
86 static HLIST_HEAD(binder_devices);
87 static HLIST_HEAD(binder_procs);
88 static DEFINE_MUTEX(binder_procs_lock);
89 
90 static HLIST_HEAD(binder_dead_nodes);
91 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
92 
93 static struct dentry *binder_debugfs_dir_entry_root;
94 static struct dentry *binder_debugfs_dir_entry_proc;
95 static atomic_t binder_last_id;
96 
97 #define BINDER_DEBUG_ENTRY(name) \
98 static int binder_##name##_open(struct inode *inode, struct file *file) \
99 { \
100 	return single_open(file, binder_##name##_show, inode->i_private); \
101 } \
102 \
103 static const struct file_operations binder_##name##_fops = { \
104 	.owner = THIS_MODULE, \
105 	.open = binder_##name##_open, \
106 	.read = seq_read, \
107 	.llseek = seq_lseek, \
108 	.release = single_release, \
109 }
110 
111 static int binder_proc_show(struct seq_file *m, void *unused);
112 BINDER_DEBUG_ENTRY(proc);
113 
114 /* This is only defined in include/asm-arm/sizes.h */
115 #ifndef SZ_1K
116 #define SZ_1K                               0x400
117 #endif
118 
119 #ifndef SZ_4M
120 #define SZ_4M                               0x400000
121 #endif
122 
123 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
124 
125 enum {
126 	BINDER_DEBUG_USER_ERROR             = 1U << 0,
127 	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
128 	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
129 	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
130 	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
131 	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
132 	BINDER_DEBUG_READ_WRITE             = 1U << 6,
133 	BINDER_DEBUG_USER_REFS              = 1U << 7,
134 	BINDER_DEBUG_THREADS                = 1U << 8,
135 	BINDER_DEBUG_TRANSACTION            = 1U << 9,
136 	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
137 	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
138 	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
139 	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
140 	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
141 };
142 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
143 	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
144 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
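/*
 * Note: the 0644 permission above exposes the mask at
 * /sys/module/binder/parameters/debug_mask, so individual classes of
 * output (e.g. BINDER_DEBUG_TRANSACTION, bit 9) can be toggled at
 * runtime by writing a new mask there, or set at boot via
 * binder.debug_mask=<value>.
 */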
145 
146 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
147 module_param_named(devices, binder_devices_param, charp, 0444);
148 
149 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
150 static int binder_stop_on_user_error;
151 
152 static int binder_set_stop_on_user_error(const char *val,
153 					 const struct kernel_param *kp)
154 {
155 	int ret;
156 
157 	ret = param_set_int(val, kp);
158 	if (binder_stop_on_user_error < 2)
159 		wake_up(&binder_user_error_wait);
160 	return ret;
161 }
162 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
163 	param_get_int, &binder_stop_on_user_error, 0644);
164 
165 #define binder_debug(mask, x...) \
166 	do { \
167 		if (binder_debug_mask & mask) \
168 			pr_info_ratelimited(x); \
169 	} while (0)
170 
171 #define binder_user_error(x...) \
172 	do { \
173 		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
174 			pr_info_ratelimited(x); \
175 		if (binder_stop_on_user_error) \
176 			binder_stop_on_user_error = 2; \
177 	} while (0)
178 
179 #define to_flat_binder_object(hdr) \
180 	container_of(hdr, struct flat_binder_object, hdr)
181 
182 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
183 
184 #define to_binder_buffer_object(hdr) \
185 	container_of(hdr, struct binder_buffer_object, hdr)
186 
187 #define to_binder_fd_array_object(hdr) \
188 	container_of(hdr, struct binder_fd_array_object, hdr)
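/*
 * These wrappers recover a full object from its embedded struct
 * binder_object_header. A sketch of the typical dispatch, modeled on
 * the object validation/translation code later in this file:
 *
 *	struct binder_object_header *hdr = ...;
 *
 *	switch (hdr->type) {
 *	case BINDER_TYPE_BINDER:
 *	case BINDER_TYPE_WEAK_BINDER:
 *	case BINDER_TYPE_HANDLE:
 *	case BINDER_TYPE_WEAK_HANDLE: {
 *		struct flat_binder_object *fp = to_flat_binder_object(hdr);
 *		// ... use fp->binder or fp->handle ...
 *		break;
 *	}
 *	case BINDER_TYPE_FD: {
 *		struct binder_fd_object *fdo = to_binder_fd_object(hdr);
 *		// ... use fdo->fd ...
 *		break;
 *	}
 *	}
 */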
189 
190 enum binder_stat_types {
191 	BINDER_STAT_PROC,
192 	BINDER_STAT_THREAD,
193 	BINDER_STAT_NODE,
194 	BINDER_STAT_REF,
195 	BINDER_STAT_DEATH,
196 	BINDER_STAT_TRANSACTION,
197 	BINDER_STAT_TRANSACTION_COMPLETE,
198 	BINDER_STAT_COUNT
199 };
200 
201 struct binder_stats {
202 	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
203 	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
204 	atomic_t obj_created[BINDER_STAT_COUNT];
205 	atomic_t obj_deleted[BINDER_STAT_COUNT];
206 };
207 
208 static struct binder_stats binder_stats;
209 
210 static inline void binder_stats_deleted(enum binder_stat_types type)
211 {
212 	atomic_inc(&binder_stats.obj_deleted[type]);
213 }
214 
215 static inline void binder_stats_created(enum binder_stat_types type)
216 {
217 	atomic_inc(&binder_stats.obj_created[type]);
218 }
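/*
 * The br[]/bc[] arrays in struct binder_stats are sized by the highest
 * return/command code, so a code is counted with its _IOC_NR as the
 * index. A sketch of the pattern used by the command-processing loop
 * later in this file:
 *
 *	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc))
 *		atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
 */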
219 
220 struct binder_transaction_log_entry {
221 	int debug_id;
222 	int debug_id_done;
223 	int call_type;
224 	int from_proc;
225 	int from_thread;
226 	int target_handle;
227 	int to_proc;
228 	int to_thread;
229 	int to_node;
230 	int data_size;
231 	int offsets_size;
232 	int return_error_line;
233 	uint32_t return_error;
234 	uint32_t return_error_param;
235 	const char *context_name;
236 };
237 struct binder_transaction_log {
238 	atomic_t cur;
239 	bool full;
240 	struct binder_transaction_log_entry entry[32];
241 };
242 static struct binder_transaction_log binder_transaction_log;
243 static struct binder_transaction_log binder_transaction_log_failed;
244 
245 static struct binder_transaction_log_entry *binder_transaction_log_add(
246 	struct binder_transaction_log *log)
247 {
248 	struct binder_transaction_log_entry *e;
249 	unsigned int cur = atomic_inc_return(&log->cur);
250 
251 	if (cur >= ARRAY_SIZE(log->entry))
252 		log->full = true;
253 	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
254 	WRITE_ONCE(e->debug_id_done, 0);
255 	/*
256 	 * write-barrier to synchronize access to e->debug_id_done.
257 	 * We make sure the initialized 0 value is seen before
258 	 * the other fields are zeroed by the memset() below.
259 	 */
260 	smp_wmb();
261 	memset(e, 0, sizeof(*e));
262 	return e;
263 }
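/*
 * A reader of the log pairs with the smp_wmb() above: it samples
 * e->debug_id_done before reading the entry fields and re-checks it
 * afterwards, flagging the entry as incomplete if a writer raced with
 * it. A sketch of that consumer side (the debugfs dump later in this
 * file follows this shape):
 *
 *	int debug_id = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();		// order debug_id_done read before fields
 *	// ... read/print the entry fields ...
 *	smp_rmb();		// order field reads before the re-check
 *	if (!(debug_id && debug_id == READ_ONCE(e->debug_id_done)))
 *		// entry was being rewritten; treat it as incomplete
 */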
264 
265 struct binder_context {
266 	struct binder_node *binder_context_mgr_node;
267 	struct mutex context_mgr_node_lock;
268 
269 	kuid_t binder_context_mgr_uid;
270 	const char *name;
271 };
272 
273 struct binder_device {
274 	struct hlist_node hlist;
275 	struct miscdevice miscdev;
276 	struct binder_context context;
277 };
278 
279 /**
280  * struct binder_work - work enqueued on a worklist
281  * @entry:             node enqueued on list
282  * @type:              type of work to be performed
283  *
284  * There are separate work lists for proc, thread, and node (async).
285  */
286 struct binder_work {
287 	struct list_head entry;
288 
289 	enum {
290 		BINDER_WORK_TRANSACTION = 1,
291 		BINDER_WORK_TRANSACTION_COMPLETE,
292 		BINDER_WORK_RETURN_ERROR,
293 		BINDER_WORK_NODE,
294 		BINDER_WORK_DEAD_BINDER,
295 		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
296 		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
297 	} type;
298 };
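/*
 * A binder_work is always embedded in a larger structure
 * (binder_transaction, binder_node, binder_error, ...). Consumers
 * dequeue the work item and recover its container based on w->type.
 * A sketch of the pattern used by the read path later in this file:
 *
 *	struct binder_work *w = binder_dequeue_work_head_ilocked(list);
 *
 *	switch (w->type) {
 *	case BINDER_WORK_TRANSACTION: {
 *		struct binder_transaction *t =
 *			container_of(w, struct binder_transaction, work);
 *		// ... deliver t to userspace ...
 *		break;
 *	}
 *	// the other types map to their own containing structures
 *	}
 */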
299 
300 struct binder_error {
301 	struct binder_work work;
302 	uint32_t cmd;
303 };
304 
305 /**
306  * struct binder_node - binder node bookkeeping
307  * @debug_id:             unique ID for debugging
308  *                        (invariant after initialized)
309  * @lock:                 lock for node fields
310  * @work:                 worklist element for node work
311  *                        (protected by @proc->inner_lock)
312  * @rb_node:              element for proc->nodes tree
313  *                        (protected by @proc->inner_lock)
314  * @dead_node:            element for binder_dead_nodes list
315  *                        (protected by binder_dead_nodes_lock)
316  * @proc:                 binder_proc that owns this node
317  *                        (invariant after initialized)
318  * @refs:                 list of references on this node
319  *                        (protected by @lock)
320  * @internal_strong_refs: used to take strong references when
321  *                        initiating a transaction
322  *                        (protected by @proc->inner_lock if @proc
323  *                        and by @lock)
324  * @local_weak_refs:      weak user refs from local process
325  *                        (protected by @proc->inner_lock if @proc
326  *                        and by @lock)
327  * @local_strong_refs:    strong user refs from local process
328  *                        (protected by @proc->inner_lock if @proc
329  *                        and by @lock)
330  * @tmp_refs:             temporary kernel refs
331  *                        (protected by @proc->inner_lock while @proc
332  *                        is valid, and by binder_dead_nodes_lock
333  *                        if @proc is NULL. During inc/dec and node release
334  *                        it is also protected by @lock to provide safety
335  *                        as the node dies and @proc becomes NULL)
336  * @ptr:                  userspace pointer for node
337  *                        (invariant, no lock needed)
338  * @cookie:               userspace cookie for node
339  *                        (invariant, no lock needed)
340  * @has_strong_ref:       userspace notified of strong ref
341  *                        (protected by @proc->inner_lock if @proc
342  *                        and by @lock)
343  * @pending_strong_ref:   userspace has acked notification of strong ref
344  *                        (protected by @proc->inner_lock if @proc
345  *                        and by @lock)
346  * @has_weak_ref:         userspace notified of weak ref
347  *                        (protected by @proc->inner_lock if @proc
348  *                        and by @lock)
349  * @pending_weak_ref:     userspace has acked notification of weak ref
350  *                        (protected by @proc->inner_lock if @proc
351  *                        and by @lock)
352  * @has_async_transaction: async transaction to node in progress
353  *                        (protected by @lock)
354  * @accept_fds:           file descriptor operations supported for node
355  *                        (invariant after initialized)
356  * @min_priority:         minimum scheduling priority
357  *                        (invariant after initialized)
358  * @async_todo:           list of async work items
359  *                        (protected by @proc->inner_lock)
360  *
361  * Bookkeeping structure for binder nodes.
362  */
363 struct binder_node {
364 	int debug_id;
365 	spinlock_t lock;
366 	struct binder_work work;
367 	union {
368 		struct rb_node rb_node;
369 		struct hlist_node dead_node;
370 	};
371 	struct binder_proc *proc;
372 	struct hlist_head refs;
373 	int internal_strong_refs;
374 	int local_weak_refs;
375 	int local_strong_refs;
376 	int tmp_refs;
377 	binder_uintptr_t ptr;
378 	binder_uintptr_t cookie;
379 	struct {
380 		/*
381 		 * bitfield elements protected by
382 		 * proc inner_lock
383 		 */
384 		u8 has_strong_ref:1;
385 		u8 pending_strong_ref:1;
386 		u8 has_weak_ref:1;
387 		u8 pending_weak_ref:1;
388 	};
389 	struct {
390 		/*
391 		 * invariant after initialization
392 		 */
393 		u8 accept_fds:1;
394 		u8 min_priority;
395 	};
396 	bool has_async_transaction;
397 	struct list_head async_todo;
398 };
399 
400 struct binder_ref_death {
401 	/**
402 	 * @work: worklist element for death notifications
403 	 *        (protected by inner_lock of the proc that
404 	 *        this ref belongs to)
405 	 */
406 	struct binder_work work;
407 	binder_uintptr_t cookie;
408 };
409 
410 /**
411  * struct binder_ref_data - binder_ref counts and id
412  * @debug_id:        unique ID for the ref
413  * @desc:            unique userspace handle for ref
414  * @strong:          strong ref count (debugging only if not locked)
415  * @weak:            weak ref count (debugging only if not locked)
416  *
417  * Structure to hold ref count and ref id information. Since
418  * the actual ref can only be accessed with a lock, this structure
419  * is used to return information about the ref to callers of
420  * ref inc/dec functions.
421  */
422 struct binder_ref_data {
423 	int debug_id;
424 	uint32_t desc;
425 	int strong;
426 	int weak;
427 };
428 
429 /**
430  * struct binder_ref - struct to track references on nodes
431  * @data:        binder_ref_data containing id, handle, and current refcounts
432  * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
433  * @rb_node_node: node for lookup by @node in proc's rb_tree
434  * @node_entry:  list entry for node->refs list in target node
435  *               (protected by @node->lock)
436  * @proc:        binder_proc containing ref
437  * @node:        binder_node of target node. When cleaning up a
438  *               ref for deletion in binder_cleanup_ref, a non-NULL
439  *               @node indicates the node must be freed
440  * @death:       pointer to death notification (ref_death) if requested
441  *               (protected by @node->lock)
442  *
443  * Structure to track references from procA to target node (on procB). This
444  * structure is unsafe to access without holding @proc->outer_lock.
445  */
446 struct binder_ref {
447 	/* Lookups needed: */
448 	/*   node + proc => ref (transaction) */
449 	/*   desc + proc => ref (transaction, inc/dec ref) */
450 	/*   node => refs + procs (proc exit) */
451 	struct binder_ref_data data;
452 	struct rb_node rb_node_desc;
453 	struct rb_node rb_node_node;
454 	struct hlist_node node_entry;
455 	struct binder_proc *proc;
456 	struct binder_node *node;
457 	struct binder_ref_death *death;
458 };
459 
460 enum binder_deferred_state {
461 	BINDER_DEFERRED_FLUSH        = 0x01,
462 	BINDER_DEFERRED_RELEASE      = 0x02,
463 };
464 
465 /**
466  * struct binder_proc - binder process bookkeeping
467  * @proc_node:            element for binder_procs list
468  * @threads:              rbtree of binder_threads in this proc
469  *                        (protected by @inner_lock)
470  * @nodes:                rbtree of binder nodes associated with
471  *                        this proc ordered by node->ptr
472  *                        (protected by @inner_lock)
473  * @refs_by_desc:         rbtree of refs ordered by ref->desc
474  *                        (protected by @outer_lock)
475  * @refs_by_node:         rbtree of refs ordered by ref->node
476  *                        (protected by @outer_lock)
477  * @waiting_threads:      threads currently waiting for proc work
478  *                        (protected by @inner_lock)
479  * @pid:                  PID of group_leader of process
480  *                        (invariant after initialized)
481  * @tsk:                  task_struct for group_leader of process
482  *                        (invariant after initialized)
483  * @deferred_work_node:   element for binder_deferred_list
484  *                        (protected by binder_deferred_lock)
485  * @deferred_work:        bitmap of deferred work to perform
486  *                        (protected by binder_deferred_lock)
487  * @is_dead:              process is dead and awaiting free
488  *                        when outstanding transactions are cleaned up
489  *                        (protected by @inner_lock)
490  * @todo:                 list of work for this process
491  *                        (protected by @inner_lock)
492  * @stats:                per-process binder statistics
493  *                        (atomics, no lock needed)
494  * @delivered_death:      list of delivered death notifications
495  *                        (protected by @inner_lock)
496  * @max_threads:          cap on number of binder threads
497  *                        (protected by @inner_lock)
498  * @requested_threads:    number of binder threads requested but not
499  *                        yet started. In the current implementation, this can
500  *                        only be 0 or 1.
501  *                        (protected by @inner_lock)
502  * @requested_threads_started: number of binder threads started
503  *                        (protected by @inner_lock)
504  * @tmp_ref:              temporary reference to indicate proc is in use
505  *                        (protected by @inner_lock)
506  * @default_priority:     default scheduler priority
507  *                        (invariant after initialized)
508  * @debugfs_entry:        debugfs node
509  * @alloc:                binder allocator bookkeeping
510  * @context:              binder_context for this proc
511  *                        (invariant after initialized)
512  * @inner_lock:           can nest under outer_lock and/or node lock
513  * @outer_lock:           no nesting under inner or node lock
514  *                        Lock order: 1) outer, 2) node, 3) inner
515  *
516  * Bookkeeping structure for binder processes
517  */
518 struct binder_proc {
519 	struct hlist_node proc_node;
520 	struct rb_root threads;
521 	struct rb_root nodes;
522 	struct rb_root refs_by_desc;
523 	struct rb_root refs_by_node;
524 	struct list_head waiting_threads;
525 	int pid;
526 	struct task_struct *tsk;
527 	struct hlist_node deferred_work_node;
528 	int deferred_work;
529 	bool is_dead;
530 
531 	struct list_head todo;
532 	struct binder_stats stats;
533 	struct list_head delivered_death;
534 	int max_threads;
535 	int requested_threads;
536 	int requested_threads_started;
537 	int tmp_ref;
538 	long default_priority;
539 	struct dentry *debugfs_entry;
540 	struct binder_alloc alloc;
541 	struct binder_context *context;
542 	spinlock_t inner_lock;
543 	spinlock_t outer_lock;
544 };
545 
546 enum {
547 	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
548 	BINDER_LOOPER_STATE_ENTERED     = 0x02,
549 	BINDER_LOOPER_STATE_EXITED      = 0x04,
550 	BINDER_LOOPER_STATE_INVALID     = 0x08,
551 	BINDER_LOOPER_STATE_WAITING     = 0x10,
552 	BINDER_LOOPER_STATE_POLL        = 0x20,
553 };
554 
555 /**
556  * struct binder_thread - binder thread bookkeeping
557  * @proc:                 binder process for this thread
558  *                        (invariant after initialization)
559  * @rb_node:              element for proc->threads rbtree
560  *                        (protected by @proc->inner_lock)
561  * @waiting_thread_node:  element for @proc->waiting_threads list
562  *                        (protected by @proc->inner_lock)
563  * @pid:                  PID for this thread
564  *                        (invariant after initialization)
565  * @looper:               bitmap of looping state
566  *                        (only accessed by this thread)
567  * @looper_need_return:   looping thread needs to exit driver
568  *                        (no lock needed)
569  * @transaction_stack:    stack of in-progress transactions for this thread
570  *                        (protected by @proc->inner_lock)
571  * @todo:                 list of work to do for this thread
572  *                        (protected by @proc->inner_lock)
573  * @process_todo:         whether work in @todo should be processed
574  *                        (protected by @proc->inner_lock)
575  * @return_error:         transaction errors reported by this thread
576  *                        (only accessed by this thread)
577  * @reply_error:          transaction errors reported by target thread
578  *                        (protected by @proc->inner_lock)
579  * @wait:                 wait queue for thread work
580  * @stats:                per-thread statistics
581  *                        (atomics, no lock needed)
582  * @tmp_ref:              temporary reference to indicate thread is in use
583  *                        (atomic since @proc->inner_lock cannot
584  *                        always be acquired)
585  * @is_dead:              thread is dead and awaiting free
586  *                        when outstanding transactions are cleaned up
587  *                        (protected by @proc->inner_lock)
588  *
589  * Bookkeeping structure for binder threads.
590  */
591 struct binder_thread {
592 	struct binder_proc *proc;
593 	struct rb_node rb_node;
594 	struct list_head waiting_thread_node;
595 	int pid;
596 	int looper;              /* only modified by this thread */
597 	bool looper_need_return; /* can be written by other thread */
598 	struct binder_transaction *transaction_stack;
599 	struct list_head todo;
600 	bool process_todo;
601 	struct binder_error return_error;
602 	struct binder_error reply_error;
603 	wait_queue_head_t wait;
604 	struct binder_stats stats;
605 	atomic_t tmp_ref;
606 	bool is_dead;
607 };
608 
609 /**
610  * struct binder_txn_fd_fixup - transaction fd fixup list element
611  * @fixup_entry:          list entry
612  * @file:                 struct file to be associated with new fd
613  * @offset:               offset in buffer data to this fixup
614  *
615  * List element for fd fixups in a transaction. Since file
616  * descriptors need to be allocated in the context of the
617  * target process, we pass each fd to be processed in this
618  * struct.
619  */
620 struct binder_txn_fd_fixup {
621 	struct list_head fixup_entry;
622 	struct file *file;
623 	size_t offset;
624 };
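/*
 * A sketch of how the target side consumes these fixups, modeled on
 * the fixup-processing code later in this file: each queued file gets
 * a descriptor allocated in the target process and the resulting fd is
 * patched into the transaction buffer at @offset:
 *
 *	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
 *		int fd = get_unused_fd_flags(O_CLOEXEC);
 *
 *		if (fd < 0)
 *			// unwind any fds installed so far and fail
 *		fd_install(fd, fixup->file);
 *		// write fd into the buffer at fixup->offset
 *	}
 */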
625 
626 struct binder_transaction {
627 	int debug_id;
628 	struct binder_work work;
629 	struct binder_thread *from;
630 	struct binder_transaction *from_parent;
631 	struct binder_proc *to_proc;
632 	struct binder_thread *to_thread;
633 	struct binder_transaction *to_parent;
634 	unsigned need_reply:1;
635 	/* unsigned is_dead:1; */	/* not used at the moment */
636 
637 	struct binder_buffer *buffer;
638 	unsigned int	code;
639 	unsigned int	flags;
640 	long	priority;
641 	long	saved_priority;
642 	kuid_t	sender_euid;
643 	struct list_head fd_fixups;
644 	/**
645 	 * @lock:  protects @from, @to_proc, and @to_thread
646 	 *
647 	 * @from, @to_proc, and @to_thread can be set to NULL
648 	 * during thread teardown
649 	 */
650 	spinlock_t lock;
651 };
652 
653 /**
654  * binder_proc_lock() - Acquire outer lock for given binder_proc
655  * @proc:         struct binder_proc to acquire
656  *
657  * Acquires proc->outer_lock. Used to protect binder_ref
658  * structures associated with the given proc.
659  */
660 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
661 static void
662 _binder_proc_lock(struct binder_proc *proc, int line)
663 	__acquires(&proc->outer_lock)
664 {
665 	binder_debug(BINDER_DEBUG_SPINLOCKS,
666 		     "%s: line=%d\n", __func__, line);
667 	spin_lock(&proc->outer_lock);
668 }
669 
670 /**
671  * binder_proc_unlock() - Release spinlock for given binder_proc
672  * @proc:         struct binder_proc to release
673  *
674  * Release lock acquired via binder_proc_lock()
675  */
676 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
677 static void
678 _binder_proc_unlock(struct binder_proc *proc, int line)
679 	__releases(&proc->outer_lock)
680 {
681 	binder_debug(BINDER_DEBUG_SPINLOCKS,
682 		     "%s: line=%d\n", __func__, line);
683 	spin_unlock(&proc->outer_lock);
684 }
685 
686 /**
687  * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
688  * @proc:         struct binder_proc to acquire
689  *
690  * Acquires proc->inner_lock. Used to protect todo lists
691  */
692 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
693 static void
694 _binder_inner_proc_lock(struct binder_proc *proc, int line)
695 	__acquires(&proc->inner_lock)
696 {
697 	binder_debug(BINDER_DEBUG_SPINLOCKS,
698 		     "%s: line=%d\n", __func__, line);
699 	spin_lock(&proc->inner_lock);
700 }
701 
702 /**
703  * binder_inner_proc_unlock() - Release inner lock for given binder_proc
704  * @proc:         struct binder_proc to release
705  *
706  * Release lock acquired via binder_inner_proc_lock()
707  */
708 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
709 static void
710 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
711 	__releases(&proc->inner_lock)
712 {
713 	binder_debug(BINDER_DEBUG_SPINLOCKS,
714 		     "%s: line=%d\n", __func__, line);
715 	spin_unlock(&proc->inner_lock);
716 }
717 
718 /**
719  * binder_node_lock() - Acquire spinlock for given binder_node
720  * @node:         struct binder_node to acquire
721  *
722  * Acquires node->lock. Used to protect binder_node fields
723  */
724 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
725 static void
726 _binder_node_lock(struct binder_node *node, int line)
727 	__acquires(&node->lock)
728 {
729 	binder_debug(BINDER_DEBUG_SPINLOCKS,
730 		     "%s: line=%d\n", __func__, line);
731 	spin_lock(&node->lock);
732 }
733 
734 /**
735  * binder_node_unlock() - Release spinlock for given binder_node
736  * @node:         struct binder_node to release
737  *
738  * Release lock acquired via binder_node_lock()
739  */
740 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
741 static void
742 _binder_node_unlock(struct binder_node *node, int line)
743 	__releases(&node->lock)
744 {
745 	binder_debug(BINDER_DEBUG_SPINLOCKS,
746 		     "%s: line=%d\n", __func__, line);
747 	spin_unlock(&node->lock);
748 }
749 
750 /**
751  * binder_node_inner_lock() - Acquire node and inner locks
752  * @node:         struct binder_node to acquire
753  *
754  * Acquires node->lock. If node->proc is non-NULL, also acquires
755  * proc->inner_lock. Used to protect binder_node fields
756  */
757 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
758 static void
759 _binder_node_inner_lock(struct binder_node *node, int line)
760 	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
761 {
762 	binder_debug(BINDER_DEBUG_SPINLOCKS,
763 		     "%s: line=%d\n", __func__, line);
764 	spin_lock(&node->lock);
765 	if (node->proc)
766 		binder_inner_proc_lock(node->proc);
767 	else
768 		/* annotation for sparse */
769 		__acquire(&node->proc->inner_lock);
770 }
771 
772 /**
773  * binder_node_inner_unlock() - Release node and inner locks
774  * @node:         struct binder_node to release
775  *
776  * Release locks acquired via binder_node_inner_lock()
777  */
778 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
779 static void
780 _binder_node_inner_unlock(struct binder_node *node, int line)
781 	__releases(&node->lock) __releases(&node->proc->inner_lock)
782 {
783 	struct binder_proc *proc = node->proc;
784 
785 	binder_debug(BINDER_DEBUG_SPINLOCKS,
786 		     "%s: line=%d\n", __func__, line);
787 	if (proc)
788 		binder_inner_proc_unlock(proc);
789 	else
790 		/* annotation for sparse */
791 		__release(&node->proc->inner_lock);
792 	spin_unlock(&node->lock);
793 }
794 
795 static bool binder_worklist_empty_ilocked(struct list_head *list)
796 {
797 	return list_empty(list);
798 }
799 
800 /**
801  * binder_worklist_empty() - Check if no items on the work list
802  * @proc:       binder_proc associated with list
803  * @list:	list to check
804  *
805  * Return: true if there are no items on list, else false
806  */
807 static bool binder_worklist_empty(struct binder_proc *proc,
808 				  struct list_head *list)
809 {
810 	bool ret;
811 
812 	binder_inner_proc_lock(proc);
813 	ret = binder_worklist_empty_ilocked(list);
814 	binder_inner_proc_unlock(proc);
815 	return ret;
816 }
817 
818 /**
819  * binder_enqueue_work_ilocked() - Add an item to the work list
820  * @work:         struct binder_work to add to list
821  * @target_list:  list to add work to
822  *
823  * Adds the work to the specified list. Asserts that work
824  * is not already on a list.
825  *
826  * Requires the proc->inner_lock to be held.
827  */
828 static void
829 binder_enqueue_work_ilocked(struct binder_work *work,
830 			   struct list_head *target_list)
831 {
832 	BUG_ON(target_list == NULL);
833 	BUG_ON(work->entry.next && !list_empty(&work->entry));
834 	list_add_tail(&work->entry, target_list);
835 }
836 
837 /**
838  * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
839  * @thread:       thread to queue work to
840  * @work:         struct binder_work to add to list
841  *
842  * Adds the work to the todo list of the thread. Doesn't set the process_todo
843  * flag, which means that (if it wasn't already set) the thread will go to
844  * sleep without handling this work when it calls read.
845  *
846  * Requires the proc->inner_lock to be held.
847  */
848 static void
849 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
850 					    struct binder_work *work)
851 {
852 	WARN_ON(!list_empty(&thread->waiting_thread_node));
853 	binder_enqueue_work_ilocked(work, &thread->todo);
854 }
855 
856 /**
857  * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
858  * @thread:       thread to queue work to
859  * @work:         struct binder_work to add to list
860  *
861  * Adds the work to the todo list of the thread, and enables processing
862  * of the todo queue.
863  *
864  * Requires the proc->inner_lock to be held.
865  */
866 static void
867 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
868 				   struct binder_work *work)
869 {
870 	WARN_ON(!list_empty(&thread->waiting_thread_node));
871 	binder_enqueue_work_ilocked(work, &thread->todo);
872 	thread->process_todo = true;
873 }
874 
875 /**
876  * binder_enqueue_thread_work() - Add an item to the thread work list
877  * @thread:       thread to queue work to
878  * @work:         struct binder_work to add to list
879  *
880  * Adds the work to the todo list of the thread, and enables processing
881  * of the todo queue.
882  */
883 static void
884 binder_enqueue_thread_work(struct binder_thread *thread,
885 			   struct binder_work *work)
886 {
887 	binder_inner_proc_lock(thread->proc);
888 	binder_enqueue_thread_work_ilocked(thread, work);
889 	binder_inner_proc_unlock(thread->proc);
890 }
891 
892 static void
893 binder_dequeue_work_ilocked(struct binder_work *work)
894 {
895 	list_del_init(&work->entry);
896 }
897 
898 /**
899  * binder_dequeue_work() - Removes an item from the work list
900  * @proc:         binder_proc associated with list
901  * @work:         struct binder_work to remove from list
902  *
903  * Removes the specified work item from whatever list it is on.
904  * Can safely be called if work is not on any list.
905  */
906 static void
907 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
908 {
909 	binder_inner_proc_lock(proc);
910 	binder_dequeue_work_ilocked(work);
911 	binder_inner_proc_unlock(proc);
912 }
913 
914 static struct binder_work *binder_dequeue_work_head_ilocked(
915 					struct list_head *list)
916 {
917 	struct binder_work *w;
918 
919 	w = list_first_entry_or_null(list, struct binder_work, entry);
920 	if (w)
921 		list_del_init(&w->entry);
922 	return w;
923 }
924 
925 /**
926  * binder_dequeue_work_head() - Dequeues the item at head of list
927  * @proc:         binder_proc associated with list
928  * @list:         list to dequeue head
929  *
930  * Removes the head of the list if there are items on the list
931  *
932  * Return: pointer to the dequeued binder_work, or NULL if the list was empty
933  */
934 static struct binder_work *binder_dequeue_work_head(
935 					struct binder_proc *proc,
936 					struct list_head *list)
937 {
938 	struct binder_work *w;
939 
940 	binder_inner_proc_lock(proc);
941 	w = binder_dequeue_work_head_ilocked(list);
942 	binder_inner_proc_unlock(proc);
943 	return w;
944 }
945 
946 static void
947 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
948 static void binder_free_thread(struct binder_thread *thread);
949 static void binder_free_proc(struct binder_proc *proc);
950 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
951 
952 static bool binder_has_work_ilocked(struct binder_thread *thread,
953 				    bool do_proc_work)
954 {
955 	return thread->process_todo ||
956 		thread->looper_need_return ||
957 		(do_proc_work &&
958 		 !binder_worklist_empty_ilocked(&thread->proc->todo));
959 }
960 
961 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
962 {
963 	bool has_work;
964 
965 	binder_inner_proc_lock(thread->proc);
966 	has_work = binder_has_work_ilocked(thread, do_proc_work);
967 	binder_inner_proc_unlock(thread->proc);
968 
969 	return has_work;
970 }
971 
972 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
973 {
974 	return !thread->transaction_stack &&
975 		binder_worklist_empty_ilocked(&thread->todo) &&
976 		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
977 				   BINDER_LOOPER_STATE_REGISTERED));
978 }
979 
980 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
981 					       bool sync)
982 {
983 	struct rb_node *n;
984 	struct binder_thread *thread;
985 
986 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
987 		thread = rb_entry(n, struct binder_thread, rb_node);
988 		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
989 		    binder_available_for_proc_work_ilocked(thread)) {
990 			if (sync)
991 				wake_up_interruptible_sync(&thread->wait);
992 			else
993 				wake_up_interruptible(&thread->wait);
994 		}
995 	}
996 }
997 
998 /**
999  * binder_select_thread_ilocked() - selects a thread for doing proc work.
1000  * @proc:	process to select a thread from
1001  *
1002  * Note that calling this function moves the thread off the waiting_threads
1003  * list, so it can only be woken up by the caller of this function, or a
1004  * signal. Therefore, callers *should* always wake up the thread this function
1005  * returns.
1006  *
1007  * Return:	If there's a thread currently waiting for process work,
1008  *		returns that thread. Otherwise returns NULL.
1009  */
1010 static struct binder_thread *
1011 binder_select_thread_ilocked(struct binder_proc *proc)
1012 {
1013 	struct binder_thread *thread;
1014 
1015 	assert_spin_locked(&proc->inner_lock);
1016 	thread = list_first_entry_or_null(&proc->waiting_threads,
1017 					  struct binder_thread,
1018 					  waiting_thread_node);
1019 
1020 	if (thread)
1021 		list_del_init(&thread->waiting_thread_node);
1022 
1023 	return thread;
1024 }
1025 
1026 /**
1027  * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
1028  * @proc:	process to wake up a thread in
1029  * @thread:	specific thread to wake-up (may be NULL)
1030  * @sync:	whether to do a synchronous wake-up
1031  *
1032  * This function wakes up a thread in the @proc process.
1033  * The caller may provide a specific thread to wake-up in
1034  * the @thread parameter. If @thread is NULL, this function
1035  * will wake up threads that have called poll().
1036  *
1037  * Note that for this function to work as expected, callers
1038  * should first call binder_select_thread() to find a thread
1039  * to handle the work (if they don't have a thread already),
1040  * and pass the result into the @thread parameter.
1041  */
1042 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
1043 					 struct binder_thread *thread,
1044 					 bool sync)
1045 {
1046 	assert_spin_locked(&proc->inner_lock);
1047 
1048 	if (thread) {
1049 		if (sync)
1050 			wake_up_interruptible_sync(&thread->wait);
1051 		else
1052 			wake_up_interruptible(&thread->wait);
1053 		return;
1054 	}
1055 
1056 	/* Didn't find a thread waiting for proc work; this can happen
1057 	 * in two scenarios:
1058 	 * 1. All threads are busy handling transactions
1059 	 *    In that case, one of those threads should call back into
1060 	 *    the kernel driver soon and pick up this work.
1061 	 * 2. Threads are using the (e)poll interface, in which case
1062 	 *    they may be blocked on the waitqueue without having been
1063 	 *    added to waiting_threads. For this case, we just iterate
1064 	 *    over all threads not handling transaction work, and
1065 	 *    wake them all up. We wake all because we don't know whether
1066 	 *    a thread that called into (e)poll is handling non-binder
1067 	 *    work currently.
1068 	 */
1069 	binder_wakeup_poll_threads_ilocked(proc, sync);
1070 }
1071 
1072 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1073 {
1074 	struct binder_thread *thread = binder_select_thread_ilocked(proc);
1075 
1076 	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1077 }
1078 
1079 static void binder_set_nice(long nice)
1080 {
1081 	long min_nice;
1082 
1083 	if (can_nice(current, nice)) {
1084 		set_user_nice(current, nice);
1085 		return;
1086 	}
1087 	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
1088 	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1089 		     "%d: nice value %ld not allowed use %ld instead\n",
1090 		      current->pid, nice, min_nice);
1091 	set_user_nice(current, min_nice);
1092 	if (min_nice <= MAX_NICE)
1093 		return;
1094 	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
1095 }
1096 
1097 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1098 						   binder_uintptr_t ptr)
1099 {
1100 	struct rb_node *n = proc->nodes.rb_node;
1101 	struct binder_node *node;
1102 
1103 	assert_spin_locked(&proc->inner_lock);
1104 
1105 	while (n) {
1106 		node = rb_entry(n, struct binder_node, rb_node);
1107 
1108 		if (ptr < node->ptr)
1109 			n = n->rb_left;
1110 		else if (ptr > node->ptr)
1111 			n = n->rb_right;
1112 		else {
1113 			/*
1114 			 * take an implicit weak reference
1115 			 * to ensure node stays alive until
1116 			 * call to binder_put_node()
1117 			 */
1118 			binder_inc_node_tmpref_ilocked(node);
1119 			return node;
1120 		}
1121 	}
1122 	return NULL;
1123 }
1124 
1125 static struct binder_node *binder_get_node(struct binder_proc *proc,
1126 					   binder_uintptr_t ptr)
1127 {
1128 	struct binder_node *node;
1129 
1130 	binder_inner_proc_lock(proc);
1131 	node = binder_get_node_ilocked(proc, ptr);
1132 	binder_inner_proc_unlock(proc);
1133 	return node;
1134 }
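/*
 * Since binder_get_node() took a temporary reference, callers pair it
 * with binder_put_node() (defined below) when done. A sketch of the
 * usage pattern:
 *
 *	struct binder_node *node = binder_get_node(proc, ptr);
 *
 *	if (node) {
 *		// ... node cannot be freed while the tmp ref is held ...
 *		binder_put_node(node);
 *	}
 */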
1135 
1136 static struct binder_node *binder_init_node_ilocked(
1137 						struct binder_proc *proc,
1138 						struct binder_node *new_node,
1139 						struct flat_binder_object *fp)
1140 {
1141 	struct rb_node **p = &proc->nodes.rb_node;
1142 	struct rb_node *parent = NULL;
1143 	struct binder_node *node;
1144 	binder_uintptr_t ptr = fp ? fp->binder : 0;
1145 	binder_uintptr_t cookie = fp ? fp->cookie : 0;
1146 	__u32 flags = fp ? fp->flags : 0;
1147 
1148 	assert_spin_locked(&proc->inner_lock);
1149 
1150 	while (*p) {
1151 
1152 		parent = *p;
1153 		node = rb_entry(parent, struct binder_node, rb_node);
1154 
1155 		if (ptr < node->ptr)
1156 			p = &(*p)->rb_left;
1157 		else if (ptr > node->ptr)
1158 			p = &(*p)->rb_right;
1159 		else {
1160 			/*
1161 			 * A matching node is already in
1162 			 * the rb tree. Abandon the init
1163 			 * and return it.
1164 			 */
1165 			binder_inc_node_tmpref_ilocked(node);
1166 			return node;
1167 		}
1168 	}
1169 	node = new_node;
1170 	binder_stats_created(BINDER_STAT_NODE);
1171 	node->tmp_refs++;
1172 	rb_link_node(&node->rb_node, parent, p);
1173 	rb_insert_color(&node->rb_node, &proc->nodes);
1174 	node->debug_id = atomic_inc_return(&binder_last_id);
1175 	node->proc = proc;
1176 	node->ptr = ptr;
1177 	node->cookie = cookie;
1178 	node->work.type = BINDER_WORK_NODE;
1179 	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1180 	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1181 	spin_lock_init(&node->lock);
1182 	INIT_LIST_HEAD(&node->work.entry);
1183 	INIT_LIST_HEAD(&node->async_todo);
1184 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1185 		     "%d:%d node %d u%016llx c%016llx created\n",
1186 		     proc->pid, current->pid, node->debug_id,
1187 		     (u64)node->ptr, (u64)node->cookie);
1188 
1189 	return node;
1190 }
1191 
1192 static struct binder_node *binder_new_node(struct binder_proc *proc,
1193 					   struct flat_binder_object *fp)
1194 {
1195 	struct binder_node *node;
1196 	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1197 
1198 	if (!new_node)
1199 		return NULL;
1200 	binder_inner_proc_lock(proc);
1201 	node = binder_init_node_ilocked(proc, new_node, fp);
1202 	binder_inner_proc_unlock(proc);
1203 	if (node != new_node)
1204 		/*
1205 		 * The node was already added by another thread
1206 		 */
1207 		kfree(new_node);
1208 
1209 	return node;
1210 }
1211 
1212 static void binder_free_node(struct binder_node *node)
1213 {
1214 	kfree(node);
1215 	binder_stats_deleted(BINDER_STAT_NODE);
1216 }
1217 
1218 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1219 				    int internal,
1220 				    struct list_head *target_list)
1221 {
1222 	struct binder_proc *proc = node->proc;
1223 
1224 	assert_spin_locked(&node->lock);
1225 	if (proc)
1226 		assert_spin_locked(&proc->inner_lock);
1227 	if (strong) {
1228 		if (internal) {
1229 			if (target_list == NULL &&
1230 			    node->internal_strong_refs == 0 &&
1231 			    !(node->proc &&
1232 			      node == node->proc->context->binder_context_mgr_node &&
1233 			      node->has_strong_ref)) {
1234 				pr_err("invalid inc strong node for %d\n",
1235 					node->debug_id);
1236 				return -EINVAL;
1237 			}
1238 			node->internal_strong_refs++;
1239 		} else
1240 			node->local_strong_refs++;
1241 		if (!node->has_strong_ref && target_list) {
1242 			struct binder_thread *thread = container_of(target_list,
1243 						    struct binder_thread, todo);
1244 			binder_dequeue_work_ilocked(&node->work);
1245 			BUG_ON(&thread->todo != target_list);
1246 			binder_enqueue_deferred_thread_work_ilocked(thread,
1247 								   &node->work);
1248 		}
1249 	} else {
1250 		if (!internal)
1251 			node->local_weak_refs++;
1252 		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1253 			if (target_list == NULL) {
1254 				pr_err("invalid inc weak node for %d\n",
1255 					node->debug_id);
1256 				return -EINVAL;
1257 			}
1258 			/*
1259 			 * See comment above
1260 			 */
1261 			binder_enqueue_work_ilocked(&node->work, target_list);
1262 		}
1263 	}
1264 	return 0;
1265 }
1266 
1267 static int binder_inc_node(struct binder_node *node, int strong, int internal,
1268 			   struct list_head *target_list)
1269 {
1270 	int ret;
1271 
1272 	binder_node_inner_lock(node);
1273 	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1274 	binder_node_inner_unlock(node);
1275 
1276 	return ret;
1277 }
1278 
1279 static bool binder_dec_node_nilocked(struct binder_node *node,
1280 				     int strong, int internal)
1281 {
1282 	struct binder_proc *proc = node->proc;
1283 
1284 	assert_spin_locked(&node->lock);
1285 	if (proc)
1286 		assert_spin_locked(&proc->inner_lock);
1287 	if (strong) {
1288 		if (internal)
1289 			node->internal_strong_refs--;
1290 		else
1291 			node->local_strong_refs--;
1292 		if (node->local_strong_refs || node->internal_strong_refs)
1293 			return false;
1294 	} else {
1295 		if (!internal)
1296 			node->local_weak_refs--;
1297 		if (node->local_weak_refs || node->tmp_refs ||
1298 				!hlist_empty(&node->refs))
1299 			return false;
1300 	}
1301 
1302 	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1303 		if (list_empty(&node->work.entry)) {
1304 			binder_enqueue_work_ilocked(&node->work, &proc->todo);
1305 			binder_wakeup_proc_ilocked(proc);
1306 		}
1307 	} else {
1308 		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1309 		    !node->local_weak_refs && !node->tmp_refs) {
1310 			if (proc) {
1311 				binder_dequeue_work_ilocked(&node->work);
1312 				rb_erase(&node->rb_node, &proc->nodes);
1313 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1314 					     "refless node %d deleted\n",
1315 					     node->debug_id);
1316 			} else {
1317 				BUG_ON(!list_empty(&node->work.entry));
1318 				spin_lock(&binder_dead_nodes_lock);
1319 				/*
1320 				 * tmp_refs could have changed so
1321 				 * check it again
1322 				 */
1323 				if (node->tmp_refs) {
1324 					spin_unlock(&binder_dead_nodes_lock);
1325 					return false;
1326 				}
1327 				hlist_del(&node->dead_node);
1328 				spin_unlock(&binder_dead_nodes_lock);
1329 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1330 					     "dead node %d deleted\n",
1331 					     node->debug_id);
1332 			}
1333 			return true;
1334 		}
1335 	}
1336 	return false;
1337 }
1338 
1339 static void binder_dec_node(struct binder_node *node, int strong, int internal)
1340 {
1341 	bool free_node;
1342 
1343 	binder_node_inner_lock(node);
1344 	free_node = binder_dec_node_nilocked(node, strong, internal);
1345 	binder_node_inner_unlock(node);
1346 	if (free_node)
1347 		binder_free_node(node);
1348 }
1349 
1350 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1351 {
1352 	/*
1353 	 * No call to binder_inc_node() is needed since we
1354 	 * don't need to inform userspace of any changes to
1355 	 * tmp_refs
1356 	 */
1357 	node->tmp_refs++;
1358 }
1359 
1360 /**
1361  * binder_inc_node_tmpref() - take a temporary reference on node
1362  * @node:	node to reference
1363  *
1364  * Take reference on node to prevent the node from being freed
1365  * while referenced only by a local variable. The inner lock is
1366  * needed to serialize with the node work on the queue (which
1367  * isn't needed after the node is dead). If the node is dead
1368  * (node->proc is NULL), use binder_dead_nodes_lock to protect
1369  * node->tmp_refs against dead-node-only cases where the node
1370  * lock cannot be acquired (e.g. traversing the dead node list to
1371  * print nodes)
1372  */
1373 static void binder_inc_node_tmpref(struct binder_node *node)
1374 {
1375 	binder_node_lock(node);
1376 	if (node->proc)
1377 		binder_inner_proc_lock(node->proc);
1378 	else
1379 		spin_lock(&binder_dead_nodes_lock);
1380 	binder_inc_node_tmpref_ilocked(node);
1381 	if (node->proc)
1382 		binder_inner_proc_unlock(node->proc);
1383 	else
1384 		spin_unlock(&binder_dead_nodes_lock);
1385 	binder_node_unlock(node);
1386 }
1387 
1388 /**
1389  * binder_dec_node_tmpref() - remove a temporary reference on node
1390  * @node:	node to reference
1391  *
1392  * Release temporary reference on node taken via binder_inc_node_tmpref()
1393  */
1394 static void binder_dec_node_tmpref(struct binder_node *node)
1395 {
1396 	bool free_node;
1397 
1398 	binder_node_inner_lock(node);
1399 	if (!node->proc)
1400 		spin_lock(&binder_dead_nodes_lock);
1401 	else
1402 		__acquire(&binder_dead_nodes_lock);
1403 	node->tmp_refs--;
1404 	BUG_ON(node->tmp_refs < 0);
1405 	if (!node->proc)
1406 		spin_unlock(&binder_dead_nodes_lock);
1407 	else
1408 		__release(&binder_dead_nodes_lock);
1409 	/*
1410 	 * Call binder_dec_node() to check if all refcounts are 0
1411 	 * and cleanup is needed. Calling with strong=0 and internal=1
1412 	 * causes no actual reference to be released in binder_dec_node().
1413 	 * If that changes, a change is needed here too.
1414 	 */
1415 	free_node = binder_dec_node_nilocked(node, 0, 1);
1416 	binder_node_inner_unlock(node);
1417 	if (free_node)
1418 		binder_free_node(node);
1419 }
1420 
1421 static void binder_put_node(struct binder_node *node)
1422 {
1423 	binder_dec_node_tmpref(node);
1424 }
1425 
1426 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1427 						 u32 desc, bool need_strong_ref)
1428 {
1429 	struct rb_node *n = proc->refs_by_desc.rb_node;
1430 	struct binder_ref *ref;
1431 
1432 	while (n) {
1433 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1434 
1435 		if (desc < ref->data.desc) {
1436 			n = n->rb_left;
1437 		} else if (desc > ref->data.desc) {
1438 			n = n->rb_right;
1439 		} else if (need_strong_ref && !ref->data.strong) {
1440 			binder_user_error("tried to use weak ref as strong ref\n");
1441 			return NULL;
1442 		} else {
1443 			return ref;
1444 		}
1445 	}
1446 	return NULL;
1447 }
1448 
1449 /**
1450  * binder_get_ref_for_node_olocked() - get the ref associated with given node
1451  * @proc:	binder_proc that owns the ref
1452  * @node:	binder_node of target
1453  * @new_ref:	newly allocated binder_ref to be initialized or %NULL
1454  *
1455  * Look up the ref for the given node and return it if it exists
1456  *
1457  * If it doesn't exist and the caller provides a newly allocated
1458  * ref, initialize the fields of the newly allocated ref and insert it
1459  * into the given proc's rb_trees and the node's refs list.
1460  *
1461  * Return:	the ref for node. It is possible that another thread
1462  *		allocated/initialized the ref first in which case the
1463  *		returned ref would be different from the passed-in
1464  *		new_ref. new_ref must be kfree'd by the caller in
1465  *		this case.
1466  */
1467 static struct binder_ref *binder_get_ref_for_node_olocked(
1468 					struct binder_proc *proc,
1469 					struct binder_node *node,
1470 					struct binder_ref *new_ref)
1471 {
1472 	struct binder_context *context = proc->context;
1473 	struct rb_node **p = &proc->refs_by_node.rb_node;
1474 	struct rb_node *parent = NULL;
1475 	struct binder_ref *ref;
1476 	struct rb_node *n;
1477 
1478 	while (*p) {
1479 		parent = *p;
1480 		ref = rb_entry(parent, struct binder_ref, rb_node_node);
1481 
1482 		if (node < ref->node)
1483 			p = &(*p)->rb_left;
1484 		else if (node > ref->node)
1485 			p = &(*p)->rb_right;
1486 		else
1487 			return ref;
1488 	}
1489 	if (!new_ref)
1490 		return NULL;
1491 
1492 	binder_stats_created(BINDER_STAT_REF);
1493 	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1494 	new_ref->proc = proc;
1495 	new_ref->node = node;
1496 	rb_link_node(&new_ref->rb_node_node, parent, p);
1497 	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1498 
1499 	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1500 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1501 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1502 		if (ref->data.desc > new_ref->data.desc)
1503 			break;
1504 		new_ref->data.desc = ref->data.desc + 1;
1505 	}
1506 
1507 	p = &proc->refs_by_desc.rb_node;
1508 	while (*p) {
1509 		parent = *p;
1510 		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1511 
1512 		if (new_ref->data.desc < ref->data.desc)
1513 			p = &(*p)->rb_left;
1514 		else if (new_ref->data.desc > ref->data.desc)
1515 			p = &(*p)->rb_right;
1516 		else
1517 			BUG();
1518 	}
1519 	rb_link_node(&new_ref->rb_node_desc, parent, p);
1520 	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1521 
1522 	binder_node_lock(node);
1523 	hlist_add_head(&new_ref->node_entry, &node->refs);
1524 
1525 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1526 		     "%d new ref %d desc %d for node %d\n",
1527 		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1528 		      node->debug_id);
1529 	binder_node_unlock(node);
1530 	return new_ref;
1531 }
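/*
 * Worked example of the descriptor assignment above: if the proc
 * already holds descriptors {1, 2, 4} (0 is reserved for the context
 * manager's node), the sorted scan starts at 1, bumps the candidate to
 * 2 and then 3, and stops at 4 because 4 > 3, so the new ref gets the
 * lowest free descriptor: 3.
 */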
1532 
1533 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1534 {
1535 	bool delete_node = false;
1536 
1537 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1538 		     "%d delete ref %d desc %d for node %d\n",
1539 		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
1540 		      ref->node->debug_id);
1541 
1542 	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1543 	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1544 
1545 	binder_node_inner_lock(ref->node);
1546 	if (ref->data.strong)
1547 		binder_dec_node_nilocked(ref->node, 1, 1);
1548 
1549 	hlist_del(&ref->node_entry);
1550 	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1551 	binder_node_inner_unlock(ref->node);
1552 	/*
1553 	 * Clear ref->node unless we want the caller to free the node
1554 	 */
1555 	if (!delete_node) {
1556 		/*
1557 		 * The caller uses ref->node to determine
1558 		 * whether the node needs to be freed. Clear
1559 		 * it since the node is still alive.
1560 		 */
1561 		ref->node = NULL;
1562 	}
1563 
1564 	if (ref->death) {
1565 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1566 			     "%d delete ref %d desc %d has death notification\n",
1567 			      ref->proc->pid, ref->data.debug_id,
1568 			      ref->data.desc);
1569 		binder_dequeue_work(ref->proc, &ref->death->work);
1570 		binder_stats_deleted(BINDER_STAT_DEATH);
1571 	}
1572 	binder_stats_deleted(BINDER_STAT_REF);
1573 }
1574 
1575 /**
1576  * binder_inc_ref_olocked() - increment the ref for given handle
1577  * @ref:         ref to be incremented
1578  * @strong:      if true, strong increment, else weak
1579  * @target_list: list to queue node work on
1580  *
1581  * Increment the ref. @ref->proc->outer_lock must be held on entry
1582  *
1583  * Return: 0, if successful, else errno
1584  */
1585 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1586 				  struct list_head *target_list)
1587 {
1588 	int ret;
1589 
1590 	if (strong) {
1591 		if (ref->data.strong == 0) {
1592 			ret = binder_inc_node(ref->node, 1, 1, target_list);
1593 			if (ret)
1594 				return ret;
1595 		}
1596 		ref->data.strong++;
1597 	} else {
1598 		if (ref->data.weak == 0) {
1599 			ret = binder_inc_node(ref->node, 0, 1, target_list);
1600 			if (ret)
1601 				return ret;
1602 		}
1603 		ref->data.weak++;
1604 	}
1605 	return 0;
1606 }
1607 
1608 /**
1609  * binder_dec_ref_olocked() - dec the ref for given handle
1610  * @ref:	ref to be decremented
1611  * @strong:	if true, strong decrement, else weak
1612  *
1613  * Decrement the ref. @ref->proc->outer_lock must be held on entry.
1614  *
1615  * Return: true if ref is cleaned up and ready to be freed
1616  */
1617 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1618 {
1619 	if (strong) {
1620 		if (ref->data.strong == 0) {
1621 			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1622 					  ref->proc->pid, ref->data.debug_id,
1623 					  ref->data.desc, ref->data.strong,
1624 					  ref->data.weak);
1625 			return false;
1626 		}
1627 		ref->data.strong--;
1628 		if (ref->data.strong == 0)
1629 			binder_dec_node(ref->node, strong, 1);
1630 	} else {
1631 		if (ref->data.weak == 0) {
1632 			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1633 					  ref->proc->pid, ref->data.debug_id,
1634 					  ref->data.desc, ref->data.strong,
1635 					  ref->data.weak);
1636 			return false;
1637 		}
1638 		ref->data.weak--;
1639 	}
1640 	if (ref->data.strong == 0 && ref->data.weak == 0) {
1641 		binder_cleanup_ref_olocked(ref);
1642 		return true;
1643 	}
1644 	return false;
1645 }
1646 
1647 /**
1648  * binder_get_node_from_ref() - get the node from the given proc/desc
1649  * @proc:	proc containing the ref
1650  * @desc:	the handle associated with the ref
1651  * @need_strong_ref: if true, only return node if ref is strong
1652  * @rdata:	the id/refcount data for the ref
1653  *
1654  * Given a proc and ref handle, return the associated binder_node
1655  *
1656  * Return: a binder_node, or NULL if not found or if the ref is only weak when a strong ref is required
1657  */
1658 static struct binder_node *binder_get_node_from_ref(
1659 		struct binder_proc *proc,
1660 		u32 desc, bool need_strong_ref,
1661 		struct binder_ref_data *rdata)
1662 {
1663 	struct binder_node *node;
1664 	struct binder_ref *ref;
1665 
1666 	binder_proc_lock(proc);
1667 	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1668 	if (!ref)
1669 		goto err_no_ref;
1670 	node = ref->node;
1671 	/*
1672 	 * Take an implicit reference on the node to ensure
1673 	 * it stays alive until the call to binder_put_node()
1674 	 */
1675 	binder_inc_node_tmpref(node);
1676 	if (rdata)
1677 		*rdata = ref->data;
1678 	binder_proc_unlock(proc);
1679 
1680 	return node;
1681 
1682 err_no_ref:
1683 	binder_proc_unlock(proc);
1684 	return NULL;
1685 }
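
/*
 * A minimal usage sketch (illustrative, not a real call site): every
 * successful lookup must be paired with binder_put_node() to drop the
 * implicit tmpref taken above.
 *
 *	struct binder_ref_data rdata;
 *	struct binder_node *node;
 *
 *	node = binder_get_node_from_ref(proc, handle, true, &rdata);
 *	if (node) {
 *		...			(node cannot be freed here)
 *		binder_put_node(node);
 *	}
 */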
1686 
1687 /**
1688  * binder_free_ref() - free the binder_ref
1689  * @ref:	ref to free
1690  *
1691  * Free the binder_ref. Free the binder_node indicated by ref->node
1692  * (if non-NULL) and the binder_ref_death indicated by ref->death.
1693  */
1694 static void binder_free_ref(struct binder_ref *ref)
1695 {
1696 	if (ref->node)
1697 		binder_free_node(ref->node);
1698 	kfree(ref->death);
1699 	kfree(ref);
1700 }
1701 
1702 /**
1703  * binder_update_ref_for_handle() - inc/dec the ref for given handle
1704  * @proc:	proc containing the ref
1705  * @desc:	the handle associated with the ref
1706  * @increment:	true=inc reference, false=dec reference
1707  * @strong:	true=strong reference, false=weak reference
1708  * @rdata:	the id/refcount data for the ref
1709  *
1710  * Given a proc and ref handle, increment or decrement the ref
1711  * according to "increment" arg.
1712  *
1713  * Return: 0 if successful, else errno
1714  */
1715 static int binder_update_ref_for_handle(struct binder_proc *proc,
1716 		uint32_t desc, bool increment, bool strong,
1717 		struct binder_ref_data *rdata)
1718 {
1719 	int ret = 0;
1720 	struct binder_ref *ref;
1721 	bool delete_ref = false;
1722 
1723 	binder_proc_lock(proc);
1724 	ref = binder_get_ref_olocked(proc, desc, strong);
1725 	if (!ref) {
1726 		ret = -EINVAL;
1727 		goto err_no_ref;
1728 	}
1729 	if (increment)
1730 		ret = binder_inc_ref_olocked(ref, strong, NULL);
1731 	else
1732 		delete_ref = binder_dec_ref_olocked(ref, strong);
1733 
1734 	if (rdata)
1735 		*rdata = ref->data;
1736 	binder_proc_unlock(proc);
1737 
1738 	if (delete_ref)
1739 		binder_free_ref(ref);
1740 	return ret;
1741 
1742 err_no_ref:
1743 	binder_proc_unlock(proc);
1744 	return ret;
1745 }
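
/*
 * For illustration, the BC_INCREFS/BC_ACQUIRE/BC_RELEASE/BC_DECREFS
 * handlers in binder_thread_write() reduce to a call of roughly this
 * shape (a sketch, not the exact handler code); e.g. BC_ACQUIRE maps
 * to increment == true, strong == true:
 *
 *	struct binder_ref_data rdata;
 *	int ret;
 *
 *	ret = binder_update_ref_for_handle(proc, desc, true, true, &rdata);
 *	if (ret)
 *		binder_user_error("%d: acquire failed, desc %d\n",
 *				  proc->pid, desc);
 */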
1746 
1747 /**
1748  * binder_dec_ref_for_handle() - dec the ref for given handle
1749  * @proc:	proc containing the ref
1750  * @desc:	the handle associated with the ref
1751  * @strong:	true=strong reference, false=weak reference
1752  * @rdata:	the id/refcount data for the ref
1753  *
1754  * Just calls binder_update_ref_for_handle() to decrement the ref.
1755  *
1756  * Return: 0 if successful, else errno
1757  */
1758 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1759 		uint32_t desc, bool strong, struct binder_ref_data *rdata)
1760 {
1761 	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1762 }
1763 
1764 
1765 /**
1766  * binder_inc_ref_for_node() - increment the ref for given proc/node
1767  * @proc:	 proc containing the ref
1768  * @node:	 target node
1769  * @strong:	 true=strong reference, false=weak reference
1770  * @target_list: worklist to use if node is incremented
1771  * @rdata:	 the id/refcount data for the ref
1772  *
1773  * Given a proc and node, increment the ref. Create the ref if it
1774  * doesn't already exist
1775  *
1776  * Return: 0 if successful, else errno
1777  */
1778 static int binder_inc_ref_for_node(struct binder_proc *proc,
1779 			struct binder_node *node,
1780 			bool strong,
1781 			struct list_head *target_list,
1782 			struct binder_ref_data *rdata)
1783 {
1784 	struct binder_ref *ref;
1785 	struct binder_ref *new_ref = NULL;
1786 	int ret = 0;
1787 
1788 	binder_proc_lock(proc);
1789 	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1790 	if (!ref) {
1791 		binder_proc_unlock(proc);
1792 		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1793 		if (!new_ref)
1794 			return -ENOMEM;
1795 		binder_proc_lock(proc);
1796 		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1797 	}
1798 	ret = binder_inc_ref_olocked(ref, strong, target_list);
1799 	*rdata = ref->data;
1800 	binder_proc_unlock(proc);
1801 	if (new_ref && ref != new_ref)
1802 		/*
1803 		 * Another thread created the ref first so
1804 		 * free the one we allocated
1805 		 */
1806 		kfree(new_ref);
1807 	return ret;
1808 }
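
/*
 * The function above uses the common allocate-outside-the-lock pattern:
 * kzalloc() may sleep, so the spinlock is dropped around the allocation
 * and the lookup is repeated once the lock is retaken. In general form
 * (lookup()/lookup_or_insert() stand in for
 * binder_get_ref_for_node_olocked()):
 *
 *	lock();
 *	obj = lookup();
 *	if (!obj) {
 *		unlock();
 *		new = kzalloc(sizeof(*new), GFP_KERNEL);  (may sleep)
 *		lock();
 *		obj = lookup_or_insert(new);  (recheck under the lock)
 *	}
 *	unlock();
 *	if (new && obj != new)
 *		kfree(new);  (another thread won the race)
 */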
1809 
1810 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1811 					   struct binder_transaction *t)
1812 {
1813 	BUG_ON(!target_thread);
1814 	assert_spin_locked(&target_thread->proc->inner_lock);
1815 	BUG_ON(target_thread->transaction_stack != t);
1816 	BUG_ON(target_thread->transaction_stack->from != target_thread);
1817 	target_thread->transaction_stack =
1818 		target_thread->transaction_stack->from_parent;
1819 	t->from = NULL;
1820 }
1821 
1822 /**
1823  * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1824  * @thread:	thread to decrement
1825  *
1826  * A thread needs to be kept alive while being used to create or
1827  * handle a transaction. binder_get_txn_from() is used to safely
1828  * extract t->from from a binder_transaction and keep the thread
1829  * indicated by t->from from being freed. When done with that
1830  * binder_thread, this function is called to decrement the
1831  * tmp_ref and free if appropriate (thread has been released
1832  * and no transaction being processed by the driver)
1833  */
1834 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1835 {
1836 	/*
1837 	 * The atomic op protects the counter itself; the inner lock makes
1838 	 * the tmp_ref/is_dead check consistent so the thread is freed once.
1839 	 */
1840 	binder_inner_proc_lock(thread->proc);
1841 	atomic_dec(&thread->tmp_ref);
1842 	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1843 		binder_inner_proc_unlock(thread->proc);
1844 		binder_free_thread(thread);
1845 		return;
1846 	}
1847 	binder_inner_proc_unlock(thread->proc);
1848 }
1849 
1850 /**
1851  * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1852  * @proc:	proc to decrement
1853  *
1854  * A binder_proc needs to be kept alive while being used to create or
1855  * handle a transaction. proc->tmp_ref is incremented when
1856  * creating a new transaction or the binder_proc is currently in-use
1857  * by threads that are being released. When done with the binder_proc,
1858  * this function is called to decrement the counter and free the
1859  * proc if appropriate (proc has been released, all threads have
1860  * been released and not currently in-use to process a transaction).
1861  */
1862 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1863 {
1864 	binder_inner_proc_lock(proc);
1865 	proc->tmp_ref--;
1866 	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1867 			!proc->tmp_ref) {
1868 		binder_inner_proc_unlock(proc);
1869 		binder_free_proc(proc);
1870 		return;
1871 	}
1872 	binder_inner_proc_unlock(proc);
1873 }
1874 
1875 /**
1876  * binder_get_txn_from() - safely extract the "from" thread in transaction
1877  * @t:	binder transaction from which to extract t->from
1878  *
1879  * Atomically return the "from" thread and increment the tmp_ref
1880  * count for the thread to ensure it stays alive until
1881  * binder_thread_dec_tmpref() is called.
1882  *
1883  * Return: the value of t->from
1884  */
1885 static struct binder_thread *binder_get_txn_from(
1886 		struct binder_transaction *t)
1887 {
1888 	struct binder_thread *from;
1889 
1890 	spin_lock(&t->lock);
1891 	from = t->from;
1892 	if (from)
1893 		atomic_inc(&from->tmp_ref);
1894 	spin_unlock(&t->lock);
1895 	return from;
1896 }
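
/*
 * Typical pairing (sketch): the returned thread stays valid until the
 * matching binder_thread_dec_tmpref() call.
 *
 *	struct binder_thread *from = binder_get_txn_from(t);
 *
 *	if (from) {
 *		...			(from may be used safely here)
 *		binder_thread_dec_tmpref(from);
 *	}
 */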
1897 
1898 /**
1899  * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1900  * @t:	binder transaction from which to extract t->from
1901  *
1902  * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1903  * to guarantee that the thread cannot be released while operating on it.
1904  * The caller must call binder_inner_proc_unlock() to release the inner lock
1905  * as well as call binder_dec_thread_txn() to release the reference.
1906  *
1907  * Return: the value of t->from
1908  */
1909 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1910 		struct binder_transaction *t)
1911 	__acquires(&t->from->proc->inner_lock)
1912 {
1913 	struct binder_thread *from;
1914 
1915 	from = binder_get_txn_from(t);
1916 	if (!from) {
1917 		__acquire(&from->proc->inner_lock);
1918 		return NULL;
1919 	}
1920 	binder_inner_proc_lock(from->proc);
1921 	if (t->from) {
1922 		BUG_ON(from != t->from);
1923 		return from;
1924 	}
1925 	binder_inner_proc_unlock(from->proc);
1926 	__acquire(&from->proc->inner_lock);
1927 	binder_thread_dec_tmpref(from);
1928 	return NULL;
1929 }
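
/*
 * Caller obligations as a sketch (see binder_send_failed_reply() for a
 * real user): release the inner lock first, then drop the tmpref.
 *
 *	struct binder_thread *target = binder_get_txn_from_and_acq_inner(t);
 *
 *	if (target) {
 *		...		(inner lock held; thread cannot be released)
 *		binder_inner_proc_unlock(target->proc);
 *		binder_thread_dec_tmpref(target);
 *	}
 */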
1930 
1931 /**
1932  * binder_free_txn_fixups() - free unprocessed fd fixups
1933  * @t:	binder transaction whose fd fixups are to be freed
1934  *
1935  * If the transaction is being torn down prior to being
1936  * processed by the target process, free all of the
1937  * fd fixups and fput the file structs. It is safe to
1938  * call this function after the fixups have been
1939  * processed -- in that case, the list will be empty.
1940  */
1941 static void binder_free_txn_fixups(struct binder_transaction *t)
1942 {
1943 	struct binder_txn_fd_fixup *fixup, *tmp;
1944 
1945 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1946 		fput(fixup->file);
1947 		list_del(&fixup->fixup_entry);
1948 		kfree(fixup);
1949 	}
1950 }
1951 
1952 static void binder_free_transaction(struct binder_transaction *t)
1953 {
1954 	if (t->buffer)
1955 		t->buffer->transaction = NULL;
1956 	binder_free_txn_fixups(t);
1957 	kfree(t);
1958 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
1959 }
1960 
1961 static void binder_send_failed_reply(struct binder_transaction *t,
1962 				     uint32_t error_code)
1963 {
1964 	struct binder_thread *target_thread;
1965 	struct binder_transaction *next;
1966 
1967 	BUG_ON(t->flags & TF_ONE_WAY);
1968 	while (1) {
1969 		target_thread = binder_get_txn_from_and_acq_inner(t);
1970 		if (target_thread) {
1971 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1972 				     "send failed reply for transaction %d to %d:%d\n",
1973 				      t->debug_id,
1974 				      target_thread->proc->pid,
1975 				      target_thread->pid);
1976 
1977 			binder_pop_transaction_ilocked(target_thread, t);
1978 			if (target_thread->reply_error.cmd == BR_OK) {
1979 				target_thread->reply_error.cmd = error_code;
1980 				binder_enqueue_thread_work_ilocked(
1981 					target_thread,
1982 					&target_thread->reply_error.work);
1983 				wake_up_interruptible(&target_thread->wait);
1984 			} else {
1985 				/*
1986 				 * Cannot get here for normal operation, but
1987 				 * we can if multiple synchronous transactions
1988 				 * are sent without blocking for responses.
1989 				 * Just ignore the 2nd error in this case.
1990 				 */
1991 				pr_warn("Unexpected reply error: %u\n",
1992 					target_thread->reply_error.cmd);
1993 			}
1994 			binder_inner_proc_unlock(target_thread->proc);
1995 			binder_thread_dec_tmpref(target_thread);
1996 			binder_free_transaction(t);
1997 			return;
1998 		} else {
1999 			__release(&target_thread->proc->inner_lock);
2000 		}
2001 		next = t->from_parent;
2002 
2003 		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2004 			     "send failed reply for transaction %d, target dead\n",
2005 			     t->debug_id);
2006 
2007 		binder_free_transaction(t);
2008 		if (next == NULL) {
2009 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
2010 				     "reply failed, no target thread at root\n");
2011 			return;
2012 		}
2013 		t = next;
2014 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
2015 			     "reply failed, no target thread -- retry %d\n",
2016 			      t->debug_id);
2017 	}
2018 }
2019 
2020 /**
2021  * binder_cleanup_transaction() - cleans up undelivered transaction
2022  * @t:		transaction that needs to be cleaned up
2023  * @reason:	reason the transaction wasn't delivered
2024  * @error_code:	error to return to caller (if synchronous call)
2025  */
2026 static void binder_cleanup_transaction(struct binder_transaction *t,
2027 				       const char *reason,
2028 				       uint32_t error_code)
2029 {
2030 	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2031 		binder_send_failed_reply(t, error_code);
2032 	} else {
2033 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2034 			"undelivered transaction %d, %s\n",
2035 			t->debug_id, reason);
2036 		binder_free_transaction(t);
2037 	}
2038 }
2039 
2040 /**
2041  * binder_validate_object() - checks for a valid metadata object in a buffer.
2042  * @buffer:	binder_buffer that we're parsing.
2043  * @offset:	offset in the buffer at which to validate an object.
2044  *
2045  * Return:	If there's a valid metadata object at @offset in @buffer, the
2046  *		size of that object. Otherwise, it returns zero.
2047  */
2048 static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
2049 {
2050 	/* Check if we can read a header first */
2051 	struct binder_object_header *hdr;
2052 	size_t object_size = 0;
2053 
2054 	if (buffer->data_size < sizeof(*hdr) ||
2055 	    offset > buffer->data_size - sizeof(*hdr) ||
2056 	    !IS_ALIGNED(offset, sizeof(u32)))
2057 		return 0;
2058 
2059 	/* Ok, now see if we can read a complete object. */
2060 	hdr = (struct binder_object_header *)(buffer->data + offset);
2061 	switch (hdr->type) {
2062 	case BINDER_TYPE_BINDER:
2063 	case BINDER_TYPE_WEAK_BINDER:
2064 	case BINDER_TYPE_HANDLE:
2065 	case BINDER_TYPE_WEAK_HANDLE:
2066 		object_size = sizeof(struct flat_binder_object);
2067 		break;
2068 	case BINDER_TYPE_FD:
2069 		object_size = sizeof(struct binder_fd_object);
2070 		break;
2071 	case BINDER_TYPE_PTR:
2072 		object_size = sizeof(struct binder_buffer_object);
2073 		break;
2074 	case BINDER_TYPE_FDA:
2075 		object_size = sizeof(struct binder_fd_array_object);
2076 		break;
2077 	default:
2078 		return 0;
2079 	}
2080 	if (buffer->data_size >= object_size &&
2081 	    offset <= buffer->data_size - object_size)
2082 		return object_size;
2083 	else
2084 		return 0;
2085 }
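
/*
 * A worked example, assuming a 64-bit kernel where
 * sizeof(struct flat_binder_object) == 24: with buffer->data_size == 64,
 * an offset of 40 passes the header check (40 <= 64 - 4) and the object
 * check (40 <= 64 - 24), so 24 is returned; an offset of 48 passes the
 * header check but fails the object check (48 > 40), so 0 is returned
 * and the object is rejected.
 */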
2086 
2087 /**
2088  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2089  * @b:		binder_buffer containing the object
2090  * @index:	index in offset array at which the binder_buffer_object is
2091  *		located
2092  * @start:	points to the start of the offset array
2093  * @num_valid:	the number of valid offsets in the offset array
2094  *
2095  * Return:	If @index is within the valid range of the offset array
2096  *		described by @start and @num_valid, and if there's a valid
2097  *		binder_buffer_object at the offset found in index @index
2098  *		of the offset array, that object is returned. Otherwise,
2099  *		%NULL is returned.
2100  *		Note that the offset found in index @index itself is not
2101  *		verified; this function assumes that @num_valid elements
2102  *		from @start were previously verified to have valid offsets.
2103  */
2104 static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
2105 							binder_size_t index,
2106 							binder_size_t *start,
2107 							binder_size_t num_valid)
2108 {
2109 	struct binder_buffer_object *buffer_obj;
2110 	binder_size_t *offp;
2111 
2112 	if (index >= num_valid)
2113 		return NULL;
2114 
2115 	offp = start + index;
2116 	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
2117 	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
2118 		return NULL;
2119 
2120 	return buffer_obj;
2121 }
2122 
2123 /**
2124  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2125  * @b:			transaction buffer
2126  * @objects_start:	start of objects buffer
2127  * @buffer:		binder_buffer_object in which to do the fixup
2128  * @fixup_offset:	start offset in @buffer to fix up
2129  * @last_obj:		last binder_buffer_object that we fixed up in
2130  * @last_min_offset:	minimum fixup offset in @last_obj
2131  *
2132  * Return:		%true if a fixup in buffer @buffer at offset @offset is
2133  *			allowed.
2134  *
2135  * For safety reasons, we only allow fixups inside a buffer to happen
2136  * at increasing offsets; additionally, we only allow fixup on the last
2137  * buffer object that was verified, or one of its parents.
2138  *
2139  * Example of what is allowed:
2140  *
2141  * A
2142  *   B (parent = A, offset = 0)
2143  *   C (parent = A, offset = 16)
2144  *     D (parent = C, offset = 0)
2145  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2146  *
2147  * Examples of what is not allowed:
2148  *
2149  * Decreasing offsets within the same parent:
2150  * A
2151  *   C (parent = A, offset = 16)
2152  *   B (parent = A, offset = 0) // decreasing offset within A
2153  *
2154  * Referring to a parent that wasn't the last object or any of its parents:
2155  * A
2156  *   B (parent = A, offset = 0)
2157  *   C (parent = A, offset = 16)
2159  *     D (parent = B, offset = 0) // B is not A or any of A's parents
2160  */
2161 static bool binder_validate_fixup(struct binder_buffer *b,
2162 				  binder_size_t *objects_start,
2163 				  struct binder_buffer_object *buffer,
2164 				  binder_size_t fixup_offset,
2165 				  struct binder_buffer_object *last_obj,
2166 				  binder_size_t last_min_offset)
2167 {
2168 	if (!last_obj) {
2169 		/* No object verified yet, so there is nothing to fix up in */
2170 		return false;
2171 	}
2172 
2173 	while (last_obj != buffer) {
2174 		/*
2175 		 * Safe to retrieve the parent of last_obj, since it
2176 		 * was already previously verified by the driver.
2177 		 */
2178 		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2179 			return false;
2180 		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
2181 		last_obj = (struct binder_buffer_object *)
2182 			(b->data + *(objects_start + last_obj->parent));
2183 	}
2184 	return (fixup_offset >= last_min_offset);
2185 }
2186 
2187 static void binder_transaction_buffer_release(struct binder_proc *proc,
2188 					      struct binder_buffer *buffer,
2189 					      binder_size_t *failed_at)
2190 {
2191 	binder_size_t *offp, *off_start, *off_end;
2192 	int debug_id = buffer->debug_id;
2193 
2194 	binder_debug(BINDER_DEBUG_TRANSACTION,
2195 		     "%d buffer release %d, size %zd-%zd, failed at %pK\n",
2196 		     proc->pid, buffer->debug_id,
2197 		     buffer->data_size, buffer->offsets_size, failed_at);
2198 
2199 	if (buffer->target_node)
2200 		binder_dec_node(buffer->target_node, 1, 0);
2201 
2202 	off_start = (binder_size_t *)(buffer->data +
2203 				      ALIGN(buffer->data_size, sizeof(void *)));
2204 	if (failed_at)
2205 		off_end = failed_at;
2206 	else
2207 		off_end = (void *)off_start + buffer->offsets_size;
2208 	for (offp = off_start; offp < off_end; offp++) {
2209 		struct binder_object_header *hdr;
2210 		size_t object_size = binder_validate_object(buffer, *offp);
2211 
2212 		if (object_size == 0) {
2213 			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2214 			       debug_id, (u64)*offp, buffer->data_size);
2215 			continue;
2216 		}
2217 		hdr = (struct binder_object_header *)(buffer->data + *offp);
2218 		switch (hdr->type) {
2219 		case BINDER_TYPE_BINDER:
2220 		case BINDER_TYPE_WEAK_BINDER: {
2221 			struct flat_binder_object *fp;
2222 			struct binder_node *node;
2223 
2224 			fp = to_flat_binder_object(hdr);
2225 			node = binder_get_node(proc, fp->binder);
2226 			if (node == NULL) {
2227 				pr_err("transaction release %d bad node %016llx\n",
2228 				       debug_id, (u64)fp->binder);
2229 				break;
2230 			}
2231 			binder_debug(BINDER_DEBUG_TRANSACTION,
2232 				     "        node %d u%016llx\n",
2233 				     node->debug_id, (u64)node->ptr);
2234 			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2235 					0);
2236 			binder_put_node(node);
2237 		} break;
2238 		case BINDER_TYPE_HANDLE:
2239 		case BINDER_TYPE_WEAK_HANDLE: {
2240 			struct flat_binder_object *fp;
2241 			struct binder_ref_data rdata;
2242 			int ret;
2243 
2244 			fp = to_flat_binder_object(hdr);
2245 			ret = binder_dec_ref_for_handle(proc, fp->handle,
2246 				hdr->type == BINDER_TYPE_HANDLE, &rdata);
2247 
2248 			if (ret) {
2249 				pr_err("transaction release %d bad handle %d, ret = %d\n",
2250 				 debug_id, fp->handle, ret);
2251 				break;
2252 			}
2253 			binder_debug(BINDER_DEBUG_TRANSACTION,
2254 				     "        ref %d desc %d\n",
2255 				     rdata.debug_id, rdata.desc);
2256 		} break;
2257 
2258 		case BINDER_TYPE_FD: {
2259 			/*
2260 			 * No need to close the file here since user-space
2261 			 * closes it for successfully delivered
2262 			 * transactions. For transactions that weren't
2263 			 * delivered, the new fd was never allocated so
2264 			 * there is no need to close and the fput on the
2265 			 * file is done when the transaction is torn
2266 			 * down.
2267 			 */
2268 			WARN_ON(failed_at &&
2269 				proc->tsk == current->group_leader);
2270 		} break;
2271 		case BINDER_TYPE_PTR:
2272 			/*
2273 			 * Nothing to do here, this will get cleaned up when the
2274 			 * transaction buffer gets freed
2275 			 */
2276 			break;
2277 		case BINDER_TYPE_FDA: {
2278 			struct binder_fd_array_object *fda;
2279 			struct binder_buffer_object *parent;
2280 			uintptr_t parent_buffer;
2281 			u32 *fd_array;
2282 			size_t fd_index;
2283 			binder_size_t fd_buf_size;
2284 
2285 			if (proc->tsk != current->group_leader) {
2286 				/*
2287 				 * Nothing to do if running in sender context
2288 				 * The fd fixups have not been applied so no
2289 				 * fds need to be closed.
2290 				 */
2291 				continue;
2292 			}
2293 
2294 			fda = to_binder_fd_array_object(hdr);
2295 			parent = binder_validate_ptr(buffer, fda->parent,
2296 						     off_start,
2297 						     offp - off_start);
2298 			if (!parent) {
2299 				pr_err("transaction release %d bad parent offset\n",
2300 				       debug_id);
2301 				continue;
2302 			}
2303 			/*
2304 			 * Since the parent was already fixed up, convert it
2305 			 * back to kernel address space to access it
2306 			 */
2307 			parent_buffer = parent->buffer -
2308 				binder_alloc_get_user_buffer_offset(
2309 						&proc->alloc);
2310 
2311 			fd_buf_size = sizeof(u32) * fda->num_fds;
2312 			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2313 				pr_err("transaction release %d invalid number of fds (%lld)\n",
2314 				       debug_id, (u64)fda->num_fds);
2315 				continue;
2316 			}
2317 			if (fd_buf_size > parent->length ||
2318 			    fda->parent_offset > parent->length - fd_buf_size) {
2319 				/* No space for all file descriptors here. */
2320 				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2321 				       debug_id, (u64)fda->num_fds);
2322 				continue;
2323 			}
2324 			fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2325 			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2326 				ksys_close(fd_array[fd_index]);
2327 		} break;
2328 		default:
2329 			pr_err("transaction release %d bad object type %x\n",
2330 				debug_id, hdr->type);
2331 			break;
2332 		}
2333 	}
2334 }
2335 
2336 static int binder_translate_binder(struct flat_binder_object *fp,
2337 				   struct binder_transaction *t,
2338 				   struct binder_thread *thread)
2339 {
2340 	struct binder_node *node;
2341 	struct binder_proc *proc = thread->proc;
2342 	struct binder_proc *target_proc = t->to_proc;
2343 	struct binder_ref_data rdata;
2344 	int ret = 0;
2345 
2346 	node = binder_get_node(proc, fp->binder);
2347 	if (!node) {
2348 		node = binder_new_node(proc, fp);
2349 		if (!node)
2350 			return -ENOMEM;
2351 	}
2352 	if (fp->cookie != node->cookie) {
2353 		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2354 				  proc->pid, thread->pid, (u64)fp->binder,
2355 				  node->debug_id, (u64)fp->cookie,
2356 				  (u64)node->cookie);
2357 		ret = -EINVAL;
2358 		goto done;
2359 	}
2360 	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2361 		ret = -EPERM;
2362 		goto done;
2363 	}
2364 
2365 	ret = binder_inc_ref_for_node(target_proc, node,
2366 			fp->hdr.type == BINDER_TYPE_BINDER,
2367 			&thread->todo, &rdata);
2368 	if (ret)
2369 		goto done;
2370 
2371 	if (fp->hdr.type == BINDER_TYPE_BINDER)
2372 		fp->hdr.type = BINDER_TYPE_HANDLE;
2373 	else
2374 		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2375 	fp->binder = 0;
2376 	fp->handle = rdata.desc;
2377 	fp->cookie = 0;
2378 
2379 	trace_binder_transaction_node_to_ref(t, node, &rdata);
2380 	binder_debug(BINDER_DEBUG_TRANSACTION,
2381 		     "        node %d u%016llx -> ref %d desc %d\n",
2382 		     node->debug_id, (u64)node->ptr,
2383 		     rdata.debug_id, rdata.desc);
2384 done:
2385 	binder_put_node(node);
2386 	return ret;
2387 }
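
/*
 * Effect of the translation above, as a before/after sketch (values are
 * illustrative): the sender's object
 *
 *	{ .hdr.type = BINDER_TYPE_BINDER, .binder = ptr, .cookie = cookie }
 *
 * is rewritten in place so the target instead sees
 *
 *	{ .hdr.type = BINDER_TYPE_HANDLE, .handle = rdata.desc,
 *	  .binder = 0, .cookie = 0 }
 *
 * where rdata.desc is a descriptor valid only in target_proc.
 */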
2388 
2389 static int binder_translate_handle(struct flat_binder_object *fp,
2390 				   struct binder_transaction *t,
2391 				   struct binder_thread *thread)
2392 {
2393 	struct binder_proc *proc = thread->proc;
2394 	struct binder_proc *target_proc = t->to_proc;
2395 	struct binder_node *node;
2396 	struct binder_ref_data src_rdata;
2397 	int ret = 0;
2398 
2399 	node = binder_get_node_from_ref(proc, fp->handle,
2400 			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2401 	if (!node) {
2402 		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2403 				  proc->pid, thread->pid, fp->handle);
2404 		return -EINVAL;
2405 	}
2406 	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2407 		ret = -EPERM;
2408 		goto done;
2409 	}
2410 
2411 	binder_node_lock(node);
2412 	if (node->proc == target_proc) {
2413 		if (fp->hdr.type == BINDER_TYPE_HANDLE)
2414 			fp->hdr.type = BINDER_TYPE_BINDER;
2415 		else
2416 			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2417 		fp->binder = node->ptr;
2418 		fp->cookie = node->cookie;
2419 		if (node->proc)
2420 			binder_inner_proc_lock(node->proc);
2421 		else
2422 			__acquire(&node->proc->inner_lock);
2423 		binder_inc_node_nilocked(node,
2424 					 fp->hdr.type == BINDER_TYPE_BINDER,
2425 					 0, NULL);
2426 		if (node->proc)
2427 			binder_inner_proc_unlock(node->proc);
2428 		else
2429 			__release(&node->proc->inner_lock);
2430 		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2431 		binder_debug(BINDER_DEBUG_TRANSACTION,
2432 			     "        ref %d desc %d -> node %d u%016llx\n",
2433 			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
2434 			     (u64)node->ptr);
2435 		binder_node_unlock(node);
2436 	} else {
2437 		struct binder_ref_data dest_rdata;
2438 
2439 		binder_node_unlock(node);
2440 		ret = binder_inc_ref_for_node(target_proc, node,
2441 				fp->hdr.type == BINDER_TYPE_HANDLE,
2442 				NULL, &dest_rdata);
2443 		if (ret)
2444 			goto done;
2445 
2446 		fp->binder = 0;
2447 		fp->handle = dest_rdata.desc;
2448 		fp->cookie = 0;
2449 		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2450 						    &dest_rdata);
2451 		binder_debug(BINDER_DEBUG_TRANSACTION,
2452 			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2453 			     src_rdata.debug_id, src_rdata.desc,
2454 			     dest_rdata.debug_id, dest_rdata.desc,
2455 			     node->debug_id);
2456 	}
2457 done:
2458 	binder_put_node(node);
2459 	return ret;
2460 }
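
/*
 * Handle translation has two outcomes, in brief: a handle referring to a
 * node owned by the target process is converted back into a
 * BINDER_TYPE_BINDER (or WEAK_BINDER) carrying the node's original
 * ptr/cookie; any other handle gets a ref in the target and is rewritten
 * (sketch) to
 *
 *	{ .hdr.type = BINDER_TYPE_HANDLE, .handle = dest_rdata.desc,
 *	  .binder = 0, .cookie = 0 }
 */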
2461 
2462 static int binder_translate_fd(u32 *fdp,
2463 			       struct binder_transaction *t,
2464 			       struct binder_thread *thread,
2465 			       struct binder_transaction *in_reply_to)
2466 {
2467 	struct binder_proc *proc = thread->proc;
2468 	struct binder_proc *target_proc = t->to_proc;
2469 	struct binder_txn_fd_fixup *fixup;
2470 	struct file *file;
2471 	int ret = 0;
2472 	bool target_allows_fd;
2473 	int fd = *fdp;
2474 
2475 	if (in_reply_to)
2476 		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2477 	else
2478 		target_allows_fd = t->buffer->target_node->accept_fds;
2479 	if (!target_allows_fd) {
2480 		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2481 				  proc->pid, thread->pid,
2482 				  in_reply_to ? "reply" : "transaction",
2483 				  fd);
2484 		ret = -EPERM;
2485 		goto err_fd_not_accepted;
2486 	}
2487 
2488 	file = fget(fd);
2489 	if (!file) {
2490 		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2491 				  proc->pid, thread->pid, fd);
2492 		ret = -EBADF;
2493 		goto err_fget;
2494 	}
2495 	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2496 	if (ret < 0) {
2497 		ret = -EPERM;
2498 		goto err_security;
2499 	}
2500 
2501 	/*
2502 	 * Add fixup record for this transaction. The allocation
2503 	 * of the fd in the target needs to be done from a
2504 	 * target thread.
2505 	 */
2506 	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2507 	if (!fixup) {
2508 		ret = -ENOMEM;
2509 		goto err_alloc;
2510 	}
2511 	fixup->file = file;
2512 	fixup->offset = (uintptr_t)fdp - (uintptr_t)t->buffer->data;
2513 	trace_binder_transaction_fd_send(t, fd, fixup->offset);
2514 	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2515 
2516 	return ret;
2517 
2518 err_alloc:
2519 err_security:
2520 	fput(file);
2521 err_fget:
2522 err_fd_not_accepted:
2523 	return ret;
2524 }
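
/*
 * Lifecycle of the fixup added above, in brief: the record sits on
 * t->fd_fixups until a thread in the target process consumes the
 * transaction; only then is the fd allocated and installed in the
 * target. If the transaction is torn down first, binder_free_txn_fixups()
 * releases the files instead:
 *
 *	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
 *		fput(fixup->file);
 *		...
 *	}
 */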
2525 
2526 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2527 				     struct binder_buffer_object *parent,
2528 				     struct binder_transaction *t,
2529 				     struct binder_thread *thread,
2530 				     struct binder_transaction *in_reply_to)
2531 {
2532 	binder_size_t fdi, fd_buf_size;
2533 	uintptr_t parent_buffer;
2534 	u32 *fd_array;
2535 	struct binder_proc *proc = thread->proc;
2536 	struct binder_proc *target_proc = t->to_proc;
2537 
2538 	fd_buf_size = sizeof(u32) * fda->num_fds;
2539 	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2540 		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2541 				  proc->pid, thread->pid, (u64)fda->num_fds);
2542 		return -EINVAL;
2543 	}
2544 	if (fd_buf_size > parent->length ||
2545 	    fda->parent_offset > parent->length - fd_buf_size) {
2546 		/* No space for all file descriptors here. */
2547 		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2548 				  proc->pid, thread->pid, (u64)fda->num_fds);
2549 		return -EINVAL;
2550 	}
2551 	/*
2552 	 * Since the parent was already fixed up, convert it
2553 	 * back to the kernel address space to access it
2554 	 */
2555 	parent_buffer = parent->buffer -
2556 		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
2557 	fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2558 	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2559 		binder_user_error("%d:%d parent offset not aligned correctly.\n",
2560 				  proc->pid, thread->pid);
2561 		return -EINVAL;
2562 	}
2563 	for (fdi = 0; fdi < fda->num_fds; fdi++) {
2564 		int ret = binder_translate_fd(&fd_array[fdi], t, thread,
2565 						in_reply_to);
2566 		if (ret < 0)
2567 			return ret;
2568 	}
2569 	return 0;
2570 }
2571 
2572 static int binder_fixup_parent(struct binder_transaction *t,
2573 			       struct binder_thread *thread,
2574 			       struct binder_buffer_object *bp,
2575 			       binder_size_t *off_start,
2576 			       binder_size_t num_valid,
2577 			       struct binder_buffer_object *last_fixup_obj,
2578 			       binder_size_t last_fixup_min_off)
2579 {
2580 	struct binder_buffer_object *parent;
2581 	u8 *parent_buffer;
2582 	struct binder_buffer *b = t->buffer;
2583 	struct binder_proc *proc = thread->proc;
2584 	struct binder_proc *target_proc = t->to_proc;
2585 
2586 	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2587 		return 0;
2588 
2589 	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2590 	if (!parent) {
2591 		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2592 				  proc->pid, thread->pid);
2593 		return -EINVAL;
2594 	}
2595 
2596 	if (!binder_validate_fixup(b, off_start,
2597 				   parent, bp->parent_offset,
2598 				   last_fixup_obj,
2599 				   last_fixup_min_off)) {
2600 		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2601 				  proc->pid, thread->pid);
2602 		return -EINVAL;
2603 	}
2604 
2605 	if (parent->length < sizeof(binder_uintptr_t) ||
2606 	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2607 		/* No space for a pointer here! */
2608 		binder_user_error("%d:%d got transaction with invalid parent offset\n",
2609 				  proc->pid, thread->pid);
2610 		return -EINVAL;
2611 	}
2612 	parent_buffer = (u8 *)((uintptr_t)parent->buffer -
2613 			binder_alloc_get_user_buffer_offset(
2614 				&target_proc->alloc));
2615 	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2616 
2617 	return 0;
2618 }
2619 
2620 /**
2621  * binder_proc_transaction() - sends a transaction to a process and wakes it up
2622  * @t:		transaction to send
2623  * @proc:	process to send the transaction to
2624  * @thread:	thread in @proc to send the transaction to (may be NULL)
2625  *
2626  * This function queues a transaction to the specified process. It will try
2627  * to find a thread in the target process to handle the transaction and
2628  * wake it up. If no thread is found, the work is queued to the proc's
2629  * todo list and the proc waitqueue is woken.
2630  *
2631  * If the @thread parameter is not NULL, the transaction is always queued
2632  * to the todo list of that specific thread.
2633  *
2634  * Return:	true if the transaction was successfully queued
2635  *		false if the target process or thread is dead
2636  */
2637 static bool binder_proc_transaction(struct binder_transaction *t,
2638 				    struct binder_proc *proc,
2639 				    struct binder_thread *thread)
2640 {
2641 	struct binder_node *node = t->buffer->target_node;
2642 	bool oneway = !!(t->flags & TF_ONE_WAY);
2643 	bool pending_async = false;
2644 
2645 	BUG_ON(!node);
2646 	binder_node_lock(node);
2647 	if (oneway) {
2648 		BUG_ON(thread);
2649 		if (node->has_async_transaction) {
2650 			pending_async = true;
2651 		} else {
2652 			node->has_async_transaction = true;
2653 		}
2654 	}
2655 
2656 	binder_inner_proc_lock(proc);
2657 
2658 	if (proc->is_dead || (thread && thread->is_dead)) {
2659 		binder_inner_proc_unlock(proc);
2660 		binder_node_unlock(node);
2661 		return false;
2662 	}
2663 
2664 	if (!thread && !pending_async)
2665 		thread = binder_select_thread_ilocked(proc);
2666 
2667 	if (thread)
2668 		binder_enqueue_thread_work_ilocked(thread, &t->work);
2669 	else if (!pending_async)
2670 		binder_enqueue_work_ilocked(&t->work, &proc->todo);
2671 	else
2672 		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2673 
2674 	if (!pending_async)
2675 		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2676 
2677 	binder_inner_proc_unlock(proc);
2678 	binder_node_unlock(node);
2679 
2680 	return true;
2681 }
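
/*
 * Queueing decision of the function above, in brief:
 *
 *	@thread != NULL            -> that thread's todo list, sync wakeup
 *	sync, waiting thread found -> thread->todo, sync wakeup
 *	sync, no waiting thread    -> proc->todo, wakeup of the proc
 *	oneway, node idle          -> as above, but non-sync wakeup
 *	oneway, node busy          -> node->async_todo, no wakeup; delivered
 *	                              when the previous async buffer is freed
 */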
2682 
2683 /**
2684  * binder_get_node_refs_for_txn() - Get required refs on node for txn
2685  * @node:         struct binder_node for which to get refs
2686  * @procp:        returns @node->proc if valid
2687  * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
2688  *
2689  * User-space normally keeps the node alive when creating a transaction
2690  * since it has a reference to the target. The local strong ref keeps it
2691  * alive if the sending process dies before the target process processes
2692  * the transaction. If the source process is malicious or has a reference
2693  * counting bug, relying on the local strong ref can fail.
2694  *
2695  * Since user-space can cause the local strong ref to go away, we also take
2696  * a tmpref on the node to ensure it survives while we are constructing
2697  * the transaction. We also need a tmpref on the proc while we are
2698  * constructing the transaction, so we take that here as well.
2699  *
2700  * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2701  * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2702  * target proc has died, @error is set to BR_DEAD_REPLY.
2703  */
2704 static struct binder_node *binder_get_node_refs_for_txn(
2705 		struct binder_node *node,
2706 		struct binder_proc **procp,
2707 		uint32_t *error)
2708 {
2709 	struct binder_node *target_node = NULL;
2710 
2711 	binder_node_inner_lock(node);
2712 	if (node->proc) {
2713 		target_node = node;
2714 		binder_inc_node_nilocked(node, 1, 0, NULL);
2715 		binder_inc_node_tmpref_ilocked(node);
2716 		node->proc->tmp_ref++;
2717 		*procp = node->proc;
2718 	} else
2719 		*error = BR_DEAD_REPLY;
2720 	binder_node_inner_unlock(node);
2721 
2722 	return target_node;
2723 }
2724 
2725 static void binder_transaction(struct binder_proc *proc,
2726 			       struct binder_thread *thread,
2727 			       struct binder_transaction_data *tr, int reply,
2728 			       binder_size_t extra_buffers_size)
2729 {
2730 	int ret;
2731 	struct binder_transaction *t;
2732 	struct binder_work *w;
2733 	struct binder_work *tcomplete;
2734 	binder_size_t *offp, *off_end, *off_start;
2735 	binder_size_t off_min;
2736 	u8 *sg_bufp, *sg_buf_end;
2737 	struct binder_proc *target_proc = NULL;
2738 	struct binder_thread *target_thread = NULL;
2739 	struct binder_node *target_node = NULL;
2740 	struct binder_transaction *in_reply_to = NULL;
2741 	struct binder_transaction_log_entry *e;
2742 	uint32_t return_error = 0;
2743 	uint32_t return_error_param = 0;
2744 	uint32_t return_error_line = 0;
2745 	struct binder_buffer_object *last_fixup_obj = NULL;
2746 	binder_size_t last_fixup_min_off = 0;
2747 	struct binder_context *context = proc->context;
2748 	int t_debug_id = atomic_inc_return(&binder_last_id);
2749 
2750 	e = binder_transaction_log_add(&binder_transaction_log);
2751 	e->debug_id = t_debug_id;
2752 	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2753 	e->from_proc = proc->pid;
2754 	e->from_thread = thread->pid;
2755 	e->target_handle = tr->target.handle;
2756 	e->data_size = tr->data_size;
2757 	e->offsets_size = tr->offsets_size;
2758 	e->context_name = proc->context->name;
2759 
2760 	if (reply) {
2761 		binder_inner_proc_lock(proc);
2762 		in_reply_to = thread->transaction_stack;
2763 		if (in_reply_to == NULL) {
2764 			binder_inner_proc_unlock(proc);
2765 			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2766 					  proc->pid, thread->pid);
2767 			return_error = BR_FAILED_REPLY;
2768 			return_error_param = -EPROTO;
2769 			return_error_line = __LINE__;
2770 			goto err_empty_call_stack;
2771 		}
2772 		if (in_reply_to->to_thread != thread) {
2773 			spin_lock(&in_reply_to->lock);
2774 			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2775 				proc->pid, thread->pid, in_reply_to->debug_id,
2776 				in_reply_to->to_proc ?
2777 				in_reply_to->to_proc->pid : 0,
2778 				in_reply_to->to_thread ?
2779 				in_reply_to->to_thread->pid : 0);
2780 			spin_unlock(&in_reply_to->lock);
2781 			binder_inner_proc_unlock(proc);
2782 			return_error = BR_FAILED_REPLY;
2783 			return_error_param = -EPROTO;
2784 			return_error_line = __LINE__;
2785 			in_reply_to = NULL;
2786 			goto err_bad_call_stack;
2787 		}
2788 		thread->transaction_stack = in_reply_to->to_parent;
2789 		binder_inner_proc_unlock(proc);
2790 		binder_set_nice(in_reply_to->saved_priority);
2791 		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2792 		if (target_thread == NULL) {
2793 			/* annotation for sparse */
2794 			__release(&target_thread->proc->inner_lock);
2795 			return_error = BR_DEAD_REPLY;
2796 			return_error_line = __LINE__;
2797 			goto err_dead_binder;
2798 		}
2799 		if (target_thread->transaction_stack != in_reply_to) {
2800 			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2801 				proc->pid, thread->pid,
2802 				target_thread->transaction_stack ?
2803 				target_thread->transaction_stack->debug_id : 0,
2804 				in_reply_to->debug_id);
2805 			binder_inner_proc_unlock(target_thread->proc);
2806 			return_error = BR_FAILED_REPLY;
2807 			return_error_param = -EPROTO;
2808 			return_error_line = __LINE__;
2809 			in_reply_to = NULL;
2810 			target_thread = NULL;
2811 			goto err_dead_binder;
2812 		}
2813 		target_proc = target_thread->proc;
2814 		target_proc->tmp_ref++;
2815 		binder_inner_proc_unlock(target_thread->proc);
2816 	} else {
2817 		if (tr->target.handle) {
2818 			struct binder_ref *ref;
2819 
2820 			/*
2821 			 * There must already be a strong ref
2822 			 * on this node. If so, do a strong
2823 			 * increment on the node to ensure it
2824 			 * stays alive until the transaction is
2825 			 * done.
2826 			 */
2827 			binder_proc_lock(proc);
2828 			ref = binder_get_ref_olocked(proc, tr->target.handle,
2829 						     true);
2830 			if (ref) {
2831 				target_node = binder_get_node_refs_for_txn(
2832 						ref->node, &target_proc,
2833 						&return_error);
2834 			} else {
2835 				binder_user_error("%d:%d got transaction to invalid handle\n",
2836 						  proc->pid, thread->pid);
2837 				return_error = BR_FAILED_REPLY;
2838 			}
2839 			binder_proc_unlock(proc);
2840 		} else {
2841 			mutex_lock(&context->context_mgr_node_lock);
2842 			target_node = context->binder_context_mgr_node;
2843 			if (target_node)
2844 				target_node = binder_get_node_refs_for_txn(
2845 						target_node, &target_proc,
2846 						&return_error);
2847 			else
2848 				return_error = BR_DEAD_REPLY;
2849 			mutex_unlock(&context->context_mgr_node_lock);
2850 			if (target_node && target_proc == proc) {
2851 				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2852 						  proc->pid, thread->pid);
2853 				return_error = BR_FAILED_REPLY;
2854 				return_error_param = -EINVAL;
2855 				return_error_line = __LINE__;
2856 				goto err_invalid_target_handle;
2857 			}
2858 		}
2859 		if (!target_node) {
2860 			/*
2861 			 * return_error is set above
2862 			 */
2863 			return_error_param = -EINVAL;
2864 			return_error_line = __LINE__;
2865 			goto err_dead_binder;
2866 		}
2867 		e->to_node = target_node->debug_id;
2868 		if (security_binder_transaction(proc->tsk,
2869 						target_proc->tsk) < 0) {
2870 			return_error = BR_FAILED_REPLY;
2871 			return_error_param = -EPERM;
2872 			return_error_line = __LINE__;
2873 			goto err_invalid_target_handle;
2874 		}
2875 		binder_inner_proc_lock(proc);
2876 
2877 		w = list_first_entry_or_null(&thread->todo,
2878 					     struct binder_work, entry);
2879 		if (!(tr->flags & TF_ONE_WAY) && w &&
2880 		    w->type == BINDER_WORK_TRANSACTION) {
2881 			/*
2882 			 * Do not allow new outgoing transaction from a
2883 			 * thread that has a transaction at the head of
2884 			 * its todo list. Only need to check the head
2885 			 * because binder_select_thread_ilocked picks a
2886 			 * thread from proc->waiting_threads to enqueue
2887 			 * the transaction, and nothing is queued to the
2888 			 * todo list while the thread is on waiting_threads.
2889 			 */
2890 			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
2891 					  proc->pid, thread->pid);
2892 			binder_inner_proc_unlock(proc);
2893 			return_error = BR_FAILED_REPLY;
2894 			return_error_param = -EPROTO;
2895 			return_error_line = __LINE__;
2896 			goto err_bad_todo_list;
2897 		}
2898 
2899 		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2900 			struct binder_transaction *tmp;
2901 
2902 			tmp = thread->transaction_stack;
2903 			if (tmp->to_thread != thread) {
2904 				spin_lock(&tmp->lock);
2905 				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2906 					proc->pid, thread->pid, tmp->debug_id,
2907 					tmp->to_proc ? tmp->to_proc->pid : 0,
2908 					tmp->to_thread ?
2909 					tmp->to_thread->pid : 0);
2910 				spin_unlock(&tmp->lock);
2911 				binder_inner_proc_unlock(proc);
2912 				return_error = BR_FAILED_REPLY;
2913 				return_error_param = -EPROTO;
2914 				return_error_line = __LINE__;
2915 				goto err_bad_call_stack;
2916 			}
2917 			while (tmp) {
2918 				struct binder_thread *from;
2919 
2920 				spin_lock(&tmp->lock);
2921 				from = tmp->from;
2922 				if (from && from->proc == target_proc) {
2923 					atomic_inc(&from->tmp_ref);
2924 					target_thread = from;
2925 					spin_unlock(&tmp->lock);
2926 					break;
2927 				}
2928 				spin_unlock(&tmp->lock);
2929 				tmp = tmp->from_parent;
2930 			}
2931 		}
2932 		binder_inner_proc_unlock(proc);
2933 	}
2934 	if (target_thread)
2935 		e->to_thread = target_thread->pid;
2936 	e->to_proc = target_proc->pid;
2937 
2938 	/* TODO: reuse incoming transaction for reply */
2939 	t = kzalloc(sizeof(*t), GFP_KERNEL);
2940 	if (t == NULL) {
2941 		return_error = BR_FAILED_REPLY;
2942 		return_error_param = -ENOMEM;
2943 		return_error_line = __LINE__;
2944 		goto err_alloc_t_failed;
2945 	}
2946 	INIT_LIST_HEAD(&t->fd_fixups);
2947 	binder_stats_created(BINDER_STAT_TRANSACTION);
2948 	spin_lock_init(&t->lock);
2949 
2950 	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2951 	if (tcomplete == NULL) {
2952 		return_error = BR_FAILED_REPLY;
2953 		return_error_param = -ENOMEM;
2954 		return_error_line = __LINE__;
2955 		goto err_alloc_tcomplete_failed;
2956 	}
2957 	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2958 
2959 	t->debug_id = t_debug_id;
2960 
2961 	if (reply)
2962 		binder_debug(BINDER_DEBUG_TRANSACTION,
2963 			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2964 			     proc->pid, thread->pid, t->debug_id,
2965 			     target_proc->pid, target_thread->pid,
2966 			     (u64)tr->data.ptr.buffer,
2967 			     (u64)tr->data.ptr.offsets,
2968 			     (u64)tr->data_size, (u64)tr->offsets_size,
2969 			     (u64)extra_buffers_size);
2970 	else
2971 		binder_debug(BINDER_DEBUG_TRANSACTION,
2972 			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2973 			     proc->pid, thread->pid, t->debug_id,
2974 			     target_proc->pid, target_node->debug_id,
2975 			     (u64)tr->data.ptr.buffer,
2976 			     (u64)tr->data.ptr.offsets,
2977 			     (u64)tr->data_size, (u64)tr->offsets_size,
2978 			     (u64)extra_buffers_size);
2979 
2980 	if (!reply && !(tr->flags & TF_ONE_WAY))
2981 		t->from = thread;
2982 	else
2983 		t->from = NULL;
2984 	t->sender_euid = task_euid(proc->tsk);
2985 	t->to_proc = target_proc;
2986 	t->to_thread = target_thread;
2987 	t->code = tr->code;
2988 	t->flags = tr->flags;
2989 	t->priority = task_nice(current);
2990 
2991 	trace_binder_transaction(reply, t, target_node);
2992 
2993 	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
2994 		tr->offsets_size, extra_buffers_size,
2995 		!reply && (t->flags & TF_ONE_WAY));
2996 	if (IS_ERR(t->buffer)) {
2997 		/*
2998 		 * -ESRCH indicates VMA cleared. The target is dying.
2999 		 */
3000 		return_error_param = PTR_ERR(t->buffer);
3001 		return_error = return_error_param == -ESRCH ?
3002 			BR_DEAD_REPLY : BR_FAILED_REPLY;
3003 		return_error_line = __LINE__;
3004 		t->buffer = NULL;
3005 		goto err_binder_alloc_buf_failed;
3006 	}
3007 	t->buffer->allow_user_free = 0;
3008 	t->buffer->debug_id = t->debug_id;
3009 	t->buffer->transaction = t;
3010 	t->buffer->target_node = target_node;
3011 	trace_binder_transaction_alloc_buf(t->buffer);
3012 	off_start = (binder_size_t *)(t->buffer->data +
3013 				      ALIGN(tr->data_size, sizeof(void *)));
3014 	offp = off_start;
3015 
3016 	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
3017 			   tr->data.ptr.buffer, tr->data_size)) {
3018 		binder_user_error("%d:%d got transaction with invalid data ptr\n",
3019 				proc->pid, thread->pid);
3020 		return_error = BR_FAILED_REPLY;
3021 		return_error_param = -EFAULT;
3022 		return_error_line = __LINE__;
3023 		goto err_copy_data_failed;
3024 	}
3025 	if (copy_from_user(offp, (const void __user *)(uintptr_t)
3026 			   tr->data.ptr.offsets, tr->offsets_size)) {
3027 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3028 				proc->pid, thread->pid);
3029 		return_error = BR_FAILED_REPLY;
3030 		return_error_param = -EFAULT;
3031 		return_error_line = __LINE__;
3032 		goto err_copy_data_failed;
3033 	}
3034 	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3035 		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3036 				proc->pid, thread->pid, (u64)tr->offsets_size);
3037 		return_error = BR_FAILED_REPLY;
3038 		return_error_param = -EINVAL;
3039 		return_error_line = __LINE__;
3040 		goto err_bad_offset;
3041 	}
3042 	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3043 		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3044 				  proc->pid, thread->pid,
3045 				  (u64)extra_buffers_size);
3046 		return_error = BR_FAILED_REPLY;
3047 		return_error_param = -EINVAL;
3048 		return_error_line = __LINE__;
3049 		goto err_bad_offset;
3050 	}
3051 	off_end = (void *)off_start + tr->offsets_size;
3052 	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
3053 	sg_buf_end = sg_bufp + extra_buffers_size;
3054 	off_min = 0;
3055 	for (; offp < off_end; offp++) {
3056 		struct binder_object_header *hdr;
3057 		size_t object_size = binder_validate_object(t->buffer, *offp);
3058 
3059 		if (object_size == 0 || *offp < off_min) {
3060 			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3061 					  proc->pid, thread->pid, (u64)*offp,
3062 					  (u64)off_min,
3063 					  (u64)t->buffer->data_size);
3064 			return_error = BR_FAILED_REPLY;
3065 			return_error_param = -EINVAL;
3066 			return_error_line = __LINE__;
3067 			goto err_bad_offset;
3068 		}
3069 
3070 		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
3071 		off_min = *offp + object_size;
3072 		switch (hdr->type) {
3073 		case BINDER_TYPE_BINDER:
3074 		case BINDER_TYPE_WEAK_BINDER: {
3075 			struct flat_binder_object *fp;
3076 
3077 			fp = to_flat_binder_object(hdr);
3078 			ret = binder_translate_binder(fp, t, thread);
3079 			if (ret < 0) {
3080 				return_error = BR_FAILED_REPLY;
3081 				return_error_param = ret;
3082 				return_error_line = __LINE__;
3083 				goto err_translate_failed;
3084 			}
3085 		} break;
3086 		case BINDER_TYPE_HANDLE:
3087 		case BINDER_TYPE_WEAK_HANDLE: {
3088 			struct flat_binder_object *fp;
3089 
3090 			fp = to_flat_binder_object(hdr);
3091 			ret = binder_translate_handle(fp, t, thread);
3092 			if (ret < 0) {
3093 				return_error = BR_FAILED_REPLY;
3094 				return_error_param = ret;
3095 				return_error_line = __LINE__;
3096 				goto err_translate_failed;
3097 			}
3098 		} break;
3099 
3100 		case BINDER_TYPE_FD: {
3101 			struct binder_fd_object *fp = to_binder_fd_object(hdr);
3102 			int ret = binder_translate_fd(&fp->fd, t, thread,
3103 						      in_reply_to);
3104 
3105 			if (ret < 0) {
3106 				return_error = BR_FAILED_REPLY;
3107 				return_error_param = ret;
3108 				return_error_line = __LINE__;
3109 				goto err_translate_failed;
3110 			}
3111 			fp->pad_binder = 0;
3112 		} break;
3113 		case BINDER_TYPE_FDA: {
3114 			struct binder_fd_array_object *fda =
3115 				to_binder_fd_array_object(hdr);
3116 			struct binder_buffer_object *parent =
3117 				binder_validate_ptr(t->buffer, fda->parent,
3118 						    off_start,
3119 						    offp - off_start);
3120 			if (!parent) {
3121 				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3122 						  proc->pid, thread->pid);
3123 				return_error = BR_FAILED_REPLY;
3124 				return_error_param = -EINVAL;
3125 				return_error_line = __LINE__;
3126 				goto err_bad_parent;
3127 			}
3128 			if (!binder_validate_fixup(t->buffer, off_start,
3129 						   parent, fda->parent_offset,
3130 						   last_fixup_obj,
3131 						   last_fixup_min_off)) {
3132 				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3133 						  proc->pid, thread->pid);
3134 				return_error = BR_FAILED_REPLY;
3135 				return_error_param = -EINVAL;
3136 				return_error_line = __LINE__;
3137 				goto err_bad_parent;
3138 			}
3139 			ret = binder_translate_fd_array(fda, parent, t, thread,
3140 							in_reply_to);
3141 			if (ret < 0) {
3142 				return_error = BR_FAILED_REPLY;
3143 				return_error_param = ret;
3144 				return_error_line = __LINE__;
3145 				goto err_translate_failed;
3146 			}
3147 			last_fixup_obj = parent;
3148 			last_fixup_min_off =
3149 				fda->parent_offset + sizeof(u32) * fda->num_fds;
3150 		} break;
3151 		case BINDER_TYPE_PTR: {
3152 			struct binder_buffer_object *bp =
3153 				to_binder_buffer_object(hdr);
3154 			size_t buf_left = sg_buf_end - sg_bufp;
3155 
3156 			if (bp->length > buf_left) {
3157 				binder_user_error("%d:%d got transaction with too large buffer\n",
3158 						  proc->pid, thread->pid);
3159 				return_error = BR_FAILED_REPLY;
3160 				return_error_param = -EINVAL;
3161 				return_error_line = __LINE__;
3162 				goto err_bad_offset;
3163 			}
3164 			if (copy_from_user(sg_bufp,
3165 					   (const void __user *)(uintptr_t)
3166 					   bp->buffer, bp->length)) {
3167 				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3168 						  proc->pid, thread->pid);
3169 				return_error_param = -EFAULT;
3170 				return_error = BR_FAILED_REPLY;
3171 				return_error_line = __LINE__;
3172 				goto err_copy_data_failed;
3173 			}
3174 			/* Fixup buffer pointer to target proc address space */
3175 			bp->buffer = (uintptr_t)sg_bufp +
3176 				binder_alloc_get_user_buffer_offset(
3177 						&target_proc->alloc);
3178 			sg_bufp += ALIGN(bp->length, sizeof(u64));
3179 
3180 			ret = binder_fixup_parent(t, thread, bp, off_start,
3181 						  offp - off_start,
3182 						  last_fixup_obj,
3183 						  last_fixup_min_off);
3184 			if (ret < 0) {
3185 				return_error = BR_FAILED_REPLY;
3186 				return_error_param = ret;
3187 				return_error_line = __LINE__;
3188 				goto err_translate_failed;
3189 			}
3190 			last_fixup_obj = bp;
3191 			last_fixup_min_off = 0;
3192 		} break;
3193 		default:
3194 			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3195 				proc->pid, thread->pid, hdr->type);
3196 			return_error = BR_FAILED_REPLY;
3197 			return_error_param = -EINVAL;
3198 			return_error_line = __LINE__;
3199 			goto err_bad_object_type;
3200 		}
3201 	}
3202 	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3203 	t->work.type = BINDER_WORK_TRANSACTION;
3204 
3205 	if (reply) {
3206 		binder_enqueue_thread_work(thread, tcomplete);
3207 		binder_inner_proc_lock(target_proc);
3208 		if (target_thread->is_dead) {
3209 			binder_inner_proc_unlock(target_proc);
3210 			goto err_dead_proc_or_thread;
3211 		}
3212 		BUG_ON(t->buffer->async_transaction != 0);
3213 		binder_pop_transaction_ilocked(target_thread, in_reply_to);
3214 		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3215 		binder_inner_proc_unlock(target_proc);
3216 		wake_up_interruptible_sync(&target_thread->wait);
3217 		binder_free_transaction(in_reply_to);
3218 	} else if (!(t->flags & TF_ONE_WAY)) {
3219 		BUG_ON(t->buffer->async_transaction != 0);
3220 		binder_inner_proc_lock(proc);
3221 		/*
3222 		 * Defer the TRANSACTION_COMPLETE, so we don't return to
3223 		 * userspace immediately; this allows the target process to
3224 		 * immediately start processing this transaction, reducing
3225 		 * latency. We will then return the TRANSACTION_COMPLETE when
3226 		 * the target replies (or there is an error).
3227 		 */
3228 		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3229 		t->need_reply = 1;
3230 		t->from_parent = thread->transaction_stack;
3231 		thread->transaction_stack = t;
3232 		binder_inner_proc_unlock(proc);
3233 		if (!binder_proc_transaction(t, target_proc, target_thread)) {
3234 			binder_inner_proc_lock(proc);
3235 			binder_pop_transaction_ilocked(thread, t);
3236 			binder_inner_proc_unlock(proc);
3237 			goto err_dead_proc_or_thread;
3238 		}
3239 	} else {
3240 		BUG_ON(target_node == NULL);
3241 		BUG_ON(t->buffer->async_transaction != 1);
3242 		binder_enqueue_thread_work(thread, tcomplete);
3243 		if (!binder_proc_transaction(t, target_proc, NULL))
3244 			goto err_dead_proc_or_thread;
3245 	}
3246 	if (target_thread)
3247 		binder_thread_dec_tmpref(target_thread);
3248 	binder_proc_dec_tmpref(target_proc);
3249 	if (target_node)
3250 		binder_dec_node_tmpref(target_node);
3251 	/*
3252 	 * write barrier to synchronize with initialization
3253 	 * of log entry
3254 	 */
3255 	smp_wmb();
3256 	WRITE_ONCE(e->debug_id_done, t_debug_id);
3257 	return;
3258 
3259 err_dead_proc_or_thread:
3260 	return_error = BR_DEAD_REPLY;
3261 	return_error_line = __LINE__;
3262 	binder_dequeue_work(proc, tcomplete);
3263 err_translate_failed:
3264 err_bad_object_type:
3265 err_bad_offset:
3266 err_bad_parent:
3267 err_copy_data_failed:
3268 	binder_free_txn_fixups(t);
3269 	trace_binder_transaction_failed_buffer_release(t->buffer);
3270 	binder_transaction_buffer_release(target_proc, t->buffer, offp);
3271 	if (target_node)
3272 		binder_dec_node_tmpref(target_node);
3273 	target_node = NULL;
3274 	t->buffer->transaction = NULL;
3275 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3276 err_binder_alloc_buf_failed:
3277 	kfree(tcomplete);
3278 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3279 err_alloc_tcomplete_failed:
3280 	kfree(t);
3281 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
3282 err_alloc_t_failed:
3283 err_bad_todo_list:
3284 err_bad_call_stack:
3285 err_empty_call_stack:
3286 err_dead_binder:
3287 err_invalid_target_handle:
3288 	if (target_thread)
3289 		binder_thread_dec_tmpref(target_thread);
3290 	if (target_proc)
3291 		binder_proc_dec_tmpref(target_proc);
3292 	if (target_node) {
3293 		binder_dec_node(target_node, 1, 0);
3294 		binder_dec_node_tmpref(target_node);
3295 	}
3296 
3297 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3298 		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3299 		     proc->pid, thread->pid, return_error, return_error_param,
3300 		     (u64)tr->data_size, (u64)tr->offsets_size,
3301 		     return_error_line);
3302 
3303 	{
3304 		struct binder_transaction_log_entry *fe;
3305 
3306 		e->return_error = return_error;
3307 		e->return_error_param = return_error_param;
3308 		e->return_error_line = return_error_line;
3309 		fe = binder_transaction_log_add(&binder_transaction_log_failed);
3310 		*fe = *e;
3311 		/*
3312 		 * write barrier to synchronize with initialization
3313 		 * of log entry
3314 		 */
3315 		smp_wmb();
3316 		WRITE_ONCE(e->debug_id_done, t_debug_id);
3317 		WRITE_ONCE(fe->debug_id_done, t_debug_id);
3318 	}
3319 
3320 	BUG_ON(thread->return_error.cmd != BR_OK);
3321 	if (in_reply_to) {
3322 		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3323 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3324 		binder_send_failed_reply(in_reply_to, return_error);
3325 	} else {
3326 		thread->return_error.cmd = return_error;
3327 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3328 	}
3329 }
3330 
3331 /**
3332  * binder_free_buf() - free the specified buffer
3333  * @proc:	binder proc that owns buffer
3334  * @buffer:	buffer to be freed
3335  *
3336  * If the buffer is for an async transaction, enqueue the next async
3337  * transaction from the node.
3338  *
3339  * Cleanup buffer and free it.
3340  */
3341 static void
3342 binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
3343 {
3344 	if (buffer->transaction) {
3345 		buffer->transaction->buffer = NULL;
3346 		buffer->transaction = NULL;
3347 	}
3348 	if (buffer->async_transaction && buffer->target_node) {
3349 		struct binder_node *buf_node;
3350 		struct binder_work *w;
3351 
3352 		buf_node = buffer->target_node;
3353 		binder_node_inner_lock(buf_node);
3354 		BUG_ON(!buf_node->has_async_transaction);
3355 		BUG_ON(buf_node->proc != proc);
3356 		w = binder_dequeue_work_head_ilocked(
3357 				&buf_node->async_todo);
3358 		if (!w) {
3359 			buf_node->has_async_transaction = false;
3360 		} else {
3361 			binder_enqueue_work_ilocked(
3362 					w, &proc->todo);
3363 			binder_wakeup_proc_ilocked(proc);
3364 		}
3365 		binder_node_inner_unlock(buf_node);
3366 	}
3367 	trace_binder_transaction_buffer_release(buffer);
3368 	binder_transaction_buffer_release(proc, buffer, NULL);
3369 	binder_alloc_free_buf(&proc->alloc, buffer);
3370 }
3371 
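/*
 * binder_thread_write() parses a packed stream of BC_* commands from
 * user space: each entry is a u32 command word immediately followed by
 * that command's payload, if any. An illustrative (hedged) sketch of
 * the stream layout, using the UAPI structures consumed below:
 *
 *	u32 BC_ENTER_LOOPER;                             (no payload)
 *	u32 BC_FREE_BUFFER;  binder_uintptr_t data_ptr;  (buffer to free)
 *	u32 BC_TRANSACTION;  struct binder_transaction_data tr;
 *
 * *consumed is advanced past each fully parsed command, so the caller
 * can tell how much of the write buffer was accepted.
 */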
3372 static int binder_thread_write(struct binder_proc *proc,
3373 			struct binder_thread *thread,
3374 			binder_uintptr_t binder_buffer, size_t size,
3375 			binder_size_t *consumed)
3376 {
3377 	uint32_t cmd;
3378 	struct binder_context *context = proc->context;
3379 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3380 	void __user *ptr = buffer + *consumed;
3381 	void __user *end = buffer + size;
3382 
3383 	while (ptr < end && thread->return_error.cmd == BR_OK) {
3384 		int ret;
3385 
3386 		if (get_user(cmd, (uint32_t __user *)ptr))
3387 			return -EFAULT;
3388 		ptr += sizeof(uint32_t);
3389 		trace_binder_command(cmd);
3390 		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3391 			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3392 			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3393 			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3394 		}
3395 		switch (cmd) {
3396 		case BC_INCREFS:
3397 		case BC_ACQUIRE:
3398 		case BC_RELEASE:
3399 		case BC_DECREFS: {
3400 			uint32_t target;
3401 			const char *debug_string;
3402 			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3403 			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3404 			struct binder_ref_data rdata;
3405 
3406 			if (get_user(target, (uint32_t __user *)ptr))
3407 				return -EFAULT;
3408 
3409 			ptr += sizeof(uint32_t);
3410 			ret = -1;
3411 			if (increment && !target) {
3412 				struct binder_node *ctx_mgr_node;
3413 				mutex_lock(&context->context_mgr_node_lock);
3414 				ctx_mgr_node = context->binder_context_mgr_node;
3415 				if (ctx_mgr_node)
3416 					ret = binder_inc_ref_for_node(
3417 							proc, ctx_mgr_node,
3418 							strong, NULL, &rdata);
3419 				mutex_unlock(&context->context_mgr_node_lock);
3420 			}
3421 			if (ret)
3422 				ret = binder_update_ref_for_handle(
3423 						proc, target, increment, strong,
3424 						&rdata);
3425 			if (!ret && rdata.desc != target) {
3426 				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3427 					proc->pid, thread->pid,
3428 					target, rdata.desc);
3429 			}
3430 			switch (cmd) {
3431 			case BC_INCREFS:
3432 				debug_string = "IncRefs";
3433 				break;
3434 			case BC_ACQUIRE:
3435 				debug_string = "Acquire";
3436 				break;
3437 			case BC_RELEASE:
3438 				debug_string = "Release";
3439 				break;
3440 			case BC_DECREFS:
3441 			default:
3442 				debug_string = "DecRefs";
3443 				break;
3444 			}
3445 			if (ret) {
3446 				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3447 					proc->pid, thread->pid, debug_string,
3448 					strong, target, ret);
3449 				break;
3450 			}
3451 			binder_debug(BINDER_DEBUG_USER_REFS,
3452 				     "%d:%d %s ref %d desc %d s %d w %d\n",
3453 				     proc->pid, thread->pid, debug_string,
3454 				     rdata.debug_id, rdata.desc, rdata.strong,
3455 				     rdata.weak);
3456 			break;
3457 		}
3458 		case BC_INCREFS_DONE:
3459 		case BC_ACQUIRE_DONE: {
3460 			binder_uintptr_t node_ptr;
3461 			binder_uintptr_t cookie;
3462 			struct binder_node *node;
3463 			bool free_node;
3464 
3465 			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3466 				return -EFAULT;
3467 			ptr += sizeof(binder_uintptr_t);
3468 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3469 				return -EFAULT;
3470 			ptr += sizeof(binder_uintptr_t);
3471 			node = binder_get_node(proc, node_ptr);
3472 			if (node == NULL) {
3473 				binder_user_error("%d:%d %s u%016llx no match\n",
3474 					proc->pid, thread->pid,
3475 					cmd == BC_INCREFS_DONE ?
3476 					"BC_INCREFS_DONE" :
3477 					"BC_ACQUIRE_DONE",
3478 					(u64)node_ptr);
3479 				break;
3480 			}
3481 			if (cookie != node->cookie) {
3482 				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3483 					proc->pid, thread->pid,
3484 					cmd == BC_INCREFS_DONE ?
3485 					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3486 					(u64)node_ptr, node->debug_id,
3487 					(u64)cookie, (u64)node->cookie);
3488 				binder_put_node(node);
3489 				break;
3490 			}
3491 			binder_node_inner_lock(node);
3492 			if (cmd == BC_ACQUIRE_DONE) {
3493 				if (node->pending_strong_ref == 0) {
3494 					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3495 						proc->pid, thread->pid,
3496 						node->debug_id);
3497 					binder_node_inner_unlock(node);
3498 					binder_put_node(node);
3499 					break;
3500 				}
3501 				node->pending_strong_ref = 0;
3502 			} else {
3503 				if (node->pending_weak_ref == 0) {
3504 					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3505 						proc->pid, thread->pid,
3506 						node->debug_id);
3507 					binder_node_inner_unlock(node);
3508 					binder_put_node(node);
3509 					break;
3510 				}
3511 				node->pending_weak_ref = 0;
3512 			}
3513 			free_node = binder_dec_node_nilocked(node,
3514 					cmd == BC_ACQUIRE_DONE, 0);
3515 			WARN_ON(free_node);
3516 			binder_debug(BINDER_DEBUG_USER_REFS,
3517 				     "%d:%d %s node %d ls %d lw %d tr %d\n",
3518 				     proc->pid, thread->pid,
3519 				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3520 				     node->debug_id, node->local_strong_refs,
3521 				     node->local_weak_refs, node->tmp_refs);
3522 			binder_node_inner_unlock(node);
3523 			binder_put_node(node);
3524 			break;
3525 		}
3526 		case BC_ATTEMPT_ACQUIRE:
3527 			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3528 			return -EINVAL;
3529 		case BC_ACQUIRE_RESULT:
3530 			pr_err("BC_ACQUIRE_RESULT not supported\n");
3531 			return -EINVAL;
3532 
3533 		case BC_FREE_BUFFER: {
3534 			binder_uintptr_t data_ptr;
3535 			struct binder_buffer *buffer;
3536 
3537 			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3538 				return -EFAULT;
3539 			ptr += sizeof(binder_uintptr_t);
3540 
3541 			buffer = binder_alloc_prepare_to_free(&proc->alloc,
3542 							      data_ptr);
3543 			if (buffer == NULL) {
3544 				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
3545 					proc->pid, thread->pid, (u64)data_ptr);
3546 				break;
3547 			}
3548 			if (!buffer->allow_user_free) {
3549 				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3550 					proc->pid, thread->pid, (u64)data_ptr);
3551 				break;
3552 			}
3553 			binder_debug(BINDER_DEBUG_FREE_BUFFER,
3554 				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3555 				     proc->pid, thread->pid, (u64)data_ptr,
3556 				     buffer->debug_id,
3557 				     buffer->transaction ? "active" : "finished");
3558 			binder_free_buf(proc, buffer);
3559 			break;
3560 		}
3561 
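		/*
		 * The _SG variants append a buffers_size field so senders
		 * can pass scatter-gather data: extra space is reserved for
		 * BINDER_TYPE_PTR objects on top of the regular transaction
		 * buffer. Layout, as defined in the binder UAPI header:
		 *
		 *	struct binder_transaction_data_sg {
		 *		struct binder_transaction_data transaction_data;
		 *		binder_size_t buffers_size;
		 *	};
		 */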
3562 		case BC_TRANSACTION_SG:
3563 		case BC_REPLY_SG: {
3564 			struct binder_transaction_data_sg tr;
3565 
3566 			if (copy_from_user(&tr, ptr, sizeof(tr)))
3567 				return -EFAULT;
3568 			ptr += sizeof(tr);
3569 			binder_transaction(proc, thread, &tr.transaction_data,
3570 					   cmd == BC_REPLY_SG, tr.buffers_size);
3571 			break;
3572 		}
3573 		case BC_TRANSACTION:
3574 		case BC_REPLY: {
3575 			struct binder_transaction_data tr;
3576 
3577 			if (copy_from_user(&tr, ptr, sizeof(tr)))
3578 				return -EFAULT;
3579 			ptr += sizeof(tr);
3580 			binder_transaction(proc, thread, &tr,
3581 					   cmd == BC_REPLY, 0);
3582 			break;
3583 		}
3584 
3585 		case BC_REGISTER_LOOPER:
3586 			binder_debug(BINDER_DEBUG_THREADS,
3587 				     "%d:%d BC_REGISTER_LOOPER\n",
3588 				     proc->pid, thread->pid);
3589 			binder_inner_proc_lock(proc);
3590 			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3591 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3592 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3593 					proc->pid, thread->pid);
3594 			} else if (proc->requested_threads == 0) {
3595 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3596 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3597 					proc->pid, thread->pid);
3598 			} else {
3599 				proc->requested_threads--;
3600 				proc->requested_threads_started++;
3601 			}
3602 			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3603 			binder_inner_proc_unlock(proc);
3604 			break;
3605 		case BC_ENTER_LOOPER:
3606 			binder_debug(BINDER_DEBUG_THREADS,
3607 				     "%d:%d BC_ENTER_LOOPER\n",
3608 				     proc->pid, thread->pid);
3609 			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3610 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3611 				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3612 					proc->pid, thread->pid);
3613 			}
3614 			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3615 			break;
3616 		case BC_EXIT_LOOPER:
3617 			binder_debug(BINDER_DEBUG_THREADS,
3618 				     "%d:%d BC_EXIT_LOOPER\n",
3619 				     proc->pid, thread->pid);
3620 			thread->looper |= BINDER_LOOPER_STATE_EXITED;
3621 			break;
3622 
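		/*
		 * Both death-notification commands carry a handle followed
		 * by a caller-chosen cookie that the kernel echoes back in
		 * the matching BR_* return. Illustrative (hedged) sketch of
		 * the packed write-buffer layout, matching the get_user()
		 * calls below:
		 *
		 *	u32 cmd;                  BC_REQUEST_DEATH_NOTIFICATION
		 *	u32 target;               handle to watch
		 *	binder_uintptr_t cookie;  echoed in BR_DEAD_BINDER
		 */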
3623 		case BC_REQUEST_DEATH_NOTIFICATION:
3624 		case BC_CLEAR_DEATH_NOTIFICATION: {
3625 			uint32_t target;
3626 			binder_uintptr_t cookie;
3627 			struct binder_ref *ref;
3628 			struct binder_ref_death *death = NULL;
3629 
3630 			if (get_user(target, (uint32_t __user *)ptr))
3631 				return -EFAULT;
3632 			ptr += sizeof(uint32_t);
3633 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3634 				return -EFAULT;
3635 			ptr += sizeof(binder_uintptr_t);
3636 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3637 				/*
3638 				 * Allocate memory for death notification
3639 				 * before taking the lock
3640 				 */
3641 				death = kzalloc(sizeof(*death), GFP_KERNEL);
3642 				if (death == NULL) {
3643 					WARN_ON(thread->return_error.cmd !=
3644 						BR_OK);
3645 					thread->return_error.cmd = BR_ERROR;
3646 					binder_enqueue_thread_work(
3647 						thread,
3648 						&thread->return_error.work);
3649 					binder_debug(
3650 						BINDER_DEBUG_FAILED_TRANSACTION,
3651 						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3652 						proc->pid, thread->pid);
3653 					break;
3654 				}
3655 			}
3656 			binder_proc_lock(proc);
3657 			ref = binder_get_ref_olocked(proc, target, false);
3658 			if (ref == NULL) {
3659 				binder_user_error("%d:%d %s invalid ref %d\n",
3660 					proc->pid, thread->pid,
3661 					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3662 					"BC_REQUEST_DEATH_NOTIFICATION" :
3663 					"BC_CLEAR_DEATH_NOTIFICATION",
3664 					target);
3665 				binder_proc_unlock(proc);
3666 				kfree(death);
3667 				break;
3668 			}
3669 
3670 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3671 				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3672 				     proc->pid, thread->pid,
3673 				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3674 				     "BC_REQUEST_DEATH_NOTIFICATION" :
3675 				     "BC_CLEAR_DEATH_NOTIFICATION",
3676 				     (u64)cookie, ref->data.debug_id,
3677 				     ref->data.desc, ref->data.strong,
3678 				     ref->data.weak, ref->node->debug_id);
3679 
3680 			binder_node_lock(ref->node);
3681 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3682 				if (ref->death) {
3683 					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3684 						proc->pid, thread->pid);
3685 					binder_node_unlock(ref->node);
3686 					binder_proc_unlock(proc);
3687 					kfree(death);
3688 					break;
3689 				}
3690 				binder_stats_created(BINDER_STAT_DEATH);
3691 				INIT_LIST_HEAD(&death->work.entry);
3692 				death->cookie = cookie;
3693 				ref->death = death;
3694 				if (ref->node->proc == NULL) {
3695 					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3696 
3697 					binder_inner_proc_lock(proc);
3698 					binder_enqueue_work_ilocked(
3699 						&ref->death->work, &proc->todo);
3700 					binder_wakeup_proc_ilocked(proc);
3701 					binder_inner_proc_unlock(proc);
3702 				}
3703 			} else {
3704 				if (ref->death == NULL) {
3705 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3706 						proc->pid, thread->pid);
3707 					binder_node_unlock(ref->node);
3708 					binder_proc_unlock(proc);
3709 					break;
3710 				}
3711 				death = ref->death;
3712 				if (death->cookie != cookie) {
3713 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3714 						proc->pid, thread->pid,
3715 						(u64)death->cookie,
3716 						(u64)cookie);
3717 					binder_node_unlock(ref->node);
3718 					binder_proc_unlock(proc);
3719 					break;
3720 				}
3721 				ref->death = NULL;
3722 				binder_inner_proc_lock(proc);
3723 				if (list_empty(&death->work.entry)) {
3724 					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3725 					if (thread->looper &
3726 					    (BINDER_LOOPER_STATE_REGISTERED |
3727 					     BINDER_LOOPER_STATE_ENTERED))
3728 						binder_enqueue_thread_work_ilocked(
3729 								thread,
3730 								&death->work);
3731 					else {
3732 						binder_enqueue_work_ilocked(
3733 								&death->work,
3734 								&proc->todo);
3735 						binder_wakeup_proc_ilocked(
3736 								proc);
3737 					}
3738 				} else {
3739 					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3740 					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3741 				}
3742 				binder_inner_proc_unlock(proc);
3743 			}
3744 			binder_node_unlock(ref->node);
3745 			binder_proc_unlock(proc);
3746 		} break;
3747 		case BC_DEAD_BINDER_DONE: {
3748 			struct binder_work *w;
3749 			binder_uintptr_t cookie;
3750 			struct binder_ref_death *death = NULL;
3751 
3752 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3753 				return -EFAULT;
3754 
3755 			ptr += sizeof(cookie);
3756 			binder_inner_proc_lock(proc);
3757 			list_for_each_entry(w, &proc->delivered_death,
3758 					    entry) {
3759 				struct binder_ref_death *tmp_death =
3760 					container_of(w,
3761 						     struct binder_ref_death,
3762 						     work);
3763 
3764 				if (tmp_death->cookie == cookie) {
3765 					death = tmp_death;
3766 					break;
3767 				}
3768 			}
3769 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
3770 				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3771 				     proc->pid, thread->pid, (u64)cookie,
3772 				     death);
3773 			if (death == NULL) {
3774 				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3775 					proc->pid, thread->pid, (u64)cookie);
3776 				binder_inner_proc_unlock(proc);
3777 				break;
3778 			}
3779 			binder_dequeue_work_ilocked(&death->work);
3780 			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3781 				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3782 				if (thread->looper &
3783 					(BINDER_LOOPER_STATE_REGISTERED |
3784 					 BINDER_LOOPER_STATE_ENTERED))
3785 					binder_enqueue_thread_work_ilocked(
3786 						thread, &death->work);
3787 				else {
3788 					binder_enqueue_work_ilocked(
3789 							&death->work,
3790 							&proc->todo);
3791 					binder_wakeup_proc_ilocked(proc);
3792 				}
3793 			}
3794 			binder_inner_proc_unlock(proc);
3795 		} break;
3796 
3797 		default:
3798 			pr_err("%d:%d unknown command %d\n",
3799 			       proc->pid, thread->pid, cmd);
3800 			return -EINVAL;
3801 		}
3802 		*consumed = ptr - buffer;
3803 	}
3804 	return 0;
3805 }
3806 
3807 static void binder_stat_br(struct binder_proc *proc,
3808 			   struct binder_thread *thread, uint32_t cmd)
3809 {
3810 	trace_binder_return(cmd);
3811 	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3812 		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3813 		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3814 		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3815 	}
3816 }
3817 
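/**
 * binder_put_node_cmd() - write a node refcount command to user space
 * @proc:          owning process, for stats accounting
 * @thread:        calling thread, for stats accounting
 * @ptrp:          in/out pointer into the user read buffer
 * @node_ptr:      user-space address associated with the node
 * @node_cookie:   user-space cookie associated with the node
 * @node_debug_id: node debug id, used only for logging
 * @cmd:           one of BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS
 * @cmd_name:      printable name of @cmd
 *
 * Emits the (cmd, ptr, cookie) triple used by BINDER_WORK_NODE
 * handling and advances *@ptrp past it.
 *
 * Return: 0 on success, -EFAULT if the user buffer cannot be written.
 */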
3818 static int binder_put_node_cmd(struct binder_proc *proc,
3819 			       struct binder_thread *thread,
3820 			       void __user **ptrp,
3821 			       binder_uintptr_t node_ptr,
3822 			       binder_uintptr_t node_cookie,
3823 			       int node_debug_id,
3824 			       uint32_t cmd, const char *cmd_name)
3825 {
3826 	void __user *ptr = *ptrp;
3827 
3828 	if (put_user(cmd, (uint32_t __user *)ptr))
3829 		return -EFAULT;
3830 	ptr += sizeof(uint32_t);
3831 
3832 	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3833 		return -EFAULT;
3834 	ptr += sizeof(binder_uintptr_t);
3835 
3836 	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3837 		return -EFAULT;
3838 	ptr += sizeof(binder_uintptr_t);
3839 
3840 	binder_stat_br(proc, thread, cmd);
3841 	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3842 		     proc->pid, thread->pid, cmd_name, node_debug_id,
3843 		     (u64)node_ptr, (u64)node_cookie);
3844 
3845 	*ptrp = ptr;
3846 	return 0;
3847 }
3848 
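/**
 * binder_wait_for_work() - sleep until there is work for this thread
 * @thread:       calling binder thread
 * @do_proc_work: also accept work queued on the proc-wide todo list
 *
 * Uses the prepare_to_wait()/schedule()/finish_wait() pattern so a
 * wakeup that arrives between the work check and the sleep is not
 * lost. While @do_proc_work is true the thread is parked on
 * proc->waiting_threads, where binder_wakeup_proc_ilocked() can find
 * it. The wait is wrapped in freezer_do_not_count()/freezer_count()
 * so a sleeping binder thread does not block system freezing.
 *
 * Return: 0 when work is available, -ERESTARTSYS if interrupted by
 * a signal.
 */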
3849 static int binder_wait_for_work(struct binder_thread *thread,
3850 				bool do_proc_work)
3851 {
3852 	DEFINE_WAIT(wait);
3853 	struct binder_proc *proc = thread->proc;
3854 	int ret = 0;
3855 
3856 	freezer_do_not_count();
3857 	binder_inner_proc_lock(proc);
3858 	for (;;) {
3859 		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3860 		if (binder_has_work_ilocked(thread, do_proc_work))
3861 			break;
3862 		if (do_proc_work)
3863 			list_add(&thread->waiting_thread_node,
3864 				 &proc->waiting_threads);
3865 		binder_inner_proc_unlock(proc);
3866 		schedule();
3867 		binder_inner_proc_lock(proc);
3868 		list_del_init(&thread->waiting_thread_node);
3869 		if (signal_pending(current)) {
3870 			ret = -ERESTARTSYS;
3871 			break;
3872 		}
3873 	}
3874 	finish_wait(&thread->wait, &wait);
3875 	binder_inner_proc_unlock(proc);
3876 	freezer_count();
3877 
3878 	return ret;
3879 }
3880 
3881 /**
3882  * binder_apply_fd_fixups() - finish fd translation
3883  * @t:	binder transaction with list of fd fixups
3884  *
3885  * Now that we are in the context of the transaction target
3886  * process, we can allocate and install fds. Process the
3887  * list of fds to translate and fix up the buffer with the
3888  * new fds.
3889  *
3890  * If we fail to allocate an fd, then free the resources by
3891  * fput'ing files that have not been processed and ksys_close'ing
3892  * any fds that have already been allocated.
3893  */
3894 static int binder_apply_fd_fixups(struct binder_transaction *t)
3895 {
3896 	struct binder_txn_fd_fixup *fixup, *tmp;
3897 	int ret = 0;
3898 
3899 	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
3900 		int fd = get_unused_fd_flags(O_CLOEXEC);
3901 		u32 *fdp;
3902 
3903 		if (fd < 0) {
3904 			binder_debug(BINDER_DEBUG_TRANSACTION,
3905 				     "failed fd fixup txn %d fd %d\n",
3906 				     t->debug_id, fd);
3907 			ret = -ENOMEM;
3908 			break;
3909 		}
3910 		binder_debug(BINDER_DEBUG_TRANSACTION,
3911 			     "fd fixup txn %d fd %d\n",
3912 			     t->debug_id, fd);
3913 		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
3914 		fd_install(fd, fixup->file);
3915 		fixup->file = NULL;
3916 		fdp = (u32 *)(t->buffer->data + fixup->offset);
3917 		/*
3918 		 * This store can cause problems for CPUs with a
3919 		 * VIVT cache (eg ARMv5) since the cache cannot
3920 		 * detect virtual aliases to the same physical cacheline.
3921 		 * To support VIVT, this address and the user-space VA
3922 		 * would both need to be flushed. Since this kernel
3923 		 * VA is not constructed via page_to_virt(), we can't
3924 		 * use flush_dcache_page() on it, so we'd have to use
3925 		 * an internal function. If devices with VIVT ever
3926 		 * need to run Android, we'll either need to go back
3927 		 * to patching the translated fd from the sender side
3928 		 * (using the non-standard kernel functions), or rework
3929 		 * how the kernel uses the buffer to use page_to_virt()
3930 		 * addresses instead of allocating in our own vm area.
3931 		 *
3932 		 * For now, we disable compilation if CONFIG_CPU_CACHE_VIVT.
3933 		 */
3934 		*fdp = fd;
3935 	}
3936 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
3937 		if (fixup->file) {
3938 			fput(fixup->file);
3939 		} else if (ret) {
3940 			u32 *fdp = (u32 *)(t->buffer->data + fixup->offset);
3941 
3942 			ksys_close(*fdp);
3943 		}
3944 		list_del(&fixup->fixup_entry);
3945 		kfree(fixup);
3946 	}
3947 
3948 	return ret;
3949 }
3950 
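/*
 * binder_thread_read() fills the user read buffer with a stream of
 * BR_* returns, mirroring the BC_* stream parsed by
 * binder_thread_write(). An illustrative (hedged) sketch of a buffer
 * that delivered a single transaction:
 *
 *	u32 BR_NOOP;                        always first when *consumed == 0
 *	u32 BR_TRANSACTION;                 or BR_REPLY
 *	struct binder_transaction_data tr;
 *
 * The caller learns how much was filled via *consumed.
 */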
3951 static int binder_thread_read(struct binder_proc *proc,
3952 			      struct binder_thread *thread,
3953 			      binder_uintptr_t binder_buffer, size_t size,
3954 			      binder_size_t *consumed, int non_block)
3955 {
3956 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3957 	void __user *ptr = buffer + *consumed;
3958 	void __user *end = buffer + size;
3959 
3960 	int ret = 0;
3961 	int wait_for_proc_work;
3962 
3963 	if (*consumed == 0) {
3964 		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3965 			return -EFAULT;
3966 		ptr += sizeof(uint32_t);
3967 	}
3968 
3969 retry:
3970 	binder_inner_proc_lock(proc);
3971 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
3972 	binder_inner_proc_unlock(proc);
3973 
3974 	thread->looper |= BINDER_LOOPER_STATE_WAITING;
3975 
3976 	trace_binder_wait_for_work(wait_for_proc_work,
3977 				   !!thread->transaction_stack,
3978 				   !binder_worklist_empty(proc, &thread->todo));
3979 	if (wait_for_proc_work) {
3980 		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3981 					BINDER_LOOPER_STATE_ENTERED))) {
3982 			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
3983 				proc->pid, thread->pid, thread->looper);
3984 			wait_event_interruptible(binder_user_error_wait,
3985 						 binder_stop_on_user_error < 2);
3986 		}
3987 		binder_set_nice(proc->default_priority);
3988 	}
3989 
3990 	if (non_block) {
3991 		if (!binder_has_work(thread, wait_for_proc_work))
3992 			ret = -EAGAIN;
3993 	} else {
3994 		ret = binder_wait_for_work(thread, wait_for_proc_work);
3995 	}
3996 
3997 	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3998 
3999 	if (ret)
4000 		return ret;
4001 
4002 	while (1) {
4003 		uint32_t cmd;
4004 		struct binder_transaction_data tr;
4005 		struct binder_work *w = NULL;
4006 		struct list_head *list = NULL;
4007 		struct binder_transaction *t = NULL;
4008 		struct binder_thread *t_from;
4009 
4010 		binder_inner_proc_lock(proc);
4011 		if (!binder_worklist_empty_ilocked(&thread->todo))
4012 			list = &thread->todo;
4013 		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4014 			   wait_for_proc_work)
4015 			list = &proc->todo;
4016 		else {
4017 			binder_inner_proc_unlock(proc);
4018 
4019 			/* no data added beyond the initial BR_NOOP */
4020 			if (ptr - buffer == sizeof(uint32_t) && !thread->looper_need_return)
4021 				goto retry;
4022 			break;
4023 		}
4024 
4025 		if (end - ptr < sizeof(tr) + 4) {
4026 			binder_inner_proc_unlock(proc);
4027 			break;
4028 		}
4029 		w = binder_dequeue_work_head_ilocked(list);
4030 		if (binder_worklist_empty_ilocked(&thread->todo))
4031 			thread->process_todo = false;
4032 
4033 		switch (w->type) {
4034 		case BINDER_WORK_TRANSACTION: {
4035 			binder_inner_proc_unlock(proc);
4036 			t = container_of(w, struct binder_transaction, work);
4037 		} break;
4038 		case BINDER_WORK_RETURN_ERROR: {
4039 			struct binder_error *e = container_of(
4040 					w, struct binder_error, work);
4041 
4042 			WARN_ON(e->cmd == BR_OK);
4043 			binder_inner_proc_unlock(proc);
4044 			if (put_user(e->cmd, (uint32_t __user *)ptr))
4045 				return -EFAULT;
4046 			cmd = e->cmd;
4047 			e->cmd = BR_OK;
4048 			ptr += sizeof(uint32_t);
4049 
4050 			binder_stat_br(proc, thread, cmd);
4051 		} break;
4052 		case BINDER_WORK_TRANSACTION_COMPLETE: {
4053 			binder_inner_proc_unlock(proc);
4054 			cmd = BR_TRANSACTION_COMPLETE;
4055 			if (put_user(cmd, (uint32_t __user *)ptr))
4056 				return -EFAULT;
4057 			ptr += sizeof(uint32_t);
4058 
4059 			binder_stat_br(proc, thread, cmd);
4060 			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4061 				     "%d:%d BR_TRANSACTION_COMPLETE\n",
4062 				     proc->pid, thread->pid);
4063 			kfree(w);
4064 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4065 		} break;
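		/*
		 * BINDER_WORK_NODE reconciles the node's computed ref state
		 * (strong/weak, derived from live refs) with what user space
		 * was last told (has_strong_ref/has_weak_ref), emitting at
		 * most one of BR_INCREFS/BR_ACQUIRE and one of
		 * BR_RELEASE/BR_DECREFS to close the gap. A node with no
		 * remaining refs is erased from proc->nodes and freed here.
		 */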
4066 		case BINDER_WORK_NODE: {
4067 			struct binder_node *node = container_of(w, struct binder_node, work);
4068 			int strong, weak;
4069 			binder_uintptr_t node_ptr = node->ptr;
4070 			binder_uintptr_t node_cookie = node->cookie;
4071 			int node_debug_id = node->debug_id;
4072 			int has_weak_ref;
4073 			int has_strong_ref;
4074 			void __user *orig_ptr = ptr;
4075 
4076 			BUG_ON(proc != node->proc);
4077 			strong = node->internal_strong_refs ||
4078 					node->local_strong_refs;
4079 			weak = !hlist_empty(&node->refs) ||
4080 					node->local_weak_refs ||
4081 					node->tmp_refs || strong;
4082 			has_strong_ref = node->has_strong_ref;
4083 			has_weak_ref = node->has_weak_ref;
4084 
4085 			if (weak && !has_weak_ref) {
4086 				node->has_weak_ref = 1;
4087 				node->pending_weak_ref = 1;
4088 				node->local_weak_refs++;
4089 			}
4090 			if (strong && !has_strong_ref) {
4091 				node->has_strong_ref = 1;
4092 				node->pending_strong_ref = 1;
4093 				node->local_strong_refs++;
4094 			}
4095 			if (!strong && has_strong_ref)
4096 				node->has_strong_ref = 0;
4097 			if (!weak && has_weak_ref)
4098 				node->has_weak_ref = 0;
4099 			if (!weak && !strong) {
4100 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4101 					     "%d:%d node %d u%016llx c%016llx deleted\n",
4102 					     proc->pid, thread->pid,
4103 					     node_debug_id,
4104 					     (u64)node_ptr,
4105 					     (u64)node_cookie);
4106 				rb_erase(&node->rb_node, &proc->nodes);
4107 				binder_inner_proc_unlock(proc);
4108 				binder_node_lock(node);
4109 				/*
4110 				 * Acquire the node lock before freeing the
4111 				 * node to serialize with other threads that
4112 				 * may have been holding the node lock while
4113 				 * decrementing this node (avoids race where
4114 				 * this thread frees while the other thread
4115 				 * is unlocking the node after the final
4116 				 * decrement)
4117 				 */
4118 				binder_node_unlock(node);
4119 				binder_free_node(node);
4120 			} else
4121 				binder_inner_proc_unlock(proc);
4122 
4123 			if (weak && !has_weak_ref)
4124 				ret = binder_put_node_cmd(
4125 						proc, thread, &ptr, node_ptr,
4126 						node_cookie, node_debug_id,
4127 						BR_INCREFS, "BR_INCREFS");
4128 			if (!ret && strong && !has_strong_ref)
4129 				ret = binder_put_node_cmd(
4130 						proc, thread, &ptr, node_ptr,
4131 						node_cookie, node_debug_id,
4132 						BR_ACQUIRE, "BR_ACQUIRE");
4133 			if (!ret && !strong && has_strong_ref)
4134 				ret = binder_put_node_cmd(
4135 						proc, thread, &ptr, node_ptr,
4136 						node_cookie, node_debug_id,
4137 						BR_RELEASE, "BR_RELEASE");
4138 			if (!ret && !weak && has_weak_ref)
4139 				ret = binder_put_node_cmd(
4140 						proc, thread, &ptr, node_ptr,
4141 						node_cookie, node_debug_id,
4142 						BR_DECREFS, "BR_DECREFS");
4143 			if (orig_ptr == ptr)
4144 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4145 					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
4146 					     proc->pid, thread->pid,
4147 					     node_debug_id,
4148 					     (u64)node_ptr,
4149 					     (u64)node_cookie);
4150 			if (ret)
4151 				return ret;
4152 		} break;
4153 		case BINDER_WORK_DEAD_BINDER:
4154 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4155 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4156 			struct binder_ref_death *death;
4157 			uint32_t cmd;
4158 			binder_uintptr_t cookie;
4159 
4160 			death = container_of(w, struct binder_ref_death, work);
4161 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4162 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4163 			else
4164 				cmd = BR_DEAD_BINDER;
4165 			cookie = death->cookie;
4166 
4167 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4168 				     "%d:%d %s %016llx\n",
4169 				      proc->pid, thread->pid,
4170 				      cmd == BR_DEAD_BINDER ?
4171 				      "BR_DEAD_BINDER" :
4172 				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4173 				      (u64)cookie);
4174 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4175 				binder_inner_proc_unlock(proc);
4176 				kfree(death);
4177 				binder_stats_deleted(BINDER_STAT_DEATH);
4178 			} else {
4179 				binder_enqueue_work_ilocked(
4180 						w, &proc->delivered_death);
4181 				binder_inner_proc_unlock(proc);
4182 			}
4183 			if (put_user(cmd, (uint32_t __user *)ptr))
4184 				return -EFAULT;
4185 			ptr += sizeof(uint32_t);
4186 			if (put_user(cookie,
4187 				     (binder_uintptr_t __user *)ptr))
4188 				return -EFAULT;
4189 			ptr += sizeof(binder_uintptr_t);
4190 			binder_stat_br(proc, thread, cmd);
4191 			if (cmd == BR_DEAD_BINDER)
4192 				goto done; /* DEAD_BINDER notifications can cause transactions */
4193 		} break;
4194 		default:
4195 			binder_inner_proc_unlock(proc);
4196 			pr_err("%d:%d: bad work type %d\n",
4197 			       proc->pid, thread->pid, w->type);
4198 			break;
4199 		}
4200 
4201 		if (!t)
4202 			continue;
4203 
4204 		BUG_ON(t->buffer == NULL);
4205 		if (t->buffer->target_node) {
4206 			struct binder_node *target_node = t->buffer->target_node;
4207 
4208 			tr.target.ptr = target_node->ptr;
4209 			tr.cookie =  target_node->cookie;
4210 			t->saved_priority = task_nice(current);
4211 			if (t->priority < target_node->min_priority &&
4212 			    !(t->flags & TF_ONE_WAY))
4213 				binder_set_nice(t->priority);
4214 			else if (!(t->flags & TF_ONE_WAY) ||
4215 				 t->saved_priority > target_node->min_priority)
4216 				binder_set_nice(target_node->min_priority);
4217 			cmd = BR_TRANSACTION;
4218 		} else {
4219 			tr.target.ptr = 0;
4220 			tr.cookie = 0;
4221 			cmd = BR_REPLY;
4222 		}
4223 		tr.code = t->code;
4224 		tr.flags = t->flags;
4225 		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4226 
4227 		t_from = binder_get_txn_from(t);
4228 		if (t_from) {
4229 			struct task_struct *sender = t_from->proc->tsk;
4230 
4231 			tr.sender_pid = task_tgid_nr_ns(sender,
4232 							task_active_pid_ns(current));
4233 		} else {
4234 			tr.sender_pid = 0;
4235 		}
4236 
4237 		ret = binder_apply_fd_fixups(t);
4238 		if (ret) {
4239 			struct binder_buffer *buffer = t->buffer;
4240 			bool oneway = !!(t->flags & TF_ONE_WAY);
4241 			int tid = t->debug_id;
4242 
4243 			if (t_from)
4244 				binder_thread_dec_tmpref(t_from);
4245 			buffer->transaction = NULL;
4246 			binder_cleanup_transaction(t, "fd fixups failed",
4247 						   BR_FAILED_REPLY);
4248 			binder_free_buf(proc, buffer);
4249 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4250 				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4251 				     proc->pid, thread->pid,
4252 				     oneway ? "async " :
4253 					(cmd == BR_REPLY ? "reply " : ""),
4254 				     tid, BR_FAILED_REPLY, ret, __LINE__);
4255 			if (cmd == BR_REPLY) {
4256 				cmd = BR_FAILED_REPLY;
4257 				if (put_user(cmd, (uint32_t __user *)ptr))
4258 					return -EFAULT;
4259 				ptr += sizeof(uint32_t);
4260 				binder_stat_br(proc, thread, cmd);
4261 				break;
4262 			}
4263 			continue;
4264 		}
4265 		tr.data_size = t->buffer->data_size;
4266 		tr.offsets_size = t->buffer->offsets_size;
4267 		tr.data.ptr.buffer = (binder_uintptr_t)
4268 			((uintptr_t)t->buffer->data +
4269 			binder_alloc_get_user_buffer_offset(&proc->alloc));
4270 		tr.data.ptr.offsets = tr.data.ptr.buffer +
4271 					ALIGN(t->buffer->data_size,
4272 					    sizeof(void *));
4273 
4274 		if (put_user(cmd, (uint32_t __user *)ptr)) {
4275 			if (t_from)
4276 				binder_thread_dec_tmpref(t_from);
4277 
4278 			binder_cleanup_transaction(t, "put_user failed",
4279 						   BR_FAILED_REPLY);
4280 
4281 			return -EFAULT;
4282 		}
4283 		ptr += sizeof(uint32_t);
4284 		if (copy_to_user(ptr, &tr, sizeof(tr))) {
4285 			if (t_from)
4286 				binder_thread_dec_tmpref(t_from);
4287 
4288 			binder_cleanup_transaction(t, "copy_to_user failed",
4289 						   BR_FAILED_REPLY);
4290 
4291 			return -EFAULT;
4292 		}
4293 		ptr += sizeof(tr);
4294 
4295 		trace_binder_transaction_received(t);
4296 		binder_stat_br(proc, thread, cmd);
4297 		binder_debug(BINDER_DEBUG_TRANSACTION,
4298 			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4299 			     proc->pid, thread->pid,
4300 			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4301 			     "BR_REPLY",
4302 			     t->debug_id, t_from ? t_from->proc->pid : 0,
4303 			     t_from ? t_from->pid : 0, cmd,
4304 			     t->buffer->data_size, t->buffer->offsets_size,
4305 			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
4306 
4307 		if (t_from)
4308 			binder_thread_dec_tmpref(t_from);
4309 		t->buffer->allow_user_free = 1;
4310 		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
4311 			binder_inner_proc_lock(thread->proc);
4312 			t->to_parent = thread->transaction_stack;
4313 			t->to_thread = thread;
4314 			thread->transaction_stack = t;
4315 			binder_inner_proc_unlock(thread->proc);
4316 		} else {
4317 			binder_free_transaction(t);
4318 		}
4319 		break;
4320 	}
4321 
4322 done:
4323 
4324 	*consumed = ptr - buffer;
4325 	binder_inner_proc_lock(proc);
4326 	if (proc->requested_threads == 0 &&
4327 	    list_empty(&thread->proc->waiting_threads) &&
4328 	    proc->requested_threads_started < proc->max_threads &&
4329 	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4330 	     BINDER_LOOPER_STATE_ENTERED))
4331 	     /* user space fails to spawn a new thread if we leave this out */) {
4332 		proc->requested_threads++;
4333 		binder_inner_proc_unlock(proc);
4334 		binder_debug(BINDER_DEBUG_THREADS,
4335 			     "%d:%d BR_SPAWN_LOOPER\n",
4336 			     proc->pid, thread->pid);
4337 		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4338 			return -EFAULT;
4339 		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4340 	} else
4341 		binder_inner_proc_unlock(proc);
4342 	return 0;
4343 }
4344 
4345 static void binder_release_work(struct binder_proc *proc,
4346 				struct list_head *list)
4347 {
4348 	struct binder_work *w;
4349 
4350 	while (1) {
4351 		w = binder_dequeue_work_head(proc, list);
4352 		if (!w)
4353 			return;
4354 
4355 		switch (w->type) {
4356 		case BINDER_WORK_TRANSACTION: {
4357 			struct binder_transaction *t;
4358 
4359 			t = container_of(w, struct binder_transaction, work);
4360 
4361 			binder_cleanup_transaction(t, "process died.",
4362 						   BR_DEAD_REPLY);
4363 		} break;
4364 		case BINDER_WORK_RETURN_ERROR: {
4365 			struct binder_error *e = container_of(
4366 					w, struct binder_error, work);
4367 
4368 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4369 				"undelivered TRANSACTION_ERROR: %u\n",
4370 				e->cmd);
4371 		} break;
4372 		case BINDER_WORK_TRANSACTION_COMPLETE: {
4373 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4374 				"undelivered TRANSACTION_COMPLETE\n");
4375 			kfree(w);
4376 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4377 		} break;
4378 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4379 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4380 			struct binder_ref_death *death;
4381 
4382 			death = container_of(w, struct binder_ref_death, work);
4383 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4384 				"undelivered death notification, %016llx\n",
4385 				(u64)death->cookie);
4386 			kfree(death);
4387 			binder_stats_deleted(BINDER_STAT_DEATH);
4388 		} break;
4389 		default:
4390 			pr_err("unexpected work type, %d, not freed\n",
4391 			       w->type);
4392 			break;
4393 		}
4394 	}
4395 
4396 }
4397 
4398 static struct binder_thread *binder_get_thread_ilocked(
4399 		struct binder_proc *proc, struct binder_thread *new_thread)
4400 {
4401 	struct binder_thread *thread = NULL;
4402 	struct rb_node *parent = NULL;
4403 	struct rb_node **p = &proc->threads.rb_node;
4404 
4405 	while (*p) {
4406 		parent = *p;
4407 		thread = rb_entry(parent, struct binder_thread, rb_node);
4408 
4409 		if (current->pid < thread->pid)
4410 			p = &(*p)->rb_left;
4411 		else if (current->pid > thread->pid)
4412 			p = &(*p)->rb_right;
4413 		else
4414 			return thread;
4415 	}
4416 	if (!new_thread)
4417 		return NULL;
4418 	thread = new_thread;
4419 	binder_stats_created(BINDER_STAT_THREAD);
4420 	thread->proc = proc;
4421 	thread->pid = current->pid;
4422 	atomic_set(&thread->tmp_ref, 0);
4423 	init_waitqueue_head(&thread->wait);
4424 	INIT_LIST_HEAD(&thread->todo);
4425 	rb_link_node(&thread->rb_node, parent, p);
4426 	rb_insert_color(&thread->rb_node, &proc->threads);
4427 	thread->looper_need_return = true;
4428 	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4429 	thread->return_error.cmd = BR_OK;
4430 	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4431 	thread->reply_error.cmd = BR_OK;
4432 	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4433 	return thread;
4434 }
4435 
4436 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4437 {
4438 	struct binder_thread *thread;
4439 	struct binder_thread *new_thread;
4440 
4441 	binder_inner_proc_lock(proc);
4442 	thread = binder_get_thread_ilocked(proc, NULL);
4443 	binder_inner_proc_unlock(proc);
4444 	if (!thread) {
4445 		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4446 		if (new_thread == NULL)
4447 			return NULL;
4448 		binder_inner_proc_lock(proc);
4449 		thread = binder_get_thread_ilocked(proc, new_thread);
4450 		binder_inner_proc_unlock(proc);
4451 		if (thread != new_thread)
4452 			kfree(new_thread);
4453 	}
4454 	return thread;
4455 }
4456 
4457 static void binder_free_proc(struct binder_proc *proc)
4458 {
4459 	BUG_ON(!list_empty(&proc->todo));
4460 	BUG_ON(!list_empty(&proc->delivered_death));
4461 	binder_alloc_deferred_release(&proc->alloc);
4462 	put_task_struct(proc->tsk);
4463 	binder_stats_deleted(BINDER_STAT_PROC);
4464 	kfree(proc);
4465 }
4466 
4467 static void binder_free_thread(struct binder_thread *thread)
4468 {
4469 	BUG_ON(!list_empty(&thread->todo));
4470 	binder_stats_deleted(BINDER_STAT_THREAD);
4471 	binder_proc_dec_tmpref(thread->proc);
4472 	kfree(thread);
4473 }
4474 
4475 static int binder_thread_release(struct binder_proc *proc,
4476 				 struct binder_thread *thread)
4477 {
4478 	struct binder_transaction *t;
4479 	struct binder_transaction *send_reply = NULL;
4480 	int active_transactions = 0;
4481 	struct binder_transaction *last_t = NULL;
4482 
4483 	binder_inner_proc_lock(thread->proc);
4484 	/*
4485 	 * take a ref on the proc so it survives
4486 	 * after we remove this thread from proc->threads.
4487 	 * The corresponding decrement happens when we
4488 	 * actually free the thread in binder_free_thread().
4489 	 */
4490 	proc->tmp_ref++;
4491 	/*
4492 	 * take a ref on this thread to ensure it
4493 	 * survives while we are releasing it
4494 	 */
4495 	atomic_inc(&thread->tmp_ref);
4496 	rb_erase(&thread->rb_node, &proc->threads);
4497 	t = thread->transaction_stack;
4498 	if (t) {
4499 		spin_lock(&t->lock);
4500 		if (t->to_thread == thread)
4501 			send_reply = t;
4502 	} else {
4503 		__acquire(&t->lock);
4504 	}
4505 	thread->is_dead = true;
4506 
4507 	while (t) {
4508 		last_t = t;
4509 		active_transactions++;
4510 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4511 			     "release %d:%d transaction %d %s, still active\n",
4512 			      proc->pid, thread->pid,
4513 			     t->debug_id,
4514 			     (t->to_thread == thread) ? "in" : "out");
4515 
4516 		if (t->to_thread == thread) {
4517 			t->to_proc = NULL;
4518 			t->to_thread = NULL;
4519 			if (t->buffer) {
4520 				t->buffer->transaction = NULL;
4521 				t->buffer = NULL;
4522 			}
4523 			t = t->to_parent;
4524 		} else if (t->from == thread) {
4525 			t->from = NULL;
4526 			t = t->from_parent;
4527 		} else
4528 			BUG();
4529 		spin_unlock(&last_t->lock);
4530 		if (t)
4531 			spin_lock(&t->lock);
4532 		else
4533 			__acquire(&t->lock);
4534 	}
4535 	/* annotation for sparse, lock not acquired in last iteration above */
4536 	__release(&t->lock);
4537 
4538 	/*
4539 	 * If this thread used poll, make sure we remove the waitqueue
4540 	 * from any epoll data structures holding it with POLLFREE.
4541 	 * waitqueue_active() is safe to use here because we're holding
4542 	 * the inner lock.
4543 	 */
4544 	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4545 	    waitqueue_active(&thread->wait)) {
4546 		wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
4547 	}
4548 
4549 	binder_inner_proc_unlock(thread->proc);
4550 
4551 	/*
4552 	 * This is needed to avoid races between wake_up_poll() above and
4553 	 * ep_remove_waitqueue() called for other reasons (e.g. the epoll file
4554 	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4555 	 * lock, so we can be sure it's done after calling synchronize_rcu().
4556 	 */
4557 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
4558 		synchronize_rcu();
4559 
4560 	if (send_reply)
4561 		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4562 	binder_release_work(proc, &thread->todo);
4563 	binder_thread_dec_tmpref(thread);
4564 	return active_transactions;
4565 }
4566 
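/*
 * Illustrative (hedged) user-space pairing for the poll handler below:
 * a looper thread may sleep in poll() and only issue a blocking
 * BINDER_WRITE_READ once readiness is signalled. binder_fd is a
 * hypothetical descriptor opened on a binder device:
 *
 *	struct pollfd pfd = { .fd = binder_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		drain_binder_returns(binder_fd);    (hypothetical helper)
 */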
4567 static __poll_t binder_poll(struct file *filp,
4568 				struct poll_table_struct *wait)
4569 {
4570 	struct binder_proc *proc = filp->private_data;
4571 	struct binder_thread *thread = NULL;
4572 	bool wait_for_proc_work;
4573 
4574 	thread = binder_get_thread(proc);
4575 	if (!thread)
4576 		return EPOLLERR;
4577 
4578 	binder_inner_proc_lock(thread->proc);
4579 	thread->looper |= BINDER_LOOPER_STATE_POLL;
4580 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4581 
4582 	binder_inner_proc_unlock(thread->proc);
4583 
4584 	poll_wait(filp, &thread->wait, wait);
4585 
4586 	if (binder_has_work(thread, wait_for_proc_work))
4587 		return EPOLLIN;
4588 
4589 	return 0;
4590 }
4591 
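/*
 * A hedged user-space sketch of the round trip handled below; binder_fd,
 * wbuf and wlen are hypothetical caller-provided values:
 *
 *	uint8_t rbuf[256];
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)wbuf,	(BC_* stream)
 *		.write_size   = wlen,
 *		.read_buffer  = (binder_uintptr_t)rbuf,	(receives BR_* stream)
 *		.read_size    = sizeof(rbuf),
 *	};
 *
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 * The write half is always processed before the read half; if the write
 * fails, read_consumed is zeroed and the updated bwr is still copied
 * back so the caller can see how much was consumed.
 */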
4592 static int binder_ioctl_write_read(struct file *filp,
4593 				unsigned int cmd, unsigned long arg,
4594 				struct binder_thread *thread)
4595 {
4596 	int ret = 0;
4597 	struct binder_proc *proc = filp->private_data;
4598 	unsigned int size = _IOC_SIZE(cmd);
4599 	void __user *ubuf = (void __user *)arg;
4600 	struct binder_write_read bwr;
4601 
4602 	if (size != sizeof(struct binder_write_read)) {
4603 		ret = -EINVAL;
4604 		goto out;
4605 	}
4606 	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4607 		ret = -EFAULT;
4608 		goto out;
4609 	}
4610 	binder_debug(BINDER_DEBUG_READ_WRITE,
4611 		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4612 		     proc->pid, thread->pid,
4613 		     (u64)bwr.write_size, (u64)bwr.write_buffer,
4614 		     (u64)bwr.read_size, (u64)bwr.read_buffer);
4615 
4616 	if (bwr.write_size > 0) {
4617 		ret = binder_thread_write(proc, thread,
4618 					  bwr.write_buffer,
4619 					  bwr.write_size,
4620 					  &bwr.write_consumed);
4621 		trace_binder_write_done(ret);
4622 		if (ret < 0) {
4623 			bwr.read_consumed = 0;
4624 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4625 				ret = -EFAULT;
4626 			goto out;
4627 		}
4628 	}
4629 	if (bwr.read_size > 0) {
4630 		ret = binder_thread_read(proc, thread, bwr.read_buffer,
4631 					 bwr.read_size,
4632 					 &bwr.read_consumed,
4633 					 filp->f_flags & O_NONBLOCK);
4634 		trace_binder_read_done(ret);
4635 		binder_inner_proc_lock(proc);
4636 		if (!binder_worklist_empty_ilocked(&proc->todo))
4637 			binder_wakeup_proc_ilocked(proc);
4638 		binder_inner_proc_unlock(proc);
4639 		if (ret < 0) {
4640 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4641 				ret = -EFAULT;
4642 			goto out;
4643 		}
4644 	}
4645 	binder_debug(BINDER_DEBUG_READ_WRITE,
4646 		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4647 		     proc->pid, thread->pid,
4648 		     (u64)bwr.write_consumed, (u64)bwr.write_size,
4649 		     (u64)bwr.read_consumed, (u64)bwr.read_size);
4650 	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4651 		ret = -EFAULT;
4652 		goto out;
4653 	}
4654 out:
4655 	return ret;
4656 }
4657 
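/*
 * BINDER_SET_CONTEXT_MGR registers the caller as the context manager,
 * reachable by all other processes through the well-known handle 0.
 * Illustrative user-space call, typically made exactly once by
 * servicemanager (binder_fd is hypothetical):
 *
 *	ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0);
 */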
4658 static int binder_ioctl_set_ctx_mgr(struct file *filp)
4659 {
4660 	int ret = 0;
4661 	struct binder_proc *proc = filp->private_data;
4662 	struct binder_context *context = proc->context;
4663 	struct binder_node *new_node;
4664 	kuid_t curr_euid = current_euid();
4665 
4666 	mutex_lock(&context->context_mgr_node_lock);
4667 	if (context->binder_context_mgr_node) {
4668 		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4669 		ret = -EBUSY;
4670 		goto out;
4671 	}
4672 	ret = security_binder_set_context_mgr(proc->tsk);
4673 	if (ret < 0)
4674 		goto out;
4675 	if (uid_valid(context->binder_context_mgr_uid)) {
4676 		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4677 			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4678 			       from_kuid(&init_user_ns, curr_euid),
4679 			       from_kuid(&init_user_ns,
4680 					 context->binder_context_mgr_uid));
4681 			ret = -EPERM;
4682 			goto out;
4683 		}
4684 	} else {
4685 		context->binder_context_mgr_uid = curr_euid;
4686 	}
4687 	new_node = binder_new_node(proc, NULL);
4688 	if (!new_node) {
4689 		ret = -ENOMEM;
4690 		goto out;
4691 	}
4692 	binder_node_lock(new_node);
4693 	new_node->local_weak_refs++;
4694 	new_node->local_strong_refs++;
4695 	new_node->has_strong_ref = 1;
4696 	new_node->has_weak_ref = 1;
4697 	context->binder_context_mgr_node = new_node;
4698 	binder_node_unlock(new_node);
4699 	binder_put_node(new_node);
4700 out:
4701 	mutex_unlock(&context->context_mgr_node_lock);
4702 	return ret;
4703 }
4704 
4705 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4706 		struct binder_node_info_for_ref *info)
4707 {
4708 	struct binder_node *node;
4709 	struct binder_context *context = proc->context;
4710 	__u32 handle = info->handle;
4711 
4712 	if (info->strong_count || info->weak_count || info->reserved1 ||
4713 	    info->reserved2 || info->reserved3) {
4714 		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
4715 				  proc->pid);
4716 		return -EINVAL;
4717 	}
4718 
4719 	/* This ioctl may only be used by the context manager */
4720 	mutex_lock(&context->context_mgr_node_lock);
4721 	if (!context->binder_context_mgr_node ||
4722 		context->binder_context_mgr_node->proc != proc) {
4723 		mutex_unlock(&context->context_mgr_node_lock);
4724 		return -EPERM;
4725 	}
4726 	mutex_unlock(&context->context_mgr_node_lock);
4727 
4728 	node = binder_get_node_from_ref(proc, handle, true, NULL);
4729 	if (!node)
4730 		return -EINVAL;
4731 
4732 	info->strong_count = node->local_strong_refs +
4733 		node->internal_strong_refs;
4734 	info->weak_count = node->local_weak_refs;
4735 
4736 	binder_put_node(node);
4737 
4738 	return 0;
4739 }
4740 
4741 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4742 				struct binder_node_debug_info *info)
4743 {
4744 	struct rb_node *n;
4745 	binder_uintptr_t ptr = info->ptr;
4746 
4747 	memset(info, 0, sizeof(*info));
4748 
4749 	binder_inner_proc_lock(proc);
4750 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4751 		struct binder_node *node = rb_entry(n, struct binder_node,
4752 						    rb_node);
4753 		if (node->ptr > ptr) {
4754 			info->ptr = node->ptr;
4755 			info->cookie = node->cookie;
4756 			info->has_strong_ref = node->has_strong_ref;
4757 			info->has_weak_ref = node->has_weak_ref;
4758 			break;
4759 		}
4760 	}
4761 	binder_inner_proc_unlock(proc);
4762 
4763 	return 0;
4764 }
4765 
4766 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4767 {
4768 	int ret;
4769 	struct binder_proc *proc = filp->private_data;
4770 	struct binder_thread *thread;
4771 	unsigned int size = _IOC_SIZE(cmd);
4772 	void __user *ubuf = (void __user *)arg;
4773 
4774 	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
4775 			proc->pid, current->pid, cmd, arg);*/
4776 
4777 	binder_selftest_alloc(&proc->alloc);
4778 
4779 	trace_binder_ioctl(cmd, arg);
4780 
4781 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4782 	if (ret)
4783 		goto err_unlocked;
4784 
4785 	thread = binder_get_thread(proc);
4786 	if (thread == NULL) {
4787 		ret = -ENOMEM;
4788 		goto err;
4789 	}
4790 
4791 	switch (cmd) {
4792 	case BINDER_WRITE_READ:
4793 		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4794 		if (ret)
4795 			goto err;
4796 		break;
4797 	case BINDER_SET_MAX_THREADS: {
4798 		int max_threads;
4799 
4800 		if (copy_from_user(&max_threads, ubuf,
4801 				   sizeof(max_threads))) {
4802 			ret = -EFAULT;
4803 			goto err;
4804 		}
4805 		binder_inner_proc_lock(proc);
4806 		proc->max_threads = max_threads;
4807 		binder_inner_proc_unlock(proc);
4808 		break;
4809 	}
4810 	case BINDER_SET_CONTEXT_MGR:
4811 		ret = binder_ioctl_set_ctx_mgr(filp);
4812 		if (ret)
4813 			goto err;
4814 		break;
4815 	case BINDER_THREAD_EXIT:
4816 		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
4817 			     proc->pid, thread->pid);
4818 		binder_thread_release(proc, thread);
4819 		thread = NULL;
4820 		break;
4821 	case BINDER_VERSION: {
4822 		struct binder_version __user *ver = ubuf;
4823 
4824 		if (size != sizeof(struct binder_version)) {
4825 			ret = -EINVAL;
4826 			goto err;
4827 		}
4828 		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4829 			     &ver->protocol_version)) {
4830 			ret = -EINVAL;
4831 			goto err;
4832 		}
4833 		break;
4834 	}
4835 	case BINDER_GET_NODE_INFO_FOR_REF: {
4836 		struct binder_node_info_for_ref info;
4837 
4838 		if (copy_from_user(&info, ubuf, sizeof(info))) {
4839 			ret = -EFAULT;
4840 			goto err;
4841 		}
4842 
4843 		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
4844 		if (ret < 0)
4845 			goto err;
4846 
4847 		if (copy_to_user(ubuf, &info, sizeof(info))) {
4848 			ret = -EFAULT;
4849 			goto err;
4850 		}
4851 
4852 		break;
4853 	}
4854 	case BINDER_GET_NODE_DEBUG_INFO: {
4855 		struct binder_node_debug_info info;
4856 
4857 		if (copy_from_user(&info, ubuf, sizeof(info))) {
4858 			ret = -EFAULT;
4859 			goto err;
4860 		}
4861 
4862 		ret = binder_ioctl_get_node_debug_info(proc, &info);
4863 		if (ret < 0)
4864 			goto err;
4865 
4866 		if (copy_to_user(ubuf, &info, sizeof(info))) {
4867 			ret = -EFAULT;
4868 			goto err;
4869 		}
4870 		break;
4871 	}
4872 	default:
4873 		ret = -EINVAL;
4874 		goto err;
4875 	}
4876 	ret = 0;
4877 err:
4878 	if (thread)
4879 		thread->looper_need_return = false;
4880 	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4881 	if (ret && ret != -ERESTARTSYS)
4882 		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
4883 err_unlocked:
4884 	trace_binder_ioctl_done(ret);
4885 	return ret;
4886 }
4887 
4888 static void binder_vma_open(struct vm_area_struct *vma)
4889 {
4890 	struct binder_proc *proc = vma->vm_private_data;
4891 
4892 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4893 		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4894 		     proc->pid, vma->vm_start, vma->vm_end,
4895 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4896 		     (unsigned long)pgprot_val(vma->vm_page_prot));
4897 }
4898 
4899 static void binder_vma_close(struct vm_area_struct *vma)
4900 {
4901 	struct binder_proc *proc = vma->vm_private_data;
4902 
4903 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4904 		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4905 		     proc->pid, vma->vm_start, vma->vm_end,
4906 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4907 		     (unsigned long)pgprot_val(vma->vm_page_prot));
4908 	binder_alloc_vma_close(&proc->alloc);
4909 }
4910 
4911 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
4912 {
4913 	return VM_FAULT_SIGBUS;
4914 }
4915 
4916 static const struct vm_operations_struct binder_vm_ops = {
4917 	.open = binder_vma_open,
4918 	.close = binder_vma_close,
4919 	.fault = binder_vm_fault,
4920 };
4921 
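/*
 * A minimal (hedged) user-space setup sketch for the mmap handler
 * below; error handling omitted. The mapping must not be writable
 * (FORBIDDEN_MMAP_FLAGS) and is silently capped at 4MB:
 *
 *	int binder_fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ,
 *			 MAP_PRIVATE, binder_fd, 0);
 *
 * The kernel copies received transaction data into this region; user
 * space returns it with BC_FREE_BUFFER when done.
 */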
4922 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4923 {
4924 	int ret;
4925 	struct binder_proc *proc = filp->private_data;
4926 	const char *failure_string;
4927 
4928 	if (proc->tsk != current->group_leader)
4929 		return -EINVAL;
4930 
4931 	if ((vma->vm_end - vma->vm_start) > SZ_4M)
4932 		vma->vm_end = vma->vm_start + SZ_4M;
4933 
4934 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4935 		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4936 		     __func__, proc->pid, vma->vm_start, vma->vm_end,
4937 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4938 		     (unsigned long)pgprot_val(vma->vm_page_prot));
4939 
4940 	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4941 		ret = -EPERM;
4942 		failure_string = "bad vm_flags";
4943 		goto err_bad_arg;
4944 	}
4945 	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
4946 	vma->vm_flags &= ~VM_MAYWRITE;
4947 
4948 	vma->vm_ops = &binder_vm_ops;
4949 	vma->vm_private_data = proc;
4950 
4951 	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
4952 	if (ret)
4953 		return ret;
4954 	return 0;
4955 
4956 err_bad_arg:
4957 	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
4958 	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
4959 	return ret;
4960 }
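
/*
 * Illustrative userspace counterpart (a sketch, not driver code):
 * the mapping must be read-only and is clipped to the 4 MB cap
 * enforced above, roughly:
 *
 *	map = mmap(NULL, BINDER_VM_SIZE, PROT_READ,
 *		   MAP_PRIVATE | MAP_NORESERVE, fd, 0);
 *
 * where BINDER_VM_SIZE is an assumed userspace constant (libbinder
 * uses about 1 MB minus two pages); it is not defined by this driver.
 */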
4961 
4962 static int binder_open(struct inode *nodp, struct file *filp)
4963 {
4964 	struct binder_proc *proc;
4965 	struct binder_device *binder_dev;
4966 
4967 	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
4968 		     current->group_leader->pid, current->pid);
4969 
4970 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
4971 	if (proc == NULL)
4972 		return -ENOMEM;
4973 	spin_lock_init(&proc->inner_lock);
4974 	spin_lock_init(&proc->outer_lock);
4975 	get_task_struct(current->group_leader);
4976 	proc->tsk = current->group_leader;
4977 	INIT_LIST_HEAD(&proc->todo);
4978 	proc->default_priority = task_nice(current);
4979 	binder_dev = container_of(filp->private_data, struct binder_device,
4980 				  miscdev);
4981 	proc->context = &binder_dev->context;
4982 	binder_alloc_init(&proc->alloc);
4983 
4984 	binder_stats_created(BINDER_STAT_PROC);
4985 	proc->pid = current->group_leader->pid;
4986 	INIT_LIST_HEAD(&proc->delivered_death);
4987 	INIT_LIST_HEAD(&proc->waiting_threads);
4988 	filp->private_data = proc;
4989 
4990 	mutex_lock(&binder_procs_lock);
4991 	hlist_add_head(&proc->proc_node, &binder_procs);
4992 	mutex_unlock(&binder_procs_lock);
4993 
4994 	if (binder_debugfs_dir_entry_proc) {
4995 		char strbuf[11];
4996 
4997 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
4998 		/*
4999 		 * proc debug entries are shared between contexts, so
5000 		 * this will fail if the process tries to open the driver
5001 		 * again with a different context. The printing code will
5002 		 * print all contexts that a given PID has anyway, so this
5003 		 * is not a problem.
5004 		 */
5005 		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5006 			binder_debugfs_dir_entry_proc,
5007 			(void *)(unsigned long)proc->pid,
5008 			&binder_proc_fops);
5009 	}
5010 
5011 	return 0;
5012 }
5013 
5014 static int binder_flush(struct file *filp, fl_owner_t id)
5015 {
5016 	struct binder_proc *proc = filp->private_data;
5017 
5018 	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5019 
5020 	return 0;
5021 }
5022 
5023 static void binder_deferred_flush(struct binder_proc *proc)
5024 {
5025 	struct rb_node *n;
5026 	int wake_count = 0;
5027 
5028 	binder_inner_proc_lock(proc);
5029 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5030 		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5031 
5032 		thread->looper_need_return = true;
5033 		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5034 			wake_up_interruptible(&thread->wait);
5035 			wake_count++;
5036 		}
5037 	}
5038 	binder_inner_proc_unlock(proc);
5039 
5040 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5041 		     "binder_flush: %d woke %d threads\n", proc->pid,
5042 		     wake_count);
5043 }
5044 
5045 static int binder_release(struct inode *nodp, struct file *filp)
5046 {
5047 	struct binder_proc *proc = filp->private_data;
5048 
5049 	debugfs_remove(proc->debugfs_entry);
5050 	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5051 
5052 	return 0;
5053 }
5054 
5055 static int binder_node_release(struct binder_node *node, int refs)
5056 {
5057 	struct binder_ref *ref;
5058 	int death = 0;
5059 	struct binder_proc *proc = node->proc;
5060 
5061 	binder_release_work(proc, &node->async_todo);
5062 
5063 	binder_node_lock(node);
5064 	binder_inner_proc_lock(proc);
5065 	binder_dequeue_work_ilocked(&node->work);
5066 	/*
5067 	 * The caller must have taken a temporary ref on the node.
5068 	 */
5069 	BUG_ON(!node->tmp_refs);
5070 	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5071 		binder_inner_proc_unlock(proc);
5072 		binder_node_unlock(node);
5073 		binder_free_node(node);
5074 
5075 		return refs;
5076 	}
5077 
5078 	node->proc = NULL;
5079 	node->local_strong_refs = 0;
5080 	node->local_weak_refs = 0;
5081 	binder_inner_proc_unlock(proc);
5082 
5083 	spin_lock(&binder_dead_nodes_lock);
5084 	hlist_add_head(&node->dead_node, &binder_dead_nodes);
5085 	spin_unlock(&binder_dead_nodes_lock);
5086 
5087 	hlist_for_each_entry(ref, &node->refs, node_entry) {
5088 		refs++;
5089 		/*
5090 		 * Need the node lock to synchronize
5091 		 * with new notification requests and the
5092 		 * inner lock to synchronize with queued
5093 		 * death notifications.
5094 		 */
5095 		binder_inner_proc_lock(ref->proc);
5096 		if (!ref->death) {
5097 			binder_inner_proc_unlock(ref->proc);
5098 			continue;
5099 		}
5100 
5101 		death++;
5102 
5103 		BUG_ON(!list_empty(&ref->death->work.entry));
5104 		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5105 		binder_enqueue_work_ilocked(&ref->death->work,
5106 					    &ref->proc->todo);
5107 		binder_wakeup_proc_ilocked(ref->proc);
5108 		binder_inner_proc_unlock(ref->proc);
5109 	}
5110 
5111 	binder_debug(BINDER_DEBUG_DEAD_BINDER,
5112 		     "node %d now dead, refs %d, death %d\n",
5113 		     node->debug_id, refs, death);
5114 	binder_node_unlock(node);
5115 	binder_put_node(node);
5116 
5117 	return refs;
5118 }
5119 
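/*
 * Final teardown of a dying proc: unhook it from the global lists,
 * then release every thread, node and ref it still owns (dropping
 * and retaking the locks as each release demands), and finally flush
 * any work left on its queues.  The temporary ref taken below keeps
 * the proc alive until the closing binder_proc_dec_tmpref().
 */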
5120 static void binder_deferred_release(struct binder_proc *proc)
5121 {
5122 	struct binder_context *context = proc->context;
5123 	struct rb_node *n;
5124 	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5125 
5126 	mutex_lock(&binder_procs_lock);
5127 	hlist_del(&proc->proc_node);
5128 	mutex_unlock(&binder_procs_lock);
5129 
5130 	mutex_lock(&context->context_mgr_node_lock);
5131 	if (context->binder_context_mgr_node &&
5132 	    context->binder_context_mgr_node->proc == proc) {
5133 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
5134 			     "%s: %d context_mgr_node gone\n",
5135 			     __func__, proc->pid);
5136 		context->binder_context_mgr_node = NULL;
5137 	}
5138 	mutex_unlock(&context->context_mgr_node_lock);
5139 	binder_inner_proc_lock(proc);
5140 	/*
5141 	 * Make sure proc stays alive after we
5142 	 * remove all the threads
5143 	 */
5144 	proc->tmp_ref++;
5145 
5146 	proc->is_dead = true;
5147 	threads = 0;
5148 	active_transactions = 0;
5149 	while ((n = rb_first(&proc->threads))) {
5150 		struct binder_thread *thread;
5151 
5152 		thread = rb_entry(n, struct binder_thread, rb_node);
5153 		binder_inner_proc_unlock(proc);
5154 		threads++;
5155 		active_transactions += binder_thread_release(proc, thread);
5156 		binder_inner_proc_lock(proc);
5157 	}
5158 
5159 	nodes = 0;
5160 	incoming_refs = 0;
5161 	while ((n = rb_first(&proc->nodes))) {
5162 		struct binder_node *node;
5163 
5164 		node = rb_entry(n, struct binder_node, rb_node);
5165 		nodes++;
5166 		/*
5167 		 * take a temporary ref on the node before
5168 		 * calling binder_node_release() which will either
5169 		 * kfree() the node or call binder_put_node()
5170 		 */
5171 		binder_inc_node_tmpref_ilocked(node);
5172 		rb_erase(&node->rb_node, &proc->nodes);
5173 		binder_inner_proc_unlock(proc);
5174 		incoming_refs = binder_node_release(node, incoming_refs);
5175 		binder_inner_proc_lock(proc);
5176 	}
5177 	binder_inner_proc_unlock(proc);
5178 
5179 	outgoing_refs = 0;
5180 	binder_proc_lock(proc);
5181 	while ((n = rb_first(&proc->refs_by_desc))) {
5182 		struct binder_ref *ref;
5183 
5184 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
5185 		outgoing_refs++;
5186 		binder_cleanup_ref_olocked(ref);
5187 		binder_proc_unlock(proc);
5188 		binder_free_ref(ref);
5189 		binder_proc_lock(proc);
5190 	}
5191 	binder_proc_unlock(proc);
5192 
5193 	binder_release_work(proc, &proc->todo);
5194 	binder_release_work(proc, &proc->delivered_death);
5195 
5196 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5197 		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5198 		     __func__, proc->pid, threads, nodes, incoming_refs,
5199 		     outgoing_refs, active_transactions);
5200 
5201 	binder_proc_dec_tmpref(proc);
5202 }
5203 
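/*
 * Work function for binder_deferred_work: pop one proc at a time off
 * binder_deferred_list under binder_deferred_lock, then run its
 * pending flush/release work with the lock dropped, until the list
 * is empty.
 */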
5204 static void binder_deferred_func(struct work_struct *work)
5205 {
5206 	struct binder_proc *proc;
5207 
5208 	int defer;
5209 
5210 	do {
5211 		mutex_lock(&binder_deferred_lock);
5212 		if (!hlist_empty(&binder_deferred_list)) {
5213 			proc = hlist_entry(binder_deferred_list.first,
5214 					struct binder_proc, deferred_work_node);
5215 			hlist_del_init(&proc->deferred_work_node);
5216 			defer = proc->deferred_work;
5217 			proc->deferred_work = 0;
5218 		} else {
5219 			proc = NULL;
5220 			defer = 0;
5221 		}
5222 		mutex_unlock(&binder_deferred_lock);
5223 
5224 		if (defer & BINDER_DEFERRED_FLUSH)
5225 			binder_deferred_flush(proc);
5226 
5227 		if (defer & BINDER_DEFERRED_RELEASE)
5228 			binder_deferred_release(proc); /* frees proc */
5229 	} while (proc);
5230 }

5231 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5232 
5233 static void
5234 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5235 {
5236 	mutex_lock(&binder_deferred_lock);
5237 	proc->deferred_work |= defer;
5238 	if (hlist_unhashed(&proc->deferred_work_node)) {
5239 		hlist_add_head(&proc->deferred_work_node,
5240 				&binder_deferred_list);
5241 		schedule_work(&binder_deferred_work);
5242 	}
5243 	mutex_unlock(&binder_deferred_lock);
5244 }
5245 
5246 static void print_binder_transaction_ilocked(struct seq_file *m,
5247 					     struct binder_proc *proc,
5248 					     const char *prefix,
5249 					     struct binder_transaction *t)
5250 {
5251 	struct binder_proc *to_proc;
5252 	struct binder_buffer *buffer = t->buffer;
5253 
5254 	spin_lock(&t->lock);
5255 	to_proc = t->to_proc;
5256 	seq_printf(m,
5257 		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5258 		   prefix, t->debug_id, t,
5259 		   t->from ? t->from->proc->pid : 0,
5260 		   t->from ? t->from->pid : 0,
5261 		   to_proc ? to_proc->pid : 0,
5262 		   t->to_thread ? t->to_thread->pid : 0,
5263 		   t->code, t->flags, t->priority, t->need_reply);
5264 	spin_unlock(&t->lock);
5265 
5266 	if (proc != to_proc) {
5267 		/*
5268 		 * Can only safely deref buffer if we are holding the
5269 		 * correct proc inner lock for this node
5270 		 */
5271 		seq_puts(m, "\n");
5272 		return;
5273 	}
5274 
5275 	if (buffer == NULL) {
5276 		seq_puts(m, " buffer free\n");
5277 		return;
5278 	}
5279 	if (buffer->target_node)
5280 		seq_printf(m, " node %d", buffer->target_node->debug_id);
5281 	seq_printf(m, " size %zd:%zd data %pK\n",
5282 		   buffer->data_size, buffer->offsets_size,
5283 		   buffer->data);
5284 }
5285 
5286 static void print_binder_work_ilocked(struct seq_file *m,
5287 				     struct binder_proc *proc,
5288 				     const char *prefix,
5289 				     const char *transaction_prefix,
5290 				     struct binder_work *w)
5291 {
5292 	struct binder_node *node;
5293 	struct binder_transaction *t;
5294 
5295 	switch (w->type) {
5296 	case BINDER_WORK_TRANSACTION:
5297 		t = container_of(w, struct binder_transaction, work);
5298 		print_binder_transaction_ilocked(
5299 				m, proc, transaction_prefix, t);
5300 		break;
5301 	case BINDER_WORK_RETURN_ERROR: {
5302 		struct binder_error *e = container_of(
5303 				w, struct binder_error, work);
5304 
5305 		seq_printf(m, "%stransaction error: %u\n",
5306 			   prefix, e->cmd);
5307 	} break;
5308 	case BINDER_WORK_TRANSACTION_COMPLETE:
5309 		seq_printf(m, "%stransaction complete\n", prefix);
5310 		break;
5311 	case BINDER_WORK_NODE:
5312 		node = container_of(w, struct binder_node, work);
5313 		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5314 			   prefix, node->debug_id,
5315 			   (u64)node->ptr, (u64)node->cookie);
5316 		break;
5317 	case BINDER_WORK_DEAD_BINDER:
5318 		seq_printf(m, "%shas dead binder\n", prefix);
5319 		break;
5320 	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5321 		seq_printf(m, "%shas cleared dead binder\n", prefix);
5322 		break;
5323 	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5324 		seq_printf(m, "%shas cleared death notification\n", prefix);
5325 		break;
5326 	default:
5327 		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5328 		break;
5329 	}
5330 }
5331 
5332 static void print_binder_thread_ilocked(struct seq_file *m,
5333 					struct binder_thread *thread,
5334 					int print_always)
5335 {
5336 	struct binder_transaction *t;
5337 	struct binder_work *w;
5338 	size_t start_pos = m->count;
5339 	size_t header_pos;
5340 
5341 	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
5342 			thread->pid, thread->looper,
5343 			thread->looper_need_return,
5344 			atomic_read(&thread->tmp_ref));
5345 	header_pos = m->count;
5346 	t = thread->transaction_stack;
5347 	while (t) {
5348 		if (t->from == thread) {
5349 			print_binder_transaction_ilocked(m, thread->proc,
5350 					"    outgoing transaction", t);
5351 			t = t->from_parent;
5352 		} else if (t->to_thread == thread) {
5353 			print_binder_transaction_ilocked(m, thread->proc,
5354 						 "    incoming transaction", t);
5355 			t = t->to_parent;
5356 		} else {
5357 			print_binder_transaction_ilocked(m, thread->proc,
5358 					"    bad transaction", t);
5359 			t = NULL;
5360 		}
5361 	}
5362 	list_for_each_entry(w, &thread->todo, entry) {
5363 		print_binder_work_ilocked(m, thread->proc, "    ",
5364 					  "    pending transaction", w);
5365 	}
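	/* if only the header was printed, rewind and omit this thread */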
5366 	if (!print_always && m->count == header_pos)
5367 		m->count = start_pos;
5368 }
5369 
5370 static void print_binder_node_nilocked(struct seq_file *m,
5371 				       struct binder_node *node)
5372 {
5373 	struct binder_ref *ref;
5374 	struct binder_work *w;
5375 	int count;
5376 
5377 	count = 0;
5378 	hlist_for_each_entry(ref, &node->refs, node_entry)
5379 		count++;
5380 
5381 	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5382 		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
5383 		   node->has_strong_ref, node->has_weak_ref,
5384 		   node->local_strong_refs, node->local_weak_refs,
5385 		   node->internal_strong_refs, count, node->tmp_refs);
5386 	if (count) {
5387 		seq_puts(m, " proc");
5388 		hlist_for_each_entry(ref, &node->refs, node_entry)
5389 			seq_printf(m, " %d", ref->proc->pid);
5390 	}
5391 	seq_puts(m, "\n");
5392 	if (node->proc) {
5393 		list_for_each_entry(w, &node->async_todo, entry)
5394 			print_binder_work_ilocked(m, node->proc, "    ",
5395 					  "    pending async transaction", w);
5396 	}
5397 }
5398 
5399 static void print_binder_ref_olocked(struct seq_file *m,
5400 				     struct binder_ref *ref)
5401 {
5402 	binder_node_lock(ref->node);
5403 	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
5404 		   ref->data.debug_id, ref->data.desc,
5405 		   ref->node->proc ? "" : "dead ",
5406 		   ref->node->debug_id, ref->data.strong,
5407 		   ref->data.weak, ref->death);
5408 	binder_node_unlock(ref->node);
5409 }
5410 
5411 static void print_binder_proc(struct seq_file *m,
5412 			      struct binder_proc *proc, int print_all)
5413 {
5414 	struct binder_work *w;
5415 	struct rb_node *n;
5416 	size_t start_pos = m->count;
5417 	size_t header_pos;
5418 	struct binder_node *last_node = NULL;
5419 
5420 	seq_printf(m, "proc %d\n", proc->pid);
5421 	seq_printf(m, "context %s\n", proc->context->name);
5422 	header_pos = m->count;
5423 
5424 	binder_inner_proc_lock(proc);
5425 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5426 		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5427 						rb_node), print_all);
5428 
5429 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5430 		struct binder_node *node = rb_entry(n, struct binder_node,
5431 						    rb_node);
5432 		/*
5433 		 * take a temporary reference on the node so it
5434 		 * survives and isn't removed from the tree
5435 		 * while we print it.
5436 		 */
5437 		binder_inc_node_tmpref_ilocked(node);
5438 		/* Need to drop inner lock to take node lock */
5439 		binder_inner_proc_unlock(proc);
5440 		if (last_node)
5441 			binder_put_node(last_node);
5442 		binder_node_inner_lock(node);
5443 		print_binder_node_nilocked(m, node);
5444 		binder_node_inner_unlock(node);
5445 		last_node = node;
5446 		binder_inner_proc_lock(proc);
5447 	}
5448 	binder_inner_proc_unlock(proc);
5449 	if (last_node)
5450 		binder_put_node(last_node);
5451 
5452 	if (print_all) {
5453 		binder_proc_lock(proc);
5454 		for (n = rb_first(&proc->refs_by_desc);
5455 		     n != NULL;
5456 		     n = rb_next(n))
5457 			print_binder_ref_olocked(m, rb_entry(n,
5458 							    struct binder_ref,
5459 							    rb_node_desc));
5460 		binder_proc_unlock(proc);
5461 	}
5462 	binder_alloc_print_allocated(m, &proc->alloc);
5463 	binder_inner_proc_lock(proc);
5464 	list_for_each_entry(w, &proc->todo, entry)
5465 		print_binder_work_ilocked(m, proc, "  ",
5466 					  "  pending transaction", w);
5467 	list_for_each_entry(w, &proc->delivered_death, entry) {
5468 		seq_puts(m, "  has delivered dead binder\n");
5469 		break;
5470 	}
5471 	binder_inner_proc_unlock(proc);
5472 	if (!print_all && m->count == header_pos)
5473 		m->count = start_pos;
5474 }
5475 
5476 static const char * const binder_return_strings[] = {
5477 	"BR_ERROR",
5478 	"BR_OK",
5479 	"BR_TRANSACTION",
5480 	"BR_REPLY",
5481 	"BR_ACQUIRE_RESULT",
5482 	"BR_DEAD_REPLY",
5483 	"BR_TRANSACTION_COMPLETE",
5484 	"BR_INCREFS",
5485 	"BR_ACQUIRE",
5486 	"BR_RELEASE",
5487 	"BR_DECREFS",
5488 	"BR_ATTEMPT_ACQUIRE",
5489 	"BR_NOOP",
5490 	"BR_SPAWN_LOOPER",
5491 	"BR_FINISHED",
5492 	"BR_DEAD_BINDER",
5493 	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
5494 	"BR_FAILED_REPLY"
5495 };
5496 
5497 static const char * const binder_command_strings[] = {
5498 	"BC_TRANSACTION",
5499 	"BC_REPLY",
5500 	"BC_ACQUIRE_RESULT",
5501 	"BC_FREE_BUFFER",
5502 	"BC_INCREFS",
5503 	"BC_ACQUIRE",
5504 	"BC_RELEASE",
5505 	"BC_DECREFS",
5506 	"BC_INCREFS_DONE",
5507 	"BC_ACQUIRE_DONE",
5508 	"BC_ATTEMPT_ACQUIRE",
5509 	"BC_REGISTER_LOOPER",
5510 	"BC_ENTER_LOOPER",
5511 	"BC_EXIT_LOOPER",
5512 	"BC_REQUEST_DEATH_NOTIFICATION",
5513 	"BC_CLEAR_DEATH_NOTIFICATION",
5514 	"BC_DEAD_BINDER_DONE",
5515 	"BC_TRANSACTION_SG",
5516 	"BC_REPLY_SG",
5517 };
5518 
5519 static const char * const binder_objstat_strings[] = {
5520 	"proc",
5521 	"thread",
5522 	"node",
5523 	"ref",
5524 	"death",
5525 	"transaction",
5526 	"transaction_complete"
5527 };
5528 
5529 static void print_binder_stats(struct seq_file *m, const char *prefix,
5530 			       struct binder_stats *stats)
5531 {
5532 	int i;
5533 
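	/* keep the string tables in sync with the stat counter arrays */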
5534 	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5535 		     ARRAY_SIZE(binder_command_strings));
5536 	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5537 		int temp = atomic_read(&stats->bc[i]);
5538 
5539 		if (temp)
5540 			seq_printf(m, "%s%s: %d\n", prefix,
5541 				   binder_command_strings[i], temp);
5542 	}
5543 
5544 	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5545 		     ARRAY_SIZE(binder_return_strings));
5546 	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5547 		int temp = atomic_read(&stats->br[i]);
5548 
5549 		if (temp)
5550 			seq_printf(m, "%s%s: %d\n", prefix,
5551 				   binder_return_strings[i], temp);
5552 	}
5553 
5554 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5555 		     ARRAY_SIZE(binder_objstat_strings));
5556 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5557 		     ARRAY_SIZE(stats->obj_deleted));
5558 	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5559 		int created = atomic_read(&stats->obj_created[i]);
5560 		int deleted = atomic_read(&stats->obj_deleted[i]);
5561 
5562 		if (created || deleted)
5563 			seq_printf(m, "%s%s: active %d total %d\n",
5564 				prefix,
5565 				binder_objstat_strings[i],
5566 				created - deleted,
5567 				created);
5568 	}
5569 }
5570 
5571 static void print_binder_proc_stats(struct seq_file *m,
5572 				    struct binder_proc *proc)
5573 {
5574 	struct binder_work *w;
5575 	struct binder_thread *thread;
5576 	struct rb_node *n;
5577 	int count, strong, weak, ready_threads;
5578 	size_t free_async_space =
5579 		binder_alloc_get_free_async_space(&proc->alloc);
5580 
5581 	seq_printf(m, "proc %d\n", proc->pid);
5582 	seq_printf(m, "context %s\n", proc->context->name);
5583 	count = 0;
5584 	ready_threads = 0;
5585 	binder_inner_proc_lock(proc);
5586 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5587 		count++;
5588 
5589 	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5590 		ready_threads++;
5591 
5592 	seq_printf(m, "  threads: %d\n", count);
5593 	seq_printf(m, "  requested threads: %d+%d/%d\n"
5594 			"  ready threads %d\n"
5595 			"  free async space %zd\n", proc->requested_threads,
5596 			proc->requested_threads_started, proc->max_threads,
5597 			ready_threads,
5598 			free_async_space);
5599 	count = 0;
5600 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5601 		count++;
5602 	binder_inner_proc_unlock(proc);
5603 	seq_printf(m, "  nodes: %d\n", count);
5604 	count = 0;
5605 	strong = 0;
5606 	weak = 0;
5607 	binder_proc_lock(proc);
5608 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5609 		struct binder_ref *ref = rb_entry(n, struct binder_ref,
5610 						  rb_node_desc);
5611 		count++;
5612 		strong += ref->data.strong;
5613 		weak += ref->data.weak;
5614 	}
5615 	binder_proc_unlock(proc);
5616 	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
5617 
5618 	count = binder_alloc_get_allocated_count(&proc->alloc);
5619 	seq_printf(m, "  buffers: %d\n", count);
5620 
5621 	binder_alloc_print_pages(m, &proc->alloc);
5622 
5623 	count = 0;
5624 	binder_inner_proc_lock(proc);
5625 	list_for_each_entry(w, &proc->todo, entry) {
5626 		if (w->type == BINDER_WORK_TRANSACTION)
5627 			count++;
5628 	}
5629 	binder_inner_proc_unlock(proc);
5630 	seq_printf(m, "  pending transactions: %d\n", count);
5631 
5632 	print_binder_stats(m, "  ", &proc->stats);
5633 }
5634 
5635 
5636 static int binder_state_show(struct seq_file *m, void *unused)
5637 {
5638 	struct binder_proc *proc;
5639 	struct binder_node *node;
5640 	struct binder_node *last_node = NULL;
5641 
5642 	seq_puts(m, "binder state:\n");
5643 
5644 	spin_lock(&binder_dead_nodes_lock);
5645 	if (!hlist_empty(&binder_dead_nodes))
5646 		seq_puts(m, "dead nodes:\n");
5647 	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5648 		/*
5649 		 * take a temporary reference on the node so it
5650 		 * survives and isn't removed from the list
5651 		 * while we print it.
5652 		 */
5653 		node->tmp_refs++;
5654 		spin_unlock(&binder_dead_nodes_lock);
5655 		if (last_node)
5656 			binder_put_node(last_node);
5657 		binder_node_lock(node);
5658 		print_binder_node_nilocked(m, node);
5659 		binder_node_unlock(node);
5660 		last_node = node;
5661 		spin_lock(&binder_dead_nodes_lock);
5662 	}
5663 	spin_unlock(&binder_dead_nodes_lock);
5664 	if (last_node)
5665 		binder_put_node(last_node);
5666 
5667 	mutex_lock(&binder_procs_lock);
5668 	hlist_for_each_entry(proc, &binder_procs, proc_node)
5669 		print_binder_proc(m, proc, 1);
5670 	mutex_unlock(&binder_procs_lock);
5671 
5672 	return 0;
5673 }
5674 
5675 static int binder_stats_show(struct seq_file *m, void *unused)
5676 {
5677 	struct binder_proc *proc;
5678 
5679 	seq_puts(m, "binder stats:\n");
5680 
5681 	print_binder_stats(m, "", &binder_stats);
5682 
5683 	mutex_lock(&binder_procs_lock);
5684 	hlist_for_each_entry(proc, &binder_procs, proc_node)
5685 		print_binder_proc_stats(m, proc);
5686 	mutex_unlock(&binder_procs_lock);
5687 
5688 	return 0;
5689 }
5690 
5691 static int binder_transactions_show(struct seq_file *m, void *unused)
5692 {
5693 	struct binder_proc *proc;
5694 
5695 	seq_puts(m, "binder transactions:\n");
5696 	mutex_lock(&binder_procs_lock);
5697 	hlist_for_each_entry(proc, &binder_procs, proc_node)
5698 		print_binder_proc(m, proc, 0);
5699 	mutex_unlock(&binder_procs_lock);
5700 
5701 	return 0;
5702 }
5703 
5704 static int binder_proc_show(struct seq_file *m, void *unused)
5705 {
5706 	struct binder_proc *itr;
5707 	int pid = (unsigned long)m->private;
5708 
5709 	mutex_lock(&binder_procs_lock);
5710 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
5711 		if (itr->pid == pid) {
5712 			seq_puts(m, "binder proc state:\n");
5713 			print_binder_proc(m, itr, 1);
5714 		}
5715 	}
5716 	mutex_unlock(&binder_procs_lock);
5717 
5718 	return 0;
5719 }
5720 
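/*
 * Log entries are filled in without locking; debug_id_done is read
 * before and after dumping the fields so that a slot overwritten by
 * a concurrent writer can be flagged as "(incomplete)".
 */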
5721 static void print_binder_transaction_log_entry(struct seq_file *m,
5722 					struct binder_transaction_log_entry *e)
5723 {
5724 	int debug_id = READ_ONCE(e->debug_id_done);
5725 	/*
5726 	 * read barrier to guarantee debug_id_done read before
5727 	 * we print the log values
5728 	 */
5729 	smp_rmb();
5730 	seq_printf(m,
5731 		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5732 		   e->debug_id, (e->call_type == 2) ? "reply" :
5733 		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5734 		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
5735 		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
5736 		   e->return_error, e->return_error_param,
5737 		   e->return_error_line);
5738 	/*
5739 	 * read barrier to guarantee that debug_id_done is read after
5740 	 * we are done printing the fields of the entry
5741 	 */
5742 	smp_rmb();
5743 	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5744 			"\n" : " (incomplete)\n");
5745 }
5746 
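/*
 * The transaction log is a fixed-size ring indexed by the atomic
 * 'cur' counter: before the first wrap, dump slots 0..cur in order;
 * once full, dump ARRAY_SIZE entries starting at the oldest slot.
 */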
5747 static int binder_transaction_log_show(struct seq_file *m, void *unused)
5748 {
5749 	struct binder_transaction_log *log = m->private;
5750 	unsigned int log_cur = atomic_read(&log->cur);
5751 	unsigned int count;
5752 	unsigned int cur;
5753 	int i;
5754 
5755 	count = log_cur + 1;
5756 	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5757 		0 : count % ARRAY_SIZE(log->entry);
5758 	if (count > ARRAY_SIZE(log->entry) || log->full)
5759 		count = ARRAY_SIZE(log->entry);
5760 	for (i = 0; i < count; i++) {
5761 		unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5762 
5763 		print_binder_transaction_log_entry(m, &log->entry[index]);
5764 	}
5765 	return 0;
5766 }
5767 
5768 static const struct file_operations binder_fops = {
5769 	.owner = THIS_MODULE,
5770 	.poll = binder_poll,
5771 	.unlocked_ioctl = binder_ioctl,
5772 	.compat_ioctl = binder_ioctl,
5773 	.mmap = binder_mmap,
5774 	.open = binder_open,
5775 	.flush = binder_flush,
5776 	.release = binder_release,
5777 };
5778 
5779 BINDER_DEBUG_ENTRY(state);
5780 BINDER_DEBUG_ENTRY(stats);
5781 BINDER_DEBUG_ENTRY(transactions);
5782 BINDER_DEBUG_ENTRY(transaction_log);
5783 
5784 static int __init init_binder_device(const char *name)
5785 {
5786 	int ret;
5787 	struct binder_device *binder_device;
5788 
5789 	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5790 	if (!binder_device)
5791 		return -ENOMEM;
5792 
5793 	binder_device->miscdev.fops = &binder_fops;
5794 	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5795 	binder_device->miscdev.name = name;
5796 
5797 	binder_device->context.binder_context_mgr_uid = INVALID_UID;
5798 	binder_device->context.name = name;
5799 	mutex_init(&binder_device->context.context_mgr_node_lock);
5800 
5801 	ret = misc_register(&binder_device->miscdev);
5802 	if (ret < 0) {
5803 		kfree(binder_device);
5804 		return ret;
5805 	}
5806 
5807 	hlist_add_head(&binder_device->hlist, &binder_devices);
5808 
5809 	return ret;
5810 }
5811 
5812 static int __init binder_init(void)
5813 {
5814 	int ret;
5815 	char *device_name, *device_names, *device_tmp;
5816 	struct binder_device *device;
5817 	struct hlist_node *tmp;
5818 
5819 	ret = binder_alloc_shrinker_init();
5820 	if (ret)
5821 		return ret;
5822 
5823 	atomic_set(&binder_transaction_log.cur, ~0U);
5824 	atomic_set(&binder_transaction_log_failed.cur, ~0U);
5825 
5826 	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5827 	if (binder_debugfs_dir_entry_root)
5828 		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5829 						 binder_debugfs_dir_entry_root);
5830 
5831 	if (binder_debugfs_dir_entry_root) {
5832 		debugfs_create_file("state",
5833 				    0444,
5834 				    binder_debugfs_dir_entry_root,
5835 				    NULL,
5836 				    &binder_state_fops);
5837 		debugfs_create_file("stats",
5838 				    0444,
5839 				    binder_debugfs_dir_entry_root,
5840 				    NULL,
5841 				    &binder_stats_fops);
5842 		debugfs_create_file("transactions",
5843 				    0444,
5844 				    binder_debugfs_dir_entry_root,
5845 				    NULL,
5846 				    &binder_transactions_fops);
5847 		debugfs_create_file("transaction_log",
5848 				    0444,
5849 				    binder_debugfs_dir_entry_root,
5850 				    &binder_transaction_log,
5851 				    &binder_transaction_log_fops);
5852 		debugfs_create_file("failed_transaction_log",
5853 				    0444,
5854 				    binder_debugfs_dir_entry_root,
5855 				    &binder_transaction_log_failed,
5856 				    &binder_transaction_log_fops);
5857 	}
5858 
5859 	/*
5860 	 * Copy the module parameter string, because we don't want to
5861 	 * tokenize it in-place.
5862 	 */
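	/*
	 * With the typical Android configuration, binder_devices_param
	 * is "binder,hwbinder,vndbinder", so the strsep() loop below
	 * registers one misc device per comma-separated name.
	 */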
5863 	device_names = kstrdup(binder_devices_param, GFP_KERNEL);
5864 	if (!device_names) {
5865 		ret = -ENOMEM;
5866 		goto err_alloc_device_names_failed;
5867 	}
5868 
5869 	device_tmp = device_names;
5870 	while ((device_name = strsep(&device_tmp, ","))) {
5871 		ret = init_binder_device(device_name);
5872 		if (ret)
5873 			goto err_init_binder_device_failed;
5874 	}
5875 
5876 	return ret;
5877 
5878 err_init_binder_device_failed:
5879 	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
5880 		misc_deregister(&device->miscdev);
5881 		hlist_del(&device->hlist);
5882 		kfree(device);
5883 	}
5884 
5885 	kfree(device_names);
5886 
5887 err_alloc_device_names_failed:
5888 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
5889 
5890 	return ret;
5891 }
5892 
5893 device_initcall(binder_init);
5894 
5895 #define CREATE_TRACE_POINTS
5896 #include "binder_trace.h"
5897 
5898 MODULE_LICENSE("GPL v2");
5899