xref: /openbmc/linux/drivers/android/binder.c (revision 4b4f3acc)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder.c
3  *
4  * Android IPC Subsystem
5  *
6  * Copyright (C) 2007-2008 Google, Inc.
7  */
8 
9 /*
10  * Locking overview
11  *
12  * There are 3 main spinlocks which must be acquired in the
13  * order shown:
14  *
15  * 1) proc->outer_lock : protects binder_ref
16  *    binder_proc_lock() and binder_proc_unlock() are
17  *    used to acq/rel.
18  * 2) node->lock : protects most fields of binder_node.
19  *    binder_node_lock() and binder_node_unlock() are
20  *    used to acq/rel
21  * 3) proc->inner_lock : protects the thread and node lists
22  *    (proc->threads, proc->waiting_threads, proc->nodes)
23  *    and all todo lists associated with the binder_proc
24  *    (proc->todo, thread->todo, proc->delivered_death and
25  *    node->async_todo), as well as thread->transaction_stack
26  *    binder_inner_proc_lock() and binder_inner_proc_unlock()
27  *    are used to acq/rel
28  *
29  * Any lock under procA must never be nested under any lock at the same
30  * level or below on procB.
31  *
32  * Functions that require a lock held on entry indicate which lock
33  * in the suffix of the function name:
34  *
35  * foo_olocked() : requires node->outer_lock
36  * foo_nlocked() : requires node->lock
37  * foo_ilocked() : requires proc->inner_lock
38  * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39  * foo_nilocked(): requires node->lock and proc->inner_lock
40  * ...
41  */
42 
43 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44 
45 #include <linux/fdtable.h>
46 #include <linux/file.h>
47 #include <linux/freezer.h>
48 #include <linux/fs.h>
49 #include <linux/list.h>
50 #include <linux/miscdevice.h>
51 #include <linux/module.h>
52 #include <linux/mutex.h>
53 #include <linux/nsproxy.h>
54 #include <linux/poll.h>
55 #include <linux/debugfs.h>
56 #include <linux/rbtree.h>
57 #include <linux/sched/signal.h>
58 #include <linux/sched/mm.h>
59 #include <linux/seq_file.h>
60 #include <linux/uaccess.h>
61 #include <linux/pid_namespace.h>
62 #include <linux/security.h>
63 #include <linux/spinlock.h>
64 #include <linux/ratelimit.h>
65 #include <linux/syscalls.h>
66 #include <linux/task_work.h>
67 
68 #include <uapi/linux/android/binder.h>
69 
70 #include <asm/cacheflush.h>
71 
72 #include "binder_alloc.h"
73 #include "binder_internal.h"
74 #include "binder_trace.h"
75 
76 static HLIST_HEAD(binder_deferred_list);
77 static DEFINE_MUTEX(binder_deferred_lock);
78 
79 static HLIST_HEAD(binder_devices);
80 static HLIST_HEAD(binder_procs);
81 static DEFINE_MUTEX(binder_procs_lock);
82 
83 static HLIST_HEAD(binder_dead_nodes);
84 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
85 
86 static struct dentry *binder_debugfs_dir_entry_root;
87 static struct dentry *binder_debugfs_dir_entry_proc;
88 static atomic_t binder_last_id;
89 
90 static int proc_show(struct seq_file *m, void *unused);
91 DEFINE_SHOW_ATTRIBUTE(proc);
92 
93 /* This is only defined in include/asm-arm/sizes.h */
94 #ifndef SZ_1K
95 #define SZ_1K                               0x400
96 #endif
97 
98 #ifndef SZ_4M
99 #define SZ_4M                               0x400000
100 #endif
101 
102 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
103 
104 enum {
105 	BINDER_DEBUG_USER_ERROR             = 1U << 0,
106 	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
107 	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
108 	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
109 	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
110 	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
111 	BINDER_DEBUG_READ_WRITE             = 1U << 6,
112 	BINDER_DEBUG_USER_REFS              = 1U << 7,
113 	BINDER_DEBUG_THREADS                = 1U << 8,
114 	BINDER_DEBUG_TRANSACTION            = 1U << 9,
115 	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
116 	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
117 	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
118 	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
119 	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
120 };
121 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
122 	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
123 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
124 
125 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
126 module_param_named(devices, binder_devices_param, charp, 0444);
127 
128 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
129 static int binder_stop_on_user_error;
130 
131 static int binder_set_stop_on_user_error(const char *val,
132 					 const struct kernel_param *kp)
133 {
134 	int ret;
135 
136 	ret = param_set_int(val, kp);
137 	if (binder_stop_on_user_error < 2)
138 		wake_up(&binder_user_error_wait);
139 	return ret;
140 }
141 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
142 	param_get_int, &binder_stop_on_user_error, 0644);
143 
144 #define binder_debug(mask, x...) \
145 	do { \
146 		if (binder_debug_mask & mask) \
147 			pr_info_ratelimited(x); \
148 	} while (0)
149 
150 #define binder_user_error(x...) \
151 	do { \
152 		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
153 			pr_info_ratelimited(x); \
154 		if (binder_stop_on_user_error) \
155 			binder_stop_on_user_error = 2; \
156 	} while (0)
157 
158 #define to_flat_binder_object(hdr) \
159 	container_of(hdr, struct flat_binder_object, hdr)
160 
161 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
162 
163 #define to_binder_buffer_object(hdr) \
164 	container_of(hdr, struct binder_buffer_object, hdr)
165 
166 #define to_binder_fd_array_object(hdr) \
167 	container_of(hdr, struct binder_fd_array_object, hdr)
168 
169 enum binder_stat_types {
170 	BINDER_STAT_PROC,
171 	BINDER_STAT_THREAD,
172 	BINDER_STAT_NODE,
173 	BINDER_STAT_REF,
174 	BINDER_STAT_DEATH,
175 	BINDER_STAT_TRANSACTION,
176 	BINDER_STAT_TRANSACTION_COMPLETE,
177 	BINDER_STAT_COUNT
178 };
179 
180 struct binder_stats {
181 	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
182 	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
183 	atomic_t obj_created[BINDER_STAT_COUNT];
184 	atomic_t obj_deleted[BINDER_STAT_COUNT];
185 };
186 
187 static struct binder_stats binder_stats;
188 
189 static inline void binder_stats_deleted(enum binder_stat_types type)
190 {
191 	atomic_inc(&binder_stats.obj_deleted[type]);
192 }
193 
194 static inline void binder_stats_created(enum binder_stat_types type)
195 {
196 	atomic_inc(&binder_stats.obj_created[type]);
197 }
198 
199 struct binder_transaction_log_entry {
200 	int debug_id;
201 	int debug_id_done;
202 	int call_type;
203 	int from_proc;
204 	int from_thread;
205 	int target_handle;
206 	int to_proc;
207 	int to_thread;
208 	int to_node;
209 	int data_size;
210 	int offsets_size;
211 	int return_error_line;
212 	uint32_t return_error;
213 	uint32_t return_error_param;
214 	const char *context_name;
215 };
216 struct binder_transaction_log {
217 	atomic_t cur;
218 	bool full;
219 	struct binder_transaction_log_entry entry[32];
220 };
221 static struct binder_transaction_log binder_transaction_log;
222 static struct binder_transaction_log binder_transaction_log_failed;
223 
224 static struct binder_transaction_log_entry *binder_transaction_log_add(
225 	struct binder_transaction_log *log)
226 {
227 	struct binder_transaction_log_entry *e;
228 	unsigned int cur = atomic_inc_return(&log->cur);
229 
230 	if (cur >= ARRAY_SIZE(log->entry))
231 		log->full = true;
232 	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
233 	WRITE_ONCE(e->debug_id_done, 0);
234 	/*
235 	 * write-barrier to synchronize access to e->debug_id_done.
236 	 * We make sure the initialized 0 value is seen before
237 	 * memset() other fields are zeroed by memset.
238 	 */
239 	smp_wmb();
240 	memset(e, 0, sizeof(*e));
241 	return e;
242 }
243 
244 /**
245  * struct binder_work - work enqueued on a worklist
246  * @entry:             node enqueued on list
247  * @type:              type of work to be performed
248  *
249  * There are separate work lists for proc, thread, and node (async).
250  */
251 struct binder_work {
252 	struct list_head entry;
253 
254 	enum {
255 		BINDER_WORK_TRANSACTION = 1,
256 		BINDER_WORK_TRANSACTION_COMPLETE,
257 		BINDER_WORK_RETURN_ERROR,
258 		BINDER_WORK_NODE,
259 		BINDER_WORK_DEAD_BINDER,
260 		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
261 		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
262 	} type;
263 };
264 
265 struct binder_error {
266 	struct binder_work work;
267 	uint32_t cmd;
268 };
269 
270 /**
271  * struct binder_node - binder node bookkeeping
272  * @debug_id:             unique ID for debugging
273  *                        (invariant after initialized)
274  * @lock:                 lock for node fields
275  * @work:                 worklist element for node work
276  *                        (protected by @proc->inner_lock)
277  * @rb_node:              element for proc->nodes tree
278  *                        (protected by @proc->inner_lock)
279  * @dead_node:            element for binder_dead_nodes list
280  *                        (protected by binder_dead_nodes_lock)
281  * @proc:                 binder_proc that owns this node
282  *                        (invariant after initialized)
283  * @refs:                 list of references on this node
284  *                        (protected by @lock)
285  * @internal_strong_refs: used to take strong references when
286  *                        initiating a transaction
287  *                        (protected by @proc->inner_lock if @proc
288  *                        and by @lock)
289  * @local_weak_refs:      weak user refs from local process
290  *                        (protected by @proc->inner_lock if @proc
291  *                        and by @lock)
292  * @local_strong_refs:    strong user refs from local process
293  *                        (protected by @proc->inner_lock if @proc
294  *                        and by @lock)
295  * @tmp_refs:             temporary kernel refs
296  *                        (protected by @proc->inner_lock while @proc
297  *                        is valid, and by binder_dead_nodes_lock
298  *                        if @proc is NULL. During inc/dec and node release
299  *                        it is also protected by @lock to provide safety
300  *                        as the node dies and @proc becomes NULL)
301  * @ptr:                  userspace pointer for node
302  *                        (invariant, no lock needed)
303  * @cookie:               userspace cookie for node
304  *                        (invariant, no lock needed)
305  * @has_strong_ref:       userspace notified of strong ref
306  *                        (protected by @proc->inner_lock if @proc
307  *                        and by @lock)
308  * @pending_strong_ref:   userspace has acked notification of strong ref
309  *                        (protected by @proc->inner_lock if @proc
310  *                        and by @lock)
311  * @has_weak_ref:         userspace notified of weak ref
312  *                        (protected by @proc->inner_lock if @proc
313  *                        and by @lock)
314  * @pending_weak_ref:     userspace has acked notification of weak ref
315  *                        (protected by @proc->inner_lock if @proc
316  *                        and by @lock)
317  * @has_async_transaction: async transaction to node in progress
318  *                        (protected by @lock)
319  * @accept_fds:           file descriptor operations supported for node
320  *                        (invariant after initialized)
321  * @min_priority:         minimum scheduling priority
322  *                        (invariant after initialized)
323  * @txn_security_ctx:     require sender's security context
324  *                        (invariant after initialized)
325  * @async_todo:           list of async work items
326  *                        (protected by @proc->inner_lock)
327  *
328  * Bookkeeping structure for binder nodes.
329  */
330 struct binder_node {
331 	int debug_id;
332 	spinlock_t lock;
333 	struct binder_work work;
334 	union {
335 		struct rb_node rb_node;
336 		struct hlist_node dead_node;
337 	};
338 	struct binder_proc *proc;
339 	struct hlist_head refs;
340 	int internal_strong_refs;
341 	int local_weak_refs;
342 	int local_strong_refs;
343 	int tmp_refs;
344 	binder_uintptr_t ptr;
345 	binder_uintptr_t cookie;
346 	struct {
347 		/*
348 		 * bitfield elements protected by
349 		 * proc inner_lock
350 		 */
351 		u8 has_strong_ref:1;
352 		u8 pending_strong_ref:1;
353 		u8 has_weak_ref:1;
354 		u8 pending_weak_ref:1;
355 	};
356 	struct {
357 		/*
358 		 * invariant after initialization
359 		 */
360 		u8 accept_fds:1;
361 		u8 txn_security_ctx:1;
362 		u8 min_priority;
363 	};
364 	bool has_async_transaction;
365 	struct list_head async_todo;
366 };
367 
368 struct binder_ref_death {
369 	/**
370 	 * @work: worklist element for death notifications
371 	 *        (protected by inner_lock of the proc that
372 	 *        this ref belongs to)
373 	 */
374 	struct binder_work work;
375 	binder_uintptr_t cookie;
376 };
377 
378 /**
379  * struct binder_ref_data - binder_ref counts and id
380  * @debug_id:        unique ID for the ref
381  * @desc:            unique userspace handle for ref
382  * @strong:          strong ref count (debugging only if not locked)
383  * @weak:            weak ref count (debugging only if not locked)
384  *
385  * Structure to hold ref count and ref id information. Since
386  * the actual ref can only be accessed with a lock, this structure
387  * is used to return information about the ref to callers of
388  * ref inc/dec functions.
389  */
390 struct binder_ref_data {
391 	int debug_id;
392 	uint32_t desc;
393 	int strong;
394 	int weak;
395 };
396 
397 /**
398  * struct binder_ref - struct to track references on nodes
399  * @data:        binder_ref_data containing id, handle, and current refcounts
400  * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
401  * @rb_node_node: node for lookup by @node in proc's rb_tree
402  * @node_entry:  list entry for node->refs list in target node
403  *               (protected by @node->lock)
404  * @proc:        binder_proc containing ref
405  * @node:        binder_node of target node. When cleaning up a
406  *               ref for deletion in binder_cleanup_ref, a non-NULL
407  *               @node indicates the node must be freed
408  * @death:       pointer to death notification (ref_death) if requested
409  *               (protected by @node->lock)
410  *
411  * Structure to track references from procA to target node (on procB). This
412  * structure is unsafe to access without holding @proc->outer_lock.
413  */
414 struct binder_ref {
415 	/* Lookups needed: */
416 	/*   node + proc => ref (transaction) */
417 	/*   desc + proc => ref (transaction, inc/dec ref) */
418 	/*   node => refs + procs (proc exit) */
419 	struct binder_ref_data data;
420 	struct rb_node rb_node_desc;
421 	struct rb_node rb_node_node;
422 	struct hlist_node node_entry;
423 	struct binder_proc *proc;
424 	struct binder_node *node;
425 	struct binder_ref_death *death;
426 };
427 
428 enum binder_deferred_state {
429 	BINDER_DEFERRED_FLUSH        = 0x01,
430 	BINDER_DEFERRED_RELEASE      = 0x02,
431 };
432 
433 /**
434  * struct binder_proc - binder process bookkeeping
435  * @proc_node:            element for binder_procs list
436  * @threads:              rbtree of binder_threads in this proc
437  *                        (protected by @inner_lock)
438  * @nodes:                rbtree of binder nodes associated with
439  *                        this proc ordered by node->ptr
440  *                        (protected by @inner_lock)
441  * @refs_by_desc:         rbtree of refs ordered by ref->desc
442  *                        (protected by @outer_lock)
443  * @refs_by_node:         rbtree of refs ordered by ref->node
444  *                        (protected by @outer_lock)
445  * @waiting_threads:      threads currently waiting for proc work
446  *                        (protected by @inner_lock)
447  * @pid                   PID of group_leader of process
448  *                        (invariant after initialized)
449  * @tsk                   task_struct for group_leader of process
450  *                        (invariant after initialized)
451  * @deferred_work_node:   element for binder_deferred_list
452  *                        (protected by binder_deferred_lock)
453  * @deferred_work:        bitmap of deferred work to perform
454  *                        (protected by binder_deferred_lock)
455  * @is_dead:              process is dead and awaiting free
456  *                        when outstanding transactions are cleaned up
457  *                        (protected by @inner_lock)
458  * @todo:                 list of work for this process
459  *                        (protected by @inner_lock)
460  * @stats:                per-process binder statistics
461  *                        (atomics, no lock needed)
462  * @delivered_death:      list of delivered death notification
463  *                        (protected by @inner_lock)
464  * @max_threads:          cap on number of binder threads
465  *                        (protected by @inner_lock)
466  * @requested_threads:    number of binder threads requested but not
467  *                        yet started. In current implementation, can
468  *                        only be 0 or 1.
469  *                        (protected by @inner_lock)
470  * @requested_threads_started: number binder threads started
471  *                        (protected by @inner_lock)
472  * @tmp_ref:              temporary reference to indicate proc is in use
473  *                        (protected by @inner_lock)
474  * @default_priority:     default scheduler priority
475  *                        (invariant after initialized)
476  * @debugfs_entry:        debugfs node
477  * @alloc:                binder allocator bookkeeping
478  * @context:              binder_context for this proc
479  *                        (invariant after initialized)
480  * @inner_lock:           can nest under outer_lock and/or node lock
481  * @outer_lock:           no nesting under innor or node lock
482  *                        Lock order: 1) outer, 2) node, 3) inner
483  *
484  * Bookkeeping structure for binder processes
485  */
486 struct binder_proc {
487 	struct hlist_node proc_node;
488 	struct rb_root threads;
489 	struct rb_root nodes;
490 	struct rb_root refs_by_desc;
491 	struct rb_root refs_by_node;
492 	struct list_head waiting_threads;
493 	int pid;
494 	struct task_struct *tsk;
495 	struct hlist_node deferred_work_node;
496 	int deferred_work;
497 	bool is_dead;
498 
499 	struct list_head todo;
500 	struct binder_stats stats;
501 	struct list_head delivered_death;
502 	int max_threads;
503 	int requested_threads;
504 	int requested_threads_started;
505 	int tmp_ref;
506 	long default_priority;
507 	struct dentry *debugfs_entry;
508 	struct binder_alloc alloc;
509 	struct binder_context *context;
510 	spinlock_t inner_lock;
511 	spinlock_t outer_lock;
512 };
513 
514 enum {
515 	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
516 	BINDER_LOOPER_STATE_ENTERED     = 0x02,
517 	BINDER_LOOPER_STATE_EXITED      = 0x04,
518 	BINDER_LOOPER_STATE_INVALID     = 0x08,
519 	BINDER_LOOPER_STATE_WAITING     = 0x10,
520 	BINDER_LOOPER_STATE_POLL        = 0x20,
521 };
522 
523 /**
524  * struct binder_thread - binder thread bookkeeping
525  * @proc:                 binder process for this thread
526  *                        (invariant after initialization)
527  * @rb_node:              element for proc->threads rbtree
528  *                        (protected by @proc->inner_lock)
529  * @waiting_thread_node:  element for @proc->waiting_threads list
530  *                        (protected by @proc->inner_lock)
531  * @pid:                  PID for this thread
532  *                        (invariant after initialization)
533  * @looper:               bitmap of looping state
534  *                        (only accessed by this thread)
535  * @looper_needs_return:  looping thread needs to exit driver
536  *                        (no lock needed)
537  * @transaction_stack:    stack of in-progress transactions for this thread
538  *                        (protected by @proc->inner_lock)
539  * @todo:                 list of work to do for this thread
540  *                        (protected by @proc->inner_lock)
541  * @process_todo:         whether work in @todo should be processed
542  *                        (protected by @proc->inner_lock)
543  * @return_error:         transaction errors reported by this thread
544  *                        (only accessed by this thread)
545  * @reply_error:          transaction errors reported by target thread
546  *                        (protected by @proc->inner_lock)
547  * @wait:                 wait queue for thread work
548  * @stats:                per-thread statistics
549  *                        (atomics, no lock needed)
550  * @tmp_ref:              temporary reference to indicate thread is in use
551  *                        (atomic since @proc->inner_lock cannot
552  *                        always be acquired)
553  * @is_dead:              thread is dead and awaiting free
554  *                        when outstanding transactions are cleaned up
555  *                        (protected by @proc->inner_lock)
556  *
557  * Bookkeeping structure for binder threads.
558  */
559 struct binder_thread {
560 	struct binder_proc *proc;
561 	struct rb_node rb_node;
562 	struct list_head waiting_thread_node;
563 	int pid;
564 	int looper;              /* only modified by this thread */
565 	bool looper_need_return; /* can be written by other thread */
566 	struct binder_transaction *transaction_stack;
567 	struct list_head todo;
568 	bool process_todo;
569 	struct binder_error return_error;
570 	struct binder_error reply_error;
571 	wait_queue_head_t wait;
572 	struct binder_stats stats;
573 	atomic_t tmp_ref;
574 	bool is_dead;
575 };
576 
577 /**
578  * struct binder_txn_fd_fixup - transaction fd fixup list element
579  * @fixup_entry:          list entry
580  * @file:                 struct file to be associated with new fd
581  * @offset:               offset in buffer data to this fixup
582  *
583  * List element for fd fixups in a transaction. Since file
584  * descriptors need to be allocated in the context of the
585  * target process, we pass each fd to be processed in this
586  * struct.
587  */
588 struct binder_txn_fd_fixup {
589 	struct list_head fixup_entry;
590 	struct file *file;
591 	size_t offset;
592 };
593 
594 struct binder_transaction {
595 	int debug_id;
596 	struct binder_work work;
597 	struct binder_thread *from;
598 	struct binder_transaction *from_parent;
599 	struct binder_proc *to_proc;
600 	struct binder_thread *to_thread;
601 	struct binder_transaction *to_parent;
602 	unsigned need_reply:1;
603 	/* unsigned is_dead:1; */	/* not used at the moment */
604 
605 	struct binder_buffer *buffer;
606 	unsigned int	code;
607 	unsigned int	flags;
608 	long	priority;
609 	long	saved_priority;
610 	kuid_t	sender_euid;
611 	struct list_head fd_fixups;
612 	binder_uintptr_t security_ctx;
613 	/**
614 	 * @lock:  protects @from, @to_proc, and @to_thread
615 	 *
616 	 * @from, @to_proc, and @to_thread can be set to NULL
617 	 * during thread teardown
618 	 */
619 	spinlock_t lock;
620 };
621 
622 /**
623  * struct binder_object - union of flat binder object types
624  * @hdr:   generic object header
625  * @fbo:   binder object (nodes and refs)
626  * @fdo:   file descriptor object
627  * @bbo:   binder buffer pointer
628  * @fdao:  file descriptor array
629  *
630  * Used for type-independent object copies
631  */
632 struct binder_object {
633 	union {
634 		struct binder_object_header hdr;
635 		struct flat_binder_object fbo;
636 		struct binder_fd_object fdo;
637 		struct binder_buffer_object bbo;
638 		struct binder_fd_array_object fdao;
639 	};
640 };
641 
642 /**
643  * binder_proc_lock() - Acquire outer lock for given binder_proc
644  * @proc:         struct binder_proc to acquire
645  *
646  * Acquires proc->outer_lock. Used to protect binder_ref
647  * structures associated with the given proc.
648  */
649 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
650 static void
651 _binder_proc_lock(struct binder_proc *proc, int line)
652 	__acquires(&proc->outer_lock)
653 {
654 	binder_debug(BINDER_DEBUG_SPINLOCKS,
655 		     "%s: line=%d\n", __func__, line);
656 	spin_lock(&proc->outer_lock);
657 }
658 
659 /**
660  * binder_proc_unlock() - Release spinlock for given binder_proc
661  * @proc:         struct binder_proc to acquire
662  *
663  * Release lock acquired via binder_proc_lock()
664  */
665 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
666 static void
667 _binder_proc_unlock(struct binder_proc *proc, int line)
668 	__releases(&proc->outer_lock)
669 {
670 	binder_debug(BINDER_DEBUG_SPINLOCKS,
671 		     "%s: line=%d\n", __func__, line);
672 	spin_unlock(&proc->outer_lock);
673 }
674 
675 /**
676  * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
677  * @proc:         struct binder_proc to acquire
678  *
679  * Acquires proc->inner_lock. Used to protect todo lists
680  */
681 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
682 static void
683 _binder_inner_proc_lock(struct binder_proc *proc, int line)
684 	__acquires(&proc->inner_lock)
685 {
686 	binder_debug(BINDER_DEBUG_SPINLOCKS,
687 		     "%s: line=%d\n", __func__, line);
688 	spin_lock(&proc->inner_lock);
689 }
690 
691 /**
692  * binder_inner_proc_unlock() - Release inner lock for given binder_proc
693  * @proc:         struct binder_proc to acquire
694  *
695  * Release lock acquired via binder_inner_proc_lock()
696  */
697 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
698 static void
699 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
700 	__releases(&proc->inner_lock)
701 {
702 	binder_debug(BINDER_DEBUG_SPINLOCKS,
703 		     "%s: line=%d\n", __func__, line);
704 	spin_unlock(&proc->inner_lock);
705 }
706 
707 /**
708  * binder_node_lock() - Acquire spinlock for given binder_node
709  * @node:         struct binder_node to acquire
710  *
711  * Acquires node->lock. Used to protect binder_node fields
712  */
713 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
714 static void
715 _binder_node_lock(struct binder_node *node, int line)
716 	__acquires(&node->lock)
717 {
718 	binder_debug(BINDER_DEBUG_SPINLOCKS,
719 		     "%s: line=%d\n", __func__, line);
720 	spin_lock(&node->lock);
721 }
722 
723 /**
724  * binder_node_unlock() - Release spinlock for given binder_proc
725  * @node:         struct binder_node to acquire
726  *
727  * Release lock acquired via binder_node_lock()
728  */
729 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
730 static void
731 _binder_node_unlock(struct binder_node *node, int line)
732 	__releases(&node->lock)
733 {
734 	binder_debug(BINDER_DEBUG_SPINLOCKS,
735 		     "%s: line=%d\n", __func__, line);
736 	spin_unlock(&node->lock);
737 }
738 
739 /**
740  * binder_node_inner_lock() - Acquire node and inner locks
741  * @node:         struct binder_node to acquire
742  *
743  * Acquires node->lock. If node->proc also acquires
744  * proc->inner_lock. Used to protect binder_node fields
745  */
746 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
747 static void
748 _binder_node_inner_lock(struct binder_node *node, int line)
749 	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
750 {
751 	binder_debug(BINDER_DEBUG_SPINLOCKS,
752 		     "%s: line=%d\n", __func__, line);
753 	spin_lock(&node->lock);
754 	if (node->proc)
755 		binder_inner_proc_lock(node->proc);
756 	else
757 		/* annotation for sparse */
758 		__acquire(&node->proc->inner_lock);
759 }
760 
761 /**
762  * binder_node_unlock() - Release node and inner locks
763  * @node:         struct binder_node to acquire
764  *
765  * Release lock acquired via binder_node_lock()
766  */
767 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
768 static void
769 _binder_node_inner_unlock(struct binder_node *node, int line)
770 	__releases(&node->lock) __releases(&node->proc->inner_lock)
771 {
772 	struct binder_proc *proc = node->proc;
773 
774 	binder_debug(BINDER_DEBUG_SPINLOCKS,
775 		     "%s: line=%d\n", __func__, line);
776 	if (proc)
777 		binder_inner_proc_unlock(proc);
778 	else
779 		/* annotation for sparse */
780 		__release(&node->proc->inner_lock);
781 	spin_unlock(&node->lock);
782 }
783 
784 static bool binder_worklist_empty_ilocked(struct list_head *list)
785 {
786 	return list_empty(list);
787 }
788 
789 /**
790  * binder_worklist_empty() - Check if no items on the work list
791  * @proc:       binder_proc associated with list
792  * @list:	list to check
793  *
794  * Return: true if there are no items on list, else false
795  */
796 static bool binder_worklist_empty(struct binder_proc *proc,
797 				  struct list_head *list)
798 {
799 	bool ret;
800 
801 	binder_inner_proc_lock(proc);
802 	ret = binder_worklist_empty_ilocked(list);
803 	binder_inner_proc_unlock(proc);
804 	return ret;
805 }
806 
807 /**
808  * binder_enqueue_work_ilocked() - Add an item to the work list
809  * @work:         struct binder_work to add to list
810  * @target_list:  list to add work to
811  *
812  * Adds the work to the specified list. Asserts that work
813  * is not already on a list.
814  *
815  * Requires the proc->inner_lock to be held.
816  */
817 static void
818 binder_enqueue_work_ilocked(struct binder_work *work,
819 			   struct list_head *target_list)
820 {
821 	BUG_ON(target_list == NULL);
822 	BUG_ON(work->entry.next && !list_empty(&work->entry));
823 	list_add_tail(&work->entry, target_list);
824 }
825 
826 /**
827  * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
828  * @thread:       thread to queue work to
829  * @work:         struct binder_work to add to list
830  *
831  * Adds the work to the todo list of the thread. Doesn't set the process_todo
832  * flag, which means that (if it wasn't already set) the thread will go to
833  * sleep without handling this work when it calls read.
834  *
835  * Requires the proc->inner_lock to be held.
836  */
837 static void
838 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
839 					    struct binder_work *work)
840 {
841 	WARN_ON(!list_empty(&thread->waiting_thread_node));
842 	binder_enqueue_work_ilocked(work, &thread->todo);
843 }
844 
845 /**
846  * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
847  * @thread:       thread to queue work to
848  * @work:         struct binder_work to add to list
849  *
850  * Adds the work to the todo list of the thread, and enables processing
851  * of the todo queue.
852  *
853  * Requires the proc->inner_lock to be held.
854  */
855 static void
856 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
857 				   struct binder_work *work)
858 {
859 	WARN_ON(!list_empty(&thread->waiting_thread_node));
860 	binder_enqueue_work_ilocked(work, &thread->todo);
861 	thread->process_todo = true;
862 }
863 
864 /**
865  * binder_enqueue_thread_work() - Add an item to the thread work list
866  * @thread:       thread to queue work to
867  * @work:         struct binder_work to add to list
868  *
869  * Adds the work to the todo list of the thread, and enables processing
870  * of the todo queue.
871  */
872 static void
873 binder_enqueue_thread_work(struct binder_thread *thread,
874 			   struct binder_work *work)
875 {
876 	binder_inner_proc_lock(thread->proc);
877 	binder_enqueue_thread_work_ilocked(thread, work);
878 	binder_inner_proc_unlock(thread->proc);
879 }
880 
881 static void
882 binder_dequeue_work_ilocked(struct binder_work *work)
883 {
884 	list_del_init(&work->entry);
885 }
886 
887 /**
888  * binder_dequeue_work() - Removes an item from the work list
889  * @proc:         binder_proc associated with list
890  * @work:         struct binder_work to remove from list
891  *
892  * Removes the specified work item from whatever list it is on.
893  * Can safely be called if work is not on any list.
894  */
895 static void
896 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
897 {
898 	binder_inner_proc_lock(proc);
899 	binder_dequeue_work_ilocked(work);
900 	binder_inner_proc_unlock(proc);
901 }
902 
903 static struct binder_work *binder_dequeue_work_head_ilocked(
904 					struct list_head *list)
905 {
906 	struct binder_work *w;
907 
908 	w = list_first_entry_or_null(list, struct binder_work, entry);
909 	if (w)
910 		list_del_init(&w->entry);
911 	return w;
912 }
913 
914 /**
915  * binder_dequeue_work_head() - Dequeues the item at head of list
916  * @proc:         binder_proc associated with list
917  * @list:         list to dequeue head
918  *
919  * Removes the head of the list if there are items on the list
920  *
921  * Return: pointer dequeued binder_work, NULL if list was empty
922  */
923 static struct binder_work *binder_dequeue_work_head(
924 					struct binder_proc *proc,
925 					struct list_head *list)
926 {
927 	struct binder_work *w;
928 
929 	binder_inner_proc_lock(proc);
930 	w = binder_dequeue_work_head_ilocked(list);
931 	binder_inner_proc_unlock(proc);
932 	return w;
933 }
934 
935 static void
936 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
937 static void binder_free_thread(struct binder_thread *thread);
938 static void binder_free_proc(struct binder_proc *proc);
939 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
940 
941 static bool binder_has_work_ilocked(struct binder_thread *thread,
942 				    bool do_proc_work)
943 {
944 	return thread->process_todo ||
945 		thread->looper_need_return ||
946 		(do_proc_work &&
947 		 !binder_worklist_empty_ilocked(&thread->proc->todo));
948 }
949 
950 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
951 {
952 	bool has_work;
953 
954 	binder_inner_proc_lock(thread->proc);
955 	has_work = binder_has_work_ilocked(thread, do_proc_work);
956 	binder_inner_proc_unlock(thread->proc);
957 
958 	return has_work;
959 }
960 
961 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
962 {
963 	return !thread->transaction_stack &&
964 		binder_worklist_empty_ilocked(&thread->todo) &&
965 		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
966 				   BINDER_LOOPER_STATE_REGISTERED));
967 }
968 
969 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
970 					       bool sync)
971 {
972 	struct rb_node *n;
973 	struct binder_thread *thread;
974 
975 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
976 		thread = rb_entry(n, struct binder_thread, rb_node);
977 		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
978 		    binder_available_for_proc_work_ilocked(thread)) {
979 			if (sync)
980 				wake_up_interruptible_sync(&thread->wait);
981 			else
982 				wake_up_interruptible(&thread->wait);
983 		}
984 	}
985 }
986 
987 /**
988  * binder_select_thread_ilocked() - selects a thread for doing proc work.
989  * @proc:	process to select a thread from
990  *
991  * Note that calling this function moves the thread off the waiting_threads
992  * list, so it can only be woken up by the caller of this function, or a
993  * signal. Therefore, callers *should* always wake up the thread this function
994  * returns.
995  *
996  * Return:	If there's a thread currently waiting for process work,
997  *		returns that thread. Otherwise returns NULL.
998  */
999 static struct binder_thread *
1000 binder_select_thread_ilocked(struct binder_proc *proc)
1001 {
1002 	struct binder_thread *thread;
1003 
1004 	assert_spin_locked(&proc->inner_lock);
1005 	thread = list_first_entry_or_null(&proc->waiting_threads,
1006 					  struct binder_thread,
1007 					  waiting_thread_node);
1008 
1009 	if (thread)
1010 		list_del_init(&thread->waiting_thread_node);
1011 
1012 	return thread;
1013 }
1014 
1015 /**
1016  * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
1017  * @proc:	process to wake up a thread in
1018  * @thread:	specific thread to wake-up (may be NULL)
1019  * @sync:	whether to do a synchronous wake-up
1020  *
1021  * This function wakes up a thread in the @proc process.
1022  * The caller may provide a specific thread to wake-up in
1023  * the @thread parameter. If @thread is NULL, this function
1024  * will wake up threads that have called poll().
1025  *
1026  * Note that for this function to work as expected, callers
1027  * should first call binder_select_thread() to find a thread
1028  * to handle the work (if they don't have a thread already),
1029  * and pass the result into the @thread parameter.
1030  */
1031 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
1032 					 struct binder_thread *thread,
1033 					 bool sync)
1034 {
1035 	assert_spin_locked(&proc->inner_lock);
1036 
1037 	if (thread) {
1038 		if (sync)
1039 			wake_up_interruptible_sync(&thread->wait);
1040 		else
1041 			wake_up_interruptible(&thread->wait);
1042 		return;
1043 	}
1044 
1045 	/* Didn't find a thread waiting for proc work; this can happen
1046 	 * in two scenarios:
1047 	 * 1. All threads are busy handling transactions
1048 	 *    In that case, one of those threads should call back into
1049 	 *    the kernel driver soon and pick up this work.
1050 	 * 2. Threads are using the (e)poll interface, in which case
1051 	 *    they may be blocked on the waitqueue without having been
1052 	 *    added to waiting_threads. For this case, we just iterate
1053 	 *    over all threads not handling transaction work, and
1054 	 *    wake them all up. We wake all because we don't know whether
1055 	 *    a thread that called into (e)poll is handling non-binder
1056 	 *    work currently.
1057 	 */
1058 	binder_wakeup_poll_threads_ilocked(proc, sync);
1059 }
1060 
1061 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1062 {
1063 	struct binder_thread *thread = binder_select_thread_ilocked(proc);
1064 
1065 	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1066 }
1067 
1068 static void binder_set_nice(long nice)
1069 {
1070 	long min_nice;
1071 
1072 	if (can_nice(current, nice)) {
1073 		set_user_nice(current, nice);
1074 		return;
1075 	}
1076 	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
1077 	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1078 		     "%d: nice value %ld not allowed use %ld instead\n",
1079 		      current->pid, nice, min_nice);
1080 	set_user_nice(current, min_nice);
1081 	if (min_nice <= MAX_NICE)
1082 		return;
1083 	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
1084 }
1085 
1086 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1087 						   binder_uintptr_t ptr)
1088 {
1089 	struct rb_node *n = proc->nodes.rb_node;
1090 	struct binder_node *node;
1091 
1092 	assert_spin_locked(&proc->inner_lock);
1093 
1094 	while (n) {
1095 		node = rb_entry(n, struct binder_node, rb_node);
1096 
1097 		if (ptr < node->ptr)
1098 			n = n->rb_left;
1099 		else if (ptr > node->ptr)
1100 			n = n->rb_right;
1101 		else {
1102 			/*
1103 			 * take an implicit weak reference
1104 			 * to ensure node stays alive until
1105 			 * call to binder_put_node()
1106 			 */
1107 			binder_inc_node_tmpref_ilocked(node);
1108 			return node;
1109 		}
1110 	}
1111 	return NULL;
1112 }
1113 
1114 static struct binder_node *binder_get_node(struct binder_proc *proc,
1115 					   binder_uintptr_t ptr)
1116 {
1117 	struct binder_node *node;
1118 
1119 	binder_inner_proc_lock(proc);
1120 	node = binder_get_node_ilocked(proc, ptr);
1121 	binder_inner_proc_unlock(proc);
1122 	return node;
1123 }
1124 
1125 static struct binder_node *binder_init_node_ilocked(
1126 						struct binder_proc *proc,
1127 						struct binder_node *new_node,
1128 						struct flat_binder_object *fp)
1129 {
1130 	struct rb_node **p = &proc->nodes.rb_node;
1131 	struct rb_node *parent = NULL;
1132 	struct binder_node *node;
1133 	binder_uintptr_t ptr = fp ? fp->binder : 0;
1134 	binder_uintptr_t cookie = fp ? fp->cookie : 0;
1135 	__u32 flags = fp ? fp->flags : 0;
1136 
1137 	assert_spin_locked(&proc->inner_lock);
1138 
1139 	while (*p) {
1140 
1141 		parent = *p;
1142 		node = rb_entry(parent, struct binder_node, rb_node);
1143 
1144 		if (ptr < node->ptr)
1145 			p = &(*p)->rb_left;
1146 		else if (ptr > node->ptr)
1147 			p = &(*p)->rb_right;
1148 		else {
1149 			/*
1150 			 * A matching node is already in
1151 			 * the rb tree. Abandon the init
1152 			 * and return it.
1153 			 */
1154 			binder_inc_node_tmpref_ilocked(node);
1155 			return node;
1156 		}
1157 	}
1158 	node = new_node;
1159 	binder_stats_created(BINDER_STAT_NODE);
1160 	node->tmp_refs++;
1161 	rb_link_node(&node->rb_node, parent, p);
1162 	rb_insert_color(&node->rb_node, &proc->nodes);
1163 	node->debug_id = atomic_inc_return(&binder_last_id);
1164 	node->proc = proc;
1165 	node->ptr = ptr;
1166 	node->cookie = cookie;
1167 	node->work.type = BINDER_WORK_NODE;
1168 	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1169 	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1170 	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
1171 	spin_lock_init(&node->lock);
1172 	INIT_LIST_HEAD(&node->work.entry);
1173 	INIT_LIST_HEAD(&node->async_todo);
1174 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1175 		     "%d:%d node %d u%016llx c%016llx created\n",
1176 		     proc->pid, current->pid, node->debug_id,
1177 		     (u64)node->ptr, (u64)node->cookie);
1178 
1179 	return node;
1180 }
1181 
1182 static struct binder_node *binder_new_node(struct binder_proc *proc,
1183 					   struct flat_binder_object *fp)
1184 {
1185 	struct binder_node *node;
1186 	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1187 
1188 	if (!new_node)
1189 		return NULL;
1190 	binder_inner_proc_lock(proc);
1191 	node = binder_init_node_ilocked(proc, new_node, fp);
1192 	binder_inner_proc_unlock(proc);
1193 	if (node != new_node)
1194 		/*
1195 		 * The node was already added by another thread
1196 		 */
1197 		kfree(new_node);
1198 
1199 	return node;
1200 }
1201 
1202 static void binder_free_node(struct binder_node *node)
1203 {
1204 	kfree(node);
1205 	binder_stats_deleted(BINDER_STAT_NODE);
1206 }
1207 
1208 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1209 				    int internal,
1210 				    struct list_head *target_list)
1211 {
1212 	struct binder_proc *proc = node->proc;
1213 
1214 	assert_spin_locked(&node->lock);
1215 	if (proc)
1216 		assert_spin_locked(&proc->inner_lock);
1217 	if (strong) {
1218 		if (internal) {
1219 			if (target_list == NULL &&
1220 			    node->internal_strong_refs == 0 &&
1221 			    !(node->proc &&
1222 			      node == node->proc->context->binder_context_mgr_node &&
1223 			      node->has_strong_ref)) {
1224 				pr_err("invalid inc strong node for %d\n",
1225 					node->debug_id);
1226 				return -EINVAL;
1227 			}
1228 			node->internal_strong_refs++;
1229 		} else
1230 			node->local_strong_refs++;
1231 		if (!node->has_strong_ref && target_list) {
1232 			struct binder_thread *thread = container_of(target_list,
1233 						    struct binder_thread, todo);
1234 			binder_dequeue_work_ilocked(&node->work);
1235 			BUG_ON(&thread->todo != target_list);
1236 			binder_enqueue_deferred_thread_work_ilocked(thread,
1237 								   &node->work);
1238 		}
1239 	} else {
1240 		if (!internal)
1241 			node->local_weak_refs++;
1242 		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1243 			if (target_list == NULL) {
1244 				pr_err("invalid inc weak node for %d\n",
1245 					node->debug_id);
1246 				return -EINVAL;
1247 			}
1248 			/*
1249 			 * See comment above
1250 			 */
1251 			binder_enqueue_work_ilocked(&node->work, target_list);
1252 		}
1253 	}
1254 	return 0;
1255 }
1256 
1257 static int binder_inc_node(struct binder_node *node, int strong, int internal,
1258 			   struct list_head *target_list)
1259 {
1260 	int ret;
1261 
1262 	binder_node_inner_lock(node);
1263 	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1264 	binder_node_inner_unlock(node);
1265 
1266 	return ret;
1267 }
1268 
1269 static bool binder_dec_node_nilocked(struct binder_node *node,
1270 				     int strong, int internal)
1271 {
1272 	struct binder_proc *proc = node->proc;
1273 
1274 	assert_spin_locked(&node->lock);
1275 	if (proc)
1276 		assert_spin_locked(&proc->inner_lock);
1277 	if (strong) {
1278 		if (internal)
1279 			node->internal_strong_refs--;
1280 		else
1281 			node->local_strong_refs--;
1282 		if (node->local_strong_refs || node->internal_strong_refs)
1283 			return false;
1284 	} else {
1285 		if (!internal)
1286 			node->local_weak_refs--;
1287 		if (node->local_weak_refs || node->tmp_refs ||
1288 				!hlist_empty(&node->refs))
1289 			return false;
1290 	}
1291 
1292 	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1293 		if (list_empty(&node->work.entry)) {
1294 			binder_enqueue_work_ilocked(&node->work, &proc->todo);
1295 			binder_wakeup_proc_ilocked(proc);
1296 		}
1297 	} else {
1298 		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1299 		    !node->local_weak_refs && !node->tmp_refs) {
1300 			if (proc) {
1301 				binder_dequeue_work_ilocked(&node->work);
1302 				rb_erase(&node->rb_node, &proc->nodes);
1303 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1304 					     "refless node %d deleted\n",
1305 					     node->debug_id);
1306 			} else {
1307 				BUG_ON(!list_empty(&node->work.entry));
1308 				spin_lock(&binder_dead_nodes_lock);
1309 				/*
1310 				 * tmp_refs could have changed so
1311 				 * check it again
1312 				 */
1313 				if (node->tmp_refs) {
1314 					spin_unlock(&binder_dead_nodes_lock);
1315 					return false;
1316 				}
1317 				hlist_del(&node->dead_node);
1318 				spin_unlock(&binder_dead_nodes_lock);
1319 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1320 					     "dead node %d deleted\n",
1321 					     node->debug_id);
1322 			}
1323 			return true;
1324 		}
1325 	}
1326 	return false;
1327 }
1328 
1329 static void binder_dec_node(struct binder_node *node, int strong, int internal)
1330 {
1331 	bool free_node;
1332 
1333 	binder_node_inner_lock(node);
1334 	free_node = binder_dec_node_nilocked(node, strong, internal);
1335 	binder_node_inner_unlock(node);
1336 	if (free_node)
1337 		binder_free_node(node);
1338 }
1339 
1340 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1341 {
1342 	/*
1343 	 * No call to binder_inc_node() is needed since we
1344 	 * don't need to inform userspace of any changes to
1345 	 * tmp_refs
1346 	 */
1347 	node->tmp_refs++;
1348 }
1349 
1350 /**
1351  * binder_inc_node_tmpref() - take a temporary reference on node
1352  * @node:	node to reference
1353  *
1354  * Take reference on node to prevent the node from being freed
1355  * while referenced only by a local variable. The inner lock is
1356  * needed to serialize with the node work on the queue (which
1357  * isn't needed after the node is dead). If the node is dead
1358  * (node->proc is NULL), use binder_dead_nodes_lock to protect
1359  * node->tmp_refs against dead-node-only cases where the node
1360  * lock cannot be acquired (eg traversing the dead node list to
1361  * print nodes)
1362  */
1363 static void binder_inc_node_tmpref(struct binder_node *node)
1364 {
1365 	binder_node_lock(node);
1366 	if (node->proc)
1367 		binder_inner_proc_lock(node->proc);
1368 	else
1369 		spin_lock(&binder_dead_nodes_lock);
1370 	binder_inc_node_tmpref_ilocked(node);
1371 	if (node->proc)
1372 		binder_inner_proc_unlock(node->proc);
1373 	else
1374 		spin_unlock(&binder_dead_nodes_lock);
1375 	binder_node_unlock(node);
1376 }
1377 
1378 /**
1379  * binder_dec_node_tmpref() - remove a temporary reference on node
1380  * @node:	node to reference
1381  *
1382  * Release temporary reference on node taken via binder_inc_node_tmpref()
1383  */
1384 static void binder_dec_node_tmpref(struct binder_node *node)
1385 {
1386 	bool free_node;
1387 
1388 	binder_node_inner_lock(node);
1389 	if (!node->proc)
1390 		spin_lock(&binder_dead_nodes_lock);
1391 	else
1392 		__acquire(&binder_dead_nodes_lock);
1393 	node->tmp_refs--;
1394 	BUG_ON(node->tmp_refs < 0);
1395 	if (!node->proc)
1396 		spin_unlock(&binder_dead_nodes_lock);
1397 	else
1398 		__release(&binder_dead_nodes_lock);
1399 	/*
1400 	 * Call binder_dec_node() to check if all refcounts are 0
1401 	 * and cleanup is needed. Calling with strong=0 and internal=1
1402 	 * causes no actual reference to be released in binder_dec_node().
1403 	 * If that changes, a change is needed here too.
1404 	 */
1405 	free_node = binder_dec_node_nilocked(node, 0, 1);
1406 	binder_node_inner_unlock(node);
1407 	if (free_node)
1408 		binder_free_node(node);
1409 }
1410 
1411 static void binder_put_node(struct binder_node *node)
1412 {
1413 	binder_dec_node_tmpref(node);
1414 }
1415 
1416 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1417 						 u32 desc, bool need_strong_ref)
1418 {
1419 	struct rb_node *n = proc->refs_by_desc.rb_node;
1420 	struct binder_ref *ref;
1421 
1422 	while (n) {
1423 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1424 
1425 		if (desc < ref->data.desc) {
1426 			n = n->rb_left;
1427 		} else if (desc > ref->data.desc) {
1428 			n = n->rb_right;
1429 		} else if (need_strong_ref && !ref->data.strong) {
1430 			binder_user_error("tried to use weak ref as strong ref\n");
1431 			return NULL;
1432 		} else {
1433 			return ref;
1434 		}
1435 	}
1436 	return NULL;
1437 }
1438 
1439 /**
1440  * binder_get_ref_for_node_olocked() - get the ref associated with given node
1441  * @proc:	binder_proc that owns the ref
1442  * @node:	binder_node of target
1443  * @new_ref:	newly allocated binder_ref to be initialized or %NULL
1444  *
1445  * Look up the ref for the given node and return it if it exists
1446  *
1447  * If it doesn't exist and the caller provides a newly allocated
1448  * ref, initialize the fields of the newly allocated ref and insert
1449  * into the given proc rb_trees and node refs list.
1450  *
1451  * Return:	the ref for node. It is possible that another thread
1452  *		allocated/initialized the ref first in which case the
1453  *		returned ref would be different than the passed-in
1454  *		new_ref. new_ref must be kfree'd by the caller in
1455  *		this case.
1456  */
1457 static struct binder_ref *binder_get_ref_for_node_olocked(
1458 					struct binder_proc *proc,
1459 					struct binder_node *node,
1460 					struct binder_ref *new_ref)
1461 {
1462 	struct binder_context *context = proc->context;
1463 	struct rb_node **p = &proc->refs_by_node.rb_node;
1464 	struct rb_node *parent = NULL;
1465 	struct binder_ref *ref;
1466 	struct rb_node *n;
1467 
1468 	while (*p) {
1469 		parent = *p;
1470 		ref = rb_entry(parent, struct binder_ref, rb_node_node);
1471 
1472 		if (node < ref->node)
1473 			p = &(*p)->rb_left;
1474 		else if (node > ref->node)
1475 			p = &(*p)->rb_right;
1476 		else
1477 			return ref;
1478 	}
1479 	if (!new_ref)
1480 		return NULL;
1481 
1482 	binder_stats_created(BINDER_STAT_REF);
1483 	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1484 	new_ref->proc = proc;
1485 	new_ref->node = node;
1486 	rb_link_node(&new_ref->rb_node_node, parent, p);
1487 	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1488 
1489 	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1490 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1491 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1492 		if (ref->data.desc > new_ref->data.desc)
1493 			break;
1494 		new_ref->data.desc = ref->data.desc + 1;
1495 	}
1496 
1497 	p = &proc->refs_by_desc.rb_node;
1498 	while (*p) {
1499 		parent = *p;
1500 		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1501 
1502 		if (new_ref->data.desc < ref->data.desc)
1503 			p = &(*p)->rb_left;
1504 		else if (new_ref->data.desc > ref->data.desc)
1505 			p = &(*p)->rb_right;
1506 		else
1507 			BUG();
1508 	}
1509 	rb_link_node(&new_ref->rb_node_desc, parent, p);
1510 	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1511 
1512 	binder_node_lock(node);
1513 	hlist_add_head(&new_ref->node_entry, &node->refs);
1514 
1515 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1516 		     "%d new ref %d desc %d for node %d\n",
1517 		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1518 		      node->debug_id);
1519 	binder_node_unlock(node);
1520 	return new_ref;
1521 }
1522 
1523 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1524 {
1525 	bool delete_node = false;
1526 
1527 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1528 		     "%d delete ref %d desc %d for node %d\n",
1529 		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
1530 		      ref->node->debug_id);
1531 
1532 	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1533 	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1534 
1535 	binder_node_inner_lock(ref->node);
1536 	if (ref->data.strong)
1537 		binder_dec_node_nilocked(ref->node, 1, 1);
1538 
1539 	hlist_del(&ref->node_entry);
1540 	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1541 	binder_node_inner_unlock(ref->node);
1542 	/*
1543 	 * Clear ref->node unless we want the caller to free the node
1544 	 */
1545 	if (!delete_node) {
1546 		/*
1547 		 * The caller uses ref->node to determine
1548 		 * whether the node needs to be freed. Clear
1549 		 * it since the node is still alive.
1550 		 */
1551 		ref->node = NULL;
1552 	}
1553 
1554 	if (ref->death) {
1555 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1556 			     "%d delete ref %d desc %d has death notification\n",
1557 			      ref->proc->pid, ref->data.debug_id,
1558 			      ref->data.desc);
1559 		binder_dequeue_work(ref->proc, &ref->death->work);
1560 		binder_stats_deleted(BINDER_STAT_DEATH);
1561 	}
1562 	binder_stats_deleted(BINDER_STAT_REF);
1563 }
1564 
1565 /**
1566  * binder_inc_ref_olocked() - increment the ref for given handle
1567  * @ref:         ref to be incremented
1568  * @strong:      if true, strong increment, else weak
1569  * @target_list: list to queue node work on
1570  *
1571  * Increment the ref. @ref->proc->outer_lock must be held on entry
1572  *
1573  * Return: 0, if successful, else errno
1574  */
1575 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1576 				  struct list_head *target_list)
1577 {
1578 	int ret;
1579 
1580 	if (strong) {
1581 		if (ref->data.strong == 0) {
1582 			ret = binder_inc_node(ref->node, 1, 1, target_list);
1583 			if (ret)
1584 				return ret;
1585 		}
1586 		ref->data.strong++;
1587 	} else {
1588 		if (ref->data.weak == 0) {
1589 			ret = binder_inc_node(ref->node, 0, 1, target_list);
1590 			if (ret)
1591 				return ret;
1592 		}
1593 		ref->data.weak++;
1594 	}
1595 	return 0;
1596 }
1597 
1598 /**
1599  * binder_dec_ref() - dec the ref for given handle
1600  * @ref:	ref to be decremented
1601  * @strong:	if true, strong decrement, else weak
1602  *
1603  * Decrement the ref.
1604  *
1605  * Return: true if ref is cleaned up and ready to be freed
1606  */
1607 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1608 {
1609 	if (strong) {
1610 		if (ref->data.strong == 0) {
1611 			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1612 					  ref->proc->pid, ref->data.debug_id,
1613 					  ref->data.desc, ref->data.strong,
1614 					  ref->data.weak);
1615 			return false;
1616 		}
1617 		ref->data.strong--;
1618 		if (ref->data.strong == 0)
1619 			binder_dec_node(ref->node, strong, 1);
1620 	} else {
1621 		if (ref->data.weak == 0) {
1622 			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1623 					  ref->proc->pid, ref->data.debug_id,
1624 					  ref->data.desc, ref->data.strong,
1625 					  ref->data.weak);
1626 			return false;
1627 		}
1628 		ref->data.weak--;
1629 	}
1630 	if (ref->data.strong == 0 && ref->data.weak == 0) {
1631 		binder_cleanup_ref_olocked(ref);
1632 		return true;
1633 	}
1634 	return false;
1635 }
1636 
1637 /**
1638  * binder_get_node_from_ref() - get the node from the given proc/desc
1639  * @proc:	proc containing the ref
1640  * @desc:	the handle associated with the ref
1641  * @need_strong_ref: if true, only return node if ref is strong
1642  * @rdata:	the id/refcount data for the ref
1643  *
1644  * Given a proc and ref handle, return the associated binder_node
1645  *
1646  * Return: a binder_node or NULL if not found or not strong when strong required
1647  */
1648 static struct binder_node *binder_get_node_from_ref(
1649 		struct binder_proc *proc,
1650 		u32 desc, bool need_strong_ref,
1651 		struct binder_ref_data *rdata)
1652 {
1653 	struct binder_node *node;
1654 	struct binder_ref *ref;
1655 
1656 	binder_proc_lock(proc);
1657 	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1658 	if (!ref)
1659 		goto err_no_ref;
1660 	node = ref->node;
1661 	/*
1662 	 * Take an implicit reference on the node to ensure
1663 	 * it stays alive until the call to binder_put_node()
1664 	 */
1665 	binder_inc_node_tmpref(node);
1666 	if (rdata)
1667 		*rdata = ref->data;
1668 	binder_proc_unlock(proc);
1669 
1670 	return node;
1671 
1672 err_no_ref:
1673 	binder_proc_unlock(proc);
1674 	return NULL;
1675 }
1676 
1677 /**
1678  * binder_free_ref() - free the binder_ref
1679  * @ref:	ref to free
1680  *
1681  * Free the binder_ref. Free the binder_node indicated by ref->node
1682  * (if non-NULL) and the binder_ref_death indicated by ref->death.
1683  */
1684 static void binder_free_ref(struct binder_ref *ref)
1685 {
1686 	if (ref->node)
1687 		binder_free_node(ref->node);
1688 	kfree(ref->death);
1689 	kfree(ref);
1690 }
1691 
1692 /**
1693  * binder_update_ref_for_handle() - inc/dec the ref for given handle
1694  * @proc:	proc containing the ref
1695  * @desc:	the handle associated with the ref
1696  * @increment:	true=inc reference, false=dec reference
1697  * @strong:	true=strong reference, false=weak reference
1698  * @rdata:	the id/refcount data for the ref
1699  *
1700  * Given a proc and ref handle, increment or decrement the ref
1701  * according to "increment" arg.
1702  *
1703  * Return: 0 if successful, else errno
1704  */
1705 static int binder_update_ref_for_handle(struct binder_proc *proc,
1706 		uint32_t desc, bool increment, bool strong,
1707 		struct binder_ref_data *rdata)
1708 {
1709 	int ret = 0;
1710 	struct binder_ref *ref;
1711 	bool delete_ref = false;
1712 
1713 	binder_proc_lock(proc);
1714 	ref = binder_get_ref_olocked(proc, desc, strong);
1715 	if (!ref) {
1716 		ret = -EINVAL;
1717 		goto err_no_ref;
1718 	}
1719 	if (increment)
1720 		ret = binder_inc_ref_olocked(ref, strong, NULL);
1721 	else
1722 		delete_ref = binder_dec_ref_olocked(ref, strong);
1723 
1724 	if (rdata)
1725 		*rdata = ref->data;
1726 	binder_proc_unlock(proc);
1727 
1728 	if (delete_ref)
1729 		binder_free_ref(ref);
1730 	return ret;
1731 
1732 err_no_ref:
1733 	binder_proc_unlock(proc);
1734 	return ret;
1735 }
1736 
1737 /**
1738  * binder_dec_ref_for_handle() - dec the ref for given handle
1739  * @proc:	proc containing the ref
1740  * @desc:	the handle associated with the ref
1741  * @strong:	true=strong reference, false=weak reference
1742  * @rdata:	the id/refcount data for the ref
1743  *
1744  * Just calls binder_update_ref_for_handle() to decrement the ref.
1745  *
1746  * Return: 0 if successful, else errno
1747  */
1748 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1749 		uint32_t desc, bool strong, struct binder_ref_data *rdata)
1750 {
1751 	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1752 }
1753 
1754 
1755 /**
1756  * binder_inc_ref_for_node() - increment the ref for given proc/node
1757  * @proc:	 proc containing the ref
1758  * @node:	 target node
1759  * @strong:	 true=strong reference, false=weak reference
1760  * @target_list: worklist to use if node is incremented
1761  * @rdata:	 the id/refcount data for the ref
1762  *
1763  * Given a proc and node, increment the ref. Create the ref if it
1764  * doesn't already exist
1765  *
1766  * Return: 0 if successful, else errno
1767  */
1768 static int binder_inc_ref_for_node(struct binder_proc *proc,
1769 			struct binder_node *node,
1770 			bool strong,
1771 			struct list_head *target_list,
1772 			struct binder_ref_data *rdata)
1773 {
1774 	struct binder_ref *ref;
1775 	struct binder_ref *new_ref = NULL;
1776 	int ret = 0;
1777 
1778 	binder_proc_lock(proc);
1779 	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1780 	if (!ref) {
1781 		binder_proc_unlock(proc);
1782 		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1783 		if (!new_ref)
1784 			return -ENOMEM;
1785 		binder_proc_lock(proc);
1786 		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1787 	}
1788 	ret = binder_inc_ref_olocked(ref, strong, target_list);
1789 	*rdata = ref->data;
1790 	binder_proc_unlock(proc);
1791 	if (new_ref && ref != new_ref)
1792 		/*
1793 		 * Another thread created the ref first so
1794 		 * free the one we allocated
1795 		 */
1796 		kfree(new_ref);
1797 	return ret;
1798 }
1799 
1800 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1801 					   struct binder_transaction *t)
1802 {
1803 	BUG_ON(!target_thread);
1804 	assert_spin_locked(&target_thread->proc->inner_lock);
1805 	BUG_ON(target_thread->transaction_stack != t);
1806 	BUG_ON(target_thread->transaction_stack->from != target_thread);
1807 	target_thread->transaction_stack =
1808 		target_thread->transaction_stack->from_parent;
1809 	t->from = NULL;
1810 }
1811 
1812 /**
1813  * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1814  * @thread:	thread to decrement
1815  *
1816  * A thread needs to be kept alive while being used to create or
1817  * handle a transaction. binder_get_txn_from() is used to safely
1818  * extract t->from from a binder_transaction and keep the thread
1819  * indicated by t->from from being freed. When done with that
1820  * binder_thread, this function is called to decrement the
1821  * tmp_ref and free if appropriate (thread has been released
1822  * and no transaction being processed by the driver)
1823  */
1824 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1825 {
1826 	/*
1827 	 * atomic is used to protect the counter value while
1828 	 * it cannot reach zero or thread->is_dead is false
1829 	 */
1830 	binder_inner_proc_lock(thread->proc);
1831 	atomic_dec(&thread->tmp_ref);
1832 	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1833 		binder_inner_proc_unlock(thread->proc);
1834 		binder_free_thread(thread);
1835 		return;
1836 	}
1837 	binder_inner_proc_unlock(thread->proc);
1838 }
1839 
1840 /**
1841  * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1842  * @proc:	proc to decrement
1843  *
1844  * A binder_proc needs to be kept alive while being used to create or
1845  * handle a transaction. proc->tmp_ref is incremented when
1846  * creating a new transaction or the binder_proc is currently in-use
1847  * by threads that are being released. When done with the binder_proc,
1848  * this function is called to decrement the counter and free the
1849  * proc if appropriate (proc has been released, all threads have
1850  * been released and not currenly in-use to process a transaction).
1851  */
1852 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1853 {
1854 	binder_inner_proc_lock(proc);
1855 	proc->tmp_ref--;
1856 	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1857 			!proc->tmp_ref) {
1858 		binder_inner_proc_unlock(proc);
1859 		binder_free_proc(proc);
1860 		return;
1861 	}
1862 	binder_inner_proc_unlock(proc);
1863 }
1864 
1865 /**
1866  * binder_get_txn_from() - safely extract the "from" thread in transaction
1867  * @t:	binder transaction for t->from
1868  *
1869  * Atomically return the "from" thread and increment the tmp_ref
1870  * count for the thread to ensure it stays alive until
1871  * binder_thread_dec_tmpref() is called.
1872  *
1873  * Return: the value of t->from
1874  */
1875 static struct binder_thread *binder_get_txn_from(
1876 		struct binder_transaction *t)
1877 {
1878 	struct binder_thread *from;
1879 
1880 	spin_lock(&t->lock);
1881 	from = t->from;
1882 	if (from)
1883 		atomic_inc(&from->tmp_ref);
1884 	spin_unlock(&t->lock);
1885 	return from;
1886 }
1887 
1888 /**
1889  * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1890  * @t:	binder transaction for t->from
1891  *
1892  * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1893  * to guarantee that the thread cannot be released while operating on it.
1894  * The caller must call binder_inner_proc_unlock() to release the inner lock
1895  * as well as call binder_dec_thread_txn() to release the reference.
1896  *
1897  * Return: the value of t->from
1898  */
1899 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1900 		struct binder_transaction *t)
1901 	__acquires(&t->from->proc->inner_lock)
1902 {
1903 	struct binder_thread *from;
1904 
1905 	from = binder_get_txn_from(t);
1906 	if (!from) {
1907 		__acquire(&from->proc->inner_lock);
1908 		return NULL;
1909 	}
1910 	binder_inner_proc_lock(from->proc);
1911 	if (t->from) {
1912 		BUG_ON(from != t->from);
1913 		return from;
1914 	}
1915 	binder_inner_proc_unlock(from->proc);
1916 	__acquire(&from->proc->inner_lock);
1917 	binder_thread_dec_tmpref(from);
1918 	return NULL;
1919 }
1920 
1921 /**
1922  * binder_free_txn_fixups() - free unprocessed fd fixups
1923  * @t:	binder transaction for t->from
1924  *
1925  * If the transaction is being torn down prior to being
1926  * processed by the target process, free all of the
1927  * fd fixups and fput the file structs. It is safe to
1928  * call this function after the fixups have been
1929  * processed -- in that case, the list will be empty.
1930  */
1931 static void binder_free_txn_fixups(struct binder_transaction *t)
1932 {
1933 	struct binder_txn_fd_fixup *fixup, *tmp;
1934 
1935 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1936 		fput(fixup->file);
1937 		list_del(&fixup->fixup_entry);
1938 		kfree(fixup);
1939 	}
1940 }
1941 
1942 static void binder_free_transaction(struct binder_transaction *t)
1943 {
1944 	struct binder_proc *target_proc = t->to_proc;
1945 
1946 	if (target_proc) {
1947 		binder_inner_proc_lock(target_proc);
1948 		if (t->buffer)
1949 			t->buffer->transaction = NULL;
1950 		binder_inner_proc_unlock(target_proc);
1951 	}
1952 	/*
1953 	 * If the transaction has no target_proc, then
1954 	 * t->buffer->transaction has already been cleared.
1955 	 */
1956 	binder_free_txn_fixups(t);
1957 	kfree(t);
1958 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
1959 }
1960 
1961 static void binder_send_failed_reply(struct binder_transaction *t,
1962 				     uint32_t error_code)
1963 {
1964 	struct binder_thread *target_thread;
1965 	struct binder_transaction *next;
1966 
1967 	BUG_ON(t->flags & TF_ONE_WAY);
1968 	while (1) {
1969 		target_thread = binder_get_txn_from_and_acq_inner(t);
1970 		if (target_thread) {
1971 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1972 				     "send failed reply for transaction %d to %d:%d\n",
1973 				      t->debug_id,
1974 				      target_thread->proc->pid,
1975 				      target_thread->pid);
1976 
1977 			binder_pop_transaction_ilocked(target_thread, t);
1978 			if (target_thread->reply_error.cmd == BR_OK) {
1979 				target_thread->reply_error.cmd = error_code;
1980 				binder_enqueue_thread_work_ilocked(
1981 					target_thread,
1982 					&target_thread->reply_error.work);
1983 				wake_up_interruptible(&target_thread->wait);
1984 			} else {
1985 				/*
1986 				 * Cannot get here for normal operation, but
1987 				 * we can if multiple synchronous transactions
1988 				 * are sent without blocking for responses.
1989 				 * Just ignore the 2nd error in this case.
1990 				 */
1991 				pr_warn("Unexpected reply error: %u\n",
1992 					target_thread->reply_error.cmd);
1993 			}
1994 			binder_inner_proc_unlock(target_thread->proc);
1995 			binder_thread_dec_tmpref(target_thread);
1996 			binder_free_transaction(t);
1997 			return;
1998 		} else {
1999 			__release(&target_thread->proc->inner_lock);
2000 		}
2001 		next = t->from_parent;
2002 
2003 		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2004 			     "send failed reply for transaction %d, target dead\n",
2005 			     t->debug_id);
2006 
2007 		binder_free_transaction(t);
2008 		if (next == NULL) {
2009 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
2010 				     "reply failed, no target thread at root\n");
2011 			return;
2012 		}
2013 		t = next;
2014 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
2015 			     "reply failed, no target thread -- retry %d\n",
2016 			      t->debug_id);
2017 	}
2018 }
2019 
2020 /**
2021  * binder_cleanup_transaction() - cleans up undelivered transaction
2022  * @t:		transaction that needs to be cleaned up
2023  * @reason:	reason the transaction wasn't delivered
2024  * @error_code:	error to return to caller (if synchronous call)
2025  */
2026 static void binder_cleanup_transaction(struct binder_transaction *t,
2027 				       const char *reason,
2028 				       uint32_t error_code)
2029 {
2030 	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2031 		binder_send_failed_reply(t, error_code);
2032 	} else {
2033 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2034 			"undelivered transaction %d, %s\n",
2035 			t->debug_id, reason);
2036 		binder_free_transaction(t);
2037 	}
2038 }
2039 
2040 /**
2041  * binder_get_object() - gets object and checks for valid metadata
2042  * @proc:	binder_proc owning the buffer
2043  * @buffer:	binder_buffer that we're parsing.
2044  * @offset:	offset in the @buffer at which to validate an object.
2045  * @object:	struct binder_object to read into
2046  *
2047  * Return:	If there's a valid metadata object at @offset in @buffer, the
2048  *		size of that object. Otherwise, it returns zero. The object
2049  *		is read into the struct binder_object pointed to by @object.
2050  */
2051 static size_t binder_get_object(struct binder_proc *proc,
2052 				struct binder_buffer *buffer,
2053 				unsigned long offset,
2054 				struct binder_object *object)
2055 {
2056 	size_t read_size;
2057 	struct binder_object_header *hdr;
2058 	size_t object_size = 0;
2059 
2060 	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
2061 	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
2062 	    !IS_ALIGNED(offset, sizeof(u32)))
2063 		return 0;
2064 	binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
2065 				      offset, read_size);
2066 
2067 	/* Ok, now see if we read a complete object. */
2068 	hdr = &object->hdr;
2069 	switch (hdr->type) {
2070 	case BINDER_TYPE_BINDER:
2071 	case BINDER_TYPE_WEAK_BINDER:
2072 	case BINDER_TYPE_HANDLE:
2073 	case BINDER_TYPE_WEAK_HANDLE:
2074 		object_size = sizeof(struct flat_binder_object);
2075 		break;
2076 	case BINDER_TYPE_FD:
2077 		object_size = sizeof(struct binder_fd_object);
2078 		break;
2079 	case BINDER_TYPE_PTR:
2080 		object_size = sizeof(struct binder_buffer_object);
2081 		break;
2082 	case BINDER_TYPE_FDA:
2083 		object_size = sizeof(struct binder_fd_array_object);
2084 		break;
2085 	default:
2086 		return 0;
2087 	}
2088 	if (offset <= buffer->data_size - object_size &&
2089 	    buffer->data_size >= object_size)
2090 		return object_size;
2091 	else
2092 		return 0;
2093 }
2094 
2095 /**
2096  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2097  * @proc:	binder_proc owning the buffer
2098  * @b:		binder_buffer containing the object
2099  * @object:	struct binder_object to read into
2100  * @index:	index in offset array at which the binder_buffer_object is
2101  *		located
2102  * @start_offset: points to the start of the offset array
2103  * @object_offsetp: offset of @object read from @b
2104  * @num_valid:	the number of valid offsets in the offset array
2105  *
2106  * Return:	If @index is within the valid range of the offset array
2107  *		described by @start and @num_valid, and if there's a valid
2108  *		binder_buffer_object at the offset found in index @index
2109  *		of the offset array, that object is returned. Otherwise,
2110  *		%NULL is returned.
2111  *		Note that the offset found in index @index itself is not
2112  *		verified; this function assumes that @num_valid elements
2113  *		from @start were previously verified to have valid offsets.
2114  *		If @object_offsetp is non-NULL, then the offset within
2115  *		@b is written to it.
2116  */
2117 static struct binder_buffer_object *binder_validate_ptr(
2118 						struct binder_proc *proc,
2119 						struct binder_buffer *b,
2120 						struct binder_object *object,
2121 						binder_size_t index,
2122 						binder_size_t start_offset,
2123 						binder_size_t *object_offsetp,
2124 						binder_size_t num_valid)
2125 {
2126 	size_t object_size;
2127 	binder_size_t object_offset;
2128 	unsigned long buffer_offset;
2129 
2130 	if (index >= num_valid)
2131 		return NULL;
2132 
2133 	buffer_offset = start_offset + sizeof(binder_size_t) * index;
2134 	binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2135 				      b, buffer_offset, sizeof(object_offset));
2136 	object_size = binder_get_object(proc, b, object_offset, object);
2137 	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
2138 		return NULL;
2139 	if (object_offsetp)
2140 		*object_offsetp = object_offset;
2141 
2142 	return &object->bbo;
2143 }
2144 
2145 /**
2146  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2147  * @proc:		binder_proc owning the buffer
2148  * @b:			transaction buffer
2149  * @objects_start_offset: offset to start of objects buffer
2150  * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
2151  * @fixup_offset:	start offset in @buffer to fix up
2152  * @last_obj_offset:	offset to last binder_buffer_object that we fixed
2153  * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
2154  *
2155  * Return:		%true if a fixup in buffer @buffer at offset @offset is
2156  *			allowed.
2157  *
2158  * For safety reasons, we only allow fixups inside a buffer to happen
2159  * at increasing offsets; additionally, we only allow fixup on the last
2160  * buffer object that was verified, or one of its parents.
2161  *
2162  * Example of what is allowed:
2163  *
2164  * A
2165  *   B (parent = A, offset = 0)
2166  *   C (parent = A, offset = 16)
2167  *     D (parent = C, offset = 0)
2168  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2169  *
2170  * Examples of what is not allowed:
2171  *
2172  * Decreasing offsets within the same parent:
2173  * A
2174  *   C (parent = A, offset = 16)
2175  *   B (parent = A, offset = 0) // decreasing offset within A
2176  *
2177  * Referring to a parent that wasn't the last object or any of its parents:
2178  * A
2179  *   B (parent = A, offset = 0)
2180  *   C (parent = A, offset = 0)
2181  *   C (parent = A, offset = 16)
2182  *     D (parent = B, offset = 0) // B is not A or any of A's parents
2183  */
2184 static bool binder_validate_fixup(struct binder_proc *proc,
2185 				  struct binder_buffer *b,
2186 				  binder_size_t objects_start_offset,
2187 				  binder_size_t buffer_obj_offset,
2188 				  binder_size_t fixup_offset,
2189 				  binder_size_t last_obj_offset,
2190 				  binder_size_t last_min_offset)
2191 {
2192 	if (!last_obj_offset) {
2193 		/* Nothing to fix up in */
2194 		return false;
2195 	}
2196 
2197 	while (last_obj_offset != buffer_obj_offset) {
2198 		unsigned long buffer_offset;
2199 		struct binder_object last_object;
2200 		struct binder_buffer_object *last_bbo;
2201 		size_t object_size = binder_get_object(proc, b, last_obj_offset,
2202 						       &last_object);
2203 		if (object_size != sizeof(*last_bbo))
2204 			return false;
2205 
2206 		last_bbo = &last_object.bbo;
2207 		/*
2208 		 * Safe to retrieve the parent of last_obj, since it
2209 		 * was already previously verified by the driver.
2210 		 */
2211 		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2212 			return false;
2213 		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
2214 		buffer_offset = objects_start_offset +
2215 			sizeof(binder_size_t) * last_bbo->parent,
2216 		binder_alloc_copy_from_buffer(&proc->alloc, &last_obj_offset,
2217 					      b, buffer_offset,
2218 					      sizeof(last_obj_offset));
2219 	}
2220 	return (fixup_offset >= last_min_offset);
2221 }
2222 
2223 /**
2224  * struct binder_task_work_cb - for deferred close
2225  *
2226  * @twork:                callback_head for task work
2227  * @fd:                   fd to close
2228  *
2229  * Structure to pass task work to be handled after
2230  * returning from binder_ioctl() via task_work_add().
2231  */
2232 struct binder_task_work_cb {
2233 	struct callback_head twork;
2234 	struct file *file;
2235 };
2236 
2237 /**
2238  * binder_do_fd_close() - close list of file descriptors
2239  * @twork:	callback head for task work
2240  *
2241  * It is not safe to call ksys_close() during the binder_ioctl()
2242  * function if there is a chance that binder's own file descriptor
2243  * might be closed. This is to meet the requirements for using
2244  * fdget() (see comments for __fget_light()). Therefore use
2245  * task_work_add() to schedule the close operation once we have
2246  * returned from binder_ioctl(). This function is a callback
2247  * for that mechanism and does the actual ksys_close() on the
2248  * given file descriptor.
2249  */
2250 static void binder_do_fd_close(struct callback_head *twork)
2251 {
2252 	struct binder_task_work_cb *twcb = container_of(twork,
2253 			struct binder_task_work_cb, twork);
2254 
2255 	fput(twcb->file);
2256 	kfree(twcb);
2257 }
2258 
2259 /**
2260  * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2261  * @fd:		file-descriptor to close
2262  *
2263  * See comments in binder_do_fd_close(). This function is used to schedule
2264  * a file-descriptor to be closed after returning from binder_ioctl().
2265  */
2266 static void binder_deferred_fd_close(int fd)
2267 {
2268 	struct binder_task_work_cb *twcb;
2269 
2270 	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2271 	if (!twcb)
2272 		return;
2273 	init_task_work(&twcb->twork, binder_do_fd_close);
2274 	__close_fd_get_file(fd, &twcb->file);
2275 	if (twcb->file)
2276 		task_work_add(current, &twcb->twork, true);
2277 	else
2278 		kfree(twcb);
2279 }
2280 
2281 static void binder_transaction_buffer_release(struct binder_proc *proc,
2282 					      struct binder_buffer *buffer,
2283 					      binder_size_t failed_at,
2284 					      bool is_failure)
2285 {
2286 	int debug_id = buffer->debug_id;
2287 	binder_size_t off_start_offset, buffer_offset, off_end_offset;
2288 
2289 	binder_debug(BINDER_DEBUG_TRANSACTION,
2290 		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2291 		     proc->pid, buffer->debug_id,
2292 		     buffer->data_size, buffer->offsets_size,
2293 		     (unsigned long long)failed_at);
2294 
2295 	if (buffer->target_node)
2296 		binder_dec_node(buffer->target_node, 1, 0);
2297 
2298 	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2299 	off_end_offset = is_failure ? failed_at :
2300 				off_start_offset + buffer->offsets_size;
2301 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2302 	     buffer_offset += sizeof(binder_size_t)) {
2303 		struct binder_object_header *hdr;
2304 		size_t object_size;
2305 		struct binder_object object;
2306 		binder_size_t object_offset;
2307 
2308 		binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2309 					      buffer, buffer_offset,
2310 					      sizeof(object_offset));
2311 		object_size = binder_get_object(proc, buffer,
2312 						object_offset, &object);
2313 		if (object_size == 0) {
2314 			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2315 			       debug_id, (u64)object_offset, buffer->data_size);
2316 			continue;
2317 		}
2318 		hdr = &object.hdr;
2319 		switch (hdr->type) {
2320 		case BINDER_TYPE_BINDER:
2321 		case BINDER_TYPE_WEAK_BINDER: {
2322 			struct flat_binder_object *fp;
2323 			struct binder_node *node;
2324 
2325 			fp = to_flat_binder_object(hdr);
2326 			node = binder_get_node(proc, fp->binder);
2327 			if (node == NULL) {
2328 				pr_err("transaction release %d bad node %016llx\n",
2329 				       debug_id, (u64)fp->binder);
2330 				break;
2331 			}
2332 			binder_debug(BINDER_DEBUG_TRANSACTION,
2333 				     "        node %d u%016llx\n",
2334 				     node->debug_id, (u64)node->ptr);
2335 			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2336 					0);
2337 			binder_put_node(node);
2338 		} break;
2339 		case BINDER_TYPE_HANDLE:
2340 		case BINDER_TYPE_WEAK_HANDLE: {
2341 			struct flat_binder_object *fp;
2342 			struct binder_ref_data rdata;
2343 			int ret;
2344 
2345 			fp = to_flat_binder_object(hdr);
2346 			ret = binder_dec_ref_for_handle(proc, fp->handle,
2347 				hdr->type == BINDER_TYPE_HANDLE, &rdata);
2348 
2349 			if (ret) {
2350 				pr_err("transaction release %d bad handle %d, ret = %d\n",
2351 				 debug_id, fp->handle, ret);
2352 				break;
2353 			}
2354 			binder_debug(BINDER_DEBUG_TRANSACTION,
2355 				     "        ref %d desc %d\n",
2356 				     rdata.debug_id, rdata.desc);
2357 		} break;
2358 
2359 		case BINDER_TYPE_FD: {
2360 			/*
2361 			 * No need to close the file here since user-space
2362 			 * closes it for for successfully delivered
2363 			 * transactions. For transactions that weren't
2364 			 * delivered, the new fd was never allocated so
2365 			 * there is no need to close and the fput on the
2366 			 * file is done when the transaction is torn
2367 			 * down.
2368 			 */
2369 			WARN_ON(failed_at &&
2370 				proc->tsk == current->group_leader);
2371 		} break;
2372 		case BINDER_TYPE_PTR:
2373 			/*
2374 			 * Nothing to do here, this will get cleaned up when the
2375 			 * transaction buffer gets freed
2376 			 */
2377 			break;
2378 		case BINDER_TYPE_FDA: {
2379 			struct binder_fd_array_object *fda;
2380 			struct binder_buffer_object *parent;
2381 			struct binder_object ptr_object;
2382 			binder_size_t fda_offset;
2383 			size_t fd_index;
2384 			binder_size_t fd_buf_size;
2385 			binder_size_t num_valid;
2386 
2387 			if (proc->tsk != current->group_leader) {
2388 				/*
2389 				 * Nothing to do if running in sender context
2390 				 * The fd fixups have not been applied so no
2391 				 * fds need to be closed.
2392 				 */
2393 				continue;
2394 			}
2395 
2396 			num_valid = (buffer_offset - off_start_offset) /
2397 						sizeof(binder_size_t);
2398 			fda = to_binder_fd_array_object(hdr);
2399 			parent = binder_validate_ptr(proc, buffer, &ptr_object,
2400 						     fda->parent,
2401 						     off_start_offset,
2402 						     NULL,
2403 						     num_valid);
2404 			if (!parent) {
2405 				pr_err("transaction release %d bad parent offset\n",
2406 				       debug_id);
2407 				continue;
2408 			}
2409 			fd_buf_size = sizeof(u32) * fda->num_fds;
2410 			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2411 				pr_err("transaction release %d invalid number of fds (%lld)\n",
2412 				       debug_id, (u64)fda->num_fds);
2413 				continue;
2414 			}
2415 			if (fd_buf_size > parent->length ||
2416 			    fda->parent_offset > parent->length - fd_buf_size) {
2417 				/* No space for all file descriptors here. */
2418 				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2419 				       debug_id, (u64)fda->num_fds);
2420 				continue;
2421 			}
2422 			/*
2423 			 * the source data for binder_buffer_object is visible
2424 			 * to user-space and the @buffer element is the user
2425 			 * pointer to the buffer_object containing the fd_array.
2426 			 * Convert the address to an offset relative to
2427 			 * the base of the transaction buffer.
2428 			 */
2429 			fda_offset =
2430 			    (parent->buffer - (uintptr_t)buffer->user_data) +
2431 			    fda->parent_offset;
2432 			for (fd_index = 0; fd_index < fda->num_fds;
2433 			     fd_index++) {
2434 				u32 fd;
2435 				binder_size_t offset = fda_offset +
2436 					fd_index * sizeof(fd);
2437 
2438 				binder_alloc_copy_from_buffer(&proc->alloc,
2439 							      &fd,
2440 							      buffer,
2441 							      offset,
2442 							      sizeof(fd));
2443 				binder_deferred_fd_close(fd);
2444 			}
2445 		} break;
2446 		default:
2447 			pr_err("transaction release %d bad object type %x\n",
2448 				debug_id, hdr->type);
2449 			break;
2450 		}
2451 	}
2452 }
2453 
2454 static int binder_translate_binder(struct flat_binder_object *fp,
2455 				   struct binder_transaction *t,
2456 				   struct binder_thread *thread)
2457 {
2458 	struct binder_node *node;
2459 	struct binder_proc *proc = thread->proc;
2460 	struct binder_proc *target_proc = t->to_proc;
2461 	struct binder_ref_data rdata;
2462 	int ret = 0;
2463 
2464 	node = binder_get_node(proc, fp->binder);
2465 	if (!node) {
2466 		node = binder_new_node(proc, fp);
2467 		if (!node)
2468 			return -ENOMEM;
2469 	}
2470 	if (fp->cookie != node->cookie) {
2471 		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2472 				  proc->pid, thread->pid, (u64)fp->binder,
2473 				  node->debug_id, (u64)fp->cookie,
2474 				  (u64)node->cookie);
2475 		ret = -EINVAL;
2476 		goto done;
2477 	}
2478 	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2479 		ret = -EPERM;
2480 		goto done;
2481 	}
2482 
2483 	ret = binder_inc_ref_for_node(target_proc, node,
2484 			fp->hdr.type == BINDER_TYPE_BINDER,
2485 			&thread->todo, &rdata);
2486 	if (ret)
2487 		goto done;
2488 
2489 	if (fp->hdr.type == BINDER_TYPE_BINDER)
2490 		fp->hdr.type = BINDER_TYPE_HANDLE;
2491 	else
2492 		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2493 	fp->binder = 0;
2494 	fp->handle = rdata.desc;
2495 	fp->cookie = 0;
2496 
2497 	trace_binder_transaction_node_to_ref(t, node, &rdata);
2498 	binder_debug(BINDER_DEBUG_TRANSACTION,
2499 		     "        node %d u%016llx -> ref %d desc %d\n",
2500 		     node->debug_id, (u64)node->ptr,
2501 		     rdata.debug_id, rdata.desc);
2502 done:
2503 	binder_put_node(node);
2504 	return ret;
2505 }
2506 
2507 static int binder_translate_handle(struct flat_binder_object *fp,
2508 				   struct binder_transaction *t,
2509 				   struct binder_thread *thread)
2510 {
2511 	struct binder_proc *proc = thread->proc;
2512 	struct binder_proc *target_proc = t->to_proc;
2513 	struct binder_node *node;
2514 	struct binder_ref_data src_rdata;
2515 	int ret = 0;
2516 
2517 	node = binder_get_node_from_ref(proc, fp->handle,
2518 			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2519 	if (!node) {
2520 		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2521 				  proc->pid, thread->pid, fp->handle);
2522 		return -EINVAL;
2523 	}
2524 	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2525 		ret = -EPERM;
2526 		goto done;
2527 	}
2528 
2529 	binder_node_lock(node);
2530 	if (node->proc == target_proc) {
2531 		if (fp->hdr.type == BINDER_TYPE_HANDLE)
2532 			fp->hdr.type = BINDER_TYPE_BINDER;
2533 		else
2534 			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2535 		fp->binder = node->ptr;
2536 		fp->cookie = node->cookie;
2537 		if (node->proc)
2538 			binder_inner_proc_lock(node->proc);
2539 		else
2540 			__acquire(&node->proc->inner_lock);
2541 		binder_inc_node_nilocked(node,
2542 					 fp->hdr.type == BINDER_TYPE_BINDER,
2543 					 0, NULL);
2544 		if (node->proc)
2545 			binder_inner_proc_unlock(node->proc);
2546 		else
2547 			__release(&node->proc->inner_lock);
2548 		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2549 		binder_debug(BINDER_DEBUG_TRANSACTION,
2550 			     "        ref %d desc %d -> node %d u%016llx\n",
2551 			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
2552 			     (u64)node->ptr);
2553 		binder_node_unlock(node);
2554 	} else {
2555 		struct binder_ref_data dest_rdata;
2556 
2557 		binder_node_unlock(node);
2558 		ret = binder_inc_ref_for_node(target_proc, node,
2559 				fp->hdr.type == BINDER_TYPE_HANDLE,
2560 				NULL, &dest_rdata);
2561 		if (ret)
2562 			goto done;
2563 
2564 		fp->binder = 0;
2565 		fp->handle = dest_rdata.desc;
2566 		fp->cookie = 0;
2567 		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2568 						    &dest_rdata);
2569 		binder_debug(BINDER_DEBUG_TRANSACTION,
2570 			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2571 			     src_rdata.debug_id, src_rdata.desc,
2572 			     dest_rdata.debug_id, dest_rdata.desc,
2573 			     node->debug_id);
2574 	}
2575 done:
2576 	binder_put_node(node);
2577 	return ret;
2578 }
2579 
2580 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2581 			       struct binder_transaction *t,
2582 			       struct binder_thread *thread,
2583 			       struct binder_transaction *in_reply_to)
2584 {
2585 	struct binder_proc *proc = thread->proc;
2586 	struct binder_proc *target_proc = t->to_proc;
2587 	struct binder_txn_fd_fixup *fixup;
2588 	struct file *file;
2589 	int ret = 0;
2590 	bool target_allows_fd;
2591 
2592 	if (in_reply_to)
2593 		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2594 	else
2595 		target_allows_fd = t->buffer->target_node->accept_fds;
2596 	if (!target_allows_fd) {
2597 		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2598 				  proc->pid, thread->pid,
2599 				  in_reply_to ? "reply" : "transaction",
2600 				  fd);
2601 		ret = -EPERM;
2602 		goto err_fd_not_accepted;
2603 	}
2604 
2605 	file = fget(fd);
2606 	if (!file) {
2607 		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2608 				  proc->pid, thread->pid, fd);
2609 		ret = -EBADF;
2610 		goto err_fget;
2611 	}
2612 	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2613 	if (ret < 0) {
2614 		ret = -EPERM;
2615 		goto err_security;
2616 	}
2617 
2618 	/*
2619 	 * Add fixup record for this transaction. The allocation
2620 	 * of the fd in the target needs to be done from a
2621 	 * target thread.
2622 	 */
2623 	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2624 	if (!fixup) {
2625 		ret = -ENOMEM;
2626 		goto err_alloc;
2627 	}
2628 	fixup->file = file;
2629 	fixup->offset = fd_offset;
2630 	trace_binder_transaction_fd_send(t, fd, fixup->offset);
2631 	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2632 
2633 	return ret;
2634 
2635 err_alloc:
2636 err_security:
2637 	fput(file);
2638 err_fget:
2639 err_fd_not_accepted:
2640 	return ret;
2641 }
2642 
2643 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2644 				     struct binder_buffer_object *parent,
2645 				     struct binder_transaction *t,
2646 				     struct binder_thread *thread,
2647 				     struct binder_transaction *in_reply_to)
2648 {
2649 	binder_size_t fdi, fd_buf_size;
2650 	binder_size_t fda_offset;
2651 	struct binder_proc *proc = thread->proc;
2652 	struct binder_proc *target_proc = t->to_proc;
2653 
2654 	fd_buf_size = sizeof(u32) * fda->num_fds;
2655 	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2656 		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2657 				  proc->pid, thread->pid, (u64)fda->num_fds);
2658 		return -EINVAL;
2659 	}
2660 	if (fd_buf_size > parent->length ||
2661 	    fda->parent_offset > parent->length - fd_buf_size) {
2662 		/* No space for all file descriptors here. */
2663 		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2664 				  proc->pid, thread->pid, (u64)fda->num_fds);
2665 		return -EINVAL;
2666 	}
2667 	/*
2668 	 * the source data for binder_buffer_object is visible
2669 	 * to user-space and the @buffer element is the user
2670 	 * pointer to the buffer_object containing the fd_array.
2671 	 * Convert the address to an offset relative to
2672 	 * the base of the transaction buffer.
2673 	 */
2674 	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2675 		fda->parent_offset;
2676 	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
2677 		binder_user_error("%d:%d parent offset not aligned correctly.\n",
2678 				  proc->pid, thread->pid);
2679 		return -EINVAL;
2680 	}
2681 	for (fdi = 0; fdi < fda->num_fds; fdi++) {
2682 		u32 fd;
2683 		int ret;
2684 		binder_size_t offset = fda_offset + fdi * sizeof(fd);
2685 
2686 		binder_alloc_copy_from_buffer(&target_proc->alloc,
2687 					      &fd, t->buffer,
2688 					      offset, sizeof(fd));
2689 		ret = binder_translate_fd(fd, offset, t, thread,
2690 					  in_reply_to);
2691 		if (ret < 0)
2692 			return ret;
2693 	}
2694 	return 0;
2695 }
2696 
2697 static int binder_fixup_parent(struct binder_transaction *t,
2698 			       struct binder_thread *thread,
2699 			       struct binder_buffer_object *bp,
2700 			       binder_size_t off_start_offset,
2701 			       binder_size_t num_valid,
2702 			       binder_size_t last_fixup_obj_off,
2703 			       binder_size_t last_fixup_min_off)
2704 {
2705 	struct binder_buffer_object *parent;
2706 	struct binder_buffer *b = t->buffer;
2707 	struct binder_proc *proc = thread->proc;
2708 	struct binder_proc *target_proc = t->to_proc;
2709 	struct binder_object object;
2710 	binder_size_t buffer_offset;
2711 	binder_size_t parent_offset;
2712 
2713 	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2714 		return 0;
2715 
2716 	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2717 				     off_start_offset, &parent_offset,
2718 				     num_valid);
2719 	if (!parent) {
2720 		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2721 				  proc->pid, thread->pid);
2722 		return -EINVAL;
2723 	}
2724 
2725 	if (!binder_validate_fixup(target_proc, b, off_start_offset,
2726 				   parent_offset, bp->parent_offset,
2727 				   last_fixup_obj_off,
2728 				   last_fixup_min_off)) {
2729 		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2730 				  proc->pid, thread->pid);
2731 		return -EINVAL;
2732 	}
2733 
2734 	if (parent->length < sizeof(binder_uintptr_t) ||
2735 	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2736 		/* No space for a pointer here! */
2737 		binder_user_error("%d:%d got transaction with invalid parent offset\n",
2738 				  proc->pid, thread->pid);
2739 		return -EINVAL;
2740 	}
2741 	buffer_offset = bp->parent_offset +
2742 			(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2743 	binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
2744 				    &bp->buffer, sizeof(bp->buffer));
2745 
2746 	return 0;
2747 }
2748 
2749 /**
2750  * binder_proc_transaction() - sends a transaction to a process and wakes it up
2751  * @t:		transaction to send
2752  * @proc:	process to send the transaction to
2753  * @thread:	thread in @proc to send the transaction to (may be NULL)
2754  *
2755  * This function queues a transaction to the specified process. It will try
2756  * to find a thread in the target process to handle the transaction and
2757  * wake it up. If no thread is found, the work is queued to the proc
2758  * waitqueue.
2759  *
2760  * If the @thread parameter is not NULL, the transaction is always queued
2761  * to the waitlist of that specific thread.
2762  *
2763  * Return:	true if the transactions was successfully queued
2764  *		false if the target process or thread is dead
2765  */
2766 static bool binder_proc_transaction(struct binder_transaction *t,
2767 				    struct binder_proc *proc,
2768 				    struct binder_thread *thread)
2769 {
2770 	struct binder_node *node = t->buffer->target_node;
2771 	bool oneway = !!(t->flags & TF_ONE_WAY);
2772 	bool pending_async = false;
2773 
2774 	BUG_ON(!node);
2775 	binder_node_lock(node);
2776 	if (oneway) {
2777 		BUG_ON(thread);
2778 		if (node->has_async_transaction) {
2779 			pending_async = true;
2780 		} else {
2781 			node->has_async_transaction = true;
2782 		}
2783 	}
2784 
2785 	binder_inner_proc_lock(proc);
2786 
2787 	if (proc->is_dead || (thread && thread->is_dead)) {
2788 		binder_inner_proc_unlock(proc);
2789 		binder_node_unlock(node);
2790 		return false;
2791 	}
2792 
2793 	if (!thread && !pending_async)
2794 		thread = binder_select_thread_ilocked(proc);
2795 
2796 	if (thread)
2797 		binder_enqueue_thread_work_ilocked(thread, &t->work);
2798 	else if (!pending_async)
2799 		binder_enqueue_work_ilocked(&t->work, &proc->todo);
2800 	else
2801 		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2802 
2803 	if (!pending_async)
2804 		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2805 
2806 	binder_inner_proc_unlock(proc);
2807 	binder_node_unlock(node);
2808 
2809 	return true;
2810 }
2811 
2812 /**
2813  * binder_get_node_refs_for_txn() - Get required refs on node for txn
2814  * @node:         struct binder_node for which to get refs
2815  * @proc:         returns @node->proc if valid
2816  * @error:        if no @proc then returns BR_DEAD_REPLY
2817  *
2818  * User-space normally keeps the node alive when creating a transaction
2819  * since it has a reference to the target. The local strong ref keeps it
2820  * alive if the sending process dies before the target process processes
2821  * the transaction. If the source process is malicious or has a reference
2822  * counting bug, relying on the local strong ref can fail.
2823  *
2824  * Since user-space can cause the local strong ref to go away, we also take
2825  * a tmpref on the node to ensure it survives while we are constructing
2826  * the transaction. We also need a tmpref on the proc while we are
2827  * constructing the transaction, so we take that here as well.
2828  *
2829  * Return: The target_node with refs taken or NULL if no @node->proc is NULL.
2830  * Also sets @proc if valid. If the @node->proc is NULL indicating that the
2831  * target proc has died, @error is set to BR_DEAD_REPLY
2832  */
2833 static struct binder_node *binder_get_node_refs_for_txn(
2834 		struct binder_node *node,
2835 		struct binder_proc **procp,
2836 		uint32_t *error)
2837 {
2838 	struct binder_node *target_node = NULL;
2839 
2840 	binder_node_inner_lock(node);
2841 	if (node->proc) {
2842 		target_node = node;
2843 		binder_inc_node_nilocked(node, 1, 0, NULL);
2844 		binder_inc_node_tmpref_ilocked(node);
2845 		node->proc->tmp_ref++;
2846 		*procp = node->proc;
2847 	} else
2848 		*error = BR_DEAD_REPLY;
2849 	binder_node_inner_unlock(node);
2850 
2851 	return target_node;
2852 }
2853 
2854 static void binder_transaction(struct binder_proc *proc,
2855 			       struct binder_thread *thread,
2856 			       struct binder_transaction_data *tr, int reply,
2857 			       binder_size_t extra_buffers_size)
2858 {
2859 	int ret;
2860 	struct binder_transaction *t;
2861 	struct binder_work *w;
2862 	struct binder_work *tcomplete;
2863 	binder_size_t buffer_offset = 0;
2864 	binder_size_t off_start_offset, off_end_offset;
2865 	binder_size_t off_min;
2866 	binder_size_t sg_buf_offset, sg_buf_end_offset;
2867 	struct binder_proc *target_proc = NULL;
2868 	struct binder_thread *target_thread = NULL;
2869 	struct binder_node *target_node = NULL;
2870 	struct binder_transaction *in_reply_to = NULL;
2871 	struct binder_transaction_log_entry *e;
2872 	uint32_t return_error = 0;
2873 	uint32_t return_error_param = 0;
2874 	uint32_t return_error_line = 0;
2875 	binder_size_t last_fixup_obj_off = 0;
2876 	binder_size_t last_fixup_min_off = 0;
2877 	struct binder_context *context = proc->context;
2878 	int t_debug_id = atomic_inc_return(&binder_last_id);
2879 	char *secctx = NULL;
2880 	u32 secctx_sz = 0;
2881 
2882 	e = binder_transaction_log_add(&binder_transaction_log);
2883 	e->debug_id = t_debug_id;
2884 	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2885 	e->from_proc = proc->pid;
2886 	e->from_thread = thread->pid;
2887 	e->target_handle = tr->target.handle;
2888 	e->data_size = tr->data_size;
2889 	e->offsets_size = tr->offsets_size;
2890 	e->context_name = proc->context->name;
2891 
2892 	if (reply) {
2893 		binder_inner_proc_lock(proc);
2894 		in_reply_to = thread->transaction_stack;
2895 		if (in_reply_to == NULL) {
2896 			binder_inner_proc_unlock(proc);
2897 			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2898 					  proc->pid, thread->pid);
2899 			return_error = BR_FAILED_REPLY;
2900 			return_error_param = -EPROTO;
2901 			return_error_line = __LINE__;
2902 			goto err_empty_call_stack;
2903 		}
2904 		if (in_reply_to->to_thread != thread) {
2905 			spin_lock(&in_reply_to->lock);
2906 			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2907 				proc->pid, thread->pid, in_reply_to->debug_id,
2908 				in_reply_to->to_proc ?
2909 				in_reply_to->to_proc->pid : 0,
2910 				in_reply_to->to_thread ?
2911 				in_reply_to->to_thread->pid : 0);
2912 			spin_unlock(&in_reply_to->lock);
2913 			binder_inner_proc_unlock(proc);
2914 			return_error = BR_FAILED_REPLY;
2915 			return_error_param = -EPROTO;
2916 			return_error_line = __LINE__;
2917 			in_reply_to = NULL;
2918 			goto err_bad_call_stack;
2919 		}
2920 		thread->transaction_stack = in_reply_to->to_parent;
2921 		binder_inner_proc_unlock(proc);
2922 		binder_set_nice(in_reply_to->saved_priority);
2923 		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2924 		if (target_thread == NULL) {
2925 			/* annotation for sparse */
2926 			__release(&target_thread->proc->inner_lock);
2927 			return_error = BR_DEAD_REPLY;
2928 			return_error_line = __LINE__;
2929 			goto err_dead_binder;
2930 		}
2931 		if (target_thread->transaction_stack != in_reply_to) {
2932 			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2933 				proc->pid, thread->pid,
2934 				target_thread->transaction_stack ?
2935 				target_thread->transaction_stack->debug_id : 0,
2936 				in_reply_to->debug_id);
2937 			binder_inner_proc_unlock(target_thread->proc);
2938 			return_error = BR_FAILED_REPLY;
2939 			return_error_param = -EPROTO;
2940 			return_error_line = __LINE__;
2941 			in_reply_to = NULL;
2942 			target_thread = NULL;
2943 			goto err_dead_binder;
2944 		}
2945 		target_proc = target_thread->proc;
2946 		target_proc->tmp_ref++;
2947 		binder_inner_proc_unlock(target_thread->proc);
2948 	} else {
2949 		if (tr->target.handle) {
2950 			struct binder_ref *ref;
2951 
2952 			/*
2953 			 * There must already be a strong ref
2954 			 * on this node. If so, do a strong
2955 			 * increment on the node to ensure it
2956 			 * stays alive until the transaction is
2957 			 * done.
2958 			 */
2959 			binder_proc_lock(proc);
2960 			ref = binder_get_ref_olocked(proc, tr->target.handle,
2961 						     true);
2962 			if (ref) {
2963 				target_node = binder_get_node_refs_for_txn(
2964 						ref->node, &target_proc,
2965 						&return_error);
2966 			} else {
2967 				binder_user_error("%d:%d got transaction to invalid handle\n",
2968 						  proc->pid, thread->pid);
2969 				return_error = BR_FAILED_REPLY;
2970 			}
2971 			binder_proc_unlock(proc);
2972 		} else {
2973 			mutex_lock(&context->context_mgr_node_lock);
2974 			target_node = context->binder_context_mgr_node;
2975 			if (target_node)
2976 				target_node = binder_get_node_refs_for_txn(
2977 						target_node, &target_proc,
2978 						&return_error);
2979 			else
2980 				return_error = BR_DEAD_REPLY;
2981 			mutex_unlock(&context->context_mgr_node_lock);
2982 			if (target_node && target_proc == proc) {
2983 				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2984 						  proc->pid, thread->pid);
2985 				return_error = BR_FAILED_REPLY;
2986 				return_error_param = -EINVAL;
2987 				return_error_line = __LINE__;
2988 				goto err_invalid_target_handle;
2989 			}
2990 		}
2991 		if (!target_node) {
2992 			/*
2993 			 * return_error is set above
2994 			 */
2995 			return_error_param = -EINVAL;
2996 			return_error_line = __LINE__;
2997 			goto err_dead_binder;
2998 		}
2999 		e->to_node = target_node->debug_id;
3000 		if (security_binder_transaction(proc->tsk,
3001 						target_proc->tsk) < 0) {
3002 			return_error = BR_FAILED_REPLY;
3003 			return_error_param = -EPERM;
3004 			return_error_line = __LINE__;
3005 			goto err_invalid_target_handle;
3006 		}
3007 		binder_inner_proc_lock(proc);
3008 
3009 		w = list_first_entry_or_null(&thread->todo,
3010 					     struct binder_work, entry);
3011 		if (!(tr->flags & TF_ONE_WAY) && w &&
3012 		    w->type == BINDER_WORK_TRANSACTION) {
3013 			/*
3014 			 * Do not allow new outgoing transaction from a
3015 			 * thread that has a transaction at the head of
3016 			 * its todo list. Only need to check the head
3017 			 * because binder_select_thread_ilocked picks a
3018 			 * thread from proc->waiting_threads to enqueue
3019 			 * the transaction, and nothing is queued to the
3020 			 * todo list while the thread is on waiting_threads.
3021 			 */
3022 			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3023 					  proc->pid, thread->pid);
3024 			binder_inner_proc_unlock(proc);
3025 			return_error = BR_FAILED_REPLY;
3026 			return_error_param = -EPROTO;
3027 			return_error_line = __LINE__;
3028 			goto err_bad_todo_list;
3029 		}
3030 
3031 		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3032 			struct binder_transaction *tmp;
3033 
3034 			tmp = thread->transaction_stack;
3035 			if (tmp->to_thread != thread) {
3036 				spin_lock(&tmp->lock);
3037 				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3038 					proc->pid, thread->pid, tmp->debug_id,
3039 					tmp->to_proc ? tmp->to_proc->pid : 0,
3040 					tmp->to_thread ?
3041 					tmp->to_thread->pid : 0);
3042 				spin_unlock(&tmp->lock);
3043 				binder_inner_proc_unlock(proc);
3044 				return_error = BR_FAILED_REPLY;
3045 				return_error_param = -EPROTO;
3046 				return_error_line = __LINE__;
3047 				goto err_bad_call_stack;
3048 			}
3049 			while (tmp) {
3050 				struct binder_thread *from;
3051 
3052 				spin_lock(&tmp->lock);
3053 				from = tmp->from;
3054 				if (from && from->proc == target_proc) {
3055 					atomic_inc(&from->tmp_ref);
3056 					target_thread = from;
3057 					spin_unlock(&tmp->lock);
3058 					break;
3059 				}
3060 				spin_unlock(&tmp->lock);
3061 				tmp = tmp->from_parent;
3062 			}
3063 		}
3064 		binder_inner_proc_unlock(proc);
3065 	}
3066 	if (target_thread)
3067 		e->to_thread = target_thread->pid;
3068 	e->to_proc = target_proc->pid;
3069 
3070 	/* TODO: reuse incoming transaction for reply */
3071 	t = kzalloc(sizeof(*t), GFP_KERNEL);
3072 	if (t == NULL) {
3073 		return_error = BR_FAILED_REPLY;
3074 		return_error_param = -ENOMEM;
3075 		return_error_line = __LINE__;
3076 		goto err_alloc_t_failed;
3077 	}
3078 	INIT_LIST_HEAD(&t->fd_fixups);
3079 	binder_stats_created(BINDER_STAT_TRANSACTION);
3080 	spin_lock_init(&t->lock);
3081 
3082 	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3083 	if (tcomplete == NULL) {
3084 		return_error = BR_FAILED_REPLY;
3085 		return_error_param = -ENOMEM;
3086 		return_error_line = __LINE__;
3087 		goto err_alloc_tcomplete_failed;
3088 	}
3089 	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3090 
3091 	t->debug_id = t_debug_id;
3092 
3093 	if (reply)
3094 		binder_debug(BINDER_DEBUG_TRANSACTION,
3095 			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3096 			     proc->pid, thread->pid, t->debug_id,
3097 			     target_proc->pid, target_thread->pid,
3098 			     (u64)tr->data.ptr.buffer,
3099 			     (u64)tr->data.ptr.offsets,
3100 			     (u64)tr->data_size, (u64)tr->offsets_size,
3101 			     (u64)extra_buffers_size);
3102 	else
3103 		binder_debug(BINDER_DEBUG_TRANSACTION,
3104 			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3105 			     proc->pid, thread->pid, t->debug_id,
3106 			     target_proc->pid, target_node->debug_id,
3107 			     (u64)tr->data.ptr.buffer,
3108 			     (u64)tr->data.ptr.offsets,
3109 			     (u64)tr->data_size, (u64)tr->offsets_size,
3110 			     (u64)extra_buffers_size);
3111 
3112 	if (!reply && !(tr->flags & TF_ONE_WAY))
3113 		t->from = thread;
3114 	else
3115 		t->from = NULL;
3116 	t->sender_euid = task_euid(proc->tsk);
3117 	t->to_proc = target_proc;
3118 	t->to_thread = target_thread;
3119 	t->code = tr->code;
3120 	t->flags = tr->flags;
3121 	t->priority = task_nice(current);
3122 
3123 	if (target_node && target_node->txn_security_ctx) {
3124 		u32 secid;
3125 		size_t added_size;
3126 
3127 		security_task_getsecid(proc->tsk, &secid);
3128 		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3129 		if (ret) {
3130 			return_error = BR_FAILED_REPLY;
3131 			return_error_param = ret;
3132 			return_error_line = __LINE__;
3133 			goto err_get_secctx_failed;
3134 		}
3135 		added_size = ALIGN(secctx_sz, sizeof(u64));
3136 		extra_buffers_size += added_size;
3137 		if (extra_buffers_size < added_size) {
3138 			/* integer overflow of extra_buffers_size */
3139 			return_error = BR_FAILED_REPLY;
3140 			return_error_param = EINVAL;
3141 			return_error_line = __LINE__;
3142 			goto err_bad_extra_size;
3143 		}
3144 	}
3145 
3146 	trace_binder_transaction(reply, t, target_node);
3147 
3148 	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3149 		tr->offsets_size, extra_buffers_size,
3150 		!reply && (t->flags & TF_ONE_WAY));
3151 	if (IS_ERR(t->buffer)) {
3152 		/*
3153 		 * -ESRCH indicates VMA cleared. The target is dying.
3154 		 */
3155 		return_error_param = PTR_ERR(t->buffer);
3156 		return_error = return_error_param == -ESRCH ?
3157 			BR_DEAD_REPLY : BR_FAILED_REPLY;
3158 		return_error_line = __LINE__;
3159 		t->buffer = NULL;
3160 		goto err_binder_alloc_buf_failed;
3161 	}
3162 	if (secctx) {
3163 		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3164 				    ALIGN(tr->offsets_size, sizeof(void *)) +
3165 				    ALIGN(extra_buffers_size, sizeof(void *)) -
3166 				    ALIGN(secctx_sz, sizeof(u64));
3167 
3168 		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3169 		binder_alloc_copy_to_buffer(&target_proc->alloc,
3170 					    t->buffer, buf_offset,
3171 					    secctx, secctx_sz);
3172 		security_release_secctx(secctx, secctx_sz);
3173 		secctx = NULL;
3174 	}
3175 	t->buffer->debug_id = t->debug_id;
3176 	t->buffer->transaction = t;
3177 	t->buffer->target_node = target_node;
3178 	trace_binder_transaction_alloc_buf(t->buffer);
3179 
3180 	if (binder_alloc_copy_user_to_buffer(
3181 				&target_proc->alloc,
3182 				t->buffer, 0,
3183 				(const void __user *)
3184 					(uintptr_t)tr->data.ptr.buffer,
3185 				tr->data_size)) {
3186 		binder_user_error("%d:%d got transaction with invalid data ptr\n",
3187 				proc->pid, thread->pid);
3188 		return_error = BR_FAILED_REPLY;
3189 		return_error_param = -EFAULT;
3190 		return_error_line = __LINE__;
3191 		goto err_copy_data_failed;
3192 	}
3193 	if (binder_alloc_copy_user_to_buffer(
3194 				&target_proc->alloc,
3195 				t->buffer,
3196 				ALIGN(tr->data_size, sizeof(void *)),
3197 				(const void __user *)
3198 					(uintptr_t)tr->data.ptr.offsets,
3199 				tr->offsets_size)) {
3200 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3201 				proc->pid, thread->pid);
3202 		return_error = BR_FAILED_REPLY;
3203 		return_error_param = -EFAULT;
3204 		return_error_line = __LINE__;
3205 		goto err_copy_data_failed;
3206 	}
3207 	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3208 		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3209 				proc->pid, thread->pid, (u64)tr->offsets_size);
3210 		return_error = BR_FAILED_REPLY;
3211 		return_error_param = -EINVAL;
3212 		return_error_line = __LINE__;
3213 		goto err_bad_offset;
3214 	}
3215 	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3216 		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3217 				  proc->pid, thread->pid,
3218 				  (u64)extra_buffers_size);
3219 		return_error = BR_FAILED_REPLY;
3220 		return_error_param = -EINVAL;
3221 		return_error_line = __LINE__;
3222 		goto err_bad_offset;
3223 	}
3224 	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3225 	buffer_offset = off_start_offset;
3226 	off_end_offset = off_start_offset + tr->offsets_size;
3227 	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3228 	sg_buf_end_offset = sg_buf_offset + extra_buffers_size;
3229 	off_min = 0;
3230 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3231 	     buffer_offset += sizeof(binder_size_t)) {
3232 		struct binder_object_header *hdr;
3233 		size_t object_size;
3234 		struct binder_object object;
3235 		binder_size_t object_offset;
3236 
3237 		binder_alloc_copy_from_buffer(&target_proc->alloc,
3238 					      &object_offset,
3239 					      t->buffer,
3240 					      buffer_offset,
3241 					      sizeof(object_offset));
3242 		object_size = binder_get_object(target_proc, t->buffer,
3243 						object_offset, &object);
3244 		if (object_size == 0 || object_offset < off_min) {
3245 			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3246 					  proc->pid, thread->pid,
3247 					  (u64)object_offset,
3248 					  (u64)off_min,
3249 					  (u64)t->buffer->data_size);
3250 			return_error = BR_FAILED_REPLY;
3251 			return_error_param = -EINVAL;
3252 			return_error_line = __LINE__;
3253 			goto err_bad_offset;
3254 		}
3255 
3256 		hdr = &object.hdr;
3257 		off_min = object_offset + object_size;
3258 		switch (hdr->type) {
3259 		case BINDER_TYPE_BINDER:
3260 		case BINDER_TYPE_WEAK_BINDER: {
3261 			struct flat_binder_object *fp;
3262 
3263 			fp = to_flat_binder_object(hdr);
3264 			ret = binder_translate_binder(fp, t, thread);
3265 			if (ret < 0) {
3266 				return_error = BR_FAILED_REPLY;
3267 				return_error_param = ret;
3268 				return_error_line = __LINE__;
3269 				goto err_translate_failed;
3270 			}
3271 			binder_alloc_copy_to_buffer(&target_proc->alloc,
3272 						    t->buffer, object_offset,
3273 						    fp, sizeof(*fp));
3274 		} break;
3275 		case BINDER_TYPE_HANDLE:
3276 		case BINDER_TYPE_WEAK_HANDLE: {
3277 			struct flat_binder_object *fp;
3278 
3279 			fp = to_flat_binder_object(hdr);
3280 			ret = binder_translate_handle(fp, t, thread);
3281 			if (ret < 0) {
3282 				return_error = BR_FAILED_REPLY;
3283 				return_error_param = ret;
3284 				return_error_line = __LINE__;
3285 				goto err_translate_failed;
3286 			}
3287 			binder_alloc_copy_to_buffer(&target_proc->alloc,
3288 						    t->buffer, object_offset,
3289 						    fp, sizeof(*fp));
3290 		} break;
3291 
3292 		case BINDER_TYPE_FD: {
3293 			struct binder_fd_object *fp = to_binder_fd_object(hdr);
3294 			binder_size_t fd_offset = object_offset +
3295 				(uintptr_t)&fp->fd - (uintptr_t)fp;
3296 			int ret = binder_translate_fd(fp->fd, fd_offset, t,
3297 						      thread, in_reply_to);
3298 
3299 			if (ret < 0) {
3300 				return_error = BR_FAILED_REPLY;
3301 				return_error_param = ret;
3302 				return_error_line = __LINE__;
3303 				goto err_translate_failed;
3304 			}
3305 			fp->pad_binder = 0;
3306 			binder_alloc_copy_to_buffer(&target_proc->alloc,
3307 						    t->buffer, object_offset,
3308 						    fp, sizeof(*fp));
3309 		} break;
3310 		case BINDER_TYPE_FDA: {
3311 			struct binder_object ptr_object;
3312 			binder_size_t parent_offset;
3313 			struct binder_fd_array_object *fda =
3314 				to_binder_fd_array_object(hdr);
3315 			size_t num_valid = (buffer_offset - off_start_offset) *
3316 						sizeof(binder_size_t);
3317 			struct binder_buffer_object *parent =
3318 				binder_validate_ptr(target_proc, t->buffer,
3319 						    &ptr_object, fda->parent,
3320 						    off_start_offset,
3321 						    &parent_offset,
3322 						    num_valid);
3323 			if (!parent) {
3324 				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3325 						  proc->pid, thread->pid);
3326 				return_error = BR_FAILED_REPLY;
3327 				return_error_param = -EINVAL;
3328 				return_error_line = __LINE__;
3329 				goto err_bad_parent;
3330 			}
3331 			if (!binder_validate_fixup(target_proc, t->buffer,
3332 						   off_start_offset,
3333 						   parent_offset,
3334 						   fda->parent_offset,
3335 						   last_fixup_obj_off,
3336 						   last_fixup_min_off)) {
3337 				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3338 						  proc->pid, thread->pid);
3339 				return_error = BR_FAILED_REPLY;
3340 				return_error_param = -EINVAL;
3341 				return_error_line = __LINE__;
3342 				goto err_bad_parent;
3343 			}
3344 			ret = binder_translate_fd_array(fda, parent, t, thread,
3345 							in_reply_to);
3346 			if (ret < 0) {
3347 				return_error = BR_FAILED_REPLY;
3348 				return_error_param = ret;
3349 				return_error_line = __LINE__;
3350 				goto err_translate_failed;
3351 			}
3352 			last_fixup_obj_off = parent_offset;
3353 			last_fixup_min_off =
3354 				fda->parent_offset + sizeof(u32) * fda->num_fds;
3355 		} break;
3356 		case BINDER_TYPE_PTR: {
3357 			struct binder_buffer_object *bp =
3358 				to_binder_buffer_object(hdr);
3359 			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3360 			size_t num_valid;
3361 
3362 			if (bp->length > buf_left) {
3363 				binder_user_error("%d:%d got transaction with too large buffer\n",
3364 						  proc->pid, thread->pid);
3365 				return_error = BR_FAILED_REPLY;
3366 				return_error_param = -EINVAL;
3367 				return_error_line = __LINE__;
3368 				goto err_bad_offset;
3369 			}
3370 			if (binder_alloc_copy_user_to_buffer(
3371 						&target_proc->alloc,
3372 						t->buffer,
3373 						sg_buf_offset,
3374 						(const void __user *)
3375 							(uintptr_t)bp->buffer,
3376 						bp->length)) {
3377 				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3378 						  proc->pid, thread->pid);
3379 				return_error_param = -EFAULT;
3380 				return_error = BR_FAILED_REPLY;
3381 				return_error_line = __LINE__;
3382 				goto err_copy_data_failed;
3383 			}
3384 			/* Fixup buffer pointer to target proc address space */
3385 			bp->buffer = (uintptr_t)
3386 				t->buffer->user_data + sg_buf_offset;
3387 			sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3388 
3389 			num_valid = (buffer_offset - off_start_offset) *
3390 					sizeof(binder_size_t);
3391 			ret = binder_fixup_parent(t, thread, bp,
3392 						  off_start_offset,
3393 						  num_valid,
3394 						  last_fixup_obj_off,
3395 						  last_fixup_min_off);
3396 			if (ret < 0) {
3397 				return_error = BR_FAILED_REPLY;
3398 				return_error_param = ret;
3399 				return_error_line = __LINE__;
3400 				goto err_translate_failed;
3401 			}
3402 			binder_alloc_copy_to_buffer(&target_proc->alloc,
3403 						    t->buffer, object_offset,
3404 						    bp, sizeof(*bp));
3405 			last_fixup_obj_off = object_offset;
3406 			last_fixup_min_off = 0;
3407 		} break;
3408 		default:
3409 			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3410 				proc->pid, thread->pid, hdr->type);
3411 			return_error = BR_FAILED_REPLY;
3412 			return_error_param = -EINVAL;
3413 			return_error_line = __LINE__;
3414 			goto err_bad_object_type;
3415 		}
3416 	}
3417 	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3418 	t->work.type = BINDER_WORK_TRANSACTION;
3419 
3420 	if (reply) {
3421 		binder_enqueue_thread_work(thread, tcomplete);
3422 		binder_inner_proc_lock(target_proc);
3423 		if (target_thread->is_dead) {
3424 			binder_inner_proc_unlock(target_proc);
3425 			goto err_dead_proc_or_thread;
3426 		}
3427 		BUG_ON(t->buffer->async_transaction != 0);
3428 		binder_pop_transaction_ilocked(target_thread, in_reply_to);
3429 		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3430 		binder_inner_proc_unlock(target_proc);
3431 		wake_up_interruptible_sync(&target_thread->wait);
3432 		binder_free_transaction(in_reply_to);
3433 	} else if (!(t->flags & TF_ONE_WAY)) {
3434 		BUG_ON(t->buffer->async_transaction != 0);
3435 		binder_inner_proc_lock(proc);
3436 		/*
3437 		 * Defer the TRANSACTION_COMPLETE, so we don't return to
3438 		 * userspace immediately; this allows the target process to
3439 		 * immediately start processing this transaction, reducing
3440 		 * latency. We will then return the TRANSACTION_COMPLETE when
3441 		 * the target replies (or there is an error).
3442 		 */
3443 		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3444 		t->need_reply = 1;
3445 		t->from_parent = thread->transaction_stack;
3446 		thread->transaction_stack = t;
3447 		binder_inner_proc_unlock(proc);
3448 		if (!binder_proc_transaction(t, target_proc, target_thread)) {
3449 			binder_inner_proc_lock(proc);
3450 			binder_pop_transaction_ilocked(thread, t);
3451 			binder_inner_proc_unlock(proc);
3452 			goto err_dead_proc_or_thread;
3453 		}
3454 	} else {
3455 		BUG_ON(target_node == NULL);
3456 		BUG_ON(t->buffer->async_transaction != 1);
3457 		binder_enqueue_thread_work(thread, tcomplete);
3458 		if (!binder_proc_transaction(t, target_proc, NULL))
3459 			goto err_dead_proc_or_thread;
3460 	}
3461 	if (target_thread)
3462 		binder_thread_dec_tmpref(target_thread);
3463 	binder_proc_dec_tmpref(target_proc);
3464 	if (target_node)
3465 		binder_dec_node_tmpref(target_node);
3466 	/*
3467 	 * write barrier to synchronize with initialization
3468 	 * of log entry
3469 	 */
3470 	smp_wmb();
3471 	WRITE_ONCE(e->debug_id_done, t_debug_id);
3472 	return;
3473 
3474 err_dead_proc_or_thread:
3475 	return_error = BR_DEAD_REPLY;
3476 	return_error_line = __LINE__;
3477 	binder_dequeue_work(proc, tcomplete);
3478 err_translate_failed:
3479 err_bad_object_type:
3480 err_bad_offset:
3481 err_bad_parent:
3482 err_copy_data_failed:
3483 	binder_free_txn_fixups(t);
3484 	trace_binder_transaction_failed_buffer_release(t->buffer);
3485 	binder_transaction_buffer_release(target_proc, t->buffer,
3486 					  buffer_offset, true);
3487 	if (target_node)
3488 		binder_dec_node_tmpref(target_node);
3489 	target_node = NULL;
3490 	t->buffer->transaction = NULL;
3491 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3492 err_binder_alloc_buf_failed:
3493 err_bad_extra_size:
3494 	if (secctx)
3495 		security_release_secctx(secctx, secctx_sz);
3496 err_get_secctx_failed:
3497 	kfree(tcomplete);
3498 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3499 err_alloc_tcomplete_failed:
3500 	kfree(t);
3501 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
3502 err_alloc_t_failed:
3503 err_bad_todo_list:
3504 err_bad_call_stack:
3505 err_empty_call_stack:
3506 err_dead_binder:
3507 err_invalid_target_handle:
3508 	if (target_thread)
3509 		binder_thread_dec_tmpref(target_thread);
3510 	if (target_proc)
3511 		binder_proc_dec_tmpref(target_proc);
3512 	if (target_node) {
3513 		binder_dec_node(target_node, 1, 0);
3514 		binder_dec_node_tmpref(target_node);
3515 	}
3516 
3517 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3518 		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3519 		     proc->pid, thread->pid, return_error, return_error_param,
3520 		     (u64)tr->data_size, (u64)tr->offsets_size,
3521 		     return_error_line);
3522 
3523 	{
3524 		struct binder_transaction_log_entry *fe;
3525 
3526 		e->return_error = return_error;
3527 		e->return_error_param = return_error_param;
3528 		e->return_error_line = return_error_line;
3529 		fe = binder_transaction_log_add(&binder_transaction_log_failed);
3530 		*fe = *e;
3531 		/*
3532 		 * write barrier to synchronize with initialization
3533 		 * of log entry
3534 		 */
3535 		smp_wmb();
3536 		WRITE_ONCE(e->debug_id_done, t_debug_id);
3537 		WRITE_ONCE(fe->debug_id_done, t_debug_id);
3538 	}
3539 
3540 	BUG_ON(thread->return_error.cmd != BR_OK);
3541 	if (in_reply_to) {
3542 		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3543 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3544 		binder_send_failed_reply(in_reply_to, return_error);
3545 	} else {
3546 		thread->return_error.cmd = return_error;
3547 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3548 	}
3549 }
3550 
3551 /**
3552  * binder_free_buf() - free the specified buffer
3553  * @proc:	binder proc that owns buffer
3554  * @buffer:	buffer to be freed
3555  *
3556  * If buffer for an async transaction, enqueue the next async
3557  * transaction from the node.
3558  *
3559  * Cleanup buffer and free it.
3560  */
3561 static void
3562 binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
3563 {
3564 	binder_inner_proc_lock(proc);
3565 	if (buffer->transaction) {
3566 		buffer->transaction->buffer = NULL;
3567 		buffer->transaction = NULL;
3568 	}
3569 	binder_inner_proc_unlock(proc);
3570 	if (buffer->async_transaction && buffer->target_node) {
3571 		struct binder_node *buf_node;
3572 		struct binder_work *w;
3573 
3574 		buf_node = buffer->target_node;
3575 		binder_node_inner_lock(buf_node);
3576 		BUG_ON(!buf_node->has_async_transaction);
3577 		BUG_ON(buf_node->proc != proc);
3578 		w = binder_dequeue_work_head_ilocked(
3579 				&buf_node->async_todo);
3580 		if (!w) {
3581 			buf_node->has_async_transaction = false;
3582 		} else {
3583 			binder_enqueue_work_ilocked(
3584 					w, &proc->todo);
3585 			binder_wakeup_proc_ilocked(proc);
3586 		}
3587 		binder_node_inner_unlock(buf_node);
3588 	}
3589 	trace_binder_transaction_buffer_release(buffer);
3590 	binder_transaction_buffer_release(proc, buffer, 0, false);
3591 	binder_alloc_free_buf(&proc->alloc, buffer);
3592 }
3593 
3594 static int binder_thread_write(struct binder_proc *proc,
3595 			struct binder_thread *thread,
3596 			binder_uintptr_t binder_buffer, size_t size,
3597 			binder_size_t *consumed)
3598 {
3599 	uint32_t cmd;
3600 	struct binder_context *context = proc->context;
3601 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3602 	void __user *ptr = buffer + *consumed;
3603 	void __user *end = buffer + size;
3604 
3605 	while (ptr < end && thread->return_error.cmd == BR_OK) {
3606 		int ret;
3607 
3608 		if (get_user(cmd, (uint32_t __user *)ptr))
3609 			return -EFAULT;
3610 		ptr += sizeof(uint32_t);
3611 		trace_binder_command(cmd);
3612 		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3613 			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3614 			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3615 			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3616 		}
3617 		switch (cmd) {
3618 		case BC_INCREFS:
3619 		case BC_ACQUIRE:
3620 		case BC_RELEASE:
3621 		case BC_DECREFS: {
3622 			uint32_t target;
3623 			const char *debug_string;
3624 			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3625 			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3626 			struct binder_ref_data rdata;
3627 
3628 			if (get_user(target, (uint32_t __user *)ptr))
3629 				return -EFAULT;
3630 
3631 			ptr += sizeof(uint32_t);
3632 			ret = -1;
3633 			if (increment && !target) {
3634 				struct binder_node *ctx_mgr_node;
3635 				mutex_lock(&context->context_mgr_node_lock);
3636 				ctx_mgr_node = context->binder_context_mgr_node;
3637 				if (ctx_mgr_node)
3638 					ret = binder_inc_ref_for_node(
3639 							proc, ctx_mgr_node,
3640 							strong, NULL, &rdata);
3641 				mutex_unlock(&context->context_mgr_node_lock);
3642 			}
3643 			if (ret)
3644 				ret = binder_update_ref_for_handle(
3645 						proc, target, increment, strong,
3646 						&rdata);
3647 			if (!ret && rdata.desc != target) {
3648 				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3649 					proc->pid, thread->pid,
3650 					target, rdata.desc);
3651 			}
3652 			switch (cmd) {
3653 			case BC_INCREFS:
3654 				debug_string = "IncRefs";
3655 				break;
3656 			case BC_ACQUIRE:
3657 				debug_string = "Acquire";
3658 				break;
3659 			case BC_RELEASE:
3660 				debug_string = "Release";
3661 				break;
3662 			case BC_DECREFS:
3663 			default:
3664 				debug_string = "DecRefs";
3665 				break;
3666 			}
3667 			if (ret) {
3668 				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3669 					proc->pid, thread->pid, debug_string,
3670 					strong, target, ret);
3671 				break;
3672 			}
3673 			binder_debug(BINDER_DEBUG_USER_REFS,
3674 				     "%d:%d %s ref %d desc %d s %d w %d\n",
3675 				     proc->pid, thread->pid, debug_string,
3676 				     rdata.debug_id, rdata.desc, rdata.strong,
3677 				     rdata.weak);
3678 			break;
3679 		}
3680 		case BC_INCREFS_DONE:
3681 		case BC_ACQUIRE_DONE: {
3682 			binder_uintptr_t node_ptr;
3683 			binder_uintptr_t cookie;
3684 			struct binder_node *node;
3685 			bool free_node;
3686 
3687 			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3688 				return -EFAULT;
3689 			ptr += sizeof(binder_uintptr_t);
3690 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3691 				return -EFAULT;
3692 			ptr += sizeof(binder_uintptr_t);
3693 			node = binder_get_node(proc, node_ptr);
3694 			if (node == NULL) {
3695 				binder_user_error("%d:%d %s u%016llx no match\n",
3696 					proc->pid, thread->pid,
3697 					cmd == BC_INCREFS_DONE ?
3698 					"BC_INCREFS_DONE" :
3699 					"BC_ACQUIRE_DONE",
3700 					(u64)node_ptr);
3701 				break;
3702 			}
3703 			if (cookie != node->cookie) {
3704 				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3705 					proc->pid, thread->pid,
3706 					cmd == BC_INCREFS_DONE ?
3707 					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3708 					(u64)node_ptr, node->debug_id,
3709 					(u64)cookie, (u64)node->cookie);
3710 				binder_put_node(node);
3711 				break;
3712 			}
3713 			binder_node_inner_lock(node);
3714 			if (cmd == BC_ACQUIRE_DONE) {
3715 				if (node->pending_strong_ref == 0) {
3716 					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3717 						proc->pid, thread->pid,
3718 						node->debug_id);
3719 					binder_node_inner_unlock(node);
3720 					binder_put_node(node);
3721 					break;
3722 				}
3723 				node->pending_strong_ref = 0;
3724 			} else {
3725 				if (node->pending_weak_ref == 0) {
3726 					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3727 						proc->pid, thread->pid,
3728 						node->debug_id);
3729 					binder_node_inner_unlock(node);
3730 					binder_put_node(node);
3731 					break;
3732 				}
3733 				node->pending_weak_ref = 0;
3734 			}
3735 			free_node = binder_dec_node_nilocked(node,
3736 					cmd == BC_ACQUIRE_DONE, 0);
3737 			WARN_ON(free_node);
3738 			binder_debug(BINDER_DEBUG_USER_REFS,
3739 				     "%d:%d %s node %d ls %d lw %d tr %d\n",
3740 				     proc->pid, thread->pid,
3741 				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3742 				     node->debug_id, node->local_strong_refs,
3743 				     node->local_weak_refs, node->tmp_refs);
3744 			binder_node_inner_unlock(node);
3745 			binder_put_node(node);
3746 			break;
3747 		}
3748 		case BC_ATTEMPT_ACQUIRE:
3749 			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3750 			return -EINVAL;
3751 		case BC_ACQUIRE_RESULT:
3752 			pr_err("BC_ACQUIRE_RESULT not supported\n");
3753 			return -EINVAL;
3754 
3755 		case BC_FREE_BUFFER: {
3756 			binder_uintptr_t data_ptr;
3757 			struct binder_buffer *buffer;
3758 
3759 			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3760 				return -EFAULT;
3761 			ptr += sizeof(binder_uintptr_t);
3762 
3763 			buffer = binder_alloc_prepare_to_free(&proc->alloc,
3764 							      data_ptr);
3765 			if (IS_ERR_OR_NULL(buffer)) {
3766 				if (PTR_ERR(buffer) == -EPERM) {
3767 					binder_user_error(
3768 						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3769 						proc->pid, thread->pid,
3770 						(u64)data_ptr);
3771 				} else {
3772 					binder_user_error(
3773 						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
3774 						proc->pid, thread->pid,
3775 						(u64)data_ptr);
3776 				}
3777 				break;
3778 			}
3779 			binder_debug(BINDER_DEBUG_FREE_BUFFER,
3780 				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3781 				     proc->pid, thread->pid, (u64)data_ptr,
3782 				     buffer->debug_id,
3783 				     buffer->transaction ? "active" : "finished");
3784 			binder_free_buf(proc, buffer);
3785 			break;
3786 		}
3787 
3788 		case BC_TRANSACTION_SG:
3789 		case BC_REPLY_SG: {
3790 			struct binder_transaction_data_sg tr;
3791 
3792 			if (copy_from_user(&tr, ptr, sizeof(tr)))
3793 				return -EFAULT;
3794 			ptr += sizeof(tr);
3795 			binder_transaction(proc, thread, &tr.transaction_data,
3796 					   cmd == BC_REPLY_SG, tr.buffers_size);
3797 			break;
3798 		}
3799 		case BC_TRANSACTION:
3800 		case BC_REPLY: {
3801 			struct binder_transaction_data tr;
3802 
3803 			if (copy_from_user(&tr, ptr, sizeof(tr)))
3804 				return -EFAULT;
3805 			ptr += sizeof(tr);
3806 			binder_transaction(proc, thread, &tr,
3807 					   cmd == BC_REPLY, 0);
3808 			break;
3809 		}
3810 
3811 		case BC_REGISTER_LOOPER:
3812 			binder_debug(BINDER_DEBUG_THREADS,
3813 				     "%d:%d BC_REGISTER_LOOPER\n",
3814 				     proc->pid, thread->pid);
3815 			binder_inner_proc_lock(proc);
3816 			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3817 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3818 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3819 					proc->pid, thread->pid);
3820 			} else if (proc->requested_threads == 0) {
3821 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3822 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3823 					proc->pid, thread->pid);
3824 			} else {
3825 				proc->requested_threads--;
3826 				proc->requested_threads_started++;
3827 			}
3828 			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3829 			binder_inner_proc_unlock(proc);
3830 			break;
3831 		case BC_ENTER_LOOPER:
3832 			binder_debug(BINDER_DEBUG_THREADS,
3833 				     "%d:%d BC_ENTER_LOOPER\n",
3834 				     proc->pid, thread->pid);
3835 			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3836 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3837 				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3838 					proc->pid, thread->pid);
3839 			}
3840 			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3841 			break;
3842 		case BC_EXIT_LOOPER:
3843 			binder_debug(BINDER_DEBUG_THREADS,
3844 				     "%d:%d BC_EXIT_LOOPER\n",
3845 				     proc->pid, thread->pid);
3846 			thread->looper |= BINDER_LOOPER_STATE_EXITED;
3847 			break;
3848 
3849 		case BC_REQUEST_DEATH_NOTIFICATION:
3850 		case BC_CLEAR_DEATH_NOTIFICATION: {
3851 			uint32_t target;
3852 			binder_uintptr_t cookie;
3853 			struct binder_ref *ref;
3854 			struct binder_ref_death *death = NULL;
3855 
3856 			if (get_user(target, (uint32_t __user *)ptr))
3857 				return -EFAULT;
3858 			ptr += sizeof(uint32_t);
3859 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3860 				return -EFAULT;
3861 			ptr += sizeof(binder_uintptr_t);
3862 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3863 				/*
3864 				 * Allocate memory for death notification
3865 				 * before taking lock
3866 				 */
3867 				death = kzalloc(sizeof(*death), GFP_KERNEL);
3868 				if (death == NULL) {
3869 					WARN_ON(thread->return_error.cmd !=
3870 						BR_OK);
3871 					thread->return_error.cmd = BR_ERROR;
3872 					binder_enqueue_thread_work(
3873 						thread,
3874 						&thread->return_error.work);
3875 					binder_debug(
3876 						BINDER_DEBUG_FAILED_TRANSACTION,
3877 						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3878 						proc->pid, thread->pid);
3879 					break;
3880 				}
3881 			}
3882 			binder_proc_lock(proc);
3883 			ref = binder_get_ref_olocked(proc, target, false);
3884 			if (ref == NULL) {
3885 				binder_user_error("%d:%d %s invalid ref %d\n",
3886 					proc->pid, thread->pid,
3887 					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3888 					"BC_REQUEST_DEATH_NOTIFICATION" :
3889 					"BC_CLEAR_DEATH_NOTIFICATION",
3890 					target);
3891 				binder_proc_unlock(proc);
3892 				kfree(death);
3893 				break;
3894 			}
3895 
3896 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3897 				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3898 				     proc->pid, thread->pid,
3899 				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3900 				     "BC_REQUEST_DEATH_NOTIFICATION" :
3901 				     "BC_CLEAR_DEATH_NOTIFICATION",
3902 				     (u64)cookie, ref->data.debug_id,
3903 				     ref->data.desc, ref->data.strong,
3904 				     ref->data.weak, ref->node->debug_id);
3905 
3906 			binder_node_lock(ref->node);
3907 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3908 				if (ref->death) {
3909 					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3910 						proc->pid, thread->pid);
3911 					binder_node_unlock(ref->node);
3912 					binder_proc_unlock(proc);
3913 					kfree(death);
3914 					break;
3915 				}
3916 				binder_stats_created(BINDER_STAT_DEATH);
3917 				INIT_LIST_HEAD(&death->work.entry);
3918 				death->cookie = cookie;
3919 				ref->death = death;
3920 				if (ref->node->proc == NULL) {
3921 					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3922 
3923 					binder_inner_proc_lock(proc);
3924 					binder_enqueue_work_ilocked(
3925 						&ref->death->work, &proc->todo);
3926 					binder_wakeup_proc_ilocked(proc);
3927 					binder_inner_proc_unlock(proc);
3928 				}
3929 			} else {
3930 				if (ref->death == NULL) {
3931 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3932 						proc->pid, thread->pid);
3933 					binder_node_unlock(ref->node);
3934 					binder_proc_unlock(proc);
3935 					break;
3936 				}
3937 				death = ref->death;
3938 				if (death->cookie != cookie) {
3939 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3940 						proc->pid, thread->pid,
3941 						(u64)death->cookie,
3942 						(u64)cookie);
3943 					binder_node_unlock(ref->node);
3944 					binder_proc_unlock(proc);
3945 					break;
3946 				}
3947 				ref->death = NULL;
3948 				binder_inner_proc_lock(proc);
3949 				if (list_empty(&death->work.entry)) {
3950 					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3951 					if (thread->looper &
3952 					    (BINDER_LOOPER_STATE_REGISTERED |
3953 					     BINDER_LOOPER_STATE_ENTERED))
3954 						binder_enqueue_thread_work_ilocked(
3955 								thread,
3956 								&death->work);
3957 					else {
3958 						binder_enqueue_work_ilocked(
3959 								&death->work,
3960 								&proc->todo);
3961 						binder_wakeup_proc_ilocked(
3962 								proc);
3963 					}
3964 				} else {
3965 					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3966 					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3967 				}
3968 				binder_inner_proc_unlock(proc);
3969 			}
3970 			binder_node_unlock(ref->node);
3971 			binder_proc_unlock(proc);
3972 		} break;
3973 		case BC_DEAD_BINDER_DONE: {
3974 			struct binder_work *w;
3975 			binder_uintptr_t cookie;
3976 			struct binder_ref_death *death = NULL;
3977 
3978 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3979 				return -EFAULT;
3980 
3981 			ptr += sizeof(cookie);
3982 			binder_inner_proc_lock(proc);
3983 			list_for_each_entry(w, &proc->delivered_death,
3984 					    entry) {
3985 				struct binder_ref_death *tmp_death =
3986 					container_of(w,
3987 						     struct binder_ref_death,
3988 						     work);
3989 
3990 				if (tmp_death->cookie == cookie) {
3991 					death = tmp_death;
3992 					break;
3993 				}
3994 			}
3995 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
3996 				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3997 				     proc->pid, thread->pid, (u64)cookie,
3998 				     death);
3999 			if (death == NULL) {
4000 				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4001 					proc->pid, thread->pid, (u64)cookie);
4002 				binder_inner_proc_unlock(proc);
4003 				break;
4004 			}
4005 			binder_dequeue_work_ilocked(&death->work);
4006 			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4007 				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4008 				if (thread->looper &
4009 					(BINDER_LOOPER_STATE_REGISTERED |
4010 					 BINDER_LOOPER_STATE_ENTERED))
4011 					binder_enqueue_thread_work_ilocked(
4012 						thread, &death->work);
4013 				else {
4014 					binder_enqueue_work_ilocked(
4015 							&death->work,
4016 							&proc->todo);
4017 					binder_wakeup_proc_ilocked(proc);
4018 				}
4019 			}
4020 			binder_inner_proc_unlock(proc);
4021 		} break;
4022 
4023 		default:
4024 			pr_err("%d:%d unknown command %d\n",
4025 			       proc->pid, thread->pid, cmd);
4026 			return -EINVAL;
4027 		}
4028 		*consumed = ptr - buffer;
4029 	}
4030 	return 0;
4031 }
4032 
4033 static void binder_stat_br(struct binder_proc *proc,
4034 			   struct binder_thread *thread, uint32_t cmd)
4035 {
4036 	trace_binder_return(cmd);
4037 	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4038 		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4039 		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4040 		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4041 	}
4042 }
4043 
4044 static int binder_put_node_cmd(struct binder_proc *proc,
4045 			       struct binder_thread *thread,
4046 			       void __user **ptrp,
4047 			       binder_uintptr_t node_ptr,
4048 			       binder_uintptr_t node_cookie,
4049 			       int node_debug_id,
4050 			       uint32_t cmd, const char *cmd_name)
4051 {
4052 	void __user *ptr = *ptrp;
4053 
4054 	if (put_user(cmd, (uint32_t __user *)ptr))
4055 		return -EFAULT;
4056 	ptr += sizeof(uint32_t);
4057 
4058 	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4059 		return -EFAULT;
4060 	ptr += sizeof(binder_uintptr_t);
4061 
4062 	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4063 		return -EFAULT;
4064 	ptr += sizeof(binder_uintptr_t);
4065 
4066 	binder_stat_br(proc, thread, cmd);
4067 	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4068 		     proc->pid, thread->pid, cmd_name, node_debug_id,
4069 		     (u64)node_ptr, (u64)node_cookie);
4070 
4071 	*ptrp = ptr;
4072 	return 0;
4073 }
4074 
4075 static int binder_wait_for_work(struct binder_thread *thread,
4076 				bool do_proc_work)
4077 {
4078 	DEFINE_WAIT(wait);
4079 	struct binder_proc *proc = thread->proc;
4080 	int ret = 0;
4081 
4082 	freezer_do_not_count();
4083 	binder_inner_proc_lock(proc);
4084 	for (;;) {
4085 		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4086 		if (binder_has_work_ilocked(thread, do_proc_work))
4087 			break;
4088 		if (do_proc_work)
4089 			list_add(&thread->waiting_thread_node,
4090 				 &proc->waiting_threads);
4091 		binder_inner_proc_unlock(proc);
4092 		schedule();
4093 		binder_inner_proc_lock(proc);
4094 		list_del_init(&thread->waiting_thread_node);
4095 		if (signal_pending(current)) {
4096 			ret = -ERESTARTSYS;
4097 			break;
4098 		}
4099 	}
4100 	finish_wait(&thread->wait, &wait);
4101 	binder_inner_proc_unlock(proc);
4102 	freezer_count();
4103 
4104 	return ret;
4105 }
4106 
4107 /**
4108  * binder_apply_fd_fixups() - finish fd translation
4109  * @proc:         binder_proc associated @t->buffer
4110  * @t:	binder transaction with list of fd fixups
4111  *
4112  * Now that we are in the context of the transaction target
4113  * process, we can allocate and install fds. Process the
4114  * list of fds to translate and fixup the buffer with the
4115  * new fds.
4116  *
4117  * If we fail to allocate an fd, then free the resources by
4118  * fput'ing files that have not been processed and ksys_close'ing
4119  * any fds that have already been allocated.
4120  */
4121 static int binder_apply_fd_fixups(struct binder_proc *proc,
4122 				  struct binder_transaction *t)
4123 {
4124 	struct binder_txn_fd_fixup *fixup, *tmp;
4125 	int ret = 0;
4126 
4127 	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4128 		int fd = get_unused_fd_flags(O_CLOEXEC);
4129 
4130 		if (fd < 0) {
4131 			binder_debug(BINDER_DEBUG_TRANSACTION,
4132 				     "failed fd fixup txn %d fd %d\n",
4133 				     t->debug_id, fd);
4134 			ret = -ENOMEM;
4135 			break;
4136 		}
4137 		binder_debug(BINDER_DEBUG_TRANSACTION,
4138 			     "fd fixup txn %d fd %d\n",
4139 			     t->debug_id, fd);
4140 		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4141 		fd_install(fd, fixup->file);
4142 		fixup->file = NULL;
4143 		binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4144 					    fixup->offset, &fd,
4145 					    sizeof(u32));
4146 	}
4147 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4148 		if (fixup->file) {
4149 			fput(fixup->file);
4150 		} else if (ret) {
4151 			u32 fd;
4152 
4153 			binder_alloc_copy_from_buffer(&proc->alloc, &fd,
4154 						      t->buffer, fixup->offset,
4155 						      sizeof(fd));
4156 			binder_deferred_fd_close(fd);
4157 		}
4158 		list_del(&fixup->fixup_entry);
4159 		kfree(fixup);
4160 	}
4161 
4162 	return ret;
4163 }
4164 
4165 static int binder_thread_read(struct binder_proc *proc,
4166 			      struct binder_thread *thread,
4167 			      binder_uintptr_t binder_buffer, size_t size,
4168 			      binder_size_t *consumed, int non_block)
4169 {
4170 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4171 	void __user *ptr = buffer + *consumed;
4172 	void __user *end = buffer + size;
4173 
4174 	int ret = 0;
4175 	int wait_for_proc_work;
4176 
4177 	if (*consumed == 0) {
4178 		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4179 			return -EFAULT;
4180 		ptr += sizeof(uint32_t);
4181 	}
4182 
4183 retry:
4184 	binder_inner_proc_lock(proc);
4185 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4186 	binder_inner_proc_unlock(proc);
4187 
4188 	thread->looper |= BINDER_LOOPER_STATE_WAITING;
4189 
4190 	trace_binder_wait_for_work(wait_for_proc_work,
4191 				   !!thread->transaction_stack,
4192 				   !binder_worklist_empty(proc, &thread->todo));
4193 	if (wait_for_proc_work) {
4194 		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4195 					BINDER_LOOPER_STATE_ENTERED))) {
4196 			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4197 				proc->pid, thread->pid, thread->looper);
4198 			wait_event_interruptible(binder_user_error_wait,
4199 						 binder_stop_on_user_error < 2);
4200 		}
4201 		binder_set_nice(proc->default_priority);
4202 	}
4203 
4204 	if (non_block) {
4205 		if (!binder_has_work(thread, wait_for_proc_work))
4206 			ret = -EAGAIN;
4207 	} else {
4208 		ret = binder_wait_for_work(thread, wait_for_proc_work);
4209 	}
4210 
4211 	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4212 
4213 	if (ret)
4214 		return ret;
4215 
4216 	while (1) {
4217 		uint32_t cmd;
4218 		struct binder_transaction_data_secctx tr;
4219 		struct binder_transaction_data *trd = &tr.transaction_data;
4220 		struct binder_work *w = NULL;
4221 		struct list_head *list = NULL;
4222 		struct binder_transaction *t = NULL;
4223 		struct binder_thread *t_from;
4224 		size_t trsize = sizeof(*trd);
4225 
4226 		binder_inner_proc_lock(proc);
4227 		if (!binder_worklist_empty_ilocked(&thread->todo))
4228 			list = &thread->todo;
4229 		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4230 			   wait_for_proc_work)
4231 			list = &proc->todo;
4232 		else {
4233 			binder_inner_proc_unlock(proc);
4234 
4235 			/* no data added */
4236 			if (ptr - buffer == 4 && !thread->looper_need_return)
4237 				goto retry;
4238 			break;
4239 		}
4240 
4241 		if (end - ptr < sizeof(tr) + 4) {
4242 			binder_inner_proc_unlock(proc);
4243 			break;
4244 		}
4245 		w = binder_dequeue_work_head_ilocked(list);
4246 		if (binder_worklist_empty_ilocked(&thread->todo))
4247 			thread->process_todo = false;
4248 
4249 		switch (w->type) {
4250 		case BINDER_WORK_TRANSACTION: {
4251 			binder_inner_proc_unlock(proc);
4252 			t = container_of(w, struct binder_transaction, work);
4253 		} break;
4254 		case BINDER_WORK_RETURN_ERROR: {
4255 			struct binder_error *e = container_of(
4256 					w, struct binder_error, work);
4257 
4258 			WARN_ON(e->cmd == BR_OK);
4259 			binder_inner_proc_unlock(proc);
4260 			if (put_user(e->cmd, (uint32_t __user *)ptr))
4261 				return -EFAULT;
4262 			cmd = e->cmd;
4263 			e->cmd = BR_OK;
4264 			ptr += sizeof(uint32_t);
4265 
4266 			binder_stat_br(proc, thread, cmd);
4267 		} break;
4268 		case BINDER_WORK_TRANSACTION_COMPLETE: {
4269 			binder_inner_proc_unlock(proc);
4270 			cmd = BR_TRANSACTION_COMPLETE;
4271 			if (put_user(cmd, (uint32_t __user *)ptr))
4272 				return -EFAULT;
4273 			ptr += sizeof(uint32_t);
4274 
4275 			binder_stat_br(proc, thread, cmd);
4276 			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4277 				     "%d:%d BR_TRANSACTION_COMPLETE\n",
4278 				     proc->pid, thread->pid);
4279 			kfree(w);
4280 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4281 		} break;
4282 		case BINDER_WORK_NODE: {
4283 			struct binder_node *node = container_of(w, struct binder_node, work);
4284 			int strong, weak;
4285 			binder_uintptr_t node_ptr = node->ptr;
4286 			binder_uintptr_t node_cookie = node->cookie;
4287 			int node_debug_id = node->debug_id;
4288 			int has_weak_ref;
4289 			int has_strong_ref;
4290 			void __user *orig_ptr = ptr;
4291 
4292 			BUG_ON(proc != node->proc);
4293 			strong = node->internal_strong_refs ||
4294 					node->local_strong_refs;
4295 			weak = !hlist_empty(&node->refs) ||
4296 					node->local_weak_refs ||
4297 					node->tmp_refs || strong;
4298 			has_strong_ref = node->has_strong_ref;
4299 			has_weak_ref = node->has_weak_ref;
4300 
4301 			if (weak && !has_weak_ref) {
4302 				node->has_weak_ref = 1;
4303 				node->pending_weak_ref = 1;
4304 				node->local_weak_refs++;
4305 			}
4306 			if (strong && !has_strong_ref) {
4307 				node->has_strong_ref = 1;
4308 				node->pending_strong_ref = 1;
4309 				node->local_strong_refs++;
4310 			}
4311 			if (!strong && has_strong_ref)
4312 				node->has_strong_ref = 0;
4313 			if (!weak && has_weak_ref)
4314 				node->has_weak_ref = 0;
4315 			if (!weak && !strong) {
4316 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4317 					     "%d:%d node %d u%016llx c%016llx deleted\n",
4318 					     proc->pid, thread->pid,
4319 					     node_debug_id,
4320 					     (u64)node_ptr,
4321 					     (u64)node_cookie);
4322 				rb_erase(&node->rb_node, &proc->nodes);
4323 				binder_inner_proc_unlock(proc);
4324 				binder_node_lock(node);
4325 				/*
4326 				 * Acquire the node lock before freeing the
4327 				 * node to serialize with other threads that
4328 				 * may have been holding the node lock while
4329 				 * decrementing this node (avoids race where
4330 				 * this thread frees while the other thread
4331 				 * is unlocking the node after the final
4332 				 * decrement)
4333 				 */
4334 				binder_node_unlock(node);
4335 				binder_free_node(node);
4336 			} else
4337 				binder_inner_proc_unlock(proc);
4338 
4339 			if (weak && !has_weak_ref)
4340 				ret = binder_put_node_cmd(
4341 						proc, thread, &ptr, node_ptr,
4342 						node_cookie, node_debug_id,
4343 						BR_INCREFS, "BR_INCREFS");
4344 			if (!ret && strong && !has_strong_ref)
4345 				ret = binder_put_node_cmd(
4346 						proc, thread, &ptr, node_ptr,
4347 						node_cookie, node_debug_id,
4348 						BR_ACQUIRE, "BR_ACQUIRE");
4349 			if (!ret && !strong && has_strong_ref)
4350 				ret = binder_put_node_cmd(
4351 						proc, thread, &ptr, node_ptr,
4352 						node_cookie, node_debug_id,
4353 						BR_RELEASE, "BR_RELEASE");
4354 			if (!ret && !weak && has_weak_ref)
4355 				ret = binder_put_node_cmd(
4356 						proc, thread, &ptr, node_ptr,
4357 						node_cookie, node_debug_id,
4358 						BR_DECREFS, "BR_DECREFS");
4359 			if (orig_ptr == ptr)
4360 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4361 					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
4362 					     proc->pid, thread->pid,
4363 					     node_debug_id,
4364 					     (u64)node_ptr,
4365 					     (u64)node_cookie);
4366 			if (ret)
4367 				return ret;
4368 		} break;
4369 		case BINDER_WORK_DEAD_BINDER:
4370 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4371 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4372 			struct binder_ref_death *death;
4373 			uint32_t cmd;
4374 			binder_uintptr_t cookie;
4375 
4376 			death = container_of(w, struct binder_ref_death, work);
4377 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4378 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4379 			else
4380 				cmd = BR_DEAD_BINDER;
4381 			cookie = death->cookie;
4382 
4383 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4384 				     "%d:%d %s %016llx\n",
4385 				      proc->pid, thread->pid,
4386 				      cmd == BR_DEAD_BINDER ?
4387 				      "BR_DEAD_BINDER" :
4388 				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4389 				      (u64)cookie);
4390 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4391 				binder_inner_proc_unlock(proc);
4392 				kfree(death);
4393 				binder_stats_deleted(BINDER_STAT_DEATH);
4394 			} else {
4395 				binder_enqueue_work_ilocked(
4396 						w, &proc->delivered_death);
4397 				binder_inner_proc_unlock(proc);
4398 			}
4399 			if (put_user(cmd, (uint32_t __user *)ptr))
4400 				return -EFAULT;
4401 			ptr += sizeof(uint32_t);
4402 			if (put_user(cookie,
4403 				     (binder_uintptr_t __user *)ptr))
4404 				return -EFAULT;
4405 			ptr += sizeof(binder_uintptr_t);
4406 			binder_stat_br(proc, thread, cmd);
4407 			if (cmd == BR_DEAD_BINDER)
4408 				goto done; /* DEAD_BINDER notifications can cause transactions */
4409 		} break;
4410 		default:
4411 			binder_inner_proc_unlock(proc);
4412 			pr_err("%d:%d: bad work type %d\n",
4413 			       proc->pid, thread->pid, w->type);
4414 			break;
4415 		}
4416 
4417 		if (!t)
4418 			continue;
4419 
4420 		BUG_ON(t->buffer == NULL);
4421 		if (t->buffer->target_node) {
4422 			struct binder_node *target_node = t->buffer->target_node;
4423 
4424 			trd->target.ptr = target_node->ptr;
4425 			trd->cookie =  target_node->cookie;
4426 			t->saved_priority = task_nice(current);
4427 			if (t->priority < target_node->min_priority &&
4428 			    !(t->flags & TF_ONE_WAY))
4429 				binder_set_nice(t->priority);
4430 			else if (!(t->flags & TF_ONE_WAY) ||
4431 				 t->saved_priority > target_node->min_priority)
4432 				binder_set_nice(target_node->min_priority);
4433 			cmd = BR_TRANSACTION;
4434 		} else {
4435 			trd->target.ptr = 0;
4436 			trd->cookie = 0;
4437 			cmd = BR_REPLY;
4438 		}
4439 		trd->code = t->code;
4440 		trd->flags = t->flags;
4441 		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4442 
4443 		t_from = binder_get_txn_from(t);
4444 		if (t_from) {
4445 			struct task_struct *sender = t_from->proc->tsk;
4446 
4447 			trd->sender_pid =
4448 				task_tgid_nr_ns(sender,
4449 						task_active_pid_ns(current));
4450 		} else {
4451 			trd->sender_pid = 0;
4452 		}
4453 
4454 		ret = binder_apply_fd_fixups(proc, t);
4455 		if (ret) {
4456 			struct binder_buffer *buffer = t->buffer;
4457 			bool oneway = !!(t->flags & TF_ONE_WAY);
4458 			int tid = t->debug_id;
4459 
4460 			if (t_from)
4461 				binder_thread_dec_tmpref(t_from);
4462 			buffer->transaction = NULL;
4463 			binder_cleanup_transaction(t, "fd fixups failed",
4464 						   BR_FAILED_REPLY);
4465 			binder_free_buf(proc, buffer);
4466 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4467 				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4468 				     proc->pid, thread->pid,
4469 				     oneway ? "async " :
4470 					(cmd == BR_REPLY ? "reply " : ""),
4471 				     tid, BR_FAILED_REPLY, ret, __LINE__);
4472 			if (cmd == BR_REPLY) {
4473 				cmd = BR_FAILED_REPLY;
4474 				if (put_user(cmd, (uint32_t __user *)ptr))
4475 					return -EFAULT;
4476 				ptr += sizeof(uint32_t);
4477 				binder_stat_br(proc, thread, cmd);
4478 				break;
4479 			}
4480 			continue;
4481 		}
4482 		trd->data_size = t->buffer->data_size;
4483 		trd->offsets_size = t->buffer->offsets_size;
4484 		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4485 		trd->data.ptr.offsets = trd->data.ptr.buffer +
4486 					ALIGN(t->buffer->data_size,
4487 					    sizeof(void *));
4488 
4489 		tr.secctx = t->security_ctx;
4490 		if (t->security_ctx) {
4491 			cmd = BR_TRANSACTION_SEC_CTX;
4492 			trsize = sizeof(tr);
4493 		}
4494 		if (put_user(cmd, (uint32_t __user *)ptr)) {
4495 			if (t_from)
4496 				binder_thread_dec_tmpref(t_from);
4497 
4498 			binder_cleanup_transaction(t, "put_user failed",
4499 						   BR_FAILED_REPLY);
4500 
4501 			return -EFAULT;
4502 		}
4503 		ptr += sizeof(uint32_t);
4504 		if (copy_to_user(ptr, &tr, trsize)) {
4505 			if (t_from)
4506 				binder_thread_dec_tmpref(t_from);
4507 
4508 			binder_cleanup_transaction(t, "copy_to_user failed",
4509 						   BR_FAILED_REPLY);
4510 
4511 			return -EFAULT;
4512 		}
4513 		ptr += trsize;
4514 
4515 		trace_binder_transaction_received(t);
4516 		binder_stat_br(proc, thread, cmd);
4517 		binder_debug(BINDER_DEBUG_TRANSACTION,
4518 			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4519 			     proc->pid, thread->pid,
4520 			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4521 				(cmd == BR_TRANSACTION_SEC_CTX) ?
4522 				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4523 			     t->debug_id, t_from ? t_from->proc->pid : 0,
4524 			     t_from ? t_from->pid : 0, cmd,
4525 			     t->buffer->data_size, t->buffer->offsets_size,
4526 			     (u64)trd->data.ptr.buffer,
4527 			     (u64)trd->data.ptr.offsets);
4528 
4529 		if (t_from)
4530 			binder_thread_dec_tmpref(t_from);
4531 		t->buffer->allow_user_free = 1;
4532 		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4533 			binder_inner_proc_lock(thread->proc);
4534 			t->to_parent = thread->transaction_stack;
4535 			t->to_thread = thread;
4536 			thread->transaction_stack = t;
4537 			binder_inner_proc_unlock(thread->proc);
4538 		} else {
4539 			binder_free_transaction(t);
4540 		}
4541 		break;
4542 	}
4543 
4544 done:
4545 
4546 	*consumed = ptr - buffer;
4547 	binder_inner_proc_lock(proc);
4548 	if (proc->requested_threads == 0 &&
4549 	    list_empty(&thread->proc->waiting_threads) &&
4550 	    proc->requested_threads_started < proc->max_threads &&
4551 	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4552 	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
4553 	     /*spawn a new thread if we leave this out */) {
4554 		proc->requested_threads++;
4555 		binder_inner_proc_unlock(proc);
4556 		binder_debug(BINDER_DEBUG_THREADS,
4557 			     "%d:%d BR_SPAWN_LOOPER\n",
4558 			     proc->pid, thread->pid);
4559 		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4560 			return -EFAULT;
4561 		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4562 	} else
4563 		binder_inner_proc_unlock(proc);
4564 	return 0;
4565 }
4566 
4567 static void binder_release_work(struct binder_proc *proc,
4568 				struct list_head *list)
4569 {
4570 	struct binder_work *w;
4571 
4572 	while (1) {
4573 		w = binder_dequeue_work_head(proc, list);
4574 		if (!w)
4575 			return;
4576 
4577 		switch (w->type) {
4578 		case BINDER_WORK_TRANSACTION: {
4579 			struct binder_transaction *t;
4580 
4581 			t = container_of(w, struct binder_transaction, work);
4582 
4583 			binder_cleanup_transaction(t, "process died.",
4584 						   BR_DEAD_REPLY);
4585 		} break;
4586 		case BINDER_WORK_RETURN_ERROR: {
4587 			struct binder_error *e = container_of(
4588 					w, struct binder_error, work);
4589 
4590 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4591 				"undelivered TRANSACTION_ERROR: %u\n",
4592 				e->cmd);
4593 		} break;
4594 		case BINDER_WORK_TRANSACTION_COMPLETE: {
4595 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4596 				"undelivered TRANSACTION_COMPLETE\n");
4597 			kfree(w);
4598 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4599 		} break;
4600 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4601 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4602 			struct binder_ref_death *death;
4603 
4604 			death = container_of(w, struct binder_ref_death, work);
4605 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4606 				"undelivered death notification, %016llx\n",
4607 				(u64)death->cookie);
4608 			kfree(death);
4609 			binder_stats_deleted(BINDER_STAT_DEATH);
4610 		} break;
4611 		default:
4612 			pr_err("unexpected work type, %d, not freed\n",
4613 			       w->type);
4614 			break;
4615 		}
4616 	}
4617 
4618 }
4619 
4620 static struct binder_thread *binder_get_thread_ilocked(
4621 		struct binder_proc *proc, struct binder_thread *new_thread)
4622 {
4623 	struct binder_thread *thread = NULL;
4624 	struct rb_node *parent = NULL;
4625 	struct rb_node **p = &proc->threads.rb_node;
4626 
4627 	while (*p) {
4628 		parent = *p;
4629 		thread = rb_entry(parent, struct binder_thread, rb_node);
4630 
4631 		if (current->pid < thread->pid)
4632 			p = &(*p)->rb_left;
4633 		else if (current->pid > thread->pid)
4634 			p = &(*p)->rb_right;
4635 		else
4636 			return thread;
4637 	}
4638 	if (!new_thread)
4639 		return NULL;
4640 	thread = new_thread;
4641 	binder_stats_created(BINDER_STAT_THREAD);
4642 	thread->proc = proc;
4643 	thread->pid = current->pid;
4644 	atomic_set(&thread->tmp_ref, 0);
4645 	init_waitqueue_head(&thread->wait);
4646 	INIT_LIST_HEAD(&thread->todo);
4647 	rb_link_node(&thread->rb_node, parent, p);
4648 	rb_insert_color(&thread->rb_node, &proc->threads);
4649 	thread->looper_need_return = true;
4650 	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4651 	thread->return_error.cmd = BR_OK;
4652 	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4653 	thread->reply_error.cmd = BR_OK;
4654 	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4655 	return thread;
4656 }
4657 
4658 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4659 {
4660 	struct binder_thread *thread;
4661 	struct binder_thread *new_thread;
4662 
4663 	binder_inner_proc_lock(proc);
4664 	thread = binder_get_thread_ilocked(proc, NULL);
4665 	binder_inner_proc_unlock(proc);
4666 	if (!thread) {
4667 		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4668 		if (new_thread == NULL)
4669 			return NULL;
4670 		binder_inner_proc_lock(proc);
4671 		thread = binder_get_thread_ilocked(proc, new_thread);
4672 		binder_inner_proc_unlock(proc);
4673 		if (thread != new_thread)
4674 			kfree(new_thread);
4675 	}
4676 	return thread;
4677 }
4678 
4679 static void binder_free_proc(struct binder_proc *proc)
4680 {
4681 	BUG_ON(!list_empty(&proc->todo));
4682 	BUG_ON(!list_empty(&proc->delivered_death));
4683 	binder_alloc_deferred_release(&proc->alloc);
4684 	put_task_struct(proc->tsk);
4685 	binder_stats_deleted(BINDER_STAT_PROC);
4686 	kfree(proc);
4687 }
4688 
4689 static void binder_free_thread(struct binder_thread *thread)
4690 {
4691 	BUG_ON(!list_empty(&thread->todo));
4692 	binder_stats_deleted(BINDER_STAT_THREAD);
4693 	binder_proc_dec_tmpref(thread->proc);
4694 	kfree(thread);
4695 }
4696 
4697 static int binder_thread_release(struct binder_proc *proc,
4698 				 struct binder_thread *thread)
4699 {
4700 	struct binder_transaction *t;
4701 	struct binder_transaction *send_reply = NULL;
4702 	int active_transactions = 0;
4703 	struct binder_transaction *last_t = NULL;
4704 
4705 	binder_inner_proc_lock(thread->proc);
4706 	/*
4707 	 * take a ref on the proc so it survives
4708 	 * after we remove this thread from proc->threads.
4709 	 * The corresponding dec is when we actually
4710 	 * free the thread in binder_free_thread()
4711 	 */
4712 	proc->tmp_ref++;
4713 	/*
4714 	 * take a ref on this thread to ensure it
4715 	 * survives while we are releasing it
4716 	 */
4717 	atomic_inc(&thread->tmp_ref);
4718 	rb_erase(&thread->rb_node, &proc->threads);
4719 	t = thread->transaction_stack;
4720 	if (t) {
4721 		spin_lock(&t->lock);
4722 		if (t->to_thread == thread)
4723 			send_reply = t;
4724 	} else {
4725 		__acquire(&t->lock);
4726 	}
4727 	thread->is_dead = true;
4728 
4729 	while (t) {
4730 		last_t = t;
4731 		active_transactions++;
4732 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4733 			     "release %d:%d transaction %d %s, still active\n",
4734 			      proc->pid, thread->pid,
4735 			     t->debug_id,
4736 			     (t->to_thread == thread) ? "in" : "out");
4737 
4738 		if (t->to_thread == thread) {
4739 			t->to_proc = NULL;
4740 			t->to_thread = NULL;
4741 			if (t->buffer) {
4742 				t->buffer->transaction = NULL;
4743 				t->buffer = NULL;
4744 			}
4745 			t = t->to_parent;
4746 		} else if (t->from == thread) {
4747 			t->from = NULL;
4748 			t = t->from_parent;
4749 		} else
4750 			BUG();
4751 		spin_unlock(&last_t->lock);
4752 		if (t)
4753 			spin_lock(&t->lock);
4754 		else
4755 			__acquire(&t->lock);
4756 	}
4757 	/* annotation for sparse, lock not acquired in last iteration above */
4758 	__release(&t->lock);
4759 
4760 	/*
4761 	 * If this thread used poll, make sure we remove the waitqueue
4762 	 * from any epoll data structures holding it with POLLFREE.
4763 	 * waitqueue_active() is safe to use here because we're holding
4764 	 * the inner lock.
4765 	 */
4766 	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4767 	    waitqueue_active(&thread->wait)) {
4768 		wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
4769 	}
4770 
4771 	binder_inner_proc_unlock(thread->proc);
4772 
4773 	/*
4774 	 * This is needed to avoid races between wake_up_poll() above and
4775 	 * and ep_remove_waitqueue() called for other reasons (eg the epoll file
4776 	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4777 	 * lock, so we can be sure it's done after calling synchronize_rcu().
4778 	 */
4779 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
4780 		synchronize_rcu();
4781 
4782 	if (send_reply)
4783 		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4784 	binder_release_work(proc, &thread->todo);
4785 	binder_thread_dec_tmpref(thread);
4786 	return active_transactions;
4787 }
4788 
4789 static __poll_t binder_poll(struct file *filp,
4790 				struct poll_table_struct *wait)
4791 {
4792 	struct binder_proc *proc = filp->private_data;
4793 	struct binder_thread *thread = NULL;
4794 	bool wait_for_proc_work;
4795 
4796 	thread = binder_get_thread(proc);
4797 	if (!thread)
4798 		return POLLERR;
4799 
4800 	binder_inner_proc_lock(thread->proc);
4801 	thread->looper |= BINDER_LOOPER_STATE_POLL;
4802 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4803 
4804 	binder_inner_proc_unlock(thread->proc);
4805 
4806 	poll_wait(filp, &thread->wait, wait);
4807 
4808 	if (binder_has_work(thread, wait_for_proc_work))
4809 		return EPOLLIN;
4810 
4811 	return 0;
4812 }
4813 
4814 static int binder_ioctl_write_read(struct file *filp,
4815 				unsigned int cmd, unsigned long arg,
4816 				struct binder_thread *thread)
4817 {
4818 	int ret = 0;
4819 	struct binder_proc *proc = filp->private_data;
4820 	unsigned int size = _IOC_SIZE(cmd);
4821 	void __user *ubuf = (void __user *)arg;
4822 	struct binder_write_read bwr;
4823 
4824 	if (size != sizeof(struct binder_write_read)) {
4825 		ret = -EINVAL;
4826 		goto out;
4827 	}
4828 	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4829 		ret = -EFAULT;
4830 		goto out;
4831 	}
4832 	binder_debug(BINDER_DEBUG_READ_WRITE,
4833 		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4834 		     proc->pid, thread->pid,
4835 		     (u64)bwr.write_size, (u64)bwr.write_buffer,
4836 		     (u64)bwr.read_size, (u64)bwr.read_buffer);
4837 
4838 	if (bwr.write_size > 0) {
4839 		ret = binder_thread_write(proc, thread,
4840 					  bwr.write_buffer,
4841 					  bwr.write_size,
4842 					  &bwr.write_consumed);
4843 		trace_binder_write_done(ret);
4844 		if (ret < 0) {
4845 			bwr.read_consumed = 0;
4846 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4847 				ret = -EFAULT;
4848 			goto out;
4849 		}
4850 	}
4851 	if (bwr.read_size > 0) {
4852 		ret = binder_thread_read(proc, thread, bwr.read_buffer,
4853 					 bwr.read_size,
4854 					 &bwr.read_consumed,
4855 					 filp->f_flags & O_NONBLOCK);
4856 		trace_binder_read_done(ret);
4857 		binder_inner_proc_lock(proc);
4858 		if (!binder_worklist_empty_ilocked(&proc->todo))
4859 			binder_wakeup_proc_ilocked(proc);
4860 		binder_inner_proc_unlock(proc);
4861 		if (ret < 0) {
4862 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4863 				ret = -EFAULT;
4864 			goto out;
4865 		}
4866 	}
4867 	binder_debug(BINDER_DEBUG_READ_WRITE,
4868 		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4869 		     proc->pid, thread->pid,
4870 		     (u64)bwr.write_consumed, (u64)bwr.write_size,
4871 		     (u64)bwr.read_consumed, (u64)bwr.read_size);
4872 	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4873 		ret = -EFAULT;
4874 		goto out;
4875 	}
4876 out:
4877 	return ret;
4878 }
4879 
4880 static int binder_ioctl_set_ctx_mgr(struct file *filp,
4881 				    struct flat_binder_object *fbo)
4882 {
4883 	int ret = 0;
4884 	struct binder_proc *proc = filp->private_data;
4885 	struct binder_context *context = proc->context;
4886 	struct binder_node *new_node;
4887 	kuid_t curr_euid = current_euid();
4888 
4889 	mutex_lock(&context->context_mgr_node_lock);
4890 	if (context->binder_context_mgr_node) {
4891 		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4892 		ret = -EBUSY;
4893 		goto out;
4894 	}
4895 	ret = security_binder_set_context_mgr(proc->tsk);
4896 	if (ret < 0)
4897 		goto out;
4898 	if (uid_valid(context->binder_context_mgr_uid)) {
4899 		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4900 			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4901 			       from_kuid(&init_user_ns, curr_euid),
4902 			       from_kuid(&init_user_ns,
4903 					 context->binder_context_mgr_uid));
4904 			ret = -EPERM;
4905 			goto out;
4906 		}
4907 	} else {
4908 		context->binder_context_mgr_uid = curr_euid;
4909 	}
4910 	new_node = binder_new_node(proc, fbo);
4911 	if (!new_node) {
4912 		ret = -ENOMEM;
4913 		goto out;
4914 	}
4915 	binder_node_lock(new_node);
4916 	new_node->local_weak_refs++;
4917 	new_node->local_strong_refs++;
4918 	new_node->has_strong_ref = 1;
4919 	new_node->has_weak_ref = 1;
4920 	context->binder_context_mgr_node = new_node;
4921 	binder_node_unlock(new_node);
4922 	binder_put_node(new_node);
4923 out:
4924 	mutex_unlock(&context->context_mgr_node_lock);
4925 	return ret;
4926 }
4927 
4928 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4929 		struct binder_node_info_for_ref *info)
4930 {
4931 	struct binder_node *node;
4932 	struct binder_context *context = proc->context;
4933 	__u32 handle = info->handle;
4934 
4935 	if (info->strong_count || info->weak_count || info->reserved1 ||
4936 	    info->reserved2 || info->reserved3) {
4937 		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
4938 				  proc->pid);
4939 		return -EINVAL;
4940 	}
4941 
4942 	/* This ioctl may only be used by the context manager */
4943 	mutex_lock(&context->context_mgr_node_lock);
4944 	if (!context->binder_context_mgr_node ||
4945 		context->binder_context_mgr_node->proc != proc) {
4946 		mutex_unlock(&context->context_mgr_node_lock);
4947 		return -EPERM;
4948 	}
4949 	mutex_unlock(&context->context_mgr_node_lock);
4950 
4951 	node = binder_get_node_from_ref(proc, handle, true, NULL);
4952 	if (!node)
4953 		return -EINVAL;
4954 
4955 	info->strong_count = node->local_strong_refs +
4956 		node->internal_strong_refs;
4957 	info->weak_count = node->local_weak_refs;
4958 
4959 	binder_put_node(node);
4960 
4961 	return 0;
4962 }
4963 
4964 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4965 				struct binder_node_debug_info *info)
4966 {
4967 	struct rb_node *n;
4968 	binder_uintptr_t ptr = info->ptr;
4969 
4970 	memset(info, 0, sizeof(*info));
4971 
4972 	binder_inner_proc_lock(proc);
4973 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4974 		struct binder_node *node = rb_entry(n, struct binder_node,
4975 						    rb_node);
4976 		if (node->ptr > ptr) {
4977 			info->ptr = node->ptr;
4978 			info->cookie = node->cookie;
4979 			info->has_strong_ref = node->has_strong_ref;
4980 			info->has_weak_ref = node->has_weak_ref;
4981 			break;
4982 		}
4983 	}
4984 	binder_inner_proc_unlock(proc);
4985 
4986 	return 0;
4987 }
4988 
4989 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4990 {
4991 	int ret;
4992 	struct binder_proc *proc = filp->private_data;
4993 	struct binder_thread *thread;
4994 	unsigned int size = _IOC_SIZE(cmd);
4995 	void __user *ubuf = (void __user *)arg;
4996 
4997 	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
4998 			proc->pid, current->pid, cmd, arg);*/
4999 
5000 	binder_selftest_alloc(&proc->alloc);
5001 
5002 	trace_binder_ioctl(cmd, arg);
5003 
5004 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5005 	if (ret)
5006 		goto err_unlocked;
5007 
5008 	thread = binder_get_thread(proc);
5009 	if (thread == NULL) {
5010 		ret = -ENOMEM;
5011 		goto err;
5012 	}
5013 
5014 	switch (cmd) {
5015 	case BINDER_WRITE_READ:
5016 		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5017 		if (ret)
5018 			goto err;
5019 		break;
5020 	case BINDER_SET_MAX_THREADS: {
5021 		int max_threads;
5022 
5023 		if (copy_from_user(&max_threads, ubuf,
5024 				   sizeof(max_threads))) {
5025 			ret = -EINVAL;
5026 			goto err;
5027 		}
5028 		binder_inner_proc_lock(proc);
5029 		proc->max_threads = max_threads;
5030 		binder_inner_proc_unlock(proc);
5031 		break;
5032 	}
5033 	case BINDER_SET_CONTEXT_MGR_EXT: {
5034 		struct flat_binder_object fbo;
5035 
5036 		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5037 			ret = -EINVAL;
5038 			goto err;
5039 		}
5040 		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5041 		if (ret)
5042 			goto err;
5043 		break;
5044 	}
5045 	case BINDER_SET_CONTEXT_MGR:
5046 		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5047 		if (ret)
5048 			goto err;
5049 		break;
5050 	case BINDER_THREAD_EXIT:
5051 		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5052 			     proc->pid, thread->pid);
5053 		binder_thread_release(proc, thread);
5054 		thread = NULL;
5055 		break;
5056 	case BINDER_VERSION: {
5057 		struct binder_version __user *ver = ubuf;
5058 
5059 		if (size != sizeof(struct binder_version)) {
5060 			ret = -EINVAL;
5061 			goto err;
5062 		}
5063 		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5064 			     &ver->protocol_version)) {
5065 			ret = -EINVAL;
5066 			goto err;
5067 		}
5068 		break;
5069 	}
5070 	case BINDER_GET_NODE_INFO_FOR_REF: {
5071 		struct binder_node_info_for_ref info;
5072 
5073 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5074 			ret = -EFAULT;
5075 			goto err;
5076 		}
5077 
5078 		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5079 		if (ret < 0)
5080 			goto err;
5081 
5082 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5083 			ret = -EFAULT;
5084 			goto err;
5085 		}
5086 
5087 		break;
5088 	}
5089 	case BINDER_GET_NODE_DEBUG_INFO: {
5090 		struct binder_node_debug_info info;
5091 
5092 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5093 			ret = -EFAULT;
5094 			goto err;
5095 		}
5096 
5097 		ret = binder_ioctl_get_node_debug_info(proc, &info);
5098 		if (ret < 0)
5099 			goto err;
5100 
5101 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5102 			ret = -EFAULT;
5103 			goto err;
5104 		}
5105 		break;
5106 	}
5107 	default:
5108 		ret = -EINVAL;
5109 		goto err;
5110 	}
5111 	ret = 0;
5112 err:
5113 	if (thread)
5114 		thread->looper_need_return = false;
5115 	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5116 	if (ret && ret != -ERESTARTSYS)
5117 		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5118 err_unlocked:
5119 	trace_binder_ioctl_done(ret);
5120 	return ret;
5121 }
5122 
5123 static void binder_vma_open(struct vm_area_struct *vma)
5124 {
5125 	struct binder_proc *proc = vma->vm_private_data;
5126 
5127 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5128 		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5129 		     proc->pid, vma->vm_start, vma->vm_end,
5130 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5131 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5132 }
5133 
5134 static void binder_vma_close(struct vm_area_struct *vma)
5135 {
5136 	struct binder_proc *proc = vma->vm_private_data;
5137 
5138 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5139 		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5140 		     proc->pid, vma->vm_start, vma->vm_end,
5141 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5142 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5143 	binder_alloc_vma_close(&proc->alloc);
5144 }
5145 
5146 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5147 {
5148 	return VM_FAULT_SIGBUS;
5149 }
5150 
5151 static const struct vm_operations_struct binder_vm_ops = {
5152 	.open = binder_vma_open,
5153 	.close = binder_vma_close,
5154 	.fault = binder_vm_fault,
5155 };
5156 
5157 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5158 {
5159 	int ret;
5160 	struct binder_proc *proc = filp->private_data;
5161 	const char *failure_string;
5162 
5163 	if (proc->tsk != current->group_leader)
5164 		return -EINVAL;
5165 
5166 	if ((vma->vm_end - vma->vm_start) > SZ_4M)
5167 		vma->vm_end = vma->vm_start + SZ_4M;
5168 
5169 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5170 		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5171 		     __func__, proc->pid, vma->vm_start, vma->vm_end,
5172 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5173 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5174 
5175 	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5176 		ret = -EPERM;
5177 		failure_string = "bad vm_flags";
5178 		goto err_bad_arg;
5179 	}
5180 	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5181 	vma->vm_flags &= ~VM_MAYWRITE;
5182 
5183 	vma->vm_ops = &binder_vm_ops;
5184 	vma->vm_private_data = proc;
5185 
5186 	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
5187 	if (ret)
5188 		return ret;
5189 	return 0;
5190 
5191 err_bad_arg:
5192 	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5193 	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
5194 	return ret;
5195 }
5196 
5197 static int binder_open(struct inode *nodp, struct file *filp)
5198 {
5199 	struct binder_proc *proc;
5200 	struct binder_device *binder_dev;
5201 
5202 	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5203 		     current->group_leader->pid, current->pid);
5204 
5205 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5206 	if (proc == NULL)
5207 		return -ENOMEM;
5208 	spin_lock_init(&proc->inner_lock);
5209 	spin_lock_init(&proc->outer_lock);
5210 	get_task_struct(current->group_leader);
5211 	proc->tsk = current->group_leader;
5212 	INIT_LIST_HEAD(&proc->todo);
5213 	proc->default_priority = task_nice(current);
5214 	/* binderfs stashes devices in i_private */
5215 	if (is_binderfs_device(nodp))
5216 		binder_dev = nodp->i_private;
5217 	else
5218 		binder_dev = container_of(filp->private_data,
5219 					  struct binder_device, miscdev);
5220 	proc->context = &binder_dev->context;
5221 	binder_alloc_init(&proc->alloc);
5222 
5223 	binder_stats_created(BINDER_STAT_PROC);
5224 	proc->pid = current->group_leader->pid;
5225 	INIT_LIST_HEAD(&proc->delivered_death);
5226 	INIT_LIST_HEAD(&proc->waiting_threads);
5227 	filp->private_data = proc;
5228 
5229 	mutex_lock(&binder_procs_lock);
5230 	hlist_add_head(&proc->proc_node, &binder_procs);
5231 	mutex_unlock(&binder_procs_lock);
5232 
5233 	if (binder_debugfs_dir_entry_proc) {
5234 		char strbuf[11];
5235 
5236 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5237 		/*
5238 		 * proc debug entries are shared between contexts, so
5239 		 * this will fail if the process tries to open the driver
5240 		 * again with a different context. The priting code will
5241 		 * anyway print all contexts that a given PID has, so this
5242 		 * is not a problem.
5243 		 */
5244 		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5245 			binder_debugfs_dir_entry_proc,
5246 			(void *)(unsigned long)proc->pid,
5247 			&proc_fops);
5248 	}
5249 
5250 	return 0;
5251 }
5252 
5253 static int binder_flush(struct file *filp, fl_owner_t id)
5254 {
5255 	struct binder_proc *proc = filp->private_data;
5256 
5257 	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5258 
5259 	return 0;
5260 }
5261 
5262 static void binder_deferred_flush(struct binder_proc *proc)
5263 {
5264 	struct rb_node *n;
5265 	int wake_count = 0;
5266 
5267 	binder_inner_proc_lock(proc);
5268 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5269 		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5270 
5271 		thread->looper_need_return = true;
5272 		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5273 			wake_up_interruptible(&thread->wait);
5274 			wake_count++;
5275 		}
5276 	}
5277 	binder_inner_proc_unlock(proc);
5278 
5279 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5280 		     "binder_flush: %d woke %d threads\n", proc->pid,
5281 		     wake_count);
5282 }
5283 
5284 static int binder_release(struct inode *nodp, struct file *filp)
5285 {
5286 	struct binder_proc *proc = filp->private_data;
5287 
5288 	debugfs_remove(proc->debugfs_entry);
5289 	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5290 
5291 	return 0;
5292 }
5293 
5294 static int binder_node_release(struct binder_node *node, int refs)
5295 {
5296 	struct binder_ref *ref;
5297 	int death = 0;
5298 	struct binder_proc *proc = node->proc;
5299 
5300 	binder_release_work(proc, &node->async_todo);
5301 
5302 	binder_node_lock(node);
5303 	binder_inner_proc_lock(proc);
5304 	binder_dequeue_work_ilocked(&node->work);
5305 	/*
5306 	 * The caller must have taken a temporary ref on the node,
5307 	 */
5308 	BUG_ON(!node->tmp_refs);
5309 	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5310 		binder_inner_proc_unlock(proc);
5311 		binder_node_unlock(node);
5312 		binder_free_node(node);
5313 
5314 		return refs;
5315 	}
5316 
5317 	node->proc = NULL;
5318 	node->local_strong_refs = 0;
5319 	node->local_weak_refs = 0;
5320 	binder_inner_proc_unlock(proc);
5321 
5322 	spin_lock(&binder_dead_nodes_lock);
5323 	hlist_add_head(&node->dead_node, &binder_dead_nodes);
5324 	spin_unlock(&binder_dead_nodes_lock);
5325 
5326 	hlist_for_each_entry(ref, &node->refs, node_entry) {
5327 		refs++;
5328 		/*
5329 		 * Need the node lock to synchronize
5330 		 * with new notification requests and the
5331 		 * inner lock to synchronize with queued
5332 		 * death notifications.
5333 		 */
5334 		binder_inner_proc_lock(ref->proc);
5335 		if (!ref->death) {
5336 			binder_inner_proc_unlock(ref->proc);
5337 			continue;
5338 		}
5339 
5340 		death++;
5341 
5342 		BUG_ON(!list_empty(&ref->death->work.entry));
5343 		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5344 		binder_enqueue_work_ilocked(&ref->death->work,
5345 					    &ref->proc->todo);
5346 		binder_wakeup_proc_ilocked(ref->proc);
5347 		binder_inner_proc_unlock(ref->proc);
5348 	}
5349 
5350 	binder_debug(BINDER_DEBUG_DEAD_BINDER,
5351 		     "node %d now dead, refs %d, death %d\n",
5352 		     node->debug_id, refs, death);
5353 	binder_node_unlock(node);
5354 	binder_put_node(node);
5355 
5356 	return refs;
5357 }
5358 
5359 static void binder_deferred_release(struct binder_proc *proc)
5360 {
5361 	struct binder_context *context = proc->context;
5362 	struct rb_node *n;
5363 	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5364 
5365 	mutex_lock(&binder_procs_lock);
5366 	hlist_del(&proc->proc_node);
5367 	mutex_unlock(&binder_procs_lock);
5368 
5369 	mutex_lock(&context->context_mgr_node_lock);
5370 	if (context->binder_context_mgr_node &&
5371 	    context->binder_context_mgr_node->proc == proc) {
5372 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
5373 			     "%s: %d context_mgr_node gone\n",
5374 			     __func__, proc->pid);
5375 		context->binder_context_mgr_node = NULL;
5376 	}
5377 	mutex_unlock(&context->context_mgr_node_lock);
5378 	binder_inner_proc_lock(proc);
5379 	/*
5380 	 * Make sure proc stays alive after we
5381 	 * remove all the threads
5382 	 */
5383 	proc->tmp_ref++;
5384 
5385 	proc->is_dead = true;
5386 	threads = 0;
5387 	active_transactions = 0;
5388 	while ((n = rb_first(&proc->threads))) {
5389 		struct binder_thread *thread;
5390 
5391 		thread = rb_entry(n, struct binder_thread, rb_node);
5392 		binder_inner_proc_unlock(proc);
5393 		threads++;
5394 		active_transactions += binder_thread_release(proc, thread);
5395 		binder_inner_proc_lock(proc);
5396 	}
5397 
5398 	nodes = 0;
5399 	incoming_refs = 0;
5400 	while ((n = rb_first(&proc->nodes))) {
5401 		struct binder_node *node;
5402 
5403 		node = rb_entry(n, struct binder_node, rb_node);
5404 		nodes++;
5405 		/*
5406 		 * take a temporary ref on the node before
5407 		 * calling binder_node_release() which will either
5408 		 * kfree() the node or call binder_put_node()
5409 		 */
5410 		binder_inc_node_tmpref_ilocked(node);
5411 		rb_erase(&node->rb_node, &proc->nodes);
5412 		binder_inner_proc_unlock(proc);
5413 		incoming_refs = binder_node_release(node, incoming_refs);
5414 		binder_inner_proc_lock(proc);
5415 	}
5416 	binder_inner_proc_unlock(proc);
5417 
5418 	outgoing_refs = 0;
5419 	binder_proc_lock(proc);
5420 	while ((n = rb_first(&proc->refs_by_desc))) {
5421 		struct binder_ref *ref;
5422 
5423 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
5424 		outgoing_refs++;
5425 		binder_cleanup_ref_olocked(ref);
5426 		binder_proc_unlock(proc);
5427 		binder_free_ref(ref);
5428 		binder_proc_lock(proc);
5429 	}
5430 	binder_proc_unlock(proc);
5431 
5432 	binder_release_work(proc, &proc->todo);
5433 	binder_release_work(proc, &proc->delivered_death);
5434 
5435 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5436 		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5437 		     __func__, proc->pid, threads, nodes, incoming_refs,
5438 		     outgoing_refs, active_transactions);
5439 
5440 	binder_proc_dec_tmpref(proc);
5441 }
5442 
5443 static void binder_deferred_func(struct work_struct *work)
5444 {
5445 	struct binder_proc *proc;
5446 
5447 	int defer;
5448 
5449 	do {
5450 		mutex_lock(&binder_deferred_lock);
5451 		if (!hlist_empty(&binder_deferred_list)) {
5452 			proc = hlist_entry(binder_deferred_list.first,
5453 					struct binder_proc, deferred_work_node);
5454 			hlist_del_init(&proc->deferred_work_node);
5455 			defer = proc->deferred_work;
5456 			proc->deferred_work = 0;
5457 		} else {
5458 			proc = NULL;
5459 			defer = 0;
5460 		}
5461 		mutex_unlock(&binder_deferred_lock);
5462 
5463 		if (defer & BINDER_DEFERRED_FLUSH)
5464 			binder_deferred_flush(proc);
5465 
5466 		if (defer & BINDER_DEFERRED_RELEASE)
5467 			binder_deferred_release(proc); /* frees proc */
5468 	} while (proc);
5469 }
5470 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5471 
5472 static void
5473 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5474 {
5475 	mutex_lock(&binder_deferred_lock);
5476 	proc->deferred_work |= defer;
5477 	if (hlist_unhashed(&proc->deferred_work_node)) {
5478 		hlist_add_head(&proc->deferred_work_node,
5479 				&binder_deferred_list);
5480 		schedule_work(&binder_deferred_work);
5481 	}
5482 	mutex_unlock(&binder_deferred_lock);
5483 }
5484 
5485 static void print_binder_transaction_ilocked(struct seq_file *m,
5486 					     struct binder_proc *proc,
5487 					     const char *prefix,
5488 					     struct binder_transaction *t)
5489 {
5490 	struct binder_proc *to_proc;
5491 	struct binder_buffer *buffer = t->buffer;
5492 
5493 	spin_lock(&t->lock);
5494 	to_proc = t->to_proc;
5495 	seq_printf(m,
5496 		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5497 		   prefix, t->debug_id, t,
5498 		   t->from ? t->from->proc->pid : 0,
5499 		   t->from ? t->from->pid : 0,
5500 		   to_proc ? to_proc->pid : 0,
5501 		   t->to_thread ? t->to_thread->pid : 0,
5502 		   t->code, t->flags, t->priority, t->need_reply);
5503 	spin_unlock(&t->lock);
5504 
5505 	if (proc != to_proc) {
5506 		/*
5507 		 * Can only safely deref buffer if we are holding the
5508 		 * correct proc inner lock for this node
5509 		 */
5510 		seq_puts(m, "\n");
5511 		return;
5512 	}
5513 
5514 	if (buffer == NULL) {
5515 		seq_puts(m, " buffer free\n");
5516 		return;
5517 	}
5518 	if (buffer->target_node)
5519 		seq_printf(m, " node %d", buffer->target_node->debug_id);
5520 	seq_printf(m, " size %zd:%zd data %pK\n",
5521 		   buffer->data_size, buffer->offsets_size,
5522 		   buffer->user_data);
5523 }
5524 
5525 static void print_binder_work_ilocked(struct seq_file *m,
5526 				     struct binder_proc *proc,
5527 				     const char *prefix,
5528 				     const char *transaction_prefix,
5529 				     struct binder_work *w)
5530 {
5531 	struct binder_node *node;
5532 	struct binder_transaction *t;
5533 
5534 	switch (w->type) {
5535 	case BINDER_WORK_TRANSACTION:
5536 		t = container_of(w, struct binder_transaction, work);
5537 		print_binder_transaction_ilocked(
5538 				m, proc, transaction_prefix, t);
5539 		break;
5540 	case BINDER_WORK_RETURN_ERROR: {
5541 		struct binder_error *e = container_of(
5542 				w, struct binder_error, work);
5543 
5544 		seq_printf(m, "%stransaction error: %u\n",
5545 			   prefix, e->cmd);
5546 	} break;
5547 	case BINDER_WORK_TRANSACTION_COMPLETE:
5548 		seq_printf(m, "%stransaction complete\n", prefix);
5549 		break;
5550 	case BINDER_WORK_NODE:
5551 		node = container_of(w, struct binder_node, work);
5552 		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5553 			   prefix, node->debug_id,
5554 			   (u64)node->ptr, (u64)node->cookie);
5555 		break;
5556 	case BINDER_WORK_DEAD_BINDER:
5557 		seq_printf(m, "%shas dead binder\n", prefix);
5558 		break;
5559 	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5560 		seq_printf(m, "%shas cleared dead binder\n", prefix);
5561 		break;
5562 	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5563 		seq_printf(m, "%shas cleared death notification\n", prefix);
5564 		break;
5565 	default:
5566 		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5567 		break;
5568 	}
5569 }
5570 
5571 static void print_binder_thread_ilocked(struct seq_file *m,
5572 					struct binder_thread *thread,
5573 					int print_always)
5574 {
5575 	struct binder_transaction *t;
5576 	struct binder_work *w;
5577 	size_t start_pos = m->count;
5578 	size_t header_pos;
5579 
5580 	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
5581 			thread->pid, thread->looper,
5582 			thread->looper_need_return,
5583 			atomic_read(&thread->tmp_ref));
5584 	header_pos = m->count;
5585 	t = thread->transaction_stack;
5586 	while (t) {
5587 		if (t->from == thread) {
5588 			print_binder_transaction_ilocked(m, thread->proc,
5589 					"    outgoing transaction", t);
5590 			t = t->from_parent;
5591 		} else if (t->to_thread == thread) {
5592 			print_binder_transaction_ilocked(m, thread->proc,
5593 						 "    incoming transaction", t);
5594 			t = t->to_parent;
5595 		} else {
5596 			print_binder_transaction_ilocked(m, thread->proc,
5597 					"    bad transaction", t);
5598 			t = NULL;
5599 		}
5600 	}
5601 	list_for_each_entry(w, &thread->todo, entry) {
5602 		print_binder_work_ilocked(m, thread->proc, "    ",
5603 					  "    pending transaction", w);
5604 	}
5605 	if (!print_always && m->count == header_pos)
5606 		m->count = start_pos;
5607 }
5608 
5609 static void print_binder_node_nilocked(struct seq_file *m,
5610 				       struct binder_node *node)
5611 {
5612 	struct binder_ref *ref;
5613 	struct binder_work *w;
5614 	int count;
5615 
5616 	count = 0;
5617 	hlist_for_each_entry(ref, &node->refs, node_entry)
5618 		count++;
5619 
5620 	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5621 		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
5622 		   node->has_strong_ref, node->has_weak_ref,
5623 		   node->local_strong_refs, node->local_weak_refs,
5624 		   node->internal_strong_refs, count, node->tmp_refs);
5625 	if (count) {
5626 		seq_puts(m, " proc");
5627 		hlist_for_each_entry(ref, &node->refs, node_entry)
5628 			seq_printf(m, " %d", ref->proc->pid);
5629 	}
5630 	seq_puts(m, "\n");
5631 	if (node->proc) {
5632 		list_for_each_entry(w, &node->async_todo, entry)
5633 			print_binder_work_ilocked(m, node->proc, "    ",
5634 					  "    pending async transaction", w);
5635 	}
5636 }
5637 
5638 static void print_binder_ref_olocked(struct seq_file *m,
5639 				     struct binder_ref *ref)
5640 {
5641 	binder_node_lock(ref->node);
5642 	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
5643 		   ref->data.debug_id, ref->data.desc,
5644 		   ref->node->proc ? "" : "dead ",
5645 		   ref->node->debug_id, ref->data.strong,
5646 		   ref->data.weak, ref->death);
5647 	binder_node_unlock(ref->node);
5648 }
5649 
5650 static void print_binder_proc(struct seq_file *m,
5651 			      struct binder_proc *proc, int print_all)
5652 {
5653 	struct binder_work *w;
5654 	struct rb_node *n;
5655 	size_t start_pos = m->count;
5656 	size_t header_pos;
5657 	struct binder_node *last_node = NULL;
5658 
5659 	seq_printf(m, "proc %d\n", proc->pid);
5660 	seq_printf(m, "context %s\n", proc->context->name);
5661 	header_pos = m->count;
5662 
5663 	binder_inner_proc_lock(proc);
5664 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5665 		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5666 						rb_node), print_all);
5667 
5668 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5669 		struct binder_node *node = rb_entry(n, struct binder_node,
5670 						    rb_node);
5671 		if (!print_all && !node->has_async_transaction)
5672 			continue;
5673 
5674 		/*
5675 		 * take a temporary reference on the node so it
5676 		 * survives and isn't removed from the tree
5677 		 * while we print it.
5678 		 */
5679 		binder_inc_node_tmpref_ilocked(node);
5680 		/* Need to drop inner lock to take node lock */
5681 		binder_inner_proc_unlock(proc);
5682 		if (last_node)
5683 			binder_put_node(last_node);
5684 		binder_node_inner_lock(node);
5685 		print_binder_node_nilocked(m, node);
5686 		binder_node_inner_unlock(node);
5687 		last_node = node;
5688 		binder_inner_proc_lock(proc);
5689 	}
5690 	binder_inner_proc_unlock(proc);
5691 	if (last_node)
5692 		binder_put_node(last_node);
5693 
5694 	if (print_all) {
5695 		binder_proc_lock(proc);
5696 		for (n = rb_first(&proc->refs_by_desc);
5697 		     n != NULL;
5698 		     n = rb_next(n))
5699 			print_binder_ref_olocked(m, rb_entry(n,
5700 							    struct binder_ref,
5701 							    rb_node_desc));
5702 		binder_proc_unlock(proc);
5703 	}
5704 	binder_alloc_print_allocated(m, &proc->alloc);
5705 	binder_inner_proc_lock(proc);
5706 	list_for_each_entry(w, &proc->todo, entry)
5707 		print_binder_work_ilocked(m, proc, "  ",
5708 					  "  pending transaction", w);
5709 	list_for_each_entry(w, &proc->delivered_death, entry) {
5710 		seq_puts(m, "  has delivered dead binder\n");
5711 		break;
5712 	}
5713 	binder_inner_proc_unlock(proc);
5714 	if (!print_all && m->count == header_pos)
5715 		m->count = start_pos;
5716 }
5717 
5718 static const char * const binder_return_strings[] = {
5719 	"BR_ERROR",
5720 	"BR_OK",
5721 	"BR_TRANSACTION",
5722 	"BR_REPLY",
5723 	"BR_ACQUIRE_RESULT",
5724 	"BR_DEAD_REPLY",
5725 	"BR_TRANSACTION_COMPLETE",
5726 	"BR_INCREFS",
5727 	"BR_ACQUIRE",
5728 	"BR_RELEASE",
5729 	"BR_DECREFS",
5730 	"BR_ATTEMPT_ACQUIRE",
5731 	"BR_NOOP",
5732 	"BR_SPAWN_LOOPER",
5733 	"BR_FINISHED",
5734 	"BR_DEAD_BINDER",
5735 	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
5736 	"BR_FAILED_REPLY"
5737 };
5738 
5739 static const char * const binder_command_strings[] = {
5740 	"BC_TRANSACTION",
5741 	"BC_REPLY",
5742 	"BC_ACQUIRE_RESULT",
5743 	"BC_FREE_BUFFER",
5744 	"BC_INCREFS",
5745 	"BC_ACQUIRE",
5746 	"BC_RELEASE",
5747 	"BC_DECREFS",
5748 	"BC_INCREFS_DONE",
5749 	"BC_ACQUIRE_DONE",
5750 	"BC_ATTEMPT_ACQUIRE",
5751 	"BC_REGISTER_LOOPER",
5752 	"BC_ENTER_LOOPER",
5753 	"BC_EXIT_LOOPER",
5754 	"BC_REQUEST_DEATH_NOTIFICATION",
5755 	"BC_CLEAR_DEATH_NOTIFICATION",
5756 	"BC_DEAD_BINDER_DONE",
5757 	"BC_TRANSACTION_SG",
5758 	"BC_REPLY_SG",
5759 };
5760 
5761 static const char * const binder_objstat_strings[] = {
5762 	"proc",
5763 	"thread",
5764 	"node",
5765 	"ref",
5766 	"death",
5767 	"transaction",
5768 	"transaction_complete"
5769 };
5770 
5771 static void print_binder_stats(struct seq_file *m, const char *prefix,
5772 			       struct binder_stats *stats)
5773 {
5774 	int i;
5775 
5776 	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5777 		     ARRAY_SIZE(binder_command_strings));
5778 	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5779 		int temp = atomic_read(&stats->bc[i]);
5780 
5781 		if (temp)
5782 			seq_printf(m, "%s%s: %d\n", prefix,
5783 				   binder_command_strings[i], temp);
5784 	}
5785 
5786 	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5787 		     ARRAY_SIZE(binder_return_strings));
5788 	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5789 		int temp = atomic_read(&stats->br[i]);
5790 
5791 		if (temp)
5792 			seq_printf(m, "%s%s: %d\n", prefix,
5793 				   binder_return_strings[i], temp);
5794 	}
5795 
5796 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5797 		     ARRAY_SIZE(binder_objstat_strings));
5798 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5799 		     ARRAY_SIZE(stats->obj_deleted));
5800 	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5801 		int created = atomic_read(&stats->obj_created[i]);
5802 		int deleted = atomic_read(&stats->obj_deleted[i]);
5803 
5804 		if (created || deleted)
5805 			seq_printf(m, "%s%s: active %d total %d\n",
5806 				prefix,
5807 				binder_objstat_strings[i],
5808 				created - deleted,
5809 				created);
5810 	}
5811 }
5812 
5813 static void print_binder_proc_stats(struct seq_file *m,
5814 				    struct binder_proc *proc)
5815 {
5816 	struct binder_work *w;
5817 	struct binder_thread *thread;
5818 	struct rb_node *n;
5819 	int count, strong, weak, ready_threads;
5820 	size_t free_async_space =
5821 		binder_alloc_get_free_async_space(&proc->alloc);
5822 
5823 	seq_printf(m, "proc %d\n", proc->pid);
5824 	seq_printf(m, "context %s\n", proc->context->name);
5825 	count = 0;
5826 	ready_threads = 0;
5827 	binder_inner_proc_lock(proc);
5828 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5829 		count++;
5830 
5831 	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5832 		ready_threads++;
5833 
5834 	seq_printf(m, "  threads: %d\n", count);
5835 	seq_printf(m, "  requested threads: %d+%d/%d\n"
5836 			"  ready threads %d\n"
5837 			"  free async space %zd\n", proc->requested_threads,
5838 			proc->requested_threads_started, proc->max_threads,
5839 			ready_threads,
5840 			free_async_space);
5841 	count = 0;
5842 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5843 		count++;
5844 	binder_inner_proc_unlock(proc);
5845 	seq_printf(m, "  nodes: %d\n", count);
5846 	count = 0;
5847 	strong = 0;
5848 	weak = 0;
5849 	binder_proc_lock(proc);
5850 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5851 		struct binder_ref *ref = rb_entry(n, struct binder_ref,
5852 						  rb_node_desc);
5853 		count++;
5854 		strong += ref->data.strong;
5855 		weak += ref->data.weak;
5856 	}
5857 	binder_proc_unlock(proc);
5858 	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
5859 
5860 	count = binder_alloc_get_allocated_count(&proc->alloc);
5861 	seq_printf(m, "  buffers: %d\n", count);
5862 
5863 	binder_alloc_print_pages(m, &proc->alloc);
5864 
5865 	count = 0;
5866 	binder_inner_proc_lock(proc);
5867 	list_for_each_entry(w, &proc->todo, entry) {
5868 		if (w->type == BINDER_WORK_TRANSACTION)
5869 			count++;
5870 	}
5871 	binder_inner_proc_unlock(proc);
5872 	seq_printf(m, "  pending transactions: %d\n", count);
5873 
5874 	print_binder_stats(m, "  ", &proc->stats);
5875 }
5876 
5877 
5878 static int state_show(struct seq_file *m, void *unused)
5879 {
5880 	struct binder_proc *proc;
5881 	struct binder_node *node;
5882 	struct binder_node *last_node = NULL;
5883 
5884 	seq_puts(m, "binder state:\n");
5885 
5886 	spin_lock(&binder_dead_nodes_lock);
5887 	if (!hlist_empty(&binder_dead_nodes))
5888 		seq_puts(m, "dead nodes:\n");
5889 	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5890 		/*
5891 		 * take a temporary reference on the node so it
5892 		 * survives and isn't removed from the list
5893 		 * while we print it.
5894 		 */
5895 		node->tmp_refs++;
5896 		spin_unlock(&binder_dead_nodes_lock);
5897 		if (last_node)
5898 			binder_put_node(last_node);
5899 		binder_node_lock(node);
5900 		print_binder_node_nilocked(m, node);
5901 		binder_node_unlock(node);
5902 		last_node = node;
5903 		spin_lock(&binder_dead_nodes_lock);
5904 	}
5905 	spin_unlock(&binder_dead_nodes_lock);
5906 	if (last_node)
5907 		binder_put_node(last_node);
5908 
5909 	mutex_lock(&binder_procs_lock);
5910 	hlist_for_each_entry(proc, &binder_procs, proc_node)
5911 		print_binder_proc(m, proc, 1);
5912 	mutex_unlock(&binder_procs_lock);
5913 
5914 	return 0;
5915 }
5916 
5917 static int stats_show(struct seq_file *m, void *unused)
5918 {
5919 	struct binder_proc *proc;
5920 
5921 	seq_puts(m, "binder stats:\n");
5922 
5923 	print_binder_stats(m, "", &binder_stats);
5924 
5925 	mutex_lock(&binder_procs_lock);
5926 	hlist_for_each_entry(proc, &binder_procs, proc_node)
5927 		print_binder_proc_stats(m, proc);
5928 	mutex_unlock(&binder_procs_lock);
5929 
5930 	return 0;
5931 }
5932 
5933 static int transactions_show(struct seq_file *m, void *unused)
5934 {
5935 	struct binder_proc *proc;
5936 
5937 	seq_puts(m, "binder transactions:\n");
5938 	mutex_lock(&binder_procs_lock);
5939 	hlist_for_each_entry(proc, &binder_procs, proc_node)
5940 		print_binder_proc(m, proc, 0);
5941 	mutex_unlock(&binder_procs_lock);
5942 
5943 	return 0;
5944 }
5945 
5946 static int proc_show(struct seq_file *m, void *unused)
5947 {
5948 	struct binder_proc *itr;
5949 	int pid = (unsigned long)m->private;
5950 
5951 	mutex_lock(&binder_procs_lock);
5952 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
5953 		if (itr->pid == pid) {
5954 			seq_puts(m, "binder proc state:\n");
5955 			print_binder_proc(m, itr, 1);
5956 		}
5957 	}
5958 	mutex_unlock(&binder_procs_lock);
5959 
5960 	return 0;
5961 }
5962 
5963 static void print_binder_transaction_log_entry(struct seq_file *m,
5964 					struct binder_transaction_log_entry *e)
5965 {
5966 	int debug_id = READ_ONCE(e->debug_id_done);
5967 	/*
5968 	 * read barrier to guarantee debug_id_done read before
5969 	 * we print the log values
5970 	 */
5971 	smp_rmb();
5972 	seq_printf(m,
5973 		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5974 		   e->debug_id, (e->call_type == 2) ? "reply" :
5975 		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5976 		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
5977 		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
5978 		   e->return_error, e->return_error_param,
5979 		   e->return_error_line);
5980 	/*
5981 	 * read-barrier to guarantee read of debug_id_done after
5982 	 * done printing the fields of the entry
5983 	 */
5984 	smp_rmb();
5985 	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5986 			"\n" : " (incomplete)\n");
5987 }
5988 
5989 static int transaction_log_show(struct seq_file *m, void *unused)
5990 {
5991 	struct binder_transaction_log *log = m->private;
5992 	unsigned int log_cur = atomic_read(&log->cur);
5993 	unsigned int count;
5994 	unsigned int cur;
5995 	int i;
5996 
5997 	count = log_cur + 1;
5998 	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5999 		0 : count % ARRAY_SIZE(log->entry);
6000 	if (count > ARRAY_SIZE(log->entry) || log->full)
6001 		count = ARRAY_SIZE(log->entry);
6002 	for (i = 0; i < count; i++) {
6003 		unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6004 
6005 		print_binder_transaction_log_entry(m, &log->entry[index]);
6006 	}
6007 	return 0;
6008 }
6009 
6010 const struct file_operations binder_fops = {
6011 	.owner = THIS_MODULE,
6012 	.poll = binder_poll,
6013 	.unlocked_ioctl = binder_ioctl,
6014 	.compat_ioctl = binder_ioctl,
6015 	.mmap = binder_mmap,
6016 	.open = binder_open,
6017 	.flush = binder_flush,
6018 	.release = binder_release,
6019 };
6020 
6021 DEFINE_SHOW_ATTRIBUTE(state);
6022 DEFINE_SHOW_ATTRIBUTE(stats);
6023 DEFINE_SHOW_ATTRIBUTE(transactions);
6024 DEFINE_SHOW_ATTRIBUTE(transaction_log);
6025 
6026 static int __init init_binder_device(const char *name)
6027 {
6028 	int ret;
6029 	struct binder_device *binder_device;
6030 
6031 	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6032 	if (!binder_device)
6033 		return -ENOMEM;
6034 
6035 	binder_device->miscdev.fops = &binder_fops;
6036 	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6037 	binder_device->miscdev.name = name;
6038 
6039 	binder_device->context.binder_context_mgr_uid = INVALID_UID;
6040 	binder_device->context.name = name;
6041 	mutex_init(&binder_device->context.context_mgr_node_lock);
6042 
6043 	ret = misc_register(&binder_device->miscdev);
6044 	if (ret < 0) {
6045 		kfree(binder_device);
6046 		return ret;
6047 	}
6048 
6049 	hlist_add_head(&binder_device->hlist, &binder_devices);
6050 
6051 	return ret;
6052 }
6053 
6054 static int __init binder_init(void)
6055 {
6056 	int ret;
6057 	char *device_name, *device_tmp;
6058 	struct binder_device *device;
6059 	struct hlist_node *tmp;
6060 	char *device_names = NULL;
6061 
6062 	ret = binder_alloc_shrinker_init();
6063 	if (ret)
6064 		return ret;
6065 
6066 	atomic_set(&binder_transaction_log.cur, ~0U);
6067 	atomic_set(&binder_transaction_log_failed.cur, ~0U);
6068 
6069 	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6070 	if (binder_debugfs_dir_entry_root)
6071 		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6072 						 binder_debugfs_dir_entry_root);
6073 
6074 	if (binder_debugfs_dir_entry_root) {
6075 		debugfs_create_file("state",
6076 				    0444,
6077 				    binder_debugfs_dir_entry_root,
6078 				    NULL,
6079 				    &state_fops);
6080 		debugfs_create_file("stats",
6081 				    0444,
6082 				    binder_debugfs_dir_entry_root,
6083 				    NULL,
6084 				    &stats_fops);
6085 		debugfs_create_file("transactions",
6086 				    0444,
6087 				    binder_debugfs_dir_entry_root,
6088 				    NULL,
6089 				    &transactions_fops);
6090 		debugfs_create_file("transaction_log",
6091 				    0444,
6092 				    binder_debugfs_dir_entry_root,
6093 				    &binder_transaction_log,
6094 				    &transaction_log_fops);
6095 		debugfs_create_file("failed_transaction_log",
6096 				    0444,
6097 				    binder_debugfs_dir_entry_root,
6098 				    &binder_transaction_log_failed,
6099 				    &transaction_log_fops);
6100 	}
6101 
6102 	if (strcmp(binder_devices_param, "") != 0) {
6103 		/*
6104 		* Copy the module_parameter string, because we don't want to
6105 		* tokenize it in-place.
6106 		 */
6107 		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6108 		if (!device_names) {
6109 			ret = -ENOMEM;
6110 			goto err_alloc_device_names_failed;
6111 		}
6112 
6113 		device_tmp = device_names;
6114 		while ((device_name = strsep(&device_tmp, ","))) {
6115 			ret = init_binder_device(device_name);
6116 			if (ret)
6117 				goto err_init_binder_device_failed;
6118 		}
6119 	}
6120 
6121 	ret = init_binderfs();
6122 	if (ret)
6123 		goto err_init_binder_device_failed;
6124 
6125 	return ret;
6126 
6127 err_init_binder_device_failed:
6128 	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6129 		misc_deregister(&device->miscdev);
6130 		hlist_del(&device->hlist);
6131 		kfree(device);
6132 	}
6133 
6134 	kfree(device_names);
6135 
6136 err_alloc_device_names_failed:
6137 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6138 
6139 	return ret;
6140 }
6141 
6142 device_initcall(binder_init);
6143 
6144 #define CREATE_TRACE_POINTS
6145 #include "binder_trace.h"
6146 
6147 MODULE_LICENSE("GPL v2");
6148