xref: /openbmc/linux/drivers/android/binder.c (revision dfc53baa)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder.c
3  *
4  * Android IPC Subsystem
5  *
6  * Copyright (C) 2007-2008 Google, Inc.
7  */
8 
9 /*
10  * Locking overview
11  *
12  * There are 3 main spinlocks which must be acquired in the
13  * order shown:
14  *
15  * 1) proc->outer_lock : protects binder_ref
16  *    binder_proc_lock() and binder_proc_unlock() are
17  *    used to acq/rel.
18  * 2) node->lock : protects most fields of binder_node.
19  *    binder_node_lock() and binder_node_unlock() are
20  *    used to acq/rel
21  * 3) proc->inner_lock : protects the thread and node lists
22  *    (proc->threads, proc->waiting_threads, proc->nodes)
23  *    and all todo lists associated with the binder_proc
24  *    (proc->todo, thread->todo, proc->delivered_death and
25  *    node->async_todo), as well as thread->transaction_stack
26  *    binder_inner_proc_lock() and binder_inner_proc_unlock()
27  *    are used to acq/rel
28  *
29  * Any lock under procA must never be nested under any lock at the same
30  * level or below on procB.
31  *
32  * Functions that require a lock held on entry indicate which lock
33  * in the suffix of the function name:
34  *
35  * foo_olocked() : requires node->outer_lock
36  * foo_nlocked() : requires node->lock
37  * foo_ilocked() : requires proc->inner_lock
38  * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39  * foo_nilocked(): requires node->lock and proc->inner_lock
40  * ...
41  */
42 
43 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44 
45 #include <linux/fdtable.h>
46 #include <linux/file.h>
47 #include <linux/freezer.h>
48 #include <linux/fs.h>
49 #include <linux/list.h>
50 #include <linux/miscdevice.h>
51 #include <linux/module.h>
52 #include <linux/mutex.h>
53 #include <linux/nsproxy.h>
54 #include <linux/poll.h>
55 #include <linux/debugfs.h>
56 #include <linux/rbtree.h>
57 #include <linux/sched/signal.h>
58 #include <linux/sched/mm.h>
59 #include <linux/seq_file.h>
60 #include <linux/string.h>
61 #include <linux/uaccess.h>
62 #include <linux/pid_namespace.h>
63 #include <linux/security.h>
64 #include <linux/spinlock.h>
65 #include <linux/ratelimit.h>
66 #include <linux/syscalls.h>
67 #include <linux/task_work.h>
68 #include <linux/sizes.h>
69 
70 #include <uapi/linux/android/binder.h>
71 #include <uapi/linux/android/binderfs.h>
72 
73 #include <asm/cacheflush.h>
74 
75 #include "binder_alloc.h"
76 #include "binder_internal.h"
77 #include "binder_trace.h"
78 
79 static HLIST_HEAD(binder_deferred_list);
80 static DEFINE_MUTEX(binder_deferred_lock);
81 
82 static HLIST_HEAD(binder_devices);
83 static HLIST_HEAD(binder_procs);
84 static DEFINE_MUTEX(binder_procs_lock);
85 
86 static HLIST_HEAD(binder_dead_nodes);
87 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
88 
89 static struct dentry *binder_debugfs_dir_entry_root;
90 static struct dentry *binder_debugfs_dir_entry_proc;
91 static atomic_t binder_last_id;
92 
93 static int proc_show(struct seq_file *m, void *unused);
94 DEFINE_SHOW_ATTRIBUTE(proc);
95 
96 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
97 
98 enum {
99 	BINDER_DEBUG_USER_ERROR             = 1U << 0,
100 	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
101 	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
102 	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
103 	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
104 	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
105 	BINDER_DEBUG_READ_WRITE             = 1U << 6,
106 	BINDER_DEBUG_USER_REFS              = 1U << 7,
107 	BINDER_DEBUG_THREADS                = 1U << 8,
108 	BINDER_DEBUG_TRANSACTION            = 1U << 9,
109 	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
110 	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
111 	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
112 	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
113 	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
114 };
115 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
116 	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
117 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
118 
119 char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
120 module_param_named(devices, binder_devices_param, charp, 0444);
121 
122 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
123 static int binder_stop_on_user_error;
124 
125 static int binder_set_stop_on_user_error(const char *val,
126 					 const struct kernel_param *kp)
127 {
128 	int ret;
129 
130 	ret = param_set_int(val, kp);
131 	if (binder_stop_on_user_error < 2)
132 		wake_up(&binder_user_error_wait);
133 	return ret;
134 }
135 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
136 	param_get_int, &binder_stop_on_user_error, 0644);
137 
138 #define binder_debug(mask, x...) \
139 	do { \
140 		if (binder_debug_mask & mask) \
141 			pr_info_ratelimited(x); \
142 	} while (0)
143 
144 #define binder_user_error(x...) \
145 	do { \
146 		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
147 			pr_info_ratelimited(x); \
148 		if (binder_stop_on_user_error) \
149 			binder_stop_on_user_error = 2; \
150 	} while (0)
151 
152 #define to_flat_binder_object(hdr) \
153 	container_of(hdr, struct flat_binder_object, hdr)
154 
155 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
156 
157 #define to_binder_buffer_object(hdr) \
158 	container_of(hdr, struct binder_buffer_object, hdr)
159 
160 #define to_binder_fd_array_object(hdr) \
161 	container_of(hdr, struct binder_fd_array_object, hdr)
162 
163 enum binder_stat_types {
164 	BINDER_STAT_PROC,
165 	BINDER_STAT_THREAD,
166 	BINDER_STAT_NODE,
167 	BINDER_STAT_REF,
168 	BINDER_STAT_DEATH,
169 	BINDER_STAT_TRANSACTION,
170 	BINDER_STAT_TRANSACTION_COMPLETE,
171 	BINDER_STAT_COUNT
172 };
173 
174 struct binder_stats {
175 	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
176 	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
177 	atomic_t obj_created[BINDER_STAT_COUNT];
178 	atomic_t obj_deleted[BINDER_STAT_COUNT];
179 };
180 
181 static struct binder_stats binder_stats;
182 
183 static inline void binder_stats_deleted(enum binder_stat_types type)
184 {
185 	atomic_inc(&binder_stats.obj_deleted[type]);
186 }
187 
188 static inline void binder_stats_created(enum binder_stat_types type)
189 {
190 	atomic_inc(&binder_stats.obj_created[type]);
191 }
192 
193 struct binder_transaction_log binder_transaction_log;
194 struct binder_transaction_log binder_transaction_log_failed;
195 
196 static struct binder_transaction_log_entry *binder_transaction_log_add(
197 	struct binder_transaction_log *log)
198 {
199 	struct binder_transaction_log_entry *e;
200 	unsigned int cur = atomic_inc_return(&log->cur);
201 
202 	if (cur >= ARRAY_SIZE(log->entry))
203 		log->full = true;
204 	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
205 	WRITE_ONCE(e->debug_id_done, 0);
206 	/*
207 	 * write-barrier to synchronize access to e->debug_id_done.
208 	 * We make sure the initialized 0 value is seen before
209 	 * memset() other fields are zeroed by memset.
210 	 */
211 	smp_wmb();
212 	memset(e, 0, sizeof(*e));
213 	return e;
214 }
215 
216 /**
217  * struct binder_work - work enqueued on a worklist
218  * @entry:             node enqueued on list
219  * @type:              type of work to be performed
220  *
221  * There are separate work lists for proc, thread, and node (async).
222  */
223 struct binder_work {
224 	struct list_head entry;
225 
226 	enum {
227 		BINDER_WORK_TRANSACTION = 1,
228 		BINDER_WORK_TRANSACTION_COMPLETE,
229 		BINDER_WORK_RETURN_ERROR,
230 		BINDER_WORK_NODE,
231 		BINDER_WORK_DEAD_BINDER,
232 		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
233 		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
234 	} type;
235 };
236 
237 struct binder_error {
238 	struct binder_work work;
239 	uint32_t cmd;
240 };
241 
242 /**
243  * struct binder_node - binder node bookkeeping
244  * @debug_id:             unique ID for debugging
245  *                        (invariant after initialized)
246  * @lock:                 lock for node fields
247  * @work:                 worklist element for node work
248  *                        (protected by @proc->inner_lock)
249  * @rb_node:              element for proc->nodes tree
250  *                        (protected by @proc->inner_lock)
251  * @dead_node:            element for binder_dead_nodes list
252  *                        (protected by binder_dead_nodes_lock)
253  * @proc:                 binder_proc that owns this node
254  *                        (invariant after initialized)
255  * @refs:                 list of references on this node
256  *                        (protected by @lock)
257  * @internal_strong_refs: used to take strong references when
258  *                        initiating a transaction
259  *                        (protected by @proc->inner_lock if @proc
260  *                        and by @lock)
261  * @local_weak_refs:      weak user refs from local process
262  *                        (protected by @proc->inner_lock if @proc
263  *                        and by @lock)
264  * @local_strong_refs:    strong user refs from local process
265  *                        (protected by @proc->inner_lock if @proc
266  *                        and by @lock)
267  * @tmp_refs:             temporary kernel refs
268  *                        (protected by @proc->inner_lock while @proc
269  *                        is valid, and by binder_dead_nodes_lock
270  *                        if @proc is NULL. During inc/dec and node release
271  *                        it is also protected by @lock to provide safety
272  *                        as the node dies and @proc becomes NULL)
273  * @ptr:                  userspace pointer for node
274  *                        (invariant, no lock needed)
275  * @cookie:               userspace cookie for node
276  *                        (invariant, no lock needed)
277  * @has_strong_ref:       userspace notified of strong ref
278  *                        (protected by @proc->inner_lock if @proc
279  *                        and by @lock)
280  * @pending_strong_ref:   userspace has acked notification of strong ref
281  *                        (protected by @proc->inner_lock if @proc
282  *                        and by @lock)
283  * @has_weak_ref:         userspace notified of weak ref
284  *                        (protected by @proc->inner_lock if @proc
285  *                        and by @lock)
286  * @pending_weak_ref:     userspace has acked notification of weak ref
287  *                        (protected by @proc->inner_lock if @proc
288  *                        and by @lock)
289  * @has_async_transaction: async transaction to node in progress
290  *                        (protected by @lock)
291  * @accept_fds:           file descriptor operations supported for node
292  *                        (invariant after initialized)
293  * @min_priority:         minimum scheduling priority
294  *                        (invariant after initialized)
295  * @txn_security_ctx:     require sender's security context
296  *                        (invariant after initialized)
297  * @async_todo:           list of async work items
298  *                        (protected by @proc->inner_lock)
299  *
300  * Bookkeeping structure for binder nodes.
301  */
302 struct binder_node {
303 	int debug_id;
304 	spinlock_t lock;
305 	struct binder_work work;
306 	union {
307 		struct rb_node rb_node;
308 		struct hlist_node dead_node;
309 	};
310 	struct binder_proc *proc;
311 	struct hlist_head refs;
312 	int internal_strong_refs;
313 	int local_weak_refs;
314 	int local_strong_refs;
315 	int tmp_refs;
316 	binder_uintptr_t ptr;
317 	binder_uintptr_t cookie;
318 	struct {
319 		/*
320 		 * bitfield elements protected by
321 		 * proc inner_lock
322 		 */
323 		u8 has_strong_ref:1;
324 		u8 pending_strong_ref:1;
325 		u8 has_weak_ref:1;
326 		u8 pending_weak_ref:1;
327 	};
328 	struct {
329 		/*
330 		 * invariant after initialization
331 		 */
332 		u8 accept_fds:1;
333 		u8 txn_security_ctx:1;
334 		u8 min_priority;
335 	};
336 	bool has_async_transaction;
337 	struct list_head async_todo;
338 };
339 
340 struct binder_ref_death {
341 	/**
342 	 * @work: worklist element for death notifications
343 	 *        (protected by inner_lock of the proc that
344 	 *        this ref belongs to)
345 	 */
346 	struct binder_work work;
347 	binder_uintptr_t cookie;
348 };
349 
350 /**
351  * struct binder_ref_data - binder_ref counts and id
352  * @debug_id:        unique ID for the ref
353  * @desc:            unique userspace handle for ref
354  * @strong:          strong ref count (debugging only if not locked)
355  * @weak:            weak ref count (debugging only if not locked)
356  *
357  * Structure to hold ref count and ref id information. Since
358  * the actual ref can only be accessed with a lock, this structure
359  * is used to return information about the ref to callers of
360  * ref inc/dec functions.
361  */
362 struct binder_ref_data {
363 	int debug_id;
364 	uint32_t desc;
365 	int strong;
366 	int weak;
367 };
368 
369 /**
370  * struct binder_ref - struct to track references on nodes
371  * @data:        binder_ref_data containing id, handle, and current refcounts
372  * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
373  * @rb_node_node: node for lookup by @node in proc's rb_tree
374  * @node_entry:  list entry for node->refs list in target node
375  *               (protected by @node->lock)
376  * @proc:        binder_proc containing ref
377  * @node:        binder_node of target node. When cleaning up a
378  *               ref for deletion in binder_cleanup_ref, a non-NULL
379  *               @node indicates the node must be freed
380  * @death:       pointer to death notification (ref_death) if requested
381  *               (protected by @node->lock)
382  *
383  * Structure to track references from procA to target node (on procB). This
384  * structure is unsafe to access without holding @proc->outer_lock.
385  */
386 struct binder_ref {
387 	/* Lookups needed: */
388 	/*   node + proc => ref (transaction) */
389 	/*   desc + proc => ref (transaction, inc/dec ref) */
390 	/*   node => refs + procs (proc exit) */
391 	struct binder_ref_data data;
392 	struct rb_node rb_node_desc;
393 	struct rb_node rb_node_node;
394 	struct hlist_node node_entry;
395 	struct binder_proc *proc;
396 	struct binder_node *node;
397 	struct binder_ref_death *death;
398 };
399 
400 enum binder_deferred_state {
401 	BINDER_DEFERRED_FLUSH        = 0x01,
402 	BINDER_DEFERRED_RELEASE      = 0x02,
403 };
404 
405 /**
406  * struct binder_proc - binder process bookkeeping
407  * @proc_node:            element for binder_procs list
408  * @threads:              rbtree of binder_threads in this proc
409  *                        (protected by @inner_lock)
410  * @nodes:                rbtree of binder nodes associated with
411  *                        this proc ordered by node->ptr
412  *                        (protected by @inner_lock)
413  * @refs_by_desc:         rbtree of refs ordered by ref->desc
414  *                        (protected by @outer_lock)
415  * @refs_by_node:         rbtree of refs ordered by ref->node
416  *                        (protected by @outer_lock)
417  * @waiting_threads:      threads currently waiting for proc work
418  *                        (protected by @inner_lock)
419  * @pid                   PID of group_leader of process
420  *                        (invariant after initialized)
421  * @tsk                   task_struct for group_leader of process
422  *                        (invariant after initialized)
423  * @deferred_work_node:   element for binder_deferred_list
424  *                        (protected by binder_deferred_lock)
425  * @deferred_work:        bitmap of deferred work to perform
426  *                        (protected by binder_deferred_lock)
427  * @is_dead:              process is dead and awaiting free
428  *                        when outstanding transactions are cleaned up
429  *                        (protected by @inner_lock)
430  * @todo:                 list of work for this process
431  *                        (protected by @inner_lock)
432  * @stats:                per-process binder statistics
433  *                        (atomics, no lock needed)
434  * @delivered_death:      list of delivered death notification
435  *                        (protected by @inner_lock)
436  * @max_threads:          cap on number of binder threads
437  *                        (protected by @inner_lock)
438  * @requested_threads:    number of binder threads requested but not
439  *                        yet started. In current implementation, can
440  *                        only be 0 or 1.
441  *                        (protected by @inner_lock)
442  * @requested_threads_started: number binder threads started
443  *                        (protected by @inner_lock)
444  * @tmp_ref:              temporary reference to indicate proc is in use
445  *                        (protected by @inner_lock)
446  * @default_priority:     default scheduler priority
447  *                        (invariant after initialized)
448  * @debugfs_entry:        debugfs node
449  * @alloc:                binder allocator bookkeeping
450  * @context:              binder_context for this proc
451  *                        (invariant after initialized)
452  * @inner_lock:           can nest under outer_lock and/or node lock
453  * @outer_lock:           no nesting under innor or node lock
454  *                        Lock order: 1) outer, 2) node, 3) inner
455  * @binderfs_entry:       process-specific binderfs log file
456  *
457  * Bookkeeping structure for binder processes
458  */
459 struct binder_proc {
460 	struct hlist_node proc_node;
461 	struct rb_root threads;
462 	struct rb_root nodes;
463 	struct rb_root refs_by_desc;
464 	struct rb_root refs_by_node;
465 	struct list_head waiting_threads;
466 	int pid;
467 	struct task_struct *tsk;
468 	struct hlist_node deferred_work_node;
469 	int deferred_work;
470 	bool is_dead;
471 
472 	struct list_head todo;
473 	struct binder_stats stats;
474 	struct list_head delivered_death;
475 	int max_threads;
476 	int requested_threads;
477 	int requested_threads_started;
478 	int tmp_ref;
479 	long default_priority;
480 	struct dentry *debugfs_entry;
481 	struct binder_alloc alloc;
482 	struct binder_context *context;
483 	spinlock_t inner_lock;
484 	spinlock_t outer_lock;
485 	struct dentry *binderfs_entry;
486 };
487 
488 enum {
489 	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
490 	BINDER_LOOPER_STATE_ENTERED     = 0x02,
491 	BINDER_LOOPER_STATE_EXITED      = 0x04,
492 	BINDER_LOOPER_STATE_INVALID     = 0x08,
493 	BINDER_LOOPER_STATE_WAITING     = 0x10,
494 	BINDER_LOOPER_STATE_POLL        = 0x20,
495 };
496 
497 /**
498  * struct binder_thread - binder thread bookkeeping
499  * @proc:                 binder process for this thread
500  *                        (invariant after initialization)
501  * @rb_node:              element for proc->threads rbtree
502  *                        (protected by @proc->inner_lock)
503  * @waiting_thread_node:  element for @proc->waiting_threads list
504  *                        (protected by @proc->inner_lock)
505  * @pid:                  PID for this thread
506  *                        (invariant after initialization)
507  * @looper:               bitmap of looping state
508  *                        (only accessed by this thread)
509  * @looper_needs_return:  looping thread needs to exit driver
510  *                        (no lock needed)
511  * @transaction_stack:    stack of in-progress transactions for this thread
512  *                        (protected by @proc->inner_lock)
513  * @todo:                 list of work to do for this thread
514  *                        (protected by @proc->inner_lock)
515  * @process_todo:         whether work in @todo should be processed
516  *                        (protected by @proc->inner_lock)
517  * @return_error:         transaction errors reported by this thread
518  *                        (only accessed by this thread)
519  * @reply_error:          transaction errors reported by target thread
520  *                        (protected by @proc->inner_lock)
521  * @wait:                 wait queue for thread work
522  * @stats:                per-thread statistics
523  *                        (atomics, no lock needed)
524  * @tmp_ref:              temporary reference to indicate thread is in use
525  *                        (atomic since @proc->inner_lock cannot
526  *                        always be acquired)
527  * @is_dead:              thread is dead and awaiting free
528  *                        when outstanding transactions are cleaned up
529  *                        (protected by @proc->inner_lock)
530  *
531  * Bookkeeping structure for binder threads.
532  */
533 struct binder_thread {
534 	struct binder_proc *proc;
535 	struct rb_node rb_node;
536 	struct list_head waiting_thread_node;
537 	int pid;
538 	int looper;              /* only modified by this thread */
539 	bool looper_need_return; /* can be written by other thread */
540 	struct binder_transaction *transaction_stack;
541 	struct list_head todo;
542 	bool process_todo;
543 	struct binder_error return_error;
544 	struct binder_error reply_error;
545 	wait_queue_head_t wait;
546 	struct binder_stats stats;
547 	atomic_t tmp_ref;
548 	bool is_dead;
549 };
550 
551 /**
552  * struct binder_txn_fd_fixup - transaction fd fixup list element
553  * @fixup_entry:          list entry
554  * @file:                 struct file to be associated with new fd
555  * @offset:               offset in buffer data to this fixup
556  *
557  * List element for fd fixups in a transaction. Since file
558  * descriptors need to be allocated in the context of the
559  * target process, we pass each fd to be processed in this
560  * struct.
561  */
562 struct binder_txn_fd_fixup {
563 	struct list_head fixup_entry;
564 	struct file *file;
565 	size_t offset;
566 };
567 
568 struct binder_transaction {
569 	int debug_id;
570 	struct binder_work work;
571 	struct binder_thread *from;
572 	struct binder_transaction *from_parent;
573 	struct binder_proc *to_proc;
574 	struct binder_thread *to_thread;
575 	struct binder_transaction *to_parent;
576 	unsigned need_reply:1;
577 	/* unsigned is_dead:1; */	/* not used at the moment */
578 
579 	struct binder_buffer *buffer;
580 	unsigned int	code;
581 	unsigned int	flags;
582 	long	priority;
583 	long	saved_priority;
584 	kuid_t	sender_euid;
585 	struct list_head fd_fixups;
586 	binder_uintptr_t security_ctx;
587 	/**
588 	 * @lock:  protects @from, @to_proc, and @to_thread
589 	 *
590 	 * @from, @to_proc, and @to_thread can be set to NULL
591 	 * during thread teardown
592 	 */
593 	spinlock_t lock;
594 };
595 
596 /**
597  * struct binder_object - union of flat binder object types
598  * @hdr:   generic object header
599  * @fbo:   binder object (nodes and refs)
600  * @fdo:   file descriptor object
601  * @bbo:   binder buffer pointer
602  * @fdao:  file descriptor array
603  *
604  * Used for type-independent object copies
605  */
606 struct binder_object {
607 	union {
608 		struct binder_object_header hdr;
609 		struct flat_binder_object fbo;
610 		struct binder_fd_object fdo;
611 		struct binder_buffer_object bbo;
612 		struct binder_fd_array_object fdao;
613 	};
614 };
615 
616 /**
617  * binder_proc_lock() - Acquire outer lock for given binder_proc
618  * @proc:         struct binder_proc to acquire
619  *
620  * Acquires proc->outer_lock. Used to protect binder_ref
621  * structures associated with the given proc.
622  */
623 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
624 static void
625 _binder_proc_lock(struct binder_proc *proc, int line)
626 	__acquires(&proc->outer_lock)
627 {
628 	binder_debug(BINDER_DEBUG_SPINLOCKS,
629 		     "%s: line=%d\n", __func__, line);
630 	spin_lock(&proc->outer_lock);
631 }
632 
633 /**
634  * binder_proc_unlock() - Release spinlock for given binder_proc
635  * @proc:         struct binder_proc to acquire
636  *
637  * Release lock acquired via binder_proc_lock()
638  */
639 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
640 static void
641 _binder_proc_unlock(struct binder_proc *proc, int line)
642 	__releases(&proc->outer_lock)
643 {
644 	binder_debug(BINDER_DEBUG_SPINLOCKS,
645 		     "%s: line=%d\n", __func__, line);
646 	spin_unlock(&proc->outer_lock);
647 }
648 
649 /**
650  * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
651  * @proc:         struct binder_proc to acquire
652  *
653  * Acquires proc->inner_lock. Used to protect todo lists
654  */
655 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
656 static void
657 _binder_inner_proc_lock(struct binder_proc *proc, int line)
658 	__acquires(&proc->inner_lock)
659 {
660 	binder_debug(BINDER_DEBUG_SPINLOCKS,
661 		     "%s: line=%d\n", __func__, line);
662 	spin_lock(&proc->inner_lock);
663 }
664 
665 /**
666  * binder_inner_proc_unlock() - Release inner lock for given binder_proc
667  * @proc:         struct binder_proc to acquire
668  *
669  * Release lock acquired via binder_inner_proc_lock()
670  */
671 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
672 static void
673 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
674 	__releases(&proc->inner_lock)
675 {
676 	binder_debug(BINDER_DEBUG_SPINLOCKS,
677 		     "%s: line=%d\n", __func__, line);
678 	spin_unlock(&proc->inner_lock);
679 }
680 
681 /**
682  * binder_node_lock() - Acquire spinlock for given binder_node
683  * @node:         struct binder_node to acquire
684  *
685  * Acquires node->lock. Used to protect binder_node fields
686  */
687 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
688 static void
689 _binder_node_lock(struct binder_node *node, int line)
690 	__acquires(&node->lock)
691 {
692 	binder_debug(BINDER_DEBUG_SPINLOCKS,
693 		     "%s: line=%d\n", __func__, line);
694 	spin_lock(&node->lock);
695 }
696 
697 /**
698  * binder_node_unlock() - Release spinlock for given binder_proc
699  * @node:         struct binder_node to acquire
700  *
701  * Release lock acquired via binder_node_lock()
702  */
703 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
704 static void
705 _binder_node_unlock(struct binder_node *node, int line)
706 	__releases(&node->lock)
707 {
708 	binder_debug(BINDER_DEBUG_SPINLOCKS,
709 		     "%s: line=%d\n", __func__, line);
710 	spin_unlock(&node->lock);
711 }
712 
713 /**
714  * binder_node_inner_lock() - Acquire node and inner locks
715  * @node:         struct binder_node to acquire
716  *
717  * Acquires node->lock. If node->proc also acquires
718  * proc->inner_lock. Used to protect binder_node fields
719  */
720 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
721 static void
722 _binder_node_inner_lock(struct binder_node *node, int line)
723 	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
724 {
725 	binder_debug(BINDER_DEBUG_SPINLOCKS,
726 		     "%s: line=%d\n", __func__, line);
727 	spin_lock(&node->lock);
728 	if (node->proc)
729 		binder_inner_proc_lock(node->proc);
730 	else
731 		/* annotation for sparse */
732 		__acquire(&node->proc->inner_lock);
733 }
734 
735 /**
736  * binder_node_unlock() - Release node and inner locks
737  * @node:         struct binder_node to acquire
738  *
739  * Release lock acquired via binder_node_lock()
740  */
741 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
742 static void
743 _binder_node_inner_unlock(struct binder_node *node, int line)
744 	__releases(&node->lock) __releases(&node->proc->inner_lock)
745 {
746 	struct binder_proc *proc = node->proc;
747 
748 	binder_debug(BINDER_DEBUG_SPINLOCKS,
749 		     "%s: line=%d\n", __func__, line);
750 	if (proc)
751 		binder_inner_proc_unlock(proc);
752 	else
753 		/* annotation for sparse */
754 		__release(&node->proc->inner_lock);
755 	spin_unlock(&node->lock);
756 }
757 
758 static bool binder_worklist_empty_ilocked(struct list_head *list)
759 {
760 	return list_empty(list);
761 }
762 
763 /**
764  * binder_worklist_empty() - Check if no items on the work list
765  * @proc:       binder_proc associated with list
766  * @list:	list to check
767  *
768  * Return: true if there are no items on list, else false
769  */
770 static bool binder_worklist_empty(struct binder_proc *proc,
771 				  struct list_head *list)
772 {
773 	bool ret;
774 
775 	binder_inner_proc_lock(proc);
776 	ret = binder_worklist_empty_ilocked(list);
777 	binder_inner_proc_unlock(proc);
778 	return ret;
779 }
780 
781 /**
782  * binder_enqueue_work_ilocked() - Add an item to the work list
783  * @work:         struct binder_work to add to list
784  * @target_list:  list to add work to
785  *
786  * Adds the work to the specified list. Asserts that work
787  * is not already on a list.
788  *
789  * Requires the proc->inner_lock to be held.
790  */
791 static void
792 binder_enqueue_work_ilocked(struct binder_work *work,
793 			   struct list_head *target_list)
794 {
795 	BUG_ON(target_list == NULL);
796 	BUG_ON(work->entry.next && !list_empty(&work->entry));
797 	list_add_tail(&work->entry, target_list);
798 }
799 
800 /**
801  * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
802  * @thread:       thread to queue work to
803  * @work:         struct binder_work to add to list
804  *
805  * Adds the work to the todo list of the thread. Doesn't set the process_todo
806  * flag, which means that (if it wasn't already set) the thread will go to
807  * sleep without handling this work when it calls read.
808  *
809  * Requires the proc->inner_lock to be held.
810  */
811 static void
812 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
813 					    struct binder_work *work)
814 {
815 	WARN_ON(!list_empty(&thread->waiting_thread_node));
816 	binder_enqueue_work_ilocked(work, &thread->todo);
817 }
818 
819 /**
820  * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
821  * @thread:       thread to queue work to
822  * @work:         struct binder_work to add to list
823  *
824  * Adds the work to the todo list of the thread, and enables processing
825  * of the todo queue.
826  *
827  * Requires the proc->inner_lock to be held.
828  */
829 static void
830 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
831 				   struct binder_work *work)
832 {
833 	WARN_ON(!list_empty(&thread->waiting_thread_node));
834 	binder_enqueue_work_ilocked(work, &thread->todo);
835 	thread->process_todo = true;
836 }
837 
838 /**
839  * binder_enqueue_thread_work() - Add an item to the thread work list
840  * @thread:       thread to queue work to
841  * @work:         struct binder_work to add to list
842  *
843  * Adds the work to the todo list of the thread, and enables processing
844  * of the todo queue.
845  */
846 static void
847 binder_enqueue_thread_work(struct binder_thread *thread,
848 			   struct binder_work *work)
849 {
850 	binder_inner_proc_lock(thread->proc);
851 	binder_enqueue_thread_work_ilocked(thread, work);
852 	binder_inner_proc_unlock(thread->proc);
853 }
854 
855 static void
856 binder_dequeue_work_ilocked(struct binder_work *work)
857 {
858 	list_del_init(&work->entry);
859 }
860 
861 /**
862  * binder_dequeue_work() - Removes an item from the work list
863  * @proc:         binder_proc associated with list
864  * @work:         struct binder_work to remove from list
865  *
866  * Removes the specified work item from whatever list it is on.
867  * Can safely be called if work is not on any list.
868  */
869 static void
870 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
871 {
872 	binder_inner_proc_lock(proc);
873 	binder_dequeue_work_ilocked(work);
874 	binder_inner_proc_unlock(proc);
875 }
876 
877 static struct binder_work *binder_dequeue_work_head_ilocked(
878 					struct list_head *list)
879 {
880 	struct binder_work *w;
881 
882 	w = list_first_entry_or_null(list, struct binder_work, entry);
883 	if (w)
884 		list_del_init(&w->entry);
885 	return w;
886 }
887 
888 /**
889  * binder_dequeue_work_head() - Dequeues the item at head of list
890  * @proc:         binder_proc associated with list
891  * @list:         list to dequeue head
892  *
893  * Removes the head of the list if there are items on the list
894  *
895  * Return: pointer dequeued binder_work, NULL if list was empty
896  */
897 static struct binder_work *binder_dequeue_work_head(
898 					struct binder_proc *proc,
899 					struct list_head *list)
900 {
901 	struct binder_work *w;
902 
903 	binder_inner_proc_lock(proc);
904 	w = binder_dequeue_work_head_ilocked(list);
905 	binder_inner_proc_unlock(proc);
906 	return w;
907 }
908 
909 static void
910 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
911 static void binder_free_thread(struct binder_thread *thread);
912 static void binder_free_proc(struct binder_proc *proc);
913 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
914 
915 static bool binder_has_work_ilocked(struct binder_thread *thread,
916 				    bool do_proc_work)
917 {
918 	return thread->process_todo ||
919 		thread->looper_need_return ||
920 		(do_proc_work &&
921 		 !binder_worklist_empty_ilocked(&thread->proc->todo));
922 }
923 
924 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
925 {
926 	bool has_work;
927 
928 	binder_inner_proc_lock(thread->proc);
929 	has_work = binder_has_work_ilocked(thread, do_proc_work);
930 	binder_inner_proc_unlock(thread->proc);
931 
932 	return has_work;
933 }
934 
935 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
936 {
937 	return !thread->transaction_stack &&
938 		binder_worklist_empty_ilocked(&thread->todo) &&
939 		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
940 				   BINDER_LOOPER_STATE_REGISTERED));
941 }
942 
943 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
944 					       bool sync)
945 {
946 	struct rb_node *n;
947 	struct binder_thread *thread;
948 
949 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
950 		thread = rb_entry(n, struct binder_thread, rb_node);
951 		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
952 		    binder_available_for_proc_work_ilocked(thread)) {
953 			if (sync)
954 				wake_up_interruptible_sync(&thread->wait);
955 			else
956 				wake_up_interruptible(&thread->wait);
957 		}
958 	}
959 }
960 
961 /**
962  * binder_select_thread_ilocked() - selects a thread for doing proc work.
963  * @proc:	process to select a thread from
964  *
965  * Note that calling this function moves the thread off the waiting_threads
966  * list, so it can only be woken up by the caller of this function, or a
967  * signal. Therefore, callers *should* always wake up the thread this function
968  * returns.
969  *
970  * Return:	If there's a thread currently waiting for process work,
971  *		returns that thread. Otherwise returns NULL.
972  */
973 static struct binder_thread *
974 binder_select_thread_ilocked(struct binder_proc *proc)
975 {
976 	struct binder_thread *thread;
977 
978 	assert_spin_locked(&proc->inner_lock);
979 	thread = list_first_entry_or_null(&proc->waiting_threads,
980 					  struct binder_thread,
981 					  waiting_thread_node);
982 
983 	if (thread)
984 		list_del_init(&thread->waiting_thread_node);
985 
986 	return thread;
987 }
988 
989 /**
990  * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
991  * @proc:	process to wake up a thread in
992  * @thread:	specific thread to wake-up (may be NULL)
993  * @sync:	whether to do a synchronous wake-up
994  *
995  * This function wakes up a thread in the @proc process.
996  * The caller may provide a specific thread to wake-up in
997  * the @thread parameter. If @thread is NULL, this function
998  * will wake up threads that have called poll().
999  *
1000  * Note that for this function to work as expected, callers
1001  * should first call binder_select_thread() to find a thread
1002  * to handle the work (if they don't have a thread already),
1003  * and pass the result into the @thread parameter.
1004  */
1005 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
1006 					 struct binder_thread *thread,
1007 					 bool sync)
1008 {
1009 	assert_spin_locked(&proc->inner_lock);
1010 
1011 	if (thread) {
1012 		if (sync)
1013 			wake_up_interruptible_sync(&thread->wait);
1014 		else
1015 			wake_up_interruptible(&thread->wait);
1016 		return;
1017 	}
1018 
1019 	/* Didn't find a thread waiting for proc work; this can happen
1020 	 * in two scenarios:
1021 	 * 1. All threads are busy handling transactions
1022 	 *    In that case, one of those threads should call back into
1023 	 *    the kernel driver soon and pick up this work.
1024 	 * 2. Threads are using the (e)poll interface, in which case
1025 	 *    they may be blocked on the waitqueue without having been
1026 	 *    added to waiting_threads. For this case, we just iterate
1027 	 *    over all threads not handling transaction work, and
1028 	 *    wake them all up. We wake all because we don't know whether
1029 	 *    a thread that called into (e)poll is handling non-binder
1030 	 *    work currently.
1031 	 */
1032 	binder_wakeup_poll_threads_ilocked(proc, sync);
1033 }
1034 
1035 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1036 {
1037 	struct binder_thread *thread = binder_select_thread_ilocked(proc);
1038 
1039 	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1040 }
1041 
1042 static void binder_set_nice(long nice)
1043 {
1044 	long min_nice;
1045 
1046 	if (can_nice(current, nice)) {
1047 		set_user_nice(current, nice);
1048 		return;
1049 	}
1050 	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
1051 	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1052 		     "%d: nice value %ld not allowed use %ld instead\n",
1053 		      current->pid, nice, min_nice);
1054 	set_user_nice(current, min_nice);
1055 	if (min_nice <= MAX_NICE)
1056 		return;
1057 	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
1058 }
1059 
1060 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1061 						   binder_uintptr_t ptr)
1062 {
1063 	struct rb_node *n = proc->nodes.rb_node;
1064 	struct binder_node *node;
1065 
1066 	assert_spin_locked(&proc->inner_lock);
1067 
1068 	while (n) {
1069 		node = rb_entry(n, struct binder_node, rb_node);
1070 
1071 		if (ptr < node->ptr)
1072 			n = n->rb_left;
1073 		else if (ptr > node->ptr)
1074 			n = n->rb_right;
1075 		else {
1076 			/*
1077 			 * take an implicit weak reference
1078 			 * to ensure node stays alive until
1079 			 * call to binder_put_node()
1080 			 */
1081 			binder_inc_node_tmpref_ilocked(node);
1082 			return node;
1083 		}
1084 	}
1085 	return NULL;
1086 }
1087 
1088 static struct binder_node *binder_get_node(struct binder_proc *proc,
1089 					   binder_uintptr_t ptr)
1090 {
1091 	struct binder_node *node;
1092 
1093 	binder_inner_proc_lock(proc);
1094 	node = binder_get_node_ilocked(proc, ptr);
1095 	binder_inner_proc_unlock(proc);
1096 	return node;
1097 }
1098 
1099 static struct binder_node *binder_init_node_ilocked(
1100 						struct binder_proc *proc,
1101 						struct binder_node *new_node,
1102 						struct flat_binder_object *fp)
1103 {
1104 	struct rb_node **p = &proc->nodes.rb_node;
1105 	struct rb_node *parent = NULL;
1106 	struct binder_node *node;
1107 	binder_uintptr_t ptr = fp ? fp->binder : 0;
1108 	binder_uintptr_t cookie = fp ? fp->cookie : 0;
1109 	__u32 flags = fp ? fp->flags : 0;
1110 
1111 	assert_spin_locked(&proc->inner_lock);
1112 
1113 	while (*p) {
1114 
1115 		parent = *p;
1116 		node = rb_entry(parent, struct binder_node, rb_node);
1117 
1118 		if (ptr < node->ptr)
1119 			p = &(*p)->rb_left;
1120 		else if (ptr > node->ptr)
1121 			p = &(*p)->rb_right;
1122 		else {
1123 			/*
1124 			 * A matching node is already in
1125 			 * the rb tree. Abandon the init
1126 			 * and return it.
1127 			 */
1128 			binder_inc_node_tmpref_ilocked(node);
1129 			return node;
1130 		}
1131 	}
1132 	node = new_node;
1133 	binder_stats_created(BINDER_STAT_NODE);
1134 	node->tmp_refs++;
1135 	rb_link_node(&node->rb_node, parent, p);
1136 	rb_insert_color(&node->rb_node, &proc->nodes);
1137 	node->debug_id = atomic_inc_return(&binder_last_id);
1138 	node->proc = proc;
1139 	node->ptr = ptr;
1140 	node->cookie = cookie;
1141 	node->work.type = BINDER_WORK_NODE;
1142 	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1143 	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1144 	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
1145 	spin_lock_init(&node->lock);
1146 	INIT_LIST_HEAD(&node->work.entry);
1147 	INIT_LIST_HEAD(&node->async_todo);
1148 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1149 		     "%d:%d node %d u%016llx c%016llx created\n",
1150 		     proc->pid, current->pid, node->debug_id,
1151 		     (u64)node->ptr, (u64)node->cookie);
1152 
1153 	return node;
1154 }
1155 
1156 static struct binder_node *binder_new_node(struct binder_proc *proc,
1157 					   struct flat_binder_object *fp)
1158 {
1159 	struct binder_node *node;
1160 	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1161 
1162 	if (!new_node)
1163 		return NULL;
1164 	binder_inner_proc_lock(proc);
1165 	node = binder_init_node_ilocked(proc, new_node, fp);
1166 	binder_inner_proc_unlock(proc);
1167 	if (node != new_node)
1168 		/*
1169 		 * The node was already added by another thread
1170 		 */
1171 		kfree(new_node);
1172 
1173 	return node;
1174 }
1175 
1176 static void binder_free_node(struct binder_node *node)
1177 {
1178 	kfree(node);
1179 	binder_stats_deleted(BINDER_STAT_NODE);
1180 }
1181 
1182 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1183 				    int internal,
1184 				    struct list_head *target_list)
1185 {
1186 	struct binder_proc *proc = node->proc;
1187 
1188 	assert_spin_locked(&node->lock);
1189 	if (proc)
1190 		assert_spin_locked(&proc->inner_lock);
1191 	if (strong) {
1192 		if (internal) {
1193 			if (target_list == NULL &&
1194 			    node->internal_strong_refs == 0 &&
1195 			    !(node->proc &&
1196 			      node == node->proc->context->binder_context_mgr_node &&
1197 			      node->has_strong_ref)) {
1198 				pr_err("invalid inc strong node for %d\n",
1199 					node->debug_id);
1200 				return -EINVAL;
1201 			}
1202 			node->internal_strong_refs++;
1203 		} else
1204 			node->local_strong_refs++;
1205 		if (!node->has_strong_ref && target_list) {
1206 			struct binder_thread *thread = container_of(target_list,
1207 						    struct binder_thread, todo);
1208 			binder_dequeue_work_ilocked(&node->work);
1209 			BUG_ON(&thread->todo != target_list);
1210 			binder_enqueue_deferred_thread_work_ilocked(thread,
1211 								   &node->work);
1212 		}
1213 	} else {
1214 		if (!internal)
1215 			node->local_weak_refs++;
1216 		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1217 			if (target_list == NULL) {
1218 				pr_err("invalid inc weak node for %d\n",
1219 					node->debug_id);
1220 				return -EINVAL;
1221 			}
1222 			/*
1223 			 * See comment above
1224 			 */
1225 			binder_enqueue_work_ilocked(&node->work, target_list);
1226 		}
1227 	}
1228 	return 0;
1229 }
1230 
1231 static int binder_inc_node(struct binder_node *node, int strong, int internal,
1232 			   struct list_head *target_list)
1233 {
1234 	int ret;
1235 
1236 	binder_node_inner_lock(node);
1237 	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1238 	binder_node_inner_unlock(node);
1239 
1240 	return ret;
1241 }
1242 
1243 static bool binder_dec_node_nilocked(struct binder_node *node,
1244 				     int strong, int internal)
1245 {
1246 	struct binder_proc *proc = node->proc;
1247 
1248 	assert_spin_locked(&node->lock);
1249 	if (proc)
1250 		assert_spin_locked(&proc->inner_lock);
1251 	if (strong) {
1252 		if (internal)
1253 			node->internal_strong_refs--;
1254 		else
1255 			node->local_strong_refs--;
1256 		if (node->local_strong_refs || node->internal_strong_refs)
1257 			return false;
1258 	} else {
1259 		if (!internal)
1260 			node->local_weak_refs--;
1261 		if (node->local_weak_refs || node->tmp_refs ||
1262 				!hlist_empty(&node->refs))
1263 			return false;
1264 	}
1265 
1266 	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1267 		if (list_empty(&node->work.entry)) {
1268 			binder_enqueue_work_ilocked(&node->work, &proc->todo);
1269 			binder_wakeup_proc_ilocked(proc);
1270 		}
1271 	} else {
1272 		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1273 		    !node->local_weak_refs && !node->tmp_refs) {
1274 			if (proc) {
1275 				binder_dequeue_work_ilocked(&node->work);
1276 				rb_erase(&node->rb_node, &proc->nodes);
1277 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1278 					     "refless node %d deleted\n",
1279 					     node->debug_id);
1280 			} else {
1281 				BUG_ON(!list_empty(&node->work.entry));
1282 				spin_lock(&binder_dead_nodes_lock);
1283 				/*
1284 				 * tmp_refs could have changed so
1285 				 * check it again
1286 				 */
1287 				if (node->tmp_refs) {
1288 					spin_unlock(&binder_dead_nodes_lock);
1289 					return false;
1290 				}
1291 				hlist_del(&node->dead_node);
1292 				spin_unlock(&binder_dead_nodes_lock);
1293 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1294 					     "dead node %d deleted\n",
1295 					     node->debug_id);
1296 			}
1297 			return true;
1298 		}
1299 	}
1300 	return false;
1301 }
1302 
1303 static void binder_dec_node(struct binder_node *node, int strong, int internal)
1304 {
1305 	bool free_node;
1306 
1307 	binder_node_inner_lock(node);
1308 	free_node = binder_dec_node_nilocked(node, strong, internal);
1309 	binder_node_inner_unlock(node);
1310 	if (free_node)
1311 		binder_free_node(node);
1312 }
1313 
1314 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1315 {
1316 	/*
1317 	 * No call to binder_inc_node() is needed since we
1318 	 * don't need to inform userspace of any changes to
1319 	 * tmp_refs
1320 	 */
1321 	node->tmp_refs++;
1322 }
1323 
1324 /**
1325  * binder_inc_node_tmpref() - take a temporary reference on node
1326  * @node:	node to reference
1327  *
1328  * Take reference on node to prevent the node from being freed
1329  * while referenced only by a local variable. The inner lock is
1330  * needed to serialize with the node work on the queue (which
1331  * isn't needed after the node is dead). If the node is dead
1332  * (node->proc is NULL), use binder_dead_nodes_lock to protect
1333  * node->tmp_refs against dead-node-only cases where the node
1334  * lock cannot be acquired (eg traversing the dead node list to
1335  * print nodes)
1336  */
1337 static void binder_inc_node_tmpref(struct binder_node *node)
1338 {
1339 	binder_node_lock(node);
1340 	if (node->proc)
1341 		binder_inner_proc_lock(node->proc);
1342 	else
1343 		spin_lock(&binder_dead_nodes_lock);
1344 	binder_inc_node_tmpref_ilocked(node);
1345 	if (node->proc)
1346 		binder_inner_proc_unlock(node->proc);
1347 	else
1348 		spin_unlock(&binder_dead_nodes_lock);
1349 	binder_node_unlock(node);
1350 }
1351 
1352 /**
1353  * binder_dec_node_tmpref() - remove a temporary reference on node
1354  * @node:	node to reference
1355  *
1356  * Release temporary reference on node taken via binder_inc_node_tmpref()
1357  */
1358 static void binder_dec_node_tmpref(struct binder_node *node)
1359 {
1360 	bool free_node;
1361 
1362 	binder_node_inner_lock(node);
1363 	if (!node->proc)
1364 		spin_lock(&binder_dead_nodes_lock);
1365 	else
1366 		__acquire(&binder_dead_nodes_lock);
1367 	node->tmp_refs--;
1368 	BUG_ON(node->tmp_refs < 0);
1369 	if (!node->proc)
1370 		spin_unlock(&binder_dead_nodes_lock);
1371 	else
1372 		__release(&binder_dead_nodes_lock);
1373 	/*
1374 	 * Call binder_dec_node() to check if all refcounts are 0
1375 	 * and cleanup is needed. Calling with strong=0 and internal=1
1376 	 * causes no actual reference to be released in binder_dec_node().
1377 	 * If that changes, a change is needed here too.
1378 	 */
1379 	free_node = binder_dec_node_nilocked(node, 0, 1);
1380 	binder_node_inner_unlock(node);
1381 	if (free_node)
1382 		binder_free_node(node);
1383 }
1384 
1385 static void binder_put_node(struct binder_node *node)
1386 {
1387 	binder_dec_node_tmpref(node);
1388 }
1389 
1390 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1391 						 u32 desc, bool need_strong_ref)
1392 {
1393 	struct rb_node *n = proc->refs_by_desc.rb_node;
1394 	struct binder_ref *ref;
1395 
1396 	while (n) {
1397 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1398 
1399 		if (desc < ref->data.desc) {
1400 			n = n->rb_left;
1401 		} else if (desc > ref->data.desc) {
1402 			n = n->rb_right;
1403 		} else if (need_strong_ref && !ref->data.strong) {
1404 			binder_user_error("tried to use weak ref as strong ref\n");
1405 			return NULL;
1406 		} else {
1407 			return ref;
1408 		}
1409 	}
1410 	return NULL;
1411 }
1412 
1413 /**
1414  * binder_get_ref_for_node_olocked() - get the ref associated with given node
1415  * @proc:	binder_proc that owns the ref
1416  * @node:	binder_node of target
1417  * @new_ref:	newly allocated binder_ref to be initialized or %NULL
1418  *
1419  * Look up the ref for the given node and return it if it exists
1420  *
1421  * If it doesn't exist and the caller provides a newly allocated
1422  * ref, initialize the fields of the newly allocated ref and insert
1423  * into the given proc rb_trees and node refs list.
1424  *
1425  * Return:	the ref for node. It is possible that another thread
1426  *		allocated/initialized the ref first in which case the
1427  *		returned ref would be different than the passed-in
1428  *		new_ref. new_ref must be kfree'd by the caller in
1429  *		this case.
1430  */
1431 static struct binder_ref *binder_get_ref_for_node_olocked(
1432 					struct binder_proc *proc,
1433 					struct binder_node *node,
1434 					struct binder_ref *new_ref)
1435 {
1436 	struct binder_context *context = proc->context;
1437 	struct rb_node **p = &proc->refs_by_node.rb_node;
1438 	struct rb_node *parent = NULL;
1439 	struct binder_ref *ref;
1440 	struct rb_node *n;
1441 
1442 	while (*p) {
1443 		parent = *p;
1444 		ref = rb_entry(parent, struct binder_ref, rb_node_node);
1445 
1446 		if (node < ref->node)
1447 			p = &(*p)->rb_left;
1448 		else if (node > ref->node)
1449 			p = &(*p)->rb_right;
1450 		else
1451 			return ref;
1452 	}
1453 	if (!new_ref)
1454 		return NULL;
1455 
1456 	binder_stats_created(BINDER_STAT_REF);
1457 	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1458 	new_ref->proc = proc;
1459 	new_ref->node = node;
1460 	rb_link_node(&new_ref->rb_node_node, parent, p);
1461 	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1462 
1463 	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1464 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1465 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1466 		if (ref->data.desc > new_ref->data.desc)
1467 			break;
1468 		new_ref->data.desc = ref->data.desc + 1;
1469 	}
1470 
1471 	p = &proc->refs_by_desc.rb_node;
1472 	while (*p) {
1473 		parent = *p;
1474 		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1475 
1476 		if (new_ref->data.desc < ref->data.desc)
1477 			p = &(*p)->rb_left;
1478 		else if (new_ref->data.desc > ref->data.desc)
1479 			p = &(*p)->rb_right;
1480 		else
1481 			BUG();
1482 	}
1483 	rb_link_node(&new_ref->rb_node_desc, parent, p);
1484 	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1485 
1486 	binder_node_lock(node);
1487 	hlist_add_head(&new_ref->node_entry, &node->refs);
1488 
1489 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1490 		     "%d new ref %d desc %d for node %d\n",
1491 		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1492 		      node->debug_id);
1493 	binder_node_unlock(node);
1494 	return new_ref;
1495 }
1496 
1497 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1498 {
1499 	bool delete_node = false;
1500 
1501 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1502 		     "%d delete ref %d desc %d for node %d\n",
1503 		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
1504 		      ref->node->debug_id);
1505 
1506 	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1507 	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1508 
1509 	binder_node_inner_lock(ref->node);
1510 	if (ref->data.strong)
1511 		binder_dec_node_nilocked(ref->node, 1, 1);
1512 
1513 	hlist_del(&ref->node_entry);
1514 	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1515 	binder_node_inner_unlock(ref->node);
1516 	/*
1517 	 * Clear ref->node unless we want the caller to free the node
1518 	 */
1519 	if (!delete_node) {
1520 		/*
1521 		 * The caller uses ref->node to determine
1522 		 * whether the node needs to be freed. Clear
1523 		 * it since the node is still alive.
1524 		 */
1525 		ref->node = NULL;
1526 	}
1527 
1528 	if (ref->death) {
1529 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1530 			     "%d delete ref %d desc %d has death notification\n",
1531 			      ref->proc->pid, ref->data.debug_id,
1532 			      ref->data.desc);
1533 		binder_dequeue_work(ref->proc, &ref->death->work);
1534 		binder_stats_deleted(BINDER_STAT_DEATH);
1535 	}
1536 	binder_stats_deleted(BINDER_STAT_REF);
1537 }
1538 
1539 /**
1540  * binder_inc_ref_olocked() - increment the ref for given handle
1541  * @ref:         ref to be incremented
1542  * @strong:      if true, strong increment, else weak
1543  * @target_list: list to queue node work on
1544  *
1545  * Increment the ref. @ref->proc->outer_lock must be held on entry
1546  *
1547  * Return: 0, if successful, else errno
1548  */
1549 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1550 				  struct list_head *target_list)
1551 {
1552 	int ret;
1553 
1554 	if (strong) {
1555 		if (ref->data.strong == 0) {
1556 			ret = binder_inc_node(ref->node, 1, 1, target_list);
1557 			if (ret)
1558 				return ret;
1559 		}
1560 		ref->data.strong++;
1561 	} else {
1562 		if (ref->data.weak == 0) {
1563 			ret = binder_inc_node(ref->node, 0, 1, target_list);
1564 			if (ret)
1565 				return ret;
1566 		}
1567 		ref->data.weak++;
1568 	}
1569 	return 0;
1570 }
1571 
1572 /**
1573  * binder_dec_ref() - dec the ref for given handle
1574  * @ref:	ref to be decremented
1575  * @strong:	if true, strong decrement, else weak
1576  *
1577  * Decrement the ref.
1578  *
1579  * Return: true if ref is cleaned up and ready to be freed
1580  */
1581 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1582 {
1583 	if (strong) {
1584 		if (ref->data.strong == 0) {
1585 			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1586 					  ref->proc->pid, ref->data.debug_id,
1587 					  ref->data.desc, ref->data.strong,
1588 					  ref->data.weak);
1589 			return false;
1590 		}
1591 		ref->data.strong--;
1592 		if (ref->data.strong == 0)
1593 			binder_dec_node(ref->node, strong, 1);
1594 	} else {
1595 		if (ref->data.weak == 0) {
1596 			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1597 					  ref->proc->pid, ref->data.debug_id,
1598 					  ref->data.desc, ref->data.strong,
1599 					  ref->data.weak);
1600 			return false;
1601 		}
1602 		ref->data.weak--;
1603 	}
1604 	if (ref->data.strong == 0 && ref->data.weak == 0) {
1605 		binder_cleanup_ref_olocked(ref);
1606 		return true;
1607 	}
1608 	return false;
1609 }
1610 
1611 /**
1612  * binder_get_node_from_ref() - get the node from the given proc/desc
1613  * @proc:	proc containing the ref
1614  * @desc:	the handle associated with the ref
1615  * @need_strong_ref: if true, only return node if ref is strong
1616  * @rdata:	the id/refcount data for the ref
1617  *
1618  * Given a proc and ref handle, return the associated binder_node
1619  *
1620  * Return: a binder_node or NULL if not found or not strong when strong required
1621  */
1622 static struct binder_node *binder_get_node_from_ref(
1623 		struct binder_proc *proc,
1624 		u32 desc, bool need_strong_ref,
1625 		struct binder_ref_data *rdata)
1626 {
1627 	struct binder_node *node;
1628 	struct binder_ref *ref;
1629 
1630 	binder_proc_lock(proc);
1631 	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1632 	if (!ref)
1633 		goto err_no_ref;
1634 	node = ref->node;
1635 	/*
1636 	 * Take an implicit reference on the node to ensure
1637 	 * it stays alive until the call to binder_put_node()
1638 	 */
1639 	binder_inc_node_tmpref(node);
1640 	if (rdata)
1641 		*rdata = ref->data;
1642 	binder_proc_unlock(proc);
1643 
1644 	return node;
1645 
1646 err_no_ref:
1647 	binder_proc_unlock(proc);
1648 	return NULL;
1649 }
1650 
1651 /**
1652  * binder_free_ref() - free the binder_ref
1653  * @ref:	ref to free
1654  *
1655  * Free the binder_ref. Free the binder_node indicated by ref->node
1656  * (if non-NULL) and the binder_ref_death indicated by ref->death.
1657  */
1658 static void binder_free_ref(struct binder_ref *ref)
1659 {
1660 	if (ref->node)
1661 		binder_free_node(ref->node);
1662 	kfree(ref->death);
1663 	kfree(ref);
1664 }
1665 
1666 /**
1667  * binder_update_ref_for_handle() - inc/dec the ref for given handle
1668  * @proc:	proc containing the ref
1669  * @desc:	the handle associated with the ref
1670  * @increment:	true=inc reference, false=dec reference
1671  * @strong:	true=strong reference, false=weak reference
1672  * @rdata:	the id/refcount data for the ref
1673  *
1674  * Given a proc and ref handle, increment or decrement the ref
1675  * according to "increment" arg.
1676  *
1677  * Return: 0 if successful, else errno
1678  */
1679 static int binder_update_ref_for_handle(struct binder_proc *proc,
1680 		uint32_t desc, bool increment, bool strong,
1681 		struct binder_ref_data *rdata)
1682 {
1683 	int ret = 0;
1684 	struct binder_ref *ref;
1685 	bool delete_ref = false;
1686 
1687 	binder_proc_lock(proc);
1688 	ref = binder_get_ref_olocked(proc, desc, strong);
1689 	if (!ref) {
1690 		ret = -EINVAL;
1691 		goto err_no_ref;
1692 	}
1693 	if (increment)
1694 		ret = binder_inc_ref_olocked(ref, strong, NULL);
1695 	else
1696 		delete_ref = binder_dec_ref_olocked(ref, strong);
1697 
1698 	if (rdata)
1699 		*rdata = ref->data;
1700 	binder_proc_unlock(proc);
1701 
1702 	if (delete_ref)
1703 		binder_free_ref(ref);
1704 	return ret;
1705 
1706 err_no_ref:
1707 	binder_proc_unlock(proc);
1708 	return ret;
1709 }
1710 
1711 /**
1712  * binder_dec_ref_for_handle() - dec the ref for given handle
1713  * @proc:	proc containing the ref
1714  * @desc:	the handle associated with the ref
1715  * @strong:	true=strong reference, false=weak reference
1716  * @rdata:	the id/refcount data for the ref
1717  *
1718  * Just calls binder_update_ref_for_handle() to decrement the ref.
1719  *
1720  * Return: 0 if successful, else errno
1721  */
1722 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1723 		uint32_t desc, bool strong, struct binder_ref_data *rdata)
1724 {
1725 	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1726 }
1727 
1728 
1729 /**
1730  * binder_inc_ref_for_node() - increment the ref for given proc/node
1731  * @proc:	 proc containing the ref
1732  * @node:	 target node
1733  * @strong:	 true=strong reference, false=weak reference
1734  * @target_list: worklist to use if node is incremented
1735  * @rdata:	 the id/refcount data for the ref
1736  *
1737  * Given a proc and node, increment the ref. Create the ref if it
1738  * doesn't already exist
1739  *
1740  * Return: 0 if successful, else errno
1741  */
1742 static int binder_inc_ref_for_node(struct binder_proc *proc,
1743 			struct binder_node *node,
1744 			bool strong,
1745 			struct list_head *target_list,
1746 			struct binder_ref_data *rdata)
1747 {
1748 	struct binder_ref *ref;
1749 	struct binder_ref *new_ref = NULL;
1750 	int ret = 0;
1751 
1752 	binder_proc_lock(proc);
1753 	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1754 	if (!ref) {
1755 		binder_proc_unlock(proc);
1756 		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1757 		if (!new_ref)
1758 			return -ENOMEM;
1759 		binder_proc_lock(proc);
1760 		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1761 	}
1762 	ret = binder_inc_ref_olocked(ref, strong, target_list);
1763 	*rdata = ref->data;
1764 	binder_proc_unlock(proc);
1765 	if (new_ref && ref != new_ref)
1766 		/*
1767 		 * Another thread created the ref first so
1768 		 * free the one we allocated
1769 		 */
1770 		kfree(new_ref);
1771 	return ret;
1772 }
1773 
1774 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1775 					   struct binder_transaction *t)
1776 {
1777 	BUG_ON(!target_thread);
1778 	assert_spin_locked(&target_thread->proc->inner_lock);
1779 	BUG_ON(target_thread->transaction_stack != t);
1780 	BUG_ON(target_thread->transaction_stack->from != target_thread);
1781 	target_thread->transaction_stack =
1782 		target_thread->transaction_stack->from_parent;
1783 	t->from = NULL;
1784 }
1785 
1786 /**
1787  * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1788  * @thread:	thread to decrement
1789  *
1790  * A thread needs to be kept alive while being used to create or
1791  * handle a transaction. binder_get_txn_from() is used to safely
1792  * extract t->from from a binder_transaction and keep the thread
1793  * indicated by t->from from being freed. When done with that
1794  * binder_thread, this function is called to decrement the
1795  * tmp_ref and free if appropriate (thread has been released
1796  * and no transaction being processed by the driver)
1797  */
1798 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1799 {
1800 	/*
1801 	 * atomic is used to protect the counter value while
1802 	 * it cannot reach zero or thread->is_dead is false
1803 	 */
1804 	binder_inner_proc_lock(thread->proc);
1805 	atomic_dec(&thread->tmp_ref);
1806 	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1807 		binder_inner_proc_unlock(thread->proc);
1808 		binder_free_thread(thread);
1809 		return;
1810 	}
1811 	binder_inner_proc_unlock(thread->proc);
1812 }
1813 
1814 /**
1815  * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1816  * @proc:	proc to decrement
1817  *
1818  * A binder_proc needs to be kept alive while being used to create or
1819  * handle a transaction. proc->tmp_ref is incremented when
1820  * creating a new transaction or the binder_proc is currently in-use
1821  * by threads that are being released. When done with the binder_proc,
1822  * this function is called to decrement the counter and free the
1823  * proc if appropriate (proc has been released, all threads have
1824  * been released and not currenly in-use to process a transaction).
1825  */
1826 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1827 {
1828 	binder_inner_proc_lock(proc);
1829 	proc->tmp_ref--;
1830 	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1831 			!proc->tmp_ref) {
1832 		binder_inner_proc_unlock(proc);
1833 		binder_free_proc(proc);
1834 		return;
1835 	}
1836 	binder_inner_proc_unlock(proc);
1837 }
1838 
1839 /**
1840  * binder_get_txn_from() - safely extract the "from" thread in transaction
1841  * @t:	binder transaction for t->from
1842  *
1843  * Atomically return the "from" thread and increment the tmp_ref
1844  * count for the thread to ensure it stays alive until
1845  * binder_thread_dec_tmpref() is called.
1846  *
1847  * Return: the value of t->from
1848  */
1849 static struct binder_thread *binder_get_txn_from(
1850 		struct binder_transaction *t)
1851 {
1852 	struct binder_thread *from;
1853 
1854 	spin_lock(&t->lock);
1855 	from = t->from;
1856 	if (from)
1857 		atomic_inc(&from->tmp_ref);
1858 	spin_unlock(&t->lock);
1859 	return from;
1860 }
1861 
1862 /**
1863  * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1864  * @t:	binder transaction for t->from
1865  *
1866  * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1867  * to guarantee that the thread cannot be released while operating on it.
1868  * The caller must call binder_inner_proc_unlock() to release the inner lock
1869  * as well as call binder_dec_thread_txn() to release the reference.
1870  *
1871  * Return: the value of t->from
1872  */
1873 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1874 		struct binder_transaction *t)
1875 	__acquires(&t->from->proc->inner_lock)
1876 {
1877 	struct binder_thread *from;
1878 
1879 	from = binder_get_txn_from(t);
1880 	if (!from) {
1881 		__acquire(&from->proc->inner_lock);
1882 		return NULL;
1883 	}
1884 	binder_inner_proc_lock(from->proc);
1885 	if (t->from) {
1886 		BUG_ON(from != t->from);
1887 		return from;
1888 	}
1889 	binder_inner_proc_unlock(from->proc);
1890 	__acquire(&from->proc->inner_lock);
1891 	binder_thread_dec_tmpref(from);
1892 	return NULL;
1893 }
1894 
1895 /**
1896  * binder_free_txn_fixups() - free unprocessed fd fixups
1897  * @t:	binder transaction for t->from
1898  *
1899  * If the transaction is being torn down prior to being
1900  * processed by the target process, free all of the
1901  * fd fixups and fput the file structs. It is safe to
1902  * call this function after the fixups have been
1903  * processed -- in that case, the list will be empty.
1904  */
1905 static void binder_free_txn_fixups(struct binder_transaction *t)
1906 {
1907 	struct binder_txn_fd_fixup *fixup, *tmp;
1908 
1909 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1910 		fput(fixup->file);
1911 		list_del(&fixup->fixup_entry);
1912 		kfree(fixup);
1913 	}
1914 }
1915 
1916 static void binder_free_transaction(struct binder_transaction *t)
1917 {
1918 	struct binder_proc *target_proc = t->to_proc;
1919 
1920 	if (target_proc) {
1921 		binder_inner_proc_lock(target_proc);
1922 		if (t->buffer)
1923 			t->buffer->transaction = NULL;
1924 		binder_inner_proc_unlock(target_proc);
1925 	}
1926 	/*
1927 	 * If the transaction has no target_proc, then
1928 	 * t->buffer->transaction has already been cleared.
1929 	 */
1930 	binder_free_txn_fixups(t);
1931 	kfree(t);
1932 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
1933 }
1934 
1935 static void binder_send_failed_reply(struct binder_transaction *t,
1936 				     uint32_t error_code)
1937 {
1938 	struct binder_thread *target_thread;
1939 	struct binder_transaction *next;
1940 
1941 	BUG_ON(t->flags & TF_ONE_WAY);
1942 	while (1) {
1943 		target_thread = binder_get_txn_from_and_acq_inner(t);
1944 		if (target_thread) {
1945 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1946 				     "send failed reply for transaction %d to %d:%d\n",
1947 				      t->debug_id,
1948 				      target_thread->proc->pid,
1949 				      target_thread->pid);
1950 
1951 			binder_pop_transaction_ilocked(target_thread, t);
1952 			if (target_thread->reply_error.cmd == BR_OK) {
1953 				target_thread->reply_error.cmd = error_code;
1954 				binder_enqueue_thread_work_ilocked(
1955 					target_thread,
1956 					&target_thread->reply_error.work);
1957 				wake_up_interruptible(&target_thread->wait);
1958 			} else {
1959 				/*
1960 				 * Cannot get here for normal operation, but
1961 				 * we can if multiple synchronous transactions
1962 				 * are sent without blocking for responses.
1963 				 * Just ignore the 2nd error in this case.
1964 				 */
1965 				pr_warn("Unexpected reply error: %u\n",
1966 					target_thread->reply_error.cmd);
1967 			}
1968 			binder_inner_proc_unlock(target_thread->proc);
1969 			binder_thread_dec_tmpref(target_thread);
1970 			binder_free_transaction(t);
1971 			return;
1972 		}
1973 		__release(&target_thread->proc->inner_lock);
1974 		next = t->from_parent;
1975 
1976 		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1977 			     "send failed reply for transaction %d, target dead\n",
1978 			     t->debug_id);
1979 
1980 		binder_free_transaction(t);
1981 		if (next == NULL) {
1982 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
1983 				     "reply failed, no target thread at root\n");
1984 			return;
1985 		}
1986 		t = next;
1987 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1988 			     "reply failed, no target thread -- retry %d\n",
1989 			      t->debug_id);
1990 	}
1991 }
1992 
1993 /**
1994  * binder_cleanup_transaction() - cleans up undelivered transaction
1995  * @t:		transaction that needs to be cleaned up
1996  * @reason:	reason the transaction wasn't delivered
1997  * @error_code:	error to return to caller (if synchronous call)
1998  */
1999 static void binder_cleanup_transaction(struct binder_transaction *t,
2000 				       const char *reason,
2001 				       uint32_t error_code)
2002 {
2003 	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2004 		binder_send_failed_reply(t, error_code);
2005 	} else {
2006 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2007 			"undelivered transaction %d, %s\n",
2008 			t->debug_id, reason);
2009 		binder_free_transaction(t);
2010 	}
2011 }
2012 
2013 /**
2014  * binder_get_object() - gets object and checks for valid metadata
2015  * @proc:	binder_proc owning the buffer
2016  * @buffer:	binder_buffer that we're parsing.
2017  * @offset:	offset in the @buffer at which to validate an object.
2018  * @object:	struct binder_object to read into
2019  *
2020  * Return:	If there's a valid metadata object at @offset in @buffer, the
2021  *		size of that object. Otherwise, it returns zero. The object
2022  *		is read into the struct binder_object pointed to by @object.
2023  */
2024 static size_t binder_get_object(struct binder_proc *proc,
2025 				struct binder_buffer *buffer,
2026 				unsigned long offset,
2027 				struct binder_object *object)
2028 {
2029 	size_t read_size;
2030 	struct binder_object_header *hdr;
2031 	size_t object_size = 0;
2032 
2033 	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
2034 	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
2035 	    binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
2036 					  offset, read_size))
2037 		return 0;
2038 
2039 	/* Ok, now see if we read a complete object. */
2040 	hdr = &object->hdr;
2041 	switch (hdr->type) {
2042 	case BINDER_TYPE_BINDER:
2043 	case BINDER_TYPE_WEAK_BINDER:
2044 	case BINDER_TYPE_HANDLE:
2045 	case BINDER_TYPE_WEAK_HANDLE:
2046 		object_size = sizeof(struct flat_binder_object);
2047 		break;
2048 	case BINDER_TYPE_FD:
2049 		object_size = sizeof(struct binder_fd_object);
2050 		break;
2051 	case BINDER_TYPE_PTR:
2052 		object_size = sizeof(struct binder_buffer_object);
2053 		break;
2054 	case BINDER_TYPE_FDA:
2055 		object_size = sizeof(struct binder_fd_array_object);
2056 		break;
2057 	default:
2058 		return 0;
2059 	}
2060 	if (offset <= buffer->data_size - object_size &&
2061 	    buffer->data_size >= object_size)
2062 		return object_size;
2063 	else
2064 		return 0;
2065 }
2066 
2067 /**
2068  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2069  * @proc:	binder_proc owning the buffer
2070  * @b:		binder_buffer containing the object
2071  * @object:	struct binder_object to read into
2072  * @index:	index in offset array at which the binder_buffer_object is
2073  *		located
2074  * @start_offset: points to the start of the offset array
2075  * @object_offsetp: offset of @object read from @b
2076  * @num_valid:	the number of valid offsets in the offset array
2077  *
2078  * Return:	If @index is within the valid range of the offset array
2079  *		described by @start and @num_valid, and if there's a valid
2080  *		binder_buffer_object at the offset found in index @index
2081  *		of the offset array, that object is returned. Otherwise,
2082  *		%NULL is returned.
2083  *		Note that the offset found in index @index itself is not
2084  *		verified; this function assumes that @num_valid elements
2085  *		from @start were previously verified to have valid offsets.
2086  *		If @object_offsetp is non-NULL, then the offset within
2087  *		@b is written to it.
2088  */
2089 static struct binder_buffer_object *binder_validate_ptr(
2090 						struct binder_proc *proc,
2091 						struct binder_buffer *b,
2092 						struct binder_object *object,
2093 						binder_size_t index,
2094 						binder_size_t start_offset,
2095 						binder_size_t *object_offsetp,
2096 						binder_size_t num_valid)
2097 {
2098 	size_t object_size;
2099 	binder_size_t object_offset;
2100 	unsigned long buffer_offset;
2101 
2102 	if (index >= num_valid)
2103 		return NULL;
2104 
2105 	buffer_offset = start_offset + sizeof(binder_size_t) * index;
2106 	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2107 					  b, buffer_offset,
2108 					  sizeof(object_offset)))
2109 		return NULL;
2110 	object_size = binder_get_object(proc, b, object_offset, object);
2111 	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
2112 		return NULL;
2113 	if (object_offsetp)
2114 		*object_offsetp = object_offset;
2115 
2116 	return &object->bbo;
2117 }
2118 
2119 /**
2120  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2121  * @proc:		binder_proc owning the buffer
2122  * @b:			transaction buffer
2123  * @objects_start_offset: offset to start of objects buffer
2124  * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
2125  * @fixup_offset:	start offset in @buffer to fix up
2126  * @last_obj_offset:	offset to last binder_buffer_object that we fixed
2127  * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
2128  *
2129  * Return:		%true if a fixup in buffer @buffer at offset @offset is
2130  *			allowed.
2131  *
2132  * For safety reasons, we only allow fixups inside a buffer to happen
2133  * at increasing offsets; additionally, we only allow fixup on the last
2134  * buffer object that was verified, or one of its parents.
2135  *
2136  * Example of what is allowed:
2137  *
2138  * A
2139  *   B (parent = A, offset = 0)
2140  *   C (parent = A, offset = 16)
2141  *     D (parent = C, offset = 0)
2142  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2143  *
2144  * Examples of what is not allowed:
2145  *
2146  * Decreasing offsets within the same parent:
2147  * A
2148  *   C (parent = A, offset = 16)
2149  *   B (parent = A, offset = 0) // decreasing offset within A
2150  *
2151  * Referring to a parent that wasn't the last object or any of its parents:
2152  * A
2153  *   B (parent = A, offset = 0)
2154  *   C (parent = A, offset = 0)
2155  *   C (parent = A, offset = 16)
2156  *     D (parent = B, offset = 0) // B is not A or any of A's parents
2157  */
2158 static bool binder_validate_fixup(struct binder_proc *proc,
2159 				  struct binder_buffer *b,
2160 				  binder_size_t objects_start_offset,
2161 				  binder_size_t buffer_obj_offset,
2162 				  binder_size_t fixup_offset,
2163 				  binder_size_t last_obj_offset,
2164 				  binder_size_t last_min_offset)
2165 {
2166 	if (!last_obj_offset) {
2167 		/* Nothing to fix up in */
2168 		return false;
2169 	}
2170 
2171 	while (last_obj_offset != buffer_obj_offset) {
2172 		unsigned long buffer_offset;
2173 		struct binder_object last_object;
2174 		struct binder_buffer_object *last_bbo;
2175 		size_t object_size = binder_get_object(proc, b, last_obj_offset,
2176 						       &last_object);
2177 		if (object_size != sizeof(*last_bbo))
2178 			return false;
2179 
2180 		last_bbo = &last_object.bbo;
2181 		/*
2182 		 * Safe to retrieve the parent of last_obj, since it
2183 		 * was already previously verified by the driver.
2184 		 */
2185 		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2186 			return false;
2187 		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
2188 		buffer_offset = objects_start_offset +
2189 			sizeof(binder_size_t) * last_bbo->parent;
2190 		if (binder_alloc_copy_from_buffer(&proc->alloc,
2191 						  &last_obj_offset,
2192 						  b, buffer_offset,
2193 						  sizeof(last_obj_offset)))
2194 			return false;
2195 	}
2196 	return (fixup_offset >= last_min_offset);
2197 }
2198 
2199 /**
2200  * struct binder_task_work_cb - for deferred close
2201  *
2202  * @twork:                callback_head for task work
2203  * @fd:                   fd to close
2204  *
2205  * Structure to pass task work to be handled after
2206  * returning from binder_ioctl() via task_work_add().
2207  */
2208 struct binder_task_work_cb {
2209 	struct callback_head twork;
2210 	struct file *file;
2211 };
2212 
2213 /**
2214  * binder_do_fd_close() - close list of file descriptors
2215  * @twork:	callback head for task work
2216  *
2217  * It is not safe to call ksys_close() during the binder_ioctl()
2218  * function if there is a chance that binder's own file descriptor
2219  * might be closed. This is to meet the requirements for using
2220  * fdget() (see comments for __fget_light()). Therefore use
2221  * task_work_add() to schedule the close operation once we have
2222  * returned from binder_ioctl(). This function is a callback
2223  * for that mechanism and does the actual ksys_close() on the
2224  * given file descriptor.
2225  */
2226 static void binder_do_fd_close(struct callback_head *twork)
2227 {
2228 	struct binder_task_work_cb *twcb = container_of(twork,
2229 			struct binder_task_work_cb, twork);
2230 
2231 	fput(twcb->file);
2232 	kfree(twcb);
2233 }
2234 
2235 /**
2236  * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2237  * @fd:		file-descriptor to close
2238  *
2239  * See comments in binder_do_fd_close(). This function is used to schedule
2240  * a file-descriptor to be closed after returning from binder_ioctl().
2241  */
2242 static void binder_deferred_fd_close(int fd)
2243 {
2244 	struct binder_task_work_cb *twcb;
2245 
2246 	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2247 	if (!twcb)
2248 		return;
2249 	init_task_work(&twcb->twork, binder_do_fd_close);
2250 	__close_fd_get_file(fd, &twcb->file);
2251 	if (twcb->file) {
2252 		filp_close(twcb->file, current->files);
2253 		task_work_add(current, &twcb->twork, true);
2254 	} else {
2255 		kfree(twcb);
2256 	}
2257 }
2258 
2259 static void binder_transaction_buffer_release(struct binder_proc *proc,
2260 					      struct binder_buffer *buffer,
2261 					      binder_size_t failed_at,
2262 					      bool is_failure)
2263 {
2264 	int debug_id = buffer->debug_id;
2265 	binder_size_t off_start_offset, buffer_offset, off_end_offset;
2266 
2267 	binder_debug(BINDER_DEBUG_TRANSACTION,
2268 		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2269 		     proc->pid, buffer->debug_id,
2270 		     buffer->data_size, buffer->offsets_size,
2271 		     (unsigned long long)failed_at);
2272 
2273 	if (buffer->target_node)
2274 		binder_dec_node(buffer->target_node, 1, 0);
2275 
2276 	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2277 	off_end_offset = is_failure ? failed_at :
2278 				off_start_offset + buffer->offsets_size;
2279 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2280 	     buffer_offset += sizeof(binder_size_t)) {
2281 		struct binder_object_header *hdr;
2282 		size_t object_size = 0;
2283 		struct binder_object object;
2284 		binder_size_t object_offset;
2285 
2286 		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2287 						   buffer, buffer_offset,
2288 						   sizeof(object_offset)))
2289 			object_size = binder_get_object(proc, buffer,
2290 							object_offset, &object);
2291 		if (object_size == 0) {
2292 			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2293 			       debug_id, (u64)object_offset, buffer->data_size);
2294 			continue;
2295 		}
2296 		hdr = &object.hdr;
2297 		switch (hdr->type) {
2298 		case BINDER_TYPE_BINDER:
2299 		case BINDER_TYPE_WEAK_BINDER: {
2300 			struct flat_binder_object *fp;
2301 			struct binder_node *node;
2302 
2303 			fp = to_flat_binder_object(hdr);
2304 			node = binder_get_node(proc, fp->binder);
2305 			if (node == NULL) {
2306 				pr_err("transaction release %d bad node %016llx\n",
2307 				       debug_id, (u64)fp->binder);
2308 				break;
2309 			}
2310 			binder_debug(BINDER_DEBUG_TRANSACTION,
2311 				     "        node %d u%016llx\n",
2312 				     node->debug_id, (u64)node->ptr);
2313 			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2314 					0);
2315 			binder_put_node(node);
2316 		} break;
2317 		case BINDER_TYPE_HANDLE:
2318 		case BINDER_TYPE_WEAK_HANDLE: {
2319 			struct flat_binder_object *fp;
2320 			struct binder_ref_data rdata;
2321 			int ret;
2322 
2323 			fp = to_flat_binder_object(hdr);
2324 			ret = binder_dec_ref_for_handle(proc, fp->handle,
2325 				hdr->type == BINDER_TYPE_HANDLE, &rdata);
2326 
2327 			if (ret) {
2328 				pr_err("transaction release %d bad handle %d, ret = %d\n",
2329 				 debug_id, fp->handle, ret);
2330 				break;
2331 			}
2332 			binder_debug(BINDER_DEBUG_TRANSACTION,
2333 				     "        ref %d desc %d\n",
2334 				     rdata.debug_id, rdata.desc);
2335 		} break;
2336 
2337 		case BINDER_TYPE_FD: {
2338 			/*
2339 			 * No need to close the file here since user-space
2340 			 * closes it for for successfully delivered
2341 			 * transactions. For transactions that weren't
2342 			 * delivered, the new fd was never allocated so
2343 			 * there is no need to close and the fput on the
2344 			 * file is done when the transaction is torn
2345 			 * down.
2346 			 */
2347 			WARN_ON(failed_at &&
2348 				proc->tsk == current->group_leader);
2349 		} break;
2350 		case BINDER_TYPE_PTR:
2351 			/*
2352 			 * Nothing to do here, this will get cleaned up when the
2353 			 * transaction buffer gets freed
2354 			 */
2355 			break;
2356 		case BINDER_TYPE_FDA: {
2357 			struct binder_fd_array_object *fda;
2358 			struct binder_buffer_object *parent;
2359 			struct binder_object ptr_object;
2360 			binder_size_t fda_offset;
2361 			size_t fd_index;
2362 			binder_size_t fd_buf_size;
2363 			binder_size_t num_valid;
2364 
2365 			if (proc->tsk != current->group_leader) {
2366 				/*
2367 				 * Nothing to do if running in sender context
2368 				 * The fd fixups have not been applied so no
2369 				 * fds need to be closed.
2370 				 */
2371 				continue;
2372 			}
2373 
2374 			num_valid = (buffer_offset - off_start_offset) /
2375 						sizeof(binder_size_t);
2376 			fda = to_binder_fd_array_object(hdr);
2377 			parent = binder_validate_ptr(proc, buffer, &ptr_object,
2378 						     fda->parent,
2379 						     off_start_offset,
2380 						     NULL,
2381 						     num_valid);
2382 			if (!parent) {
2383 				pr_err("transaction release %d bad parent offset\n",
2384 				       debug_id);
2385 				continue;
2386 			}
2387 			fd_buf_size = sizeof(u32) * fda->num_fds;
2388 			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2389 				pr_err("transaction release %d invalid number of fds (%lld)\n",
2390 				       debug_id, (u64)fda->num_fds);
2391 				continue;
2392 			}
2393 			if (fd_buf_size > parent->length ||
2394 			    fda->parent_offset > parent->length - fd_buf_size) {
2395 				/* No space for all file descriptors here. */
2396 				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2397 				       debug_id, (u64)fda->num_fds);
2398 				continue;
2399 			}
2400 			/*
2401 			 * the source data for binder_buffer_object is visible
2402 			 * to user-space and the @buffer element is the user
2403 			 * pointer to the buffer_object containing the fd_array.
2404 			 * Convert the address to an offset relative to
2405 			 * the base of the transaction buffer.
2406 			 */
2407 			fda_offset =
2408 			    (parent->buffer - (uintptr_t)buffer->user_data) +
2409 			    fda->parent_offset;
2410 			for (fd_index = 0; fd_index < fda->num_fds;
2411 			     fd_index++) {
2412 				u32 fd;
2413 				int err;
2414 				binder_size_t offset = fda_offset +
2415 					fd_index * sizeof(fd);
2416 
2417 				err = binder_alloc_copy_from_buffer(
2418 						&proc->alloc, &fd, buffer,
2419 						offset, sizeof(fd));
2420 				WARN_ON(err);
2421 				if (!err)
2422 					binder_deferred_fd_close(fd);
2423 			}
2424 		} break;
2425 		default:
2426 			pr_err("transaction release %d bad object type %x\n",
2427 				debug_id, hdr->type);
2428 			break;
2429 		}
2430 	}
2431 }
2432 
2433 static int binder_translate_binder(struct flat_binder_object *fp,
2434 				   struct binder_transaction *t,
2435 				   struct binder_thread *thread)
2436 {
2437 	struct binder_node *node;
2438 	struct binder_proc *proc = thread->proc;
2439 	struct binder_proc *target_proc = t->to_proc;
2440 	struct binder_ref_data rdata;
2441 	int ret = 0;
2442 
2443 	node = binder_get_node(proc, fp->binder);
2444 	if (!node) {
2445 		node = binder_new_node(proc, fp);
2446 		if (!node)
2447 			return -ENOMEM;
2448 	}
2449 	if (fp->cookie != node->cookie) {
2450 		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2451 				  proc->pid, thread->pid, (u64)fp->binder,
2452 				  node->debug_id, (u64)fp->cookie,
2453 				  (u64)node->cookie);
2454 		ret = -EINVAL;
2455 		goto done;
2456 	}
2457 	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2458 		ret = -EPERM;
2459 		goto done;
2460 	}
2461 
2462 	ret = binder_inc_ref_for_node(target_proc, node,
2463 			fp->hdr.type == BINDER_TYPE_BINDER,
2464 			&thread->todo, &rdata);
2465 	if (ret)
2466 		goto done;
2467 
2468 	if (fp->hdr.type == BINDER_TYPE_BINDER)
2469 		fp->hdr.type = BINDER_TYPE_HANDLE;
2470 	else
2471 		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2472 	fp->binder = 0;
2473 	fp->handle = rdata.desc;
2474 	fp->cookie = 0;
2475 
2476 	trace_binder_transaction_node_to_ref(t, node, &rdata);
2477 	binder_debug(BINDER_DEBUG_TRANSACTION,
2478 		     "        node %d u%016llx -> ref %d desc %d\n",
2479 		     node->debug_id, (u64)node->ptr,
2480 		     rdata.debug_id, rdata.desc);
2481 done:
2482 	binder_put_node(node);
2483 	return ret;
2484 }
2485 
2486 static int binder_translate_handle(struct flat_binder_object *fp,
2487 				   struct binder_transaction *t,
2488 				   struct binder_thread *thread)
2489 {
2490 	struct binder_proc *proc = thread->proc;
2491 	struct binder_proc *target_proc = t->to_proc;
2492 	struct binder_node *node;
2493 	struct binder_ref_data src_rdata;
2494 	int ret = 0;
2495 
2496 	node = binder_get_node_from_ref(proc, fp->handle,
2497 			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2498 	if (!node) {
2499 		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2500 				  proc->pid, thread->pid, fp->handle);
2501 		return -EINVAL;
2502 	}
2503 	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2504 		ret = -EPERM;
2505 		goto done;
2506 	}
2507 
2508 	binder_node_lock(node);
2509 	if (node->proc == target_proc) {
2510 		if (fp->hdr.type == BINDER_TYPE_HANDLE)
2511 			fp->hdr.type = BINDER_TYPE_BINDER;
2512 		else
2513 			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2514 		fp->binder = node->ptr;
2515 		fp->cookie = node->cookie;
2516 		if (node->proc)
2517 			binder_inner_proc_lock(node->proc);
2518 		else
2519 			__acquire(&node->proc->inner_lock);
2520 		binder_inc_node_nilocked(node,
2521 					 fp->hdr.type == BINDER_TYPE_BINDER,
2522 					 0, NULL);
2523 		if (node->proc)
2524 			binder_inner_proc_unlock(node->proc);
2525 		else
2526 			__release(&node->proc->inner_lock);
2527 		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2528 		binder_debug(BINDER_DEBUG_TRANSACTION,
2529 			     "        ref %d desc %d -> node %d u%016llx\n",
2530 			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
2531 			     (u64)node->ptr);
2532 		binder_node_unlock(node);
2533 	} else {
2534 		struct binder_ref_data dest_rdata;
2535 
2536 		binder_node_unlock(node);
2537 		ret = binder_inc_ref_for_node(target_proc, node,
2538 				fp->hdr.type == BINDER_TYPE_HANDLE,
2539 				NULL, &dest_rdata);
2540 		if (ret)
2541 			goto done;
2542 
2543 		fp->binder = 0;
2544 		fp->handle = dest_rdata.desc;
2545 		fp->cookie = 0;
2546 		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2547 						    &dest_rdata);
2548 		binder_debug(BINDER_DEBUG_TRANSACTION,
2549 			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2550 			     src_rdata.debug_id, src_rdata.desc,
2551 			     dest_rdata.debug_id, dest_rdata.desc,
2552 			     node->debug_id);
2553 	}
2554 done:
2555 	binder_put_node(node);
2556 	return ret;
2557 }
2558 
2559 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2560 			       struct binder_transaction *t,
2561 			       struct binder_thread *thread,
2562 			       struct binder_transaction *in_reply_to)
2563 {
2564 	struct binder_proc *proc = thread->proc;
2565 	struct binder_proc *target_proc = t->to_proc;
2566 	struct binder_txn_fd_fixup *fixup;
2567 	struct file *file;
2568 	int ret = 0;
2569 	bool target_allows_fd;
2570 
2571 	if (in_reply_to)
2572 		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2573 	else
2574 		target_allows_fd = t->buffer->target_node->accept_fds;
2575 	if (!target_allows_fd) {
2576 		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2577 				  proc->pid, thread->pid,
2578 				  in_reply_to ? "reply" : "transaction",
2579 				  fd);
2580 		ret = -EPERM;
2581 		goto err_fd_not_accepted;
2582 	}
2583 
2584 	file = fget(fd);
2585 	if (!file) {
2586 		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2587 				  proc->pid, thread->pid, fd);
2588 		ret = -EBADF;
2589 		goto err_fget;
2590 	}
2591 	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2592 	if (ret < 0) {
2593 		ret = -EPERM;
2594 		goto err_security;
2595 	}
2596 
2597 	/*
2598 	 * Add fixup record for this transaction. The allocation
2599 	 * of the fd in the target needs to be done from a
2600 	 * target thread.
2601 	 */
2602 	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2603 	if (!fixup) {
2604 		ret = -ENOMEM;
2605 		goto err_alloc;
2606 	}
2607 	fixup->file = file;
2608 	fixup->offset = fd_offset;
2609 	trace_binder_transaction_fd_send(t, fd, fixup->offset);
2610 	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2611 
2612 	return ret;
2613 
2614 err_alloc:
2615 err_security:
2616 	fput(file);
2617 err_fget:
2618 err_fd_not_accepted:
2619 	return ret;
2620 }
2621 
2622 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2623 				     struct binder_buffer_object *parent,
2624 				     struct binder_transaction *t,
2625 				     struct binder_thread *thread,
2626 				     struct binder_transaction *in_reply_to)
2627 {
2628 	binder_size_t fdi, fd_buf_size;
2629 	binder_size_t fda_offset;
2630 	struct binder_proc *proc = thread->proc;
2631 	struct binder_proc *target_proc = t->to_proc;
2632 
2633 	fd_buf_size = sizeof(u32) * fda->num_fds;
2634 	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2635 		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2636 				  proc->pid, thread->pid, (u64)fda->num_fds);
2637 		return -EINVAL;
2638 	}
2639 	if (fd_buf_size > parent->length ||
2640 	    fda->parent_offset > parent->length - fd_buf_size) {
2641 		/* No space for all file descriptors here. */
2642 		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2643 				  proc->pid, thread->pid, (u64)fda->num_fds);
2644 		return -EINVAL;
2645 	}
2646 	/*
2647 	 * the source data for binder_buffer_object is visible
2648 	 * to user-space and the @buffer element is the user
2649 	 * pointer to the buffer_object containing the fd_array.
2650 	 * Convert the address to an offset relative to
2651 	 * the base of the transaction buffer.
2652 	 */
2653 	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2654 		fda->parent_offset;
2655 	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
2656 		binder_user_error("%d:%d parent offset not aligned correctly.\n",
2657 				  proc->pid, thread->pid);
2658 		return -EINVAL;
2659 	}
2660 	for (fdi = 0; fdi < fda->num_fds; fdi++) {
2661 		u32 fd;
2662 		int ret;
2663 		binder_size_t offset = fda_offset + fdi * sizeof(fd);
2664 
2665 		ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
2666 						    &fd, t->buffer,
2667 						    offset, sizeof(fd));
2668 		if (!ret)
2669 			ret = binder_translate_fd(fd, offset, t, thread,
2670 						  in_reply_to);
2671 		if (ret < 0)
2672 			return ret;
2673 	}
2674 	return 0;
2675 }
2676 
2677 static int binder_fixup_parent(struct binder_transaction *t,
2678 			       struct binder_thread *thread,
2679 			       struct binder_buffer_object *bp,
2680 			       binder_size_t off_start_offset,
2681 			       binder_size_t num_valid,
2682 			       binder_size_t last_fixup_obj_off,
2683 			       binder_size_t last_fixup_min_off)
2684 {
2685 	struct binder_buffer_object *parent;
2686 	struct binder_buffer *b = t->buffer;
2687 	struct binder_proc *proc = thread->proc;
2688 	struct binder_proc *target_proc = t->to_proc;
2689 	struct binder_object object;
2690 	binder_size_t buffer_offset;
2691 	binder_size_t parent_offset;
2692 
2693 	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2694 		return 0;
2695 
2696 	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2697 				     off_start_offset, &parent_offset,
2698 				     num_valid);
2699 	if (!parent) {
2700 		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2701 				  proc->pid, thread->pid);
2702 		return -EINVAL;
2703 	}
2704 
2705 	if (!binder_validate_fixup(target_proc, b, off_start_offset,
2706 				   parent_offset, bp->parent_offset,
2707 				   last_fixup_obj_off,
2708 				   last_fixup_min_off)) {
2709 		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2710 				  proc->pid, thread->pid);
2711 		return -EINVAL;
2712 	}
2713 
2714 	if (parent->length < sizeof(binder_uintptr_t) ||
2715 	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2716 		/* No space for a pointer here! */
2717 		binder_user_error("%d:%d got transaction with invalid parent offset\n",
2718 				  proc->pid, thread->pid);
2719 		return -EINVAL;
2720 	}
2721 	buffer_offset = bp->parent_offset +
2722 			(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2723 	if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
2724 					&bp->buffer, sizeof(bp->buffer))) {
2725 		binder_user_error("%d:%d got transaction with invalid parent offset\n",
2726 				  proc->pid, thread->pid);
2727 		return -EINVAL;
2728 	}
2729 
2730 	return 0;
2731 }
2732 
2733 /**
2734  * binder_proc_transaction() - sends a transaction to a process and wakes it up
2735  * @t:		transaction to send
2736  * @proc:	process to send the transaction to
2737  * @thread:	thread in @proc to send the transaction to (may be NULL)
2738  *
2739  * This function queues a transaction to the specified process. It will try
2740  * to find a thread in the target process to handle the transaction and
2741  * wake it up. If no thread is found, the work is queued to the proc
2742  * waitqueue.
2743  *
2744  * If the @thread parameter is not NULL, the transaction is always queued
2745  * to the waitlist of that specific thread.
2746  *
2747  * Return:	true if the transactions was successfully queued
2748  *		false if the target process or thread is dead
2749  */
2750 static bool binder_proc_transaction(struct binder_transaction *t,
2751 				    struct binder_proc *proc,
2752 				    struct binder_thread *thread)
2753 {
2754 	struct binder_node *node = t->buffer->target_node;
2755 	bool oneway = !!(t->flags & TF_ONE_WAY);
2756 	bool pending_async = false;
2757 
2758 	BUG_ON(!node);
2759 	binder_node_lock(node);
2760 	if (oneway) {
2761 		BUG_ON(thread);
2762 		if (node->has_async_transaction)
2763 			pending_async = true;
2764 		else
2765 			node->has_async_transaction = true;
2766 	}
2767 
2768 	binder_inner_proc_lock(proc);
2769 
2770 	if (proc->is_dead || (thread && thread->is_dead)) {
2771 		binder_inner_proc_unlock(proc);
2772 		binder_node_unlock(node);
2773 		return false;
2774 	}
2775 
2776 	if (!thread && !pending_async)
2777 		thread = binder_select_thread_ilocked(proc);
2778 
2779 	if (thread)
2780 		binder_enqueue_thread_work_ilocked(thread, &t->work);
2781 	else if (!pending_async)
2782 		binder_enqueue_work_ilocked(&t->work, &proc->todo);
2783 	else
2784 		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2785 
2786 	if (!pending_async)
2787 		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2788 
2789 	binder_inner_proc_unlock(proc);
2790 	binder_node_unlock(node);
2791 
2792 	return true;
2793 }
2794 
2795 /**
2796  * binder_get_node_refs_for_txn() - Get required refs on node for txn
2797  * @node:         struct binder_node for which to get refs
2798  * @proc:         returns @node->proc if valid
2799  * @error:        if no @proc then returns BR_DEAD_REPLY
2800  *
2801  * User-space normally keeps the node alive when creating a transaction
2802  * since it has a reference to the target. The local strong ref keeps it
2803  * alive if the sending process dies before the target process processes
2804  * the transaction. If the source process is malicious or has a reference
2805  * counting bug, relying on the local strong ref can fail.
2806  *
2807  * Since user-space can cause the local strong ref to go away, we also take
2808  * a tmpref on the node to ensure it survives while we are constructing
2809  * the transaction. We also need a tmpref on the proc while we are
2810  * constructing the transaction, so we take that here as well.
2811  *
2812  * Return: The target_node with refs taken or NULL if no @node->proc is NULL.
2813  * Also sets @proc if valid. If the @node->proc is NULL indicating that the
2814  * target proc has died, @error is set to BR_DEAD_REPLY
2815  */
2816 static struct binder_node *binder_get_node_refs_for_txn(
2817 		struct binder_node *node,
2818 		struct binder_proc **procp,
2819 		uint32_t *error)
2820 {
2821 	struct binder_node *target_node = NULL;
2822 
2823 	binder_node_inner_lock(node);
2824 	if (node->proc) {
2825 		target_node = node;
2826 		binder_inc_node_nilocked(node, 1, 0, NULL);
2827 		binder_inc_node_tmpref_ilocked(node);
2828 		node->proc->tmp_ref++;
2829 		*procp = node->proc;
2830 	} else
2831 		*error = BR_DEAD_REPLY;
2832 	binder_node_inner_unlock(node);
2833 
2834 	return target_node;
2835 }
2836 
2837 static void binder_transaction(struct binder_proc *proc,
2838 			       struct binder_thread *thread,
2839 			       struct binder_transaction_data *tr, int reply,
2840 			       binder_size_t extra_buffers_size)
2841 {
2842 	int ret;
2843 	struct binder_transaction *t;
2844 	struct binder_work *w;
2845 	struct binder_work *tcomplete;
2846 	binder_size_t buffer_offset = 0;
2847 	binder_size_t off_start_offset, off_end_offset;
2848 	binder_size_t off_min;
2849 	binder_size_t sg_buf_offset, sg_buf_end_offset;
2850 	struct binder_proc *target_proc = NULL;
2851 	struct binder_thread *target_thread = NULL;
2852 	struct binder_node *target_node = NULL;
2853 	struct binder_transaction *in_reply_to = NULL;
2854 	struct binder_transaction_log_entry *e;
2855 	uint32_t return_error = 0;
2856 	uint32_t return_error_param = 0;
2857 	uint32_t return_error_line = 0;
2858 	binder_size_t last_fixup_obj_off = 0;
2859 	binder_size_t last_fixup_min_off = 0;
2860 	struct binder_context *context = proc->context;
2861 	int t_debug_id = atomic_inc_return(&binder_last_id);
2862 	char *secctx = NULL;
2863 	u32 secctx_sz = 0;
2864 
2865 	e = binder_transaction_log_add(&binder_transaction_log);
2866 	e->debug_id = t_debug_id;
2867 	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2868 	e->from_proc = proc->pid;
2869 	e->from_thread = thread->pid;
2870 	e->target_handle = tr->target.handle;
2871 	e->data_size = tr->data_size;
2872 	e->offsets_size = tr->offsets_size;
2873 	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2874 
2875 	if (reply) {
2876 		binder_inner_proc_lock(proc);
2877 		in_reply_to = thread->transaction_stack;
2878 		if (in_reply_to == NULL) {
2879 			binder_inner_proc_unlock(proc);
2880 			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2881 					  proc->pid, thread->pid);
2882 			return_error = BR_FAILED_REPLY;
2883 			return_error_param = -EPROTO;
2884 			return_error_line = __LINE__;
2885 			goto err_empty_call_stack;
2886 		}
2887 		if (in_reply_to->to_thread != thread) {
2888 			spin_lock(&in_reply_to->lock);
2889 			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2890 				proc->pid, thread->pid, in_reply_to->debug_id,
2891 				in_reply_to->to_proc ?
2892 				in_reply_to->to_proc->pid : 0,
2893 				in_reply_to->to_thread ?
2894 				in_reply_to->to_thread->pid : 0);
2895 			spin_unlock(&in_reply_to->lock);
2896 			binder_inner_proc_unlock(proc);
2897 			return_error = BR_FAILED_REPLY;
2898 			return_error_param = -EPROTO;
2899 			return_error_line = __LINE__;
2900 			in_reply_to = NULL;
2901 			goto err_bad_call_stack;
2902 		}
2903 		thread->transaction_stack = in_reply_to->to_parent;
2904 		binder_inner_proc_unlock(proc);
2905 		binder_set_nice(in_reply_to->saved_priority);
2906 		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2907 		if (target_thread == NULL) {
2908 			/* annotation for sparse */
2909 			__release(&target_thread->proc->inner_lock);
2910 			return_error = BR_DEAD_REPLY;
2911 			return_error_line = __LINE__;
2912 			goto err_dead_binder;
2913 		}
2914 		if (target_thread->transaction_stack != in_reply_to) {
2915 			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2916 				proc->pid, thread->pid,
2917 				target_thread->transaction_stack ?
2918 				target_thread->transaction_stack->debug_id : 0,
2919 				in_reply_to->debug_id);
2920 			binder_inner_proc_unlock(target_thread->proc);
2921 			return_error = BR_FAILED_REPLY;
2922 			return_error_param = -EPROTO;
2923 			return_error_line = __LINE__;
2924 			in_reply_to = NULL;
2925 			target_thread = NULL;
2926 			goto err_dead_binder;
2927 		}
2928 		target_proc = target_thread->proc;
2929 		target_proc->tmp_ref++;
2930 		binder_inner_proc_unlock(target_thread->proc);
2931 	} else {
2932 		if (tr->target.handle) {
2933 			struct binder_ref *ref;
2934 
2935 			/*
2936 			 * There must already be a strong ref
2937 			 * on this node. If so, do a strong
2938 			 * increment on the node to ensure it
2939 			 * stays alive until the transaction is
2940 			 * done.
2941 			 */
2942 			binder_proc_lock(proc);
2943 			ref = binder_get_ref_olocked(proc, tr->target.handle,
2944 						     true);
2945 			if (ref) {
2946 				target_node = binder_get_node_refs_for_txn(
2947 						ref->node, &target_proc,
2948 						&return_error);
2949 			} else {
2950 				binder_user_error("%d:%d got transaction to invalid handle\n",
2951 						  proc->pid, thread->pid);
2952 				return_error = BR_FAILED_REPLY;
2953 			}
2954 			binder_proc_unlock(proc);
2955 		} else {
2956 			mutex_lock(&context->context_mgr_node_lock);
2957 			target_node = context->binder_context_mgr_node;
2958 			if (target_node)
2959 				target_node = binder_get_node_refs_for_txn(
2960 						target_node, &target_proc,
2961 						&return_error);
2962 			else
2963 				return_error = BR_DEAD_REPLY;
2964 			mutex_unlock(&context->context_mgr_node_lock);
2965 			if (target_node && target_proc->pid == proc->pid) {
2966 				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2967 						  proc->pid, thread->pid);
2968 				return_error = BR_FAILED_REPLY;
2969 				return_error_param = -EINVAL;
2970 				return_error_line = __LINE__;
2971 				goto err_invalid_target_handle;
2972 			}
2973 		}
2974 		if (!target_node) {
2975 			/*
2976 			 * return_error is set above
2977 			 */
2978 			return_error_param = -EINVAL;
2979 			return_error_line = __LINE__;
2980 			goto err_dead_binder;
2981 		}
2982 		e->to_node = target_node->debug_id;
2983 		if (WARN_ON(proc == target_proc)) {
2984 			return_error = BR_FAILED_REPLY;
2985 			return_error_param = -EINVAL;
2986 			return_error_line = __LINE__;
2987 			goto err_invalid_target_handle;
2988 		}
2989 		if (security_binder_transaction(proc->tsk,
2990 						target_proc->tsk) < 0) {
2991 			return_error = BR_FAILED_REPLY;
2992 			return_error_param = -EPERM;
2993 			return_error_line = __LINE__;
2994 			goto err_invalid_target_handle;
2995 		}
2996 		binder_inner_proc_lock(proc);
2997 
2998 		w = list_first_entry_or_null(&thread->todo,
2999 					     struct binder_work, entry);
3000 		if (!(tr->flags & TF_ONE_WAY) && w &&
3001 		    w->type == BINDER_WORK_TRANSACTION) {
3002 			/*
3003 			 * Do not allow new outgoing transaction from a
3004 			 * thread that has a transaction at the head of
3005 			 * its todo list. Only need to check the head
3006 			 * because binder_select_thread_ilocked picks a
3007 			 * thread from proc->waiting_threads to enqueue
3008 			 * the transaction, and nothing is queued to the
3009 			 * todo list while the thread is on waiting_threads.
3010 			 */
3011 			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3012 					  proc->pid, thread->pid);
3013 			binder_inner_proc_unlock(proc);
3014 			return_error = BR_FAILED_REPLY;
3015 			return_error_param = -EPROTO;
3016 			return_error_line = __LINE__;
3017 			goto err_bad_todo_list;
3018 		}
3019 
3020 		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3021 			struct binder_transaction *tmp;
3022 
3023 			tmp = thread->transaction_stack;
3024 			if (tmp->to_thread != thread) {
3025 				spin_lock(&tmp->lock);
3026 				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3027 					proc->pid, thread->pid, tmp->debug_id,
3028 					tmp->to_proc ? tmp->to_proc->pid : 0,
3029 					tmp->to_thread ?
3030 					tmp->to_thread->pid : 0);
3031 				spin_unlock(&tmp->lock);
3032 				binder_inner_proc_unlock(proc);
3033 				return_error = BR_FAILED_REPLY;
3034 				return_error_param = -EPROTO;
3035 				return_error_line = __LINE__;
3036 				goto err_bad_call_stack;
3037 			}
3038 			while (tmp) {
3039 				struct binder_thread *from;
3040 
3041 				spin_lock(&tmp->lock);
3042 				from = tmp->from;
3043 				if (from && from->proc == target_proc) {
3044 					atomic_inc(&from->tmp_ref);
3045 					target_thread = from;
3046 					spin_unlock(&tmp->lock);
3047 					break;
3048 				}
3049 				spin_unlock(&tmp->lock);
3050 				tmp = tmp->from_parent;
3051 			}
3052 		}
3053 		binder_inner_proc_unlock(proc);
3054 	}
3055 	if (target_thread)
3056 		e->to_thread = target_thread->pid;
3057 	e->to_proc = target_proc->pid;
3058 
3059 	/* TODO: reuse incoming transaction for reply */
3060 	t = kzalloc(sizeof(*t), GFP_KERNEL);
3061 	if (t == NULL) {
3062 		return_error = BR_FAILED_REPLY;
3063 		return_error_param = -ENOMEM;
3064 		return_error_line = __LINE__;
3065 		goto err_alloc_t_failed;
3066 	}
3067 	INIT_LIST_HEAD(&t->fd_fixups);
3068 	binder_stats_created(BINDER_STAT_TRANSACTION);
3069 	spin_lock_init(&t->lock);
3070 
3071 	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3072 	if (tcomplete == NULL) {
3073 		return_error = BR_FAILED_REPLY;
3074 		return_error_param = -ENOMEM;
3075 		return_error_line = __LINE__;
3076 		goto err_alloc_tcomplete_failed;
3077 	}
3078 	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3079 
3080 	t->debug_id = t_debug_id;
3081 
3082 	if (reply)
3083 		binder_debug(BINDER_DEBUG_TRANSACTION,
3084 			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3085 			     proc->pid, thread->pid, t->debug_id,
3086 			     target_proc->pid, target_thread->pid,
3087 			     (u64)tr->data.ptr.buffer,
3088 			     (u64)tr->data.ptr.offsets,
3089 			     (u64)tr->data_size, (u64)tr->offsets_size,
3090 			     (u64)extra_buffers_size);
3091 	else
3092 		binder_debug(BINDER_DEBUG_TRANSACTION,
3093 			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3094 			     proc->pid, thread->pid, t->debug_id,
3095 			     target_proc->pid, target_node->debug_id,
3096 			     (u64)tr->data.ptr.buffer,
3097 			     (u64)tr->data.ptr.offsets,
3098 			     (u64)tr->data_size, (u64)tr->offsets_size,
3099 			     (u64)extra_buffers_size);
3100 
3101 	if (!reply && !(tr->flags & TF_ONE_WAY))
3102 		t->from = thread;
3103 	else
3104 		t->from = NULL;
3105 	t->sender_euid = task_euid(proc->tsk);
3106 	t->to_proc = target_proc;
3107 	t->to_thread = target_thread;
3108 	t->code = tr->code;
3109 	t->flags = tr->flags;
3110 	t->priority = task_nice(current);
3111 
3112 	if (target_node && target_node->txn_security_ctx) {
3113 		u32 secid;
3114 		size_t added_size;
3115 
3116 		security_task_getsecid(proc->tsk, &secid);
3117 		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3118 		if (ret) {
3119 			return_error = BR_FAILED_REPLY;
3120 			return_error_param = ret;
3121 			return_error_line = __LINE__;
3122 			goto err_get_secctx_failed;
3123 		}
3124 		added_size = ALIGN(secctx_sz, sizeof(u64));
3125 		extra_buffers_size += added_size;
3126 		if (extra_buffers_size < added_size) {
3127 			/* integer overflow of extra_buffers_size */
3128 			return_error = BR_FAILED_REPLY;
3129 			return_error_param = EINVAL;
3130 			return_error_line = __LINE__;
3131 			goto err_bad_extra_size;
3132 		}
3133 	}
3134 
3135 	trace_binder_transaction(reply, t, target_node);
3136 
3137 	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3138 		tr->offsets_size, extra_buffers_size,
3139 		!reply && (t->flags & TF_ONE_WAY));
3140 	if (IS_ERR(t->buffer)) {
3141 		/*
3142 		 * -ESRCH indicates VMA cleared. The target is dying.
3143 		 */
3144 		return_error_param = PTR_ERR(t->buffer);
3145 		return_error = return_error_param == -ESRCH ?
3146 			BR_DEAD_REPLY : BR_FAILED_REPLY;
3147 		return_error_line = __LINE__;
3148 		t->buffer = NULL;
3149 		goto err_binder_alloc_buf_failed;
3150 	}
3151 	if (secctx) {
3152 		int err;
3153 		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3154 				    ALIGN(tr->offsets_size, sizeof(void *)) +
3155 				    ALIGN(extra_buffers_size, sizeof(void *)) -
3156 				    ALIGN(secctx_sz, sizeof(u64));
3157 
3158 		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3159 		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3160 						  t->buffer, buf_offset,
3161 						  secctx, secctx_sz);
3162 		if (err) {
3163 			t->security_ctx = 0;
3164 			WARN_ON(1);
3165 		}
3166 		security_release_secctx(secctx, secctx_sz);
3167 		secctx = NULL;
3168 	}
3169 	t->buffer->debug_id = t->debug_id;
3170 	t->buffer->transaction = t;
3171 	t->buffer->target_node = target_node;
3172 	trace_binder_transaction_alloc_buf(t->buffer);
3173 
3174 	if (binder_alloc_copy_user_to_buffer(
3175 				&target_proc->alloc,
3176 				t->buffer, 0,
3177 				(const void __user *)
3178 					(uintptr_t)tr->data.ptr.buffer,
3179 				tr->data_size)) {
3180 		binder_user_error("%d:%d got transaction with invalid data ptr\n",
3181 				proc->pid, thread->pid);
3182 		return_error = BR_FAILED_REPLY;
3183 		return_error_param = -EFAULT;
3184 		return_error_line = __LINE__;
3185 		goto err_copy_data_failed;
3186 	}
3187 	if (binder_alloc_copy_user_to_buffer(
3188 				&target_proc->alloc,
3189 				t->buffer,
3190 				ALIGN(tr->data_size, sizeof(void *)),
3191 				(const void __user *)
3192 					(uintptr_t)tr->data.ptr.offsets,
3193 				tr->offsets_size)) {
3194 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3195 				proc->pid, thread->pid);
3196 		return_error = BR_FAILED_REPLY;
3197 		return_error_param = -EFAULT;
3198 		return_error_line = __LINE__;
3199 		goto err_copy_data_failed;
3200 	}
3201 	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3202 		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3203 				proc->pid, thread->pid, (u64)tr->offsets_size);
3204 		return_error = BR_FAILED_REPLY;
3205 		return_error_param = -EINVAL;
3206 		return_error_line = __LINE__;
3207 		goto err_bad_offset;
3208 	}
3209 	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3210 		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3211 				  proc->pid, thread->pid,
3212 				  (u64)extra_buffers_size);
3213 		return_error = BR_FAILED_REPLY;
3214 		return_error_param = -EINVAL;
3215 		return_error_line = __LINE__;
3216 		goto err_bad_offset;
3217 	}
3218 	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3219 	buffer_offset = off_start_offset;
3220 	off_end_offset = off_start_offset + tr->offsets_size;
3221 	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3222 	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3223 		ALIGN(secctx_sz, sizeof(u64));
3224 	off_min = 0;
3225 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3226 	     buffer_offset += sizeof(binder_size_t)) {
3227 		struct binder_object_header *hdr;
3228 		size_t object_size;
3229 		struct binder_object object;
3230 		binder_size_t object_offset;
3231 
3232 		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3233 						  &object_offset,
3234 						  t->buffer,
3235 						  buffer_offset,
3236 						  sizeof(object_offset))) {
3237 			return_error = BR_FAILED_REPLY;
3238 			return_error_param = -EINVAL;
3239 			return_error_line = __LINE__;
3240 			goto err_bad_offset;
3241 		}
3242 		object_size = binder_get_object(target_proc, t->buffer,
3243 						object_offset, &object);
3244 		if (object_size == 0 || object_offset < off_min) {
3245 			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3246 					  proc->pid, thread->pid,
3247 					  (u64)object_offset,
3248 					  (u64)off_min,
3249 					  (u64)t->buffer->data_size);
3250 			return_error = BR_FAILED_REPLY;
3251 			return_error_param = -EINVAL;
3252 			return_error_line = __LINE__;
3253 			goto err_bad_offset;
3254 		}
3255 
3256 		hdr = &object.hdr;
3257 		off_min = object_offset + object_size;
3258 		switch (hdr->type) {
3259 		case BINDER_TYPE_BINDER:
3260 		case BINDER_TYPE_WEAK_BINDER: {
3261 			struct flat_binder_object *fp;
3262 
3263 			fp = to_flat_binder_object(hdr);
3264 			ret = binder_translate_binder(fp, t, thread);
3265 
3266 			if (ret < 0 ||
3267 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3268 							t->buffer,
3269 							object_offset,
3270 							fp, sizeof(*fp))) {
3271 				return_error = BR_FAILED_REPLY;
3272 				return_error_param = ret;
3273 				return_error_line = __LINE__;
3274 				goto err_translate_failed;
3275 			}
3276 		} break;
3277 		case BINDER_TYPE_HANDLE:
3278 		case BINDER_TYPE_WEAK_HANDLE: {
3279 			struct flat_binder_object *fp;
3280 
3281 			fp = to_flat_binder_object(hdr);
3282 			ret = binder_translate_handle(fp, t, thread);
3283 			if (ret < 0 ||
3284 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3285 							t->buffer,
3286 							object_offset,
3287 							fp, sizeof(*fp))) {
3288 				return_error = BR_FAILED_REPLY;
3289 				return_error_param = ret;
3290 				return_error_line = __LINE__;
3291 				goto err_translate_failed;
3292 			}
3293 		} break;
3294 
3295 		case BINDER_TYPE_FD: {
3296 			struct binder_fd_object *fp = to_binder_fd_object(hdr);
3297 			binder_size_t fd_offset = object_offset +
3298 				(uintptr_t)&fp->fd - (uintptr_t)fp;
3299 			int ret = binder_translate_fd(fp->fd, fd_offset, t,
3300 						      thread, in_reply_to);
3301 
3302 			fp->pad_binder = 0;
3303 			if (ret < 0 ||
3304 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3305 							t->buffer,
3306 							object_offset,
3307 							fp, sizeof(*fp))) {
3308 				return_error = BR_FAILED_REPLY;
3309 				return_error_param = ret;
3310 				return_error_line = __LINE__;
3311 				goto err_translate_failed;
3312 			}
3313 		} break;
3314 		case BINDER_TYPE_FDA: {
3315 			struct binder_object ptr_object;
3316 			binder_size_t parent_offset;
3317 			struct binder_fd_array_object *fda =
3318 				to_binder_fd_array_object(hdr);
3319 			size_t num_valid = (buffer_offset - off_start_offset) /
3320 						sizeof(binder_size_t);
3321 			struct binder_buffer_object *parent =
3322 				binder_validate_ptr(target_proc, t->buffer,
3323 						    &ptr_object, fda->parent,
3324 						    off_start_offset,
3325 						    &parent_offset,
3326 						    num_valid);
3327 			if (!parent) {
3328 				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3329 						  proc->pid, thread->pid);
3330 				return_error = BR_FAILED_REPLY;
3331 				return_error_param = -EINVAL;
3332 				return_error_line = __LINE__;
3333 				goto err_bad_parent;
3334 			}
3335 			if (!binder_validate_fixup(target_proc, t->buffer,
3336 						   off_start_offset,
3337 						   parent_offset,
3338 						   fda->parent_offset,
3339 						   last_fixup_obj_off,
3340 						   last_fixup_min_off)) {
3341 				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3342 						  proc->pid, thread->pid);
3343 				return_error = BR_FAILED_REPLY;
3344 				return_error_param = -EINVAL;
3345 				return_error_line = __LINE__;
3346 				goto err_bad_parent;
3347 			}
3348 			ret = binder_translate_fd_array(fda, parent, t, thread,
3349 							in_reply_to);
3350 			if (ret < 0) {
3351 				return_error = BR_FAILED_REPLY;
3352 				return_error_param = ret;
3353 				return_error_line = __LINE__;
3354 				goto err_translate_failed;
3355 			}
3356 			last_fixup_obj_off = parent_offset;
3357 			last_fixup_min_off =
3358 				fda->parent_offset + sizeof(u32) * fda->num_fds;
3359 		} break;
3360 		case BINDER_TYPE_PTR: {
3361 			struct binder_buffer_object *bp =
3362 				to_binder_buffer_object(hdr);
3363 			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3364 			size_t num_valid;
3365 
3366 			if (bp->length > buf_left) {
3367 				binder_user_error("%d:%d got transaction with too large buffer\n",
3368 						  proc->pid, thread->pid);
3369 				return_error = BR_FAILED_REPLY;
3370 				return_error_param = -EINVAL;
3371 				return_error_line = __LINE__;
3372 				goto err_bad_offset;
3373 			}
3374 			if (binder_alloc_copy_user_to_buffer(
3375 						&target_proc->alloc,
3376 						t->buffer,
3377 						sg_buf_offset,
3378 						(const void __user *)
3379 							(uintptr_t)bp->buffer,
3380 						bp->length)) {
3381 				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3382 						  proc->pid, thread->pid);
3383 				return_error_param = -EFAULT;
3384 				return_error = BR_FAILED_REPLY;
3385 				return_error_line = __LINE__;
3386 				goto err_copy_data_failed;
3387 			}
3388 			/* Fixup buffer pointer to target proc address space */
3389 			bp->buffer = (uintptr_t)
3390 				t->buffer->user_data + sg_buf_offset;
3391 			sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3392 
3393 			num_valid = (buffer_offset - off_start_offset) /
3394 					sizeof(binder_size_t);
3395 			ret = binder_fixup_parent(t, thread, bp,
3396 						  off_start_offset,
3397 						  num_valid,
3398 						  last_fixup_obj_off,
3399 						  last_fixup_min_off);
3400 			if (ret < 0 ||
3401 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3402 							t->buffer,
3403 							object_offset,
3404 							bp, sizeof(*bp))) {
3405 				return_error = BR_FAILED_REPLY;
3406 				return_error_param = ret;
3407 				return_error_line = __LINE__;
3408 				goto err_translate_failed;
3409 			}
3410 			last_fixup_obj_off = object_offset;
3411 			last_fixup_min_off = 0;
3412 		} break;
3413 		default:
3414 			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3415 				proc->pid, thread->pid, hdr->type);
3416 			return_error = BR_FAILED_REPLY;
3417 			return_error_param = -EINVAL;
3418 			return_error_line = __LINE__;
3419 			goto err_bad_object_type;
3420 		}
3421 	}
3422 	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3423 	t->work.type = BINDER_WORK_TRANSACTION;
3424 
3425 	if (reply) {
3426 		binder_enqueue_thread_work(thread, tcomplete);
3427 		binder_inner_proc_lock(target_proc);
3428 		if (target_thread->is_dead) {
3429 			binder_inner_proc_unlock(target_proc);
3430 			goto err_dead_proc_or_thread;
3431 		}
3432 		BUG_ON(t->buffer->async_transaction != 0);
3433 		binder_pop_transaction_ilocked(target_thread, in_reply_to);
3434 		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3435 		binder_inner_proc_unlock(target_proc);
3436 		wake_up_interruptible_sync(&target_thread->wait);
3437 		binder_free_transaction(in_reply_to);
3438 	} else if (!(t->flags & TF_ONE_WAY)) {
3439 		BUG_ON(t->buffer->async_transaction != 0);
3440 		binder_inner_proc_lock(proc);
3441 		/*
3442 		 * Defer the TRANSACTION_COMPLETE, so we don't return to
3443 		 * userspace immediately; this allows the target process to
3444 		 * immediately start processing this transaction, reducing
3445 		 * latency. We will then return the TRANSACTION_COMPLETE when
3446 		 * the target replies (or there is an error).
3447 		 */
3448 		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3449 		t->need_reply = 1;
3450 		t->from_parent = thread->transaction_stack;
3451 		thread->transaction_stack = t;
3452 		binder_inner_proc_unlock(proc);
3453 		if (!binder_proc_transaction(t, target_proc, target_thread)) {
3454 			binder_inner_proc_lock(proc);
3455 			binder_pop_transaction_ilocked(thread, t);
3456 			binder_inner_proc_unlock(proc);
3457 			goto err_dead_proc_or_thread;
3458 		}
3459 	} else {
3460 		BUG_ON(target_node == NULL);
3461 		BUG_ON(t->buffer->async_transaction != 1);
3462 		binder_enqueue_thread_work(thread, tcomplete);
3463 		if (!binder_proc_transaction(t, target_proc, NULL))
3464 			goto err_dead_proc_or_thread;
3465 	}
3466 	if (target_thread)
3467 		binder_thread_dec_tmpref(target_thread);
3468 	binder_proc_dec_tmpref(target_proc);
3469 	if (target_node)
3470 		binder_dec_node_tmpref(target_node);
3471 	/*
3472 	 * write barrier to synchronize with initialization
3473 	 * of log entry
3474 	 */
3475 	smp_wmb();
3476 	WRITE_ONCE(e->debug_id_done, t_debug_id);
3477 	return;
3478 
3479 err_dead_proc_or_thread:
3480 	return_error = BR_DEAD_REPLY;
3481 	return_error_line = __LINE__;
3482 	binder_dequeue_work(proc, tcomplete);
3483 err_translate_failed:
3484 err_bad_object_type:
3485 err_bad_offset:
3486 err_bad_parent:
3487 err_copy_data_failed:
3488 	binder_free_txn_fixups(t);
3489 	trace_binder_transaction_failed_buffer_release(t->buffer);
3490 	binder_transaction_buffer_release(target_proc, t->buffer,
3491 					  buffer_offset, true);
3492 	if (target_node)
3493 		binder_dec_node_tmpref(target_node);
3494 	target_node = NULL;
3495 	t->buffer->transaction = NULL;
3496 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3497 err_binder_alloc_buf_failed:
3498 err_bad_extra_size:
3499 	if (secctx)
3500 		security_release_secctx(secctx, secctx_sz);
3501 err_get_secctx_failed:
3502 	kfree(tcomplete);
3503 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3504 err_alloc_tcomplete_failed:
3505 	kfree(t);
3506 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
3507 err_alloc_t_failed:
3508 err_bad_todo_list:
3509 err_bad_call_stack:
3510 err_empty_call_stack:
3511 err_dead_binder:
3512 err_invalid_target_handle:
3513 	if (target_thread)
3514 		binder_thread_dec_tmpref(target_thread);
3515 	if (target_proc)
3516 		binder_proc_dec_tmpref(target_proc);
3517 	if (target_node) {
3518 		binder_dec_node(target_node, 1, 0);
3519 		binder_dec_node_tmpref(target_node);
3520 	}
3521 
3522 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3523 		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3524 		     proc->pid, thread->pid, return_error, return_error_param,
3525 		     (u64)tr->data_size, (u64)tr->offsets_size,
3526 		     return_error_line);
3527 
3528 	{
3529 		struct binder_transaction_log_entry *fe;
3530 
3531 		e->return_error = return_error;
3532 		e->return_error_param = return_error_param;
3533 		e->return_error_line = return_error_line;
3534 		fe = binder_transaction_log_add(&binder_transaction_log_failed);
3535 		*fe = *e;
3536 		/*
3537 		 * write barrier to synchronize with initialization
3538 		 * of log entry
3539 		 */
3540 		smp_wmb();
3541 		WRITE_ONCE(e->debug_id_done, t_debug_id);
3542 		WRITE_ONCE(fe->debug_id_done, t_debug_id);
3543 	}
3544 
3545 	BUG_ON(thread->return_error.cmd != BR_OK);
3546 	if (in_reply_to) {
3547 		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3548 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3549 		binder_send_failed_reply(in_reply_to, return_error);
3550 	} else {
3551 		thread->return_error.cmd = return_error;
3552 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3553 	}
3554 }
3555 
3556 /**
3557  * binder_free_buf() - free the specified buffer
3558  * @proc:	binder proc that owns buffer
3559  * @buffer:	buffer to be freed
3560  *
3561  * If buffer for an async transaction, enqueue the next async
3562  * transaction from the node.
3563  *
3564  * Cleanup buffer and free it.
3565  */
3566 static void
3567 binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
3568 {
3569 	binder_inner_proc_lock(proc);
3570 	if (buffer->transaction) {
3571 		buffer->transaction->buffer = NULL;
3572 		buffer->transaction = NULL;
3573 	}
3574 	binder_inner_proc_unlock(proc);
3575 	if (buffer->async_transaction && buffer->target_node) {
3576 		struct binder_node *buf_node;
3577 		struct binder_work *w;
3578 
3579 		buf_node = buffer->target_node;
3580 		binder_node_inner_lock(buf_node);
3581 		BUG_ON(!buf_node->has_async_transaction);
3582 		BUG_ON(buf_node->proc != proc);
3583 		w = binder_dequeue_work_head_ilocked(
3584 				&buf_node->async_todo);
3585 		if (!w) {
3586 			buf_node->has_async_transaction = false;
3587 		} else {
3588 			binder_enqueue_work_ilocked(
3589 					w, &proc->todo);
3590 			binder_wakeup_proc_ilocked(proc);
3591 		}
3592 		binder_node_inner_unlock(buf_node);
3593 	}
3594 	trace_binder_transaction_buffer_release(buffer);
3595 	binder_transaction_buffer_release(proc, buffer, 0, false);
3596 	binder_alloc_free_buf(&proc->alloc, buffer);
3597 }
3598 
3599 static int binder_thread_write(struct binder_proc *proc,
3600 			struct binder_thread *thread,
3601 			binder_uintptr_t binder_buffer, size_t size,
3602 			binder_size_t *consumed)
3603 {
3604 	uint32_t cmd;
3605 	struct binder_context *context = proc->context;
3606 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3607 	void __user *ptr = buffer + *consumed;
3608 	void __user *end = buffer + size;
3609 
3610 	while (ptr < end && thread->return_error.cmd == BR_OK) {
3611 		int ret;
3612 
3613 		if (get_user(cmd, (uint32_t __user *)ptr))
3614 			return -EFAULT;
3615 		ptr += sizeof(uint32_t);
3616 		trace_binder_command(cmd);
3617 		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3618 			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3619 			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3620 			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3621 		}
3622 		switch (cmd) {
3623 		case BC_INCREFS:
3624 		case BC_ACQUIRE:
3625 		case BC_RELEASE:
3626 		case BC_DECREFS: {
3627 			uint32_t target;
3628 			const char *debug_string;
3629 			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3630 			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3631 			struct binder_ref_data rdata;
3632 
3633 			if (get_user(target, (uint32_t __user *)ptr))
3634 				return -EFAULT;
3635 
3636 			ptr += sizeof(uint32_t);
3637 			ret = -1;
3638 			if (increment && !target) {
3639 				struct binder_node *ctx_mgr_node;
3640 				mutex_lock(&context->context_mgr_node_lock);
3641 				ctx_mgr_node = context->binder_context_mgr_node;
3642 				if (ctx_mgr_node) {
3643 					if (ctx_mgr_node->proc == proc) {
3644 						binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3645 								  proc->pid, thread->pid);
3646 						mutex_unlock(&context->context_mgr_node_lock);
3647 						return -EINVAL;
3648 					}
3649 					ret = binder_inc_ref_for_node(
3650 							proc, ctx_mgr_node,
3651 							strong, NULL, &rdata);
3652 				}
3653 				mutex_unlock(&context->context_mgr_node_lock);
3654 			}
3655 			if (ret)
3656 				ret = binder_update_ref_for_handle(
3657 						proc, target, increment, strong,
3658 						&rdata);
3659 			if (!ret && rdata.desc != target) {
3660 				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3661 					proc->pid, thread->pid,
3662 					target, rdata.desc);
3663 			}
3664 			switch (cmd) {
3665 			case BC_INCREFS:
3666 				debug_string = "IncRefs";
3667 				break;
3668 			case BC_ACQUIRE:
3669 				debug_string = "Acquire";
3670 				break;
3671 			case BC_RELEASE:
3672 				debug_string = "Release";
3673 				break;
3674 			case BC_DECREFS:
3675 			default:
3676 				debug_string = "DecRefs";
3677 				break;
3678 			}
3679 			if (ret) {
3680 				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3681 					proc->pid, thread->pid, debug_string,
3682 					strong, target, ret);
3683 				break;
3684 			}
3685 			binder_debug(BINDER_DEBUG_USER_REFS,
3686 				     "%d:%d %s ref %d desc %d s %d w %d\n",
3687 				     proc->pid, thread->pid, debug_string,
3688 				     rdata.debug_id, rdata.desc, rdata.strong,
3689 				     rdata.weak);
3690 			break;
3691 		}
3692 		case BC_INCREFS_DONE:
3693 		case BC_ACQUIRE_DONE: {
3694 			binder_uintptr_t node_ptr;
3695 			binder_uintptr_t cookie;
3696 			struct binder_node *node;
3697 			bool free_node;
3698 
3699 			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3700 				return -EFAULT;
3701 			ptr += sizeof(binder_uintptr_t);
3702 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3703 				return -EFAULT;
3704 			ptr += sizeof(binder_uintptr_t);
3705 			node = binder_get_node(proc, node_ptr);
3706 			if (node == NULL) {
3707 				binder_user_error("%d:%d %s u%016llx no match\n",
3708 					proc->pid, thread->pid,
3709 					cmd == BC_INCREFS_DONE ?
3710 					"BC_INCREFS_DONE" :
3711 					"BC_ACQUIRE_DONE",
3712 					(u64)node_ptr);
3713 				break;
3714 			}
3715 			if (cookie != node->cookie) {
3716 				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3717 					proc->pid, thread->pid,
3718 					cmd == BC_INCREFS_DONE ?
3719 					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3720 					(u64)node_ptr, node->debug_id,
3721 					(u64)cookie, (u64)node->cookie);
3722 				binder_put_node(node);
3723 				break;
3724 			}
3725 			binder_node_inner_lock(node);
3726 			if (cmd == BC_ACQUIRE_DONE) {
3727 				if (node->pending_strong_ref == 0) {
3728 					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3729 						proc->pid, thread->pid,
3730 						node->debug_id);
3731 					binder_node_inner_unlock(node);
3732 					binder_put_node(node);
3733 					break;
3734 				}
3735 				node->pending_strong_ref = 0;
3736 			} else {
3737 				if (node->pending_weak_ref == 0) {
3738 					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3739 						proc->pid, thread->pid,
3740 						node->debug_id);
3741 					binder_node_inner_unlock(node);
3742 					binder_put_node(node);
3743 					break;
3744 				}
3745 				node->pending_weak_ref = 0;
3746 			}
3747 			free_node = binder_dec_node_nilocked(node,
3748 					cmd == BC_ACQUIRE_DONE, 0);
3749 			WARN_ON(free_node);
3750 			binder_debug(BINDER_DEBUG_USER_REFS,
3751 				     "%d:%d %s node %d ls %d lw %d tr %d\n",
3752 				     proc->pid, thread->pid,
3753 				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3754 				     node->debug_id, node->local_strong_refs,
3755 				     node->local_weak_refs, node->tmp_refs);
3756 			binder_node_inner_unlock(node);
3757 			binder_put_node(node);
3758 			break;
3759 		}
3760 		case BC_ATTEMPT_ACQUIRE:
3761 			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3762 			return -EINVAL;
3763 		case BC_ACQUIRE_RESULT:
3764 			pr_err("BC_ACQUIRE_RESULT not supported\n");
3765 			return -EINVAL;
3766 
3767 		case BC_FREE_BUFFER: {
3768 			binder_uintptr_t data_ptr;
3769 			struct binder_buffer *buffer;
3770 
3771 			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3772 				return -EFAULT;
3773 			ptr += sizeof(binder_uintptr_t);
3774 
3775 			buffer = binder_alloc_prepare_to_free(&proc->alloc,
3776 							      data_ptr);
3777 			if (IS_ERR_OR_NULL(buffer)) {
3778 				if (PTR_ERR(buffer) == -EPERM) {
3779 					binder_user_error(
3780 						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3781 						proc->pid, thread->pid,
3782 						(u64)data_ptr);
3783 				} else {
3784 					binder_user_error(
3785 						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
3786 						proc->pid, thread->pid,
3787 						(u64)data_ptr);
3788 				}
3789 				break;
3790 			}
3791 			binder_debug(BINDER_DEBUG_FREE_BUFFER,
3792 				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3793 				     proc->pid, thread->pid, (u64)data_ptr,
3794 				     buffer->debug_id,
3795 				     buffer->transaction ? "active" : "finished");
3796 			binder_free_buf(proc, buffer);
3797 			break;
3798 		}
3799 
3800 		case BC_TRANSACTION_SG:
3801 		case BC_REPLY_SG: {
3802 			struct binder_transaction_data_sg tr;
3803 
3804 			if (copy_from_user(&tr, ptr, sizeof(tr)))
3805 				return -EFAULT;
3806 			ptr += sizeof(tr);
3807 			binder_transaction(proc, thread, &tr.transaction_data,
3808 					   cmd == BC_REPLY_SG, tr.buffers_size);
3809 			break;
3810 		}
3811 		case BC_TRANSACTION:
3812 		case BC_REPLY: {
3813 			struct binder_transaction_data tr;
3814 
3815 			if (copy_from_user(&tr, ptr, sizeof(tr)))
3816 				return -EFAULT;
3817 			ptr += sizeof(tr);
3818 			binder_transaction(proc, thread, &tr,
3819 					   cmd == BC_REPLY, 0);
3820 			break;
3821 		}
3822 
3823 		case BC_REGISTER_LOOPER:
3824 			binder_debug(BINDER_DEBUG_THREADS,
3825 				     "%d:%d BC_REGISTER_LOOPER\n",
3826 				     proc->pid, thread->pid);
3827 			binder_inner_proc_lock(proc);
3828 			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3829 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3830 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3831 					proc->pid, thread->pid);
3832 			} else if (proc->requested_threads == 0) {
3833 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3834 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3835 					proc->pid, thread->pid);
3836 			} else {
3837 				proc->requested_threads--;
3838 				proc->requested_threads_started++;
3839 			}
3840 			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3841 			binder_inner_proc_unlock(proc);
3842 			break;
3843 		case BC_ENTER_LOOPER:
3844 			binder_debug(BINDER_DEBUG_THREADS,
3845 				     "%d:%d BC_ENTER_LOOPER\n",
3846 				     proc->pid, thread->pid);
3847 			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3848 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3849 				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3850 					proc->pid, thread->pid);
3851 			}
3852 			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3853 			break;
3854 		case BC_EXIT_LOOPER:
3855 			binder_debug(BINDER_DEBUG_THREADS,
3856 				     "%d:%d BC_EXIT_LOOPER\n",
3857 				     proc->pid, thread->pid);
3858 			thread->looper |= BINDER_LOOPER_STATE_EXITED;
3859 			break;
3860 
3861 		case BC_REQUEST_DEATH_NOTIFICATION:
3862 		case BC_CLEAR_DEATH_NOTIFICATION: {
3863 			uint32_t target;
3864 			binder_uintptr_t cookie;
3865 			struct binder_ref *ref;
3866 			struct binder_ref_death *death = NULL;
3867 
3868 			if (get_user(target, (uint32_t __user *)ptr))
3869 				return -EFAULT;
3870 			ptr += sizeof(uint32_t);
3871 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3872 				return -EFAULT;
3873 			ptr += sizeof(binder_uintptr_t);
3874 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3875 				/*
3876 				 * Allocate memory for death notification
3877 				 * before taking lock
3878 				 */
3879 				death = kzalloc(sizeof(*death), GFP_KERNEL);
3880 				if (death == NULL) {
3881 					WARN_ON(thread->return_error.cmd !=
3882 						BR_OK);
3883 					thread->return_error.cmd = BR_ERROR;
3884 					binder_enqueue_thread_work(
3885 						thread,
3886 						&thread->return_error.work);
3887 					binder_debug(
3888 						BINDER_DEBUG_FAILED_TRANSACTION,
3889 						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3890 						proc->pid, thread->pid);
3891 					break;
3892 				}
3893 			}
3894 			binder_proc_lock(proc);
3895 			ref = binder_get_ref_olocked(proc, target, false);
3896 			if (ref == NULL) {
3897 				binder_user_error("%d:%d %s invalid ref %d\n",
3898 					proc->pid, thread->pid,
3899 					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3900 					"BC_REQUEST_DEATH_NOTIFICATION" :
3901 					"BC_CLEAR_DEATH_NOTIFICATION",
3902 					target);
3903 				binder_proc_unlock(proc);
3904 				kfree(death);
3905 				break;
3906 			}
3907 
3908 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3909 				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3910 				     proc->pid, thread->pid,
3911 				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3912 				     "BC_REQUEST_DEATH_NOTIFICATION" :
3913 				     "BC_CLEAR_DEATH_NOTIFICATION",
3914 				     (u64)cookie, ref->data.debug_id,
3915 				     ref->data.desc, ref->data.strong,
3916 				     ref->data.weak, ref->node->debug_id);
3917 
3918 			binder_node_lock(ref->node);
3919 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3920 				if (ref->death) {
3921 					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3922 						proc->pid, thread->pid);
3923 					binder_node_unlock(ref->node);
3924 					binder_proc_unlock(proc);
3925 					kfree(death);
3926 					break;
3927 				}
3928 				binder_stats_created(BINDER_STAT_DEATH);
3929 				INIT_LIST_HEAD(&death->work.entry);
3930 				death->cookie = cookie;
3931 				ref->death = death;
3932 				if (ref->node->proc == NULL) {
3933 					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3934 
3935 					binder_inner_proc_lock(proc);
3936 					binder_enqueue_work_ilocked(
3937 						&ref->death->work, &proc->todo);
3938 					binder_wakeup_proc_ilocked(proc);
3939 					binder_inner_proc_unlock(proc);
3940 				}
3941 			} else {
3942 				if (ref->death == NULL) {
3943 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3944 						proc->pid, thread->pid);
3945 					binder_node_unlock(ref->node);
3946 					binder_proc_unlock(proc);
3947 					break;
3948 				}
3949 				death = ref->death;
3950 				if (death->cookie != cookie) {
3951 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3952 						proc->pid, thread->pid,
3953 						(u64)death->cookie,
3954 						(u64)cookie);
3955 					binder_node_unlock(ref->node);
3956 					binder_proc_unlock(proc);
3957 					break;
3958 				}
3959 				ref->death = NULL;
3960 				binder_inner_proc_lock(proc);
3961 				if (list_empty(&death->work.entry)) {
3962 					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3963 					if (thread->looper &
3964 					    (BINDER_LOOPER_STATE_REGISTERED |
3965 					     BINDER_LOOPER_STATE_ENTERED))
3966 						binder_enqueue_thread_work_ilocked(
3967 								thread,
3968 								&death->work);
3969 					else {
3970 						binder_enqueue_work_ilocked(
3971 								&death->work,
3972 								&proc->todo);
3973 						binder_wakeup_proc_ilocked(
3974 								proc);
3975 					}
3976 				} else {
3977 					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3978 					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3979 				}
3980 				binder_inner_proc_unlock(proc);
3981 			}
3982 			binder_node_unlock(ref->node);
3983 			binder_proc_unlock(proc);
3984 		} break;
3985 		case BC_DEAD_BINDER_DONE: {
3986 			struct binder_work *w;
3987 			binder_uintptr_t cookie;
3988 			struct binder_ref_death *death = NULL;
3989 
3990 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3991 				return -EFAULT;
3992 
3993 			ptr += sizeof(cookie);
3994 			binder_inner_proc_lock(proc);
3995 			list_for_each_entry(w, &proc->delivered_death,
3996 					    entry) {
3997 				struct binder_ref_death *tmp_death =
3998 					container_of(w,
3999 						     struct binder_ref_death,
4000 						     work);
4001 
4002 				if (tmp_death->cookie == cookie) {
4003 					death = tmp_death;
4004 					break;
4005 				}
4006 			}
4007 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
4008 				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4009 				     proc->pid, thread->pid, (u64)cookie,
4010 				     death);
4011 			if (death == NULL) {
4012 				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4013 					proc->pid, thread->pid, (u64)cookie);
4014 				binder_inner_proc_unlock(proc);
4015 				break;
4016 			}
4017 			binder_dequeue_work_ilocked(&death->work);
4018 			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4019 				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4020 				if (thread->looper &
4021 					(BINDER_LOOPER_STATE_REGISTERED |
4022 					 BINDER_LOOPER_STATE_ENTERED))
4023 					binder_enqueue_thread_work_ilocked(
4024 						thread, &death->work);
4025 				else {
4026 					binder_enqueue_work_ilocked(
4027 							&death->work,
4028 							&proc->todo);
4029 					binder_wakeup_proc_ilocked(proc);
4030 				}
4031 			}
4032 			binder_inner_proc_unlock(proc);
4033 		} break;
4034 
4035 		default:
4036 			pr_err("%d:%d unknown command %d\n",
4037 			       proc->pid, thread->pid, cmd);
4038 			return -EINVAL;
4039 		}
4040 		*consumed = ptr - buffer;
4041 	}
4042 	return 0;
4043 }
4044 
4045 static void binder_stat_br(struct binder_proc *proc,
4046 			   struct binder_thread *thread, uint32_t cmd)
4047 {
4048 	trace_binder_return(cmd);
4049 	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4050 		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4051 		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4052 		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4053 	}
4054 }
4055 
4056 static int binder_put_node_cmd(struct binder_proc *proc,
4057 			       struct binder_thread *thread,
4058 			       void __user **ptrp,
4059 			       binder_uintptr_t node_ptr,
4060 			       binder_uintptr_t node_cookie,
4061 			       int node_debug_id,
4062 			       uint32_t cmd, const char *cmd_name)
4063 {
4064 	void __user *ptr = *ptrp;
4065 
4066 	if (put_user(cmd, (uint32_t __user *)ptr))
4067 		return -EFAULT;
4068 	ptr += sizeof(uint32_t);
4069 
4070 	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4071 		return -EFAULT;
4072 	ptr += sizeof(binder_uintptr_t);
4073 
4074 	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4075 		return -EFAULT;
4076 	ptr += sizeof(binder_uintptr_t);
4077 
4078 	binder_stat_br(proc, thread, cmd);
4079 	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4080 		     proc->pid, thread->pid, cmd_name, node_debug_id,
4081 		     (u64)node_ptr, (u64)node_cookie);
4082 
4083 	*ptrp = ptr;
4084 	return 0;
4085 }
4086 
4087 static int binder_wait_for_work(struct binder_thread *thread,
4088 				bool do_proc_work)
4089 {
4090 	DEFINE_WAIT(wait);
4091 	struct binder_proc *proc = thread->proc;
4092 	int ret = 0;
4093 
4094 	freezer_do_not_count();
4095 	binder_inner_proc_lock(proc);
4096 	for (;;) {
4097 		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4098 		if (binder_has_work_ilocked(thread, do_proc_work))
4099 			break;
4100 		if (do_proc_work)
4101 			list_add(&thread->waiting_thread_node,
4102 				 &proc->waiting_threads);
4103 		binder_inner_proc_unlock(proc);
4104 		schedule();
4105 		binder_inner_proc_lock(proc);
4106 		list_del_init(&thread->waiting_thread_node);
4107 		if (signal_pending(current)) {
4108 			ret = -ERESTARTSYS;
4109 			break;
4110 		}
4111 	}
4112 	finish_wait(&thread->wait, &wait);
4113 	binder_inner_proc_unlock(proc);
4114 	freezer_count();
4115 
4116 	return ret;
4117 }
4118 
4119 /**
4120  * binder_apply_fd_fixups() - finish fd translation
4121  * @proc:         binder_proc associated @t->buffer
4122  * @t:	binder transaction with list of fd fixups
4123  *
4124  * Now that we are in the context of the transaction target
4125  * process, we can allocate and install fds. Process the
4126  * list of fds to translate and fixup the buffer with the
4127  * new fds.
4128  *
4129  * If we fail to allocate an fd, then free the resources by
4130  * fput'ing files that have not been processed and ksys_close'ing
4131  * any fds that have already been allocated.
4132  */
4133 static int binder_apply_fd_fixups(struct binder_proc *proc,
4134 				  struct binder_transaction *t)
4135 {
4136 	struct binder_txn_fd_fixup *fixup, *tmp;
4137 	int ret = 0;
4138 
4139 	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4140 		int fd = get_unused_fd_flags(O_CLOEXEC);
4141 
4142 		if (fd < 0) {
4143 			binder_debug(BINDER_DEBUG_TRANSACTION,
4144 				     "failed fd fixup txn %d fd %d\n",
4145 				     t->debug_id, fd);
4146 			ret = -ENOMEM;
4147 			break;
4148 		}
4149 		binder_debug(BINDER_DEBUG_TRANSACTION,
4150 			     "fd fixup txn %d fd %d\n",
4151 			     t->debug_id, fd);
4152 		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4153 		fd_install(fd, fixup->file);
4154 		fixup->file = NULL;
4155 		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4156 						fixup->offset, &fd,
4157 						sizeof(u32))) {
4158 			ret = -EINVAL;
4159 			break;
4160 		}
4161 	}
4162 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4163 		if (fixup->file) {
4164 			fput(fixup->file);
4165 		} else if (ret) {
4166 			u32 fd;
4167 			int err;
4168 
4169 			err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
4170 							    t->buffer,
4171 							    fixup->offset,
4172 							    sizeof(fd));
4173 			WARN_ON(err);
4174 			if (!err)
4175 				binder_deferred_fd_close(fd);
4176 		}
4177 		list_del(&fixup->fixup_entry);
4178 		kfree(fixup);
4179 	}
4180 
4181 	return ret;
4182 }
4183 
4184 static int binder_thread_read(struct binder_proc *proc,
4185 			      struct binder_thread *thread,
4186 			      binder_uintptr_t binder_buffer, size_t size,
4187 			      binder_size_t *consumed, int non_block)
4188 {
4189 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4190 	void __user *ptr = buffer + *consumed;
4191 	void __user *end = buffer + size;
4192 
4193 	int ret = 0;
4194 	int wait_for_proc_work;
4195 
4196 	if (*consumed == 0) {
4197 		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4198 			return -EFAULT;
4199 		ptr += sizeof(uint32_t);
4200 	}
4201 
4202 retry:
4203 	binder_inner_proc_lock(proc);
4204 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4205 	binder_inner_proc_unlock(proc);
4206 
4207 	thread->looper |= BINDER_LOOPER_STATE_WAITING;
4208 
4209 	trace_binder_wait_for_work(wait_for_proc_work,
4210 				   !!thread->transaction_stack,
4211 				   !binder_worklist_empty(proc, &thread->todo));
4212 	if (wait_for_proc_work) {
4213 		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4214 					BINDER_LOOPER_STATE_ENTERED))) {
4215 			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4216 				proc->pid, thread->pid, thread->looper);
4217 			wait_event_interruptible(binder_user_error_wait,
4218 						 binder_stop_on_user_error < 2);
4219 		}
4220 		binder_set_nice(proc->default_priority);
4221 	}
4222 
4223 	if (non_block) {
4224 		if (!binder_has_work(thread, wait_for_proc_work))
4225 			ret = -EAGAIN;
4226 	} else {
4227 		ret = binder_wait_for_work(thread, wait_for_proc_work);
4228 	}
4229 
4230 	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4231 
4232 	if (ret)
4233 		return ret;
4234 
4235 	while (1) {
4236 		uint32_t cmd;
4237 		struct binder_transaction_data_secctx tr;
4238 		struct binder_transaction_data *trd = &tr.transaction_data;
4239 		struct binder_work *w = NULL;
4240 		struct list_head *list = NULL;
4241 		struct binder_transaction *t = NULL;
4242 		struct binder_thread *t_from;
4243 		size_t trsize = sizeof(*trd);
4244 
4245 		binder_inner_proc_lock(proc);
4246 		if (!binder_worklist_empty_ilocked(&thread->todo))
4247 			list = &thread->todo;
4248 		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4249 			   wait_for_proc_work)
4250 			list = &proc->todo;
4251 		else {
4252 			binder_inner_proc_unlock(proc);
4253 
4254 			/* no data added */
4255 			if (ptr - buffer == 4 && !thread->looper_need_return)
4256 				goto retry;
4257 			break;
4258 		}
4259 
4260 		if (end - ptr < sizeof(tr) + 4) {
4261 			binder_inner_proc_unlock(proc);
4262 			break;
4263 		}
4264 		w = binder_dequeue_work_head_ilocked(list);
4265 		if (binder_worklist_empty_ilocked(&thread->todo))
4266 			thread->process_todo = false;
4267 
4268 		switch (w->type) {
4269 		case BINDER_WORK_TRANSACTION: {
4270 			binder_inner_proc_unlock(proc);
4271 			t = container_of(w, struct binder_transaction, work);
4272 		} break;
4273 		case BINDER_WORK_RETURN_ERROR: {
4274 			struct binder_error *e = container_of(
4275 					w, struct binder_error, work);
4276 
4277 			WARN_ON(e->cmd == BR_OK);
4278 			binder_inner_proc_unlock(proc);
4279 			if (put_user(e->cmd, (uint32_t __user *)ptr))
4280 				return -EFAULT;
4281 			cmd = e->cmd;
4282 			e->cmd = BR_OK;
4283 			ptr += sizeof(uint32_t);
4284 
4285 			binder_stat_br(proc, thread, cmd);
4286 		} break;
4287 		case BINDER_WORK_TRANSACTION_COMPLETE: {
4288 			binder_inner_proc_unlock(proc);
4289 			cmd = BR_TRANSACTION_COMPLETE;
4290 			kfree(w);
4291 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4292 			if (put_user(cmd, (uint32_t __user *)ptr))
4293 				return -EFAULT;
4294 			ptr += sizeof(uint32_t);
4295 
4296 			binder_stat_br(proc, thread, cmd);
4297 			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4298 				     "%d:%d BR_TRANSACTION_COMPLETE\n",
4299 				     proc->pid, thread->pid);
4300 		} break;
4301 		case BINDER_WORK_NODE: {
4302 			struct binder_node *node = container_of(w, struct binder_node, work);
4303 			int strong, weak;
4304 			binder_uintptr_t node_ptr = node->ptr;
4305 			binder_uintptr_t node_cookie = node->cookie;
4306 			int node_debug_id = node->debug_id;
4307 			int has_weak_ref;
4308 			int has_strong_ref;
4309 			void __user *orig_ptr = ptr;
4310 
4311 			BUG_ON(proc != node->proc);
4312 			strong = node->internal_strong_refs ||
4313 					node->local_strong_refs;
4314 			weak = !hlist_empty(&node->refs) ||
4315 					node->local_weak_refs ||
4316 					node->tmp_refs || strong;
4317 			has_strong_ref = node->has_strong_ref;
4318 			has_weak_ref = node->has_weak_ref;
4319 
4320 			if (weak && !has_weak_ref) {
4321 				node->has_weak_ref = 1;
4322 				node->pending_weak_ref = 1;
4323 				node->local_weak_refs++;
4324 			}
4325 			if (strong && !has_strong_ref) {
4326 				node->has_strong_ref = 1;
4327 				node->pending_strong_ref = 1;
4328 				node->local_strong_refs++;
4329 			}
4330 			if (!strong && has_strong_ref)
4331 				node->has_strong_ref = 0;
4332 			if (!weak && has_weak_ref)
4333 				node->has_weak_ref = 0;
4334 			if (!weak && !strong) {
4335 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4336 					     "%d:%d node %d u%016llx c%016llx deleted\n",
4337 					     proc->pid, thread->pid,
4338 					     node_debug_id,
4339 					     (u64)node_ptr,
4340 					     (u64)node_cookie);
4341 				rb_erase(&node->rb_node, &proc->nodes);
4342 				binder_inner_proc_unlock(proc);
4343 				binder_node_lock(node);
4344 				/*
4345 				 * Acquire the node lock before freeing the
4346 				 * node to serialize with other threads that
4347 				 * may have been holding the node lock while
4348 				 * decrementing this node (avoids race where
4349 				 * this thread frees while the other thread
4350 				 * is unlocking the node after the final
4351 				 * decrement)
4352 				 */
4353 				binder_node_unlock(node);
4354 				binder_free_node(node);
4355 			} else
4356 				binder_inner_proc_unlock(proc);
4357 
4358 			if (weak && !has_weak_ref)
4359 				ret = binder_put_node_cmd(
4360 						proc, thread, &ptr, node_ptr,
4361 						node_cookie, node_debug_id,
4362 						BR_INCREFS, "BR_INCREFS");
4363 			if (!ret && strong && !has_strong_ref)
4364 				ret = binder_put_node_cmd(
4365 						proc, thread, &ptr, node_ptr,
4366 						node_cookie, node_debug_id,
4367 						BR_ACQUIRE, "BR_ACQUIRE");
4368 			if (!ret && !strong && has_strong_ref)
4369 				ret = binder_put_node_cmd(
4370 						proc, thread, &ptr, node_ptr,
4371 						node_cookie, node_debug_id,
4372 						BR_RELEASE, "BR_RELEASE");
4373 			if (!ret && !weak && has_weak_ref)
4374 				ret = binder_put_node_cmd(
4375 						proc, thread, &ptr, node_ptr,
4376 						node_cookie, node_debug_id,
4377 						BR_DECREFS, "BR_DECREFS");
4378 			if (orig_ptr == ptr)
4379 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4380 					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
4381 					     proc->pid, thread->pid,
4382 					     node_debug_id,
4383 					     (u64)node_ptr,
4384 					     (u64)node_cookie);
4385 			if (ret)
4386 				return ret;
4387 		} break;
4388 		case BINDER_WORK_DEAD_BINDER:
4389 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4390 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4391 			struct binder_ref_death *death;
4392 			uint32_t cmd;
4393 			binder_uintptr_t cookie;
4394 
4395 			death = container_of(w, struct binder_ref_death, work);
4396 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4397 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4398 			else
4399 				cmd = BR_DEAD_BINDER;
4400 			cookie = death->cookie;
4401 
4402 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4403 				     "%d:%d %s %016llx\n",
4404 				      proc->pid, thread->pid,
4405 				      cmd == BR_DEAD_BINDER ?
4406 				      "BR_DEAD_BINDER" :
4407 				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4408 				      (u64)cookie);
4409 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4410 				binder_inner_proc_unlock(proc);
4411 				kfree(death);
4412 				binder_stats_deleted(BINDER_STAT_DEATH);
4413 			} else {
4414 				binder_enqueue_work_ilocked(
4415 						w, &proc->delivered_death);
4416 				binder_inner_proc_unlock(proc);
4417 			}
4418 			if (put_user(cmd, (uint32_t __user *)ptr))
4419 				return -EFAULT;
4420 			ptr += sizeof(uint32_t);
4421 			if (put_user(cookie,
4422 				     (binder_uintptr_t __user *)ptr))
4423 				return -EFAULT;
4424 			ptr += sizeof(binder_uintptr_t);
4425 			binder_stat_br(proc, thread, cmd);
4426 			if (cmd == BR_DEAD_BINDER)
4427 				goto done; /* DEAD_BINDER notifications can cause transactions */
4428 		} break;
4429 		default:
4430 			binder_inner_proc_unlock(proc);
4431 			pr_err("%d:%d: bad work type %d\n",
4432 			       proc->pid, thread->pid, w->type);
4433 			break;
4434 		}
4435 
4436 		if (!t)
4437 			continue;
4438 
4439 		BUG_ON(t->buffer == NULL);
4440 		if (t->buffer->target_node) {
4441 			struct binder_node *target_node = t->buffer->target_node;
4442 
4443 			trd->target.ptr = target_node->ptr;
4444 			trd->cookie =  target_node->cookie;
4445 			t->saved_priority = task_nice(current);
4446 			if (t->priority < target_node->min_priority &&
4447 			    !(t->flags & TF_ONE_WAY))
4448 				binder_set_nice(t->priority);
4449 			else if (!(t->flags & TF_ONE_WAY) ||
4450 				 t->saved_priority > target_node->min_priority)
4451 				binder_set_nice(target_node->min_priority);
4452 			cmd = BR_TRANSACTION;
4453 		} else {
4454 			trd->target.ptr = 0;
4455 			trd->cookie = 0;
4456 			cmd = BR_REPLY;
4457 		}
4458 		trd->code = t->code;
4459 		trd->flags = t->flags;
4460 		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4461 
4462 		t_from = binder_get_txn_from(t);
4463 		if (t_from) {
4464 			struct task_struct *sender = t_from->proc->tsk;
4465 
4466 			trd->sender_pid =
4467 				task_tgid_nr_ns(sender,
4468 						task_active_pid_ns(current));
4469 		} else {
4470 			trd->sender_pid = 0;
4471 		}
4472 
4473 		ret = binder_apply_fd_fixups(proc, t);
4474 		if (ret) {
4475 			struct binder_buffer *buffer = t->buffer;
4476 			bool oneway = !!(t->flags & TF_ONE_WAY);
4477 			int tid = t->debug_id;
4478 
4479 			if (t_from)
4480 				binder_thread_dec_tmpref(t_from);
4481 			buffer->transaction = NULL;
4482 			binder_cleanup_transaction(t, "fd fixups failed",
4483 						   BR_FAILED_REPLY);
4484 			binder_free_buf(proc, buffer);
4485 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4486 				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4487 				     proc->pid, thread->pid,
4488 				     oneway ? "async " :
4489 					(cmd == BR_REPLY ? "reply " : ""),
4490 				     tid, BR_FAILED_REPLY, ret, __LINE__);
4491 			if (cmd == BR_REPLY) {
4492 				cmd = BR_FAILED_REPLY;
4493 				if (put_user(cmd, (uint32_t __user *)ptr))
4494 					return -EFAULT;
4495 				ptr += sizeof(uint32_t);
4496 				binder_stat_br(proc, thread, cmd);
4497 				break;
4498 			}
4499 			continue;
4500 		}
4501 		trd->data_size = t->buffer->data_size;
4502 		trd->offsets_size = t->buffer->offsets_size;
4503 		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4504 		trd->data.ptr.offsets = trd->data.ptr.buffer +
4505 					ALIGN(t->buffer->data_size,
4506 					    sizeof(void *));
4507 
4508 		tr.secctx = t->security_ctx;
4509 		if (t->security_ctx) {
4510 			cmd = BR_TRANSACTION_SEC_CTX;
4511 			trsize = sizeof(tr);
4512 		}
4513 		if (put_user(cmd, (uint32_t __user *)ptr)) {
4514 			if (t_from)
4515 				binder_thread_dec_tmpref(t_from);
4516 
4517 			binder_cleanup_transaction(t, "put_user failed",
4518 						   BR_FAILED_REPLY);
4519 
4520 			return -EFAULT;
4521 		}
4522 		ptr += sizeof(uint32_t);
4523 		if (copy_to_user(ptr, &tr, trsize)) {
4524 			if (t_from)
4525 				binder_thread_dec_tmpref(t_from);
4526 
4527 			binder_cleanup_transaction(t, "copy_to_user failed",
4528 						   BR_FAILED_REPLY);
4529 
4530 			return -EFAULT;
4531 		}
4532 		ptr += trsize;
4533 
4534 		trace_binder_transaction_received(t);
4535 		binder_stat_br(proc, thread, cmd);
4536 		binder_debug(BINDER_DEBUG_TRANSACTION,
4537 			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4538 			     proc->pid, thread->pid,
4539 			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4540 				(cmd == BR_TRANSACTION_SEC_CTX) ?
4541 				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4542 			     t->debug_id, t_from ? t_from->proc->pid : 0,
4543 			     t_from ? t_from->pid : 0, cmd,
4544 			     t->buffer->data_size, t->buffer->offsets_size,
4545 			     (u64)trd->data.ptr.buffer,
4546 			     (u64)trd->data.ptr.offsets);
4547 
4548 		if (t_from)
4549 			binder_thread_dec_tmpref(t_from);
4550 		t->buffer->allow_user_free = 1;
4551 		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4552 			binder_inner_proc_lock(thread->proc);
4553 			t->to_parent = thread->transaction_stack;
4554 			t->to_thread = thread;
4555 			thread->transaction_stack = t;
4556 			binder_inner_proc_unlock(thread->proc);
4557 		} else {
4558 			binder_free_transaction(t);
4559 		}
4560 		break;
4561 	}
4562 
4563 done:
4564 
4565 	*consumed = ptr - buffer;
4566 	binder_inner_proc_lock(proc);
4567 	if (proc->requested_threads == 0 &&
4568 	    list_empty(&thread->proc->waiting_threads) &&
4569 	    proc->requested_threads_started < proc->max_threads &&
4570 	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4571 	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
4572 	     /*spawn a new thread if we leave this out */) {
4573 		proc->requested_threads++;
4574 		binder_inner_proc_unlock(proc);
4575 		binder_debug(BINDER_DEBUG_THREADS,
4576 			     "%d:%d BR_SPAWN_LOOPER\n",
4577 			     proc->pid, thread->pid);
4578 		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4579 			return -EFAULT;
4580 		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4581 	} else
4582 		binder_inner_proc_unlock(proc);
4583 	return 0;
4584 }
4585 
4586 static void binder_release_work(struct binder_proc *proc,
4587 				struct list_head *list)
4588 {
4589 	struct binder_work *w;
4590 
4591 	while (1) {
4592 		w = binder_dequeue_work_head(proc, list);
4593 		if (!w)
4594 			return;
4595 
4596 		switch (w->type) {
4597 		case BINDER_WORK_TRANSACTION: {
4598 			struct binder_transaction *t;
4599 
4600 			t = container_of(w, struct binder_transaction, work);
4601 
4602 			binder_cleanup_transaction(t, "process died.",
4603 						   BR_DEAD_REPLY);
4604 		} break;
4605 		case BINDER_WORK_RETURN_ERROR: {
4606 			struct binder_error *e = container_of(
4607 					w, struct binder_error, work);
4608 
4609 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4610 				"undelivered TRANSACTION_ERROR: %u\n",
4611 				e->cmd);
4612 		} break;
4613 		case BINDER_WORK_TRANSACTION_COMPLETE: {
4614 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4615 				"undelivered TRANSACTION_COMPLETE\n");
4616 			kfree(w);
4617 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4618 		} break;
4619 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4620 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4621 			struct binder_ref_death *death;
4622 
4623 			death = container_of(w, struct binder_ref_death, work);
4624 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4625 				"undelivered death notification, %016llx\n",
4626 				(u64)death->cookie);
4627 			kfree(death);
4628 			binder_stats_deleted(BINDER_STAT_DEATH);
4629 		} break;
4630 		default:
4631 			pr_err("unexpected work type, %d, not freed\n",
4632 			       w->type);
4633 			break;
4634 		}
4635 	}
4636 
4637 }
4638 
4639 static struct binder_thread *binder_get_thread_ilocked(
4640 		struct binder_proc *proc, struct binder_thread *new_thread)
4641 {
4642 	struct binder_thread *thread = NULL;
4643 	struct rb_node *parent = NULL;
4644 	struct rb_node **p = &proc->threads.rb_node;
4645 
4646 	while (*p) {
4647 		parent = *p;
4648 		thread = rb_entry(parent, struct binder_thread, rb_node);
4649 
4650 		if (current->pid < thread->pid)
4651 			p = &(*p)->rb_left;
4652 		else if (current->pid > thread->pid)
4653 			p = &(*p)->rb_right;
4654 		else
4655 			return thread;
4656 	}
4657 	if (!new_thread)
4658 		return NULL;
4659 	thread = new_thread;
4660 	binder_stats_created(BINDER_STAT_THREAD);
4661 	thread->proc = proc;
4662 	thread->pid = current->pid;
4663 	atomic_set(&thread->tmp_ref, 0);
4664 	init_waitqueue_head(&thread->wait);
4665 	INIT_LIST_HEAD(&thread->todo);
4666 	rb_link_node(&thread->rb_node, parent, p);
4667 	rb_insert_color(&thread->rb_node, &proc->threads);
4668 	thread->looper_need_return = true;
4669 	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4670 	thread->return_error.cmd = BR_OK;
4671 	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4672 	thread->reply_error.cmd = BR_OK;
4673 	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4674 	return thread;
4675 }
4676 
4677 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4678 {
4679 	struct binder_thread *thread;
4680 	struct binder_thread *new_thread;
4681 
4682 	binder_inner_proc_lock(proc);
4683 	thread = binder_get_thread_ilocked(proc, NULL);
4684 	binder_inner_proc_unlock(proc);
4685 	if (!thread) {
4686 		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4687 		if (new_thread == NULL)
4688 			return NULL;
4689 		binder_inner_proc_lock(proc);
4690 		thread = binder_get_thread_ilocked(proc, new_thread);
4691 		binder_inner_proc_unlock(proc);
4692 		if (thread != new_thread)
4693 			kfree(new_thread);
4694 	}
4695 	return thread;
4696 }
4697 
4698 static void binder_free_proc(struct binder_proc *proc)
4699 {
4700 	struct binder_device *device;
4701 
4702 	BUG_ON(!list_empty(&proc->todo));
4703 	BUG_ON(!list_empty(&proc->delivered_death));
4704 	device = container_of(proc->context, struct binder_device, context);
4705 	if (refcount_dec_and_test(&device->ref)) {
4706 		kfree(proc->context->name);
4707 		kfree(device);
4708 	}
4709 	binder_alloc_deferred_release(&proc->alloc);
4710 	put_task_struct(proc->tsk);
4711 	binder_stats_deleted(BINDER_STAT_PROC);
4712 	kfree(proc);
4713 }
4714 
4715 static void binder_free_thread(struct binder_thread *thread)
4716 {
4717 	BUG_ON(!list_empty(&thread->todo));
4718 	binder_stats_deleted(BINDER_STAT_THREAD);
4719 	binder_proc_dec_tmpref(thread->proc);
4720 	kfree(thread);
4721 }
4722 
4723 static int binder_thread_release(struct binder_proc *proc,
4724 				 struct binder_thread *thread)
4725 {
4726 	struct binder_transaction *t;
4727 	struct binder_transaction *send_reply = NULL;
4728 	int active_transactions = 0;
4729 	struct binder_transaction *last_t = NULL;
4730 
4731 	binder_inner_proc_lock(thread->proc);
4732 	/*
4733 	 * take a ref on the proc so it survives
4734 	 * after we remove this thread from proc->threads.
4735 	 * The corresponding dec is when we actually
4736 	 * free the thread in binder_free_thread()
4737 	 */
4738 	proc->tmp_ref++;
4739 	/*
4740 	 * take a ref on this thread to ensure it
4741 	 * survives while we are releasing it
4742 	 */
4743 	atomic_inc(&thread->tmp_ref);
4744 	rb_erase(&thread->rb_node, &proc->threads);
4745 	t = thread->transaction_stack;
4746 	if (t) {
4747 		spin_lock(&t->lock);
4748 		if (t->to_thread == thread)
4749 			send_reply = t;
4750 	} else {
4751 		__acquire(&t->lock);
4752 	}
4753 	thread->is_dead = true;
4754 
4755 	while (t) {
4756 		last_t = t;
4757 		active_transactions++;
4758 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4759 			     "release %d:%d transaction %d %s, still active\n",
4760 			      proc->pid, thread->pid,
4761 			     t->debug_id,
4762 			     (t->to_thread == thread) ? "in" : "out");
4763 
4764 		if (t->to_thread == thread) {
4765 			t->to_proc = NULL;
4766 			t->to_thread = NULL;
4767 			if (t->buffer) {
4768 				t->buffer->transaction = NULL;
4769 				t->buffer = NULL;
4770 			}
4771 			t = t->to_parent;
4772 		} else if (t->from == thread) {
4773 			t->from = NULL;
4774 			t = t->from_parent;
4775 		} else
4776 			BUG();
4777 		spin_unlock(&last_t->lock);
4778 		if (t)
4779 			spin_lock(&t->lock);
4780 		else
4781 			__acquire(&t->lock);
4782 	}
4783 	/* annotation for sparse, lock not acquired in last iteration above */
4784 	__release(&t->lock);
4785 
4786 	/*
4787 	 * If this thread used poll, make sure we remove the waitqueue
4788 	 * from any epoll data structures holding it with POLLFREE.
4789 	 * waitqueue_active() is safe to use here because we're holding
4790 	 * the inner lock.
4791 	 */
4792 	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4793 	    waitqueue_active(&thread->wait)) {
4794 		wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
4795 	}
4796 
4797 	binder_inner_proc_unlock(thread->proc);
4798 
4799 	/*
4800 	 * This is needed to avoid races between wake_up_poll() above and
4801 	 * and ep_remove_waitqueue() called for other reasons (eg the epoll file
4802 	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4803 	 * lock, so we can be sure it's done after calling synchronize_rcu().
4804 	 */
4805 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
4806 		synchronize_rcu();
4807 
4808 	if (send_reply)
4809 		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4810 	binder_release_work(proc, &thread->todo);
4811 	binder_thread_dec_tmpref(thread);
4812 	return active_transactions;
4813 }
4814 
4815 static __poll_t binder_poll(struct file *filp,
4816 				struct poll_table_struct *wait)
4817 {
4818 	struct binder_proc *proc = filp->private_data;
4819 	struct binder_thread *thread = NULL;
4820 	bool wait_for_proc_work;
4821 
4822 	thread = binder_get_thread(proc);
4823 	if (!thread)
4824 		return POLLERR;
4825 
4826 	binder_inner_proc_lock(thread->proc);
4827 	thread->looper |= BINDER_LOOPER_STATE_POLL;
4828 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4829 
4830 	binder_inner_proc_unlock(thread->proc);
4831 
4832 	poll_wait(filp, &thread->wait, wait);
4833 
4834 	if (binder_has_work(thread, wait_for_proc_work))
4835 		return EPOLLIN;
4836 
4837 	return 0;
4838 }
4839 
4840 static int binder_ioctl_write_read(struct file *filp,
4841 				unsigned int cmd, unsigned long arg,
4842 				struct binder_thread *thread)
4843 {
4844 	int ret = 0;
4845 	struct binder_proc *proc = filp->private_data;
4846 	unsigned int size = _IOC_SIZE(cmd);
4847 	void __user *ubuf = (void __user *)arg;
4848 	struct binder_write_read bwr;
4849 
4850 	if (size != sizeof(struct binder_write_read)) {
4851 		ret = -EINVAL;
4852 		goto out;
4853 	}
4854 	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4855 		ret = -EFAULT;
4856 		goto out;
4857 	}
4858 	binder_debug(BINDER_DEBUG_READ_WRITE,
4859 		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4860 		     proc->pid, thread->pid,
4861 		     (u64)bwr.write_size, (u64)bwr.write_buffer,
4862 		     (u64)bwr.read_size, (u64)bwr.read_buffer);
4863 
4864 	if (bwr.write_size > 0) {
4865 		ret = binder_thread_write(proc, thread,
4866 					  bwr.write_buffer,
4867 					  bwr.write_size,
4868 					  &bwr.write_consumed);
4869 		trace_binder_write_done(ret);
4870 		if (ret < 0) {
4871 			bwr.read_consumed = 0;
4872 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4873 				ret = -EFAULT;
4874 			goto out;
4875 		}
4876 	}
4877 	if (bwr.read_size > 0) {
4878 		ret = binder_thread_read(proc, thread, bwr.read_buffer,
4879 					 bwr.read_size,
4880 					 &bwr.read_consumed,
4881 					 filp->f_flags & O_NONBLOCK);
4882 		trace_binder_read_done(ret);
4883 		binder_inner_proc_lock(proc);
4884 		if (!binder_worklist_empty_ilocked(&proc->todo))
4885 			binder_wakeup_proc_ilocked(proc);
4886 		binder_inner_proc_unlock(proc);
4887 		if (ret < 0) {
4888 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4889 				ret = -EFAULT;
4890 			goto out;
4891 		}
4892 	}
4893 	binder_debug(BINDER_DEBUG_READ_WRITE,
4894 		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4895 		     proc->pid, thread->pid,
4896 		     (u64)bwr.write_consumed, (u64)bwr.write_size,
4897 		     (u64)bwr.read_consumed, (u64)bwr.read_size);
4898 	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4899 		ret = -EFAULT;
4900 		goto out;
4901 	}
4902 out:
4903 	return ret;
4904 }
4905 
4906 static int binder_ioctl_set_ctx_mgr(struct file *filp,
4907 				    struct flat_binder_object *fbo)
4908 {
4909 	int ret = 0;
4910 	struct binder_proc *proc = filp->private_data;
4911 	struct binder_context *context = proc->context;
4912 	struct binder_node *new_node;
4913 	kuid_t curr_euid = current_euid();
4914 
4915 	mutex_lock(&context->context_mgr_node_lock);
4916 	if (context->binder_context_mgr_node) {
4917 		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4918 		ret = -EBUSY;
4919 		goto out;
4920 	}
4921 	ret = security_binder_set_context_mgr(proc->tsk);
4922 	if (ret < 0)
4923 		goto out;
4924 	if (uid_valid(context->binder_context_mgr_uid)) {
4925 		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4926 			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4927 			       from_kuid(&init_user_ns, curr_euid),
4928 			       from_kuid(&init_user_ns,
4929 					 context->binder_context_mgr_uid));
4930 			ret = -EPERM;
4931 			goto out;
4932 		}
4933 	} else {
4934 		context->binder_context_mgr_uid = curr_euid;
4935 	}
4936 	new_node = binder_new_node(proc, fbo);
4937 	if (!new_node) {
4938 		ret = -ENOMEM;
4939 		goto out;
4940 	}
4941 	binder_node_lock(new_node);
4942 	new_node->local_weak_refs++;
4943 	new_node->local_strong_refs++;
4944 	new_node->has_strong_ref = 1;
4945 	new_node->has_weak_ref = 1;
4946 	context->binder_context_mgr_node = new_node;
4947 	binder_node_unlock(new_node);
4948 	binder_put_node(new_node);
4949 out:
4950 	mutex_unlock(&context->context_mgr_node_lock);
4951 	return ret;
4952 }
4953 
4954 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4955 		struct binder_node_info_for_ref *info)
4956 {
4957 	struct binder_node *node;
4958 	struct binder_context *context = proc->context;
4959 	__u32 handle = info->handle;
4960 
4961 	if (info->strong_count || info->weak_count || info->reserved1 ||
4962 	    info->reserved2 || info->reserved3) {
4963 		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
4964 				  proc->pid);
4965 		return -EINVAL;
4966 	}
4967 
4968 	/* This ioctl may only be used by the context manager */
4969 	mutex_lock(&context->context_mgr_node_lock);
4970 	if (!context->binder_context_mgr_node ||
4971 		context->binder_context_mgr_node->proc != proc) {
4972 		mutex_unlock(&context->context_mgr_node_lock);
4973 		return -EPERM;
4974 	}
4975 	mutex_unlock(&context->context_mgr_node_lock);
4976 
4977 	node = binder_get_node_from_ref(proc, handle, true, NULL);
4978 	if (!node)
4979 		return -EINVAL;
4980 
4981 	info->strong_count = node->local_strong_refs +
4982 		node->internal_strong_refs;
4983 	info->weak_count = node->local_weak_refs;
4984 
4985 	binder_put_node(node);
4986 
4987 	return 0;
4988 }
4989 
4990 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4991 				struct binder_node_debug_info *info)
4992 {
4993 	struct rb_node *n;
4994 	binder_uintptr_t ptr = info->ptr;
4995 
4996 	memset(info, 0, sizeof(*info));
4997 
4998 	binder_inner_proc_lock(proc);
4999 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5000 		struct binder_node *node = rb_entry(n, struct binder_node,
5001 						    rb_node);
5002 		if (node->ptr > ptr) {
5003 			info->ptr = node->ptr;
5004 			info->cookie = node->cookie;
5005 			info->has_strong_ref = node->has_strong_ref;
5006 			info->has_weak_ref = node->has_weak_ref;
5007 			break;
5008 		}
5009 	}
5010 	binder_inner_proc_unlock(proc);
5011 
5012 	return 0;
5013 }
5014 
5015 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5016 {
5017 	int ret;
5018 	struct binder_proc *proc = filp->private_data;
5019 	struct binder_thread *thread;
5020 	unsigned int size = _IOC_SIZE(cmd);
5021 	void __user *ubuf = (void __user *)arg;
5022 
5023 	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
5024 			proc->pid, current->pid, cmd, arg);*/
5025 
5026 	binder_selftest_alloc(&proc->alloc);
5027 
5028 	trace_binder_ioctl(cmd, arg);
5029 
5030 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5031 	if (ret)
5032 		goto err_unlocked;
5033 
5034 	thread = binder_get_thread(proc);
5035 	if (thread == NULL) {
5036 		ret = -ENOMEM;
5037 		goto err;
5038 	}
5039 
5040 	switch (cmd) {
5041 	case BINDER_WRITE_READ:
5042 		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5043 		if (ret)
5044 			goto err;
5045 		break;
5046 	case BINDER_SET_MAX_THREADS: {
5047 		int max_threads;
5048 
5049 		if (copy_from_user(&max_threads, ubuf,
5050 				   sizeof(max_threads))) {
5051 			ret = -EINVAL;
5052 			goto err;
5053 		}
5054 		binder_inner_proc_lock(proc);
5055 		proc->max_threads = max_threads;
5056 		binder_inner_proc_unlock(proc);
5057 		break;
5058 	}
5059 	case BINDER_SET_CONTEXT_MGR_EXT: {
5060 		struct flat_binder_object fbo;
5061 
5062 		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5063 			ret = -EINVAL;
5064 			goto err;
5065 		}
5066 		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5067 		if (ret)
5068 			goto err;
5069 		break;
5070 	}
5071 	case BINDER_SET_CONTEXT_MGR:
5072 		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5073 		if (ret)
5074 			goto err;
5075 		break;
5076 	case BINDER_THREAD_EXIT:
5077 		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5078 			     proc->pid, thread->pid);
5079 		binder_thread_release(proc, thread);
5080 		thread = NULL;
5081 		break;
5082 	case BINDER_VERSION: {
5083 		struct binder_version __user *ver = ubuf;
5084 
5085 		if (size != sizeof(struct binder_version)) {
5086 			ret = -EINVAL;
5087 			goto err;
5088 		}
5089 		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5090 			     &ver->protocol_version)) {
5091 			ret = -EINVAL;
5092 			goto err;
5093 		}
5094 		break;
5095 	}
5096 	case BINDER_GET_NODE_INFO_FOR_REF: {
5097 		struct binder_node_info_for_ref info;
5098 
5099 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5100 			ret = -EFAULT;
5101 			goto err;
5102 		}
5103 
5104 		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5105 		if (ret < 0)
5106 			goto err;
5107 
5108 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5109 			ret = -EFAULT;
5110 			goto err;
5111 		}
5112 
5113 		break;
5114 	}
5115 	case BINDER_GET_NODE_DEBUG_INFO: {
5116 		struct binder_node_debug_info info;
5117 
5118 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5119 			ret = -EFAULT;
5120 			goto err;
5121 		}
5122 
5123 		ret = binder_ioctl_get_node_debug_info(proc, &info);
5124 		if (ret < 0)
5125 			goto err;
5126 
5127 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5128 			ret = -EFAULT;
5129 			goto err;
5130 		}
5131 		break;
5132 	}
5133 	default:
5134 		ret = -EINVAL;
5135 		goto err;
5136 	}
5137 	ret = 0;
5138 err:
5139 	if (thread)
5140 		thread->looper_need_return = false;
5141 	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5142 	if (ret && ret != -ERESTARTSYS)
5143 		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5144 err_unlocked:
5145 	trace_binder_ioctl_done(ret);
5146 	return ret;
5147 }
5148 
5149 static void binder_vma_open(struct vm_area_struct *vma)
5150 {
5151 	struct binder_proc *proc = vma->vm_private_data;
5152 
5153 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5154 		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5155 		     proc->pid, vma->vm_start, vma->vm_end,
5156 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5157 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5158 }
5159 
5160 static void binder_vma_close(struct vm_area_struct *vma)
5161 {
5162 	struct binder_proc *proc = vma->vm_private_data;
5163 
5164 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5165 		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5166 		     proc->pid, vma->vm_start, vma->vm_end,
5167 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5168 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5169 	binder_alloc_vma_close(&proc->alloc);
5170 }
5171 
5172 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5173 {
5174 	return VM_FAULT_SIGBUS;
5175 }
5176 
5177 static const struct vm_operations_struct binder_vm_ops = {
5178 	.open = binder_vma_open,
5179 	.close = binder_vma_close,
5180 	.fault = binder_vm_fault,
5181 };
5182 
5183 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5184 {
5185 	int ret;
5186 	struct binder_proc *proc = filp->private_data;
5187 	const char *failure_string;
5188 
5189 	if (proc->tsk != current->group_leader)
5190 		return -EINVAL;
5191 
5192 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5193 		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5194 		     __func__, proc->pid, vma->vm_start, vma->vm_end,
5195 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5196 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5197 
5198 	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5199 		ret = -EPERM;
5200 		failure_string = "bad vm_flags";
5201 		goto err_bad_arg;
5202 	}
5203 	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5204 	vma->vm_flags &= ~VM_MAYWRITE;
5205 
5206 	vma->vm_ops = &binder_vm_ops;
5207 	vma->vm_private_data = proc;
5208 
5209 	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
5210 	if (ret)
5211 		return ret;
5212 	return 0;
5213 
5214 err_bad_arg:
5215 	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5216 	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
5217 	return ret;
5218 }
5219 
5220 static int binder_open(struct inode *nodp, struct file *filp)
5221 {
5222 	struct binder_proc *proc, *itr;
5223 	struct binder_device *binder_dev;
5224 	struct binderfs_info *info;
5225 	struct dentry *binder_binderfs_dir_entry_proc = NULL;
5226 	bool existing_pid = false;
5227 
5228 	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5229 		     current->group_leader->pid, current->pid);
5230 
5231 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5232 	if (proc == NULL)
5233 		return -ENOMEM;
5234 	spin_lock_init(&proc->inner_lock);
5235 	spin_lock_init(&proc->outer_lock);
5236 	get_task_struct(current->group_leader);
5237 	proc->tsk = current->group_leader;
5238 	INIT_LIST_HEAD(&proc->todo);
5239 	proc->default_priority = task_nice(current);
5240 	/* binderfs stashes devices in i_private */
5241 	if (is_binderfs_device(nodp)) {
5242 		binder_dev = nodp->i_private;
5243 		info = nodp->i_sb->s_fs_info;
5244 		binder_binderfs_dir_entry_proc = info->proc_log_dir;
5245 	} else {
5246 		binder_dev = container_of(filp->private_data,
5247 					  struct binder_device, miscdev);
5248 	}
5249 	refcount_inc(&binder_dev->ref);
5250 	proc->context = &binder_dev->context;
5251 	binder_alloc_init(&proc->alloc);
5252 
5253 	binder_stats_created(BINDER_STAT_PROC);
5254 	proc->pid = current->group_leader->pid;
5255 	INIT_LIST_HEAD(&proc->delivered_death);
5256 	INIT_LIST_HEAD(&proc->waiting_threads);
5257 	filp->private_data = proc;
5258 
5259 	mutex_lock(&binder_procs_lock);
5260 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
5261 		if (itr->pid == proc->pid) {
5262 			existing_pid = true;
5263 			break;
5264 		}
5265 	}
5266 	hlist_add_head(&proc->proc_node, &binder_procs);
5267 	mutex_unlock(&binder_procs_lock);
5268 
5269 	if (binder_debugfs_dir_entry_proc && !existing_pid) {
5270 		char strbuf[11];
5271 
5272 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5273 		/*
5274 		 * proc debug entries are shared between contexts.
5275 		 * Only create for the first PID to avoid debugfs log spamming
5276 		 * The printing code will anyway print all contexts for a given
5277 		 * PID so this is not a problem.
5278 		 */
5279 		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5280 			binder_debugfs_dir_entry_proc,
5281 			(void *)(unsigned long)proc->pid,
5282 			&proc_fops);
5283 	}
5284 
5285 	if (binder_binderfs_dir_entry_proc && !existing_pid) {
5286 		char strbuf[11];
5287 		struct dentry *binderfs_entry;
5288 
5289 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5290 		/*
5291 		 * Similar to debugfs, the process specific log file is shared
5292 		 * between contexts. Only create for the first PID.
5293 		 * This is ok since same as debugfs, the log file will contain
5294 		 * information on all contexts of a given PID.
5295 		 */
5296 		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5297 			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5298 		if (!IS_ERR(binderfs_entry)) {
5299 			proc->binderfs_entry = binderfs_entry;
5300 		} else {
5301 			int error;
5302 
5303 			error = PTR_ERR(binderfs_entry);
5304 			pr_warn("Unable to create file %s in binderfs (error %d)\n",
5305 				strbuf, error);
5306 		}
5307 	}
5308 
5309 	return 0;
5310 }
5311 
5312 static int binder_flush(struct file *filp, fl_owner_t id)
5313 {
5314 	struct binder_proc *proc = filp->private_data;
5315 
5316 	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5317 
5318 	return 0;
5319 }
5320 
5321 static void binder_deferred_flush(struct binder_proc *proc)
5322 {
5323 	struct rb_node *n;
5324 	int wake_count = 0;
5325 
5326 	binder_inner_proc_lock(proc);
5327 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5328 		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5329 
5330 		thread->looper_need_return = true;
5331 		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5332 			wake_up_interruptible(&thread->wait);
5333 			wake_count++;
5334 		}
5335 	}
5336 	binder_inner_proc_unlock(proc);
5337 
5338 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5339 		     "binder_flush: %d woke %d threads\n", proc->pid,
5340 		     wake_count);
5341 }
5342 
5343 static int binder_release(struct inode *nodp, struct file *filp)
5344 {
5345 	struct binder_proc *proc = filp->private_data;
5346 
5347 	debugfs_remove(proc->debugfs_entry);
5348 
5349 	if (proc->binderfs_entry) {
5350 		binderfs_remove_file(proc->binderfs_entry);
5351 		proc->binderfs_entry = NULL;
5352 	}
5353 
5354 	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5355 
5356 	return 0;
5357 }
5358 
5359 static int binder_node_release(struct binder_node *node, int refs)
5360 {
5361 	struct binder_ref *ref;
5362 	int death = 0;
5363 	struct binder_proc *proc = node->proc;
5364 
5365 	binder_release_work(proc, &node->async_todo);
5366 
5367 	binder_node_lock(node);
5368 	binder_inner_proc_lock(proc);
5369 	binder_dequeue_work_ilocked(&node->work);
5370 	/*
5371 	 * The caller must have taken a temporary ref on the node,
5372 	 */
5373 	BUG_ON(!node->tmp_refs);
5374 	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5375 		binder_inner_proc_unlock(proc);
5376 		binder_node_unlock(node);
5377 		binder_free_node(node);
5378 
5379 		return refs;
5380 	}
5381 
5382 	node->proc = NULL;
5383 	node->local_strong_refs = 0;
5384 	node->local_weak_refs = 0;
5385 	binder_inner_proc_unlock(proc);
5386 
5387 	spin_lock(&binder_dead_nodes_lock);
5388 	hlist_add_head(&node->dead_node, &binder_dead_nodes);
5389 	spin_unlock(&binder_dead_nodes_lock);
5390 
5391 	hlist_for_each_entry(ref, &node->refs, node_entry) {
5392 		refs++;
5393 		/*
5394 		 * Need the node lock to synchronize
5395 		 * with new notification requests and the
5396 		 * inner lock to synchronize with queued
5397 		 * death notifications.
5398 		 */
5399 		binder_inner_proc_lock(ref->proc);
5400 		if (!ref->death) {
5401 			binder_inner_proc_unlock(ref->proc);
5402 			continue;
5403 		}
5404 
5405 		death++;
5406 
5407 		BUG_ON(!list_empty(&ref->death->work.entry));
5408 		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5409 		binder_enqueue_work_ilocked(&ref->death->work,
5410 					    &ref->proc->todo);
5411 		binder_wakeup_proc_ilocked(ref->proc);
5412 		binder_inner_proc_unlock(ref->proc);
5413 	}
5414 
5415 	binder_debug(BINDER_DEBUG_DEAD_BINDER,
5416 		     "node %d now dead, refs %d, death %d\n",
5417 		     node->debug_id, refs, death);
5418 	binder_node_unlock(node);
5419 	binder_put_node(node);
5420 
5421 	return refs;
5422 }
5423 
5424 static void binder_deferred_release(struct binder_proc *proc)
5425 {
5426 	struct binder_context *context = proc->context;
5427 	struct rb_node *n;
5428 	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5429 
5430 	mutex_lock(&binder_procs_lock);
5431 	hlist_del(&proc->proc_node);
5432 	mutex_unlock(&binder_procs_lock);
5433 
5434 	mutex_lock(&context->context_mgr_node_lock);
5435 	if (context->binder_context_mgr_node &&
5436 	    context->binder_context_mgr_node->proc == proc) {
5437 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
5438 			     "%s: %d context_mgr_node gone\n",
5439 			     __func__, proc->pid);
5440 		context->binder_context_mgr_node = NULL;
5441 	}
5442 	mutex_unlock(&context->context_mgr_node_lock);
5443 	binder_inner_proc_lock(proc);
5444 	/*
5445 	 * Make sure proc stays alive after we
5446 	 * remove all the threads
5447 	 */
5448 	proc->tmp_ref++;
5449 
5450 	proc->is_dead = true;
5451 	threads = 0;
5452 	active_transactions = 0;
5453 	while ((n = rb_first(&proc->threads))) {
5454 		struct binder_thread *thread;
5455 
5456 		thread = rb_entry(n, struct binder_thread, rb_node);
5457 		binder_inner_proc_unlock(proc);
5458 		threads++;
5459 		active_transactions += binder_thread_release(proc, thread);
5460 		binder_inner_proc_lock(proc);
5461 	}
5462 
5463 	nodes = 0;
5464 	incoming_refs = 0;
5465 	while ((n = rb_first(&proc->nodes))) {
5466 		struct binder_node *node;
5467 
5468 		node = rb_entry(n, struct binder_node, rb_node);
5469 		nodes++;
5470 		/*
5471 		 * take a temporary ref on the node before
5472 		 * calling binder_node_release() which will either
5473 		 * kfree() the node or call binder_put_node()
5474 		 */
5475 		binder_inc_node_tmpref_ilocked(node);
5476 		rb_erase(&node->rb_node, &proc->nodes);
5477 		binder_inner_proc_unlock(proc);
5478 		incoming_refs = binder_node_release(node, incoming_refs);
5479 		binder_inner_proc_lock(proc);
5480 	}
5481 	binder_inner_proc_unlock(proc);
5482 
5483 	outgoing_refs = 0;
5484 	binder_proc_lock(proc);
5485 	while ((n = rb_first(&proc->refs_by_desc))) {
5486 		struct binder_ref *ref;
5487 
5488 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
5489 		outgoing_refs++;
5490 		binder_cleanup_ref_olocked(ref);
5491 		binder_proc_unlock(proc);
5492 		binder_free_ref(ref);
5493 		binder_proc_lock(proc);
5494 	}
5495 	binder_proc_unlock(proc);
5496 
5497 	binder_release_work(proc, &proc->todo);
5498 	binder_release_work(proc, &proc->delivered_death);
5499 
5500 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5501 		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5502 		     __func__, proc->pid, threads, nodes, incoming_refs,
5503 		     outgoing_refs, active_transactions);
5504 
5505 	binder_proc_dec_tmpref(proc);
5506 }
5507 
5508 static void binder_deferred_func(struct work_struct *work)
5509 {
5510 	struct binder_proc *proc;
5511 
5512 	int defer;
5513 
5514 	do {
5515 		mutex_lock(&binder_deferred_lock);
5516 		if (!hlist_empty(&binder_deferred_list)) {
5517 			proc = hlist_entry(binder_deferred_list.first,
5518 					struct binder_proc, deferred_work_node);
5519 			hlist_del_init(&proc->deferred_work_node);
5520 			defer = proc->deferred_work;
5521 			proc->deferred_work = 0;
5522 		} else {
5523 			proc = NULL;
5524 			defer = 0;
5525 		}
5526 		mutex_unlock(&binder_deferred_lock);
5527 
5528 		if (defer & BINDER_DEFERRED_FLUSH)
5529 			binder_deferred_flush(proc);
5530 
5531 		if (defer & BINDER_DEFERRED_RELEASE)
5532 			binder_deferred_release(proc); /* frees proc */
5533 	} while (proc);
5534 }
5535 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5536 
5537 static void
5538 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5539 {
5540 	mutex_lock(&binder_deferred_lock);
5541 	proc->deferred_work |= defer;
5542 	if (hlist_unhashed(&proc->deferred_work_node)) {
5543 		hlist_add_head(&proc->deferred_work_node,
5544 				&binder_deferred_list);
5545 		schedule_work(&binder_deferred_work);
5546 	}
5547 	mutex_unlock(&binder_deferred_lock);
5548 }
5549 
5550 static void print_binder_transaction_ilocked(struct seq_file *m,
5551 					     struct binder_proc *proc,
5552 					     const char *prefix,
5553 					     struct binder_transaction *t)
5554 {
5555 	struct binder_proc *to_proc;
5556 	struct binder_buffer *buffer = t->buffer;
5557 
5558 	spin_lock(&t->lock);
5559 	to_proc = t->to_proc;
5560 	seq_printf(m,
5561 		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5562 		   prefix, t->debug_id, t,
5563 		   t->from ? t->from->proc->pid : 0,
5564 		   t->from ? t->from->pid : 0,
5565 		   to_proc ? to_proc->pid : 0,
5566 		   t->to_thread ? t->to_thread->pid : 0,
5567 		   t->code, t->flags, t->priority, t->need_reply);
5568 	spin_unlock(&t->lock);
5569 
5570 	if (proc != to_proc) {
5571 		/*
5572 		 * Can only safely deref buffer if we are holding the
5573 		 * correct proc inner lock for this node
5574 		 */
5575 		seq_puts(m, "\n");
5576 		return;
5577 	}
5578 
5579 	if (buffer == NULL) {
5580 		seq_puts(m, " buffer free\n");
5581 		return;
5582 	}
5583 	if (buffer->target_node)
5584 		seq_printf(m, " node %d", buffer->target_node->debug_id);
5585 	seq_printf(m, " size %zd:%zd data %pK\n",
5586 		   buffer->data_size, buffer->offsets_size,
5587 		   buffer->user_data);
5588 }
5589 
5590 static void print_binder_work_ilocked(struct seq_file *m,
5591 				     struct binder_proc *proc,
5592 				     const char *prefix,
5593 				     const char *transaction_prefix,
5594 				     struct binder_work *w)
5595 {
5596 	struct binder_node *node;
5597 	struct binder_transaction *t;
5598 
5599 	switch (w->type) {
5600 	case BINDER_WORK_TRANSACTION:
5601 		t = container_of(w, struct binder_transaction, work);
5602 		print_binder_transaction_ilocked(
5603 				m, proc, transaction_prefix, t);
5604 		break;
5605 	case BINDER_WORK_RETURN_ERROR: {
5606 		struct binder_error *e = container_of(
5607 				w, struct binder_error, work);
5608 
5609 		seq_printf(m, "%stransaction error: %u\n",
5610 			   prefix, e->cmd);
5611 	} break;
5612 	case BINDER_WORK_TRANSACTION_COMPLETE:
5613 		seq_printf(m, "%stransaction complete\n", prefix);
5614 		break;
5615 	case BINDER_WORK_NODE:
5616 		node = container_of(w, struct binder_node, work);
5617 		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5618 			   prefix, node->debug_id,
5619 			   (u64)node->ptr, (u64)node->cookie);
5620 		break;
5621 	case BINDER_WORK_DEAD_BINDER:
5622 		seq_printf(m, "%shas dead binder\n", prefix);
5623 		break;
5624 	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5625 		seq_printf(m, "%shas cleared dead binder\n", prefix);
5626 		break;
5627 	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5628 		seq_printf(m, "%shas cleared death notification\n", prefix);
5629 		break;
5630 	default:
5631 		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5632 		break;
5633 	}
5634 }
5635 
5636 static void print_binder_thread_ilocked(struct seq_file *m,
5637 					struct binder_thread *thread,
5638 					int print_always)
5639 {
5640 	struct binder_transaction *t;
5641 	struct binder_work *w;
5642 	size_t start_pos = m->count;
5643 	size_t header_pos;
5644 
5645 	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
5646 			thread->pid, thread->looper,
5647 			thread->looper_need_return,
5648 			atomic_read(&thread->tmp_ref));
5649 	header_pos = m->count;
5650 	t = thread->transaction_stack;
5651 	while (t) {
5652 		if (t->from == thread) {
5653 			print_binder_transaction_ilocked(m, thread->proc,
5654 					"    outgoing transaction", t);
5655 			t = t->from_parent;
5656 		} else if (t->to_thread == thread) {
5657 			print_binder_transaction_ilocked(m, thread->proc,
5658 						 "    incoming transaction", t);
5659 			t = t->to_parent;
5660 		} else {
5661 			print_binder_transaction_ilocked(m, thread->proc,
5662 					"    bad transaction", t);
5663 			t = NULL;
5664 		}
5665 	}
5666 	list_for_each_entry(w, &thread->todo, entry) {
5667 		print_binder_work_ilocked(m, thread->proc, "    ",
5668 					  "    pending transaction", w);
5669 	}
5670 	if (!print_always && m->count == header_pos)
5671 		m->count = start_pos;
5672 }
5673 
5674 static void print_binder_node_nilocked(struct seq_file *m,
5675 				       struct binder_node *node)
5676 {
5677 	struct binder_ref *ref;
5678 	struct binder_work *w;
5679 	int count;
5680 
5681 	count = 0;
5682 	hlist_for_each_entry(ref, &node->refs, node_entry)
5683 		count++;
5684 
5685 	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5686 		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
5687 		   node->has_strong_ref, node->has_weak_ref,
5688 		   node->local_strong_refs, node->local_weak_refs,
5689 		   node->internal_strong_refs, count, node->tmp_refs);
5690 	if (count) {
5691 		seq_puts(m, " proc");
5692 		hlist_for_each_entry(ref, &node->refs, node_entry)
5693 			seq_printf(m, " %d", ref->proc->pid);
5694 	}
5695 	seq_puts(m, "\n");
5696 	if (node->proc) {
5697 		list_for_each_entry(w, &node->async_todo, entry)
5698 			print_binder_work_ilocked(m, node->proc, "    ",
5699 					  "    pending async transaction", w);
5700 	}
5701 }
5702 
5703 static void print_binder_ref_olocked(struct seq_file *m,
5704 				     struct binder_ref *ref)
5705 {
5706 	binder_node_lock(ref->node);
5707 	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
5708 		   ref->data.debug_id, ref->data.desc,
5709 		   ref->node->proc ? "" : "dead ",
5710 		   ref->node->debug_id, ref->data.strong,
5711 		   ref->data.weak, ref->death);
5712 	binder_node_unlock(ref->node);
5713 }
5714 
5715 static void print_binder_proc(struct seq_file *m,
5716 			      struct binder_proc *proc, int print_all)
5717 {
5718 	struct binder_work *w;
5719 	struct rb_node *n;
5720 	size_t start_pos = m->count;
5721 	size_t header_pos;
5722 	struct binder_node *last_node = NULL;
5723 
5724 	seq_printf(m, "proc %d\n", proc->pid);
5725 	seq_printf(m, "context %s\n", proc->context->name);
5726 	header_pos = m->count;
5727 
5728 	binder_inner_proc_lock(proc);
5729 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5730 		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5731 						rb_node), print_all);
5732 
5733 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5734 		struct binder_node *node = rb_entry(n, struct binder_node,
5735 						    rb_node);
5736 		if (!print_all && !node->has_async_transaction)
5737 			continue;
5738 
5739 		/*
5740 		 * take a temporary reference on the node so it
5741 		 * survives and isn't removed from the tree
5742 		 * while we print it.
5743 		 */
5744 		binder_inc_node_tmpref_ilocked(node);
5745 		/* Need to drop inner lock to take node lock */
5746 		binder_inner_proc_unlock(proc);
5747 		if (last_node)
5748 			binder_put_node(last_node);
5749 		binder_node_inner_lock(node);
5750 		print_binder_node_nilocked(m, node);
5751 		binder_node_inner_unlock(node);
5752 		last_node = node;
5753 		binder_inner_proc_lock(proc);
5754 	}
5755 	binder_inner_proc_unlock(proc);
5756 	if (last_node)
5757 		binder_put_node(last_node);
5758 
5759 	if (print_all) {
5760 		binder_proc_lock(proc);
5761 		for (n = rb_first(&proc->refs_by_desc);
5762 		     n != NULL;
5763 		     n = rb_next(n))
5764 			print_binder_ref_olocked(m, rb_entry(n,
5765 							    struct binder_ref,
5766 							    rb_node_desc));
5767 		binder_proc_unlock(proc);
5768 	}
5769 	binder_alloc_print_allocated(m, &proc->alloc);
5770 	binder_inner_proc_lock(proc);
5771 	list_for_each_entry(w, &proc->todo, entry)
5772 		print_binder_work_ilocked(m, proc, "  ",
5773 					  "  pending transaction", w);
5774 	list_for_each_entry(w, &proc->delivered_death, entry) {
5775 		seq_puts(m, "  has delivered dead binder\n");
5776 		break;
5777 	}
5778 	binder_inner_proc_unlock(proc);
5779 	if (!print_all && m->count == header_pos)
5780 		m->count = start_pos;
5781 }
5782 
5783 static const char * const binder_return_strings[] = {
5784 	"BR_ERROR",
5785 	"BR_OK",
5786 	"BR_TRANSACTION",
5787 	"BR_REPLY",
5788 	"BR_ACQUIRE_RESULT",
5789 	"BR_DEAD_REPLY",
5790 	"BR_TRANSACTION_COMPLETE",
5791 	"BR_INCREFS",
5792 	"BR_ACQUIRE",
5793 	"BR_RELEASE",
5794 	"BR_DECREFS",
5795 	"BR_ATTEMPT_ACQUIRE",
5796 	"BR_NOOP",
5797 	"BR_SPAWN_LOOPER",
5798 	"BR_FINISHED",
5799 	"BR_DEAD_BINDER",
5800 	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
5801 	"BR_FAILED_REPLY"
5802 };
5803 
5804 static const char * const binder_command_strings[] = {
5805 	"BC_TRANSACTION",
5806 	"BC_REPLY",
5807 	"BC_ACQUIRE_RESULT",
5808 	"BC_FREE_BUFFER",
5809 	"BC_INCREFS",
5810 	"BC_ACQUIRE",
5811 	"BC_RELEASE",
5812 	"BC_DECREFS",
5813 	"BC_INCREFS_DONE",
5814 	"BC_ACQUIRE_DONE",
5815 	"BC_ATTEMPT_ACQUIRE",
5816 	"BC_REGISTER_LOOPER",
5817 	"BC_ENTER_LOOPER",
5818 	"BC_EXIT_LOOPER",
5819 	"BC_REQUEST_DEATH_NOTIFICATION",
5820 	"BC_CLEAR_DEATH_NOTIFICATION",
5821 	"BC_DEAD_BINDER_DONE",
5822 	"BC_TRANSACTION_SG",
5823 	"BC_REPLY_SG",
5824 };
5825 
5826 static const char * const binder_objstat_strings[] = {
5827 	"proc",
5828 	"thread",
5829 	"node",
5830 	"ref",
5831 	"death",
5832 	"transaction",
5833 	"transaction_complete"
5834 };
5835 
5836 static void print_binder_stats(struct seq_file *m, const char *prefix,
5837 			       struct binder_stats *stats)
5838 {
5839 	int i;
5840 
5841 	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5842 		     ARRAY_SIZE(binder_command_strings));
5843 	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5844 		int temp = atomic_read(&stats->bc[i]);
5845 
5846 		if (temp)
5847 			seq_printf(m, "%s%s: %d\n", prefix,
5848 				   binder_command_strings[i], temp);
5849 	}
5850 
5851 	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5852 		     ARRAY_SIZE(binder_return_strings));
5853 	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5854 		int temp = atomic_read(&stats->br[i]);
5855 
5856 		if (temp)
5857 			seq_printf(m, "%s%s: %d\n", prefix,
5858 				   binder_return_strings[i], temp);
5859 	}
5860 
5861 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5862 		     ARRAY_SIZE(binder_objstat_strings));
5863 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5864 		     ARRAY_SIZE(stats->obj_deleted));
5865 	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5866 		int created = atomic_read(&stats->obj_created[i]);
5867 		int deleted = atomic_read(&stats->obj_deleted[i]);
5868 
5869 		if (created || deleted)
5870 			seq_printf(m, "%s%s: active %d total %d\n",
5871 				prefix,
5872 				binder_objstat_strings[i],
5873 				created - deleted,
5874 				created);
5875 	}
5876 }
5877 
5878 static void print_binder_proc_stats(struct seq_file *m,
5879 				    struct binder_proc *proc)
5880 {
5881 	struct binder_work *w;
5882 	struct binder_thread *thread;
5883 	struct rb_node *n;
5884 	int count, strong, weak, ready_threads;
5885 	size_t free_async_space =
5886 		binder_alloc_get_free_async_space(&proc->alloc);
5887 
5888 	seq_printf(m, "proc %d\n", proc->pid);
5889 	seq_printf(m, "context %s\n", proc->context->name);
5890 	count = 0;
5891 	ready_threads = 0;
5892 	binder_inner_proc_lock(proc);
5893 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5894 		count++;
5895 
5896 	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5897 		ready_threads++;
5898 
5899 	seq_printf(m, "  threads: %d\n", count);
5900 	seq_printf(m, "  requested threads: %d+%d/%d\n"
5901 			"  ready threads %d\n"
5902 			"  free async space %zd\n", proc->requested_threads,
5903 			proc->requested_threads_started, proc->max_threads,
5904 			ready_threads,
5905 			free_async_space);
5906 	count = 0;
5907 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5908 		count++;
5909 	binder_inner_proc_unlock(proc);
5910 	seq_printf(m, "  nodes: %d\n", count);
5911 	count = 0;
5912 	strong = 0;
5913 	weak = 0;
5914 	binder_proc_lock(proc);
5915 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5916 		struct binder_ref *ref = rb_entry(n, struct binder_ref,
5917 						  rb_node_desc);
5918 		count++;
5919 		strong += ref->data.strong;
5920 		weak += ref->data.weak;
5921 	}
5922 	binder_proc_unlock(proc);
5923 	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
5924 
5925 	count = binder_alloc_get_allocated_count(&proc->alloc);
5926 	seq_printf(m, "  buffers: %d\n", count);
5927 
5928 	binder_alloc_print_pages(m, &proc->alloc);
5929 
5930 	count = 0;
5931 	binder_inner_proc_lock(proc);
5932 	list_for_each_entry(w, &proc->todo, entry) {
5933 		if (w->type == BINDER_WORK_TRANSACTION)
5934 			count++;
5935 	}
5936 	binder_inner_proc_unlock(proc);
5937 	seq_printf(m, "  pending transactions: %d\n", count);
5938 
5939 	print_binder_stats(m, "  ", &proc->stats);
5940 }
5941 
5942 
5943 int binder_state_show(struct seq_file *m, void *unused)
5944 {
5945 	struct binder_proc *proc;
5946 	struct binder_node *node;
5947 	struct binder_node *last_node = NULL;
5948 
5949 	seq_puts(m, "binder state:\n");
5950 
5951 	spin_lock(&binder_dead_nodes_lock);
5952 	if (!hlist_empty(&binder_dead_nodes))
5953 		seq_puts(m, "dead nodes:\n");
5954 	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5955 		/*
5956 		 * take a temporary reference on the node so it
5957 		 * survives and isn't removed from the list
5958 		 * while we print it.
5959 		 */
5960 		node->tmp_refs++;
5961 		spin_unlock(&binder_dead_nodes_lock);
5962 		if (last_node)
5963 			binder_put_node(last_node);
5964 		binder_node_lock(node);
5965 		print_binder_node_nilocked(m, node);
5966 		binder_node_unlock(node);
5967 		last_node = node;
5968 		spin_lock(&binder_dead_nodes_lock);
5969 	}
5970 	spin_unlock(&binder_dead_nodes_lock);
5971 	if (last_node)
5972 		binder_put_node(last_node);
5973 
5974 	mutex_lock(&binder_procs_lock);
5975 	hlist_for_each_entry(proc, &binder_procs, proc_node)
5976 		print_binder_proc(m, proc, 1);
5977 	mutex_unlock(&binder_procs_lock);
5978 
5979 	return 0;
5980 }
5981 
5982 int binder_stats_show(struct seq_file *m, void *unused)
5983 {
5984 	struct binder_proc *proc;
5985 
5986 	seq_puts(m, "binder stats:\n");
5987 
5988 	print_binder_stats(m, "", &binder_stats);
5989 
5990 	mutex_lock(&binder_procs_lock);
5991 	hlist_for_each_entry(proc, &binder_procs, proc_node)
5992 		print_binder_proc_stats(m, proc);
5993 	mutex_unlock(&binder_procs_lock);
5994 
5995 	return 0;
5996 }
5997 
5998 int binder_transactions_show(struct seq_file *m, void *unused)
5999 {
6000 	struct binder_proc *proc;
6001 
6002 	seq_puts(m, "binder transactions:\n");
6003 	mutex_lock(&binder_procs_lock);
6004 	hlist_for_each_entry(proc, &binder_procs, proc_node)
6005 		print_binder_proc(m, proc, 0);
6006 	mutex_unlock(&binder_procs_lock);
6007 
6008 	return 0;
6009 }
6010 
6011 static int proc_show(struct seq_file *m, void *unused)
6012 {
6013 	struct binder_proc *itr;
6014 	int pid = (unsigned long)m->private;
6015 
6016 	mutex_lock(&binder_procs_lock);
6017 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
6018 		if (itr->pid == pid) {
6019 			seq_puts(m, "binder proc state:\n");
6020 			print_binder_proc(m, itr, 1);
6021 		}
6022 	}
6023 	mutex_unlock(&binder_procs_lock);
6024 
6025 	return 0;
6026 }
6027 
6028 static void print_binder_transaction_log_entry(struct seq_file *m,
6029 					struct binder_transaction_log_entry *e)
6030 {
6031 	int debug_id = READ_ONCE(e->debug_id_done);
6032 	/*
6033 	 * read barrier to guarantee debug_id_done read before
6034 	 * we print the log values
6035 	 */
6036 	smp_rmb();
6037 	seq_printf(m,
6038 		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6039 		   e->debug_id, (e->call_type == 2) ? "reply" :
6040 		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6041 		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
6042 		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
6043 		   e->return_error, e->return_error_param,
6044 		   e->return_error_line);
6045 	/*
6046 	 * read-barrier to guarantee read of debug_id_done after
6047 	 * done printing the fields of the entry
6048 	 */
6049 	smp_rmb();
6050 	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6051 			"\n" : " (incomplete)\n");
6052 }
6053 
6054 int binder_transaction_log_show(struct seq_file *m, void *unused)
6055 {
6056 	struct binder_transaction_log *log = m->private;
6057 	unsigned int log_cur = atomic_read(&log->cur);
6058 	unsigned int count;
6059 	unsigned int cur;
6060 	int i;
6061 
6062 	count = log_cur + 1;
6063 	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6064 		0 : count % ARRAY_SIZE(log->entry);
6065 	if (count > ARRAY_SIZE(log->entry) || log->full)
6066 		count = ARRAY_SIZE(log->entry);
6067 	for (i = 0; i < count; i++) {
6068 		unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6069 
6070 		print_binder_transaction_log_entry(m, &log->entry[index]);
6071 	}
6072 	return 0;
6073 }
6074 
6075 const struct file_operations binder_fops = {
6076 	.owner = THIS_MODULE,
6077 	.poll = binder_poll,
6078 	.unlocked_ioctl = binder_ioctl,
6079 	.compat_ioctl = compat_ptr_ioctl,
6080 	.mmap = binder_mmap,
6081 	.open = binder_open,
6082 	.flush = binder_flush,
6083 	.release = binder_release,
6084 };
6085 
6086 static int __init init_binder_device(const char *name)
6087 {
6088 	int ret;
6089 	struct binder_device *binder_device;
6090 
6091 	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6092 	if (!binder_device)
6093 		return -ENOMEM;
6094 
6095 	binder_device->miscdev.fops = &binder_fops;
6096 	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6097 	binder_device->miscdev.name = name;
6098 
6099 	refcount_set(&binder_device->ref, 1);
6100 	binder_device->context.binder_context_mgr_uid = INVALID_UID;
6101 	binder_device->context.name = name;
6102 	mutex_init(&binder_device->context.context_mgr_node_lock);
6103 
6104 	ret = misc_register(&binder_device->miscdev);
6105 	if (ret < 0) {
6106 		kfree(binder_device);
6107 		return ret;
6108 	}
6109 
6110 	hlist_add_head(&binder_device->hlist, &binder_devices);
6111 
6112 	return ret;
6113 }
6114 
6115 static int __init binder_init(void)
6116 {
6117 	int ret;
6118 	char *device_name, *device_tmp;
6119 	struct binder_device *device;
6120 	struct hlist_node *tmp;
6121 	char *device_names = NULL;
6122 
6123 	ret = binder_alloc_shrinker_init();
6124 	if (ret)
6125 		return ret;
6126 
6127 	atomic_set(&binder_transaction_log.cur, ~0U);
6128 	atomic_set(&binder_transaction_log_failed.cur, ~0U);
6129 
6130 	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6131 	if (binder_debugfs_dir_entry_root)
6132 		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6133 						 binder_debugfs_dir_entry_root);
6134 
6135 	if (binder_debugfs_dir_entry_root) {
6136 		debugfs_create_file("state",
6137 				    0444,
6138 				    binder_debugfs_dir_entry_root,
6139 				    NULL,
6140 				    &binder_state_fops);
6141 		debugfs_create_file("stats",
6142 				    0444,
6143 				    binder_debugfs_dir_entry_root,
6144 				    NULL,
6145 				    &binder_stats_fops);
6146 		debugfs_create_file("transactions",
6147 				    0444,
6148 				    binder_debugfs_dir_entry_root,
6149 				    NULL,
6150 				    &binder_transactions_fops);
6151 		debugfs_create_file("transaction_log",
6152 				    0444,
6153 				    binder_debugfs_dir_entry_root,
6154 				    &binder_transaction_log,
6155 				    &binder_transaction_log_fops);
6156 		debugfs_create_file("failed_transaction_log",
6157 				    0444,
6158 				    binder_debugfs_dir_entry_root,
6159 				    &binder_transaction_log_failed,
6160 				    &binder_transaction_log_fops);
6161 	}
6162 
6163 	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6164 	    strcmp(binder_devices_param, "") != 0) {
6165 		/*
6166 		* Copy the module_parameter string, because we don't want to
6167 		* tokenize it in-place.
6168 		 */
6169 		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6170 		if (!device_names) {
6171 			ret = -ENOMEM;
6172 			goto err_alloc_device_names_failed;
6173 		}
6174 
6175 		device_tmp = device_names;
6176 		while ((device_name = strsep(&device_tmp, ","))) {
6177 			ret = init_binder_device(device_name);
6178 			if (ret)
6179 				goto err_init_binder_device_failed;
6180 		}
6181 	}
6182 
6183 	ret = init_binderfs();
6184 	if (ret)
6185 		goto err_init_binder_device_failed;
6186 
6187 	return ret;
6188 
6189 err_init_binder_device_failed:
6190 	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6191 		misc_deregister(&device->miscdev);
6192 		hlist_del(&device->hlist);
6193 		kfree(device);
6194 	}
6195 
6196 	kfree(device_names);
6197 
6198 err_alloc_device_names_failed:
6199 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6200 
6201 	return ret;
6202 }
6203 
6204 device_initcall(binder_init);
6205 
6206 #define CREATE_TRACE_POINTS
6207 #include "binder_trace.h"
6208 
6209 MODULE_LICENSE("GPL v2");
6210