xref: /openbmc/linux/drivers/android/binder.c (revision 2b77dcc5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder.c
3  *
4  * Android IPC Subsystem
5  *
6  * Copyright (C) 2007-2008 Google, Inc.
7  */
8 
9 /*
10  * Locking overview
11  *
12  * There are 3 main spinlocks which must be acquired in the
13  * order shown:
14  *
15  * 1) proc->outer_lock : protects binder_ref
16  *    binder_proc_lock() and binder_proc_unlock() are
17  *    used to acq/rel.
18  * 2) node->lock : protects most fields of binder_node.
19  *    binder_node_lock() and binder_node_unlock() are
20  *    used to acq/rel
21  * 3) proc->inner_lock : protects the thread and node lists
22  *    (proc->threads, proc->waiting_threads, proc->nodes)
23  *    and all todo lists associated with the binder_proc
24  *    (proc->todo, thread->todo, proc->delivered_death and
25  *    node->async_todo), as well as thread->transaction_stack
26  *    binder_inner_proc_lock() and binder_inner_proc_unlock()
27  *    are used to acq/rel
28  *
29  * Any lock under procA must never be nested under any lock at the same
30  * level or below on procB.
31  *
32  * Functions that require a lock held on entry indicate which lock
33  * in the suffix of the function name:
34  *
35  * foo_olocked() : requires proc->outer_lock
36  * foo_nlocked() : requires node->lock
37  * foo_ilocked() : requires proc->inner_lock
38  * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39  * foo_nilocked(): requires node->lock and proc->inner_lock
40  * ...
41  */
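/*
 * Illustrative sketch of the documented order (hypothetical caller,
 * not driver code); the helper functions defined further down wrap the
 * raw spin_lock() calls with debug logging:
 *
 *	binder_proc_lock(proc);		1) proc->outer_lock
 *	binder_node_lock(node);		2) node->lock
 *	binder_inner_proc_lock(proc);	3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */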
42 
43 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44 
45 #include <linux/fdtable.h>
46 #include <linux/file.h>
47 #include <linux/freezer.h>
48 #include <linux/fs.h>
49 #include <linux/list.h>
50 #include <linux/miscdevice.h>
51 #include <linux/module.h>
52 #include <linux/mutex.h>
53 #include <linux/nsproxy.h>
54 #include <linux/poll.h>
55 #include <linux/debugfs.h>
56 #include <linux/rbtree.h>
57 #include <linux/sched/signal.h>
58 #include <linux/sched/mm.h>
59 #include <linux/seq_file.h>
60 #include <linux/string.h>
61 #include <linux/uaccess.h>
62 #include <linux/pid_namespace.h>
63 #include <linux/security.h>
64 #include <linux/spinlock.h>
65 #include <linux/ratelimit.h>
66 #include <linux/syscalls.h>
67 #include <linux/task_work.h>
68 
69 #include <uapi/linux/android/binder.h>
70 #include <uapi/linux/android/binderfs.h>
71 
72 #include <asm/cacheflush.h>
73 
74 #include "binder_alloc.h"
75 #include "binder_internal.h"
76 #include "binder_trace.h"
77 
78 static HLIST_HEAD(binder_deferred_list);
79 static DEFINE_MUTEX(binder_deferred_lock);
80 
81 static HLIST_HEAD(binder_devices);
82 static HLIST_HEAD(binder_procs);
83 static DEFINE_MUTEX(binder_procs_lock);
84 
85 static HLIST_HEAD(binder_dead_nodes);
86 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
87 
88 static struct dentry *binder_debugfs_dir_entry_root;
89 static struct dentry *binder_debugfs_dir_entry_proc;
90 static atomic_t binder_last_id;
91 
92 static int proc_show(struct seq_file *m, void *unused);
93 DEFINE_SHOW_ATTRIBUTE(proc);
94 
95 /* This is only defined in include/asm-arm/sizes.h */
96 #ifndef SZ_1K
97 #define SZ_1K                               0x400
98 #endif
99 
100 #ifndef SZ_4M
101 #define SZ_4M                               0x400000
102 #endif
103 
104 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
105 
106 enum {
107 	BINDER_DEBUG_USER_ERROR             = 1U << 0,
108 	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
109 	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
110 	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
111 	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
112 	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
113 	BINDER_DEBUG_READ_WRITE             = 1U << 6,
114 	BINDER_DEBUG_USER_REFS              = 1U << 7,
115 	BINDER_DEBUG_THREADS                = 1U << 8,
116 	BINDER_DEBUG_TRANSACTION            = 1U << 9,
117 	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
118 	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
119 	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
120 	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
121 	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
122 };
123 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
124 	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
125 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
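/*
 * With 0644 permissions the mask can also be changed at runtime, e.g.
 * (sysfs path assumes the standard module parameter layout):
 *
 *	echo 0x3f > /sys/module/binder/parameters/debug_mask
 */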
126 
127 char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
128 module_param_named(devices, binder_devices_param, charp, 0444);
129 
130 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
131 static int binder_stop_on_user_error;
132 
133 static int binder_set_stop_on_user_error(const char *val,
134 					 const struct kernel_param *kp)
135 {
136 	int ret;
137 
138 	ret = param_set_int(val, kp);
139 	if (binder_stop_on_user_error < 2)
140 		wake_up(&binder_user_error_wait);
141 	return ret;
142 }
143 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
144 	param_get_int, &binder_stop_on_user_error, 0644);
145 
146 #define binder_debug(mask, x...) \
147 	do { \
148 		if (binder_debug_mask & mask) \
149 			pr_info_ratelimited(x); \
150 	} while (0)
151 
152 #define binder_user_error(x...) \
153 	do { \
154 		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
155 			pr_info_ratelimited(x); \
156 		if (binder_stop_on_user_error) \
157 			binder_stop_on_user_error = 2; \
158 	} while (0)
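/*
 * Usage sketch (hypothetical call sites): binder_debug() only logs when
 * the corresponding bit is set in binder_debug_mask, while
 * binder_user_error() additionally trips the stop_on_user_error latch:
 *
 *	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%d: open\n", pid);
 *	binder_user_error("%d: got bad handle %u\n", pid, handle);
 */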
159 
160 #define to_flat_binder_object(hdr) \
161 	container_of(hdr, struct flat_binder_object, hdr)
162 
163 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
164 
165 #define to_binder_buffer_object(hdr) \
166 	container_of(hdr, struct binder_buffer_object, hdr)
167 
168 #define to_binder_fd_array_object(hdr) \
169 	container_of(hdr, struct binder_fd_array_object, hdr)
170 
171 enum binder_stat_types {
172 	BINDER_STAT_PROC,
173 	BINDER_STAT_THREAD,
174 	BINDER_STAT_NODE,
175 	BINDER_STAT_REF,
176 	BINDER_STAT_DEATH,
177 	BINDER_STAT_TRANSACTION,
178 	BINDER_STAT_TRANSACTION_COMPLETE,
179 	BINDER_STAT_COUNT
180 };
181 
182 struct binder_stats {
183 	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
184 	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
185 	atomic_t obj_created[BINDER_STAT_COUNT];
186 	atomic_t obj_deleted[BINDER_STAT_COUNT];
187 };
188 
189 static struct binder_stats binder_stats;
190 
191 static inline void binder_stats_deleted(enum binder_stat_types type)
192 {
193 	atomic_inc(&binder_stats.obj_deleted[type]);
194 }
195 
196 static inline void binder_stats_created(enum binder_stat_types type)
197 {
198 	atomic_inc(&binder_stats.obj_created[type]);
199 }
200 
201 struct binder_transaction_log binder_transaction_log;
202 struct binder_transaction_log binder_transaction_log_failed;
203 
204 static struct binder_transaction_log_entry *binder_transaction_log_add(
205 	struct binder_transaction_log *log)
206 {
207 	struct binder_transaction_log_entry *e;
208 	unsigned int cur = atomic_inc_return(&log->cur);
209 
210 	if (cur >= ARRAY_SIZE(log->entry))
211 		log->full = true;
212 	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
213 	WRITE_ONCE(e->debug_id_done, 0);
214 	/*
215 	 * write-barrier to synchronize access to e->debug_id_done.
216 	 * We make sure the initialized 0 value is seen
217 	 * before the other fields are zeroed by memset().
218 	 */
219 	smp_wmb();
220 	memset(e, 0, sizeof(*e));
221 	return e;
222 }
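/*
 * The smp_wmb() above pairs with an smp_rmb() on the reader side. A
 * consumer-side sketch (the debugfs log printer later in this file
 * follows this pattern):
 *
 *	int debug_id = READ_ONCE(e->debug_id_done);
 *	smp_rmb();
 *	... print the entry fields ...
 *	the entry is consistent only if debug_id && debug_id == e->debug_id
 */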
223 
224 /**
225  * struct binder_work - work enqueued on a worklist
226  * @entry:             node enqueued on list
227  * @type:              type of work to be performed
228  *
229  * There are separate work lists for proc, thread, and node (async).
230  */
231 struct binder_work {
232 	struct list_head entry;
233 
234 	enum {
235 		BINDER_WORK_TRANSACTION = 1,
236 		BINDER_WORK_TRANSACTION_COMPLETE,
237 		BINDER_WORK_RETURN_ERROR,
238 		BINDER_WORK_NODE,
239 		BINDER_WORK_DEAD_BINDER,
240 		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
241 		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
242 	} type;
243 };
244 
245 struct binder_error {
246 	struct binder_work work;
247 	uint32_t cmd;
248 };
249 
250 /**
251  * struct binder_node - binder node bookkeeping
252  * @debug_id:             unique ID for debugging
253  *                        (invariant after initialized)
254  * @lock:                 lock for node fields
255  * @work:                 worklist element for node work
256  *                        (protected by @proc->inner_lock)
257  * @rb_node:              element for proc->nodes tree
258  *                        (protected by @proc->inner_lock)
259  * @dead_node:            element for binder_dead_nodes list
260  *                        (protected by binder_dead_nodes_lock)
261  * @proc:                 binder_proc that owns this node
262  *                        (invariant after initialized)
263  * @refs:                 list of references on this node
264  *                        (protected by @lock)
265  * @internal_strong_refs: used to take strong references when
266  *                        initiating a transaction
267  *                        (protected by @proc->inner_lock if @proc
268  *                        and by @lock)
269  * @local_weak_refs:      weak user refs from local process
270  *                        (protected by @proc->inner_lock if @proc
271  *                        and by @lock)
272  * @local_strong_refs:    strong user refs from local process
273  *                        (protected by @proc->inner_lock if @proc
274  *                        and by @lock)
275  * @tmp_refs:             temporary kernel refs
276  *                        (protected by @proc->inner_lock while @proc
277  *                        is valid, and by binder_dead_nodes_lock
278  *                        if @proc is NULL. During inc/dec and node release
279  *                        it is also protected by @lock to provide safety
280  *                        as the node dies and @proc becomes NULL)
281  * @ptr:                  userspace pointer for node
282  *                        (invariant, no lock needed)
283  * @cookie:               userspace cookie for node
284  *                        (invariant, no lock needed)
285  * @has_strong_ref:       userspace notified of strong ref
286  *                        (protected by @proc->inner_lock if @proc
287  *                        and by @lock)
288  * @pending_strong_ref:   userspace has acked notification of strong ref
289  *                        (protected by @proc->inner_lock if @proc
290  *                        and by @lock)
291  * @has_weak_ref:         userspace notified of weak ref
292  *                        (protected by @proc->inner_lock if @proc
293  *                        and by @lock)
294  * @pending_weak_ref:     userspace has acked notification of weak ref
295  *                        (protected by @proc->inner_lock if @proc
296  *                        and by @lock)
297  * @has_async_transaction: async transaction to node in progress
298  *                        (protected by @lock)
299  * @accept_fds:           file descriptor operations supported for node
300  *                        (invariant after initialized)
301  * @min_priority:         minimum scheduling priority
302  *                        (invariant after initialized)
303  * @txn_security_ctx:     require sender's security context
304  *                        (invariant after initialized)
305  * @async_todo:           list of async work items
306  *                        (protected by @proc->inner_lock)
307  *
308  * Bookkeeping structure for binder nodes.
309  */
310 struct binder_node {
311 	int debug_id;
312 	spinlock_t lock;
313 	struct binder_work work;
314 	union {
315 		struct rb_node rb_node;
316 		struct hlist_node dead_node;
317 	};
318 	struct binder_proc *proc;
319 	struct hlist_head refs;
320 	int internal_strong_refs;
321 	int local_weak_refs;
322 	int local_strong_refs;
323 	int tmp_refs;
324 	binder_uintptr_t ptr;
325 	binder_uintptr_t cookie;
326 	struct {
327 		/*
328 		 * bitfield elements protected by
329 		 * proc inner_lock
330 		 */
331 		u8 has_strong_ref:1;
332 		u8 pending_strong_ref:1;
333 		u8 has_weak_ref:1;
334 		u8 pending_weak_ref:1;
335 	};
336 	struct {
337 		/*
338 		 * invariant after initialization
339 		 */
340 		u8 accept_fds:1;
341 		u8 txn_security_ctx:1;
342 		u8 min_priority;
343 	};
344 	bool has_async_transaction;
345 	struct list_head async_todo;
346 };
347 
348 struct binder_ref_death {
349 	/**
350 	 * @work: worklist element for death notifications
351 	 *        (protected by inner_lock of the proc that
352 	 *        this ref belongs to)
353 	 */
354 	struct binder_work work;
355 	binder_uintptr_t cookie;
356 };
357 
358 /**
359  * struct binder_ref_data - binder_ref counts and id
360  * @debug_id:        unique ID for the ref
361  * @desc:            unique userspace handle for ref
362  * @strong:          strong ref count (debugging only if not locked)
363  * @weak:            weak ref count (debugging only if not locked)
364  *
365  * Structure to hold ref count and ref id information. Since
366  * the actual ref can only be accessed with a lock, this structure
367  * is used to return information about the ref to callers of
368  * ref inc/dec functions.
369  */
370 struct binder_ref_data {
371 	int debug_id;
372 	uint32_t desc;
373 	int strong;
374 	int weak;
375 };
376 
377 /**
378  * struct binder_ref - struct to track references on nodes
379  * @data:        binder_ref_data containing id, handle, and current refcounts
380  * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
381  * @rb_node_node: node for lookup by @node in proc's rb_tree
382  * @node_entry:  list entry for node->refs list in target node
383  *               (protected by @node->lock)
384  * @proc:        binder_proc containing ref
385  * @node:        binder_node of target node. When cleaning up a
386  *               ref for deletion in binder_cleanup_ref, a non-NULL
387  *               @node indicates the node must be freed
388  * @death:       pointer to death notification (ref_death) if requested
389  *               (protected by @node->lock)
390  *
391  * Structure to track references from procA to target node (on procB). This
392  * structure is unsafe to access without holding @proc->outer_lock.
393  */
394 struct binder_ref {
395 	/* Lookups needed: */
396 	/*   node + proc => ref (transaction) */
397 	/*   desc + proc => ref (transaction, inc/dec ref) */
398 	/*   node => refs + procs (proc exit) */
399 	struct binder_ref_data data;
400 	struct rb_node rb_node_desc;
401 	struct rb_node rb_node_node;
402 	struct hlist_node node_entry;
403 	struct binder_proc *proc;
404 	struct binder_node *node;
405 	struct binder_ref_death *death;
406 };
407 
408 enum binder_deferred_state {
409 	BINDER_DEFERRED_FLUSH        = 0x01,
410 	BINDER_DEFERRED_RELEASE      = 0x02,
411 };
412 
413 /**
414  * struct binder_proc - binder process bookkeeping
415  * @proc_node:            element for binder_procs list
416  * @threads:              rbtree of binder_threads in this proc
417  *                        (protected by @inner_lock)
418  * @nodes:                rbtree of binder nodes associated with
419  *                        this proc ordered by node->ptr
420  *                        (protected by @inner_lock)
421  * @refs_by_desc:         rbtree of refs ordered by ref->desc
422  *                        (protected by @outer_lock)
423  * @refs_by_node:         rbtree of refs ordered by ref->node
424  *                        (protected by @outer_lock)
425  * @waiting_threads:      threads currently waiting for proc work
426  *                        (protected by @inner_lock)
427  * @pid:                  PID of group_leader of process
428  *                        (invariant after initialized)
429  * @tsk:                  task_struct for group_leader of process
430  *                        (invariant after initialized)
431  * @deferred_work_node:   element for binder_deferred_list
432  *                        (protected by binder_deferred_lock)
433  * @deferred_work:        bitmap of deferred work to perform
434  *                        (protected by binder_deferred_lock)
435  * @is_dead:              process is dead and awaiting free
436  *                        when outstanding transactions are cleaned up
437  *                        (protected by @inner_lock)
438  * @todo:                 list of work for this process
439  *                        (protected by @inner_lock)
440  * @stats:                per-process binder statistics
441  *                        (atomics, no lock needed)
442  * @delivered_death:      list of delivered death notifications
443  *                        (protected by @inner_lock)
444  * @max_threads:          cap on number of binder threads
445  *                        (protected by @inner_lock)
446  * @requested_threads:    number of binder threads requested but not
447  *                        yet started. In current implementation, can
448  *                        only be 0 or 1.
449  *                        (protected by @inner_lock)
450  * @requested_threads_started: number of binder threads started
451  *                        (protected by @inner_lock)
452  * @tmp_ref:              temporary reference to indicate proc is in use
453  *                        (protected by @inner_lock)
454  * @default_priority:     default scheduler priority
455  *                        (invariant after initialized)
456  * @debugfs_entry:        debugfs node
457  * @alloc:                binder allocator bookkeeping
458  * @context:              binder_context for this proc
459  *                        (invariant after initialized)
460  * @inner_lock:           can nest under outer_lock and/or node lock
461  * @outer_lock:           no nesting under inner or node lock
462  *                        Lock order: 1) outer, 2) node, 3) inner
463  * @binderfs_entry:       process-specific binderfs log file
464  *
465  * Bookkeeping structure for binder processes
466  */
467 struct binder_proc {
468 	struct hlist_node proc_node;
469 	struct rb_root threads;
470 	struct rb_root nodes;
471 	struct rb_root refs_by_desc;
472 	struct rb_root refs_by_node;
473 	struct list_head waiting_threads;
474 	int pid;
475 	struct task_struct *tsk;
476 	struct hlist_node deferred_work_node;
477 	int deferred_work;
478 	bool is_dead;
479 
480 	struct list_head todo;
481 	struct binder_stats stats;
482 	struct list_head delivered_death;
483 	int max_threads;
484 	int requested_threads;
485 	int requested_threads_started;
486 	int tmp_ref;
487 	long default_priority;
488 	struct dentry *debugfs_entry;
489 	struct binder_alloc alloc;
490 	struct binder_context *context;
491 	spinlock_t inner_lock;
492 	spinlock_t outer_lock;
493 	struct dentry *binderfs_entry;
494 };
495 
496 enum {
497 	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
498 	BINDER_LOOPER_STATE_ENTERED     = 0x02,
499 	BINDER_LOOPER_STATE_EXITED      = 0x04,
500 	BINDER_LOOPER_STATE_INVALID     = 0x08,
501 	BINDER_LOOPER_STATE_WAITING     = 0x10,
502 	BINDER_LOOPER_STATE_POLL        = 0x20,
503 };
504 
505 /**
506  * struct binder_thread - binder thread bookkeeping
507  * @proc:                 binder process for this thread
508  *                        (invariant after initialization)
509  * @rb_node:              element for proc->threads rbtree
510  *                        (protected by @proc->inner_lock)
511  * @waiting_thread_node:  element for @proc->waiting_threads list
512  *                        (protected by @proc->inner_lock)
513  * @pid:                  PID for this thread
514  *                        (invariant after initialization)
515  * @looper:               bitmap of looping state
516  *                        (only accessed by this thread)
517  * @looper_need_return:   looping thread needs to exit driver
518  *                        (no lock needed)
519  * @transaction_stack:    stack of in-progress transactions for this thread
520  *                        (protected by @proc->inner_lock)
521  * @todo:                 list of work to do for this thread
522  *                        (protected by @proc->inner_lock)
523  * @process_todo:         whether work in @todo should be processed
524  *                        (protected by @proc->inner_lock)
525  * @return_error:         transaction errors reported by this thread
526  *                        (only accessed by this thread)
527  * @reply_error:          transaction errors reported by target thread
528  *                        (protected by @proc->inner_lock)
529  * @wait:                 wait queue for thread work
530  * @stats:                per-thread statistics
531  *                        (atomics, no lock needed)
532  * @tmp_ref:              temporary reference to indicate thread is in use
533  *                        (atomic since @proc->inner_lock cannot
534  *                        always be acquired)
535  * @is_dead:              thread is dead and awaiting free
536  *                        when outstanding transactions are cleaned up
537  *                        (protected by @proc->inner_lock)
538  *
539  * Bookkeeping structure for binder threads.
540  */
541 struct binder_thread {
542 	struct binder_proc *proc;
543 	struct rb_node rb_node;
544 	struct list_head waiting_thread_node;
545 	int pid;
546 	int looper;              /* only modified by this thread */
547 	bool looper_need_return; /* can be written by other thread */
548 	struct binder_transaction *transaction_stack;
549 	struct list_head todo;
550 	bool process_todo;
551 	struct binder_error return_error;
552 	struct binder_error reply_error;
553 	wait_queue_head_t wait;
554 	struct binder_stats stats;
555 	atomic_t tmp_ref;
556 	bool is_dead;
557 };
558 
559 /**
560  * struct binder_txn_fd_fixup - transaction fd fixup list element
561  * @fixup_entry:          list entry
562  * @file:                 struct file to be associated with new fd
563  * @offset:               offset in buffer data to this fixup
564  *
565  * List element for fd fixups in a transaction. Since file
566  * descriptors need to be allocated in the context of the
567  * target process, we pass each fd to be processed in this
568  * struct.
569  */
570 struct binder_txn_fd_fixup {
571 	struct list_head fixup_entry;
572 	struct file *file;
573 	size_t offset;
574 };
575 
576 struct binder_transaction {
577 	int debug_id;
578 	struct binder_work work;
579 	struct binder_thread *from;
580 	struct binder_transaction *from_parent;
581 	struct binder_proc *to_proc;
582 	struct binder_thread *to_thread;
583 	struct binder_transaction *to_parent;
584 	unsigned need_reply:1;
585 	/* unsigned is_dead:1; */	/* not used at the moment */
586 
587 	struct binder_buffer *buffer;
588 	unsigned int	code;
589 	unsigned int	flags;
590 	long	priority;
591 	long	saved_priority;
592 	kuid_t	sender_euid;
593 	struct list_head fd_fixups;
594 	binder_uintptr_t security_ctx;
595 	/**
596 	 * @lock:  protects @from, @to_proc, and @to_thread
597 	 *
598 	 * @from, @to_proc, and @to_thread can be set to NULL
599 	 * during thread teardown
600 	 */
601 	spinlock_t lock;
602 };
603 
604 /**
605  * struct binder_object - union of flat binder object types
606  * @hdr:   generic object header
607  * @fbo:   binder object (nodes and refs)
608  * @fdo:   file descriptor object
609  * @bbo:   binder buffer pointer
610  * @fdao:  file descriptor array
611  *
612  * Used for type-independent object copies
613  */
614 struct binder_object {
615 	union {
616 		struct binder_object_header hdr;
617 		struct flat_binder_object fbo;
618 		struct binder_fd_object fdo;
619 		struct binder_buffer_object bbo;
620 		struct binder_fd_array_object fdao;
621 	};
622 };
623 
624 /**
625  * binder_proc_lock() - Acquire outer lock for given binder_proc
626  * @proc:         struct binder_proc to acquire
627  *
628  * Acquires proc->outer_lock. Used to protect binder_ref
629  * structures associated with the given proc.
630  */
631 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
632 static void
633 _binder_proc_lock(struct binder_proc *proc, int line)
634 	__acquires(&proc->outer_lock)
635 {
636 	binder_debug(BINDER_DEBUG_SPINLOCKS,
637 		     "%s: line=%d\n", __func__, line);
638 	spin_lock(&proc->outer_lock);
639 }
640 
641 /**
642  * binder_proc_unlock() - Release outer lock for given binder_proc
643  * @proc:         struct binder_proc to release
644  *
645  * Release lock acquired via binder_proc_lock()
646  */
647 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
648 static void
649 _binder_proc_unlock(struct binder_proc *proc, int line)
650 	__releases(&proc->outer_lock)
651 {
652 	binder_debug(BINDER_DEBUG_SPINLOCKS,
653 		     "%s: line=%d\n", __func__, line);
654 	spin_unlock(&proc->outer_lock);
655 }
656 
657 /**
658  * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
659  * @proc:         struct binder_proc to acquire
660  *
661  * Acquires proc->inner_lock. Used to protect todo lists
662  */
663 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
664 static void
665 _binder_inner_proc_lock(struct binder_proc *proc, int line)
666 	__acquires(&proc->inner_lock)
667 {
668 	binder_debug(BINDER_DEBUG_SPINLOCKS,
669 		     "%s: line=%d\n", __func__, line);
670 	spin_lock(&proc->inner_lock);
671 }
672 
673 /**
674  * binder_inner_proc_unlock() - Release inner lock for given binder_proc
675  * @proc:         struct binder_proc to release
676  *
677  * Release lock acquired via binder_inner_proc_lock()
678  */
679 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
680 static void
681 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
682 	__releases(&proc->inner_lock)
683 {
684 	binder_debug(BINDER_DEBUG_SPINLOCKS,
685 		     "%s: line=%d\n", __func__, line);
686 	spin_unlock(&proc->inner_lock);
687 }
688 
689 /**
690  * binder_node_lock() - Acquire spinlock for given binder_node
691  * @node:         struct binder_node to acquire
692  *
693  * Acquires node->lock. Used to protect binder_node fields
694  */
695 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
696 static void
697 _binder_node_lock(struct binder_node *node, int line)
698 	__acquires(&node->lock)
699 {
700 	binder_debug(BINDER_DEBUG_SPINLOCKS,
701 		     "%s: line=%d\n", __func__, line);
702 	spin_lock(&node->lock);
703 }
704 
705 /**
706  * binder_node_unlock() - Release spinlock for given binder_node
707  * @node:         struct binder_node to release
708  *
709  * Release lock acquired via binder_node_lock()
710  */
711 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
712 static void
713 _binder_node_unlock(struct binder_node *node, int line)
714 	__releases(&node->lock)
715 {
716 	binder_debug(BINDER_DEBUG_SPINLOCKS,
717 		     "%s: line=%d\n", __func__, line);
718 	spin_unlock(&node->lock);
719 }
720 
721 /**
722  * binder_node_inner_lock() - Acquire node and inner locks
723  * @node:         struct binder_node to acquire
724  *
725  * Acquires node->lock. If node->proc is non-NULL, also acquires
726  * proc->inner_lock. Used to protect binder_node fields
727  */
728 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
729 static void
730 _binder_node_inner_lock(struct binder_node *node, int line)
731 	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
732 {
733 	binder_debug(BINDER_DEBUG_SPINLOCKS,
734 		     "%s: line=%d\n", __func__, line);
735 	spin_lock(&node->lock);
736 	if (node->proc)
737 		binder_inner_proc_lock(node->proc);
738 	else
739 		/* annotation for sparse */
740 		__acquire(&node->proc->inner_lock);
741 }
742 
743 /**
744  * binder_node_inner_unlock() - Release node and inner locks
745  * @node:         struct binder_node to release
746  *
747  * Release locks acquired via binder_node_inner_lock()
748  */
749 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
750 static void
751 _binder_node_inner_unlock(struct binder_node *node, int line)
752 	__releases(&node->lock) __releases(&node->proc->inner_lock)
753 {
754 	struct binder_proc *proc = node->proc;
755 
756 	binder_debug(BINDER_DEBUG_SPINLOCKS,
757 		     "%s: line=%d\n", __func__, line);
758 	if (proc)
759 		binder_inner_proc_unlock(proc);
760 	else
761 		/* annotation for sparse */
762 		__release(&node->proc->inner_lock);
763 	spin_unlock(&node->lock);
764 }
765 
766 static bool binder_worklist_empty_ilocked(struct list_head *list)
767 {
768 	return list_empty(list);
769 }
770 
771 /**
772  * binder_worklist_empty() - Check if no items on the work list
773  * @proc:       binder_proc associated with list
774  * @list:	list to check
775  *
776  * Return: true if there are no items on list, else false
777  */
778 static bool binder_worklist_empty(struct binder_proc *proc,
779 				  struct list_head *list)
780 {
781 	bool ret;
782 
783 	binder_inner_proc_lock(proc);
784 	ret = binder_worklist_empty_ilocked(list);
785 	binder_inner_proc_unlock(proc);
786 	return ret;
787 }
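/*
 * Per the locking overview, the _ilocked suffix means proc->inner_lock
 * is already held. A caller sketch that drains a list under the lock
 * (hypothetical, mirroring how the wrappers here are built):
 *
 *	binder_inner_proc_lock(proc);
 *	while (!binder_worklist_empty_ilocked(&proc->todo))
 *		... handle one item ...
 *	binder_inner_proc_unlock(proc);
 */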
788 
789 /**
790  * binder_enqueue_work_ilocked() - Add an item to the work list
791  * @work:         struct binder_work to add to list
792  * @target_list:  list to add work to
793  *
794  * Adds the work to the specified list. Asserts that work
795  * is not already on a list.
796  *
797  * Requires the proc->inner_lock to be held.
798  */
799 static void
800 binder_enqueue_work_ilocked(struct binder_work *work,
801 			   struct list_head *target_list)
802 {
803 	BUG_ON(target_list == NULL);
804 	BUG_ON(work->entry.next && !list_empty(&work->entry));
805 	list_add_tail(&work->entry, target_list);
806 }
807 
808 /**
809  * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
810  * @thread:       thread to queue work to
811  * @work:         struct binder_work to add to list
812  *
813  * Adds the work to the todo list of the thread. Doesn't set the process_todo
814  * flag, which means that (if it wasn't already set) the thread will go to
815  * sleep without handling this work when it calls read.
816  *
817  * Requires the proc->inner_lock to be held.
818  */
819 static void
820 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
821 					    struct binder_work *work)
822 {
823 	WARN_ON(!list_empty(&thread->waiting_thread_node));
824 	binder_enqueue_work_ilocked(work, &thread->todo);
825 }
826 
827 /**
828  * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
829  * @thread:       thread to queue work to
830  * @work:         struct binder_work to add to list
831  *
832  * Adds the work to the todo list of the thread, and enables processing
833  * of the todo queue.
834  *
835  * Requires the proc->inner_lock to be held.
836  */
837 static void
838 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
839 				   struct binder_work *work)
840 {
841 	WARN_ON(!list_empty(&thread->waiting_thread_node));
842 	binder_enqueue_work_ilocked(work, &thread->todo);
843 	thread->process_todo = true;
844 }
845 
846 /**
847  * binder_enqueue_thread_work() - Add an item to the thread work list
848  * @thread:       thread to queue work to
849  * @work:         struct binder_work to add to list
850  *
851  * Adds the work to the todo list of the thread, and enables processing
852  * of the todo queue.
853  */
854 static void
855 binder_enqueue_thread_work(struct binder_thread *thread,
856 			   struct binder_work *work)
857 {
858 	binder_inner_proc_lock(thread->proc);
859 	binder_enqueue_thread_work_ilocked(thread, work);
860 	binder_inner_proc_unlock(thread->proc);
861 }
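/*
 * Typical producer sketch (hypothetical transaction "t"): set the work
 * type, then enqueue so the target thread is marked ready to process:
 *
 *	t->work.type = BINDER_WORK_TRANSACTION;
 *	binder_enqueue_thread_work(thread, &t->work);
 */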
862 
863 static void
864 binder_dequeue_work_ilocked(struct binder_work *work)
865 {
866 	list_del_init(&work->entry);
867 }
868 
869 /**
870  * binder_dequeue_work() - Removes an item from the work list
871  * @proc:         binder_proc associated with list
872  * @work:         struct binder_work to remove from list
873  *
874  * Removes the specified work item from whatever list it is on.
875  * Can safely be called if work is not on any list.
876  */
877 static void
878 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
879 {
880 	binder_inner_proc_lock(proc);
881 	binder_dequeue_work_ilocked(work);
882 	binder_inner_proc_unlock(proc);
883 }
884 
885 static struct binder_work *binder_dequeue_work_head_ilocked(
886 					struct list_head *list)
887 {
888 	struct binder_work *w;
889 
890 	w = list_first_entry_or_null(list, struct binder_work, entry);
891 	if (w)
892 		list_del_init(&w->entry);
893 	return w;
894 }
895 
896 /**
897  * binder_dequeue_work_head() - Dequeues the item at head of list
898  * @proc:         binder_proc associated with list
899  * @list:         list to dequeue head
900  *
901  * Removes the head of the list if there are items on the list
902  *
903  * Return: pointer to dequeued binder_work, or NULL if list was empty
904  */
905 static struct binder_work *binder_dequeue_work_head(
906 					struct binder_proc *proc,
907 					struct list_head *list)
908 {
909 	struct binder_work *w;
910 
911 	binder_inner_proc_lock(proc);
912 	w = binder_dequeue_work_head_ilocked(list);
913 	binder_inner_proc_unlock(proc);
914 	return w;
915 }
916 
917 static void
918 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
919 static void binder_free_thread(struct binder_thread *thread);
920 static void binder_free_proc(struct binder_proc *proc);
921 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
922 
923 static bool binder_has_work_ilocked(struct binder_thread *thread,
924 				    bool do_proc_work)
925 {
926 	return thread->process_todo ||
927 		thread->looper_need_return ||
928 		(do_proc_work &&
929 		 !binder_worklist_empty_ilocked(&thread->proc->todo));
930 }
931 
932 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
933 {
934 	bool has_work;
935 
936 	binder_inner_proc_lock(thread->proc);
937 	has_work = binder_has_work_ilocked(thread, do_proc_work);
938 	binder_inner_proc_unlock(thread->proc);
939 
940 	return has_work;
941 }
942 
943 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
944 {
945 	return !thread->transaction_stack &&
946 		binder_worklist_empty_ilocked(&thread->todo) &&
947 		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
948 				   BINDER_LOOPER_STATE_REGISTERED));
949 }
950 
951 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
952 					       bool sync)
953 {
954 	struct rb_node *n;
955 	struct binder_thread *thread;
956 
957 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
958 		thread = rb_entry(n, struct binder_thread, rb_node);
959 		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
960 		    binder_available_for_proc_work_ilocked(thread)) {
961 			if (sync)
962 				wake_up_interruptible_sync(&thread->wait);
963 			else
964 				wake_up_interruptible(&thread->wait);
965 		}
966 	}
967 }
968 
969 /**
970  * binder_select_thread_ilocked() - selects a thread for doing proc work.
971  * @proc:	process to select a thread from
972  *
973  * Note that calling this function moves the thread off the waiting_threads
974  * list, so it can only be woken up by the caller of this function, or a
975  * signal. Therefore, callers *should* always wake up the thread this function
976  * returns.
977  *
978  * Return:	If there's a thread currently waiting for process work,
979  *		returns that thread. Otherwise returns NULL.
980  */
981 static struct binder_thread *
982 binder_select_thread_ilocked(struct binder_proc *proc)
983 {
984 	struct binder_thread *thread;
985 
986 	assert_spin_locked(&proc->inner_lock);
987 	thread = list_first_entry_or_null(&proc->waiting_threads,
988 					  struct binder_thread,
989 					  waiting_thread_node);
990 
991 	if (thread)
992 		list_del_init(&thread->waiting_thread_node);
993 
994 	return thread;
995 }
996 
997 /**
998  * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
999  * @proc:	process to wake up a thread in
1000  * @thread:	specific thread to wake-up (may be NULL)
1001  * @sync:	whether to do a synchronous wake-up
1002  *
1003  * This function wakes up a thread in the @proc process.
1004  * The caller may provide a specific thread to wake-up in
1005  * the @thread parameter. If @thread is NULL, this function
1006  * will wake up threads that have called poll().
1007  *
1008  * Note that for this function to work as expected, callers
1009  * should first call binder_select_thread_ilocked() to find a thread
1010  * to handle the work (if they don't have a thread already),
1011  * and pass the result into the @thread parameter.
1012  */
1013 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
1014 					 struct binder_thread *thread,
1015 					 bool sync)
1016 {
1017 	assert_spin_locked(&proc->inner_lock);
1018 
1019 	if (thread) {
1020 		if (sync)
1021 			wake_up_interruptible_sync(&thread->wait);
1022 		else
1023 			wake_up_interruptible(&thread->wait);
1024 		return;
1025 	}
1026 
1027 	/* Didn't find a thread waiting for proc work; this can happen
1028 	 * in two scenarios:
1029 	 * 1. All threads are busy handling transactions
1030 	 *    In that case, one of those threads should call back into
1031 	 *    the kernel driver soon and pick up this work.
1032 	 * 2. Threads are using the (e)poll interface, in which case
1033 	 *    they may be blocked on the waitqueue without having been
1034 	 *    added to waiting_threads. For this case, we just iterate
1035 	 *    over all threads not handling transaction work, and
1036 	 *    wake them all up. We wake all because we don't know whether
1037 	 *    a thread that called into (e)poll is handling non-binder
1038 	 *    work currently.
1039 	 */
1040 	binder_wakeup_poll_threads_ilocked(proc, sync);
1041 }
1042 
1043 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1044 {
1045 	struct binder_thread *thread = binder_select_thread_ilocked(proc);
1046 
1047 	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1048 }
1049 
1050 static void binder_set_nice(long nice)
1051 {
1052 	long min_nice;
1053 
1054 	if (can_nice(current, nice)) {
1055 		set_user_nice(current, nice);
1056 		return;
1057 	}
1058 	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
1059 	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1060 		     "%d: nice value %ld not allowed use %ld instead\n",
1061 		      current->pid, nice, min_nice);
1062 	set_user_nice(current, min_nice);
1063 	if (min_nice <= MAX_NICE)
1064 		return;
1065 	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
1066 }
1067 
1068 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1069 						   binder_uintptr_t ptr)
1070 {
1071 	struct rb_node *n = proc->nodes.rb_node;
1072 	struct binder_node *node;
1073 
1074 	assert_spin_locked(&proc->inner_lock);
1075 
1076 	while (n) {
1077 		node = rb_entry(n, struct binder_node, rb_node);
1078 
1079 		if (ptr < node->ptr)
1080 			n = n->rb_left;
1081 		else if (ptr > node->ptr)
1082 			n = n->rb_right;
1083 		else {
1084 			/*
1085 			 * take an implicit weak reference
1086 			 * to ensure node stays alive until
1087 			 * call to binder_put_node()
1088 			 */
1089 			binder_inc_node_tmpref_ilocked(node);
1090 			return node;
1091 		}
1092 	}
1093 	return NULL;
1094 }
1095 
1096 static struct binder_node *binder_get_node(struct binder_proc *proc,
1097 					   binder_uintptr_t ptr)
1098 {
1099 	struct binder_node *node;
1100 
1101 	binder_inner_proc_lock(proc);
1102 	node = binder_get_node_ilocked(proc, ptr);
1103 	binder_inner_proc_unlock(proc);
1104 	return node;
1105 }
1106 
1107 static struct binder_node *binder_init_node_ilocked(
1108 						struct binder_proc *proc,
1109 						struct binder_node *new_node,
1110 						struct flat_binder_object *fp)
1111 {
1112 	struct rb_node **p = &proc->nodes.rb_node;
1113 	struct rb_node *parent = NULL;
1114 	struct binder_node *node;
1115 	binder_uintptr_t ptr = fp ? fp->binder : 0;
1116 	binder_uintptr_t cookie = fp ? fp->cookie : 0;
1117 	__u32 flags = fp ? fp->flags : 0;
1118 
1119 	assert_spin_locked(&proc->inner_lock);
1120 
1121 	while (*p) {
1122 
1123 		parent = *p;
1124 		node = rb_entry(parent, struct binder_node, rb_node);
1125 
1126 		if (ptr < node->ptr)
1127 			p = &(*p)->rb_left;
1128 		else if (ptr > node->ptr)
1129 			p = &(*p)->rb_right;
1130 		else {
1131 			/*
1132 			 * A matching node is already in
1133 			 * the rb tree. Abandon the init
1134 			 * and return it.
1135 			 */
1136 			binder_inc_node_tmpref_ilocked(node);
1137 			return node;
1138 		}
1139 	}
1140 	node = new_node;
1141 	binder_stats_created(BINDER_STAT_NODE);
1142 	node->tmp_refs++;
1143 	rb_link_node(&node->rb_node, parent, p);
1144 	rb_insert_color(&node->rb_node, &proc->nodes);
1145 	node->debug_id = atomic_inc_return(&binder_last_id);
1146 	node->proc = proc;
1147 	node->ptr = ptr;
1148 	node->cookie = cookie;
1149 	node->work.type = BINDER_WORK_NODE;
1150 	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1151 	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1152 	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
1153 	spin_lock_init(&node->lock);
1154 	INIT_LIST_HEAD(&node->work.entry);
1155 	INIT_LIST_HEAD(&node->async_todo);
1156 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1157 		     "%d:%d node %d u%016llx c%016llx created\n",
1158 		     proc->pid, current->pid, node->debug_id,
1159 		     (u64)node->ptr, (u64)node->cookie);
1160 
1161 	return node;
1162 }
1163 
1164 static struct binder_node *binder_new_node(struct binder_proc *proc,
1165 					   struct flat_binder_object *fp)
1166 {
1167 	struct binder_node *node;
1168 	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1169 
1170 	if (!new_node)
1171 		return NULL;
1172 	binder_inner_proc_lock(proc);
1173 	node = binder_init_node_ilocked(proc, new_node, fp);
1174 	binder_inner_proc_unlock(proc);
1175 	if (node != new_node)
1176 		/*
1177 		 * The node was already added by another thread
1178 		 */
1179 		kfree(new_node);
1180 
1181 	return node;
1182 }
1183 
1184 static void binder_free_node(struct binder_node *node)
1185 {
1186 	kfree(node);
1187 	binder_stats_deleted(BINDER_STAT_NODE);
1188 }
1189 
1190 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1191 				    int internal,
1192 				    struct list_head *target_list)
1193 {
1194 	struct binder_proc *proc = node->proc;
1195 
1196 	assert_spin_locked(&node->lock);
1197 	if (proc)
1198 		assert_spin_locked(&proc->inner_lock);
1199 	if (strong) {
1200 		if (internal) {
1201 			if (target_list == NULL &&
1202 			    node->internal_strong_refs == 0 &&
1203 			    !(node->proc &&
1204 			      node == node->proc->context->binder_context_mgr_node &&
1205 			      node->has_strong_ref)) {
1206 				pr_err("invalid inc strong node for %d\n",
1207 					node->debug_id);
1208 				return -EINVAL;
1209 			}
1210 			node->internal_strong_refs++;
1211 		} else
1212 			node->local_strong_refs++;
1213 		if (!node->has_strong_ref && target_list) {
1214 			struct binder_thread *thread = container_of(target_list,
1215 						    struct binder_thread, todo);
1216 			binder_dequeue_work_ilocked(&node->work);
1217 			BUG_ON(&thread->todo != target_list);
1218 			binder_enqueue_deferred_thread_work_ilocked(thread,
1219 								   &node->work);
1220 		}
1221 	} else {
1222 		if (!internal)
1223 			node->local_weak_refs++;
1224 		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1225 			if (target_list == NULL) {
1226 				pr_err("invalid inc weak node for %d\n",
1227 					node->debug_id);
1228 				return -EINVAL;
1229 			}
1230 			/*
1231 			 * See comment above
1232 			 */
1233 			binder_enqueue_work_ilocked(&node->work, target_list);
1234 		}
1235 	}
1236 	return 0;
1237 }
1238 
1239 static int binder_inc_node(struct binder_node *node, int strong, int internal,
1240 			   struct list_head *target_list)
1241 {
1242 	int ret;
1243 
1244 	binder_node_inner_lock(node);
1245 	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1246 	binder_node_inner_unlock(node);
1247 
1248 	return ret;
1249 }
1250 
1251 static bool binder_dec_node_nilocked(struct binder_node *node,
1252 				     int strong, int internal)
1253 {
1254 	struct binder_proc *proc = node->proc;
1255 
1256 	assert_spin_locked(&node->lock);
1257 	if (proc)
1258 		assert_spin_locked(&proc->inner_lock);
1259 	if (strong) {
1260 		if (internal)
1261 			node->internal_strong_refs--;
1262 		else
1263 			node->local_strong_refs--;
1264 		if (node->local_strong_refs || node->internal_strong_refs)
1265 			return false;
1266 	} else {
1267 		if (!internal)
1268 			node->local_weak_refs--;
1269 		if (node->local_weak_refs || node->tmp_refs ||
1270 				!hlist_empty(&node->refs))
1271 			return false;
1272 	}
1273 
1274 	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1275 		if (list_empty(&node->work.entry)) {
1276 			binder_enqueue_work_ilocked(&node->work, &proc->todo);
1277 			binder_wakeup_proc_ilocked(proc);
1278 		}
1279 	} else {
1280 		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1281 		    !node->local_weak_refs && !node->tmp_refs) {
1282 			if (proc) {
1283 				binder_dequeue_work_ilocked(&node->work);
1284 				rb_erase(&node->rb_node, &proc->nodes);
1285 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1286 					     "refless node %d deleted\n",
1287 					     node->debug_id);
1288 			} else {
1289 				BUG_ON(!list_empty(&node->work.entry));
1290 				spin_lock(&binder_dead_nodes_lock);
1291 				/*
1292 				 * tmp_refs could have changed so
1293 				 * check it again
1294 				 */
1295 				if (node->tmp_refs) {
1296 					spin_unlock(&binder_dead_nodes_lock);
1297 					return false;
1298 				}
1299 				hlist_del(&node->dead_node);
1300 				spin_unlock(&binder_dead_nodes_lock);
1301 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1302 					     "dead node %d deleted\n",
1303 					     node->debug_id);
1304 			}
1305 			return true;
1306 		}
1307 	}
1308 	return false;
1309 }
1310 
1311 static void binder_dec_node(struct binder_node *node, int strong, int internal)
1312 {
1313 	bool free_node;
1314 
1315 	binder_node_inner_lock(node);
1316 	free_node = binder_dec_node_nilocked(node, strong, internal);
1317 	binder_node_inner_unlock(node);
1318 	if (free_node)
1319 		binder_free_node(node);
1320 }
1321 
1322 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1323 {
1324 	/*
1325 	 * No call to binder_inc_node() is needed since we
1326 	 * don't need to inform userspace of any changes to
1327 	 * tmp_refs
1328 	 */
1329 	node->tmp_refs++;
1330 }
1331 
1332 /**
1333  * binder_inc_node_tmpref() - take a temporary reference on node
1334  * @node:	node to reference
1335  *
1336  * Take reference on node to prevent the node from being freed
1337  * while referenced only by a local variable. The inner lock is
1338  * needed to serialize with the node work on the queue (which
1339  * isn't needed after the node is dead). If the node is dead
1340  * (node->proc is NULL), use binder_dead_nodes_lock to protect
1341  * node->tmp_refs against dead-node-only cases where the node
1342  * lock cannot be acquired (e.g. traversing the dead node list to
1343  * print nodes)
1344  */
1345 static void binder_inc_node_tmpref(struct binder_node *node)
1346 {
1347 	binder_node_lock(node);
1348 	if (node->proc)
1349 		binder_inner_proc_lock(node->proc);
1350 	else
1351 		spin_lock(&binder_dead_nodes_lock);
1352 	binder_inc_node_tmpref_ilocked(node);
1353 	if (node->proc)
1354 		binder_inner_proc_unlock(node->proc);
1355 	else
1356 		spin_unlock(&binder_dead_nodes_lock);
1357 	binder_node_unlock(node);
1358 }
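/*
 * Typical temporary-reference lifetime (sketch; binder_get_node() above
 * takes the tmpref implicitly via binder_inc_node_tmpref_ilocked()):
 *
 *	node = binder_get_node(proc, ptr);
 *	if (node) {
 *		... use node safely ...
 *		binder_put_node(node);
 *	}
 */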
1359 
1360 /**
1361  * binder_dec_node_tmpref() - remove a temporary reference on node
1362  * @node:	node to reference
1363  *
1364  * Release temporary reference on node taken via binder_inc_node_tmpref()
1365  */
1366 static void binder_dec_node_tmpref(struct binder_node *node)
1367 {
1368 	bool free_node;
1369 
1370 	binder_node_inner_lock(node);
1371 	if (!node->proc)
1372 		spin_lock(&binder_dead_nodes_lock);
1373 	else
1374 		__acquire(&binder_dead_nodes_lock);
1375 	node->tmp_refs--;
1376 	BUG_ON(node->tmp_refs < 0);
1377 	if (!node->proc)
1378 		spin_unlock(&binder_dead_nodes_lock);
1379 	else
1380 		__release(&binder_dead_nodes_lock);
1381 	/*
1382 	 * Call binder_dec_node() to check if all refcounts are 0
1383 	 * and cleanup is needed. Calling with strong=0 and internal=1
1384 	 * causes no actual reference to be released in binder_dec_node().
1385 	 * If that changes, a change is needed here too.
1386 	 */
1387 	free_node = binder_dec_node_nilocked(node, 0, 1);
1388 	binder_node_inner_unlock(node);
1389 	if (free_node)
1390 		binder_free_node(node);
1391 }
1392 
1393 static void binder_put_node(struct binder_node *node)
1394 {
1395 	binder_dec_node_tmpref(node);
1396 }
1397 
1398 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1399 						 u32 desc, bool need_strong_ref)
1400 {
1401 	struct rb_node *n = proc->refs_by_desc.rb_node;
1402 	struct binder_ref *ref;
1403 
1404 	while (n) {
1405 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1406 
1407 		if (desc < ref->data.desc) {
1408 			n = n->rb_left;
1409 		} else if (desc > ref->data.desc) {
1410 			n = n->rb_right;
1411 		} else if (need_strong_ref && !ref->data.strong) {
1412 			binder_user_error("tried to use weak ref as strong ref\n");
1413 			return NULL;
1414 		} else {
1415 			return ref;
1416 		}
1417 	}
1418 	return NULL;
1419 }
1420 
1421 /**
1422  * binder_get_ref_for_node_olocked() - get the ref associated with given node
1423  * @proc:	binder_proc that owns the ref
1424  * @node:	binder_node of target
1425  * @new_ref:	newly allocated binder_ref to be initialized or %NULL
1426  *
1427  * Look up the ref for the given node and return it if it exists
1428  *
1429  * If it doesn't exist and the caller provides a newly allocated
1430  * ref, initialize the fields of the newly allocated ref and insert
1431  * into the given proc rb_trees and node refs list.
1432  *
1433  * Return:	the ref for node. It is possible that another thread
1434  *		allocated/initialized the ref first in which case the
1435  *		returned ref would be different from the passed-in
1436  *		new_ref. new_ref must be kfree'd by the caller in
1437  *		this case.
1438  */
1439 static struct binder_ref *binder_get_ref_for_node_olocked(
1440 					struct binder_proc *proc,
1441 					struct binder_node *node,
1442 					struct binder_ref *new_ref)
1443 {
1444 	struct binder_context *context = proc->context;
1445 	struct rb_node **p = &proc->refs_by_node.rb_node;
1446 	struct rb_node *parent = NULL;
1447 	struct binder_ref *ref;
1448 	struct rb_node *n;
1449 
1450 	while (*p) {
1451 		parent = *p;
1452 		ref = rb_entry(parent, struct binder_ref, rb_node_node);
1453 
1454 		if (node < ref->node)
1455 			p = &(*p)->rb_left;
1456 		else if (node > ref->node)
1457 			p = &(*p)->rb_right;
1458 		else
1459 			return ref;
1460 	}
1461 	if (!new_ref)
1462 		return NULL;
1463 
1464 	binder_stats_created(BINDER_STAT_REF);
1465 	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1466 	new_ref->proc = proc;
1467 	new_ref->node = node;
1468 	rb_link_node(&new_ref->rb_node_node, parent, p);
1469 	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1470 
1471 	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1472 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1473 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1474 		if (ref->data.desc > new_ref->data.desc)
1475 			break;
1476 		new_ref->data.desc = ref->data.desc + 1;
1477 	}
1478 
1479 	p = &proc->refs_by_desc.rb_node;
1480 	while (*p) {
1481 		parent = *p;
1482 		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1483 
1484 		if (new_ref->data.desc < ref->data.desc)
1485 			p = &(*p)->rb_left;
1486 		else if (new_ref->data.desc > ref->data.desc)
1487 			p = &(*p)->rb_right;
1488 		else
1489 			BUG();
1490 	}
1491 	rb_link_node(&new_ref->rb_node_desc, parent, p);
1492 	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1493 
1494 	binder_node_lock(node);
1495 	hlist_add_head(&new_ref->node_entry, &node->refs);
1496 
1497 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1498 		     "%d new ref %d desc %d for node %d\n",
1499 		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1500 		      node->debug_id);
1501 	binder_node_unlock(node);
1502 	return new_ref;
1503 }
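/*
 * Caller-side sketch of the race-tolerant pattern this function
 * supports (binder_inc_ref_for_node() below uses a variant of it):
 * allocate outside the lock, then free the loser of the race:
 *
 *	new_ref = kzalloc(sizeof(*new_ref), GFP_KERNEL);
 *	binder_proc_lock(proc);
 *	ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
 *	binder_proc_unlock(proc);
 *	if (ref != new_ref)
 *		kfree(new_ref);
 */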
1504 
1505 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1506 {
1507 	bool delete_node = false;
1508 
1509 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1510 		     "%d delete ref %d desc %d for node %d\n",
1511 		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
1512 		      ref->node->debug_id);
1513 
1514 	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1515 	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1516 
1517 	binder_node_inner_lock(ref->node);
1518 	if (ref->data.strong)
1519 		binder_dec_node_nilocked(ref->node, 1, 1);
1520 
1521 	hlist_del(&ref->node_entry);
1522 	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1523 	binder_node_inner_unlock(ref->node);
1524 	/*
1525 	 * Clear ref->node unless we want the caller to free the node
1526 	 */
1527 	if (!delete_node) {
1528 		/*
1529 		 * The caller uses ref->node to determine
1530 		 * whether the node needs to be freed. Clear
1531 		 * it since the node is still alive.
1532 		 */
1533 		ref->node = NULL;
1534 	}
1535 
1536 	if (ref->death) {
1537 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1538 			     "%d delete ref %d desc %d has death notification\n",
1539 			      ref->proc->pid, ref->data.debug_id,
1540 			      ref->data.desc);
1541 		binder_dequeue_work(ref->proc, &ref->death->work);
1542 		binder_stats_deleted(BINDER_STAT_DEATH);
1543 	}
1544 	binder_stats_deleted(BINDER_STAT_REF);
1545 }
1546 
1547 /**
1548  * binder_inc_ref_olocked() - increment the ref for given handle
1549  * @ref:         ref to be incremented
1550  * @strong:      if true, strong increment, else weak
1551  * @target_list: list to queue node work on
1552  *
1553  * Increment the ref. @ref->proc->outer_lock must be held on entry
1554  *
1555  * Return: 0, if successful, else errno
1556  */
1557 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1558 				  struct list_head *target_list)
1559 {
1560 	int ret;
1561 
1562 	if (strong) {
1563 		if (ref->data.strong == 0) {
1564 			ret = binder_inc_node(ref->node, 1, 1, target_list);
1565 			if (ret)
1566 				return ret;
1567 		}
1568 		ref->data.strong++;
1569 	} else {
1570 		if (ref->data.weak == 0) {
1571 			ret = binder_inc_node(ref->node, 0, 1, target_list);
1572 			if (ret)
1573 				return ret;
1574 		}
1575 		ref->data.weak++;
1576 	}
1577 	return 0;
1578 }
1579 
1580 /**
1581  * binder_dec_ref_olocked() - dec the ref for given handle
1582  * @ref:	ref to be decremented
1583  * @strong:	if true, strong decrement, else weak
1584  *
1585  * Decrement the ref. @ref->proc->outer_lock must be held on entry
1586  *
1587  * Return: true if ref is cleaned up and ready to be freed
1588  */
1589 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1590 {
1591 	if (strong) {
1592 		if (ref->data.strong == 0) {
1593 			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1594 					  ref->proc->pid, ref->data.debug_id,
1595 					  ref->data.desc, ref->data.strong,
1596 					  ref->data.weak);
1597 			return false;
1598 		}
1599 		ref->data.strong--;
1600 		if (ref->data.strong == 0)
1601 			binder_dec_node(ref->node, strong, 1);
1602 	} else {
1603 		if (ref->data.weak == 0) {
1604 			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1605 					  ref->proc->pid, ref->data.debug_id,
1606 					  ref->data.desc, ref->data.strong,
1607 					  ref->data.weak);
1608 			return false;
1609 		}
1610 		ref->data.weak--;
1611 	}
1612 	if (ref->data.strong == 0 && ref->data.weak == 0) {
1613 		binder_cleanup_ref_olocked(ref);
1614 		return true;
1615 	}
1616 	return false;
1617 }
1618 
1619 /**
1620  * binder_get_node_from_ref() - get the node from the given proc/desc
1621  * @proc:	proc containing the ref
1622  * @desc:	the handle associated with the ref
1623  * @need_strong_ref: if true, only return node if ref is strong
1624  * @rdata:	the id/refcount data for the ref
1625  *
1626  * Given a proc and ref handle, return the associated binder_node
1627  *
1628  * Return: a binder_node, or NULL if the ref was not found or was
1629  *		only weak when a strong ref was required
1629  */
1630 static struct binder_node *binder_get_node_from_ref(
1631 		struct binder_proc *proc,
1632 		u32 desc, bool need_strong_ref,
1633 		struct binder_ref_data *rdata)
1634 {
1635 	struct binder_node *node;
1636 	struct binder_ref *ref;
1637 
1638 	binder_proc_lock(proc);
1639 	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1640 	if (!ref)
1641 		goto err_no_ref;
1642 	node = ref->node;
1643 	/*
1644 	 * Take an implicit reference on the node to ensure
1645 	 * it stays alive until the call to binder_put_node()
1646 	 */
1647 	binder_inc_node_tmpref(node);
1648 	if (rdata)
1649 		*rdata = ref->data;
1650 	binder_proc_unlock(proc);
1651 
1652 	return node;
1653 
1654 err_no_ref:
1655 	binder_proc_unlock(proc);
1656 	return NULL;
1657 }
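
/*
 * Editor's illustrative sketch, not part of the driver: the lookup
 * above must be paired with binder_put_node() to drop the implicit
 * tmpref once the caller is done with the node. The helper name is
 * hypothetical and the strong lookup is an assumption.
 */
static void __maybe_unused binder_example_node_lookup(
		struct binder_proc *proc, u32 desc)
{
	struct binder_ref_data rdata;
	struct binder_node *node;

	node = binder_get_node_from_ref(proc, desc, true, &rdata);
	if (!node)
		return;		/* no ref, or ref was not strong */
	pr_info("node %d found via ref %d desc %d\n",
		node->debug_id, rdata.debug_id, rdata.desc);
	binder_put_node(node);	/* drops the tmpref taken above */
}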
1658 
1659 /**
1660  * binder_free_ref() - free the binder_ref
1661  * @ref:	ref to free
1662  *
1663  * Free the binder_ref. Free the binder_node indicated by ref->node
1664  * (if non-NULL) and the binder_ref_death indicated by ref->death.
1665  */
1666 static void binder_free_ref(struct binder_ref *ref)
1667 {
1668 	if (ref->node)
1669 		binder_free_node(ref->node);
1670 	kfree(ref->death);
1671 	kfree(ref);
1672 }
1673 
1674 /**
1675  * binder_update_ref_for_handle() - inc/dec the ref for given handle
1676  * @proc:	proc containing the ref
1677  * @desc:	the handle associated with the ref
1678  * @increment:	true=inc reference, false=dec reference
1679  * @strong:	true=strong reference, false=weak reference
1680  * @rdata:	the id/refcount data for the ref
1681  *
1682  * Given a proc and ref handle, increment or decrement the ref
1683  * according to "increment" arg.
1684  *
1685  * Return: 0 if successful, else errno
1686  */
1687 static int binder_update_ref_for_handle(struct binder_proc *proc,
1688 		uint32_t desc, bool increment, bool strong,
1689 		struct binder_ref_data *rdata)
1690 {
1691 	int ret = 0;
1692 	struct binder_ref *ref;
1693 	bool delete_ref = false;
1694 
1695 	binder_proc_lock(proc);
1696 	ref = binder_get_ref_olocked(proc, desc, strong);
1697 	if (!ref) {
1698 		ret = -EINVAL;
1699 		goto err_no_ref;
1700 	}
1701 	if (increment)
1702 		ret = binder_inc_ref_olocked(ref, strong, NULL);
1703 	else
1704 		delete_ref = binder_dec_ref_olocked(ref, strong);
1705 
1706 	if (rdata)
1707 		*rdata = ref->data;
1708 	binder_proc_unlock(proc);
1709 
1710 	if (delete_ref)
1711 		binder_free_ref(ref);
1712 	return ret;
1713 
1714 err_no_ref:
1715 	binder_proc_unlock(proc);
1716 	return ret;
1717 }
1718 
1719 /**
1720  * binder_dec_ref_for_handle() - dec the ref for given handle
1721  * @proc:	proc containing the ref
1722  * @desc:	the handle associated with the ref
1723  * @strong:	true=strong reference, false=weak reference
1724  * @rdata:	the id/refcount data for the ref
1725  *
1726  * Just calls binder_update_ref_for_handle() to decrement the ref.
1727  *
1728  * Return: 0 if successful, else errno
1729  */
1730 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1731 		uint32_t desc, bool strong, struct binder_ref_data *rdata)
1732 {
1733 	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1734 }
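
/*
 * Editor's illustrative sketch, not part of the driver: a balanced
 * strong increment and decrement through the handle-based wrappers
 * above. The helper name is hypothetical; @desc is assumed to be a
 * handle previously issued to this process.
 */
static int __maybe_unused binder_example_ref_cycle(
		struct binder_proc *proc, uint32_t desc)
{
	struct binder_ref_data rdata;
	int ret;

	/* increment == true, strong == true */
	ret = binder_update_ref_for_handle(proc, desc, true, true, &rdata);
	if (ret)
		return ret;
	pr_info("ref %d desc %d s %d w %d\n", rdata.debug_id,
		rdata.desc, rdata.strong, rdata.weak);
	/* matching decrement; this may free the ref entirely */
	return binder_dec_ref_for_handle(proc, desc, true, &rdata);
}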
1735 
1736 
1737 /**
1738  * binder_inc_ref_for_node() - increment the ref for given proc/node
1739  * @proc:	 proc containing the ref
1740  * @node:	 target node
1741  * @strong:	 true=strong reference, false=weak reference
1742  * @target_list: worklist to use if node is incremented
1743  * @rdata:	 the id/refcount data for the ref
1744  *
1745  * Given a proc and node, increment the ref. Create the ref if it
1746  * doesn't already exist
1747  *
1748  * Return: 0 if successful, else errno
1749  */
1750 static int binder_inc_ref_for_node(struct binder_proc *proc,
1751 			struct binder_node *node,
1752 			bool strong,
1753 			struct list_head *target_list,
1754 			struct binder_ref_data *rdata)
1755 {
1756 	struct binder_ref *ref;
1757 	struct binder_ref *new_ref = NULL;
1758 	int ret = 0;
1759 
1760 	binder_proc_lock(proc);
1761 	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1762 	if (!ref) {
1763 		binder_proc_unlock(proc);
1764 		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1765 		if (!new_ref)
1766 			return -ENOMEM;
1767 		binder_proc_lock(proc);
1768 		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1769 	}
1770 	ret = binder_inc_ref_olocked(ref, strong, target_list);
1771 	*rdata = ref->data;
1772 	binder_proc_unlock(proc);
1773 	if (new_ref && ref != new_ref)
1774 		/*
1775 		 * Another thread created the ref first so
1776 		 * free the one we allocated
1777 		 */
1778 		kfree(new_ref);
1779 	return ret;
1780 }
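
/*
 * Editor's illustrative sketch, not part of the driver: granting a
 * process a handle to @node via the helper above, similar to what
 * binder_translate_binder() does when a node is sent in a
 * transaction. The helper name is hypothetical; passing a NULL
 * target list means no node work is queued here.
 */
static int __maybe_unused binder_example_grant_handle(
		struct binder_proc *target_proc, struct binder_node *node)
{
	struct binder_ref_data rdata;
	int ret;

	ret = binder_inc_ref_for_node(target_proc, node, true, NULL, &rdata);
	if (ret)
		return ret;
	/* rdata.desc is the handle the target process can now use */
	pr_info("granted desc %d (ref %d)\n", rdata.desc, rdata.debug_id);
	return 0;
}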
1781 
1782 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1783 					   struct binder_transaction *t)
1784 {
1785 	BUG_ON(!target_thread);
1786 	assert_spin_locked(&target_thread->proc->inner_lock);
1787 	BUG_ON(target_thread->transaction_stack != t);
1788 	BUG_ON(target_thread->transaction_stack->from != target_thread);
1789 	target_thread->transaction_stack =
1790 		target_thread->transaction_stack->from_parent;
1791 	t->from = NULL;
1792 }
1793 
1794 /**
1795  * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1796  * @thread:	thread to decrement
1797  *
1798  * A thread needs to be kept alive while being used to create or
1799  * handle a transaction. binder_get_txn_from() is used to safely
1800  * extract t->from from a binder_transaction and keep the thread
1801  * indicated by t->from from being freed. When done with that
1802  * binder_thread, this function is called to decrement the
1803  * tmp_ref and free if appropriate (thread has been released
1804  * and no transaction being processed by the driver)
1805  */
1806 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1807 {
1808 	/*
1809 	 * The atomic alone protects the counter only while it cannot
1810 	 * reach zero or thread->is_dead is false
1811 	 */
1812 	binder_inner_proc_lock(thread->proc);
1813 	atomic_dec(&thread->tmp_ref);
1814 	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1815 		binder_inner_proc_unlock(thread->proc);
1816 		binder_free_thread(thread);
1817 		return;
1818 	}
1819 	binder_inner_proc_unlock(thread->proc);
1820 }
1821 
1822 /**
1823  * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1824  * @proc:	proc to decrement
1825  *
1826  * A binder_proc needs to be kept alive while being used to create or
1827  * handle a transaction. proc->tmp_ref is incremented when
1828  * creating a new transaction or the binder_proc is currently in-use
1829  * by threads that are being released. When done with the binder_proc,
1830  * this function is called to decrement the counter and free the
1831  * proc if appropriate (proc has been released, all threads have
1832  * been released and not currently in use to process a transaction).
1833  */
1834 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1835 {
1836 	binder_inner_proc_lock(proc);
1837 	proc->tmp_ref--;
1838 	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1839 			!proc->tmp_ref) {
1840 		binder_inner_proc_unlock(proc);
1841 		binder_free_proc(proc);
1842 		return;
1843 	}
1844 	binder_inner_proc_unlock(proc);
1845 }
1846 
1847 /**
1848  * binder_get_txn_from() - safely extract the "from" thread in transaction
1849  * @t:	binder transaction for t->from
1850  *
1851  * Atomically return the "from" thread and increment the tmp_ref
1852  * count for the thread to ensure it stays alive until
1853  * binder_thread_dec_tmpref() is called.
1854  *
1855  * Return: the value of t->from
1856  */
1857 static struct binder_thread *binder_get_txn_from(
1858 		struct binder_transaction *t)
1859 {
1860 	struct binder_thread *from;
1861 
1862 	spin_lock(&t->lock);
1863 	from = t->from;
1864 	if (from)
1865 		atomic_inc(&from->tmp_ref);
1866 	spin_unlock(&t->lock);
1867 	return from;
1868 }
1869 
1870 /**
1871  * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1872  * @t:	binder transaction for t->from
1873  *
1874  * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1875  * to guarantee that the thread cannot be released while operating on it.
1876  * The caller must call binder_inner_proc_unlock() to release the inner lock
1877  * as well as call binder_thread_dec_tmpref() to release the reference.
1878  *
1879  * Return: the value of t->from
1880  */
1881 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1882 		struct binder_transaction *t)
1883 	__acquires(&t->from->proc->inner_lock)
1884 {
1885 	struct binder_thread *from;
1886 
1887 	from = binder_get_txn_from(t);
1888 	if (!from) {
1889 		__acquire(&from->proc->inner_lock);
1890 		return NULL;
1891 	}
1892 	binder_inner_proc_lock(from->proc);
1893 	if (t->from) {
1894 		BUG_ON(from != t->from);
1895 		return from;
1896 	}
1897 	binder_inner_proc_unlock(from->proc);
1898 	__acquire(&from->proc->inner_lock);
1899 	binder_thread_dec_tmpref(from);
1900 	return NULL;
1901 }
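
/*
 * Editor's illustrative sketch, not part of the driver: the caller
 * contract documented above in practice. A non-NULL return must be
 * followed by binder_inner_proc_unlock() and then
 * binder_thread_dec_tmpref(), in that order. The helper name is
 * hypothetical; the __release() mirrors the sparse annotations used
 * elsewhere in this file.
 */
static void __maybe_unused binder_example_peek_sender(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	from = binder_get_txn_from_and_acq_inner(t);
	if (!from) {
		/* annotation for sparse */
		__release(&t->from->proc->inner_lock);
		return;
	}
	pr_info("transaction %d was sent by %d:%d\n", t->debug_id,
		from->proc->pid, from->pid);
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
}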
1902 
1903 /**
1904  * binder_free_txn_fixups() - free unprocessed fd fixups
1905  * @t:	binder transaction whose fd fixups are to be freed
1906  *
1907  * If the transaction is being torn down prior to being
1908  * processed by the target process, free all of the
1909  * fd fixups and fput the file structs. It is safe to
1910  * call this function after the fixups have been
1911  * processed -- in that case, the list will be empty.
1912  */
1913 static void binder_free_txn_fixups(struct binder_transaction *t)
1914 {
1915 	struct binder_txn_fd_fixup *fixup, *tmp;
1916 
1917 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1918 		fput(fixup->file);
1919 		list_del(&fixup->fixup_entry);
1920 		kfree(fixup);
1921 	}
1922 }
1923 
1924 static void binder_free_transaction(struct binder_transaction *t)
1925 {
1926 	struct binder_proc *target_proc = t->to_proc;
1927 
1928 	if (target_proc) {
1929 		binder_inner_proc_lock(target_proc);
1930 		if (t->buffer)
1931 			t->buffer->transaction = NULL;
1932 		binder_inner_proc_unlock(target_proc);
1933 	}
1934 	/*
1935 	 * If the transaction has no target_proc, then
1936 	 * t->buffer->transaction has already been cleared.
1937 	 */
1938 	binder_free_txn_fixups(t);
1939 	kfree(t);
1940 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
1941 }
1942 
1943 static void binder_send_failed_reply(struct binder_transaction *t,
1944 				     uint32_t error_code)
1945 {
1946 	struct binder_thread *target_thread;
1947 	struct binder_transaction *next;
1948 
1949 	BUG_ON(t->flags & TF_ONE_WAY);
1950 	while (1) {
1951 		target_thread = binder_get_txn_from_and_acq_inner(t);
1952 		if (target_thread) {
1953 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1954 				     "send failed reply for transaction %d to %d:%d\n",
1955 				      t->debug_id,
1956 				      target_thread->proc->pid,
1957 				      target_thread->pid);
1958 
1959 			binder_pop_transaction_ilocked(target_thread, t);
1960 			if (target_thread->reply_error.cmd == BR_OK) {
1961 				target_thread->reply_error.cmd = error_code;
1962 				binder_enqueue_thread_work_ilocked(
1963 					target_thread,
1964 					&target_thread->reply_error.work);
1965 				wake_up_interruptible(&target_thread->wait);
1966 			} else {
1967 				/*
1968 				 * Cannot get here for normal operation, but
1969 				 * we can if multiple synchronous transactions
1970 				 * are sent without blocking for responses.
1971 				 * Just ignore the 2nd error in this case.
1972 				 */
1973 				pr_warn("Unexpected reply error: %u\n",
1974 					target_thread->reply_error.cmd);
1975 			}
1976 			binder_inner_proc_unlock(target_thread->proc);
1977 			binder_thread_dec_tmpref(target_thread);
1978 			binder_free_transaction(t);
1979 			return;
1980 		} else {
1981 			__release(&target_thread->proc->inner_lock);
1982 		}
1983 		next = t->from_parent;
1984 
1985 		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1986 			     "send failed reply for transaction %d, target dead\n",
1987 			     t->debug_id);
1988 
1989 		binder_free_transaction(t);
1990 		if (next == NULL) {
1991 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
1992 				     "reply failed, no target thread at root\n");
1993 			return;
1994 		}
1995 		t = next;
1996 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1997 			     "reply failed, no target thread -- retry %d\n",
1998 			      t->debug_id);
1999 	}
2000 }
2001 
2002 /**
2003  * binder_cleanup_transaction() - cleans up undelivered transaction
2004  * @t:		transaction that needs to be cleaned up
2005  * @reason:	reason the transaction wasn't delivered
2006  * @error_code:	error to return to caller (if synchronous call)
2007  */
2008 static void binder_cleanup_transaction(struct binder_transaction *t,
2009 				       const char *reason,
2010 				       uint32_t error_code)
2011 {
2012 	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2013 		binder_send_failed_reply(t, error_code);
2014 	} else {
2015 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2016 			"undelivered transaction %d, %s\n",
2017 			t->debug_id, reason);
2018 		binder_free_transaction(t);
2019 	}
2020 }
2021 
2022 /**
2023  * binder_get_object() - gets object and checks for valid metadata
2024  * @proc:	binder_proc owning the buffer
2025  * @buffer:	binder_buffer that we're parsing.
2026  * @offset:	offset in the @buffer at which to validate an object.
2027  * @object:	struct binder_object to read into
2028  *
2029  * Return:	If there's a valid metadata object at @offset in @buffer, the
2030  *		size of that object. Otherwise, it returns zero. The object
2031  *		is read into the struct binder_object pointed to by @object.
2032  */
2033 static size_t binder_get_object(struct binder_proc *proc,
2034 				struct binder_buffer *buffer,
2035 				unsigned long offset,
2036 				struct binder_object *object)
2037 {
2038 	size_t read_size;
2039 	struct binder_object_header *hdr;
2040 	size_t object_size = 0;
2041 
2042 	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
2043 	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
2044 	    binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
2045 					  offset, read_size))
2046 		return 0;
2047 
2048 	/* Ok, now see if we read a complete object. */
2049 	hdr = &object->hdr;
2050 	switch (hdr->type) {
2051 	case BINDER_TYPE_BINDER:
2052 	case BINDER_TYPE_WEAK_BINDER:
2053 	case BINDER_TYPE_HANDLE:
2054 	case BINDER_TYPE_WEAK_HANDLE:
2055 		object_size = sizeof(struct flat_binder_object);
2056 		break;
2057 	case BINDER_TYPE_FD:
2058 		object_size = sizeof(struct binder_fd_object);
2059 		break;
2060 	case BINDER_TYPE_PTR:
2061 		object_size = sizeof(struct binder_buffer_object);
2062 		break;
2063 	case BINDER_TYPE_FDA:
2064 		object_size = sizeof(struct binder_fd_array_object);
2065 		break;
2066 	default:
2067 		return 0;
2068 	}
2069 	if (offset <= buffer->data_size - object_size &&
2070 	    buffer->data_size >= object_size)
2071 		return object_size;
2072 	else
2073 		return 0;
2074 }
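
/*
 * Editor's illustrative sketch, not part of the driver: consuming
 * binder_get_object() above. A zero return covers both a failed copy
 * and invalid metadata, so a caller needs only one check. The helper
 * name is hypothetical.
 */
static bool __maybe_unused binder_example_object_is_fd(
		struct binder_proc *proc, struct binder_buffer *buffer,
		unsigned long offset)
{
	struct binder_object object;

	if (!binder_get_object(proc, buffer, offset, &object))
		return false;	/* truncated or unrecognized object */
	return object.hdr.type == BINDER_TYPE_FD;
}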
2075 
2076 /**
2077  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2078  * @proc:	binder_proc owning the buffer
2079  * @b:		binder_buffer containing the object
2080  * @object:	struct binder_object to read into
2081  * @index:	index in offset array at which the binder_buffer_object is
2082  *		located
2083  * @start_offset: points to the start of the offset array
2084  * @object_offsetp: offset of @object read from @b
2085  * @num_valid:	the number of valid offsets in the offset array
2086  *
2087  * Return:	If @index is within the valid range of the offset array
2088  *		described by @start_offset and @num_valid, and if there's a valid
2089  *		binder_buffer_object at the offset found in index @index
2090  *		of the offset array, that object is returned. Otherwise,
2091  *		%NULL is returned.
2092  *		Note that the offset found in index @index itself is not
2093  *		verified; this function assumes that @num_valid elements
2094  * from @start_offset were previously verified to have valid offsets.
2095  *		If @object_offsetp is non-NULL, then the offset within
2096  *		@b is written to it.
2097  */
2098 static struct binder_buffer_object *binder_validate_ptr(
2099 						struct binder_proc *proc,
2100 						struct binder_buffer *b,
2101 						struct binder_object *object,
2102 						binder_size_t index,
2103 						binder_size_t start_offset,
2104 						binder_size_t *object_offsetp,
2105 						binder_size_t num_valid)
2106 {
2107 	size_t object_size;
2108 	binder_size_t object_offset;
2109 	unsigned long buffer_offset;
2110 
2111 	if (index >= num_valid)
2112 		return NULL;
2113 
2114 	buffer_offset = start_offset + sizeof(binder_size_t) * index;
2115 	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2116 					  b, buffer_offset,
2117 					  sizeof(object_offset)))
2118 		return NULL;
2119 	object_size = binder_get_object(proc, b, object_offset, object);
2120 	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
2121 		return NULL;
2122 	if (object_offsetp)
2123 		*object_offsetp = object_offset;
2124 
2125 	return &object->bbo;
2126 }
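
/*
 * Editor's illustrative sketch, not part of the driver: looking up
 * the parent binder_buffer_object of an fd array the way the FDA
 * paths later in this file do. Note that @num_valid counts
 * offset-array entries, not bytes. The helper name is hypothetical.
 */
static bool __maybe_unused binder_example_parent_ok(
		struct binder_proc *proc, struct binder_buffer *b,
		binder_size_t parent_index, binder_size_t off_start_offset,
		binder_size_t num_valid)
{
	struct binder_object object;
	struct binder_buffer_object *parent;

	parent = binder_validate_ptr(proc, b, &object, parent_index,
				     off_start_offset, NULL, num_valid);
	/* require room for at least one fd in the parent buffer */
	return parent && parent->length >= sizeof(u32);
}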
2127 
2128 /**
2129  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2130  * @proc:		binder_proc owning the buffer
2131  * @b:			transaction buffer
2132  * @objects_start_offset: offset to start of objects buffer
2133  * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
2134  * @fixup_offset:	start offset in @buffer to fix up
2135  * @last_obj_offset:	offset to last binder_buffer_object that we fixed
2136  * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
2137  *
2138  * Return:		%true if a fixup at @fixup_offset in buffer @b is
2139  *			allowed.
2140  *
2141  * For safety reasons, we only allow fixups inside a buffer to happen
2142  * at increasing offsets; additionally, we only allow fixup on the last
2143  * buffer object that was verified, or one of its parents.
2144  *
2145  * Example of what is allowed:
2146  *
2147  * A
2148  *   B (parent = A, offset = 0)
2149  *   C (parent = A, offset = 16)
2150  *     D (parent = C, offset = 0)
2151  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2152  *
2153  * Examples of what is not allowed:
2154  *
2155  * Decreasing offsets within the same parent:
2156  * A
2157  *   C (parent = A, offset = 16)
2158  *   B (parent = A, offset = 0) // decreasing offset within A
2159  *
2160  * Referring to a parent that wasn't the last object or any of its parents:
2161  * A
2162  *   B (parent = A, offset = 0)
2163  *   C (parent = A, offset = 16)
2164  *     D (parent = C, offset = 0)
2165  *     E (parent = B, offset = 0) // B is not D or any of D's parents
2166  */
2167 static bool binder_validate_fixup(struct binder_proc *proc,
2168 				  struct binder_buffer *b,
2169 				  binder_size_t objects_start_offset,
2170 				  binder_size_t buffer_obj_offset,
2171 				  binder_size_t fixup_offset,
2172 				  binder_size_t last_obj_offset,
2173 				  binder_size_t last_min_offset)
2174 {
2175 	if (!last_obj_offset) {
2176 		/* No objects verified yet, so nothing to fix up in */
2177 		return false;
2178 	}
2179 
2180 	while (last_obj_offset != buffer_obj_offset) {
2181 		unsigned long buffer_offset;
2182 		struct binder_object last_object;
2183 		struct binder_buffer_object *last_bbo;
2184 		size_t object_size = binder_get_object(proc, b, last_obj_offset,
2185 						       &last_object);
2186 		if (object_size != sizeof(*last_bbo))
2187 			return false;
2188 
2189 		last_bbo = &last_object.bbo;
2190 		/*
2191 		 * Safe to retrieve the parent of last_obj, since it
2192 		 * was already previously verified by the driver.
2193 		 */
2194 		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2195 			return false;
2196 		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
2197 		buffer_offset = objects_start_offset +
2198 			sizeof(binder_size_t) * last_bbo->parent;
2199 		if (binder_alloc_copy_from_buffer(&proc->alloc,
2200 						  &last_obj_offset,
2201 						  b, buffer_offset,
2202 						  sizeof(last_obj_offset)))
2203 			return false;
2204 	}
2205 	return (fixup_offset >= last_min_offset);
2206 }
2207 
2208 /**
2209  * struct binder_task_work_cb - for deferred close
2210  *
2211  * @twork:                callback_head for task work
2212  * @file:                 file to close
2213  *
2214  * Structure to pass task work to be handled after
2215  * returning from binder_ioctl() via task_work_add().
2216  */
2217 struct binder_task_work_cb {
2218 	struct callback_head twork;
2219 	struct file *file;
2220 };
2221 
2222 /**
2223  * binder_do_fd_close() - close list of file descriptors
2224  * @twork:	callback head for task work
2225  *
2226  * It is not safe to call ksys_close() during the binder_ioctl()
2227  * function if there is a chance that binder's own file descriptor
2228  * might be closed. This is to meet the requirements for using
2229  * fdget() (see comments for __fget_light()). Therefore use
2230  * task_work_add() to schedule the close operation once we have
2231  * returned from binder_ioctl(). This function is a callback
2232  * for that mechanism and does the final fput() on the file
2233  * whose descriptor has already been closed.
2234  */
2235 static void binder_do_fd_close(struct callback_head *twork)
2236 {
2237 	struct binder_task_work_cb *twcb = container_of(twork,
2238 			struct binder_task_work_cb, twork);
2239 
2240 	fput(twcb->file);
2241 	kfree(twcb);
2242 }
2243 
2244 /**
2245  * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2246  * @fd:		file-descriptor to close
2247  *
2248  * See comments in binder_do_fd_close(). This function is used to schedule
2249  * a file-descriptor to be closed after returning from binder_ioctl().
2250  */
2251 static void binder_deferred_fd_close(int fd)
2252 {
2253 	struct binder_task_work_cb *twcb;
2254 
2255 	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2256 	if (!twcb)
2257 		return;
2258 	init_task_work(&twcb->twork, binder_do_fd_close);
2259 	__close_fd_get_file(fd, &twcb->file);
2260 	if (twcb->file)
2261 		task_work_add(current, &twcb->twork, true);
2262 	else
2263 		kfree(twcb);
2264 }
2265 
2266 static void binder_transaction_buffer_release(struct binder_proc *proc,
2267 					      struct binder_buffer *buffer,
2268 					      binder_size_t failed_at,
2269 					      bool is_failure)
2270 {
2271 	int debug_id = buffer->debug_id;
2272 	binder_size_t off_start_offset, buffer_offset, off_end_offset;
2273 
2274 	binder_debug(BINDER_DEBUG_TRANSACTION,
2275 		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2276 		     proc->pid, buffer->debug_id,
2277 		     buffer->data_size, buffer->offsets_size,
2278 		     (unsigned long long)failed_at);
2279 
2280 	if (buffer->target_node)
2281 		binder_dec_node(buffer->target_node, 1, 0);
2282 
2283 	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2284 	off_end_offset = is_failure ? failed_at :
2285 				off_start_offset + buffer->offsets_size;
2286 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2287 	     buffer_offset += sizeof(binder_size_t)) {
2288 		struct binder_object_header *hdr;
2289 		size_t object_size = 0;
2290 		struct binder_object object;
2291 		binder_size_t object_offset;
2292 
2293 		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2294 						   buffer, buffer_offset,
2295 						   sizeof(object_offset)))
2296 			object_size = binder_get_object(proc, buffer,
2297 							object_offset, &object);
2298 		if (object_size == 0) {
2299 			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2300 			       debug_id, (u64)object_offset, buffer->data_size);
2301 			continue;
2302 		}
2303 		hdr = &object.hdr;
2304 		switch (hdr->type) {
2305 		case BINDER_TYPE_BINDER:
2306 		case BINDER_TYPE_WEAK_BINDER: {
2307 			struct flat_binder_object *fp;
2308 			struct binder_node *node;
2309 
2310 			fp = to_flat_binder_object(hdr);
2311 			node = binder_get_node(proc, fp->binder);
2312 			if (node == NULL) {
2313 				pr_err("transaction release %d bad node %016llx\n",
2314 				       debug_id, (u64)fp->binder);
2315 				break;
2316 			}
2317 			binder_debug(BINDER_DEBUG_TRANSACTION,
2318 				     "        node %d u%016llx\n",
2319 				     node->debug_id, (u64)node->ptr);
2320 			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2321 					0);
2322 			binder_put_node(node);
2323 		} break;
2324 		case BINDER_TYPE_HANDLE:
2325 		case BINDER_TYPE_WEAK_HANDLE: {
2326 			struct flat_binder_object *fp;
2327 			struct binder_ref_data rdata;
2328 			int ret;
2329 
2330 			fp = to_flat_binder_object(hdr);
2331 			ret = binder_dec_ref_for_handle(proc, fp->handle,
2332 				hdr->type == BINDER_TYPE_HANDLE, &rdata);
2333 
2334 			if (ret) {
2335 				pr_err("transaction release %d bad handle %d, ret = %d\n",
2336 				 debug_id, fp->handle, ret);
2337 				break;
2338 			}
2339 			binder_debug(BINDER_DEBUG_TRANSACTION,
2340 				     "        ref %d desc %d\n",
2341 				     rdata.debug_id, rdata.desc);
2342 		} break;
2343 
2344 		case BINDER_TYPE_FD: {
2345 			/*
2346 			 * No need to close the file here since user-space
2347 			 * closes it for successfully delivered
2348 			 * transactions. For transactions that weren't
2349 			 * delivered, the new fd was never allocated so
2350 			 * there is no need to close and the fput on the
2351 			 * file is done when the transaction is torn
2352 			 * down.
2353 			 */
2354 			WARN_ON(failed_at &&
2355 				proc->tsk == current->group_leader);
2356 		} break;
2357 		case BINDER_TYPE_PTR:
2358 			/*
2359 			 * Nothing to do here, this will get cleaned up when the
2360 			 * transaction buffer gets freed
2361 			 */
2362 			break;
2363 		case BINDER_TYPE_FDA: {
2364 			struct binder_fd_array_object *fda;
2365 			struct binder_buffer_object *parent;
2366 			struct binder_object ptr_object;
2367 			binder_size_t fda_offset;
2368 			size_t fd_index;
2369 			binder_size_t fd_buf_size;
2370 			binder_size_t num_valid;
2371 
2372 			if (proc->tsk != current->group_leader) {
2373 				/*
2374 				 * Nothing to do if running in sender context
2375 				 * The fd fixups have not been applied so no
2376 				 * fds need to be closed.
2377 				 */
2378 				continue;
2379 			}
2380 
2381 			num_valid = (buffer_offset - off_start_offset) /
2382 						sizeof(binder_size_t);
2383 			fda = to_binder_fd_array_object(hdr);
2384 			parent = binder_validate_ptr(proc, buffer, &ptr_object,
2385 						     fda->parent,
2386 						     off_start_offset,
2387 						     NULL,
2388 						     num_valid);
2389 			if (!parent) {
2390 				pr_err("transaction release %d bad parent offset\n",
2391 				       debug_id);
2392 				continue;
2393 			}
2394 			fd_buf_size = sizeof(u32) * fda->num_fds;
2395 			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2396 				pr_err("transaction release %d invalid number of fds (%lld)\n",
2397 				       debug_id, (u64)fda->num_fds);
2398 				continue;
2399 			}
2400 			if (fd_buf_size > parent->length ||
2401 			    fda->parent_offset > parent->length - fd_buf_size) {
2402 				/* No space for all file descriptors here. */
2403 				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2404 				       debug_id, (u64)fda->num_fds);
2405 				continue;
2406 			}
2407 			/*
2408 			 * the source data for binder_buffer_object is visible
2409 			 * to user-space and the @buffer element is the user
2410 			 * pointer to the buffer_object containing the fd_array.
2411 			 * Convert the address to an offset relative to
2412 			 * the base of the transaction buffer.
2413 			 */
2414 			fda_offset =
2415 			    (parent->buffer - (uintptr_t)buffer->user_data) +
2416 			    fda->parent_offset;
2417 			for (fd_index = 0; fd_index < fda->num_fds;
2418 			     fd_index++) {
2419 				u32 fd;
2420 				int err;
2421 				binder_size_t offset = fda_offset +
2422 					fd_index * sizeof(fd);
2423 
2424 				err = binder_alloc_copy_from_buffer(
2425 						&proc->alloc, &fd, buffer,
2426 						offset, sizeof(fd));
2427 				WARN_ON(err);
2428 				if (!err)
2429 					binder_deferred_fd_close(fd);
2430 			}
2431 		} break;
2432 		default:
2433 			pr_err("transaction release %d bad object type %x\n",
2434 				debug_id, hdr->type);
2435 			break;
2436 		}
2437 	}
2438 }
2439 
2440 static int binder_translate_binder(struct flat_binder_object *fp,
2441 				   struct binder_transaction *t,
2442 				   struct binder_thread *thread)
2443 {
2444 	struct binder_node *node;
2445 	struct binder_proc *proc = thread->proc;
2446 	struct binder_proc *target_proc = t->to_proc;
2447 	struct binder_ref_data rdata;
2448 	int ret = 0;
2449 
2450 	node = binder_get_node(proc, fp->binder);
2451 	if (!node) {
2452 		node = binder_new_node(proc, fp);
2453 		if (!node)
2454 			return -ENOMEM;
2455 	}
2456 	if (fp->cookie != node->cookie) {
2457 		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2458 				  proc->pid, thread->pid, (u64)fp->binder,
2459 				  node->debug_id, (u64)fp->cookie,
2460 				  (u64)node->cookie);
2461 		ret = -EINVAL;
2462 		goto done;
2463 	}
2464 	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2465 		ret = -EPERM;
2466 		goto done;
2467 	}
2468 
2469 	ret = binder_inc_ref_for_node(target_proc, node,
2470 			fp->hdr.type == BINDER_TYPE_BINDER,
2471 			&thread->todo, &rdata);
2472 	if (ret)
2473 		goto done;
2474 
2475 	if (fp->hdr.type == BINDER_TYPE_BINDER)
2476 		fp->hdr.type = BINDER_TYPE_HANDLE;
2477 	else
2478 		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2479 	fp->binder = 0;
2480 	fp->handle = rdata.desc;
2481 	fp->cookie = 0;
2482 
2483 	trace_binder_transaction_node_to_ref(t, node, &rdata);
2484 	binder_debug(BINDER_DEBUG_TRANSACTION,
2485 		     "        node %d u%016llx -> ref %d desc %d\n",
2486 		     node->debug_id, (u64)node->ptr,
2487 		     rdata.debug_id, rdata.desc);
2488 done:
2489 	binder_put_node(node);
2490 	return ret;
2491 }
2492 
2493 static int binder_translate_handle(struct flat_binder_object *fp,
2494 				   struct binder_transaction *t,
2495 				   struct binder_thread *thread)
2496 {
2497 	struct binder_proc *proc = thread->proc;
2498 	struct binder_proc *target_proc = t->to_proc;
2499 	struct binder_node *node;
2500 	struct binder_ref_data src_rdata;
2501 	int ret = 0;
2502 
2503 	node = binder_get_node_from_ref(proc, fp->handle,
2504 			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2505 	if (!node) {
2506 		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2507 				  proc->pid, thread->pid, fp->handle);
2508 		return -EINVAL;
2509 	}
2510 	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2511 		ret = -EPERM;
2512 		goto done;
2513 	}
2514 
2515 	binder_node_lock(node);
2516 	if (node->proc == target_proc) {
2517 		if (fp->hdr.type == BINDER_TYPE_HANDLE)
2518 			fp->hdr.type = BINDER_TYPE_BINDER;
2519 		else
2520 			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2521 		fp->binder = node->ptr;
2522 		fp->cookie = node->cookie;
2523 		if (node->proc)
2524 			binder_inner_proc_lock(node->proc);
2525 		else
2526 			__acquire(&node->proc->inner_lock);
2527 		binder_inc_node_nilocked(node,
2528 					 fp->hdr.type == BINDER_TYPE_BINDER,
2529 					 0, NULL);
2530 		if (node->proc)
2531 			binder_inner_proc_unlock(node->proc);
2532 		else
2533 			__release(&node->proc->inner_lock);
2534 		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2535 		binder_debug(BINDER_DEBUG_TRANSACTION,
2536 			     "        ref %d desc %d -> node %d u%016llx\n",
2537 			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
2538 			     (u64)node->ptr);
2539 		binder_node_unlock(node);
2540 	} else {
2541 		struct binder_ref_data dest_rdata;
2542 
2543 		binder_node_unlock(node);
2544 		ret = binder_inc_ref_for_node(target_proc, node,
2545 				fp->hdr.type == BINDER_TYPE_HANDLE,
2546 				NULL, &dest_rdata);
2547 		if (ret)
2548 			goto done;
2549 
2550 		fp->binder = 0;
2551 		fp->handle = dest_rdata.desc;
2552 		fp->cookie = 0;
2553 		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2554 						    &dest_rdata);
2555 		binder_debug(BINDER_DEBUG_TRANSACTION,
2556 			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2557 			     src_rdata.debug_id, src_rdata.desc,
2558 			     dest_rdata.debug_id, dest_rdata.desc,
2559 			     node->debug_id);
2560 	}
2561 done:
2562 	binder_put_node(node);
2563 	return ret;
2564 }
2565 
2566 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2567 			       struct binder_transaction *t,
2568 			       struct binder_thread *thread,
2569 			       struct binder_transaction *in_reply_to)
2570 {
2571 	struct binder_proc *proc = thread->proc;
2572 	struct binder_proc *target_proc = t->to_proc;
2573 	struct binder_txn_fd_fixup *fixup;
2574 	struct file *file;
2575 	int ret = 0;
2576 	bool target_allows_fd;
2577 
2578 	if (in_reply_to)
2579 		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2580 	else
2581 		target_allows_fd = t->buffer->target_node->accept_fds;
2582 	if (!target_allows_fd) {
2583 		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2584 				  proc->pid, thread->pid,
2585 				  in_reply_to ? "reply" : "transaction",
2586 				  fd);
2587 		ret = -EPERM;
2588 		goto err_fd_not_accepted;
2589 	}
2590 
2591 	file = fget(fd);
2592 	if (!file) {
2593 		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2594 				  proc->pid, thread->pid, fd);
2595 		ret = -EBADF;
2596 		goto err_fget;
2597 	}
2598 	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2599 	if (ret < 0) {
2600 		ret = -EPERM;
2601 		goto err_security;
2602 	}
2603 
2604 	/*
2605 	 * Add fixup record for this transaction. The allocation
2606 	 * of the fd in the target needs to be done from a
2607 	 * target thread.
2608 	 */
2609 	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2610 	if (!fixup) {
2611 		ret = -ENOMEM;
2612 		goto err_alloc;
2613 	}
2614 	fixup->file = file;
2615 	fixup->offset = fd_offset;
2616 	trace_binder_transaction_fd_send(t, fd, fixup->offset);
2617 	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2618 
2619 	return ret;
2620 
2621 err_alloc:
2622 err_security:
2623 	fput(file);
2624 err_fget:
2625 err_fd_not_accepted:
2626 	return ret;
2627 }
2628 
2629 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2630 				     struct binder_buffer_object *parent,
2631 				     struct binder_transaction *t,
2632 				     struct binder_thread *thread,
2633 				     struct binder_transaction *in_reply_to)
2634 {
2635 	binder_size_t fdi, fd_buf_size;
2636 	binder_size_t fda_offset;
2637 	struct binder_proc *proc = thread->proc;
2638 	struct binder_proc *target_proc = t->to_proc;
2639 
2640 	fd_buf_size = sizeof(u32) * fda->num_fds;
2641 	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2642 		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2643 				  proc->pid, thread->pid, (u64)fda->num_fds);
2644 		return -EINVAL;
2645 	}
2646 	if (fd_buf_size > parent->length ||
2647 	    fda->parent_offset > parent->length - fd_buf_size) {
2648 		/* No space for all file descriptors here. */
2649 		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2650 				  proc->pid, thread->pid, (u64)fda->num_fds);
2651 		return -EINVAL;
2652 	}
2653 	/*
2654 	 * the source data for binder_buffer_object is visible
2655 	 * to user-space and the @buffer element is the user
2656 	 * pointer to the buffer_object containing the fd_array.
2657 	 * Convert the address to an offset relative to
2658 	 * the base of the transaction buffer.
2659 	 */
2660 	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2661 		fda->parent_offset;
2662 	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
2663 		binder_user_error("%d:%d parent offset not aligned correctly.\n",
2664 				  proc->pid, thread->pid);
2665 		return -EINVAL;
2666 	}
2667 	for (fdi = 0; fdi < fda->num_fds; fdi++) {
2668 		u32 fd;
2669 		int ret;
2670 		binder_size_t offset = fda_offset + fdi * sizeof(fd);
2671 
2672 		ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
2673 						    &fd, t->buffer,
2674 						    offset, sizeof(fd));
2675 		if (!ret)
2676 			ret = binder_translate_fd(fd, offset, t, thread,
2677 						  in_reply_to);
2678 		if (ret < 0)
2679 			return ret;
2680 	}
2681 	return 0;
2682 }
2683 
2684 static int binder_fixup_parent(struct binder_transaction *t,
2685 			       struct binder_thread *thread,
2686 			       struct binder_buffer_object *bp,
2687 			       binder_size_t off_start_offset,
2688 			       binder_size_t num_valid,
2689 			       binder_size_t last_fixup_obj_off,
2690 			       binder_size_t last_fixup_min_off)
2691 {
2692 	struct binder_buffer_object *parent;
2693 	struct binder_buffer *b = t->buffer;
2694 	struct binder_proc *proc = thread->proc;
2695 	struct binder_proc *target_proc = t->to_proc;
2696 	struct binder_object object;
2697 	binder_size_t buffer_offset;
2698 	binder_size_t parent_offset;
2699 
2700 	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2701 		return 0;
2702 
2703 	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2704 				     off_start_offset, &parent_offset,
2705 				     num_valid);
2706 	if (!parent) {
2707 		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2708 				  proc->pid, thread->pid);
2709 		return -EINVAL;
2710 	}
2711 
2712 	if (!binder_validate_fixup(target_proc, b, off_start_offset,
2713 				   parent_offset, bp->parent_offset,
2714 				   last_fixup_obj_off,
2715 				   last_fixup_min_off)) {
2716 		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2717 				  proc->pid, thread->pid);
2718 		return -EINVAL;
2719 	}
2720 
2721 	if (parent->length < sizeof(binder_uintptr_t) ||
2722 	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2723 		/* No space for a pointer here! */
2724 		binder_user_error("%d:%d got transaction with invalid parent offset\n",
2725 				  proc->pid, thread->pid);
2726 		return -EINVAL;
2727 	}
2728 	buffer_offset = bp->parent_offset +
2729 			(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2730 	if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
2731 					&bp->buffer, sizeof(bp->buffer))) {
2732 		binder_user_error("%d:%d got transaction with invalid parent offset\n",
2733 				  proc->pid, thread->pid);
2734 		return -EINVAL;
2735 	}
2736 
2737 	return 0;
2738 }
2739 
2740 /**
2741  * binder_proc_transaction() - sends a transaction to a process and wakes it up
2742  * @t:		transaction to send
2743  * @proc:	process to send the transaction to
2744  * @thread:	thread in @proc to send the transaction to (may be NULL)
2745  *
2746  * This function queues a transaction to the specified process. It will try
2747  * to find a thread in the target process to handle the transaction and
2748  * wake it up. If no thread is found, the work is queued to the proc
2749  * waitqueue.
2750  *
2751  * If the @thread parameter is not NULL, the transaction is always queued
2752  * to the waitlist of that specific thread.
2753  *
2754  * Return:	true if the transaction was successfully queued
2755  *		false if the target process or thread is dead
2756  */
2757 static bool binder_proc_transaction(struct binder_transaction *t,
2758 				    struct binder_proc *proc,
2759 				    struct binder_thread *thread)
2760 {
2761 	struct binder_node *node = t->buffer->target_node;
2762 	bool oneway = !!(t->flags & TF_ONE_WAY);
2763 	bool pending_async = false;
2764 
2765 	BUG_ON(!node);
2766 	binder_node_lock(node);
2767 	if (oneway) {
2768 		BUG_ON(thread);
2769 		if (node->has_async_transaction) {
2770 			pending_async = true;
2771 		} else {
2772 			node->has_async_transaction = true;
2773 		}
2774 	}
2775 
2776 	binder_inner_proc_lock(proc);
2777 
2778 	if (proc->is_dead || (thread && thread->is_dead)) {
2779 		binder_inner_proc_unlock(proc);
2780 		binder_node_unlock(node);
2781 		return false;
2782 	}
2783 
2784 	if (!thread && !pending_async)
2785 		thread = binder_select_thread_ilocked(proc);
2786 
2787 	if (thread)
2788 		binder_enqueue_thread_work_ilocked(thread, &t->work);
2789 	else if (!pending_async)
2790 		binder_enqueue_work_ilocked(&t->work, &proc->todo);
2791 	else
2792 		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2793 
2794 	if (!pending_async)
2795 		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2796 
2797 	binder_inner_proc_unlock(proc);
2798 	binder_node_unlock(node);
2799 
2800 	return true;
2801 }
2802 
2803 /**
2804  * binder_get_node_refs_for_txn() - Get required refs on node for txn
2805  * @node:         struct binder_node for which to get refs
2806  * @procp:        returns @node->proc if valid
2807  * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
2808  *
2809  * User-space normally keeps the node alive when creating a transaction
2810  * since it has a reference to the target. The local strong ref keeps it
2811  * alive if the sending process dies before the target process processes
2812  * the transaction. If the source process is malicious or has a reference
2813  * counting bug, relying on the local strong ref can fail.
2814  *
2815  * Since user-space can cause the local strong ref to go away, we also take
2816  * a tmpref on the node to ensure it survives while we are constructing
2817  * the transaction. We also need a tmpref on the proc while we are
2818  * constructing the transaction, so we take that here as well.
2819  *
2820  * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2821  * Also sets *@procp if valid. If @node->proc is NULL, indicating that the
2822  * target proc has died, *@error is set to BR_DEAD_REPLY.
2823  */
2824 static struct binder_node *binder_get_node_refs_for_txn(
2825 		struct binder_node *node,
2826 		struct binder_proc **procp,
2827 		uint32_t *error)
2828 {
2829 	struct binder_node *target_node = NULL;
2830 
2831 	binder_node_inner_lock(node);
2832 	if (node->proc) {
2833 		target_node = node;
2834 		binder_inc_node_nilocked(node, 1, 0, NULL);
2835 		binder_inc_node_tmpref_ilocked(node);
2836 		node->proc->tmp_ref++;
2837 		*procp = node->proc;
2838 	} else
2839 		*error = BR_DEAD_REPLY;
2840 	binder_node_inner_unlock(node);
2841 
2842 	return target_node;
2843 }
2844 
2845 static void binder_transaction(struct binder_proc *proc,
2846 			       struct binder_thread *thread,
2847 			       struct binder_transaction_data *tr, int reply,
2848 			       binder_size_t extra_buffers_size)
2849 {
2850 	int ret;
2851 	struct binder_transaction *t;
2852 	struct binder_work *w;
2853 	struct binder_work *tcomplete;
2854 	binder_size_t buffer_offset = 0;
2855 	binder_size_t off_start_offset, off_end_offset;
2856 	binder_size_t off_min;
2857 	binder_size_t sg_buf_offset, sg_buf_end_offset;
2858 	struct binder_proc *target_proc = NULL;
2859 	struct binder_thread *target_thread = NULL;
2860 	struct binder_node *target_node = NULL;
2861 	struct binder_transaction *in_reply_to = NULL;
2862 	struct binder_transaction_log_entry *e;
2863 	uint32_t return_error = 0;
2864 	uint32_t return_error_param = 0;
2865 	uint32_t return_error_line = 0;
2866 	binder_size_t last_fixup_obj_off = 0;
2867 	binder_size_t last_fixup_min_off = 0;
2868 	struct binder_context *context = proc->context;
2869 	int t_debug_id = atomic_inc_return(&binder_last_id);
2870 	char *secctx = NULL;
2871 	u32 secctx_sz = 0;
2872 
2873 	e = binder_transaction_log_add(&binder_transaction_log);
2874 	e->debug_id = t_debug_id;
2875 	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2876 	e->from_proc = proc->pid;
2877 	e->from_thread = thread->pid;
2878 	e->target_handle = tr->target.handle;
2879 	e->data_size = tr->data_size;
2880 	e->offsets_size = tr->offsets_size;
2881 	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2882 
2883 	if (reply) {
2884 		binder_inner_proc_lock(proc);
2885 		in_reply_to = thread->transaction_stack;
2886 		if (in_reply_to == NULL) {
2887 			binder_inner_proc_unlock(proc);
2888 			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2889 					  proc->pid, thread->pid);
2890 			return_error = BR_FAILED_REPLY;
2891 			return_error_param = -EPROTO;
2892 			return_error_line = __LINE__;
2893 			goto err_empty_call_stack;
2894 		}
2895 		if (in_reply_to->to_thread != thread) {
2896 			spin_lock(&in_reply_to->lock);
2897 			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2898 				proc->pid, thread->pid, in_reply_to->debug_id,
2899 				in_reply_to->to_proc ?
2900 				in_reply_to->to_proc->pid : 0,
2901 				in_reply_to->to_thread ?
2902 				in_reply_to->to_thread->pid : 0);
2903 			spin_unlock(&in_reply_to->lock);
2904 			binder_inner_proc_unlock(proc);
2905 			return_error = BR_FAILED_REPLY;
2906 			return_error_param = -EPROTO;
2907 			return_error_line = __LINE__;
2908 			in_reply_to = NULL;
2909 			goto err_bad_call_stack;
2910 		}
2911 		thread->transaction_stack = in_reply_to->to_parent;
2912 		binder_inner_proc_unlock(proc);
2913 		binder_set_nice(in_reply_to->saved_priority);
2914 		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2915 		if (target_thread == NULL) {
2916 			/* annotation for sparse */
2917 			__release(&target_thread->proc->inner_lock);
2918 			return_error = BR_DEAD_REPLY;
2919 			return_error_line = __LINE__;
2920 			goto err_dead_binder;
2921 		}
2922 		if (target_thread->transaction_stack != in_reply_to) {
2923 			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2924 				proc->pid, thread->pid,
2925 				target_thread->transaction_stack ?
2926 				target_thread->transaction_stack->debug_id : 0,
2927 				in_reply_to->debug_id);
2928 			binder_inner_proc_unlock(target_thread->proc);
2929 			return_error = BR_FAILED_REPLY;
2930 			return_error_param = -EPROTO;
2931 			return_error_line = __LINE__;
2932 			in_reply_to = NULL;
2933 			target_thread = NULL;
2934 			goto err_dead_binder;
2935 		}
2936 		target_proc = target_thread->proc;
2937 		target_proc->tmp_ref++;
2938 		binder_inner_proc_unlock(target_thread->proc);
2939 	} else {
2940 		if (tr->target.handle) {
2941 			struct binder_ref *ref;
2942 
2943 			/*
2944 			 * There must already be a strong ref
2945 			 * on this node. If so, do a strong
2946 			 * increment on the node to ensure it
2947 			 * stays alive until the transaction is
2948 			 * done.
2949 			 */
2950 			binder_proc_lock(proc);
2951 			ref = binder_get_ref_olocked(proc, tr->target.handle,
2952 						     true);
2953 			if (ref) {
2954 				target_node = binder_get_node_refs_for_txn(
2955 						ref->node, &target_proc,
2956 						&return_error);
2957 			} else {
2958 				binder_user_error("%d:%d got transaction to invalid handle\n",
2959 						  proc->pid, thread->pid);
2960 				return_error = BR_FAILED_REPLY;
2961 			}
2962 			binder_proc_unlock(proc);
2963 		} else {
2964 			mutex_lock(&context->context_mgr_node_lock);
2965 			target_node = context->binder_context_mgr_node;
2966 			if (target_node)
2967 				target_node = binder_get_node_refs_for_txn(
2968 						target_node, &target_proc,
2969 						&return_error);
2970 			else
2971 				return_error = BR_DEAD_REPLY;
2972 			mutex_unlock(&context->context_mgr_node_lock);
2973 			if (target_node && target_proc->pid == proc->pid) {
2974 				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2975 						  proc->pid, thread->pid);
2976 				return_error = BR_FAILED_REPLY;
2977 				return_error_param = -EINVAL;
2978 				return_error_line = __LINE__;
2979 				goto err_invalid_target_handle;
2980 			}
2981 		}
2982 		if (!target_node) {
2983 			/*
2984 			 * return_error is set above
2985 			 */
2986 			return_error_param = -EINVAL;
2987 			return_error_line = __LINE__;
2988 			goto err_dead_binder;
2989 		}
2990 		e->to_node = target_node->debug_id;
2991 		if (security_binder_transaction(proc->tsk,
2992 						target_proc->tsk) < 0) {
2993 			return_error = BR_FAILED_REPLY;
2994 			return_error_param = -EPERM;
2995 			return_error_line = __LINE__;
2996 			goto err_invalid_target_handle;
2997 		}
2998 		binder_inner_proc_lock(proc);
2999 
3000 		w = list_first_entry_or_null(&thread->todo,
3001 					     struct binder_work, entry);
3002 		if (!(tr->flags & TF_ONE_WAY) && w &&
3003 		    w->type == BINDER_WORK_TRANSACTION) {
3004 			/*
3005 			 * Do not allow new outgoing transaction from a
3006 			 * thread that has a transaction at the head of
3007 			 * its todo list. Only need to check the head
3008 			 * because binder_select_thread_ilocked picks a
3009 			 * thread from proc->waiting_threads to enqueue
3010 			 * the transaction, and nothing is queued to the
3011 			 * todo list while the thread is on waiting_threads.
3012 			 */
3013 			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3014 					  proc->pid, thread->pid);
3015 			binder_inner_proc_unlock(proc);
3016 			return_error = BR_FAILED_REPLY;
3017 			return_error_param = -EPROTO;
3018 			return_error_line = __LINE__;
3019 			goto err_bad_todo_list;
3020 		}
3021 
3022 		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3023 			struct binder_transaction *tmp;
3024 
3025 			tmp = thread->transaction_stack;
3026 			if (tmp->to_thread != thread) {
3027 				spin_lock(&tmp->lock);
3028 				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3029 					proc->pid, thread->pid, tmp->debug_id,
3030 					tmp->to_proc ? tmp->to_proc->pid : 0,
3031 					tmp->to_thread ?
3032 					tmp->to_thread->pid : 0);
3033 				spin_unlock(&tmp->lock);
3034 				binder_inner_proc_unlock(proc);
3035 				return_error = BR_FAILED_REPLY;
3036 				return_error_param = -EPROTO;
3037 				return_error_line = __LINE__;
3038 				goto err_bad_call_stack;
3039 			}
3040 			while (tmp) {
3041 				struct binder_thread *from;
3042 
3043 				spin_lock(&tmp->lock);
3044 				from = tmp->from;
3045 				if (from && from->proc == target_proc) {
3046 					atomic_inc(&from->tmp_ref);
3047 					target_thread = from;
3048 					spin_unlock(&tmp->lock);
3049 					break;
3050 				}
3051 				spin_unlock(&tmp->lock);
3052 				tmp = tmp->from_parent;
3053 			}
3054 		}
3055 		binder_inner_proc_unlock(proc);
3056 	}
3057 	if (target_thread)
3058 		e->to_thread = target_thread->pid;
3059 	e->to_proc = target_proc->pid;
3060 
3061 	/* TODO: reuse incoming transaction for reply */
3062 	t = kzalloc(sizeof(*t), GFP_KERNEL);
3063 	if (t == NULL) {
3064 		return_error = BR_FAILED_REPLY;
3065 		return_error_param = -ENOMEM;
3066 		return_error_line = __LINE__;
3067 		goto err_alloc_t_failed;
3068 	}
3069 	INIT_LIST_HEAD(&t->fd_fixups);
3070 	binder_stats_created(BINDER_STAT_TRANSACTION);
3071 	spin_lock_init(&t->lock);
3072 
3073 	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3074 	if (tcomplete == NULL) {
3075 		return_error = BR_FAILED_REPLY;
3076 		return_error_param = -ENOMEM;
3077 		return_error_line = __LINE__;
3078 		goto err_alloc_tcomplete_failed;
3079 	}
3080 	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3081 
3082 	t->debug_id = t_debug_id;
3083 
3084 	if (reply)
3085 		binder_debug(BINDER_DEBUG_TRANSACTION,
3086 			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3087 			     proc->pid, thread->pid, t->debug_id,
3088 			     target_proc->pid, target_thread->pid,
3089 			     (u64)tr->data.ptr.buffer,
3090 			     (u64)tr->data.ptr.offsets,
3091 			     (u64)tr->data_size, (u64)tr->offsets_size,
3092 			     (u64)extra_buffers_size);
3093 	else
3094 		binder_debug(BINDER_DEBUG_TRANSACTION,
3095 			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3096 			     proc->pid, thread->pid, t->debug_id,
3097 			     target_proc->pid, target_node->debug_id,
3098 			     (u64)tr->data.ptr.buffer,
3099 			     (u64)tr->data.ptr.offsets,
3100 			     (u64)tr->data_size, (u64)tr->offsets_size,
3101 			     (u64)extra_buffers_size);
3102 
3103 	if (!reply && !(tr->flags & TF_ONE_WAY))
3104 		t->from = thread;
3105 	else
3106 		t->from = NULL;
3107 	t->sender_euid = task_euid(proc->tsk);
3108 	t->to_proc = target_proc;
3109 	t->to_thread = target_thread;
3110 	t->code = tr->code;
3111 	t->flags = tr->flags;
3112 	t->priority = task_nice(current);
3113 
3114 	if (target_node && target_node->txn_security_ctx) {
3115 		u32 secid;
3116 		size_t added_size;
3117 
3118 		security_task_getsecid(proc->tsk, &secid);
3119 		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3120 		if (ret) {
3121 			return_error = BR_FAILED_REPLY;
3122 			return_error_param = ret;
3123 			return_error_line = __LINE__;
3124 			goto err_get_secctx_failed;
3125 		}
3126 		added_size = ALIGN(secctx_sz, sizeof(u64));
3127 		extra_buffers_size += added_size;
3128 		if (extra_buffers_size < added_size) {
3129 			/* integer overflow of extra_buffers_size */
3130 			return_error = BR_FAILED_REPLY;
3131 			return_error_param = -EINVAL;
3132 			return_error_line = __LINE__;
3133 			goto err_bad_extra_size;
3134 		}
3135 	}
3136 
3137 	trace_binder_transaction(reply, t, target_node);
3138 
3139 	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3140 		tr->offsets_size, extra_buffers_size,
3141 		!reply && (t->flags & TF_ONE_WAY));
3142 	if (IS_ERR(t->buffer)) {
3143 		/*
3144 		 * -ESRCH indicates VMA cleared. The target is dying.
3145 		 */
3146 		return_error_param = PTR_ERR(t->buffer);
3147 		return_error = return_error_param == -ESRCH ?
3148 			BR_DEAD_REPLY : BR_FAILED_REPLY;
3149 		return_error_line = __LINE__;
3150 		t->buffer = NULL;
3151 		goto err_binder_alloc_buf_failed;
3152 	}
3153 	if (secctx) {
3154 		int err;
3155 		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3156 				    ALIGN(tr->offsets_size, sizeof(void *)) +
3157 				    ALIGN(extra_buffers_size, sizeof(void *)) -
3158 				    ALIGN(secctx_sz, sizeof(u64));
3159 
3160 		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3161 		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3162 						  t->buffer, buf_offset,
3163 						  secctx, secctx_sz);
3164 		if (err) {
3165 			t->security_ctx = 0;
3166 			WARN_ON(1);
3167 		}
3168 		security_release_secctx(secctx, secctx_sz);
3169 		secctx = NULL;
3170 	}
3171 	t->buffer->debug_id = t->debug_id;
3172 	t->buffer->transaction = t;
3173 	t->buffer->target_node = target_node;
3174 	trace_binder_transaction_alloc_buf(t->buffer);
3175 
3176 	if (binder_alloc_copy_user_to_buffer(
3177 				&target_proc->alloc,
3178 				t->buffer, 0,
3179 				(const void __user *)
3180 					(uintptr_t)tr->data.ptr.buffer,
3181 				tr->data_size)) {
3182 		binder_user_error("%d:%d got transaction with invalid data ptr\n",
3183 				proc->pid, thread->pid);
3184 		return_error = BR_FAILED_REPLY;
3185 		return_error_param = -EFAULT;
3186 		return_error_line = __LINE__;
3187 		goto err_copy_data_failed;
3188 	}
3189 	if (binder_alloc_copy_user_to_buffer(
3190 				&target_proc->alloc,
3191 				t->buffer,
3192 				ALIGN(tr->data_size, sizeof(void *)),
3193 				(const void __user *)
3194 					(uintptr_t)tr->data.ptr.offsets,
3195 				tr->offsets_size)) {
3196 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3197 				proc->pid, thread->pid);
3198 		return_error = BR_FAILED_REPLY;
3199 		return_error_param = -EFAULT;
3200 		return_error_line = __LINE__;
3201 		goto err_copy_data_failed;
3202 	}
3203 	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3204 		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3205 				proc->pid, thread->pid, (u64)tr->offsets_size);
3206 		return_error = BR_FAILED_REPLY;
3207 		return_error_param = -EINVAL;
3208 		return_error_line = __LINE__;
3209 		goto err_bad_offset;
3210 	}
3211 	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3212 		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3213 				  proc->pid, thread->pid,
3214 				  (u64)extra_buffers_size);
3215 		return_error = BR_FAILED_REPLY;
3216 		return_error_param = -EINVAL;
3217 		return_error_line = __LINE__;
3218 		goto err_bad_offset;
3219 	}
3220 	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3221 	buffer_offset = off_start_offset;
3222 	off_end_offset = off_start_offset + tr->offsets_size;
3223 	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3224 	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3225 		ALIGN(secctx_sz, sizeof(u64));
3226 	off_min = 0;
3227 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3228 	     buffer_offset += sizeof(binder_size_t)) {
3229 		struct binder_object_header *hdr;
3230 		size_t object_size;
3231 		struct binder_object object;
3232 		binder_size_t object_offset;
3233 
3234 		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3235 						  &object_offset,
3236 						  t->buffer,
3237 						  buffer_offset,
3238 						  sizeof(object_offset))) {
3239 			return_error = BR_FAILED_REPLY;
3240 			return_error_param = -EINVAL;
3241 			return_error_line = __LINE__;
3242 			goto err_bad_offset;
3243 		}
3244 		object_size = binder_get_object(target_proc, t->buffer,
3245 						object_offset, &object);
3246 		if (object_size == 0 || object_offset < off_min) {
3247 			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3248 					  proc->pid, thread->pid,
3249 					  (u64)object_offset,
3250 					  (u64)off_min,
3251 					  (u64)t->buffer->data_size);
3252 			return_error = BR_FAILED_REPLY;
3253 			return_error_param = -EINVAL;
3254 			return_error_line = __LINE__;
3255 			goto err_bad_offset;
3256 		}
3257 
3258 		hdr = &object.hdr;
3259 		off_min = object_offset + object_size;
3260 		switch (hdr->type) {
3261 		case BINDER_TYPE_BINDER:
3262 		case BINDER_TYPE_WEAK_BINDER: {
3263 			struct flat_binder_object *fp;
3264 
3265 			fp = to_flat_binder_object(hdr);
3266 			ret = binder_translate_binder(fp, t, thread);
3267 
3268 			if (ret < 0 ||
3269 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3270 							t->buffer,
3271 							object_offset,
3272 							fp, sizeof(*fp))) {
3273 				return_error = BR_FAILED_REPLY;
3274 				return_error_param = ret;
3275 				return_error_line = __LINE__;
3276 				goto err_translate_failed;
3277 			}
3278 		} break;
3279 		case BINDER_TYPE_HANDLE:
3280 		case BINDER_TYPE_WEAK_HANDLE: {
3281 			struct flat_binder_object *fp;
3282 
3283 			fp = to_flat_binder_object(hdr);
3284 			ret = binder_translate_handle(fp, t, thread);
3285 			if (ret < 0 ||
3286 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3287 							t->buffer,
3288 							object_offset,
3289 							fp, sizeof(*fp))) {
3290 				return_error = BR_FAILED_REPLY;
3291 				return_error_param = ret;
3292 				return_error_line = __LINE__;
3293 				goto err_translate_failed;
3294 			}
3295 		} break;
3296 
3297 		case BINDER_TYPE_FD: {
3298 			struct binder_fd_object *fp = to_binder_fd_object(hdr);
3299 			binder_size_t fd_offset = object_offset +
3300 				(uintptr_t)&fp->fd - (uintptr_t)fp;
3301 			int ret = binder_translate_fd(fp->fd, fd_offset, t,
3302 						      thread, in_reply_to);
3303 
3304 			fp->pad_binder = 0;
3305 			if (ret < 0 ||
3306 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3307 							t->buffer,
3308 							object_offset,
3309 							fp, sizeof(*fp))) {
3310 				return_error = BR_FAILED_REPLY;
3311 				return_error_param = ret;
3312 				return_error_line = __LINE__;
3313 				goto err_translate_failed;
3314 			}
3315 		} break;
3316 		case BINDER_TYPE_FDA: {
3317 			struct binder_object ptr_object;
3318 			binder_size_t parent_offset;
3319 			struct binder_fd_array_object *fda =
3320 				to_binder_fd_array_object(hdr);
3321 			size_t num_valid = (buffer_offset - off_start_offset) /
3322 						sizeof(binder_size_t);
3323 			struct binder_buffer_object *parent =
3324 				binder_validate_ptr(target_proc, t->buffer,
3325 						    &ptr_object, fda->parent,
3326 						    off_start_offset,
3327 						    &parent_offset,
3328 						    num_valid);
3329 			if (!parent) {
3330 				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3331 						  proc->pid, thread->pid);
3332 				return_error = BR_FAILED_REPLY;
3333 				return_error_param = -EINVAL;
3334 				return_error_line = __LINE__;
3335 				goto err_bad_parent;
3336 			}
3337 			if (!binder_validate_fixup(target_proc, t->buffer,
3338 						   off_start_offset,
3339 						   parent_offset,
3340 						   fda->parent_offset,
3341 						   last_fixup_obj_off,
3342 						   last_fixup_min_off)) {
3343 				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3344 						  proc->pid, thread->pid);
3345 				return_error = BR_FAILED_REPLY;
3346 				return_error_param = -EINVAL;
3347 				return_error_line = __LINE__;
3348 				goto err_bad_parent;
3349 			}
3350 			ret = binder_translate_fd_array(fda, parent, t, thread,
3351 							in_reply_to);
3352 			if (ret < 0) {
3353 				return_error = BR_FAILED_REPLY;
3354 				return_error_param = ret;
3355 				return_error_line = __LINE__;
3356 				goto err_translate_failed;
3357 			}
3358 			last_fixup_obj_off = parent_offset;
3359 			last_fixup_min_off =
3360 				fda->parent_offset + sizeof(u32) * fda->num_fds;
3361 		} break;
3362 		case BINDER_TYPE_PTR: {
3363 			struct binder_buffer_object *bp =
3364 				to_binder_buffer_object(hdr);
3365 			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3366 			size_t num_valid;
3367 
3368 			if (bp->length > buf_left) {
3369 				binder_user_error("%d:%d got transaction with too large buffer\n",
3370 						  proc->pid, thread->pid);
3371 				return_error = BR_FAILED_REPLY;
3372 				return_error_param = -EINVAL;
3373 				return_error_line = __LINE__;
3374 				goto err_bad_offset;
3375 			}
3376 			if (binder_alloc_copy_user_to_buffer(
3377 						&target_proc->alloc,
3378 						t->buffer,
3379 						sg_buf_offset,
3380 						(const void __user *)
3381 							(uintptr_t)bp->buffer,
3382 						bp->length)) {
3383 				binder_user_error("%d:%d got transaction with invalid sg buffer ptr\n",
3384 						  proc->pid, thread->pid);
3385 				return_error_param = -EFAULT;
3386 				return_error = BR_FAILED_REPLY;
3387 				return_error_line = __LINE__;
3388 				goto err_copy_data_failed;
3389 			}
3390 			/* Fixup buffer pointer to target proc address space */
3391 			bp->buffer = (uintptr_t)
3392 				t->buffer->user_data + sg_buf_offset;
3393 			sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3394 
3395 			num_valid = (buffer_offset - off_start_offset) /
3396 					sizeof(binder_size_t);
3397 			ret = binder_fixup_parent(t, thread, bp,
3398 						  off_start_offset,
3399 						  num_valid,
3400 						  last_fixup_obj_off,
3401 						  last_fixup_min_off);
3402 			if (ret < 0 ||
3403 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3404 							t->buffer,
3405 							object_offset,
3406 							bp, sizeof(*bp))) {
3407 				return_error = BR_FAILED_REPLY;
3408 				return_error_param = ret;
3409 				return_error_line = __LINE__;
3410 				goto err_translate_failed;
3411 			}
3412 			last_fixup_obj_off = object_offset;
3413 			last_fixup_min_off = 0;
3414 		} break;
3415 		default:
3416 			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3417 				proc->pid, thread->pid, hdr->type);
3418 			return_error = BR_FAILED_REPLY;
3419 			return_error_param = -EINVAL;
3420 			return_error_line = __LINE__;
3421 			goto err_bad_object_type;
3422 		}
3423 	}
3424 	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3425 	t->work.type = BINDER_WORK_TRANSACTION;
3426 
3427 	if (reply) {
3428 		binder_enqueue_thread_work(thread, tcomplete);
3429 		binder_inner_proc_lock(target_proc);
3430 		if (target_thread->is_dead) {
3431 			binder_inner_proc_unlock(target_proc);
3432 			goto err_dead_proc_or_thread;
3433 		}
3434 		BUG_ON(t->buffer->async_transaction != 0);
3435 		binder_pop_transaction_ilocked(target_thread, in_reply_to);
3436 		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3437 		binder_inner_proc_unlock(target_proc);
3438 		wake_up_interruptible_sync(&target_thread->wait);
3439 		binder_free_transaction(in_reply_to);
3440 	} else if (!(t->flags & TF_ONE_WAY)) {
3441 		BUG_ON(t->buffer->async_transaction != 0);
3442 		binder_inner_proc_lock(proc);
3443 		/*
3444 		 * Defer the TRANSACTION_COMPLETE so we don't return to
3445 		 * userspace immediately; this lets the target process start
3446 		 * processing this transaction right away, reducing latency.
3447 		 * The TRANSACTION_COMPLETE is then returned when the target
3448 		 * replies (or an error occurs).
3449 		 */
3450 		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3451 		t->need_reply = 1;
3452 		t->from_parent = thread->transaction_stack;
3453 		thread->transaction_stack = t;
3454 		binder_inner_proc_unlock(proc);
3455 		if (!binder_proc_transaction(t, target_proc, target_thread)) {
3456 			binder_inner_proc_lock(proc);
3457 			binder_pop_transaction_ilocked(thread, t);
3458 			binder_inner_proc_unlock(proc);
3459 			goto err_dead_proc_or_thread;
3460 		}
3461 	} else {
3462 		BUG_ON(target_node == NULL);
3463 		BUG_ON(t->buffer->async_transaction != 1);
3464 		binder_enqueue_thread_work(thread, tcomplete);
3465 		if (!binder_proc_transaction(t, target_proc, NULL))
3466 			goto err_dead_proc_or_thread;
3467 	}
3468 	if (target_thread)
3469 		binder_thread_dec_tmpref(target_thread);
3470 	binder_proc_dec_tmpref(target_proc);
3471 	if (target_node)
3472 		binder_dec_node_tmpref(target_node);
3473 	/*
3474 	 * write barrier to synchronize with initialization
3475 	 * of log entry
3476 	 */
3477 	smp_wmb();
3478 	WRITE_ONCE(e->debug_id_done, t_debug_id);
3479 	return;
3480 
3481 err_dead_proc_or_thread:
3482 	return_error = BR_DEAD_REPLY;
3483 	return_error_line = __LINE__;
3484 	binder_dequeue_work(proc, tcomplete);
3485 err_translate_failed:
3486 err_bad_object_type:
3487 err_bad_offset:
3488 err_bad_parent:
3489 err_copy_data_failed:
3490 	binder_free_txn_fixups(t);
3491 	trace_binder_transaction_failed_buffer_release(t->buffer);
3492 	binder_transaction_buffer_release(target_proc, t->buffer,
3493 					  buffer_offset, true);
3494 	if (target_node)
3495 		binder_dec_node_tmpref(target_node);
3496 	target_node = NULL;
3497 	t->buffer->transaction = NULL;
3498 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3499 err_binder_alloc_buf_failed:
3500 err_bad_extra_size:
3501 	if (secctx)
3502 		security_release_secctx(secctx, secctx_sz);
3503 err_get_secctx_failed:
3504 	kfree(tcomplete);
3505 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3506 err_alloc_tcomplete_failed:
3507 	kfree(t);
3508 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
3509 err_alloc_t_failed:
3510 err_bad_todo_list:
3511 err_bad_call_stack:
3512 err_empty_call_stack:
3513 err_dead_binder:
3514 err_invalid_target_handle:
3515 	if (target_thread)
3516 		binder_thread_dec_tmpref(target_thread);
3517 	if (target_proc)
3518 		binder_proc_dec_tmpref(target_proc);
3519 	if (target_node) {
3520 		binder_dec_node(target_node, 1, 0);
3521 		binder_dec_node_tmpref(target_node);
3522 	}
3523 
3524 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3525 		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3526 		     proc->pid, thread->pid, return_error, return_error_param,
3527 		     (u64)tr->data_size, (u64)tr->offsets_size,
3528 		     return_error_line);
3529 
3530 	{
3531 		struct binder_transaction_log_entry *fe;
3532 
3533 		e->return_error = return_error;
3534 		e->return_error_param = return_error_param;
3535 		e->return_error_line = return_error_line;
3536 		fe = binder_transaction_log_add(&binder_transaction_log_failed);
3537 		*fe = *e;
3538 		/*
3539 		 * write barrier to synchronize with initialization
3540 		 * of log entry
3541 		 */
3542 		smp_wmb();
3543 		WRITE_ONCE(e->debug_id_done, t_debug_id);
3544 		WRITE_ONCE(fe->debug_id_done, t_debug_id);
3545 	}
3546 
3547 	BUG_ON(thread->return_error.cmd != BR_OK);
3548 	if (in_reply_to) {
3549 		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3550 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3551 		binder_send_failed_reply(in_reply_to, return_error);
3552 	} else {
3553 		thread->return_error.cmd = return_error;
3554 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3555 	}
3556 }
3557 
3558 /**
3559  * binder_free_buf() - free the specified buffer
3560  * @proc:	binder proc that owns buffer
3561  * @buffer:	buffer to be freed
3562  *
3563  * If the buffer is for an async transaction, enqueue the
3564  * next async transaction from the node.
3565  *
3566  * Clean up the buffer and free it.
3567  */
3568 static void
3569 binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
3570 {
3571 	binder_inner_proc_lock(proc);
3572 	if (buffer->transaction) {
3573 		buffer->transaction->buffer = NULL;
3574 		buffer->transaction = NULL;
3575 	}
3576 	binder_inner_proc_unlock(proc);
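	/*
	 * Async transactions are serialized per node: while one is in
	 * flight, later ones wait on node->async_todo. Freeing this
	 * buffer is what releases the next queued async work item to
	 * the proc's todo list.
	 */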
3577 	if (buffer->async_transaction && buffer->target_node) {
3578 		struct binder_node *buf_node;
3579 		struct binder_work *w;
3580 
3581 		buf_node = buffer->target_node;
3582 		binder_node_inner_lock(buf_node);
3583 		BUG_ON(!buf_node->has_async_transaction);
3584 		BUG_ON(buf_node->proc != proc);
3585 		w = binder_dequeue_work_head_ilocked(
3586 				&buf_node->async_todo);
3587 		if (!w) {
3588 			buf_node->has_async_transaction = false;
3589 		} else {
3590 			binder_enqueue_work_ilocked(
3591 					w, &proc->todo);
3592 			binder_wakeup_proc_ilocked(proc);
3593 		}
3594 		binder_node_inner_unlock(buf_node);
3595 	}
3596 	trace_binder_transaction_buffer_release(buffer);
3597 	binder_transaction_buffer_release(proc, buffer, 0, false);
3598 	binder_alloc_free_buf(&proc->alloc, buffer);
3599 }
3600 
3601 static int binder_thread_write(struct binder_proc *proc,
3602 			struct binder_thread *thread,
3603 			binder_uintptr_t binder_buffer, size_t size,
3604 			binder_size_t *consumed)
3605 {
3606 	uint32_t cmd;
3607 	struct binder_context *context = proc->context;
3608 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3609 	void __user *ptr = buffer + *consumed;
3610 	void __user *end = buffer + size;
3611 
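	/*
	 * The write buffer is a packed stream of u32 BC_* command codes,
	 * each immediately followed by that command's fixed-size payload.
	 * Consume commands until the buffer is exhausted or a pending
	 * return error must be delivered to userspace first.
	 */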
3612 	while (ptr < end && thread->return_error.cmd == BR_OK) {
3613 		int ret;
3614 
3615 		if (get_user(cmd, (uint32_t __user *)ptr))
3616 			return -EFAULT;
3617 		ptr += sizeof(uint32_t);
3618 		trace_binder_command(cmd);
3619 		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3620 			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3621 			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3622 			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3623 		}
3624 		switch (cmd) {
3625 		case BC_INCREFS:
3626 		case BC_ACQUIRE:
3627 		case BC_RELEASE:
3628 		case BC_DECREFS: {
3629 			uint32_t target;
3630 			const char *debug_string;
3631 			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3632 			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3633 			struct binder_ref_data rdata;
3634 
3635 			if (get_user(target, (uint32_t __user *)ptr))
3636 				return -EFAULT;
3637 
3638 			ptr += sizeof(uint32_t);
3639 			ret = -1;
3640 			if (increment && !target) {
3641 				struct binder_node *ctx_mgr_node;
3642 				mutex_lock(&context->context_mgr_node_lock);
3643 				ctx_mgr_node = context->binder_context_mgr_node;
3644 				if (ctx_mgr_node)
3645 					ret = binder_inc_ref_for_node(
3646 							proc, ctx_mgr_node,
3647 							strong, NULL, &rdata);
3648 				mutex_unlock(&context->context_mgr_node_lock);
3649 			}
3650 			if (ret)
3651 				ret = binder_update_ref_for_handle(
3652 						proc, target, increment, strong,
3653 						&rdata);
3654 			if (!ret && rdata.desc != target) {
3655 				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3656 					proc->pid, thread->pid,
3657 					target, rdata.desc);
3658 			}
3659 			switch (cmd) {
3660 			case BC_INCREFS:
3661 				debug_string = "IncRefs";
3662 				break;
3663 			case BC_ACQUIRE:
3664 				debug_string = "Acquire";
3665 				break;
3666 			case BC_RELEASE:
3667 				debug_string = "Release";
3668 				break;
3669 			case BC_DECREFS:
3670 			default:
3671 				debug_string = "DecRefs";
3672 				break;
3673 			}
3674 			if (ret) {
3675 				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3676 					proc->pid, thread->pid, debug_string,
3677 					strong, target, ret);
3678 				break;
3679 			}
3680 			binder_debug(BINDER_DEBUG_USER_REFS,
3681 				     "%d:%d %s ref %d desc %d s %d w %d\n",
3682 				     proc->pid, thread->pid, debug_string,
3683 				     rdata.debug_id, rdata.desc, rdata.strong,
3684 				     rdata.weak);
3685 			break;
3686 		}
3687 		case BC_INCREFS_DONE:
3688 		case BC_ACQUIRE_DONE: {
3689 			binder_uintptr_t node_ptr;
3690 			binder_uintptr_t cookie;
3691 			struct binder_node *node;
3692 			bool free_node;
3693 
3694 			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3695 				return -EFAULT;
3696 			ptr += sizeof(binder_uintptr_t);
3697 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3698 				return -EFAULT;
3699 			ptr += sizeof(binder_uintptr_t);
3700 			node = binder_get_node(proc, node_ptr);
3701 			if (node == NULL) {
3702 				binder_user_error("%d:%d %s u%016llx no match\n",
3703 					proc->pid, thread->pid,
3704 					cmd == BC_INCREFS_DONE ?
3705 					"BC_INCREFS_DONE" :
3706 					"BC_ACQUIRE_DONE",
3707 					(u64)node_ptr);
3708 				break;
3709 			}
3710 			if (cookie != node->cookie) {
3711 				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3712 					proc->pid, thread->pid,
3713 					cmd == BC_INCREFS_DONE ?
3714 					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3715 					(u64)node_ptr, node->debug_id,
3716 					(u64)cookie, (u64)node->cookie);
3717 				binder_put_node(node);
3718 				break;
3719 			}
3720 			binder_node_inner_lock(node);
3721 			if (cmd == BC_ACQUIRE_DONE) {
3722 				if (node->pending_strong_ref == 0) {
3723 					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3724 						proc->pid, thread->pid,
3725 						node->debug_id);
3726 					binder_node_inner_unlock(node);
3727 					binder_put_node(node);
3728 					break;
3729 				}
3730 				node->pending_strong_ref = 0;
3731 			} else {
3732 				if (node->pending_weak_ref == 0) {
3733 					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3734 						proc->pid, thread->pid,
3735 						node->debug_id);
3736 					binder_node_inner_unlock(node);
3737 					binder_put_node(node);
3738 					break;
3739 				}
3740 				node->pending_weak_ref = 0;
3741 			}
3742 			free_node = binder_dec_node_nilocked(node,
3743 					cmd == BC_ACQUIRE_DONE, 0);
3744 			WARN_ON(free_node);
3745 			binder_debug(BINDER_DEBUG_USER_REFS,
3746 				     "%d:%d %s node %d ls %d lw %d tr %d\n",
3747 				     proc->pid, thread->pid,
3748 				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3749 				     node->debug_id, node->local_strong_refs,
3750 				     node->local_weak_refs, node->tmp_refs);
3751 			binder_node_inner_unlock(node);
3752 			binder_put_node(node);
3753 			break;
3754 		}
3755 		case BC_ATTEMPT_ACQUIRE:
3756 			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3757 			return -EINVAL;
3758 		case BC_ACQUIRE_RESULT:
3759 			pr_err("BC_ACQUIRE_RESULT not supported\n");
3760 			return -EINVAL;
3761 
3762 		case BC_FREE_BUFFER: {
3763 			binder_uintptr_t data_ptr;
3764 			struct binder_buffer *buffer;
3765 
3766 			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3767 				return -EFAULT;
3768 			ptr += sizeof(binder_uintptr_t);
3769 
3770 			buffer = binder_alloc_prepare_to_free(&proc->alloc,
3771 							      data_ptr);
3772 			if (IS_ERR_OR_NULL(buffer)) {
3773 				if (PTR_ERR(buffer) == -EPERM) {
3774 					binder_user_error(
3775 						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3776 						proc->pid, thread->pid,
3777 						(u64)data_ptr);
3778 				} else {
3779 					binder_user_error(
3780 						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
3781 						proc->pid, thread->pid,
3782 						(u64)data_ptr);
3783 				}
3784 				break;
3785 			}
3786 			binder_debug(BINDER_DEBUG_FREE_BUFFER,
3787 				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3788 				     proc->pid, thread->pid, (u64)data_ptr,
3789 				     buffer->debug_id,
3790 				     buffer->transaction ? "active" : "finished");
3791 			binder_free_buf(proc, buffer);
3792 			break;
3793 		}
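		/*
		 * A minimal userspace sketch of this command (binder_fd and
		 * tr are assumed to come from an earlier open() and a
		 * BR_TRANSACTION read). Once a received transaction has been
		 * consumed, its buffer is returned with:
		 *
		 *	struct {
		 *		uint32_t cmd;
		 *		binder_uintptr_t ptr;
		 *	} __attribute__((packed)) wr = {
		 *		.cmd = BC_FREE_BUFFER,
		 *		.ptr = tr.data.ptr.buffer,
		 *	};
		 *	struct binder_write_read bwr = {
		 *		.write_size = sizeof(wr),
		 *		.write_buffer = (binder_uintptr_t)&wr,
		 *	};
		 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
		 */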
3794 
3795 		case BC_TRANSACTION_SG:
3796 		case BC_REPLY_SG: {
3797 			struct binder_transaction_data_sg tr;
3798 
3799 			if (copy_from_user(&tr, ptr, sizeof(tr)))
3800 				return -EFAULT;
3801 			ptr += sizeof(tr);
3802 			binder_transaction(proc, thread, &tr.transaction_data,
3803 					   cmd == BC_REPLY_SG, tr.buffers_size);
3804 			break;
3805 		}
3806 		case BC_TRANSACTION:
3807 		case BC_REPLY: {
3808 			struct binder_transaction_data tr;
3809 
3810 			if (copy_from_user(&tr, ptr, sizeof(tr)))
3811 				return -EFAULT;
3812 			ptr += sizeof(tr);
3813 			binder_transaction(proc, thread, &tr,
3814 					   cmd == BC_REPLY, 0);
3815 			break;
3816 		}
3817 
3818 		case BC_REGISTER_LOOPER:
3819 			binder_debug(BINDER_DEBUG_THREADS,
3820 				     "%d:%d BC_REGISTER_LOOPER\n",
3821 				     proc->pid, thread->pid);
3822 			binder_inner_proc_lock(proc);
3823 			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3824 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3825 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3826 					proc->pid, thread->pid);
3827 			} else if (proc->requested_threads == 0) {
3828 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3829 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3830 					proc->pid, thread->pid);
3831 			} else {
3832 				proc->requested_threads--;
3833 				proc->requested_threads_started++;
3834 			}
3835 			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3836 			binder_inner_proc_unlock(proc);
3837 			break;
3838 		case BC_ENTER_LOOPER:
3839 			binder_debug(BINDER_DEBUG_THREADS,
3840 				     "%d:%d BC_ENTER_LOOPER\n",
3841 				     proc->pid, thread->pid);
3842 			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3843 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3844 				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3845 					proc->pid, thread->pid);
3846 			}
3847 			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3848 			break;
3849 		case BC_EXIT_LOOPER:
3850 			binder_debug(BINDER_DEBUG_THREADS,
3851 				     "%d:%d BC_EXIT_LOOPER\n",
3852 				     proc->pid, thread->pid);
3853 			thread->looper |= BINDER_LOOPER_STATE_EXITED;
3854 			break;
3855 
3856 		case BC_REQUEST_DEATH_NOTIFICATION:
3857 		case BC_CLEAR_DEATH_NOTIFICATION: {
3858 			uint32_t target;
3859 			binder_uintptr_t cookie;
3860 			struct binder_ref *ref;
3861 			struct binder_ref_death *death = NULL;
3862 
3863 			if (get_user(target, (uint32_t __user *)ptr))
3864 				return -EFAULT;
3865 			ptr += sizeof(uint32_t);
3866 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3867 				return -EFAULT;
3868 			ptr += sizeof(binder_uintptr_t);
3869 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3870 				/*
3871 				 * Allocate memory for death notification
3872 				 * before taking lock
3873 				 */
3874 				death = kzalloc(sizeof(*death), GFP_KERNEL);
3875 				if (death == NULL) {
3876 					WARN_ON(thread->return_error.cmd !=
3877 						BR_OK);
3878 					thread->return_error.cmd = BR_ERROR;
3879 					binder_enqueue_thread_work(
3880 						thread,
3881 						&thread->return_error.work);
3882 					binder_debug(
3883 						BINDER_DEBUG_FAILED_TRANSACTION,
3884 						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3885 						proc->pid, thread->pid);
3886 					break;
3887 				}
3888 			}
3889 			binder_proc_lock(proc);
3890 			ref = binder_get_ref_olocked(proc, target, false);
3891 			if (ref == NULL) {
3892 				binder_user_error("%d:%d %s invalid ref %d\n",
3893 					proc->pid, thread->pid,
3894 					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3895 					"BC_REQUEST_DEATH_NOTIFICATION" :
3896 					"BC_CLEAR_DEATH_NOTIFICATION",
3897 					target);
3898 				binder_proc_unlock(proc);
3899 				kfree(death);
3900 				break;
3901 			}
3902 
3903 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3904 				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3905 				     proc->pid, thread->pid,
3906 				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3907 				     "BC_REQUEST_DEATH_NOTIFICATION" :
3908 				     "BC_CLEAR_DEATH_NOTIFICATION",
3909 				     (u64)cookie, ref->data.debug_id,
3910 				     ref->data.desc, ref->data.strong,
3911 				     ref->data.weak, ref->node->debug_id);
3912 
3913 			binder_node_lock(ref->node);
3914 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3915 				if (ref->death) {
3916 					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3917 						proc->pid, thread->pid);
3918 					binder_node_unlock(ref->node);
3919 					binder_proc_unlock(proc);
3920 					kfree(death);
3921 					break;
3922 				}
3923 				binder_stats_created(BINDER_STAT_DEATH);
3924 				INIT_LIST_HEAD(&death->work.entry);
3925 				death->cookie = cookie;
3926 				ref->death = death;
3927 				if (ref->node->proc == NULL) {
3928 					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3929 
3930 					binder_inner_proc_lock(proc);
3931 					binder_enqueue_work_ilocked(
3932 						&ref->death->work, &proc->todo);
3933 					binder_wakeup_proc_ilocked(proc);
3934 					binder_inner_proc_unlock(proc);
3935 				}
3936 			} else {
3937 				if (ref->death == NULL) {
3938 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3939 						proc->pid, thread->pid);
3940 					binder_node_unlock(ref->node);
3941 					binder_proc_unlock(proc);
3942 					break;
3943 				}
3944 				death = ref->death;
3945 				if (death->cookie != cookie) {
3946 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3947 						proc->pid, thread->pid,
3948 						(u64)death->cookie,
3949 						(u64)cookie);
3950 					binder_node_unlock(ref->node);
3951 					binder_proc_unlock(proc);
3952 					break;
3953 				}
3954 				ref->death = NULL;
3955 				binder_inner_proc_lock(proc);
3956 				if (list_empty(&death->work.entry)) {
3957 					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3958 					if (thread->looper &
3959 					    (BINDER_LOOPER_STATE_REGISTERED |
3960 					     BINDER_LOOPER_STATE_ENTERED))
3961 						binder_enqueue_thread_work_ilocked(
3962 								thread,
3963 								&death->work);
3964 					else {
3965 						binder_enqueue_work_ilocked(
3966 								&death->work,
3967 								&proc->todo);
3968 						binder_wakeup_proc_ilocked(
3969 								proc);
3970 					}
3971 				} else {
3972 					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3973 					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3974 				}
3975 				binder_inner_proc_unlock(proc);
3976 			}
3977 			binder_node_unlock(ref->node);
3978 			binder_proc_unlock(proc);
3979 		} break;
3980 		case BC_DEAD_BINDER_DONE: {
3981 			struct binder_work *w;
3982 			binder_uintptr_t cookie;
3983 			struct binder_ref_death *death = NULL;
3984 
3985 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3986 				return -EFAULT;
3987 
3988 			ptr += sizeof(cookie);
3989 			binder_inner_proc_lock(proc);
3990 			list_for_each_entry(w, &proc->delivered_death,
3991 					    entry) {
3992 				struct binder_ref_death *tmp_death =
3993 					container_of(w,
3994 						     struct binder_ref_death,
3995 						     work);
3996 
3997 				if (tmp_death->cookie == cookie) {
3998 					death = tmp_death;
3999 					break;
4000 				}
4001 			}
4002 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
4003 				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4004 				     proc->pid, thread->pid, (u64)cookie,
4005 				     death);
4006 			if (death == NULL) {
4007 				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4008 					proc->pid, thread->pid, (u64)cookie);
4009 				binder_inner_proc_unlock(proc);
4010 				break;
4011 			}
4012 			binder_dequeue_work_ilocked(&death->work);
4013 			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4014 				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4015 				if (thread->looper &
4016 					(BINDER_LOOPER_STATE_REGISTERED |
4017 					 BINDER_LOOPER_STATE_ENTERED))
4018 					binder_enqueue_thread_work_ilocked(
4019 						thread, &death->work);
4020 				else {
4021 					binder_enqueue_work_ilocked(
4022 							&death->work,
4023 							&proc->todo);
4024 					binder_wakeup_proc_ilocked(proc);
4025 				}
4026 			}
4027 			binder_inner_proc_unlock(proc);
4028 		} break;
4029 
4030 		default:
4031 			pr_err("%d:%d unknown command %d\n",
4032 			       proc->pid, thread->pid, cmd);
4033 			return -EINVAL;
4034 		}
4035 		*consumed = ptr - buffer;
4036 	}
4037 	return 0;
4038 }
4039 
4040 static void binder_stat_br(struct binder_proc *proc,
4041 			   struct binder_thread *thread, uint32_t cmd)
4042 {
4043 	trace_binder_return(cmd);
4044 	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4045 		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4046 		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4047 		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4048 	}
4049 }
4050 
4051 static int binder_put_node_cmd(struct binder_proc *proc,
4052 			       struct binder_thread *thread,
4053 			       void __user **ptrp,
4054 			       binder_uintptr_t node_ptr,
4055 			       binder_uintptr_t node_cookie,
4056 			       int node_debug_id,
4057 			       uint32_t cmd, const char *cmd_name)
4058 {
4059 	void __user *ptr = *ptrp;
4060 
4061 	if (put_user(cmd, (uint32_t __user *)ptr))
4062 		return -EFAULT;
4063 	ptr += sizeof(uint32_t);
4064 
4065 	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4066 		return -EFAULT;
4067 	ptr += sizeof(binder_uintptr_t);
4068 
4069 	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4070 		return -EFAULT;
4071 	ptr += sizeof(binder_uintptr_t);
4072 
4073 	binder_stat_br(proc, thread, cmd);
4074 	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4075 		     proc->pid, thread->pid, cmd_name, node_debug_id,
4076 		     (u64)node_ptr, (u64)node_cookie);
4077 
4078 	*ptrp = ptr;
4079 	return 0;
4080 }
4081 
4082 static int binder_wait_for_work(struct binder_thread *thread,
4083 				bool do_proc_work)
4084 {
4085 	DEFINE_WAIT(wait);
4086 	struct binder_proc *proc = thread->proc;
4087 	int ret = 0;
4088 
4089 	freezer_do_not_count();
4090 	binder_inner_proc_lock(proc);
4091 	for (;;) {
4092 		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4093 		if (binder_has_work_ilocked(thread, do_proc_work))
4094 			break;
4095 		if (do_proc_work)
4096 			list_add(&thread->waiting_thread_node,
4097 				 &proc->waiting_threads);
4098 		binder_inner_proc_unlock(proc);
4099 		schedule();
4100 		binder_inner_proc_lock(proc);
4101 		list_del_init(&thread->waiting_thread_node);
4102 		if (signal_pending(current)) {
4103 			ret = -ERESTARTSYS;
4104 			break;
4105 		}
4106 	}
4107 	finish_wait(&thread->wait, &wait);
4108 	binder_inner_proc_unlock(proc);
4109 	freezer_count();
4110 
4111 	return ret;
4112 }
4113 
4114 /**
4115  * binder_apply_fd_fixups() - finish fd translation
4116  * @proc:         binder_proc associated with @t->buffer
4117  * @t:	binder transaction with list of fd fixups
4118  *
4119  * Now that we are in the context of the transaction target
4120  * process, we can allocate and install fds. Process the
4121  * list of fds to translate and fix up the buffer with the
4122  * new fds.
4123  *
4124  * If we fail to allocate an fd, then free the resources by
4125  * fput'ing files that have not been processed and ksys_close'ing
4126  * any fds that have already been allocated.
4127  */
4128 static int binder_apply_fd_fixups(struct binder_proc *proc,
4129 				  struct binder_transaction *t)
4130 {
4131 	struct binder_txn_fd_fixup *fixup, *tmp;
4132 	int ret = 0;
4133 
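	/*
	 * First pass: reserve an fd and install the file for each fixup,
	 * patching the numeric fd into the transaction buffer. Second
	 * pass: on success just free the fixup entries; on failure,
	 * fput() files that were never installed and arrange to close
	 * fds that already were.
	 */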
4134 	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4135 		int fd = get_unused_fd_flags(O_CLOEXEC);
4136 
4137 		if (fd < 0) {
4138 			binder_debug(BINDER_DEBUG_TRANSACTION,
4139 				     "failed fd fixup txn %d fd %d\n",
4140 				     t->debug_id, fd);
4141 			ret = -ENOMEM;
4142 			break;
4143 		}
4144 		binder_debug(BINDER_DEBUG_TRANSACTION,
4145 			     "fd fixup txn %d fd %d\n",
4146 			     t->debug_id, fd);
4147 		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4148 		fd_install(fd, fixup->file);
4149 		fixup->file = NULL;
4150 		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4151 						fixup->offset, &fd,
4152 						sizeof(u32))) {
4153 			ret = -EINVAL;
4154 			break;
4155 		}
4156 	}
4157 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4158 		if (fixup->file) {
4159 			fput(fixup->file);
4160 		} else if (ret) {
4161 			u32 fd;
4162 			int err;
4163 
4164 			err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
4165 							    t->buffer,
4166 							    fixup->offset,
4167 							    sizeof(fd));
4168 			WARN_ON(err);
4169 			if (!err)
4170 				binder_deferred_fd_close(fd);
4171 		}
4172 		list_del(&fixup->fixup_entry);
4173 		kfree(fixup);
4174 	}
4175 
4176 	return ret;
4177 }
4178 
4179 static int binder_thread_read(struct binder_proc *proc,
4180 			      struct binder_thread *thread,
4181 			      binder_uintptr_t binder_buffer, size_t size,
4182 			      binder_size_t *consumed, int non_block)
4183 {
4184 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4185 	void __user *ptr = buffer + *consumed;
4186 	void __user *end = buffer + size;
4187 
4188 	int ret = 0;
4189 	int wait_for_proc_work;
4190 
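	/*
	 * Begin every fresh read with a BR_NOOP so userspace always gets
	 * back a well-formed command stream, even if no work arrives.
	 */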
4191 	if (*consumed == 0) {
4192 		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4193 			return -EFAULT;
4194 		ptr += sizeof(uint32_t);
4195 	}
4196 
4197 retry:
4198 	binder_inner_proc_lock(proc);
4199 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4200 	binder_inner_proc_unlock(proc);
4201 
4202 	thread->looper |= BINDER_LOOPER_STATE_WAITING;
4203 
4204 	trace_binder_wait_for_work(wait_for_proc_work,
4205 				   !!thread->transaction_stack,
4206 				   !binder_worklist_empty(proc, &thread->todo));
4207 	if (wait_for_proc_work) {
4208 		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4209 					BINDER_LOOPER_STATE_ENTERED))) {
4210 			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4211 				proc->pid, thread->pid, thread->looper);
4212 			wait_event_interruptible(binder_user_error_wait,
4213 						 binder_stop_on_user_error < 2);
4214 		}
4215 		binder_set_nice(proc->default_priority);
4216 	}
4217 
4218 	if (non_block) {
4219 		if (!binder_has_work(thread, wait_for_proc_work))
4220 			ret = -EAGAIN;
4221 	} else {
4222 		ret = binder_wait_for_work(thread, wait_for_proc_work);
4223 	}
4224 
4225 	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4226 
4227 	if (ret)
4228 		return ret;
4229 
4230 	while (1) {
4231 		uint32_t cmd;
4232 		struct binder_transaction_data_secctx tr;
4233 		struct binder_transaction_data *trd = &tr.transaction_data;
4234 		struct binder_work *w = NULL;
4235 		struct list_head *list = NULL;
4236 		struct binder_transaction *t = NULL;
4237 		struct binder_thread *t_from;
4238 		size_t trsize = sizeof(*trd);
4239 
4240 		binder_inner_proc_lock(proc);
4241 		if (!binder_worklist_empty_ilocked(&thread->todo))
4242 			list = &thread->todo;
4243 		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4244 			   wait_for_proc_work)
4245 			list = &proc->todo;
4246 		else {
4247 			binder_inner_proc_unlock(proc);
4248 
4249 			/* no data added beyond the initial 4-byte BR_NOOP */
4250 			if (ptr - buffer == 4 && !thread->looper_need_return)
4251 				goto retry;
4252 			break;
4253 		}
4254 
4255 		if (end - ptr < sizeof(tr) + 4) {
4256 			binder_inner_proc_unlock(proc);
4257 			break;
4258 		}
4259 		w = binder_dequeue_work_head_ilocked(list);
4260 		if (binder_worklist_empty_ilocked(&thread->todo))
4261 			thread->process_todo = false;
4262 
4263 		switch (w->type) {
4264 		case BINDER_WORK_TRANSACTION: {
4265 			binder_inner_proc_unlock(proc);
4266 			t = container_of(w, struct binder_transaction, work);
4267 		} break;
4268 		case BINDER_WORK_RETURN_ERROR: {
4269 			struct binder_error *e = container_of(
4270 					w, struct binder_error, work);
4271 
4272 			WARN_ON(e->cmd == BR_OK);
4273 			binder_inner_proc_unlock(proc);
4274 			if (put_user(e->cmd, (uint32_t __user *)ptr))
4275 				return -EFAULT;
4276 			cmd = e->cmd;
4277 			e->cmd = BR_OK;
4278 			ptr += sizeof(uint32_t);
4279 
4280 			binder_stat_br(proc, thread, cmd);
4281 		} break;
4282 		case BINDER_WORK_TRANSACTION_COMPLETE: {
4283 			binder_inner_proc_unlock(proc);
4284 			cmd = BR_TRANSACTION_COMPLETE;
4285 			kfree(w);
4286 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4287 			if (put_user(cmd, (uint32_t __user *)ptr))
4288 				return -EFAULT;
4289 			ptr += sizeof(uint32_t);
4290 
4291 			binder_stat_br(proc, thread, cmd);
4292 			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4293 				     "%d:%d BR_TRANSACTION_COMPLETE\n",
4294 				     proc->pid, thread->pid);
4295 		} break;
4296 		case BINDER_WORK_NODE: {
4297 			struct binder_node *node = container_of(w, struct binder_node, work);
4298 			int strong, weak;
4299 			binder_uintptr_t node_ptr = node->ptr;
4300 			binder_uintptr_t node_cookie = node->cookie;
4301 			int node_debug_id = node->debug_id;
4302 			int has_weak_ref;
4303 			int has_strong_ref;
4304 			void __user *orig_ptr = ptr;
4305 
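			/*
			 * Compute the node's desired strong/weak state and
			 * emit BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS
			 * as needed to move userspace from its recorded
			 * has_{strong,weak}_ref state to the desired one.
			 */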
4306 			BUG_ON(proc != node->proc);
4307 			strong = node->internal_strong_refs ||
4308 					node->local_strong_refs;
4309 			weak = !hlist_empty(&node->refs) ||
4310 					node->local_weak_refs ||
4311 					node->tmp_refs || strong;
4312 			has_strong_ref = node->has_strong_ref;
4313 			has_weak_ref = node->has_weak_ref;
4314 
4315 			if (weak && !has_weak_ref) {
4316 				node->has_weak_ref = 1;
4317 				node->pending_weak_ref = 1;
4318 				node->local_weak_refs++;
4319 			}
4320 			if (strong && !has_strong_ref) {
4321 				node->has_strong_ref = 1;
4322 				node->pending_strong_ref = 1;
4323 				node->local_strong_refs++;
4324 			}
4325 			if (!strong && has_strong_ref)
4326 				node->has_strong_ref = 0;
4327 			if (!weak && has_weak_ref)
4328 				node->has_weak_ref = 0;
4329 			if (!weak && !strong) {
4330 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4331 					     "%d:%d node %d u%016llx c%016llx deleted\n",
4332 					     proc->pid, thread->pid,
4333 					     node_debug_id,
4334 					     (u64)node_ptr,
4335 					     (u64)node_cookie);
4336 				rb_erase(&node->rb_node, &proc->nodes);
4337 				binder_inner_proc_unlock(proc);
4338 				binder_node_lock(node);
4339 				/*
4340 				 * Acquire the node lock before freeing the
4341 				 * node to serialize with other threads that
4342 				 * may have been holding the node lock while
4343 				 * decrementing this node (avoids race where
4344 				 * this thread frees while the other thread
4345 				 * is unlocking the node after the final
4346 				 * decrement)
4347 				 */
4348 				binder_node_unlock(node);
4349 				binder_free_node(node);
4350 			} else
4351 				binder_inner_proc_unlock(proc);
4352 
4353 			if (weak && !has_weak_ref)
4354 				ret = binder_put_node_cmd(
4355 						proc, thread, &ptr, node_ptr,
4356 						node_cookie, node_debug_id,
4357 						BR_INCREFS, "BR_INCREFS");
4358 			if (!ret && strong && !has_strong_ref)
4359 				ret = binder_put_node_cmd(
4360 						proc, thread, &ptr, node_ptr,
4361 						node_cookie, node_debug_id,
4362 						BR_ACQUIRE, "BR_ACQUIRE");
4363 			if (!ret && !strong && has_strong_ref)
4364 				ret = binder_put_node_cmd(
4365 						proc, thread, &ptr, node_ptr,
4366 						node_cookie, node_debug_id,
4367 						BR_RELEASE, "BR_RELEASE");
4368 			if (!ret && !weak && has_weak_ref)
4369 				ret = binder_put_node_cmd(
4370 						proc, thread, &ptr, node_ptr,
4371 						node_cookie, node_debug_id,
4372 						BR_DECREFS, "BR_DECREFS");
4373 			if (orig_ptr == ptr)
4374 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4375 					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
4376 					     proc->pid, thread->pid,
4377 					     node_debug_id,
4378 					     (u64)node_ptr,
4379 					     (u64)node_cookie);
4380 			if (ret)
4381 				return ret;
4382 		} break;
4383 		case BINDER_WORK_DEAD_BINDER:
4384 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4385 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4386 			struct binder_ref_death *death;
4387 			uint32_t cmd;
4388 			binder_uintptr_t cookie;
4389 
4390 			death = container_of(w, struct binder_ref_death, work);
4391 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4392 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4393 			else
4394 				cmd = BR_DEAD_BINDER;
4395 			cookie = death->cookie;
4396 
4397 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4398 				     "%d:%d %s %016llx\n",
4399 				      proc->pid, thread->pid,
4400 				      cmd == BR_DEAD_BINDER ?
4401 				      "BR_DEAD_BINDER" :
4402 				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4403 				      (u64)cookie);
4404 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4405 				binder_inner_proc_unlock(proc);
4406 				kfree(death);
4407 				binder_stats_deleted(BINDER_STAT_DEATH);
4408 			} else {
4409 				binder_enqueue_work_ilocked(
4410 						w, &proc->delivered_death);
4411 				binder_inner_proc_unlock(proc);
4412 			}
4413 			if (put_user(cmd, (uint32_t __user *)ptr))
4414 				return -EFAULT;
4415 			ptr += sizeof(uint32_t);
4416 			if (put_user(cookie,
4417 				     (binder_uintptr_t __user *)ptr))
4418 				return -EFAULT;
4419 			ptr += sizeof(binder_uintptr_t);
4420 			binder_stat_br(proc, thread, cmd);
4421 			if (cmd == BR_DEAD_BINDER)
4422 				goto done; /* DEAD_BINDER notifications can cause transactions */
4423 		} break;
4424 		default:
4425 			binder_inner_proc_unlock(proc);
4426 			pr_err("%d:%d: bad work type %d\n",
4427 			       proc->pid, thread->pid, w->type);
4428 			break;
4429 		}
4430 
4431 		if (!t)
4432 			continue;
4433 
4434 		BUG_ON(t->buffer == NULL);
4435 		if (t->buffer->target_node) {
4436 			struct binder_node *target_node = t->buffer->target_node;
4437 
4438 			trd->target.ptr = target_node->ptr;
4439 			trd->cookie = target_node->cookie;
4440 			t->saved_priority = task_nice(current);
4441 			if (t->priority < target_node->min_priority &&
4442 			    !(t->flags & TF_ONE_WAY))
4443 				binder_set_nice(t->priority);
4444 			else if (!(t->flags & TF_ONE_WAY) ||
4445 				 t->saved_priority > target_node->min_priority)
4446 				binder_set_nice(target_node->min_priority);
4447 			cmd = BR_TRANSACTION;
4448 		} else {
4449 			trd->target.ptr = 0;
4450 			trd->cookie = 0;
4451 			cmd = BR_REPLY;
4452 		}
4453 		trd->code = t->code;
4454 		trd->flags = t->flags;
4455 		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4456 
4457 		t_from = binder_get_txn_from(t);
4458 		if (t_from) {
4459 			struct task_struct *sender = t_from->proc->tsk;
4460 
4461 			trd->sender_pid =
4462 				task_tgid_nr_ns(sender,
4463 						task_active_pid_ns(current));
4464 		} else {
4465 			trd->sender_pid = 0;
4466 		}
4467 
4468 		ret = binder_apply_fd_fixups(proc, t);
4469 		if (ret) {
4470 			struct binder_buffer *buffer = t->buffer;
4471 			bool oneway = !!(t->flags & TF_ONE_WAY);
4472 			int tid = t->debug_id;
4473 
4474 			if (t_from)
4475 				binder_thread_dec_tmpref(t_from);
4476 			buffer->transaction = NULL;
4477 			binder_cleanup_transaction(t, "fd fixups failed",
4478 						   BR_FAILED_REPLY);
4479 			binder_free_buf(proc, buffer);
4480 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4481 				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4482 				     proc->pid, thread->pid,
4483 				     oneway ? "async " :
4484 					(cmd == BR_REPLY ? "reply " : ""),
4485 				     tid, BR_FAILED_REPLY, ret, __LINE__);
4486 			if (cmd == BR_REPLY) {
4487 				cmd = BR_FAILED_REPLY;
4488 				if (put_user(cmd, (uint32_t __user *)ptr))
4489 					return -EFAULT;
4490 				ptr += sizeof(uint32_t);
4491 				binder_stat_br(proc, thread, cmd);
4492 				break;
4493 			}
4494 			continue;
4495 		}
4496 		trd->data_size = t->buffer->data_size;
4497 		trd->offsets_size = t->buffer->offsets_size;
4498 		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4499 		trd->data.ptr.offsets = trd->data.ptr.buffer +
4500 					ALIGN(t->buffer->data_size,
4501 					    sizeof(void *));
4502 
4503 		tr.secctx = t->security_ctx;
4504 		if (t->security_ctx) {
4505 			cmd = BR_TRANSACTION_SEC_CTX;
4506 			trsize = sizeof(tr);
4507 		}
4508 		if (put_user(cmd, (uint32_t __user *)ptr)) {
4509 			if (t_from)
4510 				binder_thread_dec_tmpref(t_from);
4511 
4512 			binder_cleanup_transaction(t, "put_user failed",
4513 						   BR_FAILED_REPLY);
4514 
4515 			return -EFAULT;
4516 		}
4517 		ptr += sizeof(uint32_t);
4518 		if (copy_to_user(ptr, &tr, trsize)) {
4519 			if (t_from)
4520 				binder_thread_dec_tmpref(t_from);
4521 
4522 			binder_cleanup_transaction(t, "copy_to_user failed",
4523 						   BR_FAILED_REPLY);
4524 
4525 			return -EFAULT;
4526 		}
4527 		ptr += trsize;
4528 
4529 		trace_binder_transaction_received(t);
4530 		binder_stat_br(proc, thread, cmd);
4531 		binder_debug(BINDER_DEBUG_TRANSACTION,
4532 			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4533 			     proc->pid, thread->pid,
4534 			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4535 				(cmd == BR_TRANSACTION_SEC_CTX) ?
4536 				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4537 			     t->debug_id, t_from ? t_from->proc->pid : 0,
4538 			     t_from ? t_from->pid : 0, cmd,
4539 			     t->buffer->data_size, t->buffer->offsets_size,
4540 			     (u64)trd->data.ptr.buffer,
4541 			     (u64)trd->data.ptr.offsets);
4542 
4543 		if (t_from)
4544 			binder_thread_dec_tmpref(t_from);
4545 		t->buffer->allow_user_free = 1;
4546 		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4547 			binder_inner_proc_lock(thread->proc);
4548 			t->to_parent = thread->transaction_stack;
4549 			t->to_thread = thread;
4550 			thread->transaction_stack = t;
4551 			binder_inner_proc_unlock(thread->proc);
4552 		} else {
4553 			binder_free_transaction(t);
4554 		}
4555 		break;
4556 	}
4557 
4558 done:
4559 
4560 	*consumed = ptr - buffer;
4561 	binder_inner_proc_lock(proc);
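	/*
	 * Ask userspace to spawn another looper thread if no spawn
	 * request is already outstanding, no started thread is idle,
	 * and we are still below max_threads.
	 */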
4562 	if (proc->requested_threads == 0 &&
4563 	    list_empty(&thread->proc->waiting_threads) &&
4564 	    proc->requested_threads_started < proc->max_threads &&
4565 	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4566 	     BINDER_LOOPER_STATE_ENTERED))
4567 	     /* the user-space code fails to spawn a new thread if we leave this out */) {
4568 		proc->requested_threads++;
4569 		binder_inner_proc_unlock(proc);
4570 		binder_debug(BINDER_DEBUG_THREADS,
4571 			     "%d:%d BR_SPAWN_LOOPER\n",
4572 			     proc->pid, thread->pid);
4573 		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4574 			return -EFAULT;
4575 		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4576 	} else
4577 		binder_inner_proc_unlock(proc);
4578 	return 0;
4579 }
4580 
4581 static void binder_release_work(struct binder_proc *proc,
4582 				struct list_head *list)
4583 {
4584 	struct binder_work *w;
4585 
4586 	while (1) {
4587 		w = binder_dequeue_work_head(proc, list);
4588 		if (!w)
4589 			return;
4590 
4591 		switch (w->type) {
4592 		case BINDER_WORK_TRANSACTION: {
4593 			struct binder_transaction *t;
4594 
4595 			t = container_of(w, struct binder_transaction, work);
4596 
4597 			binder_cleanup_transaction(t, "process died.",
4598 						   BR_DEAD_REPLY);
4599 		} break;
4600 		case BINDER_WORK_RETURN_ERROR: {
4601 			struct binder_error *e = container_of(
4602 					w, struct binder_error, work);
4603 
4604 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4605 				"undelivered TRANSACTION_ERROR: %u\n",
4606 				e->cmd);
4607 		} break;
4608 		case BINDER_WORK_TRANSACTION_COMPLETE: {
4609 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4610 				"undelivered TRANSACTION_COMPLETE\n");
4611 			kfree(w);
4612 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4613 		} break;
4614 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4615 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4616 			struct binder_ref_death *death;
4617 
4618 			death = container_of(w, struct binder_ref_death, work);
4619 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4620 				"undelivered death notification, %016llx\n",
4621 				(u64)death->cookie);
4622 			kfree(death);
4623 			binder_stats_deleted(BINDER_STAT_DEATH);
4624 		} break;
4625 		default:
4626 			pr_err("unexpected work type, %d, not freed\n",
4627 			       w->type);
4628 			break;
4629 		}
4630 	}
4631 
4632 }
4633 
4634 static struct binder_thread *binder_get_thread_ilocked(
4635 		struct binder_proc *proc, struct binder_thread *new_thread)
4636 {
4637 	struct binder_thread *thread = NULL;
4638 	struct rb_node *parent = NULL;
4639 	struct rb_node **p = &proc->threads.rb_node;
4640 
4641 	while (*p) {
4642 		parent = *p;
4643 		thread = rb_entry(parent, struct binder_thread, rb_node);
4644 
4645 		if (current->pid < thread->pid)
4646 			p = &(*p)->rb_left;
4647 		else if (current->pid > thread->pid)
4648 			p = &(*p)->rb_right;
4649 		else
4650 			return thread;
4651 	}
4652 	if (!new_thread)
4653 		return NULL;
4654 	thread = new_thread;
4655 	binder_stats_created(BINDER_STAT_THREAD);
4656 	thread->proc = proc;
4657 	thread->pid = current->pid;
4658 	atomic_set(&thread->tmp_ref, 0);
4659 	init_waitqueue_head(&thread->wait);
4660 	INIT_LIST_HEAD(&thread->todo);
4661 	rb_link_node(&thread->rb_node, parent, p);
4662 	rb_insert_color(&thread->rb_node, &proc->threads);
4663 	thread->looper_need_return = true;
4664 	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4665 	thread->return_error.cmd = BR_OK;
4666 	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4667 	thread->reply_error.cmd = BR_OK;
4668 	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4669 	return thread;
4670 }
4671 
4672 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4673 {
4674 	struct binder_thread *thread;
4675 	struct binder_thread *new_thread;
4676 
4677 	binder_inner_proc_lock(proc);
4678 	thread = binder_get_thread_ilocked(proc, NULL);
4679 	binder_inner_proc_unlock(proc);
4680 	if (!thread) {
4681 		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4682 		if (new_thread == NULL)
4683 			return NULL;
4684 		binder_inner_proc_lock(proc);
4685 		thread = binder_get_thread_ilocked(proc, new_thread);
4686 		binder_inner_proc_unlock(proc);
4687 		if (thread != new_thread)
4688 			kfree(new_thread);
4689 	}
4690 	return thread;
4691 }
4692 
4693 static void binder_free_proc(struct binder_proc *proc)
4694 {
4695 	BUG_ON(!list_empty(&proc->todo));
4696 	BUG_ON(!list_empty(&proc->delivered_death));
4697 	binder_alloc_deferred_release(&proc->alloc);
4698 	put_task_struct(proc->tsk);
4699 	binder_stats_deleted(BINDER_STAT_PROC);
4700 	kfree(proc);
4701 }
4702 
4703 static void binder_free_thread(struct binder_thread *thread)
4704 {
4705 	BUG_ON(!list_empty(&thread->todo));
4706 	binder_stats_deleted(BINDER_STAT_THREAD);
4707 	binder_proc_dec_tmpref(thread->proc);
4708 	kfree(thread);
4709 }
4710 
4711 static int binder_thread_release(struct binder_proc *proc,
4712 				 struct binder_thread *thread)
4713 {
4714 	struct binder_transaction *t;
4715 	struct binder_transaction *send_reply = NULL;
4716 	int active_transactions = 0;
4717 	struct binder_transaction *last_t = NULL;
4718 
4719 	binder_inner_proc_lock(thread->proc);
4720 	/*
4721 	 * take a ref on the proc so it survives
4722 	 * after we remove this thread from proc->threads.
4723 	 * The corresponding dec is when we actually
4724 	 * free the thread in binder_free_thread()
4725 	 */
4726 	proc->tmp_ref++;
4727 	/*
4728 	 * take a ref on this thread to ensure it
4729 	 * survives while we are releasing it
4730 	 */
4731 	atomic_inc(&thread->tmp_ref);
4732 	rb_erase(&thread->rb_node, &proc->threads);
4733 	t = thread->transaction_stack;
4734 	if (t) {
4735 		spin_lock(&t->lock);
4736 		if (t->to_thread == thread)
4737 			send_reply = t;
4738 	} else {
4739 		__acquire(&t->lock);
4740 	}
4741 	thread->is_dead = true;
4742 
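	/*
	 * Walk the transaction stack and sever this thread's links:
	 * clear to_proc/to_thread on transactions addressed to us and
	 * clear from on transactions we sent. A pending reply captured
	 * in send_reply above is failed with BR_DEAD_REPLY further down.
	 */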
4743 	while (t) {
4744 		last_t = t;
4745 		active_transactions++;
4746 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4747 			     "release %d:%d transaction %d %s, still active\n",
4748 			      proc->pid, thread->pid,
4749 			     t->debug_id,
4750 			     (t->to_thread == thread) ? "in" : "out");
4751 
4752 		if (t->to_thread == thread) {
4753 			t->to_proc = NULL;
4754 			t->to_thread = NULL;
4755 			if (t->buffer) {
4756 				t->buffer->transaction = NULL;
4757 				t->buffer = NULL;
4758 			}
4759 			t = t->to_parent;
4760 		} else if (t->from == thread) {
4761 			t->from = NULL;
4762 			t = t->from_parent;
4763 		} else
4764 			BUG();
4765 		spin_unlock(&last_t->lock);
4766 		if (t)
4767 			spin_lock(&t->lock);
4768 		else
4769 			__acquire(&t->lock);
4770 	}
4771 	/* annotation for sparse, lock not acquired in last iteration above */
4772 	__release(&t->lock);
4773 
4774 	/*
4775 	 * If this thread used poll, make sure we remove the waitqueue
4776 	 * from any epoll data structures holding it with POLLFREE.
4777 	 * waitqueue_active() is safe to use here because we're holding
4778 	 * the inner lock.
4779 	 */
4780 	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4781 	    waitqueue_active(&thread->wait)) {
4782 		wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
4783 	}
4784 
4785 	binder_inner_proc_unlock(thread->proc);
4786 
4787 	/*
4788 	 * This is needed to avoid races between wake_up_poll() above and
4789 	 * ep_remove_waitqueue() called for other reasons (e.g. the epoll file
4790 	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4791 	 * lock, so we can be sure it's done after calling synchronize_rcu().
4792 	 */
4793 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
4794 		synchronize_rcu();
4795 
4796 	if (send_reply)
4797 		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4798 	binder_release_work(proc, &thread->todo);
4799 	binder_thread_dec_tmpref(thread);
4800 	return active_transactions;
4801 }
4802 
4803 static __poll_t binder_poll(struct file *filp,
4804 				struct poll_table_struct *wait)
4805 {
4806 	struct binder_proc *proc = filp->private_data;
4807 	struct binder_thread *thread = NULL;
4808 	bool wait_for_proc_work;
4809 
4810 	thread = binder_get_thread(proc);
4811 	if (!thread)
4812 		return POLLERR;
4813 
4814 	binder_inner_proc_lock(thread->proc);
4815 	thread->looper |= BINDER_LOOPER_STATE_POLL;
4816 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4817 
4818 	binder_inner_proc_unlock(thread->proc);
4819 
4820 	poll_wait(filp, &thread->wait, wait);
4821 
4822 	if (binder_has_work(thread, wait_for_proc_work))
4823 		return EPOLLIN;
4824 
4825 	return 0;
4826 }
4827 
4828 static int binder_ioctl_write_read(struct file *filp,
4829 				unsigned int cmd, unsigned long arg,
4830 				struct binder_thread *thread)
4831 {
4832 	int ret = 0;
4833 	struct binder_proc *proc = filp->private_data;
4834 	unsigned int size = _IOC_SIZE(cmd);
4835 	void __user *ubuf = (void __user *)arg;
4836 	struct binder_write_read bwr;
4837 
4838 	if (size != sizeof(struct binder_write_read)) {
4839 		ret = -EINVAL;
4840 		goto out;
4841 	}
4842 	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4843 		ret = -EFAULT;
4844 		goto out;
4845 	}
4846 	binder_debug(BINDER_DEBUG_READ_WRITE,
4847 		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4848 		     proc->pid, thread->pid,
4849 		     (u64)bwr.write_size, (u64)bwr.write_buffer,
4850 		     (u64)bwr.read_size, (u64)bwr.read_buffer);
4851 
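	/*
	 * The write buffer is always drained before the read buffer is
	 * filled, so a single BINDER_WRITE_READ can both send a
	 * transaction and then wait for work or a reply.
	 */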
4852 	if (bwr.write_size > 0) {
4853 		ret = binder_thread_write(proc, thread,
4854 					  bwr.write_buffer,
4855 					  bwr.write_size,
4856 					  &bwr.write_consumed);
4857 		trace_binder_write_done(ret);
4858 		if (ret < 0) {
4859 			bwr.read_consumed = 0;
4860 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4861 				ret = -EFAULT;
4862 			goto out;
4863 		}
4864 	}
4865 	if (bwr.read_size > 0) {
4866 		ret = binder_thread_read(proc, thread, bwr.read_buffer,
4867 					 bwr.read_size,
4868 					 &bwr.read_consumed,
4869 					 filp->f_flags & O_NONBLOCK);
4870 		trace_binder_read_done(ret);
4871 		binder_inner_proc_lock(proc);
4872 		if (!binder_worklist_empty_ilocked(&proc->todo))
4873 			binder_wakeup_proc_ilocked(proc);
4874 		binder_inner_proc_unlock(proc);
4875 		if (ret < 0) {
4876 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4877 				ret = -EFAULT;
4878 			goto out;
4879 		}
4880 	}
4881 	binder_debug(BINDER_DEBUG_READ_WRITE,
4882 		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4883 		     proc->pid, thread->pid,
4884 		     (u64)bwr.write_consumed, (u64)bwr.write_size,
4885 		     (u64)bwr.read_consumed, (u64)bwr.read_size);
4886 	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4887 		ret = -EFAULT;
4888 		goto out;
4889 	}
4890 out:
4891 	return ret;
4892 }
4893 
4894 static int binder_ioctl_set_ctx_mgr(struct file *filp,
4895 				    struct flat_binder_object *fbo)
4896 {
4897 	int ret = 0;
4898 	struct binder_proc *proc = filp->private_data;
4899 	struct binder_context *context = proc->context;
4900 	struct binder_node *new_node;
4901 	kuid_t curr_euid = current_euid();
4902 
4903 	mutex_lock(&context->context_mgr_node_lock);
4904 	if (context->binder_context_mgr_node) {
4905 		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4906 		ret = -EBUSY;
4907 		goto out;
4908 	}
4909 	ret = security_binder_set_context_mgr(proc->tsk);
4910 	if (ret < 0)
4911 		goto out;
4912 	if (uid_valid(context->binder_context_mgr_uid)) {
4913 		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4914 			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4915 			       from_kuid(&init_user_ns, curr_euid),
4916 			       from_kuid(&init_user_ns,
4917 					 context->binder_context_mgr_uid));
4918 			ret = -EPERM;
4919 			goto out;
4920 		}
4921 	} else {
4922 		context->binder_context_mgr_uid = curr_euid;
4923 	}
4924 	new_node = binder_new_node(proc, fbo);
4925 	if (!new_node) {
4926 		ret = -ENOMEM;
4927 		goto out;
4928 	}
4929 	binder_node_lock(new_node);
4930 	new_node->local_weak_refs++;
4931 	new_node->local_strong_refs++;
4932 	new_node->has_strong_ref = 1;
4933 	new_node->has_weak_ref = 1;
4934 	context->binder_context_mgr_node = new_node;
4935 	binder_node_unlock(new_node);
4936 	binder_put_node(new_node);
4937 out:
4938 	mutex_unlock(&context->context_mgr_node_lock);
4939 	return ret;
4940 }
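
/*
 * Userspace sketch (illustrative only): the service manager claims the
 * context manager role once per context; a second attempt fails with
 * -EBUSY as enforced above. The plain variant passes no
 * flat_binder_object, so fbo is NULL here:
 *
 *	if (ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0) < 0)
 *		exit(1);
 *
 * BINDER_SET_CONTEXT_MGR_EXT additionally carries a flat_binder_object
 * so the caller can set node flags for the context manager node.
 */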
4941 
4942 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4943 		struct binder_node_info_for_ref *info)
4944 {
4945 	struct binder_node *node;
4946 	struct binder_context *context = proc->context;
4947 	__u32 handle = info->handle;
4948 
4949 	if (info->strong_count || info->weak_count || info->reserved1 ||
4950 	    info->reserved2 || info->reserved3) {
4951 		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.\n",
4952 				  proc->pid);
4953 		return -EINVAL;
4954 	}
4955 
4956 	/* This ioctl may only be used by the context manager */
4957 	mutex_lock(&context->context_mgr_node_lock);
4958 	if (!context->binder_context_mgr_node ||
4959 		context->binder_context_mgr_node->proc != proc) {
4960 		mutex_unlock(&context->context_mgr_node_lock);
4961 		return -EPERM;
4962 	}
4963 	mutex_unlock(&context->context_mgr_node_lock);
4964 
4965 	node = binder_get_node_from_ref(proc, handle, true, NULL);
4966 	if (!node)
4967 		return -EINVAL;
4968 
4969 	info->strong_count = node->local_strong_refs +
4970 		node->internal_strong_refs;
4971 	info->weak_count = node->local_weak_refs;
4972 
4973 	binder_put_node(node);
4974 
4975 	return 0;
4976 }
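
/*
 * Userspace sketch (illustrative only): per the check above, only the
 * context manager may issue this ioctl, and every field except handle
 * must be zero on input:
 *
 *	struct binder_node_info_for_ref info = { .handle = handle };
 *
 *	if (ioctl(binder_fd, BINDER_GET_NODE_INFO_FOR_REF, &info) == 0)
 *		printf("strong %u weak %u\n",
 *		       info.strong_count, info.weak_count);
 */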
4977 
4978 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4979 				struct binder_node_debug_info *info)
4980 {
4981 	struct rb_node *n;
4982 	binder_uintptr_t ptr = info->ptr;
4983 
4984 	memset(info, 0, sizeof(*info));
4985 
4986 	binder_inner_proc_lock(proc);
4987 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4988 		struct binder_node *node = rb_entry(n, struct binder_node,
4989 						    rb_node);
4990 		if (node->ptr > ptr) {
4991 			info->ptr = node->ptr;
4992 			info->cookie = node->cookie;
4993 			info->has_strong_ref = node->has_strong_ref;
4994 			info->has_weak_ref = node->has_weak_ref;
4995 			break;
4996 		}
4997 	}
4998 	binder_inner_proc_unlock(proc);
4999 
5000 	return 0;
5001 }
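
/*
 * Userspace sketch (illustrative only): this ioctl is built for
 * iteration. Each call returns the first node whose ptr is strictly
 * greater than the one passed in, and a returned ptr of 0 ends the
 * walk:
 *
 *	struct binder_node_debug_info info = { .ptr = 0 };
 *
 *	do {
 *		if (ioctl(binder_fd, BINDER_GET_NODE_DEBUG_INFO, &info))
 *			break;
 *		if (info.ptr)
 *			printf("node u%016llx c%016llx\n",
 *			       (unsigned long long)info.ptr,
 *			       (unsigned long long)info.cookie);
 *	} while (info.ptr);
 */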
5002 
5003 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5004 {
5005 	int ret;
5006 	struct binder_proc *proc = filp->private_data;
5007 	struct binder_thread *thread;
5008 	unsigned int size = _IOC_SIZE(cmd);
5009 	void __user *ubuf = (void __user *)arg;
5010 
5011 	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
5012 			proc->pid, current->pid, cmd, arg);*/
5013 
5014 	binder_selftest_alloc(&proc->alloc);
5015 
5016 	trace_binder_ioctl(cmd, arg);
5017 
5018 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5019 	if (ret)
5020 		goto err_unlocked;
5021 
5022 	thread = binder_get_thread(proc);
5023 	if (thread == NULL) {
5024 		ret = -ENOMEM;
5025 		goto err;
5026 	}
5027 
5028 	switch (cmd) {
5029 	case BINDER_WRITE_READ:
5030 		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5031 		if (ret)
5032 			goto err;
5033 		break;
5034 	case BINDER_SET_MAX_THREADS: {
5035 		int max_threads;
5036 
5037 		if (copy_from_user(&max_threads, ubuf,
5038 				   sizeof(max_threads))) {
5039 			ret = -EINVAL;
5040 			goto err;
5041 		}
5042 		binder_inner_proc_lock(proc);
5043 		proc->max_threads = max_threads;
5044 		binder_inner_proc_unlock(proc);
5045 		break;
5046 	}
5047 	case BINDER_SET_CONTEXT_MGR_EXT: {
5048 		struct flat_binder_object fbo;
5049 
5050 		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5051 			ret = -EINVAL;
5052 			goto err;
5053 		}
5054 		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5055 		if (ret)
5056 			goto err;
5057 		break;
5058 	}
5059 	case BINDER_SET_CONTEXT_MGR:
5060 		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5061 		if (ret)
5062 			goto err;
5063 		break;
5064 	case BINDER_THREAD_EXIT:
5065 		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5066 			     proc->pid, thread->pid);
5067 		binder_thread_release(proc, thread);
5068 		thread = NULL;
5069 		break;
5070 	case BINDER_VERSION: {
5071 		struct binder_version __user *ver = ubuf;
5072 
5073 		if (size != sizeof(struct binder_version)) {
5074 			ret = -EINVAL;
5075 			goto err;
5076 		}
5077 		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5078 			     &ver->protocol_version)) {
5079 			ret = -EINVAL;
5080 			goto err;
5081 		}
5082 		break;
5083 	}
5084 	case BINDER_GET_NODE_INFO_FOR_REF: {
5085 		struct binder_node_info_for_ref info;
5086 
5087 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5088 			ret = -EFAULT;
5089 			goto err;
5090 		}
5091 
5092 		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5093 		if (ret < 0)
5094 			goto err;
5095 
5096 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5097 			ret = -EFAULT;
5098 			goto err;
5099 		}
5100 
5101 		break;
5102 	}
5103 	case BINDER_GET_NODE_DEBUG_INFO: {
5104 		struct binder_node_debug_info info;
5105 
5106 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5107 			ret = -EFAULT;
5108 			goto err;
5109 		}
5110 
5111 		ret = binder_ioctl_get_node_debug_info(proc, &info);
5112 		if (ret < 0)
5113 			goto err;
5114 
5115 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5116 			ret = -EFAULT;
5117 			goto err;
5118 		}
5119 		break;
5120 	}
5121 	default:
5122 		ret = -EINVAL;
5123 		goto err;
5124 	}
5125 	ret = 0;
5126 err:
5127 	if (thread)
5128 		thread->looper_need_return = false;
5129 	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5130 	if (ret && ret != -ERESTARTSYS)
5131 		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5132 err_unlocked:
5133 	trace_binder_ioctl_done(ret);
5134 	return ret;
5135 }
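
/*
 * Userspace sketch (illustrative only): probing the protocol version
 * is conventionally the first ioctl after open(), mirroring the
 * BINDER_VERSION case above:
 *
 *	struct binder_version vers;
 *
 *	if (ioctl(binder_fd, BINDER_VERSION, &vers) < 0 ||
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		exit(1);
 */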
5136 
5137 static void binder_vma_open(struct vm_area_struct *vma)
5138 {
5139 	struct binder_proc *proc = vma->vm_private_data;
5140 
5141 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5142 		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5143 		     proc->pid, vma->vm_start, vma->vm_end,
5144 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5145 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5146 }
5147 
5148 static void binder_vma_close(struct vm_area_struct *vma)
5149 {
5150 	struct binder_proc *proc = vma->vm_private_data;
5151 
5152 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5153 		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5154 		     proc->pid, vma->vm_start, vma->vm_end,
5155 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5156 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5157 	binder_alloc_vma_close(&proc->alloc);
5158 }
5159 
5160 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5161 {
5162 	return VM_FAULT_SIGBUS;
5163 }
5164 
5165 static const struct vm_operations_struct binder_vm_ops = {
5166 	.open = binder_vma_open,
5167 	.close = binder_vma_close,
5168 	.fault = binder_vm_fault,
5169 };
5170 
5171 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5172 {
5173 	int ret;
5174 	struct binder_proc *proc = filp->private_data;
5175 	const char *failure_string;
5176 
5177 	if (proc->tsk != current->group_leader)
5178 		return -EINVAL;
5179 
5180 	if ((vma->vm_end - vma->vm_start) > SZ_4M)
5181 		vma->vm_end = vma->vm_start + SZ_4M;
5182 
5183 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5184 		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5185 		     __func__, proc->pid, vma->vm_start, vma->vm_end,
5186 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5187 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5188 
5189 	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5190 		ret = -EPERM;
5191 		failure_string = "bad vm_flags";
5192 		goto err_bad_arg;
5193 	}
5194 	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5195 	vma->vm_flags &= ~VM_MAYWRITE;
5196 
5197 	vma->vm_ops = &binder_vm_ops;
5198 	vma->vm_private_data = proc;
5199 
5200 	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
5201 	if (ret)
5202 		return ret;
5203 	return 0;
5204 
5205 err_bad_arg:
5206 	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5207 	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
5208 	return ret;
5209 }
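
/*
 * Userspace sketch (illustrative only): because FORBIDDEN_MMAP_FLAGS
 * includes VM_WRITE and VM_MAYWRITE is cleared above, the buffer area
 * must be mapped read-only, and any length beyond SZ_4M is silently
 * clamped. A typical setup looks roughly like:
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 *	if (map == MAP_FAILED)
 *		exit(1);
 */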
5210 
5211 static int binder_open(struct inode *nodp, struct file *filp)
5212 {
5213 	struct binder_proc *proc;
5214 	struct binder_device *binder_dev;
5215 	struct binderfs_info *info;
5216 	struct dentry *binder_binderfs_dir_entry_proc = NULL;
5217 
5218 	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5219 		     current->group_leader->pid, current->pid);
5220 
5221 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5222 	if (proc == NULL)
5223 		return -ENOMEM;
5224 	spin_lock_init(&proc->inner_lock);
5225 	spin_lock_init(&proc->outer_lock);
5226 	get_task_struct(current->group_leader);
5227 	proc->tsk = current->group_leader;
5228 	INIT_LIST_HEAD(&proc->todo);
5229 	proc->default_priority = task_nice(current);
5230 	/* binderfs stashes devices in i_private */
5231 	if (is_binderfs_device(nodp)) {
5232 		binder_dev = nodp->i_private;
5233 		info = nodp->i_sb->s_fs_info;
5234 		binder_binderfs_dir_entry_proc = info->proc_log_dir;
5235 	} else {
5236 		binder_dev = container_of(filp->private_data,
5237 					  struct binder_device, miscdev);
5238 	}
5239 	proc->context = &binder_dev->context;
5240 	binder_alloc_init(&proc->alloc);
5241 
5242 	binder_stats_created(BINDER_STAT_PROC);
5243 	proc->pid = current->group_leader->pid;
5244 	INIT_LIST_HEAD(&proc->delivered_death);
5245 	INIT_LIST_HEAD(&proc->waiting_threads);
5246 	filp->private_data = proc;
5247 
5248 	mutex_lock(&binder_procs_lock);
5249 	hlist_add_head(&proc->proc_node, &binder_procs);
5250 	mutex_unlock(&binder_procs_lock);
5251 
5252 	if (binder_debugfs_dir_entry_proc) {
5253 		char strbuf[11];
5254 
5255 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5256 		/*
5257 		 * proc debug entries are shared between contexts, so
5258 		 * this will fail if the process tries to open the driver
5259 		 * again with a different context. The printing code will
5260 		 * print all contexts that a given PID has anyway, so this
5261 		 * is not a problem.
5262 		 */
5263 		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5264 			binder_debugfs_dir_entry_proc,
5265 			(void *)(unsigned long)proc->pid,
5266 			&proc_fops);
5267 	}
5268 
5269 	if (binder_binderfs_dir_entry_proc) {
5270 		char strbuf[11];
5271 		struct dentry *binderfs_entry;
5272 
5273 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5274 		/*
5275 		 * Similar to debugfs, the process-specific log file is shared
5276 		 * between contexts. If the file has already been created for a
5277 		 * process, i.e. another context of the same process has
5278 		 * already invoked binder_open(), the following
5279 		 * binderfs_create_file() call will fail with -EEXIST. This is
5280 		 * ok since, as with debugfs, the log file will contain
5281 		 * information on all contexts of a given PID.
5282 		 */
5283 		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5284 			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5285 		if (!IS_ERR(binderfs_entry)) {
5286 			proc->binderfs_entry = binderfs_entry;
5287 		} else {
5288 			int error;
5289 
5290 			error = PTR_ERR(binderfs_entry);
5291 			if (error != -EEXIST) {
5292 				pr_warn("Unable to create file %s in binderfs (error %d)\n",
5293 					strbuf, error);
5294 			}
5295 		}
5296 	}
5297 
5298 	return 0;
5299 }
5300 
5301 static int binder_flush(struct file *filp, fl_owner_t id)
5302 {
5303 	struct binder_proc *proc = filp->private_data;
5304 
5305 	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5306 
5307 	return 0;
5308 }
5309 
5310 static void binder_deferred_flush(struct binder_proc *proc)
5311 {
5312 	struct rb_node *n;
5313 	int wake_count = 0;
5314 
5315 	binder_inner_proc_lock(proc);
5316 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5317 		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5318 
5319 		thread->looper_need_return = true;
5320 		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5321 			wake_up_interruptible(&thread->wait);
5322 			wake_count++;
5323 		}
5324 	}
5325 	binder_inner_proc_unlock(proc);
5326 
5327 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5328 		     "binder_flush: %d woke %d threads\n", proc->pid,
5329 		     wake_count);
5330 }
5331 
5332 static int binder_release(struct inode *nodp, struct file *filp)
5333 {
5334 	struct binder_proc *proc = filp->private_data;
5335 
5336 	debugfs_remove(proc->debugfs_entry);
5337 
5338 	if (proc->binderfs_entry) {
5339 		binderfs_remove_file(proc->binderfs_entry);
5340 		proc->binderfs_entry = NULL;
5341 	}
5342 
5343 	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5344 
5345 	return 0;
5346 }
5347 
5348 static int binder_node_release(struct binder_node *node, int refs)
5349 {
5350 	struct binder_ref *ref;
5351 	int death = 0;
5352 	struct binder_proc *proc = node->proc;
5353 
5354 	binder_release_work(proc, &node->async_todo);
5355 
5356 	binder_node_lock(node);
5357 	binder_inner_proc_lock(proc);
5358 	binder_dequeue_work_ilocked(&node->work);
5359 	/*
5360 	 * The caller must have taken a temporary ref on the node.
5361 	 */
5362 	BUG_ON(!node->tmp_refs);
5363 	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5364 		binder_inner_proc_unlock(proc);
5365 		binder_node_unlock(node);
5366 		binder_free_node(node);
5367 
5368 		return refs;
5369 	}
5370 
5371 	node->proc = NULL;
5372 	node->local_strong_refs = 0;
5373 	node->local_weak_refs = 0;
5374 	binder_inner_proc_unlock(proc);
5375 
5376 	spin_lock(&binder_dead_nodes_lock);
5377 	hlist_add_head(&node->dead_node, &binder_dead_nodes);
5378 	spin_unlock(&binder_dead_nodes_lock);
5379 
5380 	hlist_for_each_entry(ref, &node->refs, node_entry) {
5381 		refs++;
5382 		/*
5383 		 * Need the node lock to synchronize
5384 		 * with new notification requests and the
5385 		 * inner lock to synchronize with queued
5386 		 * death notifications.
5387 		 */
5388 		binder_inner_proc_lock(ref->proc);
5389 		if (!ref->death) {
5390 			binder_inner_proc_unlock(ref->proc);
5391 			continue;
5392 		}
5393 
5394 		death++;
5395 
5396 		BUG_ON(!list_empty(&ref->death->work.entry));
5397 		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5398 		binder_enqueue_work_ilocked(&ref->death->work,
5399 					    &ref->proc->todo);
5400 		binder_wakeup_proc_ilocked(ref->proc);
5401 		binder_inner_proc_unlock(ref->proc);
5402 	}
5403 
5404 	binder_debug(BINDER_DEBUG_DEAD_BINDER,
5405 		     "node %d now dead, refs %d, death %d\n",
5406 		     node->debug_id, refs, death);
5407 	binder_node_unlock(node);
5408 	binder_put_node(node);
5409 
5410 	return refs;
5411 }
5412 
5413 static void binder_deferred_release(struct binder_proc *proc)
5414 {
5415 	struct binder_context *context = proc->context;
5416 	struct rb_node *n;
5417 	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5418 
5419 	mutex_lock(&binder_procs_lock);
5420 	hlist_del(&proc->proc_node);
5421 	mutex_unlock(&binder_procs_lock);
5422 
5423 	mutex_lock(&context->context_mgr_node_lock);
5424 	if (context->binder_context_mgr_node &&
5425 	    context->binder_context_mgr_node->proc == proc) {
5426 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
5427 			     "%s: %d context_mgr_node gone\n",
5428 			     __func__, proc->pid);
5429 		context->binder_context_mgr_node = NULL;
5430 	}
5431 	mutex_unlock(&context->context_mgr_node_lock);
5432 	binder_inner_proc_lock(proc);
5433 	/*
5434 	 * Make sure proc stays alive after we
5435 	 * remove all the threads
5436 	 */
5437 	proc->tmp_ref++;
5438 
5439 	proc->is_dead = true;
5440 	threads = 0;
5441 	active_transactions = 0;
5442 	while ((n = rb_first(&proc->threads))) {
5443 		struct binder_thread *thread;
5444 
5445 		thread = rb_entry(n, struct binder_thread, rb_node);
5446 		binder_inner_proc_unlock(proc);
5447 		threads++;
5448 		active_transactions += binder_thread_release(proc, thread);
5449 		binder_inner_proc_lock(proc);
5450 	}
5451 
5452 	nodes = 0;
5453 	incoming_refs = 0;
5454 	while ((n = rb_first(&proc->nodes))) {
5455 		struct binder_node *node;
5456 
5457 		node = rb_entry(n, struct binder_node, rb_node);
5458 		nodes++;
5459 		/*
5460 		 * take a temporary ref on the node before
5461 		 * calling binder_node_release() which will either
5462 		 * kfree() the node or call binder_put_node()
5463 		 */
5464 		binder_inc_node_tmpref_ilocked(node);
5465 		rb_erase(&node->rb_node, &proc->nodes);
5466 		binder_inner_proc_unlock(proc);
5467 		incoming_refs = binder_node_release(node, incoming_refs);
5468 		binder_inner_proc_lock(proc);
5469 	}
5470 	binder_inner_proc_unlock(proc);
5471 
5472 	outgoing_refs = 0;
5473 	binder_proc_lock(proc);
5474 	while ((n = rb_first(&proc->refs_by_desc))) {
5475 		struct binder_ref *ref;
5476 
5477 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
5478 		outgoing_refs++;
5479 		binder_cleanup_ref_olocked(ref);
5480 		binder_proc_unlock(proc);
5481 		binder_free_ref(ref);
5482 		binder_proc_lock(proc);
5483 	}
5484 	binder_proc_unlock(proc);
5485 
5486 	binder_release_work(proc, &proc->todo);
5487 	binder_release_work(proc, &proc->delivered_death);
5488 
5489 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5490 		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5491 		     __func__, proc->pid, threads, nodes, incoming_refs,
5492 		     outgoing_refs, active_transactions);
5493 
5494 	binder_proc_dec_tmpref(proc);
5495 }
5496 
5497 static void binder_deferred_func(struct work_struct *work)
5498 {
5499 	struct binder_proc *proc;
5500 
5501 	int defer;
5502 
5503 	do {
5504 		mutex_lock(&binder_deferred_lock);
5505 		if (!hlist_empty(&binder_deferred_list)) {
5506 			proc = hlist_entry(binder_deferred_list.first,
5507 					struct binder_proc, deferred_work_node);
5508 			hlist_del_init(&proc->deferred_work_node);
5509 			defer = proc->deferred_work;
5510 			proc->deferred_work = 0;
5511 		} else {
5512 			proc = NULL;
5513 			defer = 0;
5514 		}
5515 		mutex_unlock(&binder_deferred_lock);
5516 
5517 		if (defer & BINDER_DEFERRED_FLUSH)
5518 			binder_deferred_flush(proc);
5519 
5520 		if (defer & BINDER_DEFERRED_RELEASE)
5521 			binder_deferred_release(proc); /* frees proc */
5522 	} while (proc);
5523 }
5524 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5525 
5526 static void
5527 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5528 {
5529 	mutex_lock(&binder_deferred_lock);
5530 	proc->deferred_work |= defer;
5531 	if (hlist_unhashed(&proc->deferred_work_node)) {
5532 		hlist_add_head(&proc->deferred_work_node,
5533 				&binder_deferred_list);
5534 		schedule_work(&binder_deferred_work);
5535 	}
5536 	mutex_unlock(&binder_deferred_lock);
5537 }
5538 
5539 static void print_binder_transaction_ilocked(struct seq_file *m,
5540 					     struct binder_proc *proc,
5541 					     const char *prefix,
5542 					     struct binder_transaction *t)
5543 {
5544 	struct binder_proc *to_proc;
5545 	struct binder_buffer *buffer = t->buffer;
5546 
5547 	spin_lock(&t->lock);
5548 	to_proc = t->to_proc;
5549 	seq_printf(m,
5550 		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5551 		   prefix, t->debug_id, t,
5552 		   t->from ? t->from->proc->pid : 0,
5553 		   t->from ? t->from->pid : 0,
5554 		   to_proc ? to_proc->pid : 0,
5555 		   t->to_thread ? t->to_thread->pid : 0,
5556 		   t->code, t->flags, t->priority, t->need_reply);
5557 	spin_unlock(&t->lock);
5558 
5559 	if (proc != to_proc) {
5560 		/*
5561 		 * Can only safely deref buffer if we are holding the
5562 		 * correct proc inner lock for this node
5563 		 */
5564 		seq_puts(m, "\n");
5565 		return;
5566 	}
5567 
5568 	if (buffer == NULL) {
5569 		seq_puts(m, " buffer free\n");
5570 		return;
5571 	}
5572 	if (buffer->target_node)
5573 		seq_printf(m, " node %d", buffer->target_node->debug_id);
5574 	seq_printf(m, " size %zd:%zd data %pK\n",
5575 		   buffer->data_size, buffer->offsets_size,
5576 		   buffer->user_data);
5577 }
5578 
5579 static void print_binder_work_ilocked(struct seq_file *m,
5580 				     struct binder_proc *proc,
5581 				     const char *prefix,
5582 				     const char *transaction_prefix,
5583 				     struct binder_work *w)
5584 {
5585 	struct binder_node *node;
5586 	struct binder_transaction *t;
5587 
5588 	switch (w->type) {
5589 	case BINDER_WORK_TRANSACTION:
5590 		t = container_of(w, struct binder_transaction, work);
5591 		print_binder_transaction_ilocked(
5592 				m, proc, transaction_prefix, t);
5593 		break;
5594 	case BINDER_WORK_RETURN_ERROR: {
5595 		struct binder_error *e = container_of(
5596 				w, struct binder_error, work);
5597 
5598 		seq_printf(m, "%stransaction error: %u\n",
5599 			   prefix, e->cmd);
5600 	} break;
5601 	case BINDER_WORK_TRANSACTION_COMPLETE:
5602 		seq_printf(m, "%stransaction complete\n", prefix);
5603 		break;
5604 	case BINDER_WORK_NODE:
5605 		node = container_of(w, struct binder_node, work);
5606 		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5607 			   prefix, node->debug_id,
5608 			   (u64)node->ptr, (u64)node->cookie);
5609 		break;
5610 	case BINDER_WORK_DEAD_BINDER:
5611 		seq_printf(m, "%shas dead binder\n", prefix);
5612 		break;
5613 	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5614 		seq_printf(m, "%shas cleared dead binder\n", prefix);
5615 		break;
5616 	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5617 		seq_printf(m, "%shas cleared death notification\n", prefix);
5618 		break;
5619 	default:
5620 		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5621 		break;
5622 	}
5623 }
5624 
5625 static void print_binder_thread_ilocked(struct seq_file *m,
5626 					struct binder_thread *thread,
5627 					int print_always)
5628 {
5629 	struct binder_transaction *t;
5630 	struct binder_work *w;
5631 	size_t start_pos = m->count;
5632 	size_t header_pos;
5633 
5634 	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
5635 			thread->pid, thread->looper,
5636 			thread->looper_need_return,
5637 			atomic_read(&thread->tmp_ref));
5638 	header_pos = m->count;
5639 	t = thread->transaction_stack;
5640 	while (t) {
5641 		if (t->from == thread) {
5642 			print_binder_transaction_ilocked(m, thread->proc,
5643 					"    outgoing transaction", t);
5644 			t = t->from_parent;
5645 		} else if (t->to_thread == thread) {
5646 			print_binder_transaction_ilocked(m, thread->proc,
5647 						 "    incoming transaction", t);
5648 			t = t->to_parent;
5649 		} else {
5650 			print_binder_transaction_ilocked(m, thread->proc,
5651 					"    bad transaction", t);
5652 			t = NULL;
5653 		}
5654 	}
5655 	list_for_each_entry(w, &thread->todo, entry) {
5656 		print_binder_work_ilocked(m, thread->proc, "    ",
5657 					  "    pending transaction", w);
5658 	}
5659 	if (!print_always && m->count == header_pos)
5660 		m->count = start_pos;
5661 }
5662 
5663 static void print_binder_node_nilocked(struct seq_file *m,
5664 				       struct binder_node *node)
5665 {
5666 	struct binder_ref *ref;
5667 	struct binder_work *w;
5668 	int count;
5669 
5670 	count = 0;
5671 	hlist_for_each_entry(ref, &node->refs, node_entry)
5672 		count++;
5673 
5674 	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5675 		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
5676 		   node->has_strong_ref, node->has_weak_ref,
5677 		   node->local_strong_refs, node->local_weak_refs,
5678 		   node->internal_strong_refs, count, node->tmp_refs);
5679 	if (count) {
5680 		seq_puts(m, " proc");
5681 		hlist_for_each_entry(ref, &node->refs, node_entry)
5682 			seq_printf(m, " %d", ref->proc->pid);
5683 	}
5684 	seq_puts(m, "\n");
5685 	if (node->proc) {
5686 		list_for_each_entry(w, &node->async_todo, entry)
5687 			print_binder_work_ilocked(m, node->proc, "    ",
5688 					  "    pending async transaction", w);
5689 	}
5690 }
5691 
5692 static void print_binder_ref_olocked(struct seq_file *m,
5693 				     struct binder_ref *ref)
5694 {
5695 	binder_node_lock(ref->node);
5696 	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
5697 		   ref->data.debug_id, ref->data.desc,
5698 		   ref->node->proc ? "" : "dead ",
5699 		   ref->node->debug_id, ref->data.strong,
5700 		   ref->data.weak, ref->death);
5701 	binder_node_unlock(ref->node);
5702 }
5703 
5704 static void print_binder_proc(struct seq_file *m,
5705 			      struct binder_proc *proc, int print_all)
5706 {
5707 	struct binder_work *w;
5708 	struct rb_node *n;
5709 	size_t start_pos = m->count;
5710 	size_t header_pos;
5711 	struct binder_node *last_node = NULL;
5712 
5713 	seq_printf(m, "proc %d\n", proc->pid);
5714 	seq_printf(m, "context %s\n", proc->context->name);
5715 	header_pos = m->count;
5716 
5717 	binder_inner_proc_lock(proc);
5718 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5719 		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5720 						rb_node), print_all);
5721 
5722 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5723 		struct binder_node *node = rb_entry(n, struct binder_node,
5724 						    rb_node);
5725 		if (!print_all && !node->has_async_transaction)
5726 			continue;
5727 
5728 		/*
5729 		 * take a temporary reference on the node so it
5730 		 * survives and isn't removed from the tree
5731 		 * while we print it.
5732 		 */
5733 		binder_inc_node_tmpref_ilocked(node);
5734 		/* Need to drop inner lock to take node lock */
5735 		binder_inner_proc_unlock(proc);
5736 		if (last_node)
5737 			binder_put_node(last_node);
5738 		binder_node_inner_lock(node);
5739 		print_binder_node_nilocked(m, node);
5740 		binder_node_inner_unlock(node);
5741 		last_node = node;
5742 		binder_inner_proc_lock(proc);
5743 	}
5744 	binder_inner_proc_unlock(proc);
5745 	if (last_node)
5746 		binder_put_node(last_node);
5747 
5748 	if (print_all) {
5749 		binder_proc_lock(proc);
5750 		for (n = rb_first(&proc->refs_by_desc);
5751 		     n != NULL;
5752 		     n = rb_next(n))
5753 			print_binder_ref_olocked(m, rb_entry(n,
5754 							    struct binder_ref,
5755 							    rb_node_desc));
5756 		binder_proc_unlock(proc);
5757 	}
5758 	binder_alloc_print_allocated(m, &proc->alloc);
5759 	binder_inner_proc_lock(proc);
5760 	list_for_each_entry(w, &proc->todo, entry)
5761 		print_binder_work_ilocked(m, proc, "  ",
5762 					  "  pending transaction", w);
5763 	list_for_each_entry(w, &proc->delivered_death, entry) {
5764 		seq_puts(m, "  has delivered dead binder\n");
5765 		break;
5766 	}
5767 	binder_inner_proc_unlock(proc);
5768 	if (!print_all && m->count == header_pos)
5769 		m->count = start_pos;
5770 }
5771 
5772 static const char * const binder_return_strings[] = {
5773 	"BR_ERROR",
5774 	"BR_OK",
5775 	"BR_TRANSACTION",
5776 	"BR_REPLY",
5777 	"BR_ACQUIRE_RESULT",
5778 	"BR_DEAD_REPLY",
5779 	"BR_TRANSACTION_COMPLETE",
5780 	"BR_INCREFS",
5781 	"BR_ACQUIRE",
5782 	"BR_RELEASE",
5783 	"BR_DECREFS",
5784 	"BR_ATTEMPT_ACQUIRE",
5785 	"BR_NOOP",
5786 	"BR_SPAWN_LOOPER",
5787 	"BR_FINISHED",
5788 	"BR_DEAD_BINDER",
5789 	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
5790 	"BR_FAILED_REPLY"
5791 };
5792 
5793 static const char * const binder_command_strings[] = {
5794 	"BC_TRANSACTION",
5795 	"BC_REPLY",
5796 	"BC_ACQUIRE_RESULT",
5797 	"BC_FREE_BUFFER",
5798 	"BC_INCREFS",
5799 	"BC_ACQUIRE",
5800 	"BC_RELEASE",
5801 	"BC_DECREFS",
5802 	"BC_INCREFS_DONE",
5803 	"BC_ACQUIRE_DONE",
5804 	"BC_ATTEMPT_ACQUIRE",
5805 	"BC_REGISTER_LOOPER",
5806 	"BC_ENTER_LOOPER",
5807 	"BC_EXIT_LOOPER",
5808 	"BC_REQUEST_DEATH_NOTIFICATION",
5809 	"BC_CLEAR_DEATH_NOTIFICATION",
5810 	"BC_DEAD_BINDER_DONE",
5811 	"BC_TRANSACTION_SG",
5812 	"BC_REPLY_SG",
5813 };
5814 
5815 static const char * const binder_objstat_strings[] = {
5816 	"proc",
5817 	"thread",
5818 	"node",
5819 	"ref",
5820 	"death",
5821 	"transaction",
5822 	"transaction_complete"
5823 };
5824 
5825 static void print_binder_stats(struct seq_file *m, const char *prefix,
5826 			       struct binder_stats *stats)
5827 {
5828 	int i;
5829 
5830 	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5831 		     ARRAY_SIZE(binder_command_strings));
5832 	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5833 		int temp = atomic_read(&stats->bc[i]);
5834 
5835 		if (temp)
5836 			seq_printf(m, "%s%s: %d\n", prefix,
5837 				   binder_command_strings[i], temp);
5838 	}
5839 
5840 	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5841 		     ARRAY_SIZE(binder_return_strings));
5842 	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5843 		int temp = atomic_read(&stats->br[i]);
5844 
5845 		if (temp)
5846 			seq_printf(m, "%s%s: %d\n", prefix,
5847 				   binder_return_strings[i], temp);
5848 	}
5849 
5850 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5851 		     ARRAY_SIZE(binder_objstat_strings));
5852 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5853 		     ARRAY_SIZE(stats->obj_deleted));
5854 	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5855 		int created = atomic_read(&stats->obj_created[i]);
5856 		int deleted = atomic_read(&stats->obj_deleted[i]);
5857 
5858 		if (created || deleted)
5859 			seq_printf(m, "%s%s: active %d total %d\n",
5860 				prefix,
5861 				binder_objstat_strings[i],
5862 				created - deleted,
5863 				created);
5864 	}
5865 }
5866 
5867 static void print_binder_proc_stats(struct seq_file *m,
5868 				    struct binder_proc *proc)
5869 {
5870 	struct binder_work *w;
5871 	struct binder_thread *thread;
5872 	struct rb_node *n;
5873 	int count, strong, weak, ready_threads;
5874 	size_t free_async_space =
5875 		binder_alloc_get_free_async_space(&proc->alloc);
5876 
5877 	seq_printf(m, "proc %d\n", proc->pid);
5878 	seq_printf(m, "context %s\n", proc->context->name);
5879 	count = 0;
5880 	ready_threads = 0;
5881 	binder_inner_proc_lock(proc);
5882 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5883 		count++;
5884 
5885 	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5886 		ready_threads++;
5887 
5888 	seq_printf(m, "  threads: %d\n", count);
5889 	seq_printf(m, "  requested threads: %d+%d/%d\n"
5890 			"  ready threads: %d\n"
5891 			"  free async space %zd\n", proc->requested_threads,
5892 			proc->requested_threads_started, proc->max_threads,
5893 			ready_threads,
5894 			free_async_space);
5895 	count = 0;
5896 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5897 		count++;
5898 	binder_inner_proc_unlock(proc);
5899 	seq_printf(m, "  nodes: %d\n", count);
5900 	count = 0;
5901 	strong = 0;
5902 	weak = 0;
5903 	binder_proc_lock(proc);
5904 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5905 		struct binder_ref *ref = rb_entry(n, struct binder_ref,
5906 						  rb_node_desc);
5907 		count++;
5908 		strong += ref->data.strong;
5909 		weak += ref->data.weak;
5910 	}
5911 	binder_proc_unlock(proc);
5912 	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
5913 
5914 	count = binder_alloc_get_allocated_count(&proc->alloc);
5915 	seq_printf(m, "  buffers: %d\n", count);
5916 
5917 	binder_alloc_print_pages(m, &proc->alloc);
5918 
5919 	count = 0;
5920 	binder_inner_proc_lock(proc);
5921 	list_for_each_entry(w, &proc->todo, entry) {
5922 		if (w->type == BINDER_WORK_TRANSACTION)
5923 			count++;
5924 	}
5925 	binder_inner_proc_unlock(proc);
5926 	seq_printf(m, "  pending transactions: %d\n", count);
5927 
5928 	print_binder_stats(m, "  ", &proc->stats);
5929 }
5930 
5931 
5932 int binder_state_show(struct seq_file *m, void *unused)
5933 {
5934 	struct binder_proc *proc;
5935 	struct binder_node *node;
5936 	struct binder_node *last_node = NULL;
5937 
5938 	seq_puts(m, "binder state:\n");
5939 
5940 	spin_lock(&binder_dead_nodes_lock);
5941 	if (!hlist_empty(&binder_dead_nodes))
5942 		seq_puts(m, "dead nodes:\n");
5943 	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5944 		/*
5945 		 * take a temporary reference on the node so it
5946 		 * survives and isn't removed from the list
5947 		 * while we print it.
5948 		 */
5949 		node->tmp_refs++;
5950 		spin_unlock(&binder_dead_nodes_lock);
5951 		if (last_node)
5952 			binder_put_node(last_node);
5953 		binder_node_lock(node);
5954 		print_binder_node_nilocked(m, node);
5955 		binder_node_unlock(node);
5956 		last_node = node;
5957 		spin_lock(&binder_dead_nodes_lock);
5958 	}
5959 	spin_unlock(&binder_dead_nodes_lock);
5960 	if (last_node)
5961 		binder_put_node(last_node);
5962 
5963 	mutex_lock(&binder_procs_lock);
5964 	hlist_for_each_entry(proc, &binder_procs, proc_node)
5965 		print_binder_proc(m, proc, 1);
5966 	mutex_unlock(&binder_procs_lock);
5967 
5968 	return 0;
5969 }
5970 
5971 int binder_stats_show(struct seq_file *m, void *unused)
5972 {
5973 	struct binder_proc *proc;
5974 
5975 	seq_puts(m, "binder stats:\n");
5976 
5977 	print_binder_stats(m, "", &binder_stats);
5978 
5979 	mutex_lock(&binder_procs_lock);
5980 	hlist_for_each_entry(proc, &binder_procs, proc_node)
5981 		print_binder_proc_stats(m, proc);
5982 	mutex_unlock(&binder_procs_lock);
5983 
5984 	return 0;
5985 }
5986 
5987 int binder_transactions_show(struct seq_file *m, void *unused)
5988 {
5989 	struct binder_proc *proc;
5990 
5991 	seq_puts(m, "binder transactions:\n");
5992 	mutex_lock(&binder_procs_lock);
5993 	hlist_for_each_entry(proc, &binder_procs, proc_node)
5994 		print_binder_proc(m, proc, 0);
5995 	mutex_unlock(&binder_procs_lock);
5996 
5997 	return 0;
5998 }
5999 
6000 static int proc_show(struct seq_file *m, void *unused)
6001 {
6002 	struct binder_proc *itr;
6003 	int pid = (unsigned long)m->private;
6004 
6005 	mutex_lock(&binder_procs_lock);
6006 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
6007 		if (itr->pid == pid) {
6008 			seq_puts(m, "binder proc state:\n");
6009 			print_binder_proc(m, itr, 1);
6010 		}
6011 	}
6012 	mutex_unlock(&binder_procs_lock);
6013 
6014 	return 0;
6015 }
6016 
6017 static void print_binder_transaction_log_entry(struct seq_file *m,
6018 					struct binder_transaction_log_entry *e)
6019 {
6020 	int debug_id = READ_ONCE(e->debug_id_done);
6021 	/*
6022 	 * read barrier to guarantee debug_id_done read before
6023 	 * we print the log values
6024 	 */
6025 	smp_rmb();
6026 	seq_printf(m,
6027 		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6028 		   e->debug_id, (e->call_type == 2) ? "reply" :
6029 		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6030 		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
6031 		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
6032 		   e->return_error, e->return_error_param,
6033 		   e->return_error_line);
6034 	/*
6035 	 * read-barrier to guarantee read of debug_id_done after
6036 	 * done printing the fields of the entry
6037 	 */
6038 	smp_rmb();
6039 	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6040 			"\n" : " (incomplete)\n");
6041 }
6042 
6043 int binder_transaction_log_show(struct seq_file *m, void *unused)
6044 {
6045 	struct binder_transaction_log *log = m->private;
6046 	unsigned int log_cur = atomic_read(&log->cur);
6047 	unsigned int count;
6048 	unsigned int cur;
6049 	int i;
6050 
6051 	count = log_cur + 1;
6052 	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6053 		0 : count % ARRAY_SIZE(log->entry);
6054 	if (count > ARRAY_SIZE(log->entry) || log->full)
6055 		count = ARRAY_SIZE(log->entry);
6056 	for (i = 0; i < count; i++) {
6057 		unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6058 
6059 		print_binder_transaction_log_entry(m, &log->entry[index]);
6060 	}
6061 	return 0;
6062 }
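
/*
 * These entries are exposed through debugfs (typically mounted at
 * /sys/kernel/debug), e.g.:
 *
 *	cat /sys/kernel/debug/binder/transaction_log
 *	cat /sys/kernel/debug/binder/failed_transaction_log
 *
 * The "(incomplete)" suffix marks an entry that was being overwritten
 * while it was read.
 */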
6063 
6064 const struct file_operations binder_fops = {
6065 	.owner = THIS_MODULE,
6066 	.poll = binder_poll,
6067 	.unlocked_ioctl = binder_ioctl,
6068 	.compat_ioctl = binder_ioctl,
6069 	.mmap = binder_mmap,
6070 	.open = binder_open,
6071 	.flush = binder_flush,
6072 	.release = binder_release,
6073 };
6074 
6075 static int __init init_binder_device(const char *name)
6076 {
6077 	int ret;
6078 	struct binder_device *binder_device;
6079 
6080 	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6081 	if (!binder_device)
6082 		return -ENOMEM;
6083 
6084 	binder_device->miscdev.fops = &binder_fops;
6085 	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6086 	binder_device->miscdev.name = name;
6087 
6088 	binder_device->context.binder_context_mgr_uid = INVALID_UID;
6089 	binder_device->context.name = name;
6090 	mutex_init(&binder_device->context.context_mgr_node_lock);
6091 
6092 	ret = misc_register(&binder_device->miscdev);
6093 	if (ret < 0) {
6094 		kfree(binder_device);
6095 		return ret;
6096 	}
6097 
6098 	hlist_add_head(&binder_device->hlist, &binder_devices);
6099 
6100 	return ret;
6101 }
6102 
6103 static int __init binder_init(void)
6104 {
6105 	int ret;
6106 	char *device_name, *device_tmp;
6107 	struct binder_device *device;
6108 	struct hlist_node *tmp;
6109 	char *device_names = NULL;
6110 
6111 	ret = binder_alloc_shrinker_init();
6112 	if (ret)
6113 		return ret;
6114 
6115 	atomic_set(&binder_transaction_log.cur, ~0U);
6116 	atomic_set(&binder_transaction_log_failed.cur, ~0U);
6117 
6118 	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6119 	if (binder_debugfs_dir_entry_root)
6120 		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6121 						 binder_debugfs_dir_entry_root);
6122 
6123 	if (binder_debugfs_dir_entry_root) {
6124 		debugfs_create_file("state",
6125 				    0444,
6126 				    binder_debugfs_dir_entry_root,
6127 				    NULL,
6128 				    &binder_state_fops);
6129 		debugfs_create_file("stats",
6130 				    0444,
6131 				    binder_debugfs_dir_entry_root,
6132 				    NULL,
6133 				    &binder_stats_fops);
6134 		debugfs_create_file("transactions",
6135 				    0444,
6136 				    binder_debugfs_dir_entry_root,
6137 				    NULL,
6138 				    &binder_transactions_fops);
6139 		debugfs_create_file("transaction_log",
6140 				    0444,
6141 				    binder_debugfs_dir_entry_root,
6142 				    &binder_transaction_log,
6143 				    &binder_transaction_log_fops);
6144 		debugfs_create_file("failed_transaction_log",
6145 				    0444,
6146 				    binder_debugfs_dir_entry_root,
6147 				    &binder_transaction_log_failed,
6148 				    &binder_transaction_log_fops);
6149 	}
6150 
6151 	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6152 	    strcmp(binder_devices_param, "") != 0) {
6153 		/*
6154 		 * Copy the module parameter string, because we don't want to
6155 		 * tokenize it in-place.
6156 		 */
6157 		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6158 		if (!device_names) {
6159 			ret = -ENOMEM;
6160 			goto err_alloc_device_names_failed;
6161 		}
6162 
6163 		device_tmp = device_names;
6164 		while ((device_name = strsep(&device_tmp, ","))) {
6165 			ret = init_binder_device(device_name);
6166 			if (ret)
6167 				goto err_init_binder_device_failed;
6168 		}
6169 	}
6170 
6171 	ret = init_binderfs();
6172 	if (ret)
6173 		goto err_init_binder_device_failed;
6174 
6175 	return ret;
6176 
6177 err_init_binder_device_failed:
6178 	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6179 		misc_deregister(&device->miscdev);
6180 		hlist_del(&device->hlist);
6181 		kfree(device);
6182 	}
6183 
6184 	kfree(device_names);
6185 
6186 err_alloc_device_names_failed:
6187 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6188 
6189 	return ret;
6190 }
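
/*
 * Note: without CONFIG_ANDROID_BINDERFS, the device list parsed above
 * comes from the "binder.devices" module parameter backing
 * binder_devices_param, e.g. on the kernel command line:
 *
 *	binder.devices=binder,hwbinder,vndbinder
 *
 * With binderfs enabled, devices are instead created on demand through
 * a binderfs mount.
 */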
6191 
6192 device_initcall(binder_init);
6193 
6194 #define CREATE_TRACE_POINTS
6195 #include "binder_trace.h"
6196 
6197 MODULE_LICENSE("GPL v2");
6198