xref: /openbmc/linux/drivers/android/binder.c (revision ca2478a7d974f38d29d27acb42a952c7f168916e)
1  // SPDX-License-Identifier: GPL-2.0-only
2  /* binder.c
3   *
4   * Android IPC Subsystem
5   *
6   * Copyright (C) 2007-2008 Google, Inc.
7   */
8  
9  /*
10   * Locking overview
11   *
12   * There are 3 main spinlocks which must be acquired in the
13   * order shown:
14   *
15   * 1) proc->outer_lock : protects binder_ref
16   *    binder_proc_lock() and binder_proc_unlock() are
17   *    used to acq/rel.
18   * 2) node->lock : protects most fields of binder_node.
19   *    binder_node_lock() and binder_node_unlock() are
20   *    used to acq/rel
21   * 3) proc->inner_lock : protects the thread and node lists
22   *    (proc->threads, proc->waiting_threads, proc->nodes)
23   *    and all todo lists associated with the binder_proc
24   *    (proc->todo, thread->todo, proc->delivered_death and
25   *    node->async_todo), as well as thread->transaction_stack
26   *    binder_inner_proc_lock() and binder_inner_proc_unlock()
27   *    are used to acq/rel
28   *
29   * Any lock under procA must never be nested under any lock at the same
30   * level or below on procB.
31   *
32   * Functions that require a lock to be held on entry indicate the
33   * required lock in the suffix of the function name:
34   *
35   * foo_olocked() : requires node->outer_lock
36   * foo_nlocked() : requires node->lock
37   * foo_ilocked() : requires proc->inner_lock
38   * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39   * foo_nilocked(): requires node->lock and proc->inner_lock
40   * ...
41   */
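/*
 * Illustrative sketch (editorial, not part of the driver): a hypothetical
 * helper showing the lock wrappers defined later in this file nested in
 * the documented order. The function name and variables are made up;
 * only the 1) outer, 2) node, 3) inner acquisition order matters.
 *
 *	static void example_locked_update(struct binder_proc *proc,
 *					  struct binder_node *node)
 *	{
 *		binder_proc_lock(proc);         <- 1) proc->outer_lock
 *		binder_node_lock(node);         <- 2) node->lock
 *		binder_inner_proc_lock(proc);   <- 3) proc->inner_lock
 *
 *		(refs, node fields and todo lists may be touched here)
 *
 *		binder_inner_proc_unlock(proc); <- release in reverse order
 *		binder_node_unlock(node);
 *		binder_proc_unlock(proc);
 *	}
 */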
42  
43  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44  
45  #include <linux/fdtable.h>
46  #include <linux/file.h>
47  #include <linux/freezer.h>
48  #include <linux/fs.h>
49  #include <linux/list.h>
50  #include <linux/miscdevice.h>
51  #include <linux/module.h>
52  #include <linux/mutex.h>
53  #include <linux/nsproxy.h>
54  #include <linux/poll.h>
55  #include <linux/debugfs.h>
56  #include <linux/rbtree.h>
57  #include <linux/sched/signal.h>
58  #include <linux/sched/mm.h>
59  #include <linux/seq_file.h>
60  #include <linux/string.h>
61  #include <linux/uaccess.h>
62  #include <linux/pid_namespace.h>
63  #include <linux/security.h>
64  #include <linux/spinlock.h>
65  #include <linux/ratelimit.h>
66  #include <linux/syscalls.h>
67  #include <linux/task_work.h>
68  #include <linux/sizes.h>
69  #include <linux/ktime.h>
70  
71  #include <uapi/linux/android/binder.h>
72  
73  #include <linux/cacheflush.h>
74  
75  #include "binder_internal.h"
76  #include "binder_trace.h"
77  
78  static HLIST_HEAD(binder_deferred_list);
79  static DEFINE_MUTEX(binder_deferred_lock);
80  
81  static HLIST_HEAD(binder_devices);
82  static HLIST_HEAD(binder_procs);
83  static DEFINE_MUTEX(binder_procs_lock);
84  
85  static HLIST_HEAD(binder_dead_nodes);
86  static DEFINE_SPINLOCK(binder_dead_nodes_lock);
87  
88  static struct dentry *binder_debugfs_dir_entry_root;
89  static struct dentry *binder_debugfs_dir_entry_proc;
90  static atomic_t binder_last_id;
91  
92  static int proc_show(struct seq_file *m, void *unused);
93  DEFINE_SHOW_ATTRIBUTE(proc);
94  
95  #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
96  
97  enum {
98  	BINDER_DEBUG_USER_ERROR             = 1U << 0,
99  	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
100  	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
101  	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
102  	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
103  	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
104  	BINDER_DEBUG_READ_WRITE             = 1U << 6,
105  	BINDER_DEBUG_USER_REFS              = 1U << 7,
106  	BINDER_DEBUG_THREADS                = 1U << 8,
107  	BINDER_DEBUG_TRANSACTION            = 1U << 9,
108  	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
109  	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
110  	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
111  	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
112  	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
113  };
114  static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
115  	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
116  module_param_named(debug_mask, binder_debug_mask, uint, 0644);
117  
118  char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
119  module_param_named(devices, binder_devices_param, charp, 0444);
120  
121  static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
122  static int binder_stop_on_user_error;
123  
124  static int binder_set_stop_on_user_error(const char *val,
125  					 const struct kernel_param *kp)
126  {
127  	int ret;
128  
129  	ret = param_set_int(val, kp);
130  	if (binder_stop_on_user_error < 2)
131  		wake_up(&binder_user_error_wait);
132  	return ret;
133  }
134  module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
135  	param_get_int, &binder_stop_on_user_error, 0644);
136  
137  static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
138  {
139  	struct va_format vaf;
140  	va_list args;
141  
142  	if (binder_debug_mask & mask) {
143  		va_start(args, format);
144  		vaf.va = &args;
145  		vaf.fmt = format;
146  		pr_info_ratelimited("%pV", &vaf);
147  		va_end(args);
148  	}
149  }
150  
151  #define binder_txn_error(x...) \
152  	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)
153  
154  static __printf(1, 2) void binder_user_error(const char *format, ...)
155  {
156  	struct va_format vaf;
157  	va_list args;
158  
159  	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
160  		va_start(args, format);
161  		vaf.va = &args;
162  		vaf.fmt = format;
163  		pr_info_ratelimited("%pV", &vaf);
164  		va_end(args);
165  	}
166  
167  	if (binder_stop_on_user_error)
168  		binder_stop_on_user_error = 2;
169  }
170  
171  #define binder_set_extended_error(ee, _id, _command, _param) \
172  	do { \
173  		(ee)->id = _id; \
174  		(ee)->command = _command; \
175  		(ee)->param = _param; \
176  	} while (0)
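/*
 * Illustrative sketch (editorial, not part of the driver): how a failing
 * transaction path would typically record an extended error with the
 * macro above. "thread", "t_debug_id" and the chosen command/param are
 * hypothetical stand-ins for call-site values.
 *
 *	binder_set_extended_error(&thread->ee, t_debug_id,
 *				  BR_FAILED_REPLY, -EINVAL);
 */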
177  
178  #define to_flat_binder_object(hdr) \
179  	container_of(hdr, struct flat_binder_object, hdr)
180  
181  #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
182  
183  #define to_binder_buffer_object(hdr) \
184  	container_of(hdr, struct binder_buffer_object, hdr)
185  
186  #define to_binder_fd_array_object(hdr) \
187  	container_of(hdr, struct binder_fd_array_object, hdr)
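/*
 * Illustrative sketch (editorial, not part of the driver): the to_*()
 * wrappers above recover the enclosing object from an embedded
 * struct binder_object_header, typically after switching on hdr->type.
 * "hdr" is a hypothetical pointer into a transaction buffer.
 *
 *	switch (hdr->type) {
 *	case BINDER_TYPE_BINDER:
 *	case BINDER_TYPE_WEAK_BINDER: {
 *		struct flat_binder_object *fp = to_flat_binder_object(hdr);
 *		(translate the binder object here)
 *		break;
 *	}
 *	case BINDER_TYPE_FD: {
 *		struct binder_fd_object *fdo = to_binder_fd_object(hdr);
 *		(translate the fd here)
 *		break;
 *	}
 *	}
 */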
188  
189  static struct binder_stats binder_stats;
190  
191  static inline void binder_stats_deleted(enum binder_stat_types type)
192  {
193  	atomic_inc(&binder_stats.obj_deleted[type]);
194  }
195  
196  static inline void binder_stats_created(enum binder_stat_types type)
197  {
198  	atomic_inc(&binder_stats.obj_created[type]);
199  }
200  
201  struct binder_transaction_log_entry {
202  	int debug_id;
203  	int debug_id_done;
204  	int call_type;
205  	int from_proc;
206  	int from_thread;
207  	int target_handle;
208  	int to_proc;
209  	int to_thread;
210  	int to_node;
211  	int data_size;
212  	int offsets_size;
213  	int return_error_line;
214  	uint32_t return_error;
215  	uint32_t return_error_param;
216  	char context_name[BINDERFS_MAX_NAME + 1];
217  };
218  
219  struct binder_transaction_log {
220  	atomic_t cur;
221  	bool full;
222  	struct binder_transaction_log_entry entry[32];
223  };
224  
225  static struct binder_transaction_log binder_transaction_log;
226  static struct binder_transaction_log binder_transaction_log_failed;
227  
228  static struct binder_transaction_log_entry *binder_transaction_log_add(
229  	struct binder_transaction_log *log)
230  {
231  	struct binder_transaction_log_entry *e;
232  	unsigned int cur = atomic_inc_return(&log->cur);
233  
234  	if (cur >= ARRAY_SIZE(log->entry))
235  		log->full = true;
236  	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
237  	WRITE_ONCE(e->debug_id_done, 0);
238  	/*
239  	 * write-barrier to synchronize access to e->debug_id_done.
240  	 * We make sure the initialized 0 value is seen before
241  	 * the other fields are zeroed by memset().
242  	 */
243  	smp_wmb();
244  	memset(e, 0, sizeof(*e));
245  	return e;
246  }
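/*
 * Illustrative sketch (editorial, not part of the driver): expected use
 * of binder_transaction_log_add(). The returned entry is filled in and
 * debug_id_done is written last, after another smp_wmb(), so a reader
 * using a matching read barrier that observes the final value also
 * observes the filled-in fields. Field values here are hypothetical.
 *
 *	struct binder_transaction_log_entry *e;
 *
 *	e = binder_transaction_log_add(&binder_transaction_log);
 *	e->debug_id = t_debug_id;
 *	e->from_proc = proc->pid;
 *	e->to_proc = target_proc->pid;
 *	(... fill in the remaining fields ...)
 *	smp_wmb();
 *	WRITE_ONCE(e->debug_id_done, t_debug_id);
 */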
247  
248  enum binder_deferred_state {
249  	BINDER_DEFERRED_FLUSH        = 0x01,
250  	BINDER_DEFERRED_RELEASE      = 0x02,
251  };
252  
253  enum {
254  	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
255  	BINDER_LOOPER_STATE_ENTERED     = 0x02,
256  	BINDER_LOOPER_STATE_EXITED      = 0x04,
257  	BINDER_LOOPER_STATE_INVALID     = 0x08,
258  	BINDER_LOOPER_STATE_WAITING     = 0x10,
259  	BINDER_LOOPER_STATE_POLL        = 0x20,
260  };
261  
262  /**
263   * binder_proc_lock() - Acquire outer lock for given binder_proc
264   * @proc:         struct binder_proc to acquire
265   *
266   * Acquires proc->outer_lock. Used to protect binder_ref
267   * structures associated with the given proc.
268   */
269  #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
270  static void
271  _binder_proc_lock(struct binder_proc *proc, int line)
272  	__acquires(&proc->outer_lock)
273  {
274  	binder_debug(BINDER_DEBUG_SPINLOCKS,
275  		     "%s: line=%d\n", __func__, line);
276  	spin_lock(&proc->outer_lock);
277  }
278  
279  /**
280   * binder_proc_unlock() - Release spinlock for given binder_proc
281   * @proc:                struct binder_proc to release
282   *
283   * Release lock acquired via binder_proc_lock()
284   */
285  #define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
286  static void
287  _binder_proc_unlock(struct binder_proc *proc, int line)
288  	__releases(&proc->outer_lock)
289  {
290  	binder_debug(BINDER_DEBUG_SPINLOCKS,
291  		     "%s: line=%d\n", __func__, line);
292  	spin_unlock(&proc->outer_lock);
293  }
294  
295  /**
296   * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
297   * @proc:         struct binder_proc to acquire
298   *
299   * Acquires proc->inner_lock. Used to protect todo lists
300   */
301  #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
302  static void
303  _binder_inner_proc_lock(struct binder_proc *proc, int line)
304  	__acquires(&proc->inner_lock)
305  {
306  	binder_debug(BINDER_DEBUG_SPINLOCKS,
307  		     "%s: line=%d\n", __func__, line);
308  	spin_lock(&proc->inner_lock);
309  }
310  
311  /**
312   * binder_inner_proc_unlock() - Release inner lock for given binder_proc
313   * @proc:         struct binder_proc to release
314   *
315   * Release lock acquired via binder_inner_proc_lock()
316   */
317  #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
318  static void
319  _binder_inner_proc_unlock(struct binder_proc *proc, int line)
320  	__releases(&proc->inner_lock)
321  {
322  	binder_debug(BINDER_DEBUG_SPINLOCKS,
323  		     "%s: line=%d\n", __func__, line);
324  	spin_unlock(&proc->inner_lock);
325  }
326  
327  /**
328   * binder_node_lock() - Acquire spinlock for given binder_node
329   * @node:         struct binder_node to acquire
330   *
331   * Acquires node->lock. Used to protect binder_node fields
332   */
333  #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
334  static void
335  _binder_node_lock(struct binder_node *node, int line)
336  	__acquires(&node->lock)
337  {
338  	binder_debug(BINDER_DEBUG_SPINLOCKS,
339  		     "%s: line=%d\n", __func__, line);
340  	spin_lock(&node->lock);
341  }
342  
343  /**
344   * binder_node_unlock() - Release spinlock for given binder_node
345   * @node:         struct binder_node to release
346   *
347   * Release lock acquired via binder_node_lock()
348   */
349  #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
350  static void
351  _binder_node_unlock(struct binder_node *node, int line)
352  	__releases(&node->lock)
353  {
354  	binder_debug(BINDER_DEBUG_SPINLOCKS,
355  		     "%s: line=%d\n", __func__, line);
356  	spin_unlock(&node->lock);
357  }
358  
359  /**
360   * binder_node_inner_lock() - Acquire node and inner locks
361   * @node:         struct binder_node to acquire
362   *
363   * Acquires node->lock. If node->proc is non-NULL, also acquires
364   * proc->inner_lock. Used to protect binder_node fields
365   */
366  #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
367  static void
368  _binder_node_inner_lock(struct binder_node *node, int line)
369  	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
370  {
371  	binder_debug(BINDER_DEBUG_SPINLOCKS,
372  		     "%s: line=%d\n", __func__, line);
373  	spin_lock(&node->lock);
374  	if (node->proc)
375  		binder_inner_proc_lock(node->proc);
376  	else
377  		/* annotation for sparse */
378  		__acquire(&node->proc->inner_lock);
379  }
380  
381  /**
382   * binder_node_inner_unlock() - Release node and inner locks
383   * @node:         struct binder_node to release
384   *
385   * Release locks acquired via binder_node_inner_lock()
386   */
387  #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
388  static void
389  _binder_node_inner_unlock(struct binder_node *node, int line)
390  	__releases(&node->lock) __releases(&node->proc->inner_lock)
391  {
392  	struct binder_proc *proc = node->proc;
393  
394  	binder_debug(BINDER_DEBUG_SPINLOCKS,
395  		     "%s: line=%d\n", __func__, line);
396  	if (proc)
397  		binder_inner_proc_unlock(proc);
398  	else
399  		/* annotation for sparse */
400  		__release(&node->proc->inner_lock);
401  	spin_unlock(&node->lock);
402  }
403  
404  static bool binder_worklist_empty_ilocked(struct list_head *list)
405  {
406  	return list_empty(list);
407  }
408  
409  /**
410   * binder_worklist_empty() - Check if no items on the work list
411   * @proc:       binder_proc associated with list
412   * @list:	list to check
413   *
414   * Return: true if there are no items on list, else false
415   */
416  static bool binder_worklist_empty(struct binder_proc *proc,
417  				  struct list_head *list)
418  {
419  	bool ret;
420  
421  	binder_inner_proc_lock(proc);
422  	ret = binder_worklist_empty_ilocked(list);
423  	binder_inner_proc_unlock(proc);
424  	return ret;
425  }
426  
427  /**
428   * binder_enqueue_work_ilocked() - Add an item to the work list
429   * @work:         struct binder_work to add to list
430   * @target_list:  list to add work to
431   *
432   * Adds the work to the specified list. Asserts that work
433   * is not already on a list.
434   *
435   * Requires the proc->inner_lock to be held.
436   */
437  static void
438  binder_enqueue_work_ilocked(struct binder_work *work,
439  			   struct list_head *target_list)
440  {
441  	BUG_ON(target_list == NULL);
442  	BUG_ON(work->entry.next && !list_empty(&work->entry));
443  	list_add_tail(&work->entry, target_list);
444  }
445  
446  /**
447   * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
448   * @thread:       thread to queue work to
449   * @work:         struct binder_work to add to list
450   *
451   * Adds the work to the todo list of the thread. Doesn't set the process_todo
452   * flag, which means that (if it wasn't already set) the thread will go to
453   * sleep without handling this work when it calls read.
454   *
455   * Requires the proc->inner_lock to be held.
456   */
457  static void
458  binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
459  					    struct binder_work *work)
460  {
461  	WARN_ON(!list_empty(&thread->waiting_thread_node));
462  	binder_enqueue_work_ilocked(work, &thread->todo);
463  }
464  
465  /**
466   * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
467   * @thread:       thread to queue work to
468   * @work:         struct binder_work to add to list
469   *
470   * Adds the work to the todo list of the thread, and enables processing
471   * of the todo queue.
472   *
473   * Requires the proc->inner_lock to be held.
474   */
475  static void
476  binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
477  				   struct binder_work *work)
478  {
479  	WARN_ON(!list_empty(&thread->waiting_thread_node));
480  	binder_enqueue_work_ilocked(work, &thread->todo);
481  
482  	/* (e)poll-based threads require an explicit wakeup signal when
483  	 * queuing their own work; they rely on these events to consume
484  	 * messages without blocking on I/O. Without it, threads risk waiting
485  	 * indefinitely without handling the work.
486  	 */
487  	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
488  	    thread->pid == current->pid && !thread->process_todo)
489  		wake_up_interruptible_sync(&thread->wait);
490  
491  	thread->process_todo = true;
492  }
493  
494  /**
495   * binder_enqueue_thread_work() - Add an item to the thread work list
496   * @thread:       thread to queue work to
497   * @work:         struct binder_work to add to list
498   *
499   * Adds the work to the todo list of the thread, and enables processing
500   * of the todo queue.
501   */
502  static void
503  binder_enqueue_thread_work(struct binder_thread *thread,
504  			   struct binder_work *work)
505  {
506  	binder_inner_proc_lock(thread->proc);
507  	binder_enqueue_thread_work_ilocked(thread, work);
508  	binder_inner_proc_unlock(thread->proc);
509  }
510  
511  static void
512  binder_dequeue_work_ilocked(struct binder_work *work)
513  {
514  	list_del_init(&work->entry);
515  }
516  
517  /**
518   * binder_dequeue_work() - Removes an item from the work list
519   * @proc:         binder_proc associated with list
520   * @work:         struct binder_work to remove from list
521   *
522   * Removes the specified work item from whatever list it is on.
523   * Can safely be called if work is not on any list.
524   */
525  static void
526  binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
527  {
528  	binder_inner_proc_lock(proc);
529  	binder_dequeue_work_ilocked(work);
530  	binder_inner_proc_unlock(proc);
531  }
532  
533  static struct binder_work *binder_dequeue_work_head_ilocked(
534  					struct list_head *list)
535  {
536  	struct binder_work *w;
537  
538  	w = list_first_entry_or_null(list, struct binder_work, entry);
539  	if (w)
540  		list_del_init(&w->entry);
541  	return w;
542  }
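/*
 * Illustrative sketch (editorial, not part of the driver): the usual
 * consumer pattern for the worklist helpers above. Work is dequeued
 * under the inner lock and handled with the lock dropped; "proc" and
 * "list" are hypothetical.
 *
 *	struct binder_work *w;
 *
 *	binder_inner_proc_lock(proc);
 *	w = binder_dequeue_work_head_ilocked(list);
 *	binder_inner_proc_unlock(proc);
 *	if (w) {
 *		switch (w->type) {
 *		case BINDER_WORK_TRANSACTION:
 *			(handle the work without holding the inner lock)
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */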
543  
544  static void
545  binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
546  static void binder_free_thread(struct binder_thread *thread);
547  static void binder_free_proc(struct binder_proc *proc);
548  static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
549  
550  static bool binder_has_work_ilocked(struct binder_thread *thread,
551  				    bool do_proc_work)
552  {
553  	return thread->process_todo ||
554  		thread->looper_need_return ||
555  		(do_proc_work &&
556  		 !binder_worklist_empty_ilocked(&thread->proc->todo));
557  }
558  
559  static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
560  {
561  	bool has_work;
562  
563  	binder_inner_proc_lock(thread->proc);
564  	has_work = binder_has_work_ilocked(thread, do_proc_work);
565  	binder_inner_proc_unlock(thread->proc);
566  
567  	return has_work;
568  }
569  
570  static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
571  {
572  	return !thread->transaction_stack &&
573  		binder_worklist_empty_ilocked(&thread->todo);
574  }
575  
576  static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
577  					       bool sync)
578  {
579  	struct rb_node *n;
580  	struct binder_thread *thread;
581  
582  	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
583  		thread = rb_entry(n, struct binder_thread, rb_node);
584  		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
585  		    binder_available_for_proc_work_ilocked(thread)) {
586  			if (sync)
587  				wake_up_interruptible_sync(&thread->wait);
588  			else
589  				wake_up_interruptible(&thread->wait);
590  		}
591  	}
592  }
593  
594  /**
595   * binder_select_thread_ilocked() - selects a thread for doing proc work.
596   * @proc:	process to select a thread from
597   *
598   * Note that calling this function moves the thread off the waiting_threads
599   * list, so it can only be woken up by the caller of this function, or a
600   * signal. Therefore, callers *should* always wake up the thread this function
601   * returns.
602   *
603   * Return:	If there's a thread currently waiting for process work,
604   *		returns that thread. Otherwise returns NULL.
605   */
606  static struct binder_thread *
607  binder_select_thread_ilocked(struct binder_proc *proc)
608  {
609  	struct binder_thread *thread;
610  
611  	assert_spin_locked(&proc->inner_lock);
612  	thread = list_first_entry_or_null(&proc->waiting_threads,
613  					  struct binder_thread,
614  					  waiting_thread_node);
615  
616  	if (thread)
617  		list_del_init(&thread->waiting_thread_node);
618  
619  	return thread;
620  }
621  
622  /**
623   * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
624   * @proc:	process to wake up a thread in
625   * @thread:	specific thread to wake-up (may be NULL)
626   * @sync:	whether to do a synchronous wake-up
627   *
628   * This function wakes up a thread in the @proc process.
629   * The caller may provide a specific thread to wake-up in
630   * the @thread parameter. If @thread is NULL, this function
631   * will wake up threads that have called poll().
632   *
633   * Note that for this function to work as expected, callers
634   * should first call binder_select_thread() to find a thread
635   * to handle the work (if they don't have a thread already),
636   * and pass the result into the @thread parameter.
637   */
638  static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
639  					 struct binder_thread *thread,
640  					 bool sync)
641  {
642  	assert_spin_locked(&proc->inner_lock);
643  
644  	if (thread) {
645  		if (sync)
646  			wake_up_interruptible_sync(&thread->wait);
647  		else
648  			wake_up_interruptible(&thread->wait);
649  		return;
650  	}
651  
652  	/* Didn't find a thread waiting for proc work; this can happen
653  	 * in two scenarios:
654  	 * 1. All threads are busy handling transactions
655  	 *    In that case, one of those threads should call back into
656  	 *    the kernel driver soon and pick up this work.
657  	 * 2. Threads are using the (e)poll interface, in which case
658  	 *    they may be blocked on the waitqueue without having been
659  	 *    added to waiting_threads. For this case, we just iterate
660  	 *    over all threads not handling transaction work, and
661  	 *    wake them all up. We wake all because we don't know whether
662  	 *    a thread that called into (e)poll is handling non-binder
663  	 *    work currently.
664  	 */
665  	binder_wakeup_poll_threads_ilocked(proc, sync);
666  }
667  
668  static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
669  {
670  	struct binder_thread *thread = binder_select_thread_ilocked(proc);
671  
672  	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
673  }
674  
675  static void binder_set_nice(long nice)
676  {
677  	long min_nice;
678  
679  	if (can_nice(current, nice)) {
680  		set_user_nice(current, nice);
681  		return;
682  	}
683  	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
684  	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
685  		     "%d: nice value %ld not allowed use %ld instead\n",
686  		      current->pid, nice, min_nice);
687  	set_user_nice(current, min_nice);
688  	if (min_nice <= MAX_NICE)
689  		return;
690  	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
691  }
692  
693  static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
694  						   binder_uintptr_t ptr)
695  {
696  	struct rb_node *n = proc->nodes.rb_node;
697  	struct binder_node *node;
698  
699  	assert_spin_locked(&proc->inner_lock);
700  
701  	while (n) {
702  		node = rb_entry(n, struct binder_node, rb_node);
703  
704  		if (ptr < node->ptr)
705  			n = n->rb_left;
706  		else if (ptr > node->ptr)
707  			n = n->rb_right;
708  		else {
709  			/*
710  			 * take an implicit weak reference
711  			 * to ensure node stays alive until
712  			 * call to binder_put_node()
713  			 */
714  			binder_inc_node_tmpref_ilocked(node);
715  			return node;
716  		}
717  	}
718  	return NULL;
719  }
720  
721  static struct binder_node *binder_get_node(struct binder_proc *proc,
722  					   binder_uintptr_t ptr)
723  {
724  	struct binder_node *node;
725  
726  	binder_inner_proc_lock(proc);
727  	node = binder_get_node_ilocked(proc, ptr);
728  	binder_inner_proc_unlock(proc);
729  	return node;
730  }
731  
732  static struct binder_node *binder_init_node_ilocked(
733  						struct binder_proc *proc,
734  						struct binder_node *new_node,
735  						struct flat_binder_object *fp)
736  {
737  	struct rb_node **p = &proc->nodes.rb_node;
738  	struct rb_node *parent = NULL;
739  	struct binder_node *node;
740  	binder_uintptr_t ptr = fp ? fp->binder : 0;
741  	binder_uintptr_t cookie = fp ? fp->cookie : 0;
742  	__u32 flags = fp ? fp->flags : 0;
743  
744  	assert_spin_locked(&proc->inner_lock);
745  
746  	while (*p) {
747  
748  		parent = *p;
749  		node = rb_entry(parent, struct binder_node, rb_node);
750  
751  		if (ptr < node->ptr)
752  			p = &(*p)->rb_left;
753  		else if (ptr > node->ptr)
754  			p = &(*p)->rb_right;
755  		else {
756  			/*
757  			 * A matching node is already in
758  			 * the rb tree. Abandon the init
759  			 * and return it.
760  			 */
761  			binder_inc_node_tmpref_ilocked(node);
762  			return node;
763  		}
764  	}
765  	node = new_node;
766  	binder_stats_created(BINDER_STAT_NODE);
767  	node->tmp_refs++;
768  	rb_link_node(&node->rb_node, parent, p);
769  	rb_insert_color(&node->rb_node, &proc->nodes);
770  	node->debug_id = atomic_inc_return(&binder_last_id);
771  	node->proc = proc;
772  	node->ptr = ptr;
773  	node->cookie = cookie;
774  	node->work.type = BINDER_WORK_NODE;
775  	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
776  	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
777  	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
778  	spin_lock_init(&node->lock);
779  	INIT_LIST_HEAD(&node->work.entry);
780  	INIT_LIST_HEAD(&node->async_todo);
781  	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
782  		     "%d:%d node %d u%016llx c%016llx created\n",
783  		     proc->pid, current->pid, node->debug_id,
784  		     (u64)node->ptr, (u64)node->cookie);
785  
786  	return node;
787  }
788  
789  static struct binder_node *binder_new_node(struct binder_proc *proc,
790  					   struct flat_binder_object *fp)
791  {
792  	struct binder_node *node;
793  	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
794  
795  	if (!new_node)
796  		return NULL;
797  	binder_inner_proc_lock(proc);
798  	node = binder_init_node_ilocked(proc, new_node, fp);
799  	binder_inner_proc_unlock(proc);
800  	if (node != new_node)
801  		/*
802  		 * The node was already added by another thread
803  		 */
804  		kfree(new_node);
805  
806  	return node;
807  }
808  
809  static void binder_free_node(struct binder_node *node)
810  {
811  	kfree(node);
812  	binder_stats_deleted(BINDER_STAT_NODE);
813  }
814  
815  static int binder_inc_node_nilocked(struct binder_node *node, int strong,
816  				    int internal,
817  				    struct list_head *target_list)
818  {
819  	struct binder_proc *proc = node->proc;
820  
821  	assert_spin_locked(&node->lock);
822  	if (proc)
823  		assert_spin_locked(&proc->inner_lock);
824  	if (strong) {
825  		if (internal) {
826  			if (target_list == NULL &&
827  			    node->internal_strong_refs == 0 &&
828  			    !(node->proc &&
829  			      node == node->proc->context->binder_context_mgr_node &&
830  			      node->has_strong_ref)) {
831  				pr_err("invalid inc strong node for %d\n",
832  					node->debug_id);
833  				return -EINVAL;
834  			}
835  			node->internal_strong_refs++;
836  		} else
837  			node->local_strong_refs++;
838  		if (!node->has_strong_ref && target_list) {
839  			struct binder_thread *thread = container_of(target_list,
840  						    struct binder_thread, todo);
841  			binder_dequeue_work_ilocked(&node->work);
842  			BUG_ON(&thread->todo != target_list);
843  			binder_enqueue_deferred_thread_work_ilocked(thread,
844  								   &node->work);
845  		}
846  	} else {
847  		if (!internal)
848  			node->local_weak_refs++;
849  		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
850  			if (target_list == NULL) {
851  				pr_err("invalid inc weak node for %d\n",
852  					node->debug_id);
853  				return -EINVAL;
854  			}
855  			/*
856  			 * See comment above
857  			 */
858  			binder_enqueue_work_ilocked(&node->work, target_list);
859  		}
860  	}
861  	return 0;
862  }
863  
864  static int binder_inc_node(struct binder_node *node, int strong, int internal,
865  			   struct list_head *target_list)
866  {
867  	int ret;
868  
869  	binder_node_inner_lock(node);
870  	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
871  	binder_node_inner_unlock(node);
872  
873  	return ret;
874  }
875  
876  static bool binder_dec_node_nilocked(struct binder_node *node,
877  				     int strong, int internal)
878  {
879  	struct binder_proc *proc = node->proc;
880  
881  	assert_spin_locked(&node->lock);
882  	if (proc)
883  		assert_spin_locked(&proc->inner_lock);
884  	if (strong) {
885  		if (internal)
886  			node->internal_strong_refs--;
887  		else
888  			node->local_strong_refs--;
889  		if (node->local_strong_refs || node->internal_strong_refs)
890  			return false;
891  	} else {
892  		if (!internal)
893  			node->local_weak_refs--;
894  		if (node->local_weak_refs || node->tmp_refs ||
895  				!hlist_empty(&node->refs))
896  			return false;
897  	}
898  
899  	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
900  		if (list_empty(&node->work.entry)) {
901  			binder_enqueue_work_ilocked(&node->work, &proc->todo);
902  			binder_wakeup_proc_ilocked(proc);
903  		}
904  	} else {
905  		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
906  		    !node->local_weak_refs && !node->tmp_refs) {
907  			if (proc) {
908  				binder_dequeue_work_ilocked(&node->work);
909  				rb_erase(&node->rb_node, &proc->nodes);
910  				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
911  					     "refless node %d deleted\n",
912  					     node->debug_id);
913  			} else {
914  				BUG_ON(!list_empty(&node->work.entry));
915  				spin_lock(&binder_dead_nodes_lock);
916  				/*
917  				 * tmp_refs could have changed so
918  				 * check it again
919  				 */
920  				if (node->tmp_refs) {
921  					spin_unlock(&binder_dead_nodes_lock);
922  					return false;
923  				}
924  				hlist_del(&node->dead_node);
925  				spin_unlock(&binder_dead_nodes_lock);
926  				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
927  					     "dead node %d deleted\n",
928  					     node->debug_id);
929  			}
930  			return true;
931  		}
932  	}
933  	return false;
934  }
935  
936  static void binder_dec_node(struct binder_node *node, int strong, int internal)
937  {
938  	bool free_node;
939  
940  	binder_node_inner_lock(node);
941  	free_node = binder_dec_node_nilocked(node, strong, internal);
942  	binder_node_inner_unlock(node);
943  	if (free_node)
944  		binder_free_node(node);
945  }
946  
947  static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
948  {
949  	/*
950  	 * No call to binder_inc_node() is needed since we
951  	 * don't need to inform userspace of any changes to
952  	 * tmp_refs
953  	 */
954  	node->tmp_refs++;
955  }
956  
957  /**
958   * binder_inc_node_tmpref() - take a temporary reference on node
959   * @node:	node to reference
960   *
961   * Take reference on node to prevent the node from being freed
962   * while referenced only by a local variable. The inner lock is
963   * needed to serialize with the node work on the queue (which
964   * isn't needed after the node is dead). If the node is dead
965   * (node->proc is NULL), use binder_dead_nodes_lock to protect
966   * node->tmp_refs against dead-node-only cases where the node
967   * lock cannot be acquired (e.g. traversing the dead node list to
968   * print nodes)
969   */
970  static void binder_inc_node_tmpref(struct binder_node *node)
971  {
972  	binder_node_lock(node);
973  	if (node->proc)
974  		binder_inner_proc_lock(node->proc);
975  	else
976  		spin_lock(&binder_dead_nodes_lock);
977  	binder_inc_node_tmpref_ilocked(node);
978  	if (node->proc)
979  		binder_inner_proc_unlock(node->proc);
980  	else
981  		spin_unlock(&binder_dead_nodes_lock);
982  	binder_node_unlock(node);
983  }
984  
985  /**
986   * binder_dec_node_tmpref() - remove a temporary reference on node
987   * @node:	node to reference
988   *
989   * Release temporary reference on node taken via binder_inc_node_tmpref()
990   */
991  static void binder_dec_node_tmpref(struct binder_node *node)
992  {
993  	bool free_node;
994  
995  	binder_node_inner_lock(node);
996  	if (!node->proc)
997  		spin_lock(&binder_dead_nodes_lock);
998  	else
999  		__acquire(&binder_dead_nodes_lock);
1000  	node->tmp_refs--;
1001  	BUG_ON(node->tmp_refs < 0);
1002  	if (!node->proc)
1003  		spin_unlock(&binder_dead_nodes_lock);
1004  	else
1005  		__release(&binder_dead_nodes_lock);
1006  	/*
1007  	 * Call binder_dec_node() to check if all refcounts are 0
1008  	 * and cleanup is needed. Calling with strong=0 and internal=1
1009  	 * causes no actual reference to be released in binder_dec_node().
1010  	 * If that changes, a change is needed here too.
1011  	 */
1012  	free_node = binder_dec_node_nilocked(node, 0, 1);
1013  	binder_node_inner_unlock(node);
1014  	if (free_node)
1015  		binder_free_node(node);
1016  }
1017  
1018  static void binder_put_node(struct binder_node *node)
1019  {
1020  	binder_dec_node_tmpref(node);
1021  }
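/*
 * Illustrative sketch (editorial, not part of the driver): the node
 * lookup helpers above return the node with an extra tmp_ref, so a
 * typical caller pairs the lookup with binder_put_node(). "proc" and
 * "ptr" are hypothetical.
 *
 *	struct binder_node *node = binder_get_node(proc, ptr);
 *
 *	if (node) {
 *		(use node; the tmp_ref keeps it alive here)
 *		binder_put_node(node);
 *	}
 */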
1022  
1023  static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1024  						 u32 desc, bool need_strong_ref)
1025  {
1026  	struct rb_node *n = proc->refs_by_desc.rb_node;
1027  	struct binder_ref *ref;
1028  
1029  	while (n) {
1030  		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1031  
1032  		if (desc < ref->data.desc) {
1033  			n = n->rb_left;
1034  		} else if (desc > ref->data.desc) {
1035  			n = n->rb_right;
1036  		} else if (need_strong_ref && !ref->data.strong) {
1037  			binder_user_error("tried to use weak ref as strong ref\n");
1038  			return NULL;
1039  		} else {
1040  			return ref;
1041  		}
1042  	}
1043  	return NULL;
1044  }
1045  
1046  /**
1047   * binder_get_ref_for_node_olocked() - get the ref associated with given node
1048   * @proc:	binder_proc that owns the ref
1049   * @node:	binder_node of target
1050   * @new_ref:	newly allocated binder_ref to be initialized or %NULL
1051   *
1052   * Look up the ref for the given node and return it if it exists
1053   *
1054   * If it doesn't exist and the caller provides a newly allocated
1055   * ref, initialize the fields of the newly allocated ref and insert
1056   * into the given proc rb_trees and node refs list.
1057   *
1058   * Return:	the ref for node. It is possible that another thread
1059   *		allocated/initialized the ref first in which case the
1060   *		returned ref would be different than the passed-in
1061   *		new_ref. new_ref must be kfree'd by the caller in
1062   *		this case.
1063   */
1064  static struct binder_ref *binder_get_ref_for_node_olocked(
1065  					struct binder_proc *proc,
1066  					struct binder_node *node,
1067  					struct binder_ref *new_ref)
1068  {
1069  	struct binder_context *context = proc->context;
1070  	struct rb_node **p = &proc->refs_by_node.rb_node;
1071  	struct rb_node *parent = NULL;
1072  	struct binder_ref *ref;
1073  	struct rb_node *n;
1074  
1075  	while (*p) {
1076  		parent = *p;
1077  		ref = rb_entry(parent, struct binder_ref, rb_node_node);
1078  
1079  		if (node < ref->node)
1080  			p = &(*p)->rb_left;
1081  		else if (node > ref->node)
1082  			p = &(*p)->rb_right;
1083  		else
1084  			return ref;
1085  	}
1086  	if (!new_ref)
1087  		return NULL;
1088  
1089  	binder_stats_created(BINDER_STAT_REF);
1090  	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1091  	new_ref->proc = proc;
1092  	new_ref->node = node;
1093  	rb_link_node(&new_ref->rb_node_node, parent, p);
1094  	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1095  
1096  	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1097  	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1098  		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1099  		if (ref->data.desc > new_ref->data.desc)
1100  			break;
1101  		new_ref->data.desc = ref->data.desc + 1;
1102  	}
1103  
1104  	p = &proc->refs_by_desc.rb_node;
1105  	while (*p) {
1106  		parent = *p;
1107  		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1108  
1109  		if (new_ref->data.desc < ref->data.desc)
1110  			p = &(*p)->rb_left;
1111  		else if (new_ref->data.desc > ref->data.desc)
1112  			p = &(*p)->rb_right;
1113  		else
1114  			BUG();
1115  	}
1116  	rb_link_node(&new_ref->rb_node_desc, parent, p);
1117  	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1118  
1119  	binder_node_lock(node);
1120  	hlist_add_head(&new_ref->node_entry, &node->refs);
1121  
1122  	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1123  		     "%d new ref %d desc %d for node %d\n",
1124  		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1125  		      node->debug_id);
1126  	binder_node_unlock(node);
1127  	return new_ref;
1128  }
1129  
1130  static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1131  {
1132  	bool delete_node = false;
1133  
1134  	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1135  		     "%d delete ref %d desc %d for node %d\n",
1136  		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
1137  		      ref->node->debug_id);
1138  
1139  	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1140  	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1141  
1142  	binder_node_inner_lock(ref->node);
1143  	if (ref->data.strong)
1144  		binder_dec_node_nilocked(ref->node, 1, 1);
1145  
1146  	hlist_del(&ref->node_entry);
1147  	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1148  	binder_node_inner_unlock(ref->node);
1149  	/*
1150  	 * Clear ref->node unless we want the caller to free the node
1151  	 */
1152  	if (!delete_node) {
1153  		/*
1154  		 * The caller uses ref->node to determine
1155  		 * whether the node needs to be freed. Clear
1156  		 * it since the node is still alive.
1157  		 */
1158  		ref->node = NULL;
1159  	}
1160  
1161  	if (ref->death) {
1162  		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1163  			     "%d delete ref %d desc %d has death notification\n",
1164  			      ref->proc->pid, ref->data.debug_id,
1165  			      ref->data.desc);
1166  		binder_dequeue_work(ref->proc, &ref->death->work);
1167  		binder_stats_deleted(BINDER_STAT_DEATH);
1168  	}
1169  	binder_stats_deleted(BINDER_STAT_REF);
1170  }
1171  
1172  /**
1173   * binder_inc_ref_olocked() - increment the ref for given handle
1174   * @ref:         ref to be incremented
1175   * @strong:      if true, strong increment, else weak
1176   * @target_list: list to queue node work on
1177   *
1178   * Increment the ref. @ref->proc->outer_lock must be held on entry
1179   *
1180   * Return: 0, if successful, else errno
1181   */
1182  static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1183  				  struct list_head *target_list)
1184  {
1185  	int ret;
1186  
1187  	if (strong) {
1188  		if (ref->data.strong == 0) {
1189  			ret = binder_inc_node(ref->node, 1, 1, target_list);
1190  			if (ret)
1191  				return ret;
1192  		}
1193  		ref->data.strong++;
1194  	} else {
1195  		if (ref->data.weak == 0) {
1196  			ret = binder_inc_node(ref->node, 0, 1, target_list);
1197  			if (ret)
1198  				return ret;
1199  		}
1200  		ref->data.weak++;
1201  	}
1202  	return 0;
1203  }
1204  
1205  /**
1206   * binder_dec_ref_olocked() - dec the ref for given handle
1207   * @ref:	ref to be decremented
1208   * @strong:	if true, strong decrement, else weak
1209   *
1210   * Decrement the ref.
1211   *
1212   * Return: %true if ref is cleaned up and ready to be freed.
1213   */
1214  static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1215  {
1216  	if (strong) {
1217  		if (ref->data.strong == 0) {
1218  			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1219  					  ref->proc->pid, ref->data.debug_id,
1220  					  ref->data.desc, ref->data.strong,
1221  					  ref->data.weak);
1222  			return false;
1223  		}
1224  		ref->data.strong--;
1225  		if (ref->data.strong == 0)
1226  			binder_dec_node(ref->node, strong, 1);
1227  	} else {
1228  		if (ref->data.weak == 0) {
1229  			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1230  					  ref->proc->pid, ref->data.debug_id,
1231  					  ref->data.desc, ref->data.strong,
1232  					  ref->data.weak);
1233  			return false;
1234  		}
1235  		ref->data.weak--;
1236  	}
1237  	if (ref->data.strong == 0 && ref->data.weak == 0) {
1238  		binder_cleanup_ref_olocked(ref);
1239  		return true;
1240  	}
1241  	return false;
1242  }
1243  
1244  /**
1245   * binder_get_node_from_ref() - get the node from the given proc/desc
1246   * @proc:	proc containing the ref
1247   * @desc:	the handle associated with the ref
1248   * @need_strong_ref: if true, only return node if ref is strong
1249   * @rdata:	the id/refcount data for the ref
1250   *
1251   * Given a proc and ref handle, return the associated binder_node
1252   *
1253   * Return: a binder_node, or NULL if not found or if only a weak ref exists when a strong ref is required
1254   */
1255  static struct binder_node *binder_get_node_from_ref(
1256  		struct binder_proc *proc,
1257  		u32 desc, bool need_strong_ref,
1258  		struct binder_ref_data *rdata)
1259  {
1260  	struct binder_node *node;
1261  	struct binder_ref *ref;
1262  
1263  	binder_proc_lock(proc);
1264  	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1265  	if (!ref)
1266  		goto err_no_ref;
1267  	node = ref->node;
1268  	/*
1269  	 * Take an implicit reference on the node to ensure
1270  	 * it stays alive until the call to binder_put_node()
1271  	 */
1272  	binder_inc_node_tmpref(node);
1273  	if (rdata)
1274  		*rdata = ref->data;
1275  	binder_proc_unlock(proc);
1276  
1277  	return node;
1278  
1279  err_no_ref:
1280  	binder_proc_unlock(proc);
1281  	return NULL;
1282  }
1283  
1284  /**
1285   * binder_free_ref() - free the binder_ref
1286   * @ref:	ref to free
1287   *
1288   * Free the binder_ref. Free the binder_node indicated by ref->node
1289   * (if non-NULL) and the binder_ref_death indicated by ref->death.
1290   */
1291  static void binder_free_ref(struct binder_ref *ref)
1292  {
1293  	if (ref->node)
1294  		binder_free_node(ref->node);
1295  	kfree(ref->death);
1296  	kfree(ref);
1297  }
1298  
1299  /**
1300   * binder_update_ref_for_handle() - inc/dec the ref for given handle
1301   * @proc:	proc containing the ref
1302   * @desc:	the handle associated with the ref
1303   * @increment:	true=inc reference, false=dec reference
1304   * @strong:	true=strong reference, false=weak reference
1305   * @rdata:	the id/refcount data for the ref
1306   *
1307   * Given a proc and ref handle, increment or decrement the ref
1308   * according to "increment" arg.
1309   *
1310   * Return: 0 if successful, else errno
1311   */
1312  static int binder_update_ref_for_handle(struct binder_proc *proc,
1313  		uint32_t desc, bool increment, bool strong,
1314  		struct binder_ref_data *rdata)
1315  {
1316  	int ret = 0;
1317  	struct binder_ref *ref;
1318  	bool delete_ref = false;
1319  
1320  	binder_proc_lock(proc);
1321  	ref = binder_get_ref_olocked(proc, desc, strong);
1322  	if (!ref) {
1323  		ret = -EINVAL;
1324  		goto err_no_ref;
1325  	}
1326  	if (increment)
1327  		ret = binder_inc_ref_olocked(ref, strong, NULL);
1328  	else
1329  		delete_ref = binder_dec_ref_olocked(ref, strong);
1330  
1331  	if (rdata)
1332  		*rdata = ref->data;
1333  	binder_proc_unlock(proc);
1334  
1335  	if (delete_ref)
1336  		binder_free_ref(ref);
1337  	return ret;
1338  
1339  err_no_ref:
1340  	binder_proc_unlock(proc);
1341  	return ret;
1342  }
1343  
1344  /**
1345   * binder_dec_ref_for_handle() - dec the ref for given handle
1346   * @proc:	proc containing the ref
1347   * @desc:	the handle associated with the ref
1348   * @strong:	true=strong reference, false=weak reference
1349   * @rdata:	the id/refcount data for the ref
1350   *
1351   * Just calls binder_update_ref_for_handle() to decrement the ref.
1352   *
1353   * Return: 0 if successful, else errno
1354   */
1355  static int binder_dec_ref_for_handle(struct binder_proc *proc,
1356  		uint32_t desc, bool strong, struct binder_ref_data *rdata)
1357  {
1358  	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1359  }
1360  
1361  
1362  /**
1363   * binder_inc_ref_for_node() - increment the ref for given proc/node
1364   * @proc:	 proc containing the ref
1365   * @node:	 target node
1366   * @strong:	 true=strong reference, false=weak reference
1367   * @target_list: worklist to use if node is incremented
1368   * @rdata:	 the id/refcount data for the ref
1369   *
1370   * Given a proc and node, increment the ref. Create the ref if it
1371   * doesn't already exist
1372   *
1373   * Return: 0 if successful, else errno
1374   */
1375  static int binder_inc_ref_for_node(struct binder_proc *proc,
1376  			struct binder_node *node,
1377  			bool strong,
1378  			struct list_head *target_list,
1379  			struct binder_ref_data *rdata)
1380  {
1381  	struct binder_ref *ref;
1382  	struct binder_ref *new_ref = NULL;
1383  	int ret = 0;
1384  
1385  	binder_proc_lock(proc);
1386  	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1387  	if (!ref) {
1388  		binder_proc_unlock(proc);
1389  		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1390  		if (!new_ref)
1391  			return -ENOMEM;
1392  		binder_proc_lock(proc);
1393  		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1394  	}
1395  	ret = binder_inc_ref_olocked(ref, strong, target_list);
1396  	*rdata = ref->data;
1397  	if (ret && ref == new_ref) {
1398  		/*
1399  		 * Cleanup the failed reference here as the target
1400  		 * could now be dead and have already released its
1401  		 * references by now. Calling on the new reference
1402  		 * with strong=0 and a tmp_refs will not decrement
1403  		 * the node. The new_ref gets kfree'd below.
1404  		 */
1405  		binder_cleanup_ref_olocked(new_ref);
1406  		ref = NULL;
1407  	}
1408  
1409  	binder_proc_unlock(proc);
1410  	if (new_ref && ref != new_ref)
1411  		/*
1412  		 * Another thread created the ref first so
1413  		 * free the one we allocated
1414  		 */
1415  		kfree(new_ref);
1416  	return ret;
1417  }
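/*
 * Illustrative sketch (editorial, not part of the driver): roughly how a
 * strong handle in a target process would be taken with the helper
 * above while translating an object. "target_proc", "node" and "thread"
 * are hypothetical stand-ins for call-site values.
 *
 *	struct binder_ref_data rdata;
 *	int ret;
 *
 *	ret = binder_inc_ref_for_node(target_proc, node, true,
 *				      &thread->todo, &rdata);
 *	if (ret)
 *		return ret;
 *	(rdata.desc is the handle value to hand back to userspace)
 */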
1418  
1419  static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1420  					   struct binder_transaction *t)
1421  {
1422  	BUG_ON(!target_thread);
1423  	assert_spin_locked(&target_thread->proc->inner_lock);
1424  	BUG_ON(target_thread->transaction_stack != t);
1425  	BUG_ON(target_thread->transaction_stack->from != target_thread);
1426  	target_thread->transaction_stack =
1427  		target_thread->transaction_stack->from_parent;
1428  	t->from = NULL;
1429  }
1430  
1431  /**
1432   * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1433   * @thread:	thread to decrement
1434   *
1435   * A thread needs to be kept alive while being used to create or
1436   * handle a transaction. binder_get_txn_from() is used to safely
1437   * extract t->from from a binder_transaction and keep the thread
1438   * indicated by t->from from being freed. When done with that
1439   * binder_thread, this function is called to decrement the
1440   * tmp_ref and free if appropriate (thread has been released
1441   * and no transaction being processed by the driver)
1442   */
1443  static void binder_thread_dec_tmpref(struct binder_thread *thread)
1444  {
1445  	/*
1446  	 * atomic is used to protect the counter value while
1447  	 * it cannot reach zero or thread->is_dead is false
1448  	 */
1449  	binder_inner_proc_lock(thread->proc);
1450  	atomic_dec(&thread->tmp_ref);
1451  	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1452  		binder_inner_proc_unlock(thread->proc);
1453  		binder_free_thread(thread);
1454  		return;
1455  	}
1456  	binder_inner_proc_unlock(thread->proc);
1457  }
1458  
1459  /**
1460   * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1461   * @proc:	proc to decrement
1462   *
1463   * A binder_proc needs to be kept alive while being used to create or
1464   * handle a transaction. proc->tmp_ref is incremented when
1465   * creating a new transaction or the binder_proc is currently in-use
1466   * by threads that are being released. When done with the binder_proc,
1467   * this function is called to decrement the counter and free the
1468   * proc if appropriate (proc has been released, all threads have
1469   * been released and not currently in-use to process a transaction).
1470   */
1471  static void binder_proc_dec_tmpref(struct binder_proc *proc)
1472  {
1473  	binder_inner_proc_lock(proc);
1474  	proc->tmp_ref--;
1475  	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1476  			!proc->tmp_ref) {
1477  		binder_inner_proc_unlock(proc);
1478  		binder_free_proc(proc);
1479  		return;
1480  	}
1481  	binder_inner_proc_unlock(proc);
1482  }
1483  
1484  /**
1485   * binder_get_txn_from() - safely extract the "from" thread in transaction
1486   * @t:	binder transaction for t->from
1487   *
1488   * Atomically return the "from" thread and increment the tmp_ref
1489   * count for the thread to ensure it stays alive until
1490   * binder_thread_dec_tmpref() is called.
1491   *
1492   * Return: the value of t->from
1493   */
1494  static struct binder_thread *binder_get_txn_from(
1495  		struct binder_transaction *t)
1496  {
1497  	struct binder_thread *from;
1498  
1499  	spin_lock(&t->lock);
1500  	from = t->from;
1501  	if (from)
1502  		atomic_inc(&from->tmp_ref);
1503  	spin_unlock(&t->lock);
1504  	return from;
1505  }
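/*
 * Illustrative sketch (editorial, not part of the driver): the pairing
 * expected by binder_get_txn_from(). The returned thread carries a
 * tmp_ref that must be dropped with binder_thread_dec_tmpref() once the
 * caller is done with it. "t" is a hypothetical in-flight transaction.
 *
 *	struct binder_thread *from = binder_get_txn_from(t);
 *
 *	if (from) {
 *		(use "from" safely here; it cannot be freed)
 *		binder_thread_dec_tmpref(from);
 *	}
 */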
1506  
1507  /**
1508   * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1509   * @t:	binder transaction for t->from
1510   *
1511   * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1512   * to guarantee that the thread cannot be released while operating on it.
1513   * The caller must call binder_inner_proc_unlock() to release the inner lock
1514   * as well as call binder_dec_thread_txn() to release the reference.
1515   *
1516   * Return: the value of t->from
1517   */
1518  static struct binder_thread *binder_get_txn_from_and_acq_inner(
1519  		struct binder_transaction *t)
1520  	__acquires(&t->from->proc->inner_lock)
1521  {
1522  	struct binder_thread *from;
1523  
1524  	from = binder_get_txn_from(t);
1525  	if (!from) {
1526  		__acquire(&from->proc->inner_lock);
1527  		return NULL;
1528  	}
1529  	binder_inner_proc_lock(from->proc);
1530  	if (t->from) {
1531  		BUG_ON(from != t->from);
1532  		return from;
1533  	}
1534  	binder_inner_proc_unlock(from->proc);
1535  	__acquire(&from->proc->inner_lock);
1536  	binder_thread_dec_tmpref(from);
1537  	return NULL;
1538  }
1539  
1540  /**
1541   * binder_free_txn_fixups() - free unprocessed fd fixups
1542   * @t:	binder transaction whose fd fixups are to be freed
1543   *
1544   * If the transaction is being torn down prior to being
1545   * processed by the target process, free all of the
1546   * fd fixups and fput the file structs. It is safe to
1547   * call this function after the fixups have been
1548   * processed -- in that case, the list will be empty.
1549   */
1550  static void binder_free_txn_fixups(struct binder_transaction *t)
1551  {
1552  	struct binder_txn_fd_fixup *fixup, *tmp;
1553  
1554  	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1555  		fput(fixup->file);
1556  		if (fixup->target_fd >= 0)
1557  			put_unused_fd(fixup->target_fd);
1558  		list_del(&fixup->fixup_entry);
1559  		kfree(fixup);
1560  	}
1561  }
1562  
1563  static void binder_txn_latency_free(struct binder_transaction *t)
1564  {
1565  	int from_proc, from_thread, to_proc, to_thread;
1566  
1567  	spin_lock(&t->lock);
1568  	from_proc = t->from ? t->from->proc->pid : 0;
1569  	from_thread = t->from ? t->from->pid : 0;
1570  	to_proc = t->to_proc ? t->to_proc->pid : 0;
1571  	to_thread = t->to_thread ? t->to_thread->pid : 0;
1572  	spin_unlock(&t->lock);
1573  
1574  	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
1575  }
1576  
1577  static void binder_free_transaction(struct binder_transaction *t)
1578  {
1579  	struct binder_proc *target_proc = t->to_proc;
1580  
1581  	if (target_proc) {
1582  		binder_inner_proc_lock(target_proc);
1583  		target_proc->outstanding_txns--;
1584  		if (target_proc->outstanding_txns < 0)
1585  			pr_warn("%s: Unexpected outstanding_txns %d\n",
1586  				__func__, target_proc->outstanding_txns);
1587  		if (!target_proc->outstanding_txns && target_proc->is_frozen)
1588  			wake_up_interruptible_all(&target_proc->freeze_wait);
1589  		if (t->buffer)
1590  			t->buffer->transaction = NULL;
1591  		binder_inner_proc_unlock(target_proc);
1592  	}
1593  	if (trace_binder_txn_latency_free_enabled())
1594  		binder_txn_latency_free(t);
1595  	/*
1596  	 * If the transaction has no target_proc, then
1597  	 * t->buffer->transaction has already been cleared.
1598  	 */
1599  	binder_free_txn_fixups(t);
1600  	kfree(t);
1601  	binder_stats_deleted(BINDER_STAT_TRANSACTION);
1602  }
1603  
1604  static void binder_send_failed_reply(struct binder_transaction *t,
1605  				     uint32_t error_code)
1606  {
1607  	struct binder_thread *target_thread;
1608  	struct binder_transaction *next;
1609  
1610  	BUG_ON(t->flags & TF_ONE_WAY);
1611  	while (1) {
1612  		target_thread = binder_get_txn_from_and_acq_inner(t);
1613  		if (target_thread) {
1614  			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1615  				     "send failed reply for transaction %d to %d:%d\n",
1616  				      t->debug_id,
1617  				      target_thread->proc->pid,
1618  				      target_thread->pid);
1619  
1620  			binder_pop_transaction_ilocked(target_thread, t);
1621  			if (target_thread->reply_error.cmd == BR_OK) {
1622  				target_thread->reply_error.cmd = error_code;
1623  				binder_enqueue_thread_work_ilocked(
1624  					target_thread,
1625  					&target_thread->reply_error.work);
1626  				wake_up_interruptible(&target_thread->wait);
1627  			} else {
1628  				/*
1629  				 * Cannot get here for normal operation, but
1630  				 * we can if multiple synchronous transactions
1631  				 * are sent without blocking for responses.
1632  				 * Just ignore the 2nd error in this case.
1633  				 */
1634  				pr_warn("Unexpected reply error: %u\n",
1635  					target_thread->reply_error.cmd);
1636  			}
1637  			binder_inner_proc_unlock(target_thread->proc);
1638  			binder_thread_dec_tmpref(target_thread);
1639  			binder_free_transaction(t);
1640  			return;
1641  		}
1642  		__release(&target_thread->proc->inner_lock);
1643  		next = t->from_parent;
1644  
1645  		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1646  			     "send failed reply for transaction %d, target dead\n",
1647  			     t->debug_id);
1648  
1649  		binder_free_transaction(t);
1650  		if (next == NULL) {
1651  			binder_debug(BINDER_DEBUG_DEAD_BINDER,
1652  				     "reply failed, no target thread at root\n");
1653  			return;
1654  		}
1655  		t = next;
1656  		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1657  			     "reply failed, no target thread -- retry %d\n",
1658  			      t->debug_id);
1659  	}
1660  }
1661  
1662  /**
1663   * binder_cleanup_transaction() - cleans up undelivered transaction
1664   * @t:		transaction that needs to be cleaned up
1665   * @reason:	reason the transaction wasn't delivered
1666   * @error_code:	error to return to caller (if synchronous call)
1667   */
1668  static void binder_cleanup_transaction(struct binder_transaction *t,
1669  				       const char *reason,
1670  				       uint32_t error_code)
1671  {
1672  	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1673  		binder_send_failed_reply(t, error_code);
1674  	} else {
1675  		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1676  			"undelivered transaction %d, %s\n",
1677  			t->debug_id, reason);
1678  		binder_free_transaction(t);
1679  	}
1680  }
1681  
1682  /**
1683   * binder_get_object() - gets object and checks for valid metadata
1684   * @proc:	binder_proc owning the buffer
1685   * @u:		sender's user pointer to base of buffer
1686   * @buffer:	binder_buffer that we're parsing.
1687   * @offset:	offset in the @buffer at which to validate an object.
1688   * @object:	struct binder_object to read into
1689   *
1690   * Copy the binder object at the given offset into @object. If @u is
1691   * provided then the copy is from the sender's buffer. If not, then
1692   * it is copied from the target's @buffer.
1693   *
1694   * Return:	If there's a valid metadata object at @offset, the
1695   *		size of that object. Otherwise, it returns zero. The object
1696   *		is read into the struct binder_object pointed to by @object.
1697   */
1698  static size_t binder_get_object(struct binder_proc *proc,
1699  				const void __user *u,
1700  				struct binder_buffer *buffer,
1701  				unsigned long offset,
1702  				struct binder_object *object)
1703  {
1704  	size_t read_size;
1705  	struct binder_object_header *hdr;
1706  	size_t object_size = 0;
1707  
1708  	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1709  	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
1710  	    !IS_ALIGNED(offset, sizeof(u32)))
1711  		return 0;
1712  
1713  	if (u) {
1714  		if (copy_from_user(object, u + offset, read_size))
1715  			return 0;
1716  	} else {
1717  		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1718  						  offset, read_size))
1719  			return 0;
1720  	}
1721  
1722  	/* Ok, now see if we read a complete object. */
1723  	hdr = &object->hdr;
1724  	switch (hdr->type) {
1725  	case BINDER_TYPE_BINDER:
1726  	case BINDER_TYPE_WEAK_BINDER:
1727  	case BINDER_TYPE_HANDLE:
1728  	case BINDER_TYPE_WEAK_HANDLE:
1729  		object_size = sizeof(struct flat_binder_object);
1730  		break;
1731  	case BINDER_TYPE_FD:
1732  		object_size = sizeof(struct binder_fd_object);
1733  		break;
1734  	case BINDER_TYPE_PTR:
1735  		object_size = sizeof(struct binder_buffer_object);
1736  		break;
1737  	case BINDER_TYPE_FDA:
1738  		object_size = sizeof(struct binder_fd_array_object);
1739  		break;
1740  	default:
1741  		return 0;
1742  	}
1743  	if (offset <= buffer->data_size - object_size &&
1744  	    buffer->data_size >= object_size)
1745  		return object_size;
1746  	else
1747  		return 0;
1748  }
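
/*
 * Editor's illustrative sketch (not part of the driver): the typical way a
 * caller walks the offsets array, reading each offset and validating the
 * object behind it, as done in binder_validate_ptr() and
 * binder_transaction_buffer_release() below:
 *
 *	struct binder_object object;
 *	binder_size_t object_offset;
 *	size_t object_size;
 *
 *	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
 *					  buffer, buffer_offset,
 *					  sizeof(object_offset)))
 *		return -EINVAL;		// hypothetical error handling
 *	object_size = binder_get_object(proc, NULL, buffer,
 *					object_offset, &object);
 *	if (!object_size)
 *		return -EINVAL;		// no valid object at that offset
 *	// object.hdr.type now selects which union member to use
 */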
1749  
1750  /**
1751   * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1752   * @proc:	binder_proc owning the buffer
1753   * @b:		binder_buffer containing the object
1754   * @object:	struct binder_object to read into
1755   * @index:	index in offset array at which the binder_buffer_object is
1756   *		located
1757   * @start_offset: points to the start of the offset array
1758   * @object_offsetp: offset of @object read from @b
1759   * @num_valid:	the number of valid offsets in the offset array
1760   *
1761   * Return:	If @index is within the valid range of the offset array
1762   *		described by @start_offset and @num_valid, and if there's a valid
1763   *		binder_buffer_object at the offset found in index @index
1764   *		of the offset array, that object is returned. Otherwise,
1765   *		%NULL is returned.
1766   *		Note that the offset found in index @index itself is not
1767   *		verified; this function assumes that @num_valid elements
1768   *		from @start_offset were previously verified to have valid offsets.
1769   *		If @object_offsetp is non-NULL, then the offset within
1770   *		@b is written to it.
1771   */
1772  static struct binder_buffer_object *binder_validate_ptr(
1773  						struct binder_proc *proc,
1774  						struct binder_buffer *b,
1775  						struct binder_object *object,
1776  						binder_size_t index,
1777  						binder_size_t start_offset,
1778  						binder_size_t *object_offsetp,
1779  						binder_size_t num_valid)
1780  {
1781  	size_t object_size;
1782  	binder_size_t object_offset;
1783  	unsigned long buffer_offset;
1784  
1785  	if (index >= num_valid)
1786  		return NULL;
1787  
1788  	buffer_offset = start_offset + sizeof(binder_size_t) * index;
1789  	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1790  					  b, buffer_offset,
1791  					  sizeof(object_offset)))
1792  		return NULL;
1793  	object_size = binder_get_object(proc, NULL, b, object_offset, object);
1794  	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1795  		return NULL;
1796  	if (object_offsetp)
1797  		*object_offsetp = object_offset;
1798  
1799  	return &object->bbo;
1800  }
1801  
1802  /**
1803   * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1804   * @proc:		binder_proc owning the buffer
1805   * @b:			transaction buffer
1806   * @objects_start_offset: offset to start of objects buffer
1807   * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
1808   * @fixup_offset:	start offset in @buffer to fix up
1809   * @last_obj_offset:	offset to last binder_buffer_object that we fixed
1810   * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
1811   *
1812   * Return:		%true if a fixup in buffer @b at offset @fixup_offset is
1813   *			allowed.
1814   *
1815   * For safety reasons, we only allow fixups inside a buffer to happen
1816   * at increasing offsets; additionally, we only allow fixup on the last
1817   * buffer object that was verified, or one of its parents.
1818   *
1819   * Example of what is allowed:
1820   *
1821   * A
1822   *   B (parent = A, offset = 0)
1823   *   C (parent = A, offset = 16)
1824   *     D (parent = C, offset = 0)
1825   *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1826   *
1827   * Examples of what is not allowed:
1828   *
1829   * Decreasing offsets within the same parent:
1830   * A
1831   *   C (parent = A, offset = 16)
1832   *   B (parent = A, offset = 0) // decreasing offset within A
1833   *
1834   * Referring to a parent that wasn't the last object or any of its parents:
1835   * A
1836   *   B (parent = A, offset = 0)
1837   *   C (parent = A, offset = 0)
1838   *   C (parent = A, offset = 16)
1839   *     D (parent = B, offset = 0) // B is not C or any of C's parents
1840   */
1841  static bool binder_validate_fixup(struct binder_proc *proc,
1842  				  struct binder_buffer *b,
1843  				  binder_size_t objects_start_offset,
1844  				  binder_size_t buffer_obj_offset,
1845  				  binder_size_t fixup_offset,
1846  				  binder_size_t last_obj_offset,
1847  				  binder_size_t last_min_offset)
1848  {
1849  	if (!last_obj_offset) {
1850  		/* Nothing to fix up */
1851  		return false;
1852  	}
1853  
1854  	while (last_obj_offset != buffer_obj_offset) {
1855  		unsigned long buffer_offset;
1856  		struct binder_object last_object;
1857  		struct binder_buffer_object *last_bbo;
1858  		size_t object_size = binder_get_object(proc, NULL, b,
1859  						       last_obj_offset,
1860  						       &last_object);
1861  		if (object_size != sizeof(*last_bbo))
1862  			return false;
1863  
1864  		last_bbo = &last_object.bbo;
1865  		/*
1866  		 * Safe to retrieve the parent of last_obj, since it
1867  		 * was already previously verified by the driver.
1868  		 */
1869  		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1870  			return false;
1871  		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1872  		buffer_offset = objects_start_offset +
1873  			sizeof(binder_size_t) * last_bbo->parent;
1874  		if (binder_alloc_copy_from_buffer(&proc->alloc,
1875  						  &last_obj_offset,
1876  						  b, buffer_offset,
1877  						  sizeof(last_obj_offset)))
1878  			return false;
1879  	}
1880  	return (fixup_offset >= last_min_offset);
1881  }
1882  
1883  /**
1884   * struct binder_task_work_cb - for deferred close
1885   *
1886   * @twork:                callback_head for task work
1887   * @file:                 file to close
1888   *
1889   * Structure to pass task work to be handled after
1890   * returning from binder_ioctl() via task_work_add().
1891   */
1892  struct binder_task_work_cb {
1893  	struct callback_head twork;
1894  	struct file *file;
1895  };
1896  
1897  /**
1898   * binder_do_fd_close() - close the file scheduled for deferred close
1899   * @twork:	callback head for task work
1900   *
1901   * It is not safe to call ksys_close() during the binder_ioctl()
1902   * function if there is a chance that binder's own file descriptor
1903   * might be closed. This is to meet the requirements for using
1904   * fdget() (see comments for __fget_light()). Therefore use
1905   * task_work_add() to schedule the close operation once we have
1906   * returned from binder_ioctl(). This function is a callback
1907   * for that mechanism and does the actual ksys_close() on the
1908   * for that mechanism and drops the final reference (fput()) on the
1909   * given file.
1910  static void binder_do_fd_close(struct callback_head *twork)
1911  {
1912  	struct binder_task_work_cb *twcb = container_of(twork,
1913  			struct binder_task_work_cb, twork);
1914  
1915  	fput(twcb->file);
1916  	kfree(twcb);
1917  }
1918  
1919  /**
1920   * binder_deferred_fd_close() - schedule a close for the given file-descriptor
1921   * @fd:		file-descriptor to close
1922   *
1923   * See comments in binder_do_fd_close(). This function is used to schedule
1924   * a file-descriptor to be closed after returning from binder_ioctl().
1925   */
1926  static void binder_deferred_fd_close(int fd)
1927  {
1928  	struct binder_task_work_cb *twcb;
1929  
1930  	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
1931  	if (!twcb)
1932  		return;
1933  	init_task_work(&twcb->twork, binder_do_fd_close);
1934  	twcb->file = close_fd_get_file(fd);
1935  	if (twcb->file) {
1936  		// pin it until binder_do_fd_close(); see comments there
1937  		get_file(twcb->file);
1938  		filp_close(twcb->file, current->files);
1939  		task_work_add(current, &twcb->twork, TWA_RESUME);
1940  	} else {
1941  		kfree(twcb);
1942  	}
1943  }
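
/*
 * Editor's illustrative sketch (not part of the driver): how the release
 * path below uses the deferred close. The fd is scheduled for closing and
 * the looper is asked to return to user space so the queued task work
 * actually runs (see the BINDER_TYPE_FDA case in
 * binder_transaction_buffer_release()):
 *
 *	binder_deferred_fd_close(fd);
 *	if (thread)
 *		thread->looper_need_return = true;
 */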
1944  
1945  static void binder_transaction_buffer_release(struct binder_proc *proc,
1946  					      struct binder_thread *thread,
1947  					      struct binder_buffer *buffer,
1948  					      binder_size_t off_end_offset,
1949  					      bool is_failure)
1950  {
1951  	int debug_id = buffer->debug_id;
1952  	binder_size_t off_start_offset, buffer_offset;
1953  
1954  	binder_debug(BINDER_DEBUG_TRANSACTION,
1955  		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
1956  		     proc->pid, buffer->debug_id,
1957  		     buffer->data_size, buffer->offsets_size,
1958  		     (unsigned long long)off_end_offset);
1959  
1960  	if (buffer->target_node)
1961  		binder_dec_node(buffer->target_node, 1, 0);
1962  
1963  	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
1964  
1965  	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
1966  	     buffer_offset += sizeof(binder_size_t)) {
1967  		struct binder_object_header *hdr;
1968  		size_t object_size = 0;
1969  		struct binder_object object;
1970  		binder_size_t object_offset;
1971  
1972  		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1973  						   buffer, buffer_offset,
1974  						   sizeof(object_offset)))
1975  			object_size = binder_get_object(proc, NULL, buffer,
1976  							object_offset, &object);
1977  		if (object_size == 0) {
1978  			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
1979  			       debug_id, (u64)object_offset, buffer->data_size);
1980  			continue;
1981  		}
1982  		hdr = &object.hdr;
1983  		switch (hdr->type) {
1984  		case BINDER_TYPE_BINDER:
1985  		case BINDER_TYPE_WEAK_BINDER: {
1986  			struct flat_binder_object *fp;
1987  			struct binder_node *node;
1988  
1989  			fp = to_flat_binder_object(hdr);
1990  			node = binder_get_node(proc, fp->binder);
1991  			if (node == NULL) {
1992  				pr_err("transaction release %d bad node %016llx\n",
1993  				       debug_id, (u64)fp->binder);
1994  				break;
1995  			}
1996  			binder_debug(BINDER_DEBUG_TRANSACTION,
1997  				     "        node %d u%016llx\n",
1998  				     node->debug_id, (u64)node->ptr);
1999  			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2000  					0);
2001  			binder_put_node(node);
2002  		} break;
2003  		case BINDER_TYPE_HANDLE:
2004  		case BINDER_TYPE_WEAK_HANDLE: {
2005  			struct flat_binder_object *fp;
2006  			struct binder_ref_data rdata;
2007  			int ret;
2008  
2009  			fp = to_flat_binder_object(hdr);
2010  			ret = binder_dec_ref_for_handle(proc, fp->handle,
2011  				hdr->type == BINDER_TYPE_HANDLE, &rdata);
2012  
2013  			if (ret) {
2014  				pr_err("transaction release %d bad handle %d, ret = %d\n",
2015  				 debug_id, fp->handle, ret);
2016  				break;
2017  			}
2018  			binder_debug(BINDER_DEBUG_TRANSACTION,
2019  				     "        ref %d desc %d\n",
2020  				     rdata.debug_id, rdata.desc);
2021  		} break;
2022  
2023  		case BINDER_TYPE_FD: {
2024  			/*
2025  			 * No need to close the file here since user-space
2026  			 * closes it for successfully delivered
2027  			 * transactions. For transactions that weren't
2028  			 * delivered, the new fd was never allocated so
2029  			 * there is no need to close and the fput on the
2030  			 * file is done when the transaction is torn
2031  			 * down.
2032  			 */
2033  		} break;
2034  		case BINDER_TYPE_PTR:
2035  			/*
2036  			 * Nothing to do here, this will get cleaned up when the
2037  			 * transaction buffer gets freed
2038  			 */
2039  			break;
2040  		case BINDER_TYPE_FDA: {
2041  			struct binder_fd_array_object *fda;
2042  			struct binder_buffer_object *parent;
2043  			struct binder_object ptr_object;
2044  			binder_size_t fda_offset;
2045  			size_t fd_index;
2046  			binder_size_t fd_buf_size;
2047  			binder_size_t num_valid;
2048  
2049  			if (is_failure) {
2050  				/*
2051  				 * The fd fixups have not been applied so no
2052  				 * fds need to be closed.
2053  				 */
2054  				continue;
2055  			}
2056  
2057  			num_valid = (buffer_offset - off_start_offset) /
2058  						sizeof(binder_size_t);
2059  			fda = to_binder_fd_array_object(hdr);
2060  			parent = binder_validate_ptr(proc, buffer, &ptr_object,
2061  						     fda->parent,
2062  						     off_start_offset,
2063  						     NULL,
2064  						     num_valid);
2065  			if (!parent) {
2066  				pr_err("transaction release %d bad parent offset\n",
2067  				       debug_id);
2068  				continue;
2069  			}
2070  			fd_buf_size = sizeof(u32) * fda->num_fds;
2071  			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2072  				pr_err("transaction release %d invalid number of fds (%lld)\n",
2073  				       debug_id, (u64)fda->num_fds);
2074  				continue;
2075  			}
2076  			if (fd_buf_size > parent->length ||
2077  			    fda->parent_offset > parent->length - fd_buf_size) {
2078  				/* No space for all file descriptors here. */
2079  				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2080  				       debug_id, (u64)fda->num_fds);
2081  				continue;
2082  			}
2083  			/*
2084  			 * the source data for binder_buffer_object is visible
2085  			 * to user-space and the @buffer element is the user
2086  			 * pointer to the buffer_object containing the fd_array.
2087  			 * Convert the address to an offset relative to
2088  			 * the base of the transaction buffer.
2089  			 */
2090  			fda_offset =
2091  			    (parent->buffer - (uintptr_t)buffer->user_data) +
2092  			    fda->parent_offset;
2093  			for (fd_index = 0; fd_index < fda->num_fds;
2094  			     fd_index++) {
2095  				u32 fd;
2096  				int err;
2097  				binder_size_t offset = fda_offset +
2098  					fd_index * sizeof(fd);
2099  
2100  				err = binder_alloc_copy_from_buffer(
2101  						&proc->alloc, &fd, buffer,
2102  						offset, sizeof(fd));
2103  				WARN_ON(err);
2104  				if (!err) {
2105  					binder_deferred_fd_close(fd);
2106  					/*
2107  					 * Need to make sure the thread goes
2108  					 * back to userspace to complete the
2109  					 * deferred close
2110  					 */
2111  					if (thread)
2112  						thread->looper_need_return = true;
2113  				}
2114  			}
2115  		} break;
2116  		default:
2117  			pr_err("transaction release %d bad object type %x\n",
2118  				debug_id, hdr->type);
2119  			break;
2120  		}
2121  	}
2122  }
2123  
2124  /* Clean up all the objects in the buffer */
2125  static inline void binder_release_entire_buffer(struct binder_proc *proc,
2126  						struct binder_thread *thread,
2127  						struct binder_buffer *buffer,
2128  						bool is_failure)
2129  {
2130  	binder_size_t off_end_offset;
2131  
2132  	off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2133  	off_end_offset += buffer->offsets_size;
2134  
2135  	binder_transaction_buffer_release(proc, thread, buffer,
2136  					  off_end_offset, is_failure);
2137  }
2138  
2139  static int binder_translate_binder(struct flat_binder_object *fp,
2140  				   struct binder_transaction *t,
2141  				   struct binder_thread *thread)
2142  {
2143  	struct binder_node *node;
2144  	struct binder_proc *proc = thread->proc;
2145  	struct binder_proc *target_proc = t->to_proc;
2146  	struct binder_ref_data rdata;
2147  	int ret = 0;
2148  
2149  	node = binder_get_node(proc, fp->binder);
2150  	if (!node) {
2151  		node = binder_new_node(proc, fp);
2152  		if (!node)
2153  			return -ENOMEM;
2154  	}
2155  	if (fp->cookie != node->cookie) {
2156  		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2157  				  proc->pid, thread->pid, (u64)fp->binder,
2158  				  node->debug_id, (u64)fp->cookie,
2159  				  (u64)node->cookie);
2160  		ret = -EINVAL;
2161  		goto done;
2162  	}
2163  	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2164  		ret = -EPERM;
2165  		goto done;
2166  	}
2167  
2168  	ret = binder_inc_ref_for_node(target_proc, node,
2169  			fp->hdr.type == BINDER_TYPE_BINDER,
2170  			&thread->todo, &rdata);
2171  	if (ret)
2172  		goto done;
2173  
2174  	if (fp->hdr.type == BINDER_TYPE_BINDER)
2175  		fp->hdr.type = BINDER_TYPE_HANDLE;
2176  	else
2177  		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2178  	fp->binder = 0;
2179  	fp->handle = rdata.desc;
2180  	fp->cookie = 0;
2181  
2182  	trace_binder_transaction_node_to_ref(t, node, &rdata);
2183  	binder_debug(BINDER_DEBUG_TRANSACTION,
2184  		     "        node %d u%016llx -> ref %d desc %d\n",
2185  		     node->debug_id, (u64)node->ptr,
2186  		     rdata.debug_id, rdata.desc);
2187  done:
2188  	binder_put_node(node);
2189  	return ret;
2190  }
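
/*
 * Editor's worked example (illustrative values, not part of the driver):
 * effect of binder_translate_binder() on a flat_binder_object in flight.
 * What the sender put in the buffer:
 *
 *	fp->hdr.type = BINDER_TYPE_BINDER;
 *	fp->binder   = 0xb400007700c0ffee;	// sender-local node address
 *	fp->cookie   = 0xb4000077deadbeef;	// sender-local cookie
 *
 * What the target process receives, after a ref on the node has been
 * taken on its behalf:
 *
 *	fp->hdr.type = BINDER_TYPE_HANDLE;
 *	fp->handle   = rdata.desc;	// e.g. 3, a descriptor in the target
 *	fp->binder   = 0;		// sender addresses are scrubbed
 *	fp->cookie   = 0;
 */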
2191  
2192  static int binder_translate_handle(struct flat_binder_object *fp,
2193  				   struct binder_transaction *t,
2194  				   struct binder_thread *thread)
2195  {
2196  	struct binder_proc *proc = thread->proc;
2197  	struct binder_proc *target_proc = t->to_proc;
2198  	struct binder_node *node;
2199  	struct binder_ref_data src_rdata;
2200  	int ret = 0;
2201  
2202  	node = binder_get_node_from_ref(proc, fp->handle,
2203  			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2204  	if (!node) {
2205  		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2206  				  proc->pid, thread->pid, fp->handle);
2207  		return -EINVAL;
2208  	}
2209  	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2210  		ret = -EPERM;
2211  		goto done;
2212  	}
2213  
2214  	binder_node_lock(node);
2215  	if (node->proc == target_proc) {
2216  		if (fp->hdr.type == BINDER_TYPE_HANDLE)
2217  			fp->hdr.type = BINDER_TYPE_BINDER;
2218  		else
2219  			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2220  		fp->binder = node->ptr;
2221  		fp->cookie = node->cookie;
2222  		if (node->proc)
2223  			binder_inner_proc_lock(node->proc);
2224  		else
2225  			__acquire(&node->proc->inner_lock);
2226  		binder_inc_node_nilocked(node,
2227  					 fp->hdr.type == BINDER_TYPE_BINDER,
2228  					 0, NULL);
2229  		if (node->proc)
2230  			binder_inner_proc_unlock(node->proc);
2231  		else
2232  			__release(&node->proc->inner_lock);
2233  		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2234  		binder_debug(BINDER_DEBUG_TRANSACTION,
2235  			     "        ref %d desc %d -> node %d u%016llx\n",
2236  			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
2237  			     (u64)node->ptr);
2238  		binder_node_unlock(node);
2239  	} else {
2240  		struct binder_ref_data dest_rdata;
2241  
2242  		binder_node_unlock(node);
2243  		ret = binder_inc_ref_for_node(target_proc, node,
2244  				fp->hdr.type == BINDER_TYPE_HANDLE,
2245  				NULL, &dest_rdata);
2246  		if (ret)
2247  			goto done;
2248  
2249  		fp->binder = 0;
2250  		fp->handle = dest_rdata.desc;
2251  		fp->cookie = 0;
2252  		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2253  						    &dest_rdata);
2254  		binder_debug(BINDER_DEBUG_TRANSACTION,
2255  			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2256  			     src_rdata.debug_id, src_rdata.desc,
2257  			     dest_rdata.debug_id, dest_rdata.desc,
2258  			     node->debug_id);
2259  	}
2260  done:
2261  	binder_put_node(node);
2262  	return ret;
2263  }
2264  
2265  static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2266  			       struct binder_transaction *t,
2267  			       struct binder_thread *thread,
2268  			       struct binder_transaction *in_reply_to)
2269  {
2270  	struct binder_proc *proc = thread->proc;
2271  	struct binder_proc *target_proc = t->to_proc;
2272  	struct binder_txn_fd_fixup *fixup;
2273  	struct file *file;
2274  	int ret = 0;
2275  	bool target_allows_fd;
2276  
2277  	if (in_reply_to)
2278  		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2279  	else
2280  		target_allows_fd = t->buffer->target_node->accept_fds;
2281  	if (!target_allows_fd) {
2282  		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2283  				  proc->pid, thread->pid,
2284  				  in_reply_to ? "reply" : "transaction",
2285  				  fd);
2286  		ret = -EPERM;
2287  		goto err_fd_not_accepted;
2288  	}
2289  
2290  	file = fget(fd);
2291  	if (!file) {
2292  		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2293  				  proc->pid, thread->pid, fd);
2294  		ret = -EBADF;
2295  		goto err_fget;
2296  	}
2297  	ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2298  	if (ret < 0) {
2299  		ret = -EPERM;
2300  		goto err_security;
2301  	}
2302  
2303  	/*
2304  	 * Add fixup record for this transaction. The allocation
2305  	 * of the fd in the target needs to be done from a
2306  	 * target thread.
2307  	 */
2308  	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2309  	if (!fixup) {
2310  		ret = -ENOMEM;
2311  		goto err_alloc;
2312  	}
2313  	fixup->file = file;
2314  	fixup->offset = fd_offset;
2315  	fixup->target_fd = -1;
2316  	trace_binder_transaction_fd_send(t, fd, fixup->offset);
2317  	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2318  
2319  	return ret;
2320  
2321  err_alloc:
2322  err_security:
2323  	fput(file);
2324  err_fget:
2325  err_fd_not_accepted:
2326  	return ret;
2327  }
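
/*
 * Editor's note (illustrative, not part of the driver): lifecycle of the fd
 * fixup queued above. The sender's struct file is pinned with fget() here,
 * but no fd is allocated in the target yet (fixup->target_fd stays -1);
 * that allocation happens later, in the context of the receiving process.
 * If the transaction is torn down before delivery,
 * binder_free_txn_fixups() drops the pinned file instead:
 *
 *	binder_translate_fd()				// sender context
 *		fget(fd);
 *		list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
 *	target thread processes the transaction		// target context
 *		a target fd is allocated and patched into the buffer
 *	or, on teardown:
 *		binder_free_txn_fixups(): fput(fixup->file)
 */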
2328  
2329  /**
2330   * struct binder_ptr_fixup - data to be fixed-up in target buffer
2331   * @offset:	offset in target buffer to fixup
2332   * @skip_size:	bytes to skip in copy (fixup will be written later)
2333   * @fixup_data:	data to write at fixup offset
2334   * @node:	list node
2335   *
2336   * This is used for the pointer fixup list (pf) which is created and consumed
2337   * during binder_transaction() and is only accessed locally. No
2338   * locking is necessary.
2339   *
2340   * The list is ordered by @offset.
2341   */
2342  struct binder_ptr_fixup {
2343  	binder_size_t offset;
2344  	size_t skip_size;
2345  	binder_uintptr_t fixup_data;
2346  	struct list_head node;
2347  };
2348  
2349  /**
2350   * struct binder_sg_copy - scatter-gather data to be copied
2351   * @offset:		offset in target buffer
2352   * @sender_uaddr:	user address in source buffer
2353   * @length:		bytes to copy
2354   * @node:		list node
2355   *
2356   * This is used for the sg copy list (sgc) which is created and consumed
2357   * during binder_transaction() and is only accessed locally. No
2358   * locking is necessary.
2359   *
2360   * The list is ordered by @offset.
2361   */
2362  struct binder_sg_copy {
2363  	binder_size_t offset;
2364  	const void __user *sender_uaddr;
2365  	size_t length;
2366  	struct list_head node;
2367  };
2368  
2369  /**
2370   * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2371   * @alloc:	binder_alloc associated with @buffer
2372   * @buffer:	binder buffer in target process
2373   * @sgc_head:	list_head of scatter-gather copy list
2374   * @pf_head:	list_head of pointer fixup list
2375   *
2376   * Processes all elements of @sgc_head, applying fixups from @pf_head
2377   * and copying the scatter-gather data from the source process' user
2378   * buffer to the target's buffer. It is expected that the list creation
2379   * and processing all occurs during binder_transaction() so these lists
2380   * are only accessed in local context.
2381   *
2382   * Return: 0=success, else -errno
2383   */
2384  static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2385  					 struct binder_buffer *buffer,
2386  					 struct list_head *sgc_head,
2387  					 struct list_head *pf_head)
2388  {
2389  	int ret = 0;
2390  	struct binder_sg_copy *sgc, *tmpsgc;
2391  	struct binder_ptr_fixup *tmppf;
2392  	struct binder_ptr_fixup *pf =
2393  		list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2394  					 node);
2395  
2396  	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2397  		size_t bytes_copied = 0;
2398  
2399  		while (bytes_copied < sgc->length) {
2400  			size_t copy_size;
2401  			size_t bytes_left = sgc->length - bytes_copied;
2402  			size_t offset = sgc->offset + bytes_copied;
2403  
2404  			/*
2405  			 * We copy up to the fixup (pointed to by pf)
2406  			 */
2407  			copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2408  				       : bytes_left;
2409  			if (!ret && copy_size)
2410  				ret = binder_alloc_copy_user_to_buffer(
2411  						alloc, buffer,
2412  						offset,
2413  						sgc->sender_uaddr + bytes_copied,
2414  						copy_size);
2415  			bytes_copied += copy_size;
2416  			if (copy_size != bytes_left) {
2417  				BUG_ON(!pf);
2418  				/* we stopped at a fixup offset */
2419  				if (pf->skip_size) {
2420  					/*
2421  					 * we are just skipping. This is for
2422  					 * BINDER_TYPE_FDA where the translated
2423  					 * fds will be fixed up when we get
2424  					 * to target context.
2425  					 */
2426  					bytes_copied += pf->skip_size;
2427  				} else {
2428  					/* apply the fixup indicated by pf */
2429  					if (!ret)
2430  						ret = binder_alloc_copy_to_buffer(
2431  							alloc, buffer,
2432  							pf->offset,
2433  							&pf->fixup_data,
2434  							sizeof(pf->fixup_data));
2435  					bytes_copied += sizeof(pf->fixup_data);
2436  				}
2437  				list_del(&pf->node);
2438  				kfree(pf);
2439  				pf = list_first_entry_or_null(pf_head,
2440  						struct binder_ptr_fixup, node);
2441  			}
2442  		}
2443  		list_del(&sgc->node);
2444  		kfree(sgc);
2445  	}
2446  	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2447  		BUG_ON(pf->skip_size == 0);
2448  		list_del(&pf->node);
2449  		kfree(pf);
2450  	}
2451  	BUG_ON(!list_empty(sgc_head));
2452  
2453  	return ret > 0 ? -EINVAL : ret;
2454  }
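
/*
 * Editor's worked example (illustrative offsets, assuming an 8-byte
 * binder_uintptr_t): one scatter-gather chunk with a single pointer fixup.
 *
 *	sgc: offset = 0x40, length = 0x30	// copy into buffer at 0x40..0x70
 *	pf:  offset = 0x50, skip_size = 0, fixup_data = translated pointer
 *
 * binder_do_deferred_txn_copies() then:
 *	1. copies source bytes into target offsets [0x40, 0x50),
 *	2. writes the 8 bytes of fixup_data at 0x50 instead of source data,
 *	3. resumes copying source data into [0x58, 0x70).
 * A BINDER_TYPE_FDA fixup behaves the same except step 2 only skips
 * skip_size bytes; the fds are patched later in target context.
 */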
2455  
2456  /**
2457   * binder_cleanup_deferred_txn_lists() - free specified lists
2458   * @sgc_head:	list_head of scatter-gather copy list
2459   * @pf_head:	list_head of pointer fixup list
2460   *
2461   * Called to clean up @sgc_head and @pf_head if there is an
2462   * error.
2463   */
2464  static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2465  					      struct list_head *pf_head)
2466  {
2467  	struct binder_sg_copy *sgc, *tmpsgc;
2468  	struct binder_ptr_fixup *pf, *tmppf;
2469  
2470  	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2471  		list_del(&sgc->node);
2472  		kfree(sgc);
2473  	}
2474  	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2475  		list_del(&pf->node);
2476  		kfree(pf);
2477  	}
2478  }
2479  
2480  /**
2481   * binder_defer_copy() - queue a scatter-gather buffer for copy
2482   * @sgc_head:		list_head of scatter-gather copy list
2483   * @offset:		binder buffer offset in target process
2484   * @sender_uaddr:	user address in source process
2485   * @length:		bytes to copy
2486   *
2487   * Specify a scatter-gather block to be copied. The actual copy must
2488   * be deferred until all the needed fixups are identified and queued.
2489   * Then the copy and fixups are done together so un-translated values
2490   * from the source are never visible in the target buffer.
2491   *
2492   * We are guaranteed that repeated calls to this function will have
2493   * monotonically increasing @offset values so the list will naturally
2494   * be ordered.
2495   *
2496   * Return: 0=success, else -errno
2497   */
2498  static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2499  			     const void __user *sender_uaddr, size_t length)
2500  {
2501  	struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2502  
2503  	if (!bc)
2504  		return -ENOMEM;
2505  
2506  	bc->offset = offset;
2507  	bc->sender_uaddr = sender_uaddr;
2508  	bc->length = length;
2509  	INIT_LIST_HEAD(&bc->node);
2510  
2511  	/*
2512  	 * We are guaranteed that the deferred copies are in-order
2513  	 * so just add to the tail.
2514  	 */
2515  	list_add_tail(&bc->node, sgc_head);
2516  
2517  	return 0;
2518  }
2519  
2520  /**
2521   * binder_add_fixup() - queue a fixup to be applied to sg copy
2522   * @pf_head:	list_head of binder ptr fixup list
2523   * @offset:	binder buffer offset in target process
2524   * @fixup:	bytes to be copied for fixup
2525   * @skip_size:	bytes to skip when copying (fixup will be applied later)
2526   *
2527   * Add the specified fixup to a list ordered by @offset. When copying
2528   * the scatter-gather buffers, the fixup will be copied instead of
2529   * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2530   * will be applied later (in target process context), so we just skip
2531   * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2532   * value in @fixup.
2533   *
2534   * This function is called *mostly* in @offset order, but there are
2535   * exceptions. Since out-of-order inserts are relatively uncommon,
2536   * we insert the new element by searching backward from the tail of
2537   * the list.
2538   *
2539   * Return: 0=success, else -errno
2540   */
2541  static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2542  			    binder_uintptr_t fixup, size_t skip_size)
2543  {
2544  	struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2545  	struct binder_ptr_fixup *tmppf;
2546  
2547  	if (!pf)
2548  		return -ENOMEM;
2549  
2550  	pf->offset = offset;
2551  	pf->fixup_data = fixup;
2552  	pf->skip_size = skip_size;
2553  	INIT_LIST_HEAD(&pf->node);
2554  
2555  	/* Fixups are *mostly* added in-order, but there are some
2556  	 * exceptions. Look backwards through list for insertion point.
2557  	 */
2558  	list_for_each_entry_reverse(tmppf, pf_head, node) {
2559  		if (tmppf->offset < pf->offset) {
2560  			list_add(&pf->node, &tmppf->node);
2561  			return 0;
2562  		}
2563  	}
2564  	/*
2565  	 * if we get here, then the new offset is the lowest so
2566  	 * insert at the head
2567  	 */
2568  	list_add(&pf->node, pf_head);
2569  	return 0;
2570  }
2571  
2572  static int binder_translate_fd_array(struct list_head *pf_head,
2573  				     struct binder_fd_array_object *fda,
2574  				     const void __user *sender_ubuffer,
2575  				     struct binder_buffer_object *parent,
2576  				     struct binder_buffer_object *sender_uparent,
2577  				     struct binder_transaction *t,
2578  				     struct binder_thread *thread,
2579  				     struct binder_transaction *in_reply_to)
2580  {
2581  	binder_size_t fdi, fd_buf_size;
2582  	binder_size_t fda_offset;
2583  	const void __user *sender_ufda_base;
2584  	struct binder_proc *proc = thread->proc;
2585  	int ret;
2586  
2587  	if (fda->num_fds == 0)
2588  		return 0;
2589  
2590  	fd_buf_size = sizeof(u32) * fda->num_fds;
2591  	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2592  		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2593  				  proc->pid, thread->pid, (u64)fda->num_fds);
2594  		return -EINVAL;
2595  	}
2596  	if (fd_buf_size > parent->length ||
2597  	    fda->parent_offset > parent->length - fd_buf_size) {
2598  		/* No space for all file descriptors here. */
2599  		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2600  				  proc->pid, thread->pid, (u64)fda->num_fds);
2601  		return -EINVAL;
2602  	}
2603  	/*
2604  	 * the source data for binder_buffer_object is visible
2605  	 * to user-space and the @buffer element is the user
2606  	 * pointer to the buffer_object containing the fd_array.
2607  	 * Convert the address to an offset relative to
2608  	 * the base of the transaction buffer.
2609  	 */
2610  	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2611  		fda->parent_offset;
2612  	sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2613  				fda->parent_offset;
2614  
2615  	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2616  	    !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2617  		binder_user_error("%d:%d parent offset not aligned correctly.\n",
2618  				  proc->pid, thread->pid);
2619  		return -EINVAL;
2620  	}
2621  	ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2622  	if (ret)
2623  		return ret;
2624  
2625  	for (fdi = 0; fdi < fda->num_fds; fdi++) {
2626  		u32 fd;
2627  		binder_size_t offset = fda_offset + fdi * sizeof(fd);
2628  		binder_size_t sender_uoffset = fdi * sizeof(fd);
2629  
2630  		ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2631  		if (!ret)
2632  			ret = binder_translate_fd(fd, offset, t, thread,
2633  						  in_reply_to);
2634  		if (ret)
2635  			return ret > 0 ? -EINVAL : ret;
2636  	}
2637  	return 0;
2638  }
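
/*
 * Editor's worked example (illustrative addresses, not part of the driver):
 * the fda_offset conversion above. Assume the parent buffer object's
 * payload was placed at
 *
 *	parent->buffer     = (uintptr_t)t->buffer->user_data + 0x80
 *	fda->parent_offset = 0x10	// fd array starts 0x10 into the payload
 *
 * then
 *
 *	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data)
 *		     + fda->parent_offset	// = 0x90
 *
 * i.e. the fds are written at offset 0x90 of the target transaction buffer,
 * while the source values are read from sender_uparent->buffer + 0x10 in
 * the sender's address space.
 */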
2639  
2640  static int binder_fixup_parent(struct list_head *pf_head,
2641  			       struct binder_transaction *t,
2642  			       struct binder_thread *thread,
2643  			       struct binder_buffer_object *bp,
2644  			       binder_size_t off_start_offset,
2645  			       binder_size_t num_valid,
2646  			       binder_size_t last_fixup_obj_off,
2647  			       binder_size_t last_fixup_min_off)
2648  {
2649  	struct binder_buffer_object *parent;
2650  	struct binder_buffer *b = t->buffer;
2651  	struct binder_proc *proc = thread->proc;
2652  	struct binder_proc *target_proc = t->to_proc;
2653  	struct binder_object object;
2654  	binder_size_t buffer_offset;
2655  	binder_size_t parent_offset;
2656  
2657  	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2658  		return 0;
2659  
2660  	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2661  				     off_start_offset, &parent_offset,
2662  				     num_valid);
2663  	if (!parent) {
2664  		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2665  				  proc->pid, thread->pid);
2666  		return -EINVAL;
2667  	}
2668  
2669  	if (!binder_validate_fixup(target_proc, b, off_start_offset,
2670  				   parent_offset, bp->parent_offset,
2671  				   last_fixup_obj_off,
2672  				   last_fixup_min_off)) {
2673  		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2674  				  proc->pid, thread->pid);
2675  		return -EINVAL;
2676  	}
2677  
2678  	if (parent->length < sizeof(binder_uintptr_t) ||
2679  	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2680  		/* No space for a pointer here! */
2681  		binder_user_error("%d:%d got transaction with invalid parent offset\n",
2682  				  proc->pid, thread->pid);
2683  		return -EINVAL;
2684  	}
2685  	buffer_offset = bp->parent_offset +
2686  			(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2687  	return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2688  }
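
/*
 * Editor's worked example (illustrative values, not part of the driver):
 * a parent fixup queued by binder_fixup_parent(). Suppose buffer object B
 * has BINDER_BUFFER_FLAG_HAS_PARENT with bp->parent referring to object A
 * and bp->parent_offset = 0x20, and A's payload sits at offset 0x100 of
 * the target buffer. Then
 *
 *	buffer_offset = bp->parent_offset
 *		      + (uintptr_t)parent->buffer - (uintptr_t)b->user_data
 *		      = 0x20 + 0x100 = 0x120
 *
 * and binder_add_fixup() records that the translated pointer bp->buffer
 * must be written at offset 0x120 when the deferred copies are applied.
 */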
2689  
2690  /**
2691   * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2692   * @t1: the pending async txn in the frozen process
2693   * @t2: the new async txn to supersede the outdated pending one
2694   *
2695   * Return:  true if t2 can supersede t1
2696   *          false if t2 can not supersede t1
2697   */
2698  static bool binder_can_update_transaction(struct binder_transaction *t1,
2699  					  struct binder_transaction *t2)
2700  {
2701  	if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2702  	    (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2703  		return false;
2704  	if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2705  	    t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2706  	    t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2707  	    t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2708  		return true;
2709  	return false;
2710  }
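
/*
 * Editor's example (illustrative, not part of the driver): two one-way
 * transactions that binder_can_update_transaction() treats as equivalent
 * state updates. Both are sent by the same process to the same node with
 * identical code and flags:
 *
 *	t1: TF_ONE_WAY | TF_UPDATE_TXN, code = 42, node X	// already queued
 *	t2: TF_ONE_WAY | TF_UPDATE_TXN, code = 42, node X	// new
 *
 * Here t2 may supersede t1: binder_proc_transaction() unqueues and frees
 * the stale t1 so only the newest update is delivered once the frozen
 * target thaws.
 */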
2711  
2712  /**
2713   * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2714   * @t:		 new async transaction
2715   * @target_list: list to find outdated transaction
2716   *
2717   * Return: the outdated transaction if found
2718   *         NULL if no outdated transaction can be found
2719   *
2720   * Requires the proc->inner_lock to be held.
2721   */
2722  static struct binder_transaction *
2723  binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2724  					 struct list_head *target_list)
2725  {
2726  	struct binder_work *w;
2727  
2728  	list_for_each_entry(w, target_list, entry) {
2729  		struct binder_transaction *t_queued;
2730  
2731  		if (w->type != BINDER_WORK_TRANSACTION)
2732  			continue;
2733  		t_queued = container_of(w, struct binder_transaction, work);
2734  		if (binder_can_update_transaction(t_queued, t))
2735  			return t_queued;
2736  	}
2737  	return NULL;
2738  }
2739  
2740  /**
2741   * binder_proc_transaction() - sends a transaction to a process and wakes it up
2742   * @t:		transaction to send
2743   * @proc:	process to send the transaction to
2744   * @thread:	thread in @proc to send the transaction to (may be NULL)
2745   *
2746   * This function queues a transaction to the specified process. It will try
2747   * to find a thread in the target process to handle the transaction and
2748   * wake it up. If no thread is found, the work is queued to the proc
2749   * waitqueue.
2750   *
2751   * If the @thread parameter is not NULL, the transaction is always queued
2752   * to the todo list of that specific thread.
2753   *
2754   * Return:	0 if the transaction was successfully queued
2755   *		BR_DEAD_REPLY if the target process or thread is dead
2756   *		BR_FROZEN_REPLY if the target process or thread is frozen and
2757   *			the sync transaction was rejected
2758   *		BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
2759   *		and the async transaction was successfully queued
2760   */
2761  static int binder_proc_transaction(struct binder_transaction *t,
2762  				    struct binder_proc *proc,
2763  				    struct binder_thread *thread)
2764  {
2765  	struct binder_node *node = t->buffer->target_node;
2766  	bool oneway = !!(t->flags & TF_ONE_WAY);
2767  	bool pending_async = false;
2768  	struct binder_transaction *t_outdated = NULL;
2769  	bool frozen = false;
2770  
2771  	BUG_ON(!node);
2772  	binder_node_lock(node);
2773  	if (oneway) {
2774  		BUG_ON(thread);
2775  		if (node->has_async_transaction)
2776  			pending_async = true;
2777  		else
2778  			node->has_async_transaction = true;
2779  	}
2780  
2781  	binder_inner_proc_lock(proc);
2782  	if (proc->is_frozen) {
2783  		frozen = true;
2784  		proc->sync_recv |= !oneway;
2785  		proc->async_recv |= oneway;
2786  	}
2787  
2788  	if ((frozen && !oneway) || proc->is_dead ||
2789  			(thread && thread->is_dead)) {
2790  		binder_inner_proc_unlock(proc);
2791  		binder_node_unlock(node);
2792  		return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2793  	}
2794  
2795  	if (!thread && !pending_async)
2796  		thread = binder_select_thread_ilocked(proc);
2797  
2798  	if (thread) {
2799  		binder_enqueue_thread_work_ilocked(thread, &t->work);
2800  	} else if (!pending_async) {
2801  		binder_enqueue_work_ilocked(&t->work, &proc->todo);
2802  	} else {
2803  		if ((t->flags & TF_UPDATE_TXN) && frozen) {
2804  			t_outdated = binder_find_outdated_transaction_ilocked(t,
2805  									      &node->async_todo);
2806  			if (t_outdated) {
2807  				binder_debug(BINDER_DEBUG_TRANSACTION,
2808  					     "txn %d supersedes %d\n",
2809  					     t->debug_id, t_outdated->debug_id);
2810  				list_del_init(&t_outdated->work.entry);
2811  				proc->outstanding_txns--;
2812  			}
2813  		}
2814  		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2815  	}
2816  
2817  	if (!pending_async)
2818  		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2819  
2820  	proc->outstanding_txns++;
2821  	binder_inner_proc_unlock(proc);
2822  	binder_node_unlock(node);
2823  
2824  	/*
2825  	 * To reduce potential contention, free the outdated transaction and
2826  	 * buffer after releasing the locks.
2827  	 */
2828  	if (t_outdated) {
2829  		struct binder_buffer *buffer = t_outdated->buffer;
2830  
2831  		t_outdated->buffer = NULL;
2832  		buffer->transaction = NULL;
2833  		trace_binder_transaction_update_buffer_release(buffer);
2834  		binder_release_entire_buffer(proc, NULL, buffer, false);
2835  		binder_alloc_free_buf(&proc->alloc, buffer);
2836  		kfree(t_outdated);
2837  		binder_stats_deleted(BINDER_STAT_TRANSACTION);
2838  	}
2839  
2840  	if (oneway && frozen)
2841  		return BR_TRANSACTION_PENDING_FROZEN;
2842  
2843  	return 0;
2844  }
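
/*
 * Editor's sketch (hypothetical caller-side handling, not part of the
 * driver): how the return value of binder_proc_transaction() maps to what
 * the sender ultimately sees, per the kerneldoc above:
 *
 *	switch (ret) {
 *	case 0:					// queued; thread woken if needed
 *		break;
 *	case BR_TRANSACTION_PENDING_FROZEN:	// oneway queued on frozen proc
 *		break;				// sender told delivery is pending
 *	case BR_FROZEN_REPLY:			// sync txn rejected, target frozen
 *	case BR_DEAD_REPLY:			// target process or thread is dead
 *	default:
 *		// fail the transaction back to the sender
 *		break;
 *	}
 */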
2845  
2846  /**
2847   * binder_get_node_refs_for_txn() - Get required refs on node for txn
2848   * @node:         struct binder_node for which to get refs
2849   * @procp:        returns @node->proc if valid
2850   * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
2851   *
2852   * User-space normally keeps the node alive when creating a transaction
2853   * since it has a reference to the target. The local strong ref keeps it
2854   * alive if the sending process dies before the target process processes
2855   * the transaction. If the source process is malicious or has a reference
2856   * counting bug, relying on the local strong ref can fail.
2857   *
2858   * Since user-space can cause the local strong ref to go away, we also take
2859   * a tmpref on the node to ensure it survives while we are constructing
2860   * the transaction. We also need a tmpref on the proc while we are
2861   * constructing the transaction, so we take that here as well.
2862   *
2863   * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2864   * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2865   * target proc has died, @error is set to BR_DEAD_REPLY.
2866   */
2867  static struct binder_node *binder_get_node_refs_for_txn(
2868  		struct binder_node *node,
2869  		struct binder_proc **procp,
2870  		uint32_t *error)
2871  {
2872  	struct binder_node *target_node = NULL;
2873  
2874  	binder_node_inner_lock(node);
2875  	if (node->proc) {
2876  		target_node = node;
2877  		binder_inc_node_nilocked(node, 1, 0, NULL);
2878  		binder_inc_node_tmpref_ilocked(node);
2879  		node->proc->tmp_ref++;
2880  		*procp = node->proc;
2881  	} else
2882  		*error = BR_DEAD_REPLY;
2883  	binder_node_inner_unlock(node);
2884  
2885  	return target_node;
2886  }
2887  
2888  static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2889  				      uint32_t command, int32_t param)
2890  {
2891  	struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2892  
2893  	if (!from) {
2894  		/* annotation for sparse */
2895  		__release(&from->proc->inner_lock);
2896  		return;
2897  	}
2898  
2899  	/* don't override existing errors */
2900  	if (from->ee.command == BR_OK)
2901  		binder_set_extended_error(&from->ee, id, command, param);
2902  	binder_inner_proc_unlock(from->proc);
2903  	binder_thread_dec_tmpref(from);
2904  }
2905  
2906  static void binder_transaction(struct binder_proc *proc,
2907  			       struct binder_thread *thread,
2908  			       struct binder_transaction_data *tr, int reply,
2909  			       binder_size_t extra_buffers_size)
2910  {
2911  	int ret;
2912  	struct binder_transaction *t;
2913  	struct binder_work *w;
2914  	struct binder_work *tcomplete;
2915  	binder_size_t buffer_offset = 0;
2916  	binder_size_t off_start_offset, off_end_offset;
2917  	binder_size_t off_min;
2918  	binder_size_t sg_buf_offset, sg_buf_end_offset;
2919  	binder_size_t user_offset = 0;
2920  	struct binder_proc *target_proc = NULL;
2921  	struct binder_thread *target_thread = NULL;
2922  	struct binder_node *target_node = NULL;
2923  	struct binder_transaction *in_reply_to = NULL;
2924  	struct binder_transaction_log_entry *e;
2925  	uint32_t return_error = 0;
2926  	uint32_t return_error_param = 0;
2927  	uint32_t return_error_line = 0;
2928  	binder_size_t last_fixup_obj_off = 0;
2929  	binder_size_t last_fixup_min_off = 0;
2930  	struct binder_context *context = proc->context;
2931  	int t_debug_id = atomic_inc_return(&binder_last_id);
2932  	ktime_t t_start_time = ktime_get();
2933  	char *secctx = NULL;
2934  	u32 secctx_sz = 0;
2935  	struct list_head sgc_head;
2936  	struct list_head pf_head;
2937  	const void __user *user_buffer = (const void __user *)
2938  				(uintptr_t)tr->data.ptr.buffer;
2939  	INIT_LIST_HEAD(&sgc_head);
2940  	INIT_LIST_HEAD(&pf_head);
2941  
2942  	e = binder_transaction_log_add(&binder_transaction_log);
2943  	e->debug_id = t_debug_id;
2944  	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2945  	e->from_proc = proc->pid;
2946  	e->from_thread = thread->pid;
2947  	e->target_handle = tr->target.handle;
2948  	e->data_size = tr->data_size;
2949  	e->offsets_size = tr->offsets_size;
2950  	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2951  
2952  	binder_inner_proc_lock(proc);
2953  	binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
2954  	binder_inner_proc_unlock(proc);
2955  
2956  	if (reply) {
2957  		binder_inner_proc_lock(proc);
2958  		in_reply_to = thread->transaction_stack;
2959  		if (in_reply_to == NULL) {
2960  			binder_inner_proc_unlock(proc);
2961  			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2962  					  proc->pid, thread->pid);
2963  			return_error = BR_FAILED_REPLY;
2964  			return_error_param = -EPROTO;
2965  			return_error_line = __LINE__;
2966  			goto err_empty_call_stack;
2967  		}
2968  		if (in_reply_to->to_thread != thread) {
2969  			spin_lock(&in_reply_to->lock);
2970  			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2971  				proc->pid, thread->pid, in_reply_to->debug_id,
2972  				in_reply_to->to_proc ?
2973  				in_reply_to->to_proc->pid : 0,
2974  				in_reply_to->to_thread ?
2975  				in_reply_to->to_thread->pid : 0);
2976  			spin_unlock(&in_reply_to->lock);
2977  			binder_inner_proc_unlock(proc);
2978  			return_error = BR_FAILED_REPLY;
2979  			return_error_param = -EPROTO;
2980  			return_error_line = __LINE__;
2981  			in_reply_to = NULL;
2982  			goto err_bad_call_stack;
2983  		}
2984  		thread->transaction_stack = in_reply_to->to_parent;
2985  		binder_inner_proc_unlock(proc);
2986  		binder_set_nice(in_reply_to->saved_priority);
2987  		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2988  		if (target_thread == NULL) {
2989  			/* annotation for sparse */
2990  			__release(&target_thread->proc->inner_lock);
2991  			binder_txn_error("%d:%d reply target not found\n",
2992  				thread->pid, proc->pid);
2993  			return_error = BR_DEAD_REPLY;
2994  			return_error_line = __LINE__;
2995  			goto err_dead_binder;
2996  		}
2997  		if (target_thread->transaction_stack != in_reply_to) {
2998  			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2999  				proc->pid, thread->pid,
3000  				target_thread->transaction_stack ?
3001  				target_thread->transaction_stack->debug_id : 0,
3002  				in_reply_to->debug_id);
3003  			binder_inner_proc_unlock(target_thread->proc);
3004  			return_error = BR_FAILED_REPLY;
3005  			return_error_param = -EPROTO;
3006  			return_error_line = __LINE__;
3007  			in_reply_to = NULL;
3008  			target_thread = NULL;
3009  			goto err_dead_binder;
3010  		}
3011  		target_proc = target_thread->proc;
3012  		target_proc->tmp_ref++;
3013  		binder_inner_proc_unlock(target_thread->proc);
3014  	} else {
3015  		if (tr->target.handle) {
3016  			struct binder_ref *ref;
3017  
3018  			/*
3019  			 * There must already be a strong ref
3020  			 * on this node. If so, do a strong
3021  			 * increment on the node to ensure it
3022  			 * stays alive until the transaction is
3023  			 * done.
3024  			 */
3025  			binder_proc_lock(proc);
3026  			ref = binder_get_ref_olocked(proc, tr->target.handle,
3027  						     true);
3028  			if (ref) {
3029  				target_node = binder_get_node_refs_for_txn(
3030  						ref->node, &target_proc,
3031  						&return_error);
3032  			} else {
3033  				binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3034  						  proc->pid, thread->pid, tr->target.handle);
3035  				return_error = BR_FAILED_REPLY;
3036  			}
3037  			binder_proc_unlock(proc);
3038  		} else {
3039  			mutex_lock(&context->context_mgr_node_lock);
3040  			target_node = context->binder_context_mgr_node;
3041  			if (target_node)
3042  				target_node = binder_get_node_refs_for_txn(
3043  						target_node, &target_proc,
3044  						&return_error);
3045  			else
3046  				return_error = BR_DEAD_REPLY;
3047  			mutex_unlock(&context->context_mgr_node_lock);
3048  			if (target_node && target_proc->pid == proc->pid) {
3049  				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3050  						  proc->pid, thread->pid);
3051  				return_error = BR_FAILED_REPLY;
3052  				return_error_param = -EINVAL;
3053  				return_error_line = __LINE__;
3054  				goto err_invalid_target_handle;
3055  			}
3056  		}
3057  		if (!target_node) {
3058  			binder_txn_error("%d:%d cannot find target node\n",
3059  				thread->pid, proc->pid);
3060  			/*
3061  			 * return_error is set above
3062  			 */
3063  			return_error_param = -EINVAL;
3064  			return_error_line = __LINE__;
3065  			goto err_dead_binder;
3066  		}
3067  		e->to_node = target_node->debug_id;
3068  		if (WARN_ON(proc == target_proc)) {
3069  			binder_txn_error("%d:%d self transactions not allowed\n",
3070  				thread->pid, proc->pid);
3071  			return_error = BR_FAILED_REPLY;
3072  			return_error_param = -EINVAL;
3073  			return_error_line = __LINE__;
3074  			goto err_invalid_target_handle;
3075  		}
3076  		if (security_binder_transaction(proc->cred,
3077  						target_proc->cred) < 0) {
3078  			binder_txn_error("%d:%d transaction credentials failed\n",
3079  				thread->pid, proc->pid);
3080  			return_error = BR_FAILED_REPLY;
3081  			return_error_param = -EPERM;
3082  			return_error_line = __LINE__;
3083  			goto err_invalid_target_handle;
3084  		}
3085  		binder_inner_proc_lock(proc);
3086  
3087  		w = list_first_entry_or_null(&thread->todo,
3088  					     struct binder_work, entry);
3089  		if (!(tr->flags & TF_ONE_WAY) && w &&
3090  		    w->type == BINDER_WORK_TRANSACTION) {
3091  			/*
3092  			 * Do not allow new outgoing transaction from a
3093  			 * thread that has a transaction at the head of
3094  			 * its todo list. Only need to check the head
3095  			 * because binder_select_thread_ilocked picks a
3096  			 * thread from proc->waiting_threads to enqueue
3097  			 * the transaction, and nothing is queued to the
3098  			 * todo list while the thread is on waiting_threads.
3099  			 */
3100  			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3101  					  proc->pid, thread->pid);
3102  			binder_inner_proc_unlock(proc);
3103  			return_error = BR_FAILED_REPLY;
3104  			return_error_param = -EPROTO;
3105  			return_error_line = __LINE__;
3106  			goto err_bad_todo_list;
3107  		}
3108  
3109  		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3110  			struct binder_transaction *tmp;
3111  
3112  			tmp = thread->transaction_stack;
3113  			if (tmp->to_thread != thread) {
3114  				spin_lock(&tmp->lock);
3115  				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3116  					proc->pid, thread->pid, tmp->debug_id,
3117  					tmp->to_proc ? tmp->to_proc->pid : 0,
3118  					tmp->to_thread ?
3119  					tmp->to_thread->pid : 0);
3120  				spin_unlock(&tmp->lock);
3121  				binder_inner_proc_unlock(proc);
3122  				return_error = BR_FAILED_REPLY;
3123  				return_error_param = -EPROTO;
3124  				return_error_line = __LINE__;
3125  				goto err_bad_call_stack;
3126  			}
3127  			while (tmp) {
3128  				struct binder_thread *from;
3129  
3130  				spin_lock(&tmp->lock);
3131  				from = tmp->from;
3132  				if (from && from->proc == target_proc) {
3133  					atomic_inc(&from->tmp_ref);
3134  					target_thread = from;
3135  					spin_unlock(&tmp->lock);
3136  					break;
3137  				}
3138  				spin_unlock(&tmp->lock);
3139  				tmp = tmp->from_parent;
3140  			}
3141  		}
3142  		binder_inner_proc_unlock(proc);
3143  	}
3144  	if (target_thread)
3145  		e->to_thread = target_thread->pid;
3146  	e->to_proc = target_proc->pid;
3147  
3148  	/* TODO: reuse incoming transaction for reply */
3149  	t = kzalloc(sizeof(*t), GFP_KERNEL);
3150  	if (t == NULL) {
3151  		binder_txn_error("%d:%d cannot allocate transaction\n",
3152  			thread->pid, proc->pid);
3153  		return_error = BR_FAILED_REPLY;
3154  		return_error_param = -ENOMEM;
3155  		return_error_line = __LINE__;
3156  		goto err_alloc_t_failed;
3157  	}
3158  	INIT_LIST_HEAD(&t->fd_fixups);
3159  	binder_stats_created(BINDER_STAT_TRANSACTION);
3160  	spin_lock_init(&t->lock);
3161  
3162  	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3163  	if (tcomplete == NULL) {
3164  		binder_txn_error("%d:%d cannot allocate work for transaction\n",
3165  			thread->pid, proc->pid);
3166  		return_error = BR_FAILED_REPLY;
3167  		return_error_param = -ENOMEM;
3168  		return_error_line = __LINE__;
3169  		goto err_alloc_tcomplete_failed;
3170  	}
3171  	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3172  
3173  	t->debug_id = t_debug_id;
3174  	t->start_time = t_start_time;
3175  
3176  	if (reply)
3177  		binder_debug(BINDER_DEBUG_TRANSACTION,
3178  			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3179  			     proc->pid, thread->pid, t->debug_id,
3180  			     target_proc->pid, target_thread->pid,
3181  			     (u64)tr->data.ptr.buffer,
3182  			     (u64)tr->data.ptr.offsets,
3183  			     (u64)tr->data_size, (u64)tr->offsets_size,
3184  			     (u64)extra_buffers_size);
3185  	else
3186  		binder_debug(BINDER_DEBUG_TRANSACTION,
3187  			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3188  			     proc->pid, thread->pid, t->debug_id,
3189  			     target_proc->pid, target_node->debug_id,
3190  			     (u64)tr->data.ptr.buffer,
3191  			     (u64)tr->data.ptr.offsets,
3192  			     (u64)tr->data_size, (u64)tr->offsets_size,
3193  			     (u64)extra_buffers_size);
3194  
3195  	if (!reply && !(tr->flags & TF_ONE_WAY))
3196  		t->from = thread;
3197  	else
3198  		t->from = NULL;
3199  	t->from_pid = proc->pid;
3200  	t->from_tid = thread->pid;
3201  	t->sender_euid = task_euid(proc->tsk);
3202  	t->to_proc = target_proc;
3203  	t->to_thread = target_thread;
3204  	t->code = tr->code;
3205  	t->flags = tr->flags;
3206  	t->priority = task_nice(current);
3207  
3208  	if (target_node && target_node->txn_security_ctx) {
3209  		u32 secid;
3210  		size_t added_size;
3211  
3212  		security_cred_getsecid(proc->cred, &secid);
3213  		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3214  		if (ret) {
3215  			binder_txn_error("%d:%d failed to get security context\n",
3216  				thread->pid, proc->pid);
3217  			return_error = BR_FAILED_REPLY;
3218  			return_error_param = ret;
3219  			return_error_line = __LINE__;
3220  			goto err_get_secctx_failed;
3221  		}
3222  		added_size = ALIGN(secctx_sz, sizeof(u64));
3223  		extra_buffers_size += added_size;
3224  		if (extra_buffers_size < added_size) {
3225  			binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3226  				thread->pid, proc->pid);
3227  			return_error = BR_FAILED_REPLY;
3228  			return_error_param = -EINVAL;
3229  			return_error_line = __LINE__;
3230  			goto err_bad_extra_size;
3231  		}
3232  	}
3233  
3234  	trace_binder_transaction(reply, t, target_node);
3235  
3236  	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3237  		tr->offsets_size, extra_buffers_size,
3238  		!reply && (t->flags & TF_ONE_WAY), current->tgid);
3239  	if (IS_ERR(t->buffer)) {
3240  		char *s;
3241  
3242  		ret = PTR_ERR(t->buffer);
3243  		s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3244  			: (ret == -ENOSPC) ? ": no space left"
3245  			: (ret == -ENOMEM) ? ": memory allocation failed"
3246  			: "";
3247  		binder_txn_error("cannot allocate buffer%s", s);
3248  
3249  		return_error_param = PTR_ERR(t->buffer);
3250  		return_error = return_error_param == -ESRCH ?
3251  			BR_DEAD_REPLY : BR_FAILED_REPLY;
3252  		return_error_line = __LINE__;
3253  		t->buffer = NULL;
3254  		goto err_binder_alloc_buf_failed;
3255  	}
3256  	if (secctx) {
3257  		int err;
3258  		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3259  				    ALIGN(tr->offsets_size, sizeof(void *)) +
3260  				    ALIGN(extra_buffers_size, sizeof(void *)) -
3261  				    ALIGN(secctx_sz, sizeof(u64));
3262  
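		/*
		 * The secctx was reserved at the very end of the extra
		 * buffers area (buf_offset above); record its address in
		 * the target's view of the buffer, then copy it in place.
		 */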
3263  		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3264  		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3265  						  t->buffer, buf_offset,
3266  						  secctx, secctx_sz);
3267  		if (err) {
3268  			t->security_ctx = 0;
3269  			WARN_ON(1);
3270  		}
3271  		security_release_secctx(secctx, secctx_sz);
3272  		secctx = NULL;
3273  	}
3274  	t->buffer->debug_id = t->debug_id;
3275  	t->buffer->transaction = t;
3276  	t->buffer->target_node = target_node;
3277  	t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3278  	trace_binder_transaction_alloc_buf(t->buffer);
3279  
3280  	if (binder_alloc_copy_user_to_buffer(
3281  				&target_proc->alloc,
3282  				t->buffer,
3283  				ALIGN(tr->data_size, sizeof(void *)),
3284  				(const void __user *)
3285  					(uintptr_t)tr->data.ptr.offsets,
3286  				tr->offsets_size)) {
3287  		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3288  				proc->pid, thread->pid);
3289  		return_error = BR_FAILED_REPLY;
3290  		return_error_param = -EFAULT;
3291  		return_error_line = __LINE__;
3292  		goto err_copy_data_failed;
3293  	}
3294  	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3295  		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3296  				proc->pid, thread->pid, (u64)tr->offsets_size);
3297  		return_error = BR_FAILED_REPLY;
3298  		return_error_param = -EINVAL;
3299  		return_error_line = __LINE__;
3300  		goto err_bad_offset;
3301  	}
3302  	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3303  		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3304  				  proc->pid, thread->pid,
3305  				  (u64)extra_buffers_size);
3306  		return_error = BR_FAILED_REPLY;
3307  		return_error_param = -EINVAL;
3308  		return_error_line = __LINE__;
3309  		goto err_bad_offset;
3310  	}
3311  	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3312  	buffer_offset = off_start_offset;
3313  	off_end_offset = off_start_offset + tr->offsets_size;
3314  	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3315  	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3316  		ALIGN(secctx_sz, sizeof(u64));
3317  	off_min = 0;
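	/*
	 * Walk the offsets array: copy the raw data that precedes each
	 * object directly from the sender, then validate and translate
	 * the object itself into the target process.
	 */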
3318  	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3319  	     buffer_offset += sizeof(binder_size_t)) {
3320  		struct binder_object_header *hdr;
3321  		size_t object_size;
3322  		struct binder_object object;
3323  		binder_size_t object_offset;
3324  		binder_size_t copy_size;
3325  
3326  		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3327  						  &object_offset,
3328  						  t->buffer,
3329  						  buffer_offset,
3330  						  sizeof(object_offset))) {
3331  			binder_txn_error("%d:%d copy offset from buffer failed\n",
3332  				thread->pid, proc->pid);
3333  			return_error = BR_FAILED_REPLY;
3334  			return_error_param = -EINVAL;
3335  			return_error_line = __LINE__;
3336  			goto err_bad_offset;
3337  		}
3338  
3339  		/*
3340  		 * Copy the source user buffer up to the next object
3341  		 * that will be processed.
3342  		 */
3343  		copy_size = object_offset - user_offset;
3344  		if (copy_size && (user_offset > object_offset ||
3345  				object_offset > tr->data_size ||
3346  				binder_alloc_copy_user_to_buffer(
3347  					&target_proc->alloc,
3348  					t->buffer, user_offset,
3349  					user_buffer + user_offset,
3350  					copy_size))) {
3351  			binder_user_error("%d:%d got transaction with invalid data ptr\n",
3352  					proc->pid, thread->pid);
3353  			return_error = BR_FAILED_REPLY;
3354  			return_error_param = -EFAULT;
3355  			return_error_line = __LINE__;
3356  			goto err_copy_data_failed;
3357  		}
3358  		object_size = binder_get_object(target_proc, user_buffer,
3359  				t->buffer, object_offset, &object);
3360  		if (object_size == 0 || object_offset < off_min) {
3361  			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3362  					  proc->pid, thread->pid,
3363  					  (u64)object_offset,
3364  					  (u64)off_min,
3365  					  (u64)t->buffer->data_size);
3366  			return_error = BR_FAILED_REPLY;
3367  			return_error_param = -EINVAL;
3368  			return_error_line = __LINE__;
3369  			goto err_bad_offset;
3370  		}
3371  		/*
3372  		 * Set offset to the next buffer fragment to be
3373  		 * copied
3374  		 */
3375  		user_offset = object_offset + object_size;
3376  
3377  		hdr = &object.hdr;
3378  		off_min = object_offset + object_size;
3379  		switch (hdr->type) {
3380  		case BINDER_TYPE_BINDER:
3381  		case BINDER_TYPE_WEAK_BINDER: {
3382  			struct flat_binder_object *fp;
3383  
3384  			fp = to_flat_binder_object(hdr);
3385  			ret = binder_translate_binder(fp, t, thread);
3386  
3387  			if (ret < 0 ||
3388  			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3389  							t->buffer,
3390  							object_offset,
3391  							fp, sizeof(*fp))) {
3392  				binder_txn_error("%d:%d translate binder failed\n",
3393  					thread->pid, proc->pid);
3394  				return_error = BR_FAILED_REPLY;
3395  				return_error_param = ret;
3396  				return_error_line = __LINE__;
3397  				goto err_translate_failed;
3398  			}
3399  		} break;
3400  		case BINDER_TYPE_HANDLE:
3401  		case BINDER_TYPE_WEAK_HANDLE: {
3402  			struct flat_binder_object *fp;
3403  
3404  			fp = to_flat_binder_object(hdr);
3405  			ret = binder_translate_handle(fp, t, thread);
3406  			if (ret < 0 ||
3407  			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3408  							t->buffer,
3409  							object_offset,
3410  							fp, sizeof(*fp))) {
3411  				binder_txn_error("%d:%d translate handle failed\n",
3412  					thread->pid, proc->pid);
3413  				return_error = BR_FAILED_REPLY;
3414  				return_error_param = ret;
3415  				return_error_line = __LINE__;
3416  				goto err_translate_failed;
3417  			}
3418  		} break;
3419  
3420  		case BINDER_TYPE_FD: {
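			/*
			 * fd_offset is where fp->fd lands in the target
			 * buffer; binder_translate_fd() records a fixup
			 * there so the receiver's fd can be written once
			 * it is installed (see binder_apply_fd_fixups()).
			 */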
3421  			struct binder_fd_object *fp = to_binder_fd_object(hdr);
3422  			binder_size_t fd_offset = object_offset +
3423  				(uintptr_t)&fp->fd - (uintptr_t)fp;
3424  			int ret = binder_translate_fd(fp->fd, fd_offset, t,
3425  						      thread, in_reply_to);
3426  
3427  			fp->pad_binder = 0;
3428  			if (ret < 0 ||
3429  			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3430  							t->buffer,
3431  							object_offset,
3432  							fp, sizeof(*fp))) {
3433  				binder_txn_error("%d:%d translate fd failed\n",
3434  					thread->pid, proc->pid);
3435  				return_error = BR_FAILED_REPLY;
3436  				return_error_param = ret;
3437  				return_error_line = __LINE__;
3438  				goto err_translate_failed;
3439  			}
3440  		} break;
3441  		case BINDER_TYPE_FDA: {
3442  			struct binder_object ptr_object;
3443  			binder_size_t parent_offset;
3444  			struct binder_object user_object;
3445  			size_t user_parent_size;
3446  			struct binder_fd_array_object *fda =
3447  				to_binder_fd_array_object(hdr);
3448  			size_t num_valid = (buffer_offset - off_start_offset) /
3449  						sizeof(binder_size_t);
3450  			struct binder_buffer_object *parent =
3451  				binder_validate_ptr(target_proc, t->buffer,
3452  						    &ptr_object, fda->parent,
3453  						    off_start_offset,
3454  						    &parent_offset,
3455  						    num_valid);
3456  			if (!parent) {
3457  				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3458  						  proc->pid, thread->pid);
3459  				return_error = BR_FAILED_REPLY;
3460  				return_error_param = -EINVAL;
3461  				return_error_line = __LINE__;
3462  				goto err_bad_parent;
3463  			}
3464  			if (!binder_validate_fixup(target_proc, t->buffer,
3465  						   off_start_offset,
3466  						   parent_offset,
3467  						   fda->parent_offset,
3468  						   last_fixup_obj_off,
3469  						   last_fixup_min_off)) {
3470  				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3471  						  proc->pid, thread->pid);
3472  				return_error = BR_FAILED_REPLY;
3473  				return_error_param = -EINVAL;
3474  				return_error_line = __LINE__;
3475  				goto err_bad_parent;
3476  			}
3477  			/*
3478  			 * We need to read the user version of the parent
3479  			 * object to get the original user offset
3480  			 */
3481  			user_parent_size =
3482  				binder_get_object(proc, user_buffer, t->buffer,
3483  						  parent_offset, &user_object);
3484  			if (user_parent_size != sizeof(user_object.bbo)) {
3485  				binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3486  						  proc->pid, thread->pid,
3487  						  user_parent_size,
3488  						  sizeof(user_object.bbo));
3489  				return_error = BR_FAILED_REPLY;
3490  				return_error_param = -EINVAL;
3491  				return_error_line = __LINE__;
3492  				goto err_bad_parent;
3493  			}
3494  			ret = binder_translate_fd_array(&pf_head, fda,
3495  							user_buffer, parent,
3496  							&user_object.bbo, t,
3497  							thread, in_reply_to);
3498  			if (!ret)
3499  				ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3500  								  t->buffer,
3501  								  object_offset,
3502  								  fda, sizeof(*fda));
3503  			if (ret) {
3504  				binder_txn_error("%d:%d translate fd array failed\n",
3505  					thread->pid, proc->pid);
3506  				return_error = BR_FAILED_REPLY;
3507  				return_error_param = ret > 0 ? -EINVAL : ret;
3508  				return_error_line = __LINE__;
3509  				goto err_translate_failed;
3510  			}
3511  			last_fixup_obj_off = parent_offset;
3512  			last_fixup_min_off =
3513  				fda->parent_offset + sizeof(u32) * fda->num_fds;
3514  		} break;
3515  		case BINDER_TYPE_PTR: {
3516  			struct binder_buffer_object *bp =
3517  				to_binder_buffer_object(hdr);
3518  			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3519  			size_t num_valid;
3520  
3521  			if (bp->length > buf_left) {
3522  				binder_user_error("%d:%d got transaction with too large buffer\n",
3523  						  proc->pid, thread->pid);
3524  				return_error = BR_FAILED_REPLY;
3525  				return_error_param = -EINVAL;
3526  				return_error_line = __LINE__;
3527  				goto err_bad_offset;
3528  			}
3529  			ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3530  				(const void __user *)(uintptr_t)bp->buffer,
3531  				bp->length);
3532  			if (ret) {
3533  				binder_txn_error("%d:%d deferred copy failed\n",
3534  					thread->pid, proc->pid);
3535  				return_error = BR_FAILED_REPLY;
3536  				return_error_param = ret;
3537  				return_error_line = __LINE__;
3538  				goto err_translate_failed;
3539  			}
3540  			/* Fixup buffer pointer to target proc address space */
3541  			bp->buffer = (uintptr_t)
3542  				t->buffer->user_data + sg_buf_offset;
3543  			sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3544  
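			/*
			 * Only objects that appear earlier in the offsets
			 * array are eligible parents for this buffer.
			 */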
3545  			num_valid = (buffer_offset - off_start_offset) /
3546  					sizeof(binder_size_t);
3547  			ret = binder_fixup_parent(&pf_head, t,
3548  						  thread, bp,
3549  						  off_start_offset,
3550  						  num_valid,
3551  						  last_fixup_obj_off,
3552  						  last_fixup_min_off);
3553  			if (ret < 0 ||
3554  			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3555  							t->buffer,
3556  							object_offset,
3557  							bp, sizeof(*bp))) {
3558  				binder_txn_error("%d:%d failed to fixup parent\n",
3559  					thread->pid, proc->pid);
3560  				return_error = BR_FAILED_REPLY;
3561  				return_error_param = ret;
3562  				return_error_line = __LINE__;
3563  				goto err_translate_failed;
3564  			}
3565  			last_fixup_obj_off = object_offset;
3566  			last_fixup_min_off = 0;
3567  		} break;
3568  		default:
3569  			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3570  				proc->pid, thread->pid, hdr->type);
3571  			return_error = BR_FAILED_REPLY;
3572  			return_error_param = -EINVAL;
3573  			return_error_line = __LINE__;
3574  			goto err_bad_object_type;
3575  		}
3576  	}
3577  	/* Done processing objects, copy the rest of the buffer */
3578  	if (binder_alloc_copy_user_to_buffer(
3579  				&target_proc->alloc,
3580  				t->buffer, user_offset,
3581  				user_buffer + user_offset,
3582  				tr->data_size - user_offset)) {
3583  		binder_user_error("%d:%d got transaction with invalid data ptr\n",
3584  				proc->pid, thread->pid);
3585  		return_error = BR_FAILED_REPLY;
3586  		return_error_param = -EFAULT;
3587  		return_error_line = __LINE__;
3588  		goto err_copy_data_failed;
3589  	}
3590  
3591  	ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3592  					    &sgc_head, &pf_head);
3593  	if (ret) {
3594  		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3595  				  proc->pid, thread->pid);
3596  		return_error = BR_FAILED_REPLY;
3597  		return_error_param = ret;
3598  		return_error_line = __LINE__;
3599  		goto err_copy_data_failed;
3600  	}
3601  	if (t->buffer->oneway_spam_suspect)
3602  		tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3603  	else
3604  		tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3605  	t->work.type = BINDER_WORK_TRANSACTION;
3606  
3607  	if (reply) {
3608  		binder_enqueue_thread_work(thread, tcomplete);
3609  		binder_inner_proc_lock(target_proc);
3610  		if (target_thread->is_dead) {
3611  			return_error = BR_DEAD_REPLY;
3612  			binder_inner_proc_unlock(target_proc);
3613  			goto err_dead_proc_or_thread;
3614  		}
3615  		BUG_ON(t->buffer->async_transaction != 0);
3616  		binder_pop_transaction_ilocked(target_thread, in_reply_to);
3617  		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3618  		target_proc->outstanding_txns++;
3619  		binder_inner_proc_unlock(target_proc);
3620  		wake_up_interruptible_sync(&target_thread->wait);
3621  		binder_free_transaction(in_reply_to);
3622  	} else if (!(t->flags & TF_ONE_WAY)) {
3623  		BUG_ON(t->buffer->async_transaction != 0);
3624  		binder_inner_proc_lock(proc);
3625  		/*
3626  		 * Defer the TRANSACTION_COMPLETE, so we don't return to
3627  		 * userspace immediately; this allows the target process to
3628  		 * immediately start processing this transaction, reducing
3629  		 * latency. We will then return the TRANSACTION_COMPLETE when
3630  		 * the target replies (or there is an error).
3631  		 */
3632  		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3633  		t->need_reply = 1;
3634  		t->from_parent = thread->transaction_stack;
3635  		thread->transaction_stack = t;
3636  		binder_inner_proc_unlock(proc);
3637  		return_error = binder_proc_transaction(t,
3638  				target_proc, target_thread);
3639  		if (return_error) {
3640  			binder_inner_proc_lock(proc);
3641  			binder_pop_transaction_ilocked(thread, t);
3642  			binder_inner_proc_unlock(proc);
3643  			goto err_dead_proc_or_thread;
3644  		}
3645  	} else {
3646  		BUG_ON(target_node == NULL);
3647  		BUG_ON(t->buffer->async_transaction != 1);
3648  		return_error = binder_proc_transaction(t, target_proc, NULL);
3649  		/*
		 * Let the caller know when an async transaction reaches a frozen
3651  		 * process and is put in a pending queue, waiting for the target
3652  		 * process to be unfrozen.
3653  		 */
3654  		if (return_error == BR_TRANSACTION_PENDING_FROZEN)
3655  			tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3656  		binder_enqueue_thread_work(thread, tcomplete);
3657  		if (return_error &&
3658  		    return_error != BR_TRANSACTION_PENDING_FROZEN)
3659  			goto err_dead_proc_or_thread;
3660  	}
3661  	if (target_thread)
3662  		binder_thread_dec_tmpref(target_thread);
3663  	binder_proc_dec_tmpref(target_proc);
3664  	if (target_node)
3665  		binder_dec_node_tmpref(target_node);
3666  	/*
3667  	 * write barrier to synchronize with initialization
3668  	 * of log entry
3669  	 */
3670  	smp_wmb();
3671  	WRITE_ONCE(e->debug_id_done, t_debug_id);
3672  	return;
3673  
3674  err_dead_proc_or_thread:
3675  	binder_txn_error("%d:%d dead process or thread\n",
3676  		thread->pid, proc->pid);
3677  	return_error_line = __LINE__;
3678  	binder_dequeue_work(proc, tcomplete);
3679  err_translate_failed:
3680  err_bad_object_type:
3681  err_bad_offset:
3682  err_bad_parent:
3683  err_copy_data_failed:
3684  	binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3685  	binder_free_txn_fixups(t);
3686  	trace_binder_transaction_failed_buffer_release(t->buffer);
3687  	binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3688  					  buffer_offset, true);
3689  	if (target_node)
3690  		binder_dec_node_tmpref(target_node);
3691  	target_node = NULL;
3692  	t->buffer->transaction = NULL;
3693  	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3694  err_binder_alloc_buf_failed:
3695  err_bad_extra_size:
3696  	if (secctx)
3697  		security_release_secctx(secctx, secctx_sz);
3698  err_get_secctx_failed:
3699  	kfree(tcomplete);
3700  	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3701  err_alloc_tcomplete_failed:
3702  	if (trace_binder_txn_latency_free_enabled())
3703  		binder_txn_latency_free(t);
3704  	kfree(t);
3705  	binder_stats_deleted(BINDER_STAT_TRANSACTION);
3706  err_alloc_t_failed:
3707  err_bad_todo_list:
3708  err_bad_call_stack:
3709  err_empty_call_stack:
3710  err_dead_binder:
3711  err_invalid_target_handle:
3712  	if (target_node) {
3713  		binder_dec_node(target_node, 1, 0);
3714  		binder_dec_node_tmpref(target_node);
3715  	}
3716  
3717  	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3718  		     "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
3719  		     proc->pid, thread->pid, reply ? "reply" :
3720  		     (tr->flags & TF_ONE_WAY ? "async" : "call"),
3721  		     target_proc ? target_proc->pid : 0,
3722  		     target_thread ? target_thread->pid : 0,
3723  		     t_debug_id, return_error, return_error_param,
3724  		     (u64)tr->data_size, (u64)tr->offsets_size,
3725  		     return_error_line);
3726  
3727  	if (target_thread)
3728  		binder_thread_dec_tmpref(target_thread);
3729  	if (target_proc)
3730  		binder_proc_dec_tmpref(target_proc);
3731  
3732  	{
3733  		struct binder_transaction_log_entry *fe;
3734  
3735  		e->return_error = return_error;
3736  		e->return_error_param = return_error_param;
3737  		e->return_error_line = return_error_line;
3738  		fe = binder_transaction_log_add(&binder_transaction_log_failed);
3739  		*fe = *e;
3740  		/*
3741  		 * write barrier to synchronize with initialization
3742  		 * of log entry
3743  		 */
3744  		smp_wmb();
3745  		WRITE_ONCE(e->debug_id_done, t_debug_id);
3746  		WRITE_ONCE(fe->debug_id_done, t_debug_id);
3747  	}
3748  
3749  	BUG_ON(thread->return_error.cmd != BR_OK);
3750  	if (in_reply_to) {
3751  		binder_set_txn_from_error(in_reply_to, t_debug_id,
3752  				return_error, return_error_param);
3753  		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3754  		binder_enqueue_thread_work(thread, &thread->return_error.work);
3755  		binder_send_failed_reply(in_reply_to, return_error);
3756  	} else {
3757  		binder_inner_proc_lock(proc);
3758  		binder_set_extended_error(&thread->ee, t_debug_id,
3759  				return_error, return_error_param);
3760  		binder_inner_proc_unlock(proc);
3761  		thread->return_error.cmd = return_error;
3762  		binder_enqueue_thread_work(thread, &thread->return_error.work);
3763  	}
3764  }
3765  
3766  /**
3767   * binder_free_buf() - free the specified buffer
3768   * @proc:	binder proc that owns buffer
 * @thread:	binder thread performing the buffer release
3769   * @buffer:	buffer to be freed
3770   * @is_failure:	true if the transaction failed to send
3771   *
3772   * If buffer for an async transaction, enqueue the next async
3773   * transaction from the node.
3774   *
3775   * Cleanup buffer and free it.
3776   */
3777  static void
3778  binder_free_buf(struct binder_proc *proc,
3779  		struct binder_thread *thread,
3780  		struct binder_buffer *buffer, bool is_failure)
3781  {
3782  	binder_inner_proc_lock(proc);
3783  	if (buffer->transaction) {
3784  		buffer->transaction->buffer = NULL;
3785  		buffer->transaction = NULL;
3786  	}
3787  	binder_inner_proc_unlock(proc);
3788  	if (buffer->async_transaction && buffer->target_node) {
3789  		struct binder_node *buf_node;
3790  		struct binder_work *w;
3791  
3792  		buf_node = buffer->target_node;
3793  		binder_node_inner_lock(buf_node);
3794  		BUG_ON(!buf_node->has_async_transaction);
3795  		BUG_ON(buf_node->proc != proc);
3796  		w = binder_dequeue_work_head_ilocked(
3797  				&buf_node->async_todo);
3798  		if (!w) {
3799  			buf_node->has_async_transaction = false;
3800  		} else {
3801  			binder_enqueue_work_ilocked(
3802  					w, &proc->todo);
3803  			binder_wakeup_proc_ilocked(proc);
3804  		}
3805  		binder_node_inner_unlock(buf_node);
3806  	}
3807  	trace_binder_transaction_buffer_release(buffer);
3808  	binder_release_entire_buffer(proc, thread, buffer, is_failure);
3809  	binder_alloc_free_buf(&proc->alloc, buffer);
3810  }
3811  
3812  static int binder_thread_write(struct binder_proc *proc,
3813  			struct binder_thread *thread,
3814  			binder_uintptr_t binder_buffer, size_t size,
3815  			binder_size_t *consumed)
3816  {
3817  	uint32_t cmd;
3818  	struct binder_context *context = proc->context;
3819  	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3820  	void __user *ptr = buffer + *consumed;
3821  	void __user *end = buffer + size;
3822  
3823  	while (ptr < end && thread->return_error.cmd == BR_OK) {
3824  		int ret;
3825  
3826  		if (get_user(cmd, (uint32_t __user *)ptr))
3827  			return -EFAULT;
3828  		ptr += sizeof(uint32_t);
3829  		trace_binder_command(cmd);
3830  		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3831  			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3832  			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3833  			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3834  		}
3835  		switch (cmd) {
3836  		case BC_INCREFS:
3837  		case BC_ACQUIRE:
3838  		case BC_RELEASE:
3839  		case BC_DECREFS: {
3840  			uint32_t target;
3841  			const char *debug_string;
3842  			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3843  			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3844  			struct binder_ref_data rdata;
3845  
3846  			if (get_user(target, (uint32_t __user *)ptr))
3847  				return -EFAULT;
3848  
3849  			ptr += sizeof(uint32_t);
3850  			ret = -1;
3851  			if (increment && !target) {
3852  				struct binder_node *ctx_mgr_node;
3853  
3854  				mutex_lock(&context->context_mgr_node_lock);
3855  				ctx_mgr_node = context->binder_context_mgr_node;
3856  				if (ctx_mgr_node) {
3857  					if (ctx_mgr_node->proc == proc) {
3858  						binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3859  								  proc->pid, thread->pid);
3860  						mutex_unlock(&context->context_mgr_node_lock);
3861  						return -EINVAL;
3862  					}
3863  					ret = binder_inc_ref_for_node(
3864  							proc, ctx_mgr_node,
3865  							strong, NULL, &rdata);
3866  				}
3867  				mutex_unlock(&context->context_mgr_node_lock);
3868  			}
3869  			if (ret)
3870  				ret = binder_update_ref_for_handle(
3871  						proc, target, increment, strong,
3872  						&rdata);
3873  			if (!ret && rdata.desc != target) {
3874  				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3875  					proc->pid, thread->pid,
3876  					target, rdata.desc);
3877  			}
3878  			switch (cmd) {
3879  			case BC_INCREFS:
3880  				debug_string = "IncRefs";
3881  				break;
3882  			case BC_ACQUIRE:
3883  				debug_string = "Acquire";
3884  				break;
3885  			case BC_RELEASE:
3886  				debug_string = "Release";
3887  				break;
3888  			case BC_DECREFS:
3889  			default:
3890  				debug_string = "DecRefs";
3891  				break;
3892  			}
3893  			if (ret) {
3894  				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3895  					proc->pid, thread->pid, debug_string,
3896  					strong, target, ret);
3897  				break;
3898  			}
3899  			binder_debug(BINDER_DEBUG_USER_REFS,
3900  				     "%d:%d %s ref %d desc %d s %d w %d\n",
3901  				     proc->pid, thread->pid, debug_string,
3902  				     rdata.debug_id, rdata.desc, rdata.strong,
3903  				     rdata.weak);
3904  			break;
3905  		}
3906  		case BC_INCREFS_DONE:
3907  		case BC_ACQUIRE_DONE: {
3908  			binder_uintptr_t node_ptr;
3909  			binder_uintptr_t cookie;
3910  			struct binder_node *node;
3911  			bool free_node;
3912  
3913  			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3914  				return -EFAULT;
3915  			ptr += sizeof(binder_uintptr_t);
3916  			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3917  				return -EFAULT;
3918  			ptr += sizeof(binder_uintptr_t);
3919  			node = binder_get_node(proc, node_ptr);
3920  			if (node == NULL) {
3921  				binder_user_error("%d:%d %s u%016llx no match\n",
3922  					proc->pid, thread->pid,
3923  					cmd == BC_INCREFS_DONE ?
3924  					"BC_INCREFS_DONE" :
3925  					"BC_ACQUIRE_DONE",
3926  					(u64)node_ptr);
3927  				break;
3928  			}
3929  			if (cookie != node->cookie) {
3930  				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3931  					proc->pid, thread->pid,
3932  					cmd == BC_INCREFS_DONE ?
3933  					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3934  					(u64)node_ptr, node->debug_id,
3935  					(u64)cookie, (u64)node->cookie);
3936  				binder_put_node(node);
3937  				break;
3938  			}
3939  			binder_node_inner_lock(node);
3940  			if (cmd == BC_ACQUIRE_DONE) {
3941  				if (node->pending_strong_ref == 0) {
3942  					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3943  						proc->pid, thread->pid,
3944  						node->debug_id);
3945  					binder_node_inner_unlock(node);
3946  					binder_put_node(node);
3947  					break;
3948  				}
3949  				node->pending_strong_ref = 0;
3950  			} else {
3951  				if (node->pending_weak_ref == 0) {
3952  					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3953  						proc->pid, thread->pid,
3954  						node->debug_id);
3955  					binder_node_inner_unlock(node);
3956  					binder_put_node(node);
3957  					break;
3958  				}
3959  				node->pending_weak_ref = 0;
3960  			}
3961  			free_node = binder_dec_node_nilocked(node,
3962  					cmd == BC_ACQUIRE_DONE, 0);
3963  			WARN_ON(free_node);
3964  			binder_debug(BINDER_DEBUG_USER_REFS,
3965  				     "%d:%d %s node %d ls %d lw %d tr %d\n",
3966  				     proc->pid, thread->pid,
3967  				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3968  				     node->debug_id, node->local_strong_refs,
3969  				     node->local_weak_refs, node->tmp_refs);
3970  			binder_node_inner_unlock(node);
3971  			binder_put_node(node);
3972  			break;
3973  		}
3974  		case BC_ATTEMPT_ACQUIRE:
3975  			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3976  			return -EINVAL;
3977  		case BC_ACQUIRE_RESULT:
3978  			pr_err("BC_ACQUIRE_RESULT not supported\n");
3979  			return -EINVAL;
3980  
3981  		case BC_FREE_BUFFER: {
3982  			binder_uintptr_t data_ptr;
3983  			struct binder_buffer *buffer;
3984  
3985  			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3986  				return -EFAULT;
3987  			ptr += sizeof(binder_uintptr_t);
3988  
3989  			buffer = binder_alloc_prepare_to_free(&proc->alloc,
3990  							      data_ptr);
3991  			if (IS_ERR_OR_NULL(buffer)) {
3992  				if (PTR_ERR(buffer) == -EPERM) {
3993  					binder_user_error(
3994  						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3995  						proc->pid, thread->pid,
3996  						(u64)data_ptr);
3997  				} else {
3998  					binder_user_error(
3999  						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
4000  						proc->pid, thread->pid,
4001  						(u64)data_ptr);
4002  				}
4003  				break;
4004  			}
4005  			binder_debug(BINDER_DEBUG_FREE_BUFFER,
4006  				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
4007  				     proc->pid, thread->pid, (u64)data_ptr,
4008  				     buffer->debug_id,
4009  				     buffer->transaction ? "active" : "finished");
4010  			binder_free_buf(proc, thread, buffer, false);
4011  			break;
4012  		}
4013  
4014  		case BC_TRANSACTION_SG:
4015  		case BC_REPLY_SG: {
4016  			struct binder_transaction_data_sg tr;
4017  
4018  			if (copy_from_user(&tr, ptr, sizeof(tr)))
4019  				return -EFAULT;
4020  			ptr += sizeof(tr);
4021  			binder_transaction(proc, thread, &tr.transaction_data,
4022  					   cmd == BC_REPLY_SG, tr.buffers_size);
4023  			break;
4024  		}
4025  		case BC_TRANSACTION:
4026  		case BC_REPLY: {
4027  			struct binder_transaction_data tr;
4028  
4029  			if (copy_from_user(&tr, ptr, sizeof(tr)))
4030  				return -EFAULT;
4031  			ptr += sizeof(tr);
4032  			binder_transaction(proc, thread, &tr,
4033  					   cmd == BC_REPLY, 0);
4034  			break;
4035  		}
4036  
4037  		case BC_REGISTER_LOOPER:
4038  			binder_debug(BINDER_DEBUG_THREADS,
4039  				     "%d:%d BC_REGISTER_LOOPER\n",
4040  				     proc->pid, thread->pid);
4041  			binder_inner_proc_lock(proc);
4042  			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4043  				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4044  				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4045  					proc->pid, thread->pid);
4046  			} else if (proc->requested_threads == 0) {
4047  				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4048  				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4049  					proc->pid, thread->pid);
4050  			} else {
4051  				proc->requested_threads--;
4052  				proc->requested_threads_started++;
4053  			}
4054  			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4055  			binder_inner_proc_unlock(proc);
4056  			break;
4057  		case BC_ENTER_LOOPER:
4058  			binder_debug(BINDER_DEBUG_THREADS,
4059  				     "%d:%d BC_ENTER_LOOPER\n",
4060  				     proc->pid, thread->pid);
4061  			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4062  				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4063  				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4064  					proc->pid, thread->pid);
4065  			}
4066  			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4067  			break;
4068  		case BC_EXIT_LOOPER:
4069  			binder_debug(BINDER_DEBUG_THREADS,
4070  				     "%d:%d BC_EXIT_LOOPER\n",
4071  				     proc->pid, thread->pid);
4072  			thread->looper |= BINDER_LOOPER_STATE_EXITED;
4073  			break;
4074  
4075  		case BC_REQUEST_DEATH_NOTIFICATION:
4076  		case BC_CLEAR_DEATH_NOTIFICATION: {
4077  			uint32_t target;
4078  			binder_uintptr_t cookie;
4079  			struct binder_ref *ref;
4080  			struct binder_ref_death *death = NULL;
4081  
4082  			if (get_user(target, (uint32_t __user *)ptr))
4083  				return -EFAULT;
4084  			ptr += sizeof(uint32_t);
4085  			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4086  				return -EFAULT;
4087  			ptr += sizeof(binder_uintptr_t);
4088  			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4089  				/*
4090  				 * Allocate memory for death notification
4091  				 * before taking lock
4092  				 */
4093  				death = kzalloc(sizeof(*death), GFP_KERNEL);
4094  				if (death == NULL) {
4095  					WARN_ON(thread->return_error.cmd !=
4096  						BR_OK);
4097  					thread->return_error.cmd = BR_ERROR;
4098  					binder_enqueue_thread_work(
4099  						thread,
4100  						&thread->return_error.work);
4101  					binder_debug(
4102  						BINDER_DEBUG_FAILED_TRANSACTION,
4103  						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4104  						proc->pid, thread->pid);
4105  					break;
4106  				}
4107  			}
4108  			binder_proc_lock(proc);
4109  			ref = binder_get_ref_olocked(proc, target, false);
4110  			if (ref == NULL) {
4111  				binder_user_error("%d:%d %s invalid ref %d\n",
4112  					proc->pid, thread->pid,
4113  					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4114  					"BC_REQUEST_DEATH_NOTIFICATION" :
4115  					"BC_CLEAR_DEATH_NOTIFICATION",
4116  					target);
4117  				binder_proc_unlock(proc);
4118  				kfree(death);
4119  				break;
4120  			}
4121  
4122  			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4123  				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4124  				     proc->pid, thread->pid,
4125  				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4126  				     "BC_REQUEST_DEATH_NOTIFICATION" :
4127  				     "BC_CLEAR_DEATH_NOTIFICATION",
4128  				     (u64)cookie, ref->data.debug_id,
4129  				     ref->data.desc, ref->data.strong,
4130  				     ref->data.weak, ref->node->debug_id);
4131  
4132  			binder_node_lock(ref->node);
4133  			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4134  				if (ref->death) {
4135  					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4136  						proc->pid, thread->pid);
4137  					binder_node_unlock(ref->node);
4138  					binder_proc_unlock(proc);
4139  					kfree(death);
4140  					break;
4141  				}
4142  				binder_stats_created(BINDER_STAT_DEATH);
4143  				INIT_LIST_HEAD(&death->work.entry);
4144  				death->cookie = cookie;
4145  				ref->death = death;
4146  				if (ref->node->proc == NULL) {
4147  					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4148  
4149  					binder_inner_proc_lock(proc);
4150  					binder_enqueue_work_ilocked(
4151  						&ref->death->work, &proc->todo);
4152  					binder_wakeup_proc_ilocked(proc);
4153  					binder_inner_proc_unlock(proc);
4154  				}
4155  			} else {
4156  				if (ref->death == NULL) {
4157  					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4158  						proc->pid, thread->pid);
4159  					binder_node_unlock(ref->node);
4160  					binder_proc_unlock(proc);
4161  					break;
4162  				}
4163  				death = ref->death;
4164  				if (death->cookie != cookie) {
4165  					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4166  						proc->pid, thread->pid,
4167  						(u64)death->cookie,
4168  						(u64)cookie);
4169  					binder_node_unlock(ref->node);
4170  					binder_proc_unlock(proc);
4171  					break;
4172  				}
4173  				ref->death = NULL;
4174  				binder_inner_proc_lock(proc);
4175  				if (list_empty(&death->work.entry)) {
4176  					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4177  					if (thread->looper &
4178  					    (BINDER_LOOPER_STATE_REGISTERED |
4179  					     BINDER_LOOPER_STATE_ENTERED))
4180  						binder_enqueue_thread_work_ilocked(
4181  								thread,
4182  								&death->work);
4183  					else {
4184  						binder_enqueue_work_ilocked(
4185  								&death->work,
4186  								&proc->todo);
4187  						binder_wakeup_proc_ilocked(
4188  								proc);
4189  					}
4190  				} else {
4191  					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4192  					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4193  				}
4194  				binder_inner_proc_unlock(proc);
4195  			}
4196  			binder_node_unlock(ref->node);
4197  			binder_proc_unlock(proc);
4198  		} break;
4199  		case BC_DEAD_BINDER_DONE: {
4200  			struct binder_work *w;
4201  			binder_uintptr_t cookie;
4202  			struct binder_ref_death *death = NULL;
4203  
4204  			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4205  				return -EFAULT;
4206  
4207  			ptr += sizeof(cookie);
4208  			binder_inner_proc_lock(proc);
4209  			list_for_each_entry(w, &proc->delivered_death,
4210  					    entry) {
4211  				struct binder_ref_death *tmp_death =
4212  					container_of(w,
4213  						     struct binder_ref_death,
4214  						     work);
4215  
4216  				if (tmp_death->cookie == cookie) {
4217  					death = tmp_death;
4218  					break;
4219  				}
4220  			}
4221  			binder_debug(BINDER_DEBUG_DEAD_BINDER,
4222  				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4223  				     proc->pid, thread->pid, (u64)cookie,
4224  				     death);
4225  			if (death == NULL) {
4226  				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4227  					proc->pid, thread->pid, (u64)cookie);
4228  				binder_inner_proc_unlock(proc);
4229  				break;
4230  			}
4231  			binder_dequeue_work_ilocked(&death->work);
4232  			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4233  				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4234  				if (thread->looper &
4235  					(BINDER_LOOPER_STATE_REGISTERED |
4236  					 BINDER_LOOPER_STATE_ENTERED))
4237  					binder_enqueue_thread_work_ilocked(
4238  						thread, &death->work);
4239  				else {
4240  					binder_enqueue_work_ilocked(
4241  							&death->work,
4242  							&proc->todo);
4243  					binder_wakeup_proc_ilocked(proc);
4244  				}
4245  			}
4246  			binder_inner_proc_unlock(proc);
4247  		} break;
4248  
4249  		default:
4250  			pr_err("%d:%d unknown command %u\n",
4251  			       proc->pid, thread->pid, cmd);
4252  			return -EINVAL;
4253  		}
4254  		*consumed = ptr - buffer;
4255  	}
4256  	return 0;
4257  }
4258  
4259  static void binder_stat_br(struct binder_proc *proc,
4260  			   struct binder_thread *thread, uint32_t cmd)
4261  {
4262  	trace_binder_return(cmd);
4263  	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4264  		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4265  		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4266  		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4267  	}
4268  }
4269  
4270  static int binder_put_node_cmd(struct binder_proc *proc,
4271  			       struct binder_thread *thread,
4272  			       void __user **ptrp,
4273  			       binder_uintptr_t node_ptr,
4274  			       binder_uintptr_t node_cookie,
4275  			       int node_debug_id,
4276  			       uint32_t cmd, const char *cmd_name)
4277  {
4278  	void __user *ptr = *ptrp;
4279  
4280  	if (put_user(cmd, (uint32_t __user *)ptr))
4281  		return -EFAULT;
4282  	ptr += sizeof(uint32_t);
4283  
4284  	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4285  		return -EFAULT;
4286  	ptr += sizeof(binder_uintptr_t);
4287  
4288  	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4289  		return -EFAULT;
4290  	ptr += sizeof(binder_uintptr_t);
4291  
4292  	binder_stat_br(proc, thread, cmd);
4293  	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4294  		     proc->pid, thread->pid, cmd_name, node_debug_id,
4295  		     (u64)node_ptr, (u64)node_cookie);
4296  
4297  	*ptrp = ptr;
4298  	return 0;
4299  }
4300  
4301  static int binder_wait_for_work(struct binder_thread *thread,
4302  				bool do_proc_work)
4303  {
4304  	DEFINE_WAIT(wait);
4305  	struct binder_proc *proc = thread->proc;
4306  	int ret = 0;
4307  
4308  	binder_inner_proc_lock(proc);
4309  	for (;;) {
4310  		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
4311  		if (binder_has_work_ilocked(thread, do_proc_work))
4312  			break;
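		/*
		 * Threads willing to handle proc-wide work park on
		 * proc->waiting_threads so binder_select_thread_ilocked()
		 * can hand them new transactions directly.
		 */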
4313  		if (do_proc_work)
4314  			list_add(&thread->waiting_thread_node,
4315  				 &proc->waiting_threads);
4316  		binder_inner_proc_unlock(proc);
4317  		schedule();
4318  		binder_inner_proc_lock(proc);
4319  		list_del_init(&thread->waiting_thread_node);
4320  		if (signal_pending(current)) {
4321  			ret = -EINTR;
4322  			break;
4323  		}
4324  	}
4325  	finish_wait(&thread->wait, &wait);
4326  	binder_inner_proc_unlock(proc);
4327  
4328  	return ret;
4329  }
4330  
4331  /**
4332   * binder_apply_fd_fixups() - finish fd translation
4333   * @proc:         binder_proc associated with @t->buffer
4334   * @t:	binder transaction with list of fd fixups
4335   *
4336   * Now that we are in the context of the transaction target
4337   * process, we can allocate and install fds. Process the
4338   * list of fds to translate and fix up the buffer with the
4339   * new fds first, and only then install the files.
4340   *
4341   * If we fail to allocate an fd, skip the install and release
4342   * any fds that have already been allocated.
4343   */
4344  static int binder_apply_fd_fixups(struct binder_proc *proc,
4345  				  struct binder_transaction *t)
4346  {
4347  	struct binder_txn_fd_fixup *fixup, *tmp;
4348  	int ret = 0;
4349  
4350  	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4351  		int fd = get_unused_fd_flags(O_CLOEXEC);
4352  
4353  		if (fd < 0) {
4354  			binder_debug(BINDER_DEBUG_TRANSACTION,
4355  				     "failed fd fixup txn %d fd %d\n",
4356  				     t->debug_id, fd);
4357  			ret = -ENOMEM;
4358  			goto err;
4359  		}
4360  		binder_debug(BINDER_DEBUG_TRANSACTION,
4361  			     "fd fixup txn %d fd %d\n",
4362  			     t->debug_id, fd);
4363  		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4364  		fixup->target_fd = fd;
4365  		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4366  						fixup->offset, &fd,
4367  						sizeof(u32))) {
4368  			ret = -EINVAL;
4369  			goto err;
4370  		}
4371  	}
4372  	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
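	/*
	 * Every fd was reserved and patched into the buffer, so it is
	 * now safe to publish the files in the target's fd table.
	 */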
4373  		fd_install(fixup->target_fd, fixup->file);
4374  		list_del(&fixup->fixup_entry);
4375  		kfree(fixup);
4376  	}
4377  
4378  	return ret;
4379  
4380  err:
4381  	binder_free_txn_fixups(t);
4382  	return ret;
4383  }
4384  
4385  static int binder_thread_read(struct binder_proc *proc,
4386  			      struct binder_thread *thread,
4387  			      binder_uintptr_t binder_buffer, size_t size,
4388  			      binder_size_t *consumed, int non_block)
4389  {
4390  	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4391  	void __user *ptr = buffer + *consumed;
4392  	void __user *end = buffer + size;
4393  
4394  	int ret = 0;
4395  	int wait_for_proc_work;
4396  
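	/*
	 * Every read starts with a BR_NOOP; the retry check below treats
	 * a buffer holding only this 4-byte NOOP as "no data added".
	 */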
4397  	if (*consumed == 0) {
4398  		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4399  			return -EFAULT;
4400  		ptr += sizeof(uint32_t);
4401  	}
4402  
4403  retry:
4404  	binder_inner_proc_lock(proc);
4405  	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4406  	binder_inner_proc_unlock(proc);
4407  
4408  	thread->looper |= BINDER_LOOPER_STATE_WAITING;
4409  
4410  	trace_binder_wait_for_work(wait_for_proc_work,
4411  				   !!thread->transaction_stack,
4412  				   !binder_worklist_empty(proc, &thread->todo));
4413  	if (wait_for_proc_work) {
4414  		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4415  					BINDER_LOOPER_STATE_ENTERED))) {
4416  			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4417  				proc->pid, thread->pid, thread->looper);
4418  			wait_event_interruptible(binder_user_error_wait,
4419  						 binder_stop_on_user_error < 2);
4420  		}
4421  		binder_set_nice(proc->default_priority);
4422  	}
4423  
4424  	if (non_block) {
4425  		if (!binder_has_work(thread, wait_for_proc_work))
4426  			ret = -EAGAIN;
4427  	} else {
4428  		ret = binder_wait_for_work(thread, wait_for_proc_work);
4429  	}
4430  
4431  	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4432  
4433  	if (ret)
4434  		return ret;
4435  
4436  	while (1) {
4437  		uint32_t cmd;
4438  		struct binder_transaction_data_secctx tr;
4439  		struct binder_transaction_data *trd = &tr.transaction_data;
4440  		struct binder_work *w = NULL;
4441  		struct list_head *list = NULL;
4442  		struct binder_transaction *t = NULL;
4443  		struct binder_thread *t_from;
4444  		size_t trsize = sizeof(*trd);
4445  
4446  		binder_inner_proc_lock(proc);
4447  		if (!binder_worklist_empty_ilocked(&thread->todo))
4448  			list = &thread->todo;
4449  		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4450  			   wait_for_proc_work)
4451  			list = &proc->todo;
4452  		else {
4453  			binder_inner_proc_unlock(proc);
4454  
4455  			/* no data added */
4456  			if (ptr - buffer == 4 && !thread->looper_need_return)
4457  				goto retry;
4458  			break;
4459  		}
4460  
4461  		if (end - ptr < sizeof(tr) + 4) {
4462  			binder_inner_proc_unlock(proc);
4463  			break;
4464  		}
4465  		w = binder_dequeue_work_head_ilocked(list);
4466  		if (binder_worklist_empty_ilocked(&thread->todo))
4467  			thread->process_todo = false;
4468  
4469  		switch (w->type) {
4470  		case BINDER_WORK_TRANSACTION: {
4471  			binder_inner_proc_unlock(proc);
4472  			t = container_of(w, struct binder_transaction, work);
4473  		} break;
4474  		case BINDER_WORK_RETURN_ERROR: {
4475  			struct binder_error *e = container_of(
4476  					w, struct binder_error, work);
4477  
4478  			WARN_ON(e->cmd == BR_OK);
4479  			binder_inner_proc_unlock(proc);
4480  			if (put_user(e->cmd, (uint32_t __user *)ptr))
4481  				return -EFAULT;
4482  			cmd = e->cmd;
4483  			e->cmd = BR_OK;
4484  			ptr += sizeof(uint32_t);
4485  
4486  			binder_stat_br(proc, thread, cmd);
4487  		} break;
4488  		case BINDER_WORK_TRANSACTION_COMPLETE:
4489  		case BINDER_WORK_TRANSACTION_PENDING:
4490  		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4491  			if (proc->oneway_spam_detection_enabled &&
4492  				   w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4493  				cmd = BR_ONEWAY_SPAM_SUSPECT;
4494  			else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4495  				cmd = BR_TRANSACTION_PENDING_FROZEN;
4496  			else
4497  				cmd = BR_TRANSACTION_COMPLETE;
4498  			binder_inner_proc_unlock(proc);
4499  			kfree(w);
4500  			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4501  			if (put_user(cmd, (uint32_t __user *)ptr))
4502  				return -EFAULT;
4503  			ptr += sizeof(uint32_t);
4504  
4505  			binder_stat_br(proc, thread, cmd);
4506  			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4507  				     "%d:%d BR_TRANSACTION_COMPLETE\n",
4508  				     proc->pid, thread->pid);
4509  		} break;
4510  		case BINDER_WORK_NODE: {
4511  			struct binder_node *node = container_of(w, struct binder_node, work);
4512  			int strong, weak;
4513  			binder_uintptr_t node_ptr = node->ptr;
4514  			binder_uintptr_t node_cookie = node->cookie;
4515  			int node_debug_id = node->debug_id;
4516  			int has_weak_ref;
4517  			int has_strong_ref;
4518  			void __user *orig_ptr = ptr;
4519  
4520  			BUG_ON(proc != node->proc);
4521  			strong = node->internal_strong_refs ||
4522  					node->local_strong_refs;
4523  			weak = !hlist_empty(&node->refs) ||
4524  					node->local_weak_refs ||
4525  					node->tmp_refs || strong;
4526  			has_strong_ref = node->has_strong_ref;
4527  			has_weak_ref = node->has_weak_ref;
4528  
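			/*
			 * Pending refs taken here are balanced by the
			 * BC_INCREFS_DONE/BC_ACQUIRE_DONE handlers in
			 * binder_thread_write().
			 */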
4529  			if (weak && !has_weak_ref) {
4530  				node->has_weak_ref = 1;
4531  				node->pending_weak_ref = 1;
4532  				node->local_weak_refs++;
4533  			}
4534  			if (strong && !has_strong_ref) {
4535  				node->has_strong_ref = 1;
4536  				node->pending_strong_ref = 1;
4537  				node->local_strong_refs++;
4538  			}
4539  			if (!strong && has_strong_ref)
4540  				node->has_strong_ref = 0;
4541  			if (!weak && has_weak_ref)
4542  				node->has_weak_ref = 0;
4543  			if (!weak && !strong) {
4544  				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4545  					     "%d:%d node %d u%016llx c%016llx deleted\n",
4546  					     proc->pid, thread->pid,
4547  					     node_debug_id,
4548  					     (u64)node_ptr,
4549  					     (u64)node_cookie);
4550  				rb_erase(&node->rb_node, &proc->nodes);
4551  				binder_inner_proc_unlock(proc);
4552  				binder_node_lock(node);
4553  				/*
4554  				 * Acquire the node lock before freeing the
4555  				 * node to serialize with other threads that
4556  				 * may have been holding the node lock while
4557  				 * decrementing this node (avoids race where
4558  				 * this thread frees while the other thread
4559  				 * is unlocking the node after the final
4560  				 * decrement)
4561  				 */
4562  				binder_node_unlock(node);
4563  				binder_free_node(node);
4564  			} else
4565  				binder_inner_proc_unlock(proc);
4566  
4567  			if (weak && !has_weak_ref)
4568  				ret = binder_put_node_cmd(
4569  						proc, thread, &ptr, node_ptr,
4570  						node_cookie, node_debug_id,
4571  						BR_INCREFS, "BR_INCREFS");
4572  			if (!ret && strong && !has_strong_ref)
4573  				ret = binder_put_node_cmd(
4574  						proc, thread, &ptr, node_ptr,
4575  						node_cookie, node_debug_id,
4576  						BR_ACQUIRE, "BR_ACQUIRE");
4577  			if (!ret && !strong && has_strong_ref)
4578  				ret = binder_put_node_cmd(
4579  						proc, thread, &ptr, node_ptr,
4580  						node_cookie, node_debug_id,
4581  						BR_RELEASE, "BR_RELEASE");
4582  			if (!ret && !weak && has_weak_ref)
4583  				ret = binder_put_node_cmd(
4584  						proc, thread, &ptr, node_ptr,
4585  						node_cookie, node_debug_id,
4586  						BR_DECREFS, "BR_DECREFS");
4587  			if (orig_ptr == ptr)
4588  				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4589  					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
4590  					     proc->pid, thread->pid,
4591  					     node_debug_id,
4592  					     (u64)node_ptr,
4593  					     (u64)node_cookie);
4594  			if (ret)
4595  				return ret;
4596  		} break;
4597  		case BINDER_WORK_DEAD_BINDER:
4598  		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4599  		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4600  			struct binder_ref_death *death;
4601  			uint32_t cmd;
4602  			binder_uintptr_t cookie;
4603  
4604  			death = container_of(w, struct binder_ref_death, work);
4605  			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4606  				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4607  			else
4608  				cmd = BR_DEAD_BINDER;
4609  			cookie = death->cookie;
4610  
4611  			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4612  				     "%d:%d %s %016llx\n",
4613  				      proc->pid, thread->pid,
4614  				      cmd == BR_DEAD_BINDER ?
4615  				      "BR_DEAD_BINDER" :
4616  				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4617  				      (u64)cookie);
4618  			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4619  				binder_inner_proc_unlock(proc);
4620  				kfree(death);
4621  				binder_stats_deleted(BINDER_STAT_DEATH);
4622  			} else {
4623  				binder_enqueue_work_ilocked(
4624  						w, &proc->delivered_death);
4625  				binder_inner_proc_unlock(proc);
4626  			}
4627  			if (put_user(cmd, (uint32_t __user *)ptr))
4628  				return -EFAULT;
4629  			ptr += sizeof(uint32_t);
4630  			if (put_user(cookie,
4631  				     (binder_uintptr_t __user *)ptr))
4632  				return -EFAULT;
4633  			ptr += sizeof(binder_uintptr_t);
4634  			binder_stat_br(proc, thread, cmd);
4635  			if (cmd == BR_DEAD_BINDER)
4636  				goto done; /* DEAD_BINDER notifications can cause transactions */
4637  		} break;
4638  		default:
4639  			binder_inner_proc_unlock(proc);
4640  			pr_err("%d:%d: bad work type %d\n",
4641  			       proc->pid, thread->pid, w->type);
4642  			break;
4643  		}
4644  
4645  		if (!t)
4646  			continue;
4647  
4648  		BUG_ON(t->buffer == NULL);
4649  		if (t->buffer->target_node) {
4650  			struct binder_node *target_node = t->buffer->target_node;
4651  
4652  			trd->target.ptr = target_node->ptr;
4653  			trd->cookie =  target_node->cookie;
4654  			t->saved_priority = task_nice(current);
4655  			if (t->priority < target_node->min_priority &&
4656  			    !(t->flags & TF_ONE_WAY))
4657  				binder_set_nice(t->priority);
4658  			else if (!(t->flags & TF_ONE_WAY) ||
4659  				 t->saved_priority > target_node->min_priority)
4660  				binder_set_nice(target_node->min_priority);
4661  			cmd = BR_TRANSACTION;
4662  		} else {
4663  			trd->target.ptr = 0;
4664  			trd->cookie = 0;
4665  			cmd = BR_REPLY;
4666  		}
4667  		trd->code = t->code;
4668  		trd->flags = t->flags;
4669  		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4670  
4671  		t_from = binder_get_txn_from(t);
4672  		if (t_from) {
4673  			struct task_struct *sender = t_from->proc->tsk;
4674  
4675  			trd->sender_pid =
4676  				task_tgid_nr_ns(sender,
4677  						task_active_pid_ns(current));
4678  		} else {
4679  			trd->sender_pid = 0;
4680  		}
4681  
4682  		ret = binder_apply_fd_fixups(proc, t);
4683  		if (ret) {
4684  			struct binder_buffer *buffer = t->buffer;
4685  			bool oneway = !!(t->flags & TF_ONE_WAY);
4686  			int tid = t->debug_id;
4687  
4688  			if (t_from)
4689  				binder_thread_dec_tmpref(t_from);
4690  			buffer->transaction = NULL;
4691  			binder_cleanup_transaction(t, "fd fixups failed",
4692  						   BR_FAILED_REPLY);
4693  			binder_free_buf(proc, thread, buffer, true);
4694  			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4695  				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4696  				     proc->pid, thread->pid,
4697  				     oneway ? "async " :
4698  					(cmd == BR_REPLY ? "reply " : ""),
4699  				     tid, BR_FAILED_REPLY, ret, __LINE__);
4700  			if (cmd == BR_REPLY) {
4701  				cmd = BR_FAILED_REPLY;
4702  				if (put_user(cmd, (uint32_t __user *)ptr))
4703  					return -EFAULT;
4704  				ptr += sizeof(uint32_t);
4705  				binder_stat_br(proc, thread, cmd);
4706  				break;
4707  			}
4708  			continue;
4709  		}
4710  		trd->data_size = t->buffer->data_size;
4711  		trd->offsets_size = t->buffer->offsets_size;
4712  		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4713  		trd->data.ptr.offsets = trd->data.ptr.buffer +
4714  					ALIGN(t->buffer->data_size,
4715  					    sizeof(void *));
4716  
4717  		tr.secctx = t->security_ctx;
4718  		if (t->security_ctx) {
4719  			cmd = BR_TRANSACTION_SEC_CTX;
4720  			trsize = sizeof(tr);
4721  		}
4722  		if (put_user(cmd, (uint32_t __user *)ptr)) {
4723  			if (t_from)
4724  				binder_thread_dec_tmpref(t_from);
4725  
4726  			binder_cleanup_transaction(t, "put_user failed",
4727  						   BR_FAILED_REPLY);
4728  
4729  			return -EFAULT;
4730  		}
4731  		ptr += sizeof(uint32_t);
4732  		if (copy_to_user(ptr, &tr, trsize)) {
4733  			if (t_from)
4734  				binder_thread_dec_tmpref(t_from);
4735  
4736  			binder_cleanup_transaction(t, "copy_to_user failed",
4737  						   BR_FAILED_REPLY);
4738  
4739  			return -EFAULT;
4740  		}
4741  		ptr += trsize;
4742  
4743  		trace_binder_transaction_received(t);
4744  		binder_stat_br(proc, thread, cmd);
4745  		binder_debug(BINDER_DEBUG_TRANSACTION,
4746  			     "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
4747  			     proc->pid, thread->pid,
4748  			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4749  				(cmd == BR_TRANSACTION_SEC_CTX) ?
4750  				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4751  			     t->debug_id, t_from ? t_from->proc->pid : 0,
4752  			     t_from ? t_from->pid : 0, cmd,
4753  			     t->buffer->data_size, t->buffer->offsets_size,
4754  			     (u64)trd->data.ptr.buffer,
4755  			     (u64)trd->data.ptr.offsets);
4756  
4757  		if (t_from)
4758  			binder_thread_dec_tmpref(t_from);
4759  		t->buffer->allow_user_free = 1;
4760  		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4761  			binder_inner_proc_lock(thread->proc);
4762  			t->to_parent = thread->transaction_stack;
4763  			t->to_thread = thread;
4764  			thread->transaction_stack = t;
4765  			binder_inner_proc_unlock(thread->proc);
4766  		} else {
4767  			binder_free_transaction(t);
4768  		}
4769  		break;
4770  	}
4771  
4772  done:
4773  
4774  	*consumed = ptr - buffer;
4775  	binder_inner_proc_lock(proc);
4776  	if (proc->requested_threads == 0 &&
4777  	    list_empty(&thread->proc->waiting_threads) &&
4778  	    proc->requested_threads_started < proc->max_threads &&
4779  	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4780  	     BINDER_LOOPER_STATE_ENTERED))
4781  	     /* the user-space code fails to spawn a new thread if we leave this out */) {
4782  		proc->requested_threads++;
4783  		binder_inner_proc_unlock(proc);
4784  		binder_debug(BINDER_DEBUG_THREADS,
4785  			     "%d:%d BR_SPAWN_LOOPER\n",
4786  			     proc->pid, thread->pid);
4787  		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4788  			return -EFAULT;
4789  		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4790  	} else
4791  		binder_inner_proc_unlock(proc);
4792  	return 0;
4793  }
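
/*
 * Illustrative userspace sketch (not part of the driver): how a reader
 * might walk the buffer that binder_thread_read() fills in.  The buffer
 * is a stream of u32 BR_* codes, each followed by a command-specific
 * payload (struct binder_transaction_data for BR_TRANSACTION/BR_REPLY;
 * BR_NOOP and BR_TRANSACTION_COMPLETE carry none).  Types come from the
 * UAPI header <linux/android/binder.h>; handle_transaction() is a
 * hypothetical application callback, and commands not listed are simply
 * left out of the sketch.
 *
 *	void drain_read_buffer(const char *buf, size_t consumed)
 *	{
 *		const char *ptr = buf, *end = buf + consumed;
 *		uint32_t cmd;
 *
 *		while (ptr + sizeof(cmd) <= end) {
 *			memcpy(&cmd, ptr, sizeof(cmd));
 *			ptr += sizeof(cmd);
 *			switch (cmd) {
 *			case BR_NOOP:
 *			case BR_TRANSACTION_COMPLETE:
 *				break;
 *			case BR_TRANSACTION:
 *			case BR_REPLY: {
 *				struct binder_transaction_data tr;
 *
 *				if (ptr + sizeof(tr) > end)
 *					return;
 *				memcpy(&tr, ptr, sizeof(tr));
 *				ptr += sizeof(tr);
 *				handle_transaction(cmd, &tr);
 *				break;
 *			}
 *			default:
 *				return;
 *			}
 *		}
 *	}
 */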
4794  
4795  static void binder_release_work(struct binder_proc *proc,
4796  				struct list_head *list)
4797  {
4798  	struct binder_work *w;
4799  	enum binder_work_type wtype;
4800  
4801  	while (1) {
4802  		binder_inner_proc_lock(proc);
4803  		w = binder_dequeue_work_head_ilocked(list);
4804  		wtype = w ? w->type : 0;
4805  		binder_inner_proc_unlock(proc);
4806  		if (!w)
4807  			return;
4808  
4809  		switch (wtype) {
4810  		case BINDER_WORK_TRANSACTION: {
4811  			struct binder_transaction *t;
4812  
4813  			t = container_of(w, struct binder_transaction, work);
4814  
4815  			binder_cleanup_transaction(t, "process died.",
4816  						   BR_DEAD_REPLY);
4817  		} break;
4818  		case BINDER_WORK_RETURN_ERROR: {
4819  			struct binder_error *e = container_of(
4820  					w, struct binder_error, work);
4821  
4822  			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4823  				"undelivered TRANSACTION_ERROR: %u\n",
4824  				e->cmd);
4825  		} break;
4826  		case BINDER_WORK_TRANSACTION_PENDING:
4827  		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
4828  		case BINDER_WORK_TRANSACTION_COMPLETE: {
4829  			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4830  				"undelivered TRANSACTION_COMPLETE\n");
4831  			kfree(w);
4832  			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4833  		} break;
4834  		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4835  		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4836  			struct binder_ref_death *death;
4837  
4838  			death = container_of(w, struct binder_ref_death, work);
4839  			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4840  				"undelivered death notification, %016llx\n",
4841  				(u64)death->cookie);
4842  			kfree(death);
4843  			binder_stats_deleted(BINDER_STAT_DEATH);
4844  		} break;
4845  		case BINDER_WORK_NODE:
4846  			break;
4847  		default:
4848  			pr_err("unexpected work type, %d, not freed\n",
4849  			       wtype);
4850  			break;
4851  		}
4852  	}
4853  
4854  }
4855  
4856  static struct binder_thread *binder_get_thread_ilocked(
4857  		struct binder_proc *proc, struct binder_thread *new_thread)
4858  {
4859  	struct binder_thread *thread = NULL;
4860  	struct rb_node *parent = NULL;
4861  	struct rb_node **p = &proc->threads.rb_node;
4862  
4863  	while (*p) {
4864  		parent = *p;
4865  		thread = rb_entry(parent, struct binder_thread, rb_node);
4866  
4867  		if (current->pid < thread->pid)
4868  			p = &(*p)->rb_left;
4869  		else if (current->pid > thread->pid)
4870  			p = &(*p)->rb_right;
4871  		else
4872  			return thread;
4873  	}
4874  	if (!new_thread)
4875  		return NULL;
4876  	thread = new_thread;
4877  	binder_stats_created(BINDER_STAT_THREAD);
4878  	thread->proc = proc;
4879  	thread->pid = current->pid;
4880  	atomic_set(&thread->tmp_ref, 0);
4881  	init_waitqueue_head(&thread->wait);
4882  	INIT_LIST_HEAD(&thread->todo);
4883  	rb_link_node(&thread->rb_node, parent, p);
4884  	rb_insert_color(&thread->rb_node, &proc->threads);
4885  	thread->looper_need_return = true;
4886  	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4887  	thread->return_error.cmd = BR_OK;
4888  	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4889  	thread->reply_error.cmd = BR_OK;
4890  	thread->ee.command = BR_OK;
4891  	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4892  	return thread;
4893  }
4894  
4895  static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4896  {
4897  	struct binder_thread *thread;
4898  	struct binder_thread *new_thread;
4899  
4900  	binder_inner_proc_lock(proc);
4901  	thread = binder_get_thread_ilocked(proc, NULL);
4902  	binder_inner_proc_unlock(proc);
4903  	if (!thread) {
4904  		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4905  		if (new_thread == NULL)
4906  			return NULL;
4907  		binder_inner_proc_lock(proc);
4908  		thread = binder_get_thread_ilocked(proc, new_thread);
4909  		binder_inner_proc_unlock(proc);
4910  		if (thread != new_thread)
4911  			kfree(new_thread);
4912  	}
4913  	return thread;
4914  }
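
/*
 * Note on the lookup pattern above: the first call with new_thread ==
 * NULL is a pure lookup under the inner lock.  On a miss the allocation
 * happens with the lock dropped (kzalloc may sleep), and the lookup is
 * retried with the freshly allocated thread; if another task raced in
 * and inserted an entry for this pid first, the spare allocation is
 * simply freed.
 */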
4915  
4916  static void binder_free_proc(struct binder_proc *proc)
4917  {
4918  	struct binder_device *device;
4919  
4920  	BUG_ON(!list_empty(&proc->todo));
4921  	BUG_ON(!list_empty(&proc->delivered_death));
4922  	if (proc->outstanding_txns)
4923  		pr_warn("%s: Unexpected outstanding_txns %d\n",
4924  			__func__, proc->outstanding_txns);
4925  	device = container_of(proc->context, struct binder_device, context);
4926  	if (refcount_dec_and_test(&device->ref)) {
4927  		kfree(proc->context->name);
4928  		kfree(device);
4929  	}
4930  	binder_alloc_deferred_release(&proc->alloc);
4931  	put_task_struct(proc->tsk);
4932  	put_cred(proc->cred);
4933  	binder_stats_deleted(BINDER_STAT_PROC);
4934  	kfree(proc);
4935  }
4936  
4937  static void binder_free_thread(struct binder_thread *thread)
4938  {
4939  	BUG_ON(!list_empty(&thread->todo));
4940  	binder_stats_deleted(BINDER_STAT_THREAD);
4941  	binder_proc_dec_tmpref(thread->proc);
4942  	kfree(thread);
4943  }
4944  
4945  static int binder_thread_release(struct binder_proc *proc,
4946  				 struct binder_thread *thread)
4947  {
4948  	struct binder_transaction *t;
4949  	struct binder_transaction *send_reply = NULL;
4950  	int active_transactions = 0;
4951  	struct binder_transaction *last_t = NULL;
4952  
4953  	binder_inner_proc_lock(thread->proc);
4954  	/*
4955  	 * take a ref on the proc so it survives
4956  	 * after we remove this thread from proc->threads.
4957  	 * The corresponding dec is when we actually
4958  	 * free the thread in binder_free_thread()
4959  	 */
4960  	proc->tmp_ref++;
4961  	/*
4962  	 * take a ref on this thread to ensure it
4963  	 * survives while we are releasing it
4964  	 */
4965  	atomic_inc(&thread->tmp_ref);
4966  	rb_erase(&thread->rb_node, &proc->threads);
4967  	t = thread->transaction_stack;
4968  	if (t) {
4969  		spin_lock(&t->lock);
4970  		if (t->to_thread == thread)
4971  			send_reply = t;
4972  	} else {
4973  		__acquire(&t->lock);
4974  	}
4975  	thread->is_dead = true;
4976  
4977  	while (t) {
4978  		last_t = t;
4979  		active_transactions++;
4980  		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4981  			     "release %d:%d transaction %d %s, still active\n",
4982  			      proc->pid, thread->pid,
4983  			     t->debug_id,
4984  			     (t->to_thread == thread) ? "in" : "out");
4985  
4986  		if (t->to_thread == thread) {
4987  			thread->proc->outstanding_txns--;
4988  			t->to_proc = NULL;
4989  			t->to_thread = NULL;
4990  			if (t->buffer) {
4991  				t->buffer->transaction = NULL;
4992  				t->buffer = NULL;
4993  			}
4994  			t = t->to_parent;
4995  		} else if (t->from == thread) {
4996  			t->from = NULL;
4997  			t = t->from_parent;
4998  		} else
4999  			BUG();
5000  		spin_unlock(&last_t->lock);
5001  		if (t)
5002  			spin_lock(&t->lock);
5003  		else
5004  			__acquire(&t->lock);
5005  	}
5006  	/* annotation for sparse, lock not acquired in last iteration above */
5007  	__release(&t->lock);
5008  
5009  	/*
5010  	 * If this thread used poll, make sure we remove the waitqueue from any
5011  	 * poll data structures holding it.
5012  	 */
5013  	if (thread->looper & BINDER_LOOPER_STATE_POLL)
5014  		wake_up_pollfree(&thread->wait);
5015  
5016  	binder_inner_proc_unlock(thread->proc);
5017  
5018  	/*
5019  	 * This is needed to avoid races between wake_up_pollfree() above and
5020  	 * someone else removing the last entry from the queue for other reasons
5021  	 * (e.g. ep_remove_wait_queue() being called due to an epoll file
5022  	 * descriptor being closed).  Such other users hold an RCU read lock, so
5023  	 * we can be sure they're done after we call synchronize_rcu().
5024  	 */
5025  	if (thread->looper & BINDER_LOOPER_STATE_POLL)
5026  		synchronize_rcu();
5027  
5028  	if (send_reply)
5029  		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5030  	binder_release_work(proc, &thread->todo);
5031  	binder_thread_dec_tmpref(thread);
5032  	return active_transactions;
5033  }
5034  
5035  static __poll_t binder_poll(struct file *filp,
5036  				struct poll_table_struct *wait)
5037  {
5038  	struct binder_proc *proc = filp->private_data;
5039  	struct binder_thread *thread = NULL;
5040  	bool wait_for_proc_work;
5041  
5042  	thread = binder_get_thread(proc);
5043  	if (!thread)
5044  		return EPOLLERR;
5045  
5046  	binder_inner_proc_lock(thread->proc);
5047  	thread->looper |= BINDER_LOOPER_STATE_POLL;
5048  	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5049  
5050  	binder_inner_proc_unlock(thread->proc);
5051  
5052  	poll_wait(filp, &thread->wait, wait);
5053  
5054  	if (binder_has_work(thread, wait_for_proc_work))
5055  		return EPOLLIN;
5056  
5057  	return 0;
5058  }
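
/*
 * Illustrative userspace sketch (not part of the driver): binder_poll()
 * lets a looper thread block in poll()/epoll_wait() instead of inside a
 * blocking BINDER_WRITE_READ.  POLLIN means there is work queued for
 * the calling thread (or for the process, when this thread counts as
 * available for proc work).  'binder_fd' is assumed to be an already
 * opened and mmap'ed binder descriptor.
 *
 *	#include <poll.h>
 *
 *	static int binder_work_pending(int binder_fd, int timeout_ms)
 *	{
 *		struct pollfd pfd = {
 *			.fd = binder_fd,
 *			.events = POLLIN,
 *		};
 *
 *		return poll(&pfd, 1, timeout_ms) > 0 &&
 *		       (pfd.revents & POLLIN);
 *	}
 */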
5059  
5060  static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
5061  				struct binder_thread *thread)
5062  {
5063  	int ret = 0;
5064  	struct binder_proc *proc = filp->private_data;
5065  	void __user *ubuf = (void __user *)arg;
5066  	struct binder_write_read bwr;
5067  
5068  	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5069  		ret = -EFAULT;
5070  		goto out;
5071  	}
5072  	binder_debug(BINDER_DEBUG_READ_WRITE,
5073  		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5074  		     proc->pid, thread->pid,
5075  		     (u64)bwr.write_size, (u64)bwr.write_buffer,
5076  		     (u64)bwr.read_size, (u64)bwr.read_buffer);
5077  
5078  	if (bwr.write_size > 0) {
5079  		ret = binder_thread_write(proc, thread,
5080  					  bwr.write_buffer,
5081  					  bwr.write_size,
5082  					  &bwr.write_consumed);
5083  		trace_binder_write_done(ret);
5084  		if (ret < 0) {
5085  			bwr.read_consumed = 0;
5086  			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5087  				ret = -EFAULT;
5088  			goto out;
5089  		}
5090  	}
5091  	if (bwr.read_size > 0) {
5092  		ret = binder_thread_read(proc, thread, bwr.read_buffer,
5093  					 bwr.read_size,
5094  					 &bwr.read_consumed,
5095  					 filp->f_flags & O_NONBLOCK);
5096  		trace_binder_read_done(ret);
5097  		binder_inner_proc_lock(proc);
5098  		if (!binder_worklist_empty_ilocked(&proc->todo))
5099  			binder_wakeup_proc_ilocked(proc);
5100  		binder_inner_proc_unlock(proc);
5101  		if (ret < 0) {
5102  			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5103  				ret = -EFAULT;
5104  			goto out;
5105  		}
5106  	}
5107  	binder_debug(BINDER_DEBUG_READ_WRITE,
5108  		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5109  		     proc->pid, thread->pid,
5110  		     (u64)bwr.write_consumed, (u64)bwr.write_size,
5111  		     (u64)bwr.read_consumed, (u64)bwr.read_size);
5112  	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5113  		ret = -EFAULT;
5114  		goto out;
5115  	}
5116  out:
5117  	return ret;
5118  }
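
/*
 * Illustrative userspace sketch (not part of the driver): one
 * BINDER_WRITE_READ round trip as consumed by the function above.  The
 * write buffer carries BC_* commands and the read buffer receives the
 * BR_* stream produced by binder_thread_read().  Struct and ioctl come
 * from the UAPI header <linux/android/binder.h>.
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/android/binder.h>
 *
 *	static int do_write_read(int binder_fd,
 *				 const void *wbuf, size_t wsize,
 *				 void *rbuf, size_t rsize,
 *				 size_t *read_consumed)
 *	{
 *		struct binder_write_read bwr = {
 *			.write_buffer = (binder_uintptr_t)(uintptr_t)wbuf,
 *			.write_size = wsize,
 *			.read_buffer = (binder_uintptr_t)(uintptr_t)rbuf,
 *			.read_size = rsize,
 *		};
 *
 *		if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)
 *			return -1;
 *		*read_consumed = bwr.read_consumed;
 *		return 0;
 *	}
 */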
5119  
5120  static int binder_ioctl_set_ctx_mgr(struct file *filp,
5121  				    struct flat_binder_object *fbo)
5122  {
5123  	int ret = 0;
5124  	struct binder_proc *proc = filp->private_data;
5125  	struct binder_context *context = proc->context;
5126  	struct binder_node *new_node;
5127  	kuid_t curr_euid = current_euid();
5128  
5129  	mutex_lock(&context->context_mgr_node_lock);
5130  	if (context->binder_context_mgr_node) {
5131  		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5132  		ret = -EBUSY;
5133  		goto out;
5134  	}
5135  	ret = security_binder_set_context_mgr(proc->cred);
5136  	if (ret < 0)
5137  		goto out;
5138  	if (uid_valid(context->binder_context_mgr_uid)) {
5139  		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5140  			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5141  			       from_kuid(&init_user_ns, curr_euid),
5142  			       from_kuid(&init_user_ns,
5143  					 context->binder_context_mgr_uid));
5144  			ret = -EPERM;
5145  			goto out;
5146  		}
5147  	} else {
5148  		context->binder_context_mgr_uid = curr_euid;
5149  	}
5150  	new_node = binder_new_node(proc, fbo);
5151  	if (!new_node) {
5152  		ret = -ENOMEM;
5153  		goto out;
5154  	}
5155  	binder_node_lock(new_node);
5156  	new_node->local_weak_refs++;
5157  	new_node->local_strong_refs++;
5158  	new_node->has_strong_ref = 1;
5159  	new_node->has_weak_ref = 1;
5160  	context->binder_context_mgr_node = new_node;
5161  	binder_node_unlock(new_node);
5162  	binder_put_node(new_node);
5163  out:
5164  	mutex_unlock(&context->context_mgr_node_lock);
5165  	return ret;
5166  }
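
/*
 * Illustrative userspace sketch (not part of the driver): only one
 * process per binder context may register as context manager, which is
 * what servicemanager does at startup.  A zero argument corresponds to
 * the fbo == NULL path above; BINDER_SET_CONTEXT_MGR_EXT passes a
 * struct flat_binder_object instead.  handle_error() is a hypothetical
 * placeholder.
 *
 *	if (ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0) < 0)
 *		handle_error(errno);	// EBUSY: already set, EPERM: denied
 */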
5167  
5168  static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5169  		struct binder_node_info_for_ref *info)
5170  {
5171  	struct binder_node *node;
5172  	struct binder_context *context = proc->context;
5173  	__u32 handle = info->handle;
5174  
5175  	if (info->strong_count || info->weak_count || info->reserved1 ||
5176  	    info->reserved2 || info->reserved3) {
5177  		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5178  				  proc->pid);
5179  		return -EINVAL;
5180  	}
5181  
5182  	/* This ioctl may only be used by the context manager */
5183  	mutex_lock(&context->context_mgr_node_lock);
5184  	if (!context->binder_context_mgr_node ||
5185  		context->binder_context_mgr_node->proc != proc) {
5186  		mutex_unlock(&context->context_mgr_node_lock);
5187  		return -EPERM;
5188  	}
5189  	mutex_unlock(&context->context_mgr_node_lock);
5190  
5191  	node = binder_get_node_from_ref(proc, handle, true, NULL);
5192  	if (!node)
5193  		return -EINVAL;
5194  
5195  	info->strong_count = node->local_strong_refs +
5196  		node->internal_strong_refs;
5197  	info->weak_count = node->local_weak_refs;
5198  
5199  	binder_put_node(node);
5200  
5201  	return 0;
5202  }
5203  
5204  static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5205  				struct binder_node_debug_info *info)
5206  {
5207  	struct rb_node *n;
5208  	binder_uintptr_t ptr = info->ptr;
5209  
5210  	memset(info, 0, sizeof(*info));
5211  
5212  	binder_inner_proc_lock(proc);
5213  	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5214  		struct binder_node *node = rb_entry(n, struct binder_node,
5215  						    rb_node);
5216  		if (node->ptr > ptr) {
5217  			info->ptr = node->ptr;
5218  			info->cookie = node->cookie;
5219  			info->has_strong_ref = node->has_strong_ref;
5220  			info->has_weak_ref = node->has_weak_ref;
5221  			break;
5222  		}
5223  	}
5224  	binder_inner_proc_unlock(proc);
5225  
5226  	return 0;
5227  }
5228  
5229  static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5230  {
5231  	struct rb_node *n;
5232  	struct binder_thread *thread;
5233  
5234  	if (proc->outstanding_txns > 0)
5235  		return true;
5236  
5237  	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5238  		thread = rb_entry(n, struct binder_thread, rb_node);
5239  		if (thread->transaction_stack)
5240  			return true;
5241  	}
5242  	return false;
5243  }
5244  
5245  static int binder_ioctl_freeze(struct binder_freeze_info *info,
5246  			       struct binder_proc *target_proc)
5247  {
5248  	int ret = 0;
5249  
5250  	if (!info->enable) {
5251  		binder_inner_proc_lock(target_proc);
5252  		target_proc->sync_recv = false;
5253  		target_proc->async_recv = false;
5254  		target_proc->is_frozen = false;
5255  		binder_inner_proc_unlock(target_proc);
5256  		return 0;
5257  	}
5258  
5259  	/*
5260  	 * Freezing the target. Prevent new transactions by
5261  	 * setting frozen state. If timeout specified, wait
5262  	 * for transactions to drain.
5263  	 */
5264  	binder_inner_proc_lock(target_proc);
5265  	target_proc->sync_recv = false;
5266  	target_proc->async_recv = false;
5267  	target_proc->is_frozen = true;
5268  	binder_inner_proc_unlock(target_proc);
5269  
5270  	if (info->timeout_ms > 0)
5271  		ret = wait_event_interruptible_timeout(
5272  			target_proc->freeze_wait,
5273  			(!target_proc->outstanding_txns),
5274  			msecs_to_jiffies(info->timeout_ms));
5275  
5276  	/* Check pending transactions that wait for reply */
5277  	if (ret >= 0) {
5278  		binder_inner_proc_lock(target_proc);
5279  		if (binder_txns_pending_ilocked(target_proc))
5280  			ret = -EAGAIN;
5281  		binder_inner_proc_unlock(target_proc);
5282  	}
5283  
5284  	if (ret < 0) {
5285  		binder_inner_proc_lock(target_proc);
5286  		target_proc->is_frozen = false;
5287  		binder_inner_proc_unlock(target_proc);
5288  	}
5289  
5290  	return ret;
5291  }
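
/*
 * Illustrative userspace sketch (not part of the driver): freezing a
 * target process, mirroring how the fields are consumed above: enable
 * selects freeze/unfreeze, timeout_ms bounds the wait for outstanding
 * transactions, and EAGAIN reports that transactions were still
 * pending.  target_pid and retry_or_unfreeze() are hypothetical.
 *
 *	struct binder_freeze_info info = {
 *		.pid = target_pid,
 *		.enable = 1,
 *		.timeout_ms = 100,
 *	};
 *
 *	if (ioctl(binder_fd, BINDER_FREEZE, &info) < 0 && errno == EAGAIN)
 *		retry_or_unfreeze();
 */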
5292  
5293  static int binder_ioctl_get_freezer_info(
5294  				struct binder_frozen_status_info *info)
5295  {
5296  	struct binder_proc *target_proc;
5297  	bool found = false;
5298  	__u32 txns_pending;
5299  
5300  	info->sync_recv = 0;
5301  	info->async_recv = 0;
5302  
5303  	mutex_lock(&binder_procs_lock);
5304  	hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5305  		if (target_proc->pid == info->pid) {
5306  			found = true;
5307  			binder_inner_proc_lock(target_proc);
5308  			txns_pending = binder_txns_pending_ilocked(target_proc);
5309  			info->sync_recv |= target_proc->sync_recv |
5310  					(txns_pending << 1);
5311  			info->async_recv |= target_proc->async_recv;
5312  			binder_inner_proc_unlock(target_proc);
5313  		}
5314  	}
5315  	mutex_unlock(&binder_procs_lock);
5316  
5317  	if (!found)
5318  		return -EINVAL;
5319  
5320  	return 0;
5321  }
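
/*
 * Note on the encoding above: bit 0 of info->sync_recv reports that a
 * synchronous transaction was received while frozen, bit 1
 * (txns_pending << 1) reports that transactions are still outstanding,
 * and async_recv reports received one-way work.  The flags are OR'ed
 * across every binder context that shares the queried pid.
 */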
5322  
5323  static int binder_ioctl_get_extended_error(struct binder_thread *thread,
5324  					   void __user *ubuf)
5325  {
5326  	struct binder_extended_error ee;
5327  
5328  	binder_inner_proc_lock(thread->proc);
5329  	ee = thread->ee;
5330  	binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5331  	binder_inner_proc_unlock(thread->proc);
5332  
5333  	if (copy_to_user(ubuf, &ee, sizeof(ee)))
5334  		return -EFAULT;
5335  
5336  	return 0;
5337  }
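
/*
 * Illustrative userspace sketch (not part of the driver): after a
 * failed command, a client can ask for the per-thread extended error
 * saved by the driver; the call also resets it to BR_OK as done above.
 * log_binder_error() is a hypothetical placeholder.
 *
 *	struct binder_extended_error ee;
 *
 *	if (ioctl(binder_fd, BINDER_GET_EXTENDED_ERROR, &ee) == 0 &&
 *	    ee.command != BR_OK)
 *		log_binder_error(&ee);
 */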
5338  
5339  static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5340  {
5341  	int ret;
5342  	struct binder_proc *proc = filp->private_data;
5343  	struct binder_thread *thread;
5344  	void __user *ubuf = (void __user *)arg;
5345  
5346  	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
5347  			proc->pid, current->pid, cmd, arg);*/
5348  
5349  	binder_selftest_alloc(&proc->alloc);
5350  
5351  	trace_binder_ioctl(cmd, arg);
5352  
5353  	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5354  	if (ret)
5355  		goto err_unlocked;
5356  
5357  	thread = binder_get_thread(proc);
5358  	if (thread == NULL) {
5359  		ret = -ENOMEM;
5360  		goto err;
5361  	}
5362  
5363  	switch (cmd) {
5364  	case BINDER_WRITE_READ:
5365  		ret = binder_ioctl_write_read(filp, arg, thread);
5366  		if (ret)
5367  			goto err;
5368  		break;
5369  	case BINDER_SET_MAX_THREADS: {
5370  		u32 max_threads;
5371  
5372  		if (copy_from_user(&max_threads, ubuf,
5373  				   sizeof(max_threads))) {
5374  			ret = -EINVAL;
5375  			goto err;
5376  		}
5377  		binder_inner_proc_lock(proc);
5378  		proc->max_threads = max_threads;
5379  		binder_inner_proc_unlock(proc);
5380  		break;
5381  	}
5382  	case BINDER_SET_CONTEXT_MGR_EXT: {
5383  		struct flat_binder_object fbo;
5384  
5385  		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5386  			ret = -EINVAL;
5387  			goto err;
5388  		}
5389  		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5390  		if (ret)
5391  			goto err;
5392  		break;
5393  	}
5394  	case BINDER_SET_CONTEXT_MGR:
5395  		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5396  		if (ret)
5397  			goto err;
5398  		break;
5399  	case BINDER_THREAD_EXIT:
5400  		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5401  			     proc->pid, thread->pid);
5402  		binder_thread_release(proc, thread);
5403  		thread = NULL;
5404  		break;
5405  	case BINDER_VERSION: {
5406  		struct binder_version __user *ver = ubuf;
5407  
5408  		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5409  			     &ver->protocol_version)) {
5410  			ret = -EINVAL;
5411  			goto err;
5412  		}
5413  		break;
5414  	}
5415  	case BINDER_GET_NODE_INFO_FOR_REF: {
5416  		struct binder_node_info_for_ref info;
5417  
5418  		if (copy_from_user(&info, ubuf, sizeof(info))) {
5419  			ret = -EFAULT;
5420  			goto err;
5421  		}
5422  
5423  		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5424  		if (ret < 0)
5425  			goto err;
5426  
5427  		if (copy_to_user(ubuf, &info, sizeof(info))) {
5428  			ret = -EFAULT;
5429  			goto err;
5430  		}
5431  
5432  		break;
5433  	}
5434  	case BINDER_GET_NODE_DEBUG_INFO: {
5435  		struct binder_node_debug_info info;
5436  
5437  		if (copy_from_user(&info, ubuf, sizeof(info))) {
5438  			ret = -EFAULT;
5439  			goto err;
5440  		}
5441  
5442  		ret = binder_ioctl_get_node_debug_info(proc, &info);
5443  		if (ret < 0)
5444  			goto err;
5445  
5446  		if (copy_to_user(ubuf, &info, sizeof(info))) {
5447  			ret = -EFAULT;
5448  			goto err;
5449  		}
5450  		break;
5451  	}
5452  	case BINDER_FREEZE: {
5453  		struct binder_freeze_info info;
5454  		struct binder_proc **target_procs = NULL, *target_proc;
5455  		int target_procs_count = 0, i = 0;
5456  
5457  		ret = 0;
5458  
5459  		if (copy_from_user(&info, ubuf, sizeof(info))) {
5460  			ret = -EFAULT;
5461  			goto err;
5462  		}
5463  
5464  		mutex_lock(&binder_procs_lock);
5465  		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5466  			if (target_proc->pid == info.pid)
5467  				target_procs_count++;
5468  		}
5469  
5470  		if (target_procs_count == 0) {
5471  			mutex_unlock(&binder_procs_lock);
5472  			ret = -EINVAL;
5473  			goto err;
5474  		}
5475  
5476  		target_procs = kcalloc(target_procs_count,
5477  				       sizeof(struct binder_proc *),
5478  				       GFP_KERNEL);
5479  
5480  		if (!target_procs) {
5481  			mutex_unlock(&binder_procs_lock);
5482  			ret = -ENOMEM;
5483  			goto err;
5484  		}
5485  
5486  		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5487  			if (target_proc->pid != info.pid)
5488  				continue;
5489  
5490  			binder_inner_proc_lock(target_proc);
5491  			target_proc->tmp_ref++;
5492  			binder_inner_proc_unlock(target_proc);
5493  
5494  			target_procs[i++] = target_proc;
5495  		}
5496  		mutex_unlock(&binder_procs_lock);
5497  
5498  		for (i = 0; i < target_procs_count; i++) {
5499  			if (ret >= 0)
5500  				ret = binder_ioctl_freeze(&info,
5501  							  target_procs[i]);
5502  
5503  			binder_proc_dec_tmpref(target_procs[i]);
5504  		}
5505  
5506  		kfree(target_procs);
5507  
5508  		if (ret < 0)
5509  			goto err;
5510  		break;
5511  	}
5512  	case BINDER_GET_FROZEN_INFO: {
5513  		struct binder_frozen_status_info info;
5514  
5515  		if (copy_from_user(&info, ubuf, sizeof(info))) {
5516  			ret = -EFAULT;
5517  			goto err;
5518  		}
5519  
5520  		ret = binder_ioctl_get_freezer_info(&info);
5521  		if (ret < 0)
5522  			goto err;
5523  
5524  		if (copy_to_user(ubuf, &info, sizeof(info))) {
5525  			ret = -EFAULT;
5526  			goto err;
5527  		}
5528  		break;
5529  	}
5530  	case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5531  		uint32_t enable;
5532  
5533  		if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5534  			ret = -EFAULT;
5535  			goto err;
5536  		}
5537  		binder_inner_proc_lock(proc);
5538  		proc->oneway_spam_detection_enabled = (bool)enable;
5539  		binder_inner_proc_unlock(proc);
5540  		break;
5541  	}
5542  	case BINDER_GET_EXTENDED_ERROR:
5543  		ret = binder_ioctl_get_extended_error(thread, ubuf);
5544  		if (ret < 0)
5545  			goto err;
5546  		break;
5547  	default:
5548  		ret = -EINVAL;
5549  		goto err;
5550  	}
5551  	ret = 0;
5552  err:
5553  	if (thread)
5554  		thread->looper_need_return = false;
5555  	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5556  	if (ret && ret != -EINTR)
5557  		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5558  err_unlocked:
5559  	trace_binder_ioctl_done(ret);
5560  	return ret;
5561  }
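
/*
 * Illustrative userspace sketch (not part of the driver): the usual
 * open-time handshake exercising two of the simpler ioctls above.
 * BINDER_VERSION must report BINDER_CURRENT_PROTOCOL_VERSION, and
 * BINDER_SET_MAX_THREADS caps how many extra looper threads the driver
 * will request via BR_SPAWN_LOOPER.
 *
 *	static int binder_handshake(int binder_fd)
 *	{
 *		struct binder_version vers;
 *		uint32_t max_threads = 15;
 *
 *		if (ioctl(binder_fd, BINDER_VERSION, &vers) < 0 ||
 *		    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *			return -1;	// incompatible kernel/userspace pair
 *		return ioctl(binder_fd, BINDER_SET_MAX_THREADS, &max_threads);
 *	}
 */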
5562  
5563  static void binder_vma_open(struct vm_area_struct *vma)
5564  {
5565  	struct binder_proc *proc = vma->vm_private_data;
5566  
5567  	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5568  		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5569  		     proc->pid, vma->vm_start, vma->vm_end,
5570  		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5571  		     (unsigned long)pgprot_val(vma->vm_page_prot));
5572  }
5573  
5574  static void binder_vma_close(struct vm_area_struct *vma)
5575  {
5576  	struct binder_proc *proc = vma->vm_private_data;
5577  
5578  	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5579  		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5580  		     proc->pid, vma->vm_start, vma->vm_end,
5581  		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5582  		     (unsigned long)pgprot_val(vma->vm_page_prot));
5583  	binder_alloc_vma_close(&proc->alloc);
5584  }
5585  
5586  static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5587  {
5588  	return VM_FAULT_SIGBUS;
5589  }
5590  
5591  static const struct vm_operations_struct binder_vm_ops = {
5592  	.open = binder_vma_open,
5593  	.close = binder_vma_close,
5594  	.fault = binder_vm_fault,
5595  };
5596  
5597  static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5598  {
5599  	struct binder_proc *proc = filp->private_data;
5600  
5601  	if (proc->tsk != current->group_leader)
5602  		return -EINVAL;
5603  
5604  	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5605  		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5606  		     __func__, proc->pid, vma->vm_start, vma->vm_end,
5607  		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5608  		     (unsigned long)pgprot_val(vma->vm_page_prot));
5609  
5610  	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5611  		pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5612  		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5613  		return -EPERM;
5614  	}
5615  	vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
5616  
5617  	vma->vm_ops = &binder_vm_ops;
5618  	vma->vm_private_data = proc;
5619  
5620  	return binder_alloc_mmap_handler(&proc->alloc, vma);
5621  }
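
/*
 * Illustrative userspace sketch (not part of the driver): setting up the
 * buffer area that binder_mmap() wires into binder_alloc.  The mapping
 * must not be writable (FORBIDDEN_MMAP_FLAGS == VM_WRITE) and must be
 * made by the same process that opened the descriptor; the kernel
 * copies transaction data into it and userspace only reads.  The 1 MiB
 * size is a typical libbinder choice, not something the driver mandates.
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ,
 *			 MAP_PRIVATE | MAP_NORESERVE, fd, 0);
 */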
5622  
5623  static int binder_open(struct inode *nodp, struct file *filp)
5624  {
5625  	struct binder_proc *proc, *itr;
5626  	struct binder_device *binder_dev;
5627  	struct binderfs_info *info;
5628  	struct dentry *binder_binderfs_dir_entry_proc = NULL;
5629  	bool existing_pid = false;
5630  
5631  	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5632  		     current->group_leader->pid, current->pid);
5633  
5634  	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5635  	if (proc == NULL)
5636  		return -ENOMEM;
5637  	spin_lock_init(&proc->inner_lock);
5638  	spin_lock_init(&proc->outer_lock);
5639  	get_task_struct(current->group_leader);
5640  	proc->tsk = current->group_leader;
5641  	proc->cred = get_cred(filp->f_cred);
5642  	INIT_LIST_HEAD(&proc->todo);
5643  	init_waitqueue_head(&proc->freeze_wait);
5644  	proc->default_priority = task_nice(current);
5645  	/* binderfs stashes devices in i_private */
5646  	if (is_binderfs_device(nodp)) {
5647  		binder_dev = nodp->i_private;
5648  		info = nodp->i_sb->s_fs_info;
5649  		binder_binderfs_dir_entry_proc = info->proc_log_dir;
5650  	} else {
5651  		binder_dev = container_of(filp->private_data,
5652  					  struct binder_device, miscdev);
5653  	}
5654  	refcount_inc(&binder_dev->ref);
5655  	proc->context = &binder_dev->context;
5656  	binder_alloc_init(&proc->alloc);
5657  
5658  	binder_stats_created(BINDER_STAT_PROC);
5659  	proc->pid = current->group_leader->pid;
5660  	INIT_LIST_HEAD(&proc->delivered_death);
5661  	INIT_LIST_HEAD(&proc->waiting_threads);
5662  	filp->private_data = proc;
5663  
5664  	mutex_lock(&binder_procs_lock);
5665  	hlist_for_each_entry(itr, &binder_procs, proc_node) {
5666  		if (itr->pid == proc->pid) {
5667  			existing_pid = true;
5668  			break;
5669  		}
5670  	}
5671  	hlist_add_head(&proc->proc_node, &binder_procs);
5672  	mutex_unlock(&binder_procs_lock);
5673  
5674  	if (binder_debugfs_dir_entry_proc && !existing_pid) {
5675  		char strbuf[11];
5676  
5677  		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5678  		/*
5679  		 * proc debug entries are shared between contexts.
5680  		 * Only create for the first PID to avoid debugfs log spamming
5681  		 * The printing code will anyway print all contexts for a given
5682  		 * PID so this is not a problem.
5683  		 */
5684  		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5685  			binder_debugfs_dir_entry_proc,
5686  			(void *)(unsigned long)proc->pid,
5687  			&proc_fops);
5688  	}
5689  
5690  	if (binder_binderfs_dir_entry_proc && !existing_pid) {
5691  		char strbuf[11];
5692  		struct dentry *binderfs_entry;
5693  
5694  		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5695  		/*
5696  		 * Similar to debugfs, the process specific log file is shared
5697  		 * between contexts. Only create for the first PID.
5698  		 * This is ok since same as debugfs, the log file will contain
5699  		 * information on all contexts of a given PID.
5700  		 */
5701  		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5702  			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5703  		if (!IS_ERR(binderfs_entry)) {
5704  			proc->binderfs_entry = binderfs_entry;
5705  		} else {
5706  			int error;
5707  
5708  			error = PTR_ERR(binderfs_entry);
5709  			pr_warn("Unable to create file %s in binderfs (error %d)\n",
5710  				strbuf, error);
5711  		}
5712  	}
5713  
5714  	return 0;
5715  }
5716  
5717  static int binder_flush(struct file *filp, fl_owner_t id)
5718  {
5719  	struct binder_proc *proc = filp->private_data;
5720  
5721  	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5722  
5723  	return 0;
5724  }
5725  
5726  static void binder_deferred_flush(struct binder_proc *proc)
5727  {
5728  	struct rb_node *n;
5729  	int wake_count = 0;
5730  
5731  	binder_inner_proc_lock(proc);
5732  	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5733  		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5734  
5735  		thread->looper_need_return = true;
5736  		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5737  			wake_up_interruptible(&thread->wait);
5738  			wake_count++;
5739  		}
5740  	}
5741  	binder_inner_proc_unlock(proc);
5742  
5743  	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5744  		     "binder_flush: %d woke %d threads\n", proc->pid,
5745  		     wake_count);
5746  }
5747  
5748  static int binder_release(struct inode *nodp, struct file *filp)
5749  {
5750  	struct binder_proc *proc = filp->private_data;
5751  
5752  	debugfs_remove(proc->debugfs_entry);
5753  
5754  	if (proc->binderfs_entry) {
5755  		binderfs_remove_file(proc->binderfs_entry);
5756  		proc->binderfs_entry = NULL;
5757  	}
5758  
5759  	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5760  
5761  	return 0;
5762  }
5763  
5764  static int binder_node_release(struct binder_node *node, int refs)
5765  {
5766  	struct binder_ref *ref;
5767  	int death = 0;
5768  	struct binder_proc *proc = node->proc;
5769  
5770  	binder_release_work(proc, &node->async_todo);
5771  
5772  	binder_node_lock(node);
5773  	binder_inner_proc_lock(proc);
5774  	binder_dequeue_work_ilocked(&node->work);
5775  	/*
5776  	 * The caller must have taken a temporary ref on the node.
5777  	 */
5778  	BUG_ON(!node->tmp_refs);
5779  	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5780  		binder_inner_proc_unlock(proc);
5781  		binder_node_unlock(node);
5782  		binder_free_node(node);
5783  
5784  		return refs;
5785  	}
5786  
5787  	node->proc = NULL;
5788  	node->local_strong_refs = 0;
5789  	node->local_weak_refs = 0;
5790  	binder_inner_proc_unlock(proc);
5791  
5792  	spin_lock(&binder_dead_nodes_lock);
5793  	hlist_add_head(&node->dead_node, &binder_dead_nodes);
5794  	spin_unlock(&binder_dead_nodes_lock);
5795  
5796  	hlist_for_each_entry(ref, &node->refs, node_entry) {
5797  		refs++;
5798  		/*
5799  		 * Need the node lock to synchronize
5800  		 * with new notification requests and the
5801  		 * inner lock to synchronize with queued
5802  		 * death notifications.
5803  		 */
5804  		binder_inner_proc_lock(ref->proc);
5805  		if (!ref->death) {
5806  			binder_inner_proc_unlock(ref->proc);
5807  			continue;
5808  		}
5809  
5810  		death++;
5811  
5812  		BUG_ON(!list_empty(&ref->death->work.entry));
5813  		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5814  		binder_enqueue_work_ilocked(&ref->death->work,
5815  					    &ref->proc->todo);
5816  		binder_wakeup_proc_ilocked(ref->proc);
5817  		binder_inner_proc_unlock(ref->proc);
5818  	}
5819  
5820  	binder_debug(BINDER_DEBUG_DEAD_BINDER,
5821  		     "node %d now dead, refs %d, death %d\n",
5822  		     node->debug_id, refs, death);
5823  	binder_node_unlock(node);
5824  	binder_put_node(node);
5825  
5826  	return refs;
5827  }
5828  
5829  static void binder_deferred_release(struct binder_proc *proc)
5830  {
5831  	struct binder_context *context = proc->context;
5832  	struct rb_node *n;
5833  	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5834  
5835  	mutex_lock(&binder_procs_lock);
5836  	hlist_del(&proc->proc_node);
5837  	mutex_unlock(&binder_procs_lock);
5838  
5839  	mutex_lock(&context->context_mgr_node_lock);
5840  	if (context->binder_context_mgr_node &&
5841  	    context->binder_context_mgr_node->proc == proc) {
5842  		binder_debug(BINDER_DEBUG_DEAD_BINDER,
5843  			     "%s: %d context_mgr_node gone\n",
5844  			     __func__, proc->pid);
5845  		context->binder_context_mgr_node = NULL;
5846  	}
5847  	mutex_unlock(&context->context_mgr_node_lock);
5848  	binder_inner_proc_lock(proc);
5849  	/*
5850  	 * Make sure proc stays alive after we
5851  	 * remove all the threads
5852  	 */
5853  	proc->tmp_ref++;
5854  
5855  	proc->is_dead = true;
5856  	proc->is_frozen = false;
5857  	proc->sync_recv = false;
5858  	proc->async_recv = false;
5859  	threads = 0;
5860  	active_transactions = 0;
5861  	while ((n = rb_first(&proc->threads))) {
5862  		struct binder_thread *thread;
5863  
5864  		thread = rb_entry(n, struct binder_thread, rb_node);
5865  		binder_inner_proc_unlock(proc);
5866  		threads++;
5867  		active_transactions += binder_thread_release(proc, thread);
5868  		binder_inner_proc_lock(proc);
5869  	}
5870  
5871  	nodes = 0;
5872  	incoming_refs = 0;
5873  	while ((n = rb_first(&proc->nodes))) {
5874  		struct binder_node *node;
5875  
5876  		node = rb_entry(n, struct binder_node, rb_node);
5877  		nodes++;
5878  		/*
5879  		 * take a temporary ref on the node before
5880  		 * calling binder_node_release() which will either
5881  		 * kfree() the node or call binder_put_node()
5882  		 */
5883  		binder_inc_node_tmpref_ilocked(node);
5884  		rb_erase(&node->rb_node, &proc->nodes);
5885  		binder_inner_proc_unlock(proc);
5886  		incoming_refs = binder_node_release(node, incoming_refs);
5887  		binder_inner_proc_lock(proc);
5888  	}
5889  	binder_inner_proc_unlock(proc);
5890  
5891  	outgoing_refs = 0;
5892  	binder_proc_lock(proc);
5893  	while ((n = rb_first(&proc->refs_by_desc))) {
5894  		struct binder_ref *ref;
5895  
5896  		ref = rb_entry(n, struct binder_ref, rb_node_desc);
5897  		outgoing_refs++;
5898  		binder_cleanup_ref_olocked(ref);
5899  		binder_proc_unlock(proc);
5900  		binder_free_ref(ref);
5901  		binder_proc_lock(proc);
5902  	}
5903  	binder_proc_unlock(proc);
5904  
5905  	binder_release_work(proc, &proc->todo);
5906  	binder_release_work(proc, &proc->delivered_death);
5907  
5908  	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5909  		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5910  		     __func__, proc->pid, threads, nodes, incoming_refs,
5911  		     outgoing_refs, active_transactions);
5912  
5913  	binder_proc_dec_tmpref(proc);
5914  }
5915  
5916  static void binder_deferred_func(struct work_struct *work)
5917  {
5918  	struct binder_proc *proc;
5919  
5920  	int defer;
5921  
5922  	do {
5923  		mutex_lock(&binder_deferred_lock);
5924  		if (!hlist_empty(&binder_deferred_list)) {
5925  			proc = hlist_entry(binder_deferred_list.first,
5926  					struct binder_proc, deferred_work_node);
5927  			hlist_del_init(&proc->deferred_work_node);
5928  			defer = proc->deferred_work;
5929  			proc->deferred_work = 0;
5930  		} else {
5931  			proc = NULL;
5932  			defer = 0;
5933  		}
5934  		mutex_unlock(&binder_deferred_lock);
5935  
5936  		if (defer & BINDER_DEFERRED_FLUSH)
5937  			binder_deferred_flush(proc);
5938  
5939  		if (defer & BINDER_DEFERRED_RELEASE)
5940  			binder_deferred_release(proc); /* frees proc */
5941  	} while (proc);
5942  }
5943  static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5944  
5945  static void
5946  binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5947  {
5948  	mutex_lock(&binder_deferred_lock);
5949  	proc->deferred_work |= defer;
5950  	if (hlist_unhashed(&proc->deferred_work_node)) {
5951  		hlist_add_head(&proc->deferred_work_node,
5952  				&binder_deferred_list);
5953  		schedule_work(&binder_deferred_work);
5954  	}
5955  	mutex_unlock(&binder_deferred_lock);
5956  }
5957  
5958  static void print_binder_transaction_ilocked(struct seq_file *m,
5959  					     struct binder_proc *proc,
5960  					     const char *prefix,
5961  					     struct binder_transaction *t)
5962  {
5963  	struct binder_proc *to_proc;
5964  	struct binder_buffer *buffer = t->buffer;
5965  	ktime_t current_time = ktime_get();
5966  
5967  	spin_lock(&t->lock);
5968  	to_proc = t->to_proc;
5969  	seq_printf(m,
5970  		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
5971  		   prefix, t->debug_id, t,
5972  		   t->from_pid,
5973  		   t->from_tid,
5974  		   to_proc ? to_proc->pid : 0,
5975  		   t->to_thread ? t->to_thread->pid : 0,
5976  		   t->code, t->flags, t->priority, t->need_reply,
5977  		   ktime_ms_delta(current_time, t->start_time));
5978  	spin_unlock(&t->lock);
5979  
5980  	if (proc != to_proc) {
5981  		/*
5982  		 * Can only safely deref buffer if we are holding the
5983  		 * correct proc inner lock for this node
5984  		 */
5985  		seq_puts(m, "\n");
5986  		return;
5987  	}
5988  
5989  	if (buffer == NULL) {
5990  		seq_puts(m, " buffer free\n");
5991  		return;
5992  	}
5993  	if (buffer->target_node)
5994  		seq_printf(m, " node %d", buffer->target_node->debug_id);
5995  	seq_printf(m, " size %zd:%zd data %pK\n",
5996  		   buffer->data_size, buffer->offsets_size,
5997  		   buffer->user_data);
5998  }
5999  
6000  static void print_binder_work_ilocked(struct seq_file *m,
6001  				     struct binder_proc *proc,
6002  				     const char *prefix,
6003  				     const char *transaction_prefix,
6004  				     struct binder_work *w)
6005  {
6006  	struct binder_node *node;
6007  	struct binder_transaction *t;
6008  
6009  	switch (w->type) {
6010  	case BINDER_WORK_TRANSACTION:
6011  		t = container_of(w, struct binder_transaction, work);
6012  		print_binder_transaction_ilocked(
6013  				m, proc, transaction_prefix, t);
6014  		break;
6015  	case BINDER_WORK_RETURN_ERROR: {
6016  		struct binder_error *e = container_of(
6017  				w, struct binder_error, work);
6018  
6019  		seq_printf(m, "%stransaction error: %u\n",
6020  			   prefix, e->cmd);
6021  	} break;
6022  	case BINDER_WORK_TRANSACTION_COMPLETE:
6023  		seq_printf(m, "%stransaction complete\n", prefix);
6024  		break;
6025  	case BINDER_WORK_NODE:
6026  		node = container_of(w, struct binder_node, work);
6027  		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6028  			   prefix, node->debug_id,
6029  			   (u64)node->ptr, (u64)node->cookie);
6030  		break;
6031  	case BINDER_WORK_DEAD_BINDER:
6032  		seq_printf(m, "%shas dead binder\n", prefix);
6033  		break;
6034  	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
6035  		seq_printf(m, "%shas cleared dead binder\n", prefix);
6036  		break;
6037  	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
6038  		seq_printf(m, "%shas cleared death notification\n", prefix);
6039  		break;
6040  	default:
6041  		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6042  		break;
6043  	}
6044  }
6045  
6046  static void print_binder_thread_ilocked(struct seq_file *m,
6047  					struct binder_thread *thread,
6048  					int print_always)
6049  {
6050  	struct binder_transaction *t;
6051  	struct binder_work *w;
6052  	size_t start_pos = m->count;
6053  	size_t header_pos;
6054  
6055  	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
6056  			thread->pid, thread->looper,
6057  			thread->looper_need_return,
6058  			atomic_read(&thread->tmp_ref));
6059  	header_pos = m->count;
6060  	t = thread->transaction_stack;
6061  	while (t) {
6062  		if (t->from == thread) {
6063  			print_binder_transaction_ilocked(m, thread->proc,
6064  					"    outgoing transaction", t);
6065  			t = t->from_parent;
6066  		} else if (t->to_thread == thread) {
6067  			print_binder_transaction_ilocked(m, thread->proc,
6068  						 "    incoming transaction", t);
6069  			t = t->to_parent;
6070  		} else {
6071  			print_binder_transaction_ilocked(m, thread->proc,
6072  					"    bad transaction", t);
6073  			t = NULL;
6074  		}
6075  	}
6076  	list_for_each_entry(w, &thread->todo, entry) {
6077  		print_binder_work_ilocked(m, thread->proc, "    ",
6078  					  "    pending transaction", w);
6079  	}
6080  	if (!print_always && m->count == header_pos)
6081  		m->count = start_pos;
6082  }
6083  
6084  static void print_binder_node_nilocked(struct seq_file *m,
6085  				       struct binder_node *node)
6086  {
6087  	struct binder_ref *ref;
6088  	struct binder_work *w;
6089  	int count;
6090  
6091  	count = 0;
6092  	hlist_for_each_entry(ref, &node->refs, node_entry)
6093  		count++;
6094  
6095  	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6096  		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
6097  		   node->has_strong_ref, node->has_weak_ref,
6098  		   node->local_strong_refs, node->local_weak_refs,
6099  		   node->internal_strong_refs, count, node->tmp_refs);
6100  	if (count) {
6101  		seq_puts(m, " proc");
6102  		hlist_for_each_entry(ref, &node->refs, node_entry)
6103  			seq_printf(m, " %d", ref->proc->pid);
6104  	}
6105  	seq_puts(m, "\n");
6106  	if (node->proc) {
6107  		list_for_each_entry(w, &node->async_todo, entry)
6108  			print_binder_work_ilocked(m, node->proc, "    ",
6109  					  "    pending async transaction", w);
6110  	}
6111  }
6112  
6113  static void print_binder_ref_olocked(struct seq_file *m,
6114  				     struct binder_ref *ref)
6115  {
6116  	binder_node_lock(ref->node);
6117  	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
6118  		   ref->data.debug_id, ref->data.desc,
6119  		   ref->node->proc ? "" : "dead ",
6120  		   ref->node->debug_id, ref->data.strong,
6121  		   ref->data.weak, ref->death);
6122  	binder_node_unlock(ref->node);
6123  }
6124  
6125  static void print_binder_proc(struct seq_file *m,
6126  			      struct binder_proc *proc, int print_all)
6127  {
6128  	struct binder_work *w;
6129  	struct rb_node *n;
6130  	size_t start_pos = m->count;
6131  	size_t header_pos;
6132  	struct binder_node *last_node = NULL;
6133  
6134  	seq_printf(m, "proc %d\n", proc->pid);
6135  	seq_printf(m, "context %s\n", proc->context->name);
6136  	header_pos = m->count;
6137  
6138  	binder_inner_proc_lock(proc);
6139  	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6140  		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6141  						rb_node), print_all);
6142  
6143  	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6144  		struct binder_node *node = rb_entry(n, struct binder_node,
6145  						    rb_node);
6146  		if (!print_all && !node->has_async_transaction)
6147  			continue;
6148  
6149  		/*
6150  		 * take a temporary reference on the node so it
6151  		 * survives and isn't removed from the tree
6152  		 * while we print it.
6153  		 */
6154  		binder_inc_node_tmpref_ilocked(node);
6155  		/* Need to drop inner lock to take node lock */
6156  		binder_inner_proc_unlock(proc);
6157  		if (last_node)
6158  			binder_put_node(last_node);
6159  		binder_node_inner_lock(node);
6160  		print_binder_node_nilocked(m, node);
6161  		binder_node_inner_unlock(node);
6162  		last_node = node;
6163  		binder_inner_proc_lock(proc);
6164  	}
6165  	binder_inner_proc_unlock(proc);
6166  	if (last_node)
6167  		binder_put_node(last_node);
6168  
6169  	if (print_all) {
6170  		binder_proc_lock(proc);
6171  		for (n = rb_first(&proc->refs_by_desc);
6172  		     n != NULL;
6173  		     n = rb_next(n))
6174  			print_binder_ref_olocked(m, rb_entry(n,
6175  							    struct binder_ref,
6176  							    rb_node_desc));
6177  		binder_proc_unlock(proc);
6178  	}
6179  	binder_alloc_print_allocated(m, &proc->alloc);
6180  	binder_inner_proc_lock(proc);
6181  	list_for_each_entry(w, &proc->todo, entry)
6182  		print_binder_work_ilocked(m, proc, "  ",
6183  					  "  pending transaction", w);
6184  	list_for_each_entry(w, &proc->delivered_death, entry) {
6185  		seq_puts(m, "  has delivered dead binder\n");
6186  		break;
6187  	}
6188  	binder_inner_proc_unlock(proc);
6189  	if (!print_all && m->count == header_pos)
6190  		m->count = start_pos;
6191  }
6192  
6193  static const char * const binder_return_strings[] = {
6194  	"BR_ERROR",
6195  	"BR_OK",
6196  	"BR_TRANSACTION",
6197  	"BR_REPLY",
6198  	"BR_ACQUIRE_RESULT",
6199  	"BR_DEAD_REPLY",
6200  	"BR_TRANSACTION_COMPLETE",
6201  	"BR_INCREFS",
6202  	"BR_ACQUIRE",
6203  	"BR_RELEASE",
6204  	"BR_DECREFS",
6205  	"BR_ATTEMPT_ACQUIRE",
6206  	"BR_NOOP",
6207  	"BR_SPAWN_LOOPER",
6208  	"BR_FINISHED",
6209  	"BR_DEAD_BINDER",
6210  	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
6211  	"BR_FAILED_REPLY",
6212  	"BR_FROZEN_REPLY",
6213  	"BR_ONEWAY_SPAM_SUSPECT",
6214  	"BR_TRANSACTION_PENDING_FROZEN"
6215  };
6216  
6217  static const char * const binder_command_strings[] = {
6218  	"BC_TRANSACTION",
6219  	"BC_REPLY",
6220  	"BC_ACQUIRE_RESULT",
6221  	"BC_FREE_BUFFER",
6222  	"BC_INCREFS",
6223  	"BC_ACQUIRE",
6224  	"BC_RELEASE",
6225  	"BC_DECREFS",
6226  	"BC_INCREFS_DONE",
6227  	"BC_ACQUIRE_DONE",
6228  	"BC_ATTEMPT_ACQUIRE",
6229  	"BC_REGISTER_LOOPER",
6230  	"BC_ENTER_LOOPER",
6231  	"BC_EXIT_LOOPER",
6232  	"BC_REQUEST_DEATH_NOTIFICATION",
6233  	"BC_CLEAR_DEATH_NOTIFICATION",
6234  	"BC_DEAD_BINDER_DONE",
6235  	"BC_TRANSACTION_SG",
6236  	"BC_REPLY_SG",
6237  };
6238  
6239  static const char * const binder_objstat_strings[] = {
6240  	"proc",
6241  	"thread",
6242  	"node",
6243  	"ref",
6244  	"death",
6245  	"transaction",
6246  	"transaction_complete"
6247  };
6248  
6249  static void print_binder_stats(struct seq_file *m, const char *prefix,
6250  			       struct binder_stats *stats)
6251  {
6252  	int i;
6253  
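	/* the string tables must stay in sync with the stat counter arrays */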
6254  	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6255  		     ARRAY_SIZE(binder_command_strings));
6256  	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6257  		int temp = atomic_read(&stats->bc[i]);
6258  
6259  		if (temp)
6260  			seq_printf(m, "%s%s: %d\n", prefix,
6261  				   binder_command_strings[i], temp);
6262  	}
6263  
6264  	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6265  		     ARRAY_SIZE(binder_return_strings));
6266  	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6267  		int temp = atomic_read(&stats->br[i]);
6268  
6269  		if (temp)
6270  			seq_printf(m, "%s%s: %d\n", prefix,
6271  				   binder_return_strings[i], temp);
6272  	}
6273  
6274  	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6275  		     ARRAY_SIZE(binder_objstat_strings));
6276  	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6277  		     ARRAY_SIZE(stats->obj_deleted));
6278  	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6279  		int created = atomic_read(&stats->obj_created[i]);
6280  		int deleted = atomic_read(&stats->obj_deleted[i]);
6281  
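		/* "active" = created - deleted, "total" = all-time created */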
6282  		if (created || deleted)
6283  			seq_printf(m, "%s%s: active %d total %d\n",
6284  				prefix,
6285  				binder_objstat_strings[i],
6286  				created - deleted,
6287  				created);
6288  	}
6289  }
6290  
6291  static void print_binder_proc_stats(struct seq_file *m,
6292  				    struct binder_proc *proc)
6293  {
6294  	struct binder_work *w;
6295  	struct binder_thread *thread;
6296  	struct rb_node *n;
6297  	int count, strong, weak, ready_threads;
6298  	size_t free_async_space =
6299  		binder_alloc_get_free_async_space(&proc->alloc);
6300  
6301  	seq_printf(m, "proc %d\n", proc->pid);
6302  	seq_printf(m, "context %s\n", proc->context->name);
6303  	count = 0;
6304  	ready_threads = 0;
6305  	binder_inner_proc_lock(proc);
6306  	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6307  		count++;
6308  
6309  	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
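	/* threads on proc->waiting_threads are idle, ready to handle work */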
6310  		ready_threads++;
6311  
6312  	seq_printf(m, "  threads: %d\n", count);
6313  	seq_printf(m, "  requested threads: %d+%d/%d\n"
6314  			"  ready threads %d\n"
6315  			"  free async space %zd\n", proc->requested_threads,
6316  			proc->requested_threads_started, proc->max_threads,
6317  			ready_threads,
6318  			free_async_space);
6319  	count = 0;
6320  	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6321  		count++;
6322  	binder_inner_proc_unlock(proc);
6323  	seq_printf(m, "  nodes: %d\n", count);
6324  	count = 0;
6325  	strong = 0;
6326  	weak = 0;
6327  	binder_proc_lock(proc);
6328  	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6329  		struct binder_ref *ref = rb_entry(n, struct binder_ref,
6330  						  rb_node_desc);
6331  		count++;
6332  		strong += ref->data.strong;
6333  		weak += ref->data.weak;
6334  	}
6335  	binder_proc_unlock(proc);
6336  	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
6337  
6338  	count = binder_alloc_get_allocated_count(&proc->alloc);
6339  	seq_printf(m, "  buffers: %d\n", count);
6340  
6341  	binder_alloc_print_pages(m, &proc->alloc);
6342  
6343  	count = 0;
6344  	binder_inner_proc_lock(proc);
6345  	list_for_each_entry(w, &proc->todo, entry) {
6346  		if (w->type == BINDER_WORK_TRANSACTION)
6347  			count++;
6348  	}
6349  	binder_inner_proc_unlock(proc);
6350  	seq_printf(m, "  pending transactions: %d\n", count);
6351  
6352  	print_binder_stats(m, "  ", &proc->stats);
6353  }
6354  
6355  static int state_show(struct seq_file *m, void *unused)
6356  {
6357  	struct binder_proc *proc;
6358  	struct binder_node *node;
6359  	struct binder_node *last_node = NULL;
6360  
6361  	seq_puts(m, "binder state:\n");
6362  
6363  	spin_lock(&binder_dead_nodes_lock);
6364  	if (!hlist_empty(&binder_dead_nodes))
6365  		seq_puts(m, "dead nodes:\n");
6366  	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6367  		/*
6368  		 * take a temporary reference on the node so it
6369  		 * survives and isn't removed from the list
6370  		 * while we print it.
6371  		 */
6372  		node->tmp_refs++;
6373  		spin_unlock(&binder_dead_nodes_lock);
6374  		if (last_node)
6375  			binder_put_node(last_node);
6376  		binder_node_lock(node);
6377  		print_binder_node_nilocked(m, node);
6378  		binder_node_unlock(node);
6379  		last_node = node;
6380  		spin_lock(&binder_dead_nodes_lock);
6381  	}
6382  	spin_unlock(&binder_dead_nodes_lock);
6383  	if (last_node)
6384  		binder_put_node(last_node);
6385  
6386  	mutex_lock(&binder_procs_lock);
6387  	hlist_for_each_entry(proc, &binder_procs, proc_node)
6388  		print_binder_proc(m, proc, 1);
6389  	mutex_unlock(&binder_procs_lock);
6390  
6391  	return 0;
6392  }
6393  
6394  static int stats_show(struct seq_file *m, void *unused)
6395  {
6396  	struct binder_proc *proc;
6397  
6398  	seq_puts(m, "binder stats:\n");
6399  
6400  	print_binder_stats(m, "", &binder_stats);
6401  
6402  	mutex_lock(&binder_procs_lock);
6403  	hlist_for_each_entry(proc, &binder_procs, proc_node)
6404  		print_binder_proc_stats(m, proc);
6405  	mutex_unlock(&binder_procs_lock);
6406  
6407  	return 0;
6408  }
6409  
6410  static int transactions_show(struct seq_file *m, void *unused)
6411  {
6412  	struct binder_proc *proc;
6413  
6414  	seq_puts(m, "binder transactions:\n");
6415  	mutex_lock(&binder_procs_lock);
6416  	hlist_for_each_entry(proc, &binder_procs, proc_node)
6417  		print_binder_proc(m, proc, 0);
6418  	mutex_unlock(&binder_procs_lock);
6419  
6420  	return 0;
6421  }
6422  
6423  static int proc_show(struct seq_file *m, void *unused)
6424  {
6425  	struct binder_proc *itr;
6426  	int pid = (unsigned long)m->private;
6427  
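	/*
	 * A task may open a binder device more than once, so a single pid
	 * can match several binder_proc entries; print each of them.
	 */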
6428  	mutex_lock(&binder_procs_lock);
6429  	hlist_for_each_entry(itr, &binder_procs, proc_node) {
6430  		if (itr->pid == pid) {
6431  			seq_puts(m, "binder proc state:\n");
6432  			print_binder_proc(m, itr, 1);
6433  		}
6434  	}
6435  	mutex_unlock(&binder_procs_lock);
6436  
6437  	return 0;
6438  }
6439  
6440  static void print_binder_transaction_log_entry(struct seq_file *m,
6441  					struct binder_transaction_log_entry *e)
6442  {
6443  	int debug_id = READ_ONCE(e->debug_id_done);
6444  	/*
6445  	 * read barrier to guarantee debug_id_done read before
6446  	 * we print the log values
6447  	 */
6448  	smp_rmb();
6449  	seq_printf(m,
6450  		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6451  		   e->debug_id, (e->call_type == 2) ? "reply" :
6452  		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6453  		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
6454  		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
6455  		   e->return_error, e->return_error_param,
6456  		   e->return_error_line);
6457  	/*
6458  	 * read-barrier to guarantee read of debug_id_done after
6459  	 * done printing the fields of the entry
6460  	 */
6461  	smp_rmb();
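	/* flag the entry if it was unfinished or overwritten while printing */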
6462  	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6463  			"\n" : " (incomplete)\n");
6464  }
6465  
6466  static int transaction_log_show(struct seq_file *m, void *unused)
6467  {
6468  	struct binder_transaction_log *log = m->private;
6469  	unsigned int log_cur = atomic_read(&log->cur);
6470  	unsigned int count;
6471  	unsigned int cur;
6472  	int i;
6473  
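	/*
	 * The log is a fixed-size ring buffer. Once it has wrapped
	 * (log->full), start one slot past the most recent entry so the
	 * oldest record is printed first; otherwise start at index 0.
	 */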
6474  	count = log_cur + 1;
6475  	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6476  		0 : count % ARRAY_SIZE(log->entry);
6477  	if (count > ARRAY_SIZE(log->entry) || log->full)
6478  		count = ARRAY_SIZE(log->entry);
6479  	for (i = 0; i < count; i++) {
6480  		unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6481  
6482  		print_binder_transaction_log_entry(m, &log->entry[index]);
6483  	}
6484  	return 0;
6485  }
6486  
6487  const struct file_operations binder_fops = {
6488  	.owner = THIS_MODULE,
6489  	.poll = binder_poll,
6490  	.unlocked_ioctl = binder_ioctl,
6491  	.compat_ioctl = compat_ptr_ioctl,
6492  	.mmap = binder_mmap,
6493  	.open = binder_open,
6494  	.flush = binder_flush,
6495  	.release = binder_release,
6496  };
6497  
6498  DEFINE_SHOW_ATTRIBUTE(state);
6499  DEFINE_SHOW_ATTRIBUTE(stats);
6500  DEFINE_SHOW_ATTRIBUTE(transactions);
6501  DEFINE_SHOW_ATTRIBUTE(transaction_log);
6502  
6503  const struct binder_debugfs_entry binder_debugfs_entries[] = {
6504  	{
6505  		.name = "state",
6506  		.mode = 0444,
6507  		.fops = &state_fops,
6508  		.data = NULL,
6509  	},
6510  	{
6511  		.name = "stats",
6512  		.mode = 0444,
6513  		.fops = &stats_fops,
6514  		.data = NULL,
6515  	},
6516  	{
6517  		.name = "transactions",
6518  		.mode = 0444,
6519  		.fops = &transactions_fops,
6520  		.data = NULL,
6521  	},
6522  	{
6523  		.name = "transaction_log",
6524  		.mode = 0444,
6525  		.fops = &transaction_log_fops,
6526  		.data = &binder_transaction_log,
6527  	},
6528  	{
6529  		.name = "failed_transaction_log",
6530  		.mode = 0444,
6531  		.fops = &transaction_log_fops,
6532  		.data = &binder_transaction_log_failed,
6533  	},
6534  	{} /* terminator */
6535  };
6536  
6537  static int __init init_binder_device(const char *name)
6538  {
6539  	int ret;
6540  	struct binder_device *binder_device;
6541  
6542  	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6543  	if (!binder_device)
6544  		return -ENOMEM;
6545  
6546  	binder_device->miscdev.fops = &binder_fops;
6547  	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6548  	binder_device->miscdev.name = name;
6549  
6550  	refcount_set(&binder_device->ref, 1);
6551  	binder_device->context.binder_context_mgr_uid = INVALID_UID;
6552  	binder_device->context.name = name;
6553  	mutex_init(&binder_device->context.context_mgr_node_lock);
6554  
6555  	ret = misc_register(&binder_device->miscdev);
6556  	if (ret < 0) {
6557  		kfree(binder_device);
6558  		return ret;
6559  	}
6560  
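	/* track the device so binder_init() can unwind it on failure */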
6561  	hlist_add_head(&binder_device->hlist, &binder_devices);
6562  
6563  	return ret;
6564  }
6565  
6566  static int __init binder_init(void)
6567  {
6568  	int ret;
6569  	char *device_name, *device_tmp;
6570  	struct binder_device *device;
6571  	struct hlist_node *tmp;
6572  	char *device_names = NULL;
6573  	const struct binder_debugfs_entry *db_entry;
6574  
6575  	ret = binder_alloc_shrinker_init();
6576  	if (ret)
6577  		return ret;
6578  
6579  	atomic_set(&binder_transaction_log.cur, ~0U);
6580  	atomic_set(&binder_transaction_log_failed.cur, ~0U);
6581  
6582  	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6583  
6584  	binder_for_each_debugfs_entry(db_entry)
6585  		debugfs_create_file(db_entry->name,
6586  					db_entry->mode,
6587  					binder_debugfs_dir_entry_root,
6588  					db_entry->data,
6589  					db_entry->fops);
6590  
6591  	binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6592  						binder_debugfs_dir_entry_root);
6593  
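	/*
	 * With CONFIG_ANDROID_BINDERFS the devices named in
	 * binder_devices_param are created through binderfs instead of
	 * being registered here as misc devices.
	 */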
6594  	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6595  	    strcmp(binder_devices_param, "") != 0) {
6596  		/*
6597  		 * Copy the module_parameter string, because we don't want to
6598  		 * tokenize it in-place.
6599  		 */
6600  		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6601  		if (!device_names) {
6602  			ret = -ENOMEM;
6603  			goto err_alloc_device_names_failed;
6604  		}
6605  
6606  		device_tmp = device_names;
6607  		while ((device_name = strsep(&device_tmp, ","))) {
6608  			ret = init_binder_device(device_name);
6609  			if (ret)
6610  				goto err_init_binder_device_failed;
6611  		}
6612  	}
6613  
6614  	ret = init_binderfs();
6615  	if (ret)
6616  		goto err_init_binder_device_failed;
6617  
6618  	return ret;
6619  
6620  err_init_binder_device_failed:
6621  	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6622  		misc_deregister(&device->miscdev);
6623  		hlist_del(&device->hlist);
6624  		kfree(device);
6625  	}
6626  
6627  	kfree(device_names);
6628  
6629  err_alloc_device_names_failed:
6630  	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6631  	binder_alloc_shrinker_exit();
6632  
6633  	return ret;
6634  }
6635  
6636  device_initcall(binder_init);
6637  
6638  #define CREATE_TRACE_POINTS
6639  #include "binder_trace.h"
6640  
6641  MODULE_LICENSE("GPL v2");
6642