xref: /openbmc/linux/drivers/android/binder.c (revision 19c98724)
1 /* binder.c
2  *
3  * Android IPC Subsystem
4  *
5  * Copyright (C) 2007-2008 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17 
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 
20 #include <asm/cacheflush.h>
21 #include <linux/fdtable.h>
22 #include <linux/file.h>
23 #include <linux/freezer.h>
24 #include <linux/fs.h>
25 #include <linux/list.h>
26 #include <linux/miscdevice.h>
27 #include <linux/mm.h>
28 #include <linux/module.h>
29 #include <linux/mutex.h>
30 #include <linux/nsproxy.h>
31 #include <linux/poll.h>
32 #include <linux/debugfs.h>
33 #include <linux/rbtree.h>
34 #include <linux/sched/signal.h>
35 #include <linux/sched/mm.h>
36 #include <linux/seq_file.h>
37 #include <linux/uaccess.h>
38 #include <linux/vmalloc.h>
39 #include <linux/slab.h>
40 #include <linux/pid_namespace.h>
41 #include <linux/security.h>
42 
43 #ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
44 #define BINDER_IPC_32BIT 1
45 #endif
46 
47 #include <uapi/linux/android/binder.h>
48 #include "binder_trace.h"
49 
50 static DEFINE_MUTEX(binder_main_lock);
51 static DEFINE_MUTEX(binder_deferred_lock);
52 static DEFINE_MUTEX(binder_alloc_mmap_lock);
53 
54 static HLIST_HEAD(binder_devices);
55 static HLIST_HEAD(binder_procs);
56 static HLIST_HEAD(binder_deferred_list);
57 static HLIST_HEAD(binder_dead_nodes);
58 
59 static struct dentry *binder_debugfs_dir_entry_root;
60 static struct dentry *binder_debugfs_dir_entry_proc;
61 static int binder_last_id;
62 
63 #define BINDER_DEBUG_ENTRY(name) \
64 static int binder_##name##_open(struct inode *inode, struct file *file) \
65 { \
66 	return single_open(file, binder_##name##_show, inode->i_private); \
67 } \
68 \
69 static const struct file_operations binder_##name##_fops = { \
70 	.owner = THIS_MODULE, \
71 	.open = binder_##name##_open, \
72 	.read = seq_read, \
73 	.llseek = seq_lseek, \
74 	.release = single_release, \
75 }
76 
77 static int binder_proc_show(struct seq_file *m, void *unused);
78 BINDER_DEBUG_ENTRY(proc);
79 
80 /* This is only defined in include/asm-arm/sizes.h */
81 #ifndef SZ_1K
82 #define SZ_1K                               0x400
83 #endif
84 
85 #ifndef SZ_4M
86 #define SZ_4M                               0x400000
87 #endif
88 
89 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
90 
91 #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
92 
93 enum {
94 	BINDER_DEBUG_USER_ERROR             = 1U << 0,
95 	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
96 	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
97 	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
98 	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
99 	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
100 	BINDER_DEBUG_READ_WRITE             = 1U << 6,
101 	BINDER_DEBUG_USER_REFS              = 1U << 7,
102 	BINDER_DEBUG_THREADS                = 1U << 8,
103 	BINDER_DEBUG_TRANSACTION            = 1U << 9,
104 	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
105 	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
106 	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
107 	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
108 };
109 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
110 	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
111 module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
112 
113 static bool binder_debug_no_lock;
114 module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
115 
116 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
117 module_param_named(devices, binder_devices_param, charp, 0444);
118 
119 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
120 static int binder_stop_on_user_error;
121 
122 static int binder_set_stop_on_user_error(const char *val,
123 					 struct kernel_param *kp)
124 {
125 	int ret;
126 
127 	ret = param_set_int(val, kp);
128 	if (binder_stop_on_user_error < 2)
129 		wake_up(&binder_user_error_wait);
130 	return ret;
131 }
132 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
133 	param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
134 
135 #define binder_debug(mask, x...) \
136 	do { \
137 		if (binder_debug_mask & mask) \
138 			pr_info(x); \
139 	} while (0)
140 
141 #define binder_user_error(x...) \
142 	do { \
143 		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
144 			pr_info(x); \
145 		if (binder_stop_on_user_error) \
146 			binder_stop_on_user_error = 2; \
147 	} while (0)
148 
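/*
 * Illustrative use of the macros above (a sketch, not compiled-in code):
 * a call site tags its message with one of the BINDER_DEBUG_* bits,
 *
 *	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%d: open\n", current->pid);
 *
 * and the message is emitted only while that bit is set in
 * binder_debug_mask, which can also be changed at runtime through the
 * module parameter (path assumes the default "binder" module name):
 *
 *	echo 0x3ff > /sys/module/binder/parameters/debug_mask
 */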
149 #define to_flat_binder_object(hdr) \
150 	container_of(hdr, struct flat_binder_object, hdr)
151 
152 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
153 
154 #define to_binder_buffer_object(hdr) \
155 	container_of(hdr, struct binder_buffer_object, hdr)
156 
157 #define to_binder_fd_array_object(hdr) \
158 	container_of(hdr, struct binder_fd_array_object, hdr)
159 
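/*
 * Each helper above recovers the enclosing object from a pointer to the
 * binder_object_header embedded at its start. A minimal sketch of the
 * intended use (see binder_transaction_buffer_release() for the real
 * thing):
 *
 *	struct binder_object_header *hdr = ...;
 *
 *	if (hdr->type == BINDER_TYPE_FD) {
 *		struct binder_fd_object *fp = to_binder_fd_object(hdr);
 *		...use fp->fd...
 *	}
 */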
160 /*
161  * debug declarations for binder_alloc. To be
162  * moved to binder_alloc.c
163  */
164 enum {
165 	BINDER_ALLOC_DEBUG_OPEN_CLOSE             = 1U << 1,
166 	BINDER_ALLOC_DEBUG_BUFFER_ALLOC           = 1U << 2,
167 	BINDER_ALLOC_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
168 };
169 static uint32_t binder_alloc_debug_mask;
170 
171 module_param_named(alloc_debug_mask, binder_alloc_debug_mask,
172 		   uint, 0644);
173 
174 #define binder_alloc_debug(mask, x...) \
175 	do { \
176 		if (binder_alloc_debug_mask & mask) \
177 			pr_info(x); \
178 	} while (0)
179 /* end of binder_alloc debug declarations */
180 
181 enum binder_stat_types {
182 	BINDER_STAT_PROC,
183 	BINDER_STAT_THREAD,
184 	BINDER_STAT_NODE,
185 	BINDER_STAT_REF,
186 	BINDER_STAT_DEATH,
187 	BINDER_STAT_TRANSACTION,
188 	BINDER_STAT_TRANSACTION_COMPLETE,
189 	BINDER_STAT_COUNT
190 };
191 
192 struct binder_stats {
193 	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
194 	int bc[_IOC_NR(BC_REPLY_SG) + 1];
195 	int obj_created[BINDER_STAT_COUNT];
196 	int obj_deleted[BINDER_STAT_COUNT];
197 };
198 
199 static struct binder_stats binder_stats;
200 
201 static inline void binder_stats_deleted(enum binder_stat_types type)
202 {
203 	binder_stats.obj_deleted[type]++;
204 }
205 
206 static inline void binder_stats_created(enum binder_stat_types type)
207 {
208 	binder_stats.obj_created[type]++;
209 }
210 
211 struct binder_transaction_log_entry {
212 	int debug_id;
213 	int call_type;
214 	int from_proc;
215 	int from_thread;
216 	int target_handle;
217 	int to_proc;
218 	int to_thread;
219 	int to_node;
220 	int data_size;
221 	int offsets_size;
222 	const char *context_name;
223 };
224 struct binder_transaction_log {
225 	int next;
226 	int full;
227 	struct binder_transaction_log_entry entry[32];
228 };
229 static struct binder_transaction_log binder_transaction_log;
230 static struct binder_transaction_log binder_transaction_log_failed;
231 
232 static struct binder_transaction_log_entry *binder_transaction_log_add(
233 	struct binder_transaction_log *log)
234 {
235 	struct binder_transaction_log_entry *e;
236 
237 	e = &log->entry[log->next];
238 	memset(e, 0, sizeof(*e));
239 	log->next++;
240 	if (log->next == ARRAY_SIZE(log->entry)) {
241 		log->next = 0;
242 		log->full = 1;
243 	}
244 	return e;
245 }
246 
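/*
 * The log is a fixed-size ring: 'next' wraps after 32 entries and
 * 'full' is set on the first wrap, after which new entries overwrite
 * the oldest. A reader wanting chronological order therefore dumps
 * (sketch of what the debugfs show routine does):
 *
 *	if (log->full)
 *		print entries [next..31], then [0..next-1]
 *	else
 *		print entries [0..next-1]
 */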
247 struct binder_context {
248 	struct binder_node *binder_context_mgr_node;
249 	kuid_t binder_context_mgr_uid;
250 	const char *name;
251 };
252 
253 struct binder_device {
254 	struct hlist_node hlist;
255 	struct miscdevice miscdev;
256 	struct binder_context context;
257 };
258 
259 struct binder_work {
260 	struct list_head entry;
261 	enum {
262 		BINDER_WORK_TRANSACTION = 1,
263 		BINDER_WORK_TRANSACTION_COMPLETE,
264 		BINDER_WORK_NODE,
265 		BINDER_WORK_DEAD_BINDER,
266 		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
267 		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
268 	} type;
269 };
270 
271 struct binder_node {
272 	int debug_id;
273 	struct binder_work work;
274 	union {
275 		struct rb_node rb_node;
276 		struct hlist_node dead_node;
277 	};
278 	struct binder_proc *proc;
279 	struct hlist_head refs;
280 	int internal_strong_refs;
281 	int local_weak_refs;
282 	int local_strong_refs;
283 	binder_uintptr_t ptr;
284 	binder_uintptr_t cookie;
285 	unsigned has_strong_ref:1;
286 	unsigned pending_strong_ref:1;
287 	unsigned has_weak_ref:1;
288 	unsigned pending_weak_ref:1;
289 	unsigned has_async_transaction:1;
290 	unsigned accept_fds:1;
291 	unsigned min_priority:8;
292 	struct list_head async_todo;
293 };
294 
295 struct binder_ref_death {
296 	struct binder_work work;
297 	binder_uintptr_t cookie;
298 };
299 
300 struct binder_ref {
301 	/* Lookups needed: */
302 	/*   node + proc => ref (transaction) */
303 	/*   desc + proc => ref (transaction, inc/dec ref) */
304 	/*   node => refs + procs (proc exit) */
305 	int debug_id;
306 	struct rb_node rb_node_desc;
307 	struct rb_node rb_node_node;
308 	struct hlist_node node_entry;
309 	struct binder_proc *proc;
310 	struct binder_node *node;
311 	uint32_t desc;
312 	int strong;
313 	int weak;
314 	struct binder_ref_death *death;
315 };
316 
317 struct binder_buffer {
318 	struct list_head entry; /* free and allocated entries by address */
319 	struct rb_node rb_node; /* free entry by size or allocated entry */
320 				/* by address */
321 	unsigned free:1;
322 	unsigned allow_user_free:1;
323 	unsigned async_transaction:1;
324 	unsigned debug_id:29;
325 
326 	struct binder_transaction *transaction;
327 
328 	struct binder_node *target_node;
329 	size_t data_size;
330 	size_t offsets_size;
331 	size_t extra_buffers_size;
332 	uint8_t data[0];
333 };
334 
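/*
 * The flexible data[] array holds the whole transaction payload, laid
 * out back to back with pointer-size alignment (informal sketch;
 * compare the size computation in binder_free_buf_locked()):
 *
 *	data + 0                                    transaction data
 *	     + ALIGN(data_size, sizeof(void *))     offsets array
 *	     + ALIGN(offsets_size, sizeof(void *))  extra (sg) buffers
 */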
335 enum binder_deferred_state {
336 	BINDER_DEFERRED_PUT_FILES    = 0x01,
337 	BINDER_DEFERRED_FLUSH        = 0x02,
338 	BINDER_DEFERRED_RELEASE      = 0x04,
339 };
340 
341 /**
342  * struct binder_alloc - per-binder proc state for binder allocator
343  * @vma:               vm_area_struct passed to mmap_handler
344  *                     (invariant after mmap)
345  * @vma_vm_mm:         copy of vma->vm_mm (invariant after mmap)
346  * @buffer:            base of per-proc address space mapped via mmap
347  * @user_buffer_offset: offset between user and kernel VAs for buffer
348  * @buffers:           list of all buffers for this proc
349  * @free_buffers:      rb tree of buffers available for allocation
350  *                     sorted by size
351  * @allocated_buffers: rb tree of allocated buffers sorted by address
352  * @free_async_space:  VA space available for async buffers. This is
353  *                     initialized at mmap time to 1/2 the full VA space
354  * @pages:             array of physical page addresses for each page of
355  *                     mmap'd space
356  * @buffer_size:       size of address space (could be less than requested)
357  *
358  * Bookkeeping structure for per-proc address space management for binder
359  * buffers. It is normally initialized during binder_open() and binder_mmap()
360  * calls. The address space is used for both user-visible buffers and for
361  * struct binder_buffer objects used to track the user buffers
362  */
363 struct binder_alloc {
364 	struct mutex mutex;
365 	struct task_struct *tsk;
366 	struct vm_area_struct *vma;
367 	struct mm_struct *vma_vm_mm;
368 	void *buffer;
369 	ptrdiff_t user_buffer_offset;
370 	struct list_head buffers;
371 	struct rb_root free_buffers;
372 	struct rb_root allocated_buffers;
373 	size_t free_async_space;
374 	struct page **pages;
375 	size_t buffer_size;
376 	int pid;
377 };
378 
379 struct binder_proc {
380 	struct hlist_node proc_node;
381 	struct rb_root threads;
382 	struct rb_root nodes;
383 	struct rb_root refs_by_desc;
384 	struct rb_root refs_by_node;
385 	int pid;
386 	struct task_struct *tsk;
387 	struct files_struct *files;
388 	struct hlist_node deferred_work_node;
389 	int deferred_work;
390 
391 	struct list_head todo;
392 	wait_queue_head_t wait;
393 	struct binder_stats stats;
394 	struct list_head delivered_death;
395 	int max_threads;
396 	int requested_threads;
397 	int requested_threads_started;
398 	int ready_threads;
399 	long default_priority;
400 	struct dentry *debugfs_entry;
401 	struct binder_alloc alloc;
402 	struct binder_context *context;
403 };
404 
405 enum {
406 	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
407 	BINDER_LOOPER_STATE_ENTERED     = 0x02,
408 	BINDER_LOOPER_STATE_EXITED      = 0x04,
409 	BINDER_LOOPER_STATE_INVALID     = 0x08,
410 	BINDER_LOOPER_STATE_WAITING     = 0x10,
411 	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
412 };
413 
414 struct binder_thread {
415 	struct binder_proc *proc;
416 	struct rb_node rb_node;
417 	int pid;
418 	int looper;
419 	struct binder_transaction *transaction_stack;
420 	struct list_head todo;
421 	uint32_t return_error; /* Write failed, return error code in read buf */
422 	uint32_t return_error2; /* Write failed, return error code in read */
423 		/* buffer. Used when sending a reply to a dead process that */
424 		/* we are also waiting on */
425 	wait_queue_head_t wait;
426 	struct binder_stats stats;
427 };
428 
429 struct binder_transaction {
430 	int debug_id;
431 	struct binder_work work;
432 	struct binder_thread *from;
433 	struct binder_transaction *from_parent;
434 	struct binder_proc *to_proc;
435 	struct binder_thread *to_thread;
436 	struct binder_transaction *to_parent;
437 	unsigned need_reply:1;
438 	/* unsigned is_dead:1; */	/* not used at the moment */
439 
440 	struct binder_buffer *buffer;
441 	unsigned int	code;
442 	unsigned int	flags;
443 	long	priority;
444 	long	saved_priority;
445 	kuid_t	sender_euid;
446 };
447 
448 /*
449  * Forward declarations of binder_alloc functions.
450  * These will be moved to binder_alloc.h when
451  * binder_alloc is moved to its own files.
452  */
453 extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
454 						  size_t data_size,
455 						  size_t offsets_size,
456 						  size_t extra_buffers_size,
457 						  int is_async);
458 extern void binder_alloc_init(struct binder_alloc *alloc);
459 extern void binder_alloc_vma_close(struct binder_alloc *alloc);
460 extern struct binder_buffer *
461 binder_alloc_buffer_lookup(struct binder_alloc *alloc,
462 			   uintptr_t user_ptr);
463 extern void binder_alloc_free_buf(struct binder_alloc *alloc,
464 				  struct binder_buffer *buffer);
465 extern int binder_alloc_mmap_handler(struct binder_alloc *alloc,
466 				     struct vm_area_struct *vma);
467 extern void binder_alloc_deferred_release(struct binder_alloc *alloc);
468 extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
469 extern void binder_alloc_print_allocated(struct seq_file *m,
470 					 struct binder_alloc *alloc);
471 
472 static inline size_t
473 binder_alloc_get_free_async_space(struct binder_alloc *alloc)
474 {
475 	size_t free_async_space;
476 
477 	mutex_lock(&alloc->mutex);
478 	free_async_space = alloc->free_async_space;
479 	mutex_unlock(&alloc->mutex);
480 	return free_async_space;
481 }
482 
483 static inline ptrdiff_t
484 binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc)
485 {
486 	/*
487 	 * user_buffer_offset is constant if vma is set and
488 	 * undefined if vma is not set. It is possible to
489 	 * get here with !alloc->vma if the target process
490 	 * is dying while a transaction is being initiated.
491 	 * Returning the old value is ok in this case and
492 	 * the transaction will fail.
493 	 */
494 	return alloc->user_buffer_offset;
495 }
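/*
 * Because the offset is constant, translating a kernel-side buffer
 * address into the address the target process sees is one addition;
 * a sketch of the pattern used when handing a transaction to userspace:
 *
 *	binder_uintptr_t uptr = (uintptr_t)t->buffer->data +
 *		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
 */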
496 /* end of binder_alloc declarations */
497 
498 static void
499 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
500 
501 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
502 {
503 	struct files_struct *files = proc->files;
504 	unsigned long rlim_cur;
505 	unsigned long irqs;
506 
507 	if (files == NULL)
508 		return -ESRCH;
509 
510 	if (!lock_task_sighand(proc->tsk, &irqs))
511 		return -EMFILE;
512 
513 	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
514 	unlock_task_sighand(proc->tsk, &irqs);
515 
516 	return __alloc_fd(files, 0, rlim_cur, flags);
517 }
518 
519 /*
520  * copied from fd_install
521  */
522 static void task_fd_install(
523 	struct binder_proc *proc, unsigned int fd, struct file *file)
524 {
525 	if (proc->files)
526 		__fd_install(proc->files, fd, file);
527 }
528 
529 /*
530  * copied from sys_close
531  */
532 static long task_close_fd(struct binder_proc *proc, unsigned int fd)
533 {
534 	int retval;
535 
536 	if (proc->files == NULL)
537 		return -ESRCH;
538 
539 	retval = __close_fd(proc->files, fd);
540 	/* can't restart close syscall because file table entry was cleared */
541 	if (unlikely(retval == -ERESTARTSYS ||
542 		     retval == -ERESTARTNOINTR ||
543 		     retval == -ERESTARTNOHAND ||
544 		     retval == -ERESTART_RESTARTBLOCK))
545 		retval = -EINTR;
546 
547 	return retval;
548 }
549 
550 static inline void binder_lock(const char *tag)
551 {
552 	trace_binder_lock(tag);
553 	mutex_lock(&binder_main_lock);
554 	trace_binder_locked(tag);
555 }
556 
557 static inline void binder_unlock(const char *tag)
558 {
559 	trace_binder_unlock(tag);
560 	mutex_unlock(&binder_main_lock);
561 }
562 
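/*
 * At this revision a single global mutex still serializes almost all
 * binder state; these wrappers just add trace events around it. The
 * usual call-site pattern is:
 *
 *	binder_lock(__func__);
 *	...operate on binder data structures...
 *	binder_unlock(__func__);
 */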
563 static void binder_set_nice(long nice)
564 {
565 	long min_nice;
566 
567 	if (can_nice(current, nice)) {
568 		set_user_nice(current, nice);
569 		return;
570 	}
571 	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
572 	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
573 		     "%d: nice value %ld not allowed, use %ld instead\n",
574 		      current->pid, nice, min_nice);
575 	set_user_nice(current, min_nice);
576 	if (min_nice <= MAX_NICE)
577 		return;
578 	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
579 }
580 
581 static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
582 				       struct binder_buffer *buffer)
583 {
584 	if (list_is_last(&buffer->entry, &alloc->buffers))
585 		return alloc->buffer +
586 		       alloc->buffer_size - (void *)buffer->data;
587 	return (size_t)list_entry(buffer->entry.next,
588 			  struct binder_buffer, entry) - (size_t)buffer->data;
589 }
590 
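/*
 * Buffer sizes are implicit: a buffer's usable space runs from the end
 * of its header to the next binder_buffer in the address-ordered
 * 'buffers' list, or to the end of the mapping for the last entry.
 * For adjacent headers b1 < b2 this reduces to (sketch):
 *
 *	size(b1) == (char *)b2 - (char *)b1->data
 */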
591 static void binder_insert_free_buffer(struct binder_alloc *alloc,
592 				      struct binder_buffer *new_buffer)
593 {
594 	struct rb_node **p = &alloc->free_buffers.rb_node;
595 	struct rb_node *parent = NULL;
596 	struct binder_buffer *buffer;
597 	size_t buffer_size;
598 	size_t new_buffer_size;
599 
600 	BUG_ON(!new_buffer->free);
601 
602 	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
603 
604 	binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC,
605 		     "%d: add free buffer, size %zd, at %pK\n",
606 		      alloc->pid, new_buffer_size, new_buffer);
607 
608 	while (*p) {
609 		parent = *p;
610 		buffer = rb_entry(parent, struct binder_buffer, rb_node);
611 		BUG_ON(!buffer->free);
612 
613 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
614 
615 		if (new_buffer_size < buffer_size)
616 			p = &parent->rb_left;
617 		else
618 			p = &parent->rb_right;
619 	}
620 	rb_link_node(&new_buffer->rb_node, parent, p);
621 	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
622 }
623 
624 static void binder_insert_allocated_buffer(struct binder_alloc *alloc,
625 					   struct binder_buffer *new_buffer)
626 {
627 	struct rb_node **p = &alloc->allocated_buffers.rb_node;
628 	struct rb_node *parent = NULL;
629 	struct binder_buffer *buffer;
630 
631 	BUG_ON(new_buffer->free);
632 
633 	while (*p) {
634 		parent = *p;
635 		buffer = rb_entry(parent, struct binder_buffer, rb_node);
636 		BUG_ON(buffer->free);
637 
638 		if (new_buffer < buffer)
639 			p = &parent->rb_left;
640 		else if (new_buffer > buffer)
641 			p = &parent->rb_right;
642 		else
643 			BUG();
644 	}
645 	rb_link_node(&new_buffer->rb_node, parent, p);
646 	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
647 }
648 
649 static struct binder_buffer *binder_alloc_buffer_lookup_locked(
650 		struct binder_alloc *alloc,
651 		uintptr_t user_ptr)
652 {
653 	struct rb_node *n = alloc->allocated_buffers.rb_node;
654 	struct binder_buffer *buffer;
655 	struct binder_buffer *kern_ptr;
656 
657 	kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
658 		- offsetof(struct binder_buffer, data));
659 
660 	while (n) {
661 		buffer = rb_entry(n, struct binder_buffer, rb_node);
662 		BUG_ON(buffer->free);
663 
664 		if (kern_ptr < buffer)
665 			n = n->rb_left;
666 		else if (kern_ptr > buffer)
667 			n = n->rb_right;
668 		else
669 			return buffer;
670 	}
671 	return NULL;
672 }
673 
674 struct binder_buffer *binder_alloc_buffer_lookup(struct binder_alloc *alloc,
675 						 uintptr_t user_ptr)
676 {
677 	struct binder_buffer *buffer;
678 
679 	mutex_lock(&alloc->mutex);
680 	buffer = binder_alloc_buffer_lookup_locked(alloc, user_ptr);
681 	mutex_unlock(&alloc->mutex);
682 	return buffer;
683 }
684 
685 static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
686 				    void *start, void *end,
687 				    struct vm_area_struct *vma)
688 {
689 	void *page_addr;
690 	unsigned long user_page_addr;
691 	struct page **page;
692 	struct mm_struct *mm;
693 
694 	binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC,
695 		     "%d: %s pages %pK-%pK\n", alloc->pid,
696 		     allocate ? "allocate" : "free", start, end);
697 
698 	if (end <= start)
699 		return 0;
700 
701 	trace_binder_update_page_range(alloc, allocate, start, end);
702 
703 	if (vma)
704 		mm = NULL;
705 	else
706 		mm = get_task_mm(alloc->tsk);
707 
708 	if (mm) {
709 		down_write(&mm->mmap_sem);
710 		vma = alloc->vma;
711 		if (vma && mm != alloc->vma_vm_mm) {
712 			pr_err("%d: vma mm and task mm mismatch\n",
713 				alloc->pid);
714 			vma = NULL;
715 		}
716 	}
717 
718 	if (allocate == 0)
719 		goto free_range;
720 
721 	if (vma == NULL) {
722 		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
723 			alloc->pid);
724 		goto err_no_vma;
725 	}
726 
727 	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
728 		int ret;
729 
730 		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
731 
732 		BUG_ON(*page);
733 		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
734 		if (*page == NULL) {
735 			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
736 				alloc->pid, page_addr);
737 			goto err_alloc_page_failed;
738 		}
739 		ret = map_kernel_range_noflush((unsigned long)page_addr,
740 					PAGE_SIZE, PAGE_KERNEL, page);
741 		flush_cache_vmap((unsigned long)page_addr,
742 				(unsigned long)page_addr + PAGE_SIZE);
743 		if (ret != 1) {
744 			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
745 			       alloc->pid, page_addr);
746 			goto err_map_kernel_failed;
747 		}
748 		user_page_addr =
749 			(uintptr_t)page_addr + alloc->user_buffer_offset;
750 		ret = vm_insert_page(vma, user_page_addr, page[0]);
751 		if (ret) {
752 			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
753 			       alloc->pid, user_page_addr);
754 			goto err_vm_insert_page_failed;
755 		}
756 		/* vm_insert_page does not seem to increment the refcount */
757 	}
758 	if (mm) {
759 		up_write(&mm->mmap_sem);
760 		mmput(mm);
761 	}
762 	return 0;
763 
764 free_range:
765 	for (page_addr = end - PAGE_SIZE; page_addr >= start;
766 	     page_addr -= PAGE_SIZE) {
767 		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
768 		if (vma)
769 			zap_page_range(vma, (uintptr_t)page_addr +
770 				alloc->user_buffer_offset, PAGE_SIZE);
771 err_vm_insert_page_failed:
772 		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
773 err_map_kernel_failed:
774 		__free_page(*page);
775 		*page = NULL;
776 err_alloc_page_failed:
777 		;
778 	}
779 err_no_vma:
780 	if (mm) {
781 		up_write(&mm->mmap_sem);
782 		mmput(mm);
783 	}
784 	return -ENOMEM;
785 }
786 
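/*
 * binder_update_page_range() is used in matched pairs: allocate == 1
 * backs a page-aligned range with newly allocated pages mapped into
 * both the kernel and the target's user mapping, and allocate == 0
 * (entering at free_range above) tears both mappings down and frees
 * the pages again, e.g. (sketch):
 *
 *	binder_update_page_range(alloc, 1, page_start, page_end, NULL);
 *	...
 *	binder_update_page_range(alloc, 0, page_start, page_end, NULL);
 */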
787 static struct binder_buffer *binder_alloc_new_buf_locked(
788 		struct binder_alloc *alloc, size_t data_size,
789 		size_t offsets_size, size_t extra_buffers_size, int is_async)
790 {
791 	struct rb_node *n = alloc->free_buffers.rb_node;
792 	struct binder_buffer *buffer;
793 	size_t buffer_size;
794 	struct rb_node *best_fit = NULL;
795 	void *has_page_addr;
796 	void *end_page_addr;
797 	size_t size, data_offsets_size;
798 
799 	if (alloc->vma == NULL) {
800 		pr_err("%d: binder_alloc_buf, no vma\n",
801 		       alloc->pid);
802 		return NULL;
803 	}
804 
805 	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
806 		ALIGN(offsets_size, sizeof(void *));
807 
808 	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
809 		binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC,
810 				"%d: got transaction with invalid size %zd-%zd\n",
811 				alloc->pid, data_size, offsets_size);
812 		return NULL;
813 	}
814 	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
815 	if (size < data_offsets_size || size < extra_buffers_size) {
816 		binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC,
817 				"%d: got transaction with invalid extra_buffers_size %zd\n",
818 				alloc->pid, extra_buffers_size);
819 		return NULL;
820 	}
821 	if (is_async &&
822 	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
823 		binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC,
824 			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
825 			      alloc->pid, size);
826 		return NULL;
827 	}
828 
829 	while (n) {
830 		buffer = rb_entry(n, struct binder_buffer, rb_node);
831 		BUG_ON(!buffer->free);
832 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
833 
834 		if (size < buffer_size) {
835 			best_fit = n;
836 			n = n->rb_left;
837 		} else if (size > buffer_size)
838 			n = n->rb_right;
839 		else {
840 			best_fit = n;
841 			break;
842 		}
843 	}
844 	if (best_fit == NULL) {
845 		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
846 			alloc->pid, size);
847 		return NULL;
848 	}
849 	if (n == NULL) {
850 		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
851 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
852 	}
853 
854 	binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC,
855 		     "%d: %s size %zd got buffer %pK size %zd\n",
856 		      alloc->pid, __func__, size, buffer, buffer_size);
857 
858 	has_page_addr =
859 		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
860 	if (n == NULL) {
861 		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
862 			buffer_size = size; /* no room for other buffers */
863 		else
864 			buffer_size = size + sizeof(struct binder_buffer);
865 	}
866 	end_page_addr =
867 		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
868 	if (end_page_addr > has_page_addr)
869 		end_page_addr = has_page_addr;
870 	if (binder_update_page_range(alloc, 1,
871 	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
872 		return NULL;
873 
874 	rb_erase(best_fit, &alloc->free_buffers);
875 	buffer->free = 0;
876 	binder_insert_allocated_buffer(alloc, buffer);
877 	if (buffer_size != size) {
878 		struct binder_buffer *new_buffer = (void *)buffer->data + size;
879 
880 		list_add(&new_buffer->entry, &buffer->entry);
881 		new_buffer->free = 1;
882 		binder_insert_free_buffer(alloc, new_buffer);
883 	}
884 	binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC,
885 		     "%d: %s size %zd got %pK\n",
886 		      alloc->pid, __func__, size, buffer);
887 	buffer->data_size = data_size;
888 	buffer->offsets_size = offsets_size;
889 	buffer->async_transaction = is_async;
890 	buffer->extra_buffers_size = extra_buffers_size;
891 	if (is_async) {
892 		alloc->free_async_space -= size + sizeof(struct binder_buffer);
893 		binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC_ASYNC,
894 			     "%d: binder_alloc_buf size %zd async free %zd\n",
895 			      alloc->pid, size, alloc->free_async_space);
896 	}
897 
898 	return buffer;
899 }
900 
901 struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
902 					   size_t data_size,
903 					   size_t offsets_size,
904 					   size_t extra_buffers_size,
905 					   int is_async)
906 {
907 	struct binder_buffer *buffer;
908 
909 	mutex_lock(&alloc->mutex);
910 	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
911 					     extra_buffers_size, is_async);
912 	mutex_unlock(&alloc->mutex);
913 	return buffer;
914 }
915 
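/*
 * Sketch of the typical caller, a transaction being copied into the
 * target process (error handling elided; see binder_transaction()):
 *
 *	t->buffer = binder_alloc_new_buf(&target_proc->alloc,
 *			tr->data_size, tr->offsets_size,
 *			extra_buffers_size, !!(t->flags & TF_ONE_WAY));
 *	if (t->buffer == NULL)
 *		...fail the transaction with BR_FAILED_REPLY...
 */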
916 static void *buffer_start_page(struct binder_buffer *buffer)
917 {
918 	return (void *)((uintptr_t)buffer & PAGE_MASK);
919 }
920 
921 static void *buffer_end_page(struct binder_buffer *buffer)
922 {
923 	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
924 }
925 
926 static void binder_delete_free_buffer(struct binder_alloc *alloc,
927 				      struct binder_buffer *buffer)
928 {
929 	struct binder_buffer *prev, *next = NULL;
930 	int free_page_end = 1;
931 	int free_page_start = 1;
932 
933 	BUG_ON(alloc->buffers.next == &buffer->entry);
934 	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
935 	BUG_ON(!prev->free);
936 	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
937 		free_page_start = 0;
938 		if (buffer_end_page(prev) == buffer_end_page(buffer))
939 			free_page_end = 0;
940 		binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC,
941 			     "%d: merge free, buffer %pK share page with %pK\n",
942 			      alloc->pid, buffer, prev);
943 	}
944 
945 	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
946 		next = list_entry(buffer->entry.next,
947 				  struct binder_buffer, entry);
948 		if (buffer_start_page(next) == buffer_end_page(buffer)) {
949 			free_page_end = 0;
950 			if (buffer_start_page(next) ==
951 			    buffer_start_page(buffer))
952 				free_page_start = 0;
953 			binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC,
954 				     "%d: merge free, buffer %pK share page with %pK\n",
955 				      alloc->pid, buffer, next);
956 		}
957 	}
958 	list_del(&buffer->entry);
959 	if (free_page_start || free_page_end) {
960 		binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC,
961 			     "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
962 			     alloc->pid, buffer, free_page_start ? "" : " end",
963 			     free_page_end ? "" : " start", prev, next);
964 		binder_update_page_range(alloc, 0, free_page_start ?
965 			buffer_start_page(buffer) : buffer_end_page(buffer),
966 			(free_page_end ? buffer_end_page(buffer) :
967 			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
968 	}
969 }
970 
971 static void binder_free_buf_locked(struct binder_alloc *alloc,
972 				   struct binder_buffer *buffer)
973 {
974 	size_t size, buffer_size;
975 
976 	buffer_size = binder_alloc_buffer_size(alloc, buffer);
977 
978 	size = ALIGN(buffer->data_size, sizeof(void *)) +
979 		ALIGN(buffer->offsets_size, sizeof(void *)) +
980 		ALIGN(buffer->extra_buffers_size, sizeof(void *));
981 
982 	binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC,
983 		     "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
984 		      alloc->pid, buffer, size, buffer_size);
985 
986 	BUG_ON(buffer->free);
987 	BUG_ON(size > buffer_size);
988 	BUG_ON(buffer->transaction != NULL);
989 	BUG_ON((void *)buffer < alloc->buffer);
990 	BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);
991 
992 	if (buffer->async_transaction) {
993 		alloc->free_async_space += size + sizeof(struct binder_buffer);
994 
995 		binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC_ASYNC,
996 			     "%d: binder_free_buf size %zd async free %zd\n",
997 			      alloc->pid, size, alloc->free_async_space);
998 	}
999 
1000 	binder_update_page_range(alloc, 0,
1001 		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
1002 		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
1003 		NULL);
1004 
1005 	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
1006 	buffer->free = 1;
1007 	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
1008 		struct binder_buffer *next = list_entry(buffer->entry.next,
1009 						struct binder_buffer, entry);
1010 
1011 		if (next->free) {
1012 			rb_erase(&next->rb_node, &alloc->free_buffers);
1013 			binder_delete_free_buffer(alloc, next);
1014 		}
1015 	}
1016 	if (alloc->buffers.next != &buffer->entry) {
1017 		struct binder_buffer *prev = list_entry(buffer->entry.prev,
1018 						struct binder_buffer, entry);
1019 
1020 		if (prev->free) {
1021 			binder_delete_free_buffer(alloc, buffer);
1022 			rb_erase(&prev->rb_node, &alloc->free_buffers);
1023 			buffer = prev;
1024 		}
1025 	}
1026 	binder_insert_free_buffer(alloc, buffer);
1027 }
1028 
1029 void binder_alloc_free_buf(struct binder_alloc *alloc,
1030 			    struct binder_buffer *buffer)
1031 {
1032 	mutex_lock(&alloc->mutex);
1033 	binder_free_buf_locked(alloc, buffer);
1034 	mutex_unlock(&alloc->mutex);
1035 }
1036 
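/*
 * binder_alloc_free_buf() is the inverse of binder_alloc_new_buf(): the
 * buffer rejoins the free tree, free neighbours are coalesced, and any
 * pages no longer backing a live buffer are unmapped. Sketch of the
 * BC_FREE_BUFFER path that ends up here (checks elided):
 *
 *	buffer = binder_alloc_buffer_lookup(&proc->alloc, data_ptr);
 *	if (buffer && buffer->allow_user_free)
 *		binder_alloc_free_buf(&proc->alloc, buffer);
 */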
1037 static struct binder_node *binder_get_node(struct binder_proc *proc,
1038 					   binder_uintptr_t ptr)
1039 {
1040 	struct rb_node *n = proc->nodes.rb_node;
1041 	struct binder_node *node;
1042 
1043 	while (n) {
1044 		node = rb_entry(n, struct binder_node, rb_node);
1045 
1046 		if (ptr < node->ptr)
1047 			n = n->rb_left;
1048 		else if (ptr > node->ptr)
1049 			n = n->rb_right;
1050 		else
1051 			return node;
1052 	}
1053 	return NULL;
1054 }
1055 
1056 static struct binder_node *binder_new_node(struct binder_proc *proc,
1057 					   binder_uintptr_t ptr,
1058 					   binder_uintptr_t cookie)
1059 {
1060 	struct rb_node **p = &proc->nodes.rb_node;
1061 	struct rb_node *parent = NULL;
1062 	struct binder_node *node;
1063 
1064 	while (*p) {
1065 		parent = *p;
1066 		node = rb_entry(parent, struct binder_node, rb_node);
1067 
1068 		if (ptr < node->ptr)
1069 			p = &(*p)->rb_left;
1070 		else if (ptr > node->ptr)
1071 			p = &(*p)->rb_right;
1072 		else
1073 			return NULL;
1074 	}
1075 
1076 	node = kzalloc(sizeof(*node), GFP_KERNEL);
1077 	if (node == NULL)
1078 		return NULL;
1079 	binder_stats_created(BINDER_STAT_NODE);
1080 	rb_link_node(&node->rb_node, parent, p);
1081 	rb_insert_color(&node->rb_node, &proc->nodes);
1082 	node->debug_id = ++binder_last_id;
1083 	node->proc = proc;
1084 	node->ptr = ptr;
1085 	node->cookie = cookie;
1086 	node->work.type = BINDER_WORK_NODE;
1087 	INIT_LIST_HEAD(&node->work.entry);
1088 	INIT_LIST_HEAD(&node->async_todo);
1089 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1090 		     "%d:%d node %d u%016llx c%016llx created\n",
1091 		     proc->pid, current->pid, node->debug_id,
1092 		     (u64)node->ptr, (u64)node->cookie);
1093 	return node;
1094 }
1095 
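/*
 * Nodes are keyed by the userspace pointer the owning process sent;
 * lookup and creation combine into the usual pattern (this is what
 * binder_translate_binder() does):
 *
 *	node = binder_get_node(proc, fp->binder);
 *	if (!node)
 *		node = binder_new_node(proc, fp->binder, fp->cookie);
 */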
1096 static int binder_inc_node(struct binder_node *node, int strong, int internal,
1097 			   struct list_head *target_list)
1098 {
1099 	if (strong) {
1100 		if (internal) {
1101 			if (target_list == NULL &&
1102 			    node->internal_strong_refs == 0 &&
1103 			    !(node->proc &&
1104 			      node == node->proc->context->binder_context_mgr_node &&
1105 			      node->has_strong_ref)) {
1106 				pr_err("invalid inc strong node for %d\n",
1107 					node->debug_id);
1108 				return -EINVAL;
1109 			}
1110 			node->internal_strong_refs++;
1111 		} else
1112 			node->local_strong_refs++;
1113 		if (!node->has_strong_ref && target_list) {
1114 			list_del_init(&node->work.entry);
1115 			list_add_tail(&node->work.entry, target_list);
1116 		}
1117 	} else {
1118 		if (!internal)
1119 			node->local_weak_refs++;
1120 		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1121 			if (target_list == NULL) {
1122 				pr_err("invalid inc weak node for %d\n",
1123 					node->debug_id);
1124 				return -EINVAL;
1125 			}
1126 			list_add_tail(&node->work.entry, target_list);
1127 		}
1128 	}
1129 	return 0;
1130 }
1131 
1132 static int binder_dec_node(struct binder_node *node, int strong, int internal)
1133 {
1134 	if (strong) {
1135 		if (internal)
1136 			node->internal_strong_refs--;
1137 		else
1138 			node->local_strong_refs--;
1139 		if (node->local_strong_refs || node->internal_strong_refs)
1140 			return 0;
1141 	} else {
1142 		if (!internal)
1143 			node->local_weak_refs--;
1144 		if (node->local_weak_refs || !hlist_empty(&node->refs))
1145 			return 0;
1146 	}
1147 	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
1148 		if (list_empty(&node->work.entry)) {
1149 			list_add_tail(&node->work.entry, &node->proc->todo);
1150 			wake_up_interruptible(&node->proc->wait);
1151 		}
1152 	} else {
1153 		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1154 		    !node->local_weak_refs) {
1155 			list_del_init(&node->work.entry);
1156 			if (node->proc) {
1157 				rb_erase(&node->rb_node, &node->proc->nodes);
1158 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1159 					     "refless node %d deleted\n",
1160 					     node->debug_id);
1161 			} else {
1162 				hlist_del(&node->dead_node);
1163 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1164 					     "dead node %d deleted\n",
1165 					     node->debug_id);
1166 			}
1167 			kfree(node);
1168 			binder_stats_deleted(BINDER_STAT_NODE);
1169 		}
1170 	}
1171 
1172 	return 0;
1173 }
1174 
1175 
1176 static struct binder_ref *binder_get_ref(struct binder_proc *proc,
1177 					 u32 desc, bool need_strong_ref)
1178 {
1179 	struct rb_node *n = proc->refs_by_desc.rb_node;
1180 	struct binder_ref *ref;
1181 
1182 	while (n) {
1183 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1184 
1185 		if (desc < ref->desc) {
1186 			n = n->rb_left;
1187 		} else if (desc > ref->desc) {
1188 			n = n->rb_right;
1189 		} else if (need_strong_ref && !ref->strong) {
1190 			binder_user_error("tried to use weak ref as strong ref\n");
1191 			return NULL;
1192 		} else {
1193 			return ref;
1194 		}
1195 	}
1196 	return NULL;
1197 }
1198 
1199 static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
1200 						  struct binder_node *node)
1201 {
1202 	struct rb_node *n;
1203 	struct rb_node **p = &proc->refs_by_node.rb_node;
1204 	struct rb_node *parent = NULL;
1205 	struct binder_ref *ref, *new_ref;
1206 	struct binder_context *context = proc->context;
1207 
1208 	while (*p) {
1209 		parent = *p;
1210 		ref = rb_entry(parent, struct binder_ref, rb_node_node);
1211 
1212 		if (node < ref->node)
1213 			p = &(*p)->rb_left;
1214 		else if (node > ref->node)
1215 			p = &(*p)->rb_right;
1216 		else
1217 			return ref;
1218 	}
1219 	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1220 	if (new_ref == NULL)
1221 		return NULL;
1222 	binder_stats_created(BINDER_STAT_REF);
1223 	new_ref->debug_id = ++binder_last_id;
1224 	new_ref->proc = proc;
1225 	new_ref->node = node;
1226 	rb_link_node(&new_ref->rb_node_node, parent, p);
1227 	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1228 
1229 	new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1230 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1231 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1232 		if (ref->desc > new_ref->desc)
1233 			break;
1234 		new_ref->desc = ref->desc + 1;
1235 	}
1236 
1237 	p = &proc->refs_by_desc.rb_node;
1238 	while (*p) {
1239 		parent = *p;
1240 		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1241 
1242 		if (new_ref->desc < ref->desc)
1243 			p = &(*p)->rb_left;
1244 		else if (new_ref->desc > ref->desc)
1245 			p = &(*p)->rb_right;
1246 		else
1247 			BUG();
1248 	}
1249 	rb_link_node(&new_ref->rb_node_desc, parent, p);
1250 	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1251 	if (node) {
1252 		hlist_add_head(&new_ref->node_entry, &node->refs);
1253 
1254 		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1255 			     "%d new ref %d desc %d for node %d\n",
1256 			      proc->pid, new_ref->debug_id, new_ref->desc,
1257 			      node->debug_id);
1258 	} else {
1259 		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1260 			     "%d new ref %d desc %d for dead node\n",
1261 			      proc->pid, new_ref->debug_id, new_ref->desc);
1262 	}
1263 	return new_ref;
1264 }
1265 
1266 static void binder_delete_ref(struct binder_ref *ref)
1267 {
1268 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1269 		     "%d delete ref %d desc %d for node %d\n",
1270 		      ref->proc->pid, ref->debug_id, ref->desc,
1271 		      ref->node->debug_id);
1272 
1273 	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1274 	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1275 	if (ref->strong)
1276 		binder_dec_node(ref->node, 1, 1);
1277 	hlist_del(&ref->node_entry);
1278 	binder_dec_node(ref->node, 0, 1);
1279 	if (ref->death) {
1280 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1281 			     "%d delete ref %d desc %d has death notification\n",
1282 			      ref->proc->pid, ref->debug_id, ref->desc);
1283 		list_del(&ref->death->work.entry);
1284 		kfree(ref->death);
1285 		binder_stats_deleted(BINDER_STAT_DEATH);
1286 	}
1287 	kfree(ref);
1288 	binder_stats_deleted(BINDER_STAT_REF);
1289 }
1290 
1291 static int binder_inc_ref(struct binder_ref *ref, int strong,
1292 			  struct list_head *target_list)
1293 {
1294 	int ret;
1295 
1296 	if (strong) {
1297 		if (ref->strong == 0) {
1298 			ret = binder_inc_node(ref->node, 1, 1, target_list);
1299 			if (ret)
1300 				return ret;
1301 		}
1302 		ref->strong++;
1303 	} else {
1304 		if (ref->weak == 0) {
1305 			ret = binder_inc_node(ref->node, 0, 1, target_list);
1306 			if (ret)
1307 				return ret;
1308 		}
1309 		ref->weak++;
1310 	}
1311 	return 0;
1312 }
1313 
1314 
1315 static int binder_dec_ref(struct binder_ref *ref, int strong)
1316 {
1317 	if (strong) {
1318 		if (ref->strong == 0) {
1319 			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1320 					  ref->proc->pid, ref->debug_id,
1321 					  ref->desc, ref->strong, ref->weak);
1322 			return -EINVAL;
1323 		}
1324 		ref->strong--;
1325 		if (ref->strong == 0) {
1326 			int ret;
1327 
1328 			ret = binder_dec_node(ref->node, strong, 1);
1329 			if (ret)
1330 				return ret;
1331 		}
1332 	} else {
1333 		if (ref->weak == 0) {
1334 			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1335 					  ref->proc->pid, ref->debug_id,
1336 					  ref->desc, ref->strong, ref->weak);
1337 			return -EINVAL;
1338 		}
1339 		ref->weak--;
1340 	}
1341 	if (ref->strong == 0 && ref->weak == 0)
1342 		binder_delete_ref(ref);
1343 	return 0;
1344 }
1345 
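/*
 * Once both counts reach zero the ref is deleted outright, and strong
 * decrements propagate to the node through binder_dec_node(). Sketch of
 * the BC_RELEASE handling that lands here:
 *
 *	ref = binder_get_ref(proc, target, true);
 *	if (ref)
 *		binder_dec_ref(ref, 1);
 */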
1346 static void binder_pop_transaction(struct binder_thread *target_thread,
1347 				   struct binder_transaction *t)
1348 {
1349 	if (target_thread) {
1350 		BUG_ON(target_thread->transaction_stack != t);
1351 		BUG_ON(target_thread->transaction_stack->from != target_thread);
1352 		target_thread->transaction_stack =
1353 			target_thread->transaction_stack->from_parent;
1354 		t->from = NULL;
1355 	}
1356 	t->need_reply = 0;
1357 	if (t->buffer)
1358 		t->buffer->transaction = NULL;
1359 	kfree(t);
1360 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
1361 }
1362 
1363 static void binder_send_failed_reply(struct binder_transaction *t,
1364 				     uint32_t error_code)
1365 {
1366 	struct binder_thread *target_thread;
1367 	struct binder_transaction *next;
1368 
1369 	BUG_ON(t->flags & TF_ONE_WAY);
1370 	while (1) {
1371 		target_thread = t->from;
1372 		if (target_thread) {
1373 			if (target_thread->return_error != BR_OK &&
1374 			   target_thread->return_error2 == BR_OK) {
1375 				target_thread->return_error2 =
1376 					target_thread->return_error;
1377 				target_thread->return_error = BR_OK;
1378 			}
1379 			if (target_thread->return_error == BR_OK) {
1380 				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1381 					     "send failed reply for transaction %d to %d:%d\n",
1382 					      t->debug_id,
1383 					      target_thread->proc->pid,
1384 					      target_thread->pid);
1385 
1386 				binder_pop_transaction(target_thread, t);
1387 				target_thread->return_error = error_code;
1388 				wake_up_interruptible(&target_thread->wait);
1389 			} else {
1390 				pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
1391 					target_thread->proc->pid,
1392 					target_thread->pid,
1393 					target_thread->return_error);
1394 			}
1395 			return;
1396 		}
1397 		next = t->from_parent;
1398 
1399 		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1400 			     "send failed reply for transaction %d, target dead\n",
1401 			     t->debug_id);
1402 
1403 		binder_pop_transaction(target_thread, t);
1404 		if (next == NULL) {
1405 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
1406 				     "reply failed, no target thread at root\n");
1407 			return;
1408 		}
1409 		t = next;
1410 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1411 			     "reply failed, no target thread -- retry %d\n",
1412 			      t->debug_id);
1413 	}
1414 }
1415 
1416 /**
1417  * binder_validate_object() - checks for a valid metadata object in a buffer.
1418  * @buffer:	binder_buffer that we're parsing.
1419  * @offset:	offset in the buffer at which to validate an object.
1420  *
1421  * Return:	If there's a valid metadata object at @offset in @buffer, the
1422  *		size of that object. Otherwise, it returns zero.
1423  */
1424 static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
1425 {
1426 	/* Check if we can read a header first */
1427 	struct binder_object_header *hdr;
1428 	size_t object_size = 0;
1429 
1430 	if (offset > buffer->data_size - sizeof(*hdr) ||
1431 	    buffer->data_size < sizeof(*hdr) ||
1432 	    !IS_ALIGNED(offset, sizeof(u32)))
1433 		return 0;
1434 
1435 	/* Ok, now see if we can read a complete object. */
1436 	hdr = (struct binder_object_header *)(buffer->data + offset);
1437 	switch (hdr->type) {
1438 	case BINDER_TYPE_BINDER:
1439 	case BINDER_TYPE_WEAK_BINDER:
1440 	case BINDER_TYPE_HANDLE:
1441 	case BINDER_TYPE_WEAK_HANDLE:
1442 		object_size = sizeof(struct flat_binder_object);
1443 		break;
1444 	case BINDER_TYPE_FD:
1445 		object_size = sizeof(struct binder_fd_object);
1446 		break;
1447 	case BINDER_TYPE_PTR:
1448 		object_size = sizeof(struct binder_buffer_object);
1449 		break;
1450 	case BINDER_TYPE_FDA:
1451 		object_size = sizeof(struct binder_fd_array_object);
1452 		break;
1453 	default:
1454 		return 0;
1455 	}
1456 	if (offset <= buffer->data_size - object_size &&
1457 	    buffer->data_size >= object_size)
1458 		return object_size;
1459 	else
1460 		return 0;
1461 }
1462 
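/*
 * Sketch of how the validator above is driven: the offsets array names
 * every object embedded in the data buffer, and each offset must yield
 * a non-zero object size (compare binder_transaction_buffer_release()):
 *
 *	for (offp = off_start; offp < off_end; offp++) {
 *		size_t object_size = binder_validate_object(buffer, *offp);
 *
 *		if (object_size == 0)
 *			...reject: bad object at *offp...
 *	}
 */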
1463 /**
1464  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1465  * @b:		binder_buffer containing the object
1466  * @index:	index in offset array at which the binder_buffer_object is
1467  *		located
1468  * @start:	points to the start of the offset array
1469  * @num_valid:	the number of valid offsets in the offset array
1470  *
1471  * Return:	If @index is within the valid range of the offset array
1472  *		described by @start and @num_valid, and if there's a valid
1473  *		binder_buffer_object at the offset found in index @index
1474  *		of the offset array, that object is returned. Otherwise,
1475  *		%NULL is returned.
1476  *		Note that the offset found in index @index itself is not
1477  *		verified; this function assumes that @num_valid elements
1478  *		from @start were previously verified to have valid offsets.
1479  */
1480 static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
1481 							binder_size_t index,
1482 							binder_size_t *start,
1483 							binder_size_t num_valid)
1484 {
1485 	struct binder_buffer_object *buffer_obj;
1486 	binder_size_t *offp;
1487 
1488 	if (index >= num_valid)
1489 		return NULL;
1490 
1491 	offp = start + index;
1492 	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
1493 	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
1494 		return NULL;
1495 
1496 	return buffer_obj;
1497 }
1498 
1499 /**
1500  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1501  * @b:			transaction buffer
1502  * @objects_start:	start of objects buffer
1503  * @buffer:		binder_buffer_object in which to fix up
1504  * @offset:		start offset in @buffer to fix up
1505  * @last_obj:		last binder_buffer_object that we fixed up in
1506  * @last_min_offset:	minimum fixup offset in @last_obj
1507  *
1508  * Return:		%true if a fixup in buffer @buffer at offset @offset is
1509  *			allowed.
1510  *
1511  * For safety reasons, we only allow fixups inside a buffer to happen
1512  * at increasing offsets; additionally, we only allow fixup on the last
1513  * buffer object that was verified, or one of its parents.
1514  *
1515  * Example of what is allowed:
1516  *
1517  * A
1518  *   B (parent = A, offset = 0)
1519  *   C (parent = A, offset = 16)
1520  *     D (parent = C, offset = 0)
1521  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1522  *
1523  * Examples of what is not allowed:
1524  *
1525  * Decreasing offsets within the same parent:
1526  * A
1527  *   C (parent = A, offset = 16)
1528  *   B (parent = A, offset = 0) // decreasing offset within A
1529  *
1530  * Referring to a parent that wasn't the last object or any of its parents:
1531  * A
1532  *   B (parent = A, offset = 0)
1533  *   C (parent = A, offset = 0)
1534  *   C (parent = A, offset = 16)
1535  *     D (parent = B, offset = 0) // B is not A or any of A's parents
1536  */
1537 static bool binder_validate_fixup(struct binder_buffer *b,
1538 				  binder_size_t *objects_start,
1539 				  struct binder_buffer_object *buffer,
1540 				  binder_size_t fixup_offset,
1541 				  struct binder_buffer_object *last_obj,
1542 				  binder_size_t last_min_offset)
1543 {
1544 	if (!last_obj) {
1545 		/* No verified parent buffer to fix up in */
1546 		return false;
1547 	}
1548 
1549 	while (last_obj != buffer) {
1550 		/*
1551 		 * Safe to retrieve the parent of last_obj, since it
1552 		 * was already previously verified by the driver.
1553 		 */
1554 		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1555 			return false;
1556 		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
1557 		last_obj = (struct binder_buffer_object *)
1558 			(b->data + *(objects_start + last_obj->parent));
1559 	}
1560 	return (fixup_offset >= last_min_offset);
1561 }
1562 
1563 static void binder_transaction_buffer_release(struct binder_proc *proc,
1564 					      struct binder_buffer *buffer,
1565 					      binder_size_t *failed_at)
1566 {
1567 	binder_size_t *offp, *off_start, *off_end;
1568 	int debug_id = buffer->debug_id;
1569 
1570 	binder_debug(BINDER_DEBUG_TRANSACTION,
1571 		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
1572 		     proc->pid, buffer->debug_id,
1573 		     buffer->data_size, buffer->offsets_size, failed_at);
1574 
1575 	if (buffer->target_node)
1576 		binder_dec_node(buffer->target_node, 1, 0);
1577 
1578 	off_start = (binder_size_t *)(buffer->data +
1579 				      ALIGN(buffer->data_size, sizeof(void *)));
1580 	if (failed_at)
1581 		off_end = failed_at;
1582 	else
1583 		off_end = (void *)off_start + buffer->offsets_size;
1584 	for (offp = off_start; offp < off_end; offp++) {
1585 		struct binder_object_header *hdr;
1586 		size_t object_size = binder_validate_object(buffer, *offp);
1587 
1588 		if (object_size == 0) {
1589 			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
1590 			       debug_id, (u64)*offp, buffer->data_size);
1591 			continue;
1592 		}
1593 		hdr = (struct binder_object_header *)(buffer->data + *offp);
1594 		switch (hdr->type) {
1595 		case BINDER_TYPE_BINDER:
1596 		case BINDER_TYPE_WEAK_BINDER: {
1597 			struct flat_binder_object *fp;
1598 			struct binder_node *node;
1599 
1600 			fp = to_flat_binder_object(hdr);
1601 			node = binder_get_node(proc, fp->binder);
1602 			if (node == NULL) {
1603 				pr_err("transaction release %d bad node %016llx\n",
1604 				       debug_id, (u64)fp->binder);
1605 				break;
1606 			}
1607 			binder_debug(BINDER_DEBUG_TRANSACTION,
1608 				     "        node %d u%016llx\n",
1609 				     node->debug_id, (u64)node->ptr);
1610 			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
1611 					0);
1612 		} break;
1613 		case BINDER_TYPE_HANDLE:
1614 		case BINDER_TYPE_WEAK_HANDLE: {
1615 			struct flat_binder_object *fp;
1616 			struct binder_ref *ref;
1617 
1618 			fp = to_flat_binder_object(hdr);
1619 			ref = binder_get_ref(proc, fp->handle,
1620 					     hdr->type == BINDER_TYPE_HANDLE);
1621 			if (ref == NULL) {
1622 				pr_err("transaction release %d bad handle %d\n",
1623 				 debug_id, fp->handle);
1624 				break;
1625 			}
1626 			binder_debug(BINDER_DEBUG_TRANSACTION,
1627 				     "        ref %d desc %d (node %d)\n",
1628 				     ref->debug_id, ref->desc, ref->node->debug_id);
1629 			binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
1630 		} break;
1631 
1632 		case BINDER_TYPE_FD: {
1633 			struct binder_fd_object *fp = to_binder_fd_object(hdr);
1634 
1635 			binder_debug(BINDER_DEBUG_TRANSACTION,
1636 				     "        fd %d\n", fp->fd);
1637 			if (failed_at)
1638 				task_close_fd(proc, fp->fd);
1639 		} break;
1640 		case BINDER_TYPE_PTR:
1641 			/*
1642 			 * Nothing to do here, this will get cleaned up when the
1643 			 * transaction buffer gets freed
1644 			 */
1645 			break;
1646 		case BINDER_TYPE_FDA: {
1647 			struct binder_fd_array_object *fda;
1648 			struct binder_buffer_object *parent;
1649 			uintptr_t parent_buffer;
1650 			u32 *fd_array;
1651 			size_t fd_index;
1652 			binder_size_t fd_buf_size;
1653 
1654 			fda = to_binder_fd_array_object(hdr);
1655 			parent = binder_validate_ptr(buffer, fda->parent,
1656 						     off_start,
1657 						     offp - off_start);
1658 			if (!parent) {
1659 				pr_err("transaction release %d bad parent offset\n",
1660 				       debug_id);
1661 				continue;
1662 			}
1663 			/*
1664 			 * Since the parent was already fixed up, convert it
1665 			 * back to kernel address space to access it
1666 			 */
1667 			parent_buffer = parent->buffer -
1668 				binder_alloc_get_user_buffer_offset(
1669 						&proc->alloc);
1670 
1671 			fd_buf_size = sizeof(u32) * fda->num_fds;
1672 			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
1673 				pr_err("transaction release %d invalid number of fds (%lld)\n",
1674 				       debug_id, (u64)fda->num_fds);
1675 				continue;
1676 			}
1677 			if (fd_buf_size > parent->length ||
1678 			    fda->parent_offset > parent->length - fd_buf_size) {
1679 				/* No space for all file descriptors here. */
1680 				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
1681 				       debug_id, (u64)fda->num_fds);
1682 				continue;
1683 			}
1684 			fd_array = (u32 *)(parent_buffer + fda->parent_offset);
1685 			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
1686 				task_close_fd(proc, fd_array[fd_index]);
1687 		} break;
1688 		default:
1689 			pr_err("transaction release %d bad object type %x\n",
1690 				debug_id, hdr->type);
1691 			break;
1692 		}
1693 	}
1694 }
1695 
1696 static int binder_translate_binder(struct flat_binder_object *fp,
1697 				   struct binder_transaction *t,
1698 				   struct binder_thread *thread)
1699 {
1700 	struct binder_node *node;
1701 	struct binder_ref *ref;
1702 	struct binder_proc *proc = thread->proc;
1703 	struct binder_proc *target_proc = t->to_proc;
1704 
1705 	node = binder_get_node(proc, fp->binder);
1706 	if (!node) {
1707 		node = binder_new_node(proc, fp->binder, fp->cookie);
1708 		if (!node)
1709 			return -ENOMEM;
1710 
1711 		node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1712 		node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1713 	}
1714 	if (fp->cookie != node->cookie) {
1715 		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
1716 				  proc->pid, thread->pid, (u64)fp->binder,
1717 				  node->debug_id, (u64)fp->cookie,
1718 				  (u64)node->cookie);
1719 		return -EINVAL;
1720 	}
1721 	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
1722 		return -EPERM;
1723 
1724 	ref = binder_get_ref_for_node(target_proc, node);
1725 	if (!ref)
1726 		return -EINVAL;
1727 
1728 	if (fp->hdr.type == BINDER_TYPE_BINDER)
1729 		fp->hdr.type = BINDER_TYPE_HANDLE;
1730 	else
1731 		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
1732 	fp->binder = 0;
1733 	fp->handle = ref->desc;
1734 	fp->cookie = 0;
1735 	binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);
1736 
1737 	trace_binder_transaction_node_to_ref(t, node, ref);
1738 	binder_debug(BINDER_DEBUG_TRANSACTION,
1739 		     "        node %d u%016llx -> ref %d desc %d\n",
1740 		     node->debug_id, (u64)node->ptr,
1741 		     ref->debug_id, ref->desc);
1742 
1743 	return 0;
1744 }
1745 
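/**
 * binder_translate_handle() - translate a handle for the target process
 * @fp:		flat_binder_object containing the handle
 * @t:		transaction the object belongs to
 * @thread:	thread sending the transaction
 *
 * If the handle refers to a node owned by the target process itself,
 * rewrite the object back into a (weak) binder object; otherwise create
 * or reuse a reference in the target process and rewrite the handle to
 * that reference's descriptor.
 *
 * Return: 0 on success, negative errno on failure.
 */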
1746 static int binder_translate_handle(struct flat_binder_object *fp,
1747 				   struct binder_transaction *t,
1748 				   struct binder_thread *thread)
1749 {
1750 	struct binder_ref *ref;
1751 	struct binder_proc *proc = thread->proc;
1752 	struct binder_proc *target_proc = t->to_proc;
1753 
1754 	ref = binder_get_ref(proc, fp->handle,
1755 			     fp->hdr.type == BINDER_TYPE_HANDLE);
1756 	if (!ref) {
1757 		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
1758 				  proc->pid, thread->pid, fp->handle);
1759 		return -EINVAL;
1760 	}
1761 	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
1762 		return -EPERM;
1763 
1764 	if (ref->node->proc == target_proc) {
1765 		if (fp->hdr.type == BINDER_TYPE_HANDLE)
1766 			fp->hdr.type = BINDER_TYPE_BINDER;
1767 		else
1768 			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
1769 		fp->binder = ref->node->ptr;
1770 		fp->cookie = ref->node->cookie;
1771 		binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
1772 				0, NULL);
1773 		trace_binder_transaction_ref_to_node(t, ref);
1774 		binder_debug(BINDER_DEBUG_TRANSACTION,
1775 			     "        ref %d desc %d -> node %d u%016llx\n",
1776 			     ref->debug_id, ref->desc, ref->node->debug_id,
1777 			     (u64)ref->node->ptr);
1778 	} else {
1779 		struct binder_ref *new_ref;
1780 
1781 		new_ref = binder_get_ref_for_node(target_proc, ref->node);
1782 		if (!new_ref)
1783 			return -EINVAL;
1784 
1785 		fp->binder = 0;
1786 		fp->handle = new_ref->desc;
1787 		fp->cookie = 0;
1788 		binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
1789 			       NULL);
1790 		trace_binder_transaction_ref_to_ref(t, ref, new_ref);
1791 		binder_debug(BINDER_DEBUG_TRANSACTION,
1792 			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
1793 			     ref->debug_id, ref->desc, new_ref->debug_id,
1794 			     new_ref->desc, ref->node->debug_id);
1795 	}
1796 	return 0;
1797 }
1798 
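/**
 * binder_translate_fd() - duplicate a file descriptor into the target
 * @fd:		file descriptor in the sending process
 * @t:		transaction the fd belongs to
 * @thread:	thread sending the transaction
 * @in_reply_to:	transaction being replied to, or NULL
 *
 * Check that the target accepts fds, take a reference on the underlying
 * file and install it in an unused slot of the target's fd table with
 * O_CLOEXEC set.
 *
 * Return: the fd number in the target process, or negative errno.
 */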
1799 static int binder_translate_fd(int fd,
1800 			       struct binder_transaction *t,
1801 			       struct binder_thread *thread,
1802 			       struct binder_transaction *in_reply_to)
1803 {
1804 	struct binder_proc *proc = thread->proc;
1805 	struct binder_proc *target_proc = t->to_proc;
1806 	int target_fd;
1807 	struct file *file;
1808 	int ret;
1809 	bool target_allows_fd;
1810 
1811 	if (in_reply_to)
1812 		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
1813 	else
1814 		target_allows_fd = t->buffer->target_node->accept_fds;
1815 	if (!target_allows_fd) {
1816 		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
1817 				  proc->pid, thread->pid,
1818 				  in_reply_to ? "reply" : "transaction",
1819 				  fd);
1820 		ret = -EPERM;
1821 		goto err_fd_not_accepted;
1822 	}
1823 
1824 	file = fget(fd);
1825 	if (!file) {
1826 		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
1827 				  proc->pid, thread->pid, fd);
1828 		ret = -EBADF;
1829 		goto err_fget;
1830 	}
1831 	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
1832 	if (ret < 0) {
1833 		ret = -EPERM;
1834 		goto err_security;
1835 	}
1836 
1837 	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
1838 	if (target_fd < 0) {
1839 		ret = -ENOMEM;
1840 		goto err_get_unused_fd;
1841 	}
1842 	task_fd_install(target_proc, target_fd, file);
1843 	trace_binder_transaction_fd(t, fd, target_fd);
1844 	binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
1845 		     fd, target_fd);
1846 
1847 	return target_fd;
1848 
1849 err_get_unused_fd:
1850 err_security:
1851 	fput(file);
1852 err_fget:
1853 err_fd_not_accepted:
1854 	return ret;
1855 }
1856 
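/**
 * binder_translate_fd_array() - translate an array of fds in place
 * @fda:	fd array object describing the fds
 * @parent:	buffer object that holds the actual fd values
 * @t:		transaction the array belongs to
 * @thread:	thread sending the transaction
 * @in_reply_to:	transaction being replied to, or NULL
 *
 * Validate the bounds and alignment of the array inside @parent, then
 * translate each fd with binder_translate_fd(), rewriting the array in
 * place. On failure, any fds already installed in the target are closed
 * again.
 *
 * Return: 0 on success, negative errno on failure.
 */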
1857 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
1858 				     struct binder_buffer_object *parent,
1859 				     struct binder_transaction *t,
1860 				     struct binder_thread *thread,
1861 				     struct binder_transaction *in_reply_to)
1862 {
1863 	binder_size_t fdi, fd_buf_size, num_installed_fds;
1864 	int target_fd;
1865 	uintptr_t parent_buffer;
1866 	u32 *fd_array;
1867 	struct binder_proc *proc = thread->proc;
1868 	struct binder_proc *target_proc = t->to_proc;
1869 
1870 	fd_buf_size = sizeof(u32) * fda->num_fds;
1871 	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
1872 		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
1873 				  proc->pid, thread->pid, (u64)fda->num_fds);
1874 		return -EINVAL;
1875 	}
1876 	if (fd_buf_size > parent->length ||
1877 	    fda->parent_offset > parent->length - fd_buf_size) {
1878 		/* No space for all file descriptors here. */
1879 		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
1880 				  proc->pid, thread->pid, (u64)fda->num_fds);
1881 		return -EINVAL;
1882 	}
1883 	/*
1884 	 * Since the parent was already fixed up, convert it
1885 	 * back to the kernel address space to access it
1886 	 */
1887 	parent_buffer = parent->buffer -
1888 		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
1889 	fd_array = (u32 *)(parent_buffer + fda->parent_offset);
1890 	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
1891 		binder_user_error("%d:%d parent offset not aligned correctly.\n",
1892 				  proc->pid, thread->pid);
1893 		return -EINVAL;
1894 	}
1895 	for (fdi = 0; fdi < fda->num_fds; fdi++) {
1896 		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
1897 						in_reply_to);
1898 		if (target_fd < 0)
1899 			goto err_translate_fd_failed;
1900 		fd_array[fdi] = target_fd;
1901 	}
1902 	return 0;
1903 
1904 err_translate_fd_failed:
1905 	/*
1906 	 * Failed to allocate fd or security error, free fds
1907 	 * installed so far.
1908 	 */
1909 	num_installed_fds = fdi;
1910 	for (fdi = 0; fdi < num_installed_fds; fdi++)
1911 		task_close_fd(target_proc, fd_array[fdi]);
1912 	return target_fd;
1913 }
1914 
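/**
 * binder_fixup_parent() - write a buffer's address into its parent
 * @t:		transaction being processed
 * @thread:	thread sending the transaction
 * @bp:		buffer object that may carry a parent
 * @off_start:	start of the transaction's offsets array
 * @num_valid:	number of offsets validated so far
 * @last_fixup_obj:	last buffer object a fixup was applied to
 * @last_fixup_min_off:	minimum offset for the next fixup in that object
 *
 * If @bp has BINDER_BUFFER_FLAG_HAS_PARENT set, validate the parent and
 * the fixup ordering, then patch the pointer slot at the parent offset
 * so it holds @bp's (already fixed-up) address.
 *
 * Return: 0 on success, negative errno on failure.
 */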
1915 static int binder_fixup_parent(struct binder_transaction *t,
1916 			       struct binder_thread *thread,
1917 			       struct binder_buffer_object *bp,
1918 			       binder_size_t *off_start,
1919 			       binder_size_t num_valid,
1920 			       struct binder_buffer_object *last_fixup_obj,
1921 			       binder_size_t last_fixup_min_off)
1922 {
1923 	struct binder_buffer_object *parent;
1924 	u8 *parent_buffer;
1925 	struct binder_buffer *b = t->buffer;
1926 	struct binder_proc *proc = thread->proc;
1927 	struct binder_proc *target_proc = t->to_proc;
1928 
1929 	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
1930 		return 0;
1931 
1932 	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
1933 	if (!parent) {
1934 		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
1935 				  proc->pid, thread->pid);
1936 		return -EINVAL;
1937 	}
1938 
1939 	if (!binder_validate_fixup(b, off_start,
1940 				   parent, bp->parent_offset,
1941 				   last_fixup_obj,
1942 				   last_fixup_min_off)) {
1943 		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
1944 				  proc->pid, thread->pid);
1945 		return -EINVAL;
1946 	}
1947 
1948 	if (parent->length < sizeof(binder_uintptr_t) ||
1949 	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
1950 		/* No space for a pointer here! */
1951 		binder_user_error("%d:%d got transaction with invalid parent offset\n",
1952 				  proc->pid, thread->pid);
1953 		return -EINVAL;
1954 	}
1955 	parent_buffer = (u8 *)(parent->buffer -
1956 			binder_alloc_get_user_buffer_offset(
1957 				&target_proc->alloc));
1958 	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
1959 
1960 	return 0;
1961 }
1962 
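/*
 * Core of the transaction path: resolve the target (the reply stack, a
 * handle, or the context manager), allocate a buffer in the target
 * process, copy the data and offsets in, translate every embedded
 * object, then queue the work on the target's todo list while queueing
 * a BINDER_WORK_TRANSACTION_COMPLETE back to the sender.
 */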
1963 static void binder_transaction(struct binder_proc *proc,
1964 			       struct binder_thread *thread,
1965 			       struct binder_transaction_data *tr, int reply,
1966 			       binder_size_t extra_buffers_size)
1967 {
1968 	int ret;
1969 	struct binder_transaction *t;
1970 	struct binder_work *tcomplete;
1971 	binder_size_t *offp, *off_end, *off_start;
1972 	binder_size_t off_min;
1973 	u8 *sg_bufp, *sg_buf_end;
1974 	struct binder_proc *target_proc;
1975 	struct binder_thread *target_thread = NULL;
1976 	struct binder_node *target_node = NULL;
1977 	struct list_head *target_list;
1978 	wait_queue_head_t *target_wait;
1979 	struct binder_transaction *in_reply_to = NULL;
1980 	struct binder_transaction_log_entry *e;
1981 	uint32_t return_error;
1982 	struct binder_buffer_object *last_fixup_obj = NULL;
1983 	binder_size_t last_fixup_min_off = 0;
1984 	struct binder_context *context = proc->context;
1985 
1986 	e = binder_transaction_log_add(&binder_transaction_log);
1987 	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
1988 	e->from_proc = proc->pid;
1989 	e->from_thread = thread->pid;
1990 	e->target_handle = tr->target.handle;
1991 	e->data_size = tr->data_size;
1992 	e->offsets_size = tr->offsets_size;
1993 	e->context_name = proc->context->name;
1994 
1995 	if (reply) {
1996 		in_reply_to = thread->transaction_stack;
1997 		if (in_reply_to == NULL) {
1998 			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
1999 					  proc->pid, thread->pid);
2000 			return_error = BR_FAILED_REPLY;
2001 			goto err_empty_call_stack;
2002 		}
2003 		binder_set_nice(in_reply_to->saved_priority);
2004 		if (in_reply_to->to_thread != thread) {
2005 			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2006 				proc->pid, thread->pid, in_reply_to->debug_id,
2007 				in_reply_to->to_proc ?
2008 				in_reply_to->to_proc->pid : 0,
2009 				in_reply_to->to_thread ?
2010 				in_reply_to->to_thread->pid : 0);
2011 			return_error = BR_FAILED_REPLY;
2012 			in_reply_to = NULL;
2013 			goto err_bad_call_stack;
2014 		}
2015 		thread->transaction_stack = in_reply_to->to_parent;
2016 		target_thread = in_reply_to->from;
2017 		if (target_thread == NULL) {
2018 			return_error = BR_DEAD_REPLY;
2019 			goto err_dead_binder;
2020 		}
2021 		if (target_thread->transaction_stack != in_reply_to) {
2022 			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2023 				proc->pid, thread->pid,
2024 				target_thread->transaction_stack ?
2025 				target_thread->transaction_stack->debug_id : 0,
2026 				in_reply_to->debug_id);
2027 			return_error = BR_FAILED_REPLY;
2028 			in_reply_to = NULL;
2029 			target_thread = NULL;
2030 			goto err_dead_binder;
2031 		}
2032 		target_proc = target_thread->proc;
2033 	} else {
2034 		if (tr->target.handle) {
2035 			struct binder_ref *ref;
2036 
2037 			ref = binder_get_ref(proc, tr->target.handle, true);
2038 			if (ref == NULL) {
2039 				binder_user_error("%d:%d got transaction to invalid handle\n",
2040 					proc->pid, thread->pid);
2041 				return_error = BR_FAILED_REPLY;
2042 				goto err_invalid_target_handle;
2043 			}
2044 			target_node = ref->node;
2045 		} else {
2046 			target_node = context->binder_context_mgr_node;
2047 			if (target_node == NULL) {
2048 				return_error = BR_DEAD_REPLY;
2049 				goto err_no_context_mgr_node;
2050 			}
2051 		}
2052 		e->to_node = target_node->debug_id;
2053 		target_proc = target_node->proc;
2054 		if (target_proc == NULL) {
2055 			return_error = BR_DEAD_REPLY;
2056 			goto err_dead_binder;
2057 		}
2058 		if (security_binder_transaction(proc->tsk,
2059 						target_proc->tsk) < 0) {
2060 			return_error = BR_FAILED_REPLY;
2061 			goto err_invalid_target_handle;
2062 		}
2063 		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2064 			struct binder_transaction *tmp;
2065 
2066 			tmp = thread->transaction_stack;
2067 			if (tmp->to_thread != thread) {
2068 				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2069 					proc->pid, thread->pid, tmp->debug_id,
2070 					tmp->to_proc ? tmp->to_proc->pid : 0,
2071 					tmp->to_thread ?
2072 					tmp->to_thread->pid : 0);
2073 				return_error = BR_FAILED_REPLY;
2074 				goto err_bad_call_stack;
2075 			}
2076 			while (tmp) {
2077 				if (tmp->from && tmp->from->proc == target_proc)
2078 					target_thread = tmp->from;
2079 				tmp = tmp->from_parent;
2080 			}
2081 		}
2082 	}
2083 	if (target_thread) {
2084 		e->to_thread = target_thread->pid;
2085 		target_list = &target_thread->todo;
2086 		target_wait = &target_thread->wait;
2087 	} else {
2088 		target_list = &target_proc->todo;
2089 		target_wait = &target_proc->wait;
2090 	}
2091 	e->to_proc = target_proc->pid;
2092 
2093 	/* TODO: reuse incoming transaction for reply */
2094 	t = kzalloc(sizeof(*t), GFP_KERNEL);
2095 	if (t == NULL) {
2096 		return_error = BR_FAILED_REPLY;
2097 		goto err_alloc_t_failed;
2098 	}
2099 	binder_stats_created(BINDER_STAT_TRANSACTION);
2100 
2101 	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2102 	if (tcomplete == NULL) {
2103 		return_error = BR_FAILED_REPLY;
2104 		goto err_alloc_tcomplete_failed;
2105 	}
2106 	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2107 
2108 	t->debug_id = ++binder_last_id;
2109 	e->debug_id = t->debug_id;
2110 
2111 	if (reply)
2112 		binder_debug(BINDER_DEBUG_TRANSACTION,
2113 			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2114 			     proc->pid, thread->pid, t->debug_id,
2115 			     target_proc->pid, target_thread->pid,
2116 			     (u64)tr->data.ptr.buffer,
2117 			     (u64)tr->data.ptr.offsets,
2118 			     (u64)tr->data_size, (u64)tr->offsets_size,
2119 			     (u64)extra_buffers_size);
2120 	else
2121 		binder_debug(BINDER_DEBUG_TRANSACTION,
2122 			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2123 			     proc->pid, thread->pid, t->debug_id,
2124 			     target_proc->pid, target_node->debug_id,
2125 			     (u64)tr->data.ptr.buffer,
2126 			     (u64)tr->data.ptr.offsets,
2127 			     (u64)tr->data_size, (u64)tr->offsets_size,
2128 			     (u64)extra_buffers_size);
2129 
2130 	if (!reply && !(tr->flags & TF_ONE_WAY))
2131 		t->from = thread;
2132 	else
2133 		t->from = NULL;
2134 	t->sender_euid = task_euid(proc->tsk);
2135 	t->to_proc = target_proc;
2136 	t->to_thread = target_thread;
2137 	t->code = tr->code;
2138 	t->flags = tr->flags;
2139 	t->priority = task_nice(current);
2140 
2141 	trace_binder_transaction(reply, t, target_node);
2142 
2143 	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
2144 		tr->offsets_size, extra_buffers_size,
2145 		!reply && (t->flags & TF_ONE_WAY));
2146 	if (t->buffer == NULL) {
2147 		return_error = BR_FAILED_REPLY;
2148 		goto err_binder_alloc_buf_failed;
2149 	}
2150 	t->buffer->allow_user_free = 0;
2151 	t->buffer->debug_id = t->debug_id;
2152 	t->buffer->transaction = t;
2153 	t->buffer->target_node = target_node;
2154 	trace_binder_transaction_alloc_buf(t->buffer);
2155 	if (target_node)
2156 		binder_inc_node(target_node, 1, 0, NULL);
2157 
2158 	off_start = (binder_size_t *)(t->buffer->data +
2159 				      ALIGN(tr->data_size, sizeof(void *)));
2160 	offp = off_start;
2161 
2162 	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
2163 			   tr->data.ptr.buffer, tr->data_size)) {
2164 		binder_user_error("%d:%d got transaction with invalid data ptr\n",
2165 				proc->pid, thread->pid);
2166 		return_error = BR_FAILED_REPLY;
2167 		goto err_copy_data_failed;
2168 	}
2169 	if (copy_from_user(offp, (const void __user *)(uintptr_t)
2170 			   tr->data.ptr.offsets, tr->offsets_size)) {
2171 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2172 				proc->pid, thread->pid);
2173 		return_error = BR_FAILED_REPLY;
2174 		goto err_copy_data_failed;
2175 	}
2176 	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2177 		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2178 				proc->pid, thread->pid, (u64)tr->offsets_size);
2179 		return_error = BR_FAILED_REPLY;
2180 		goto err_bad_offset;
2181 	}
2182 	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
2183 		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2184 				  proc->pid, thread->pid,
2185 				  (u64)extra_buffers_size);
2186 		return_error = BR_FAILED_REPLY;
2187 		goto err_bad_offset;
2188 	}
2189 	off_end = (void *)off_start + tr->offsets_size;
2190 	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
2191 	sg_buf_end = sg_bufp + extra_buffers_size;
2192 	off_min = 0;
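	/*
	 * Walk the offsets array and translate each embedded object for
	 * the target process: binder objects become handles (and handles
	 * owned by the target become binder objects again), fds are
	 * reinstalled in the target's fd table, and scatter-gather
	 * buffers are copied with their parent pointers fixed up.
	 */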
2193 	for (; offp < off_end; offp++) {
2194 		struct binder_object_header *hdr;
2195 		size_t object_size = binder_validate_object(t->buffer, *offp);
2196 
2197 		if (object_size == 0 || *offp < off_min) {
2198 			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
2199 					  proc->pid, thread->pid, (u64)*offp,
2200 					  (u64)off_min,
2201 					  (u64)t->buffer->data_size);
2202 			return_error = BR_FAILED_REPLY;
2203 			goto err_bad_offset;
2204 		}
2205 
2206 		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
2207 		off_min = *offp + object_size;
2208 		switch (hdr->type) {
2209 		case BINDER_TYPE_BINDER:
2210 		case BINDER_TYPE_WEAK_BINDER: {
2211 			struct flat_binder_object *fp;
2212 
2213 			fp = to_flat_binder_object(hdr);
2214 			ret = binder_translate_binder(fp, t, thread);
2215 			if (ret < 0) {
2216 				return_error = BR_FAILED_REPLY;
2217 				goto err_translate_failed;
2218 			}
2219 		} break;
2220 		case BINDER_TYPE_HANDLE:
2221 		case BINDER_TYPE_WEAK_HANDLE: {
2222 			struct flat_binder_object *fp;
2223 
2224 			fp = to_flat_binder_object(hdr);
2225 			ret = binder_translate_handle(fp, t, thread);
2226 			if (ret < 0) {
2227 				return_error = BR_FAILED_REPLY;
2228 				goto err_translate_failed;
2229 			}
2230 		} break;
2231 
2232 		case BINDER_TYPE_FD: {
2233 			struct binder_fd_object *fp = to_binder_fd_object(hdr);
2234 			int target_fd = binder_translate_fd(fp->fd, t, thread,
2235 							    in_reply_to);
2236 
2237 			if (target_fd < 0) {
2238 				return_error = BR_FAILED_REPLY;
2239 				goto err_translate_failed;
2240 			}
2241 			fp->pad_binder = 0;
2242 			fp->fd = target_fd;
2243 		} break;
2244 		case BINDER_TYPE_FDA: {
2245 			struct binder_fd_array_object *fda =
2246 				to_binder_fd_array_object(hdr);
2247 			struct binder_buffer_object *parent =
2248 				binder_validate_ptr(t->buffer, fda->parent,
2249 						    off_start,
2250 						    offp - off_start);
2251 			if (!parent) {
2252 				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2253 						  proc->pid, thread->pid);
2254 				return_error = BR_FAILED_REPLY;
2255 				goto err_bad_parent;
2256 			}
2257 			if (!binder_validate_fixup(t->buffer, off_start,
2258 						   parent, fda->parent_offset,
2259 						   last_fixup_obj,
2260 						   last_fixup_min_off)) {
2261 				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2262 						  proc->pid, thread->pid);
2263 				return_error = BR_FAILED_REPLY;
2264 				goto err_bad_parent;
2265 			}
2266 			ret = binder_translate_fd_array(fda, parent, t, thread,
2267 							in_reply_to);
2268 			if (ret < 0) {
2269 				return_error = BR_FAILED_REPLY;
2270 				goto err_translate_failed;
2271 			}
2272 			last_fixup_obj = parent;
2273 			last_fixup_min_off =
2274 				fda->parent_offset + sizeof(u32) * fda->num_fds;
2275 		} break;
2276 		case BINDER_TYPE_PTR: {
2277 			struct binder_buffer_object *bp =
2278 				to_binder_buffer_object(hdr);
2279 			size_t buf_left = sg_buf_end - sg_bufp;
2280 
2281 			if (bp->length > buf_left) {
2282 				binder_user_error("%d:%d got transaction with too large buffer\n",
2283 						  proc->pid, thread->pid);
2284 				return_error = BR_FAILED_REPLY;
2285 				goto err_bad_offset;
2286 			}
2287 			if (copy_from_user(sg_bufp,
2288 					   (const void __user *)(uintptr_t)
2289 					   bp->buffer, bp->length)) {
2290 				binder_user_error("%d:%d got transaction with invalid sg buffer ptr\n",
2291 						  proc->pid, thread->pid);
2292 				return_error = BR_FAILED_REPLY;
2293 				goto err_copy_data_failed;
2294 			}
2295 			/* Fixup buffer pointer to target proc address space */
2296 			bp->buffer = (uintptr_t)sg_bufp +
2297 				binder_alloc_get_user_buffer_offset(
2298 						&target_proc->alloc);
2299 			sg_bufp += ALIGN(bp->length, sizeof(u64));
2300 
2301 			ret = binder_fixup_parent(t, thread, bp, off_start,
2302 						  offp - off_start,
2303 						  last_fixup_obj,
2304 						  last_fixup_min_off);
2305 			if (ret < 0) {
2306 				return_error = BR_FAILED_REPLY;
2307 				goto err_translate_failed;
2308 			}
2309 			last_fixup_obj = bp;
2310 			last_fixup_min_off = 0;
2311 		} break;
2312 		default:
2313 			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
2314 				proc->pid, thread->pid, hdr->type);
2315 			return_error = BR_FAILED_REPLY;
2316 			goto err_bad_object_type;
2317 		}
2318 	}
2319 	if (reply) {
2320 		BUG_ON(t->buffer->async_transaction != 0);
2321 		binder_pop_transaction(target_thread, in_reply_to);
2322 	} else if (!(t->flags & TF_ONE_WAY)) {
2323 		BUG_ON(t->buffer->async_transaction != 0);
2324 		t->need_reply = 1;
2325 		t->from_parent = thread->transaction_stack;
2326 		thread->transaction_stack = t;
2327 	} else {
2328 		BUG_ON(target_node == NULL);
2329 		BUG_ON(t->buffer->async_transaction != 1);
2330 		if (target_node->has_async_transaction) {
2331 			target_list = &target_node->async_todo;
2332 			target_wait = NULL;
2333 		} else
2334 			target_node->has_async_transaction = 1;
2335 	}
2336 	t->work.type = BINDER_WORK_TRANSACTION;
2337 	list_add_tail(&t->work.entry, target_list);
2338 	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
2339 	list_add_tail(&tcomplete->entry, &thread->todo);
2340 	if (target_wait) {
2341 		if (reply || !(t->flags & TF_ONE_WAY))
2342 			wake_up_interruptible_sync(target_wait);
2343 		else
2344 			wake_up_interruptible(target_wait);
2345 	}
2346 	return;
2347 
2348 err_translate_failed:
2349 err_bad_object_type:
2350 err_bad_offset:
2351 err_bad_parent:
2352 err_copy_data_failed:
2353 	trace_binder_transaction_failed_buffer_release(t->buffer);
2354 	binder_transaction_buffer_release(target_proc, t->buffer, offp);
2355 	t->buffer->transaction = NULL;
2356 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
2357 err_binder_alloc_buf_failed:
2358 	kfree(tcomplete);
2359 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2360 err_alloc_tcomplete_failed:
2361 	kfree(t);
2362 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
2363 err_alloc_t_failed:
2364 err_bad_call_stack:
2365 err_empty_call_stack:
2366 err_dead_binder:
2367 err_invalid_target_handle:
2368 err_no_context_mgr_node:
2369 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2370 		     "%d:%d transaction failed %d, size %lld-%lld\n",
2371 		     proc->pid, thread->pid, return_error,
2372 		     (u64)tr->data_size, (u64)tr->offsets_size);
2373 
2374 	{
2375 		struct binder_transaction_log_entry *fe;
2376 
2377 		fe = binder_transaction_log_add(&binder_transaction_log_failed);
2378 		*fe = *e;
2379 	}
2380 
2381 	BUG_ON(thread->return_error != BR_OK);
2382 	if (in_reply_to) {
2383 		thread->return_error = BR_TRANSACTION_COMPLETE;
2384 		binder_send_failed_reply(in_reply_to, return_error);
2385 	} else
2386 		thread->return_error = return_error;
2387 }
2388 
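/*
 * Consume BC_ commands from the userspace write buffer until it is
 * exhausted or a pending return error is set. *consumed is updated
 * after every command so that an interrupted write can be resumed.
 */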
2389 static int binder_thread_write(struct binder_proc *proc,
2390 			struct binder_thread *thread,
2391 			binder_uintptr_t binder_buffer, size_t size,
2392 			binder_size_t *consumed)
2393 {
2394 	uint32_t cmd;
2395 	struct binder_context *context = proc->context;
2396 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
2397 	void __user *ptr = buffer + *consumed;
2398 	void __user *end = buffer + size;
2399 
2400 	while (ptr < end && thread->return_error == BR_OK) {
2401 		if (get_user(cmd, (uint32_t __user *)ptr))
2402 			return -EFAULT;
2403 		ptr += sizeof(uint32_t);
2404 		trace_binder_command(cmd);
2405 		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
2406 			binder_stats.bc[_IOC_NR(cmd)]++;
2407 			proc->stats.bc[_IOC_NR(cmd)]++;
2408 			thread->stats.bc[_IOC_NR(cmd)]++;
2409 		}
2410 		switch (cmd) {
2411 		case BC_INCREFS:
2412 		case BC_ACQUIRE:
2413 		case BC_RELEASE:
2414 		case BC_DECREFS: {
2415 			uint32_t target;
2416 			struct binder_ref *ref;
2417 			const char *debug_string;
2418 
2419 			if (get_user(target, (uint32_t __user *)ptr))
2420 				return -EFAULT;
2421 			ptr += sizeof(uint32_t);
2422 			if (target == 0 && context->binder_context_mgr_node &&
2423 			    (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
2424 				ref = binder_get_ref_for_node(proc,
2425 					context->binder_context_mgr_node);
2426 				if (ref->desc != target) {
2427 					binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
2428 						proc->pid, thread->pid,
2429 						ref->desc);
2430 				}
2431 			} else
2432 				ref = binder_get_ref(proc, target,
2433 						     cmd == BC_ACQUIRE ||
2434 						     cmd == BC_RELEASE);
2435 			if (ref == NULL) {
2436 				binder_user_error("%d:%d refcount change on invalid ref %d\n",
2437 					proc->pid, thread->pid, target);
2438 				break;
2439 			}
2440 			switch (cmd) {
2441 			case BC_INCREFS:
2442 				debug_string = "IncRefs";
2443 				binder_inc_ref(ref, 0, NULL);
2444 				break;
2445 			case BC_ACQUIRE:
2446 				debug_string = "Acquire";
2447 				binder_inc_ref(ref, 1, NULL);
2448 				break;
2449 			case BC_RELEASE:
2450 				debug_string = "Release";
2451 				binder_dec_ref(ref, 1);
2452 				break;
2453 			case BC_DECREFS:
2454 			default:
2455 				debug_string = "DecRefs";
2456 				binder_dec_ref(ref, 0);
2457 				break;
2458 			}
2459 			binder_debug(BINDER_DEBUG_USER_REFS,
2460 				     "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
2461 				     proc->pid, thread->pid, debug_string, ref->debug_id,
2462 				     ref->desc, ref->strong, ref->weak, ref->node->debug_id);
2463 			break;
2464 		}
2465 		case BC_INCREFS_DONE:
2466 		case BC_ACQUIRE_DONE: {
2467 			binder_uintptr_t node_ptr;
2468 			binder_uintptr_t cookie;
2469 			struct binder_node *node;
2470 
2471 			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
2472 				return -EFAULT;
2473 			ptr += sizeof(binder_uintptr_t);
2474 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2475 				return -EFAULT;
2476 			ptr += sizeof(binder_uintptr_t);
2477 			node = binder_get_node(proc, node_ptr);
2478 			if (node == NULL) {
2479 				binder_user_error("%d:%d %s u%016llx no match\n",
2480 					proc->pid, thread->pid,
2481 					cmd == BC_INCREFS_DONE ?
2482 					"BC_INCREFS_DONE" :
2483 					"BC_ACQUIRE_DONE",
2484 					(u64)node_ptr);
2485 				break;
2486 			}
2487 			if (cookie != node->cookie) {
2488 				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
2489 					proc->pid, thread->pid,
2490 					cmd == BC_INCREFS_DONE ?
2491 					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
2492 					(u64)node_ptr, node->debug_id,
2493 					(u64)cookie, (u64)node->cookie);
2494 				break;
2495 			}
2496 			if (cmd == BC_ACQUIRE_DONE) {
2497 				if (node->pending_strong_ref == 0) {
2498 					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
2499 						proc->pid, thread->pid,
2500 						node->debug_id);
2501 					break;
2502 				}
2503 				node->pending_strong_ref = 0;
2504 			} else {
2505 				if (node->pending_weak_ref == 0) {
2506 					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
2507 						proc->pid, thread->pid,
2508 						node->debug_id);
2509 					break;
2510 				}
2511 				node->pending_weak_ref = 0;
2512 			}
2513 			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
2514 			binder_debug(BINDER_DEBUG_USER_REFS,
2515 				     "%d:%d %s node %d ls %d lw %d\n",
2516 				     proc->pid, thread->pid,
2517 				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
2518 				     node->debug_id, node->local_strong_refs, node->local_weak_refs);
2519 			break;
2520 		}
2521 		case BC_ATTEMPT_ACQUIRE:
2522 			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
2523 			return -EINVAL;
2524 		case BC_ACQUIRE_RESULT:
2525 			pr_err("BC_ACQUIRE_RESULT not supported\n");
2526 			return -EINVAL;
2527 
2528 		case BC_FREE_BUFFER: {
2529 			binder_uintptr_t data_ptr;
2530 			struct binder_buffer *buffer;
2531 
2532 			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
2533 				return -EFAULT;
2534 			ptr += sizeof(binder_uintptr_t);
2535 
2536 			buffer = binder_alloc_buffer_lookup(&proc->alloc,
2537 							    data_ptr);
2538 			if (buffer == NULL) {
2539 				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
2540 					proc->pid, thread->pid, (u64)data_ptr);
2541 				break;
2542 			}
2543 			if (!buffer->allow_user_free) {
2544 				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
2545 					proc->pid, thread->pid, (u64)data_ptr);
2546 				break;
2547 			}
2548 			binder_debug(BINDER_DEBUG_FREE_BUFFER,
2549 				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
2550 				     proc->pid, thread->pid, (u64)data_ptr,
2551 				     buffer->debug_id,
2552 				     buffer->transaction ? "active" : "finished");
2553 
2554 			if (buffer->transaction) {
2555 				buffer->transaction->buffer = NULL;
2556 				buffer->transaction = NULL;
2557 			}
2558 			if (buffer->async_transaction && buffer->target_node) {
2559 				BUG_ON(!buffer->target_node->has_async_transaction);
2560 				if (list_empty(&buffer->target_node->async_todo))
2561 					buffer->target_node->has_async_transaction = 0;
2562 				else
2563 					list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
2564 			}
2565 			trace_binder_transaction_buffer_release(buffer);
2566 			binder_transaction_buffer_release(proc, buffer, NULL);
2567 			binder_alloc_free_buf(&proc->alloc, buffer);
2568 			break;
2569 		}
2570 
2571 		case BC_TRANSACTION_SG:
2572 		case BC_REPLY_SG: {
2573 			struct binder_transaction_data_sg tr;
2574 
2575 			if (copy_from_user(&tr, ptr, sizeof(tr)))
2576 				return -EFAULT;
2577 			ptr += sizeof(tr);
2578 			binder_transaction(proc, thread, &tr.transaction_data,
2579 					   cmd == BC_REPLY_SG, tr.buffers_size);
2580 			break;
2581 		}
2582 		case BC_TRANSACTION:
2583 		case BC_REPLY: {
2584 			struct binder_transaction_data tr;
2585 
2586 			if (copy_from_user(&tr, ptr, sizeof(tr)))
2587 				return -EFAULT;
2588 			ptr += sizeof(tr);
2589 			binder_transaction(proc, thread, &tr,
2590 					   cmd == BC_REPLY, 0);
2591 			break;
2592 		}
2593 
2594 		case BC_REGISTER_LOOPER:
2595 			binder_debug(BINDER_DEBUG_THREADS,
2596 				     "%d:%d BC_REGISTER_LOOPER\n",
2597 				     proc->pid, thread->pid);
2598 			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
2599 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
2600 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
2601 					proc->pid, thread->pid);
2602 			} else if (proc->requested_threads == 0) {
2603 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
2604 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
2605 					proc->pid, thread->pid);
2606 			} else {
2607 				proc->requested_threads--;
2608 				proc->requested_threads_started++;
2609 			}
2610 			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
2611 			break;
2612 		case BC_ENTER_LOOPER:
2613 			binder_debug(BINDER_DEBUG_THREADS,
2614 				     "%d:%d BC_ENTER_LOOPER\n",
2615 				     proc->pid, thread->pid);
2616 			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
2617 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
2618 				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
2619 					proc->pid, thread->pid);
2620 			}
2621 			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
2622 			break;
2623 		case BC_EXIT_LOOPER:
2624 			binder_debug(BINDER_DEBUG_THREADS,
2625 				     "%d:%d BC_EXIT_LOOPER\n",
2626 				     proc->pid, thread->pid);
2627 			thread->looper |= BINDER_LOOPER_STATE_EXITED;
2628 			break;
2629 
2630 		case BC_REQUEST_DEATH_NOTIFICATION:
2631 		case BC_CLEAR_DEATH_NOTIFICATION: {
2632 			uint32_t target;
2633 			binder_uintptr_t cookie;
2634 			struct binder_ref *ref;
2635 			struct binder_ref_death *death;
2636 
2637 			if (get_user(target, (uint32_t __user *)ptr))
2638 				return -EFAULT;
2639 			ptr += sizeof(uint32_t);
2640 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2641 				return -EFAULT;
2642 			ptr += sizeof(binder_uintptr_t);
2643 			ref = binder_get_ref(proc, target, false);
2644 			if (ref == NULL) {
2645 				binder_user_error("%d:%d %s invalid ref %d\n",
2646 					proc->pid, thread->pid,
2647 					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2648 					"BC_REQUEST_DEATH_NOTIFICATION" :
2649 					"BC_CLEAR_DEATH_NOTIFICATION",
2650 					target);
2651 				break;
2652 			}
2653 
2654 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2655 				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
2656 				     proc->pid, thread->pid,
2657 				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2658 				     "BC_REQUEST_DEATH_NOTIFICATION" :
2659 				     "BC_CLEAR_DEATH_NOTIFICATION",
2660 				     (u64)cookie, ref->debug_id, ref->desc,
2661 				     ref->strong, ref->weak, ref->node->debug_id);
2662 
2663 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
2664 				if (ref->death) {
2665 					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
2666 						proc->pid, thread->pid);
2667 					break;
2668 				}
2669 				death = kzalloc(sizeof(*death), GFP_KERNEL);
2670 				if (death == NULL) {
2671 					thread->return_error = BR_ERROR;
2672 					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2673 						     "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
2674 						     proc->pid, thread->pid);
2675 					break;
2676 				}
2677 				binder_stats_created(BINDER_STAT_DEATH);
2678 				INIT_LIST_HEAD(&death->work.entry);
2679 				death->cookie = cookie;
2680 				ref->death = death;
2681 				if (ref->node->proc == NULL) {
2682 					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2683 					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2684 						list_add_tail(&ref->death->work.entry, &thread->todo);
2685 					} else {
2686 						list_add_tail(&ref->death->work.entry, &proc->todo);
2687 						wake_up_interruptible(&proc->wait);
2688 					}
2689 				}
2690 			} else {
2691 				if (ref->death == NULL) {
2692 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
2693 						proc->pid, thread->pid);
2694 					break;
2695 				}
2696 				death = ref->death;
2697 				if (death->cookie != cookie) {
2698 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
2699 						proc->pid, thread->pid,
2700 						(u64)death->cookie,
2701 						(u64)cookie);
2702 					break;
2703 				}
2704 				ref->death = NULL;
2705 				if (list_empty(&death->work.entry)) {
2706 					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2707 					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2708 						list_add_tail(&death->work.entry, &thread->todo);
2709 					} else {
2710 						list_add_tail(&death->work.entry, &proc->todo);
2711 						wake_up_interruptible(&proc->wait);
2712 					}
2713 				} else {
2714 					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2715 					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2716 				}
2717 			}
2718 		} break;
2719 		case BC_DEAD_BINDER_DONE: {
2720 			struct binder_work *w;
2721 			binder_uintptr_t cookie;
2722 			struct binder_ref_death *death = NULL;
2723 
2724 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2725 				return -EFAULT;
2726 
2727 			ptr += sizeof(cookie);
2728 			list_for_each_entry(w, &proc->delivered_death, entry) {
2729 				struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
2730 
2731 				if (tmp_death->cookie == cookie) {
2732 					death = tmp_death;
2733 					break;
2734 				}
2735 			}
2736 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
2737 				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
2738 				     proc->pid, thread->pid, (u64)cookie,
2739 				     death);
2740 			if (death == NULL) {
2741 				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
2742 					proc->pid, thread->pid, (u64)cookie);
2743 				break;
2744 			}
2745 
2746 			list_del_init(&death->work.entry);
2747 			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2748 				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2749 				if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2750 					list_add_tail(&death->work.entry, &thread->todo);
2751 				} else {
2752 					list_add_tail(&death->work.entry, &proc->todo);
2753 					wake_up_interruptible(&proc->wait);
2754 				}
2755 			}
2756 		} break;
2757 
2758 		default:
2759 			pr_err("%d:%d unknown command %d\n",
2760 			       proc->pid, thread->pid, cmd);
2761 			return -EINVAL;
2762 		}
2763 		*consumed = ptr - buffer;
2764 	}
2765 	return 0;
2766 }
2767 
2768 static void binder_stat_br(struct binder_proc *proc,
2769 			   struct binder_thread *thread, uint32_t cmd)
2770 {
2771 	trace_binder_return(cmd);
2772 	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
2773 		binder_stats.br[_IOC_NR(cmd)]++;
2774 		proc->stats.br[_IOC_NR(cmd)]++;
2775 		thread->stats.br[_IOC_NR(cmd)]++;
2776 	}
2777 }
2778 
2779 static int binder_has_proc_work(struct binder_proc *proc,
2780 				struct binder_thread *thread)
2781 {
2782 	return !list_empty(&proc->todo) ||
2783 		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2784 }
2785 
2786 static int binder_has_thread_work(struct binder_thread *thread)
2787 {
2788 	return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
2789 		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2790 }
2791 
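/*
 * Fill the userspace read buffer with BR_ commands. Unless non_block is
 * set, this blocks until thread or process work is available, then
 * drains the todo lists: node refcount commands, death notifications
 * and finally transaction data itself. May also append BR_SPAWN_LOOPER
 * to ask userspace to start another looper thread.
 */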
2792 static int binder_thread_read(struct binder_proc *proc,
2793 			      struct binder_thread *thread,
2794 			      binder_uintptr_t binder_buffer, size_t size,
2795 			      binder_size_t *consumed, int non_block)
2796 {
2797 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
2798 	void __user *ptr = buffer + *consumed;
2799 	void __user *end = buffer + size;
2800 
2801 	int ret = 0;
2802 	int wait_for_proc_work;
2803 
2804 	if (*consumed == 0) {
2805 		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
2806 			return -EFAULT;
2807 		ptr += sizeof(uint32_t);
2808 	}
2809 
2810 retry:
2811 	wait_for_proc_work = thread->transaction_stack == NULL &&
2812 				list_empty(&thread->todo);
2813 
2814 	if (thread->return_error != BR_OK && ptr < end) {
2815 		if (thread->return_error2 != BR_OK) {
2816 			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
2817 				return -EFAULT;
2818 			ptr += sizeof(uint32_t);
2819 			binder_stat_br(proc, thread, thread->return_error2);
2820 			if (ptr == end)
2821 				goto done;
2822 			thread->return_error2 = BR_OK;
2823 		}
2824 		if (put_user(thread->return_error, (uint32_t __user *)ptr))
2825 			return -EFAULT;
2826 		ptr += sizeof(uint32_t);
2827 		binder_stat_br(proc, thread, thread->return_error);
2828 		thread->return_error = BR_OK;
2829 		goto done;
2830 	}
2831 
2833 	thread->looper |= BINDER_LOOPER_STATE_WAITING;
2834 	if (wait_for_proc_work)
2835 		proc->ready_threads++;
2836 
2837 	binder_unlock(__func__);
2838 
2839 	trace_binder_wait_for_work(wait_for_proc_work,
2840 				   !!thread->transaction_stack,
2841 				   !list_empty(&thread->todo));
2842 	if (wait_for_proc_work) {
2843 		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2844 					BINDER_LOOPER_STATE_ENTERED))) {
2845 			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
2846 				proc->pid, thread->pid, thread->looper);
2847 			wait_event_interruptible(binder_user_error_wait,
2848 						 binder_stop_on_user_error < 2);
2849 		}
2850 		binder_set_nice(proc->default_priority);
2851 		if (non_block) {
2852 			if (!binder_has_proc_work(proc, thread))
2853 				ret = -EAGAIN;
2854 		} else
2855 			ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
2856 	} else {
2857 		if (non_block) {
2858 			if (!binder_has_thread_work(thread))
2859 				ret = -EAGAIN;
2860 		} else
2861 			ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
2862 	}
2863 
2864 	binder_lock(__func__);
2865 
2866 	if (wait_for_proc_work)
2867 		proc->ready_threads--;
2868 	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
2869 
2870 	if (ret)
2871 		return ret;
2872 
2873 	while (1) {
2874 		uint32_t cmd;
2875 		struct binder_transaction_data tr;
2876 		struct binder_work *w;
2877 		struct binder_transaction *t = NULL;
2878 
2879 		if (!list_empty(&thread->todo)) {
2880 			w = list_first_entry(&thread->todo, struct binder_work,
2881 					     entry);
2882 		} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
2883 			w = list_first_entry(&proc->todo, struct binder_work,
2884 					     entry);
2885 		} else {
2886 			/* no data added */
2887 			if (ptr - buffer == 4 &&
2888 			    !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
2889 				goto retry;
2890 			break;
2891 		}
2892 
2893 		if (end - ptr < sizeof(tr) + 4)
2894 			break;
2895 
2896 		switch (w->type) {
2897 		case BINDER_WORK_TRANSACTION: {
2898 			t = container_of(w, struct binder_transaction, work);
2899 		} break;
2900 		case BINDER_WORK_TRANSACTION_COMPLETE: {
2901 			cmd = BR_TRANSACTION_COMPLETE;
2902 			if (put_user(cmd, (uint32_t __user *)ptr))
2903 				return -EFAULT;
2904 			ptr += sizeof(uint32_t);
2905 
2906 			binder_stat_br(proc, thread, cmd);
2907 			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
2908 				     "%d:%d BR_TRANSACTION_COMPLETE\n",
2909 				     proc->pid, thread->pid);
2910 
2911 			list_del(&w->entry);
2912 			kfree(w);
2913 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2914 		} break;
2915 		case BINDER_WORK_NODE: {
2916 			struct binder_node *node = container_of(w, struct binder_node, work);
2917 			uint32_t cmd = BR_NOOP;
2918 			const char *cmd_name;
2919 			int strong = node->internal_strong_refs || node->local_strong_refs;
2920 			int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
2921 
2922 			if (weak && !node->has_weak_ref) {
2923 				cmd = BR_INCREFS;
2924 				cmd_name = "BR_INCREFS";
2925 				node->has_weak_ref = 1;
2926 				node->pending_weak_ref = 1;
2927 				node->local_weak_refs++;
2928 			} else if (strong && !node->has_strong_ref) {
2929 				cmd = BR_ACQUIRE;
2930 				cmd_name = "BR_ACQUIRE";
2931 				node->has_strong_ref = 1;
2932 				node->pending_strong_ref = 1;
2933 				node->local_strong_refs++;
2934 			} else if (!strong && node->has_strong_ref) {
2935 				cmd = BR_RELEASE;
2936 				cmd_name = "BR_RELEASE";
2937 				node->has_strong_ref = 0;
2938 			} else if (!weak && node->has_weak_ref) {
2939 				cmd = BR_DECREFS;
2940 				cmd_name = "BR_DECREFS";
2941 				node->has_weak_ref = 0;
2942 			}
2943 			if (cmd != BR_NOOP) {
2944 				if (put_user(cmd, (uint32_t __user *)ptr))
2945 					return -EFAULT;
2946 				ptr += sizeof(uint32_t);
2947 				if (put_user(node->ptr,
2948 					     (binder_uintptr_t __user *)ptr))
2949 					return -EFAULT;
2950 				ptr += sizeof(binder_uintptr_t);
2951 				if (put_user(node->cookie,
2952 					     (binder_uintptr_t __user *)ptr))
2953 					return -EFAULT;
2954 				ptr += sizeof(binder_uintptr_t);
2955 
2956 				binder_stat_br(proc, thread, cmd);
2957 				binder_debug(BINDER_DEBUG_USER_REFS,
2958 					     "%d:%d %s %d u%016llx c%016llx\n",
2959 					     proc->pid, thread->pid, cmd_name,
2960 					     node->debug_id,
2961 					     (u64)node->ptr, (u64)node->cookie);
2962 			} else {
2963 				list_del_init(&w->entry);
2964 				if (!weak && !strong) {
2965 					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2966 						     "%d:%d node %d u%016llx c%016llx deleted\n",
2967 						     proc->pid, thread->pid,
2968 						     node->debug_id,
2969 						     (u64)node->ptr,
2970 						     (u64)node->cookie);
2971 					rb_erase(&node->rb_node, &proc->nodes);
2972 					kfree(node);
2973 					binder_stats_deleted(BINDER_STAT_NODE);
2974 				} else {
2975 					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2976 						     "%d:%d node %d u%016llx c%016llx state unchanged\n",
2977 						     proc->pid, thread->pid,
2978 						     node->debug_id,
2979 						     (u64)node->ptr,
2980 						     (u64)node->cookie);
2981 				}
2982 			}
2983 		} break;
2984 		case BINDER_WORK_DEAD_BINDER:
2985 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2986 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2987 			struct binder_ref_death *death;
2988 			uint32_t cmd;
2989 
2990 			death = container_of(w, struct binder_ref_death, work);
2991 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
2992 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
2993 			else
2994 				cmd = BR_DEAD_BINDER;
2995 			if (put_user(cmd, (uint32_t __user *)ptr))
2996 				return -EFAULT;
2997 			ptr += sizeof(uint32_t);
2998 			if (put_user(death->cookie,
2999 				     (binder_uintptr_t __user *)ptr))
3000 				return -EFAULT;
3001 			ptr += sizeof(binder_uintptr_t);
3002 			binder_stat_br(proc, thread, cmd);
3003 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3004 				     "%d:%d %s %016llx\n",
3005 				      proc->pid, thread->pid,
3006 				      cmd == BR_DEAD_BINDER ?
3007 				      "BR_DEAD_BINDER" :
3008 				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
3009 				      (u64)death->cookie);
3010 
3011 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
3012 				list_del(&w->entry);
3013 				kfree(death);
3014 				binder_stats_deleted(BINDER_STAT_DEATH);
3015 			} else
3016 				list_move(&w->entry, &proc->delivered_death);
3017 			if (cmd == BR_DEAD_BINDER)
3018 				goto done; /* DEAD_BINDER notifications can cause transactions */
3019 		} break;
3020 		}
3021 
3022 		if (!t)
3023 			continue;
3024 
3025 		BUG_ON(t->buffer == NULL);
3026 		if (t->buffer->target_node) {
3027 			struct binder_node *target_node = t->buffer->target_node;
3028 
3029 			tr.target.ptr = target_node->ptr;
3030 			tr.cookie =  target_node->cookie;
3031 			t->saved_priority = task_nice(current);
3032 			if (t->priority < target_node->min_priority &&
3033 			    !(t->flags & TF_ONE_WAY))
3034 				binder_set_nice(t->priority);
3035 			else if (!(t->flags & TF_ONE_WAY) ||
3036 				 t->saved_priority > target_node->min_priority)
3037 				binder_set_nice(target_node->min_priority);
3038 			cmd = BR_TRANSACTION;
3039 		} else {
3040 			tr.target.ptr = 0;
3041 			tr.cookie = 0;
3042 			cmd = BR_REPLY;
3043 		}
3044 		tr.code = t->code;
3045 		tr.flags = t->flags;
3046 		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
3047 
3048 		if (t->from) {
3049 			struct task_struct *sender = t->from->proc->tsk;
3050 
3051 			tr.sender_pid = task_tgid_nr_ns(sender,
3052 							task_active_pid_ns(current));
3053 		} else {
3054 			tr.sender_pid = 0;
3055 		}
3056 
3057 		tr.data_size = t->buffer->data_size;
3058 		tr.offsets_size = t->buffer->offsets_size;
3059 		tr.data.ptr.buffer = (binder_uintptr_t)
3060 			((uintptr_t)t->buffer->data +
3061 			binder_alloc_get_user_buffer_offset(&proc->alloc));
3062 		tr.data.ptr.offsets = tr.data.ptr.buffer +
3063 					ALIGN(t->buffer->data_size,
3064 					    sizeof(void *));
3065 
3066 		if (put_user(cmd, (uint32_t __user *)ptr))
3067 			return -EFAULT;
3068 		ptr += sizeof(uint32_t);
3069 		if (copy_to_user(ptr, &tr, sizeof(tr)))
3070 			return -EFAULT;
3071 		ptr += sizeof(tr);
3072 
3073 		trace_binder_transaction_received(t);
3074 		binder_stat_br(proc, thread, cmd);
3075 		binder_debug(BINDER_DEBUG_TRANSACTION,
3076 			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
3077 			     proc->pid, thread->pid,
3078 			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
3079 			     "BR_REPLY",
3080 			     t->debug_id, t->from ? t->from->proc->pid : 0,
3081 			     t->from ? t->from->pid : 0, cmd,
3082 			     t->buffer->data_size, t->buffer->offsets_size,
3083 			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
3084 
3085 		list_del(&t->work.entry);
3086 		t->buffer->allow_user_free = 1;
3087 		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
3088 			t->to_parent = thread->transaction_stack;
3089 			t->to_thread = thread;
3090 			thread->transaction_stack = t;
3091 		} else {
3092 			t->buffer->transaction = NULL;
3093 			kfree(t);
3094 			binder_stats_deleted(BINDER_STAT_TRANSACTION);
3095 		}
3096 		break;
3097 	}
3098 
3099 done:
3100 
3101 	*consumed = ptr - buffer;
3102 	if (proc->requested_threads + proc->ready_threads == 0 &&
3103 	    proc->requested_threads_started < proc->max_threads &&
3104 	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3105 	     BINDER_LOOPER_STATE_ENTERED))
3106 	     /* user-space fails to spawn a new looper thread if this check is omitted */) {
3107 		proc->requested_threads++;
3108 		binder_debug(BINDER_DEBUG_THREADS,
3109 			     "%d:%d BR_SPAWN_LOOPER\n",
3110 			     proc->pid, thread->pid);
3111 		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
3112 			return -EFAULT;
3113 		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
3114 	}
3115 	return 0;
3116 }
3117 
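/*
 * Drain a todo list that can no longer be delivered: pending two-way
 * transactions are failed with BR_DEAD_REPLY, everything else is freed.
 */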
3118 static void binder_release_work(struct list_head *list)
3119 {
3120 	struct binder_work *w;
3121 
3122 	while (!list_empty(list)) {
3123 		w = list_first_entry(list, struct binder_work, entry);
3124 		list_del_init(&w->entry);
3125 		switch (w->type) {
3126 		case BINDER_WORK_TRANSACTION: {
3127 			struct binder_transaction *t;
3128 
3129 			t = container_of(w, struct binder_transaction, work);
3130 			if (t->buffer->target_node &&
3131 			    !(t->flags & TF_ONE_WAY)) {
3132 				binder_send_failed_reply(t, BR_DEAD_REPLY);
3133 			} else {
3134 				binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
3135 					"undelivered transaction %d\n",
3136 					t->debug_id);
3137 				t->buffer->transaction = NULL;
3138 				kfree(t);
3139 				binder_stats_deleted(BINDER_STAT_TRANSACTION);
3140 			}
3141 		} break;
3142 		case BINDER_WORK_TRANSACTION_COMPLETE: {
3143 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
3144 				"undelivered TRANSACTION_COMPLETE\n");
3145 			kfree(w);
3146 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3147 		} break;
3148 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3149 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
3150 			struct binder_ref_death *death;
3151 
3152 			death = container_of(w, struct binder_ref_death, work);
3153 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
3154 				"undelivered death notification, %016llx\n",
3155 				(u64)death->cookie);
3156 			kfree(death);
3157 			binder_stats_deleted(BINDER_STAT_DEATH);
3158 		} break;
3159 		default:
3160 			pr_err("unexpected work type, %d, not freed\n",
3161 			       w->type);
3162 			break;
3163 		}
3164 	}
3165 
3166 }
3167 
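/*
 * Look up the binder_thread for current in @proc's thread rbtree,
 * creating and inserting one on first use.
 */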
3168 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
3169 {
3170 	struct binder_thread *thread = NULL;
3171 	struct rb_node *parent = NULL;
3172 	struct rb_node **p = &proc->threads.rb_node;
3173 
3174 	while (*p) {
3175 		parent = *p;
3176 		thread = rb_entry(parent, struct binder_thread, rb_node);
3177 
3178 		if (current->pid < thread->pid)
3179 			p = &(*p)->rb_left;
3180 		else if (current->pid > thread->pid)
3181 			p = &(*p)->rb_right;
3182 		else
3183 			break;
3184 	}
3185 	if (*p == NULL) {
3186 		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
3187 		if (thread == NULL)
3188 			return NULL;
3189 		binder_stats_created(BINDER_STAT_THREAD);
3190 		thread->proc = proc;
3191 		thread->pid = current->pid;
3192 		init_waitqueue_head(&thread->wait);
3193 		INIT_LIST_HEAD(&thread->todo);
3194 		rb_link_node(&thread->rb_node, parent, p);
3195 		rb_insert_color(&thread->rb_node, &proc->threads);
3196 		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
3197 		thread->return_error = BR_OK;
3198 		thread->return_error2 = BR_OK;
3199 	}
3200 	return thread;
3201 }
3202 
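/*
 * Tear down a binder_thread: unhook it from its transaction stack
 * (failing a pending reply with BR_DEAD_REPLY), release any queued work
 * and free the thread. Returns the number of transactions that were
 * still active.
 */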
3203 static int binder_free_thread(struct binder_proc *proc,
3204 			      struct binder_thread *thread)
3205 {
3206 	struct binder_transaction *t;
3207 	struct binder_transaction *send_reply = NULL;
3208 	int active_transactions = 0;
3209 
3210 	rb_erase(&thread->rb_node, &proc->threads);
3211 	t = thread->transaction_stack;
3212 	if (t && t->to_thread == thread)
3213 		send_reply = t;
3214 	while (t) {
3215 		active_transactions++;
3216 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
3217 			     "release %d:%d transaction %d %s, still active\n",
3218 			      proc->pid, thread->pid,
3219 			     t->debug_id,
3220 			     (t->to_thread == thread) ? "in" : "out");
3221 
3222 		if (t->to_thread == thread) {
3223 			t->to_proc = NULL;
3224 			t->to_thread = NULL;
3225 			if (t->buffer) {
3226 				t->buffer->transaction = NULL;
3227 				t->buffer = NULL;
3228 			}
3229 			t = t->to_parent;
3230 		} else if (t->from == thread) {
3231 			t->from = NULL;
3232 			t = t->from_parent;
3233 		} else
3234 			BUG();
3235 	}
3236 	if (send_reply)
3237 		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
3238 	binder_release_work(&thread->todo);
3239 	kfree(thread);
3240 	binder_stats_deleted(BINDER_STAT_THREAD);
3241 	return active_transactions;
3242 }
3243 
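/*
 * poll() support: wait on the per-thread queue when the thread has
 * private work outstanding, otherwise on the per-process queue.
 */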
3244 static unsigned int binder_poll(struct file *filp,
3245 				struct poll_table_struct *wait)
3246 {
3247 	struct binder_proc *proc = filp->private_data;
3248 	struct binder_thread *thread = NULL;
3249 	int wait_for_proc_work;
3250 
3251 	binder_lock(__func__);
3252 
3253 	thread = binder_get_thread(proc);
	/* binder_get_thread() can fail under memory pressure */
	if (thread == NULL) {
		binder_unlock(__func__);
		return POLLERR;
	}

3255 	wait_for_proc_work = thread->transaction_stack == NULL &&
3256 		list_empty(&thread->todo) && thread->return_error == BR_OK;
3257 
3258 	binder_unlock(__func__);
3259 
3260 	if (wait_for_proc_work) {
3261 		if (binder_has_proc_work(proc, thread))
3262 			return POLLIN;
3263 		poll_wait(filp, &proc->wait, wait);
3264 		if (binder_has_proc_work(proc, thread))
3265 			return POLLIN;
3266 	} else {
3267 		if (binder_has_thread_work(thread))
3268 			return POLLIN;
3269 		poll_wait(filp, &thread->wait, wait);
3270 		if (binder_has_thread_work(thread))
3271 			return POLLIN;
3272 	}
3273 	return 0;
3274 }
3275 
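/*
 * Handle BINDER_WRITE_READ: copy in the binder_write_read descriptor,
 * drain the write buffer through binder_thread_write(), fill the read
 * buffer via binder_thread_read(), then copy the descriptor (with
 * updated consumed counts) back out.
 *
 * A minimal userspace caller could look like the sketch below; binder_fd,
 * write_buf, write_len and read_buf are placeholder names, and the buffer
 * contents follow the BC_ / BR_ protocol handled above:
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)write_buf,
 *		.write_size   = write_len,
 *		.read_buffer  = (binder_uintptr_t)read_buf,
 *		.read_size    = sizeof(read_buf),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *	(bwr.write_consumed and bwr.read_consumed then report progress)
 */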
3276 static int binder_ioctl_write_read(struct file *filp,
3277 				unsigned int cmd, unsigned long arg,
3278 				struct binder_thread *thread)
3279 {
3280 	int ret = 0;
3281 	struct binder_proc *proc = filp->private_data;
3282 	unsigned int size = _IOC_SIZE(cmd);
3283 	void __user *ubuf = (void __user *)arg;
3284 	struct binder_write_read bwr;
3285 
3286 	if (size != sizeof(struct binder_write_read)) {
3287 		ret = -EINVAL;
3288 		goto out;
3289 	}
3290 	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
3291 		ret = -EFAULT;
3292 		goto out;
3293 	}
3294 	binder_debug(BINDER_DEBUG_READ_WRITE,
3295 		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
3296 		     proc->pid, thread->pid,
3297 		     (u64)bwr.write_size, (u64)bwr.write_buffer,
3298 		     (u64)bwr.read_size, (u64)bwr.read_buffer);
3299 
3300 	if (bwr.write_size > 0) {
3301 		ret = binder_thread_write(proc, thread,
3302 					  bwr.write_buffer,
3303 					  bwr.write_size,
3304 					  &bwr.write_consumed);
3305 		trace_binder_write_done(ret);
3306 		if (ret < 0) {
3307 			bwr.read_consumed = 0;
3308 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
3309 				ret = -EFAULT;
3310 			goto out;
3311 		}
3312 	}
3313 	if (bwr.read_size > 0) {
3314 		ret = binder_thread_read(proc, thread, bwr.read_buffer,
3315 					 bwr.read_size,
3316 					 &bwr.read_consumed,
3317 					 filp->f_flags & O_NONBLOCK);
3318 		trace_binder_read_done(ret);
3319 		if (!list_empty(&proc->todo))
3320 			wake_up_interruptible(&proc->wait);
3321 		if (ret < 0) {
3322 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
3323 				ret = -EFAULT;
3324 			goto out;
3325 		}
3326 	}
3327 	binder_debug(BINDER_DEBUG_READ_WRITE,
3328 		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
3329 		     proc->pid, thread->pid,
3330 		     (u64)bwr.write_consumed, (u64)bwr.write_size,
3331 		     (u64)bwr.read_consumed, (u64)bwr.read_size);
3332 	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
3333 		ret = -EFAULT;
3334 		goto out;
3335 	}
3336 out:
3337 	return ret;
3338 }
3339 
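/*
 * Handle BINDER_SET_CONTEXT_MGR: only one context manager may exist per
 * binder context, the caller must pass the security hook and, if a
 * manager uid is already configured, must match it with its euid. On
 * success a new node holding strong and weak references is installed as
 * the manager node.
 */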
3340 static int binder_ioctl_set_ctx_mgr(struct file *filp)
3341 {
3342 	int ret = 0;
3343 	struct binder_proc *proc = filp->private_data;
3344 	struct binder_context *context = proc->context;
3345 
3346 	kuid_t curr_euid = current_euid();
3347 
3348 	if (context->binder_context_mgr_node) {
3349 		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
3350 		ret = -EBUSY;
3351 		goto out;
3352 	}
3353 	ret = security_binder_set_context_mgr(proc->tsk);
3354 	if (ret < 0)
3355 		goto out;
3356 	if (uid_valid(context->binder_context_mgr_uid)) {
3357 		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
3358 			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
3359 			       from_kuid(&init_user_ns, curr_euid),
3360 			       from_kuid(&init_user_ns,
3361 					 context->binder_context_mgr_uid));
3362 			ret = -EPERM;
3363 			goto out;
3364 		}
3365 	} else {
3366 		context->binder_context_mgr_uid = curr_euid;
3367 	}
3368 	context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
3369 	if (!context->binder_context_mgr_node) {
3370 		ret = -ENOMEM;
3371 		goto out;
3372 	}
3373 	context->binder_context_mgr_node->local_weak_refs++;
3374 	context->binder_context_mgr_node->local_strong_refs++;
3375 	context->binder_context_mgr_node->has_strong_ref = 1;
3376 	context->binder_context_mgr_node->has_weak_ref = 1;
3377 out:
3378 	return ret;
3379 }
3380 
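/*
 * Top-level ioctl dispatcher. Apart from the wait_event_interruptible()
 * calls, everything here runs under the global binder lock taken by
 * binder_lock()/binder_unlock().
 */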
3381 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3382 {
3383 	int ret;
3384 	struct binder_proc *proc = filp->private_data;
3385 	struct binder_thread *thread;
3386 	unsigned int size = _IOC_SIZE(cmd);
3387 	void __user *ubuf = (void __user *)arg;
3388 
3389 	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
3390 			proc->pid, current->pid, cmd, arg);*/
3391 
3392 	trace_binder_ioctl(cmd, arg);
3393 
3394 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
3395 	if (ret)
3396 		goto err_unlocked;
3397 
3398 	binder_lock(__func__);
3399 	thread = binder_get_thread(proc);
3400 	if (thread == NULL) {
3401 		ret = -ENOMEM;
3402 		goto err;
3403 	}
3404 
3405 	switch (cmd) {
3406 	case BINDER_WRITE_READ:
3407 		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
3408 		if (ret)
3409 			goto err;
3410 		break;
3411 	case BINDER_SET_MAX_THREADS:
3412 		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
3413 			ret = -EINVAL;
3414 			goto err;
3415 		}
3416 		break;
3417 	case BINDER_SET_CONTEXT_MGR:
3418 		ret = binder_ioctl_set_ctx_mgr(filp);
3419 		if (ret)
3420 			goto err;
3421 		break;
3422 	case BINDER_THREAD_EXIT:
3423 		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
3424 			     proc->pid, thread->pid);
3425 		binder_free_thread(proc, thread);
3426 		thread = NULL;
3427 		break;
3428 	case BINDER_VERSION: {
3429 		struct binder_version __user *ver = ubuf;
3430 
3431 		if (size != sizeof(struct binder_version)) {
3432 			ret = -EINVAL;
3433 			goto err;
3434 		}
3435 		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
3436 			     &ver->protocol_version)) {
3437 			ret = -EINVAL;
3438 			goto err;
3439 		}
3440 		break;
3441 	}
3442 	default:
3443 		ret = -EINVAL;
3444 		goto err;
3445 	}
3446 	ret = 0;
3447 err:
3448 	if (thread)
3449 		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
3450 	binder_unlock(__func__);
3451 	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
3452 	if (ret && ret != -ERESTARTSYS)
3453 		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
3454 err_unlocked:
3455 	trace_binder_ioctl_done(ret);
3456 	return ret;
3457 }
3458 
3459 static void binder_vma_open(struct vm_area_struct *vma)
3460 {
3461 	struct binder_proc *proc = vma->vm_private_data;
3462 
3463 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3464 		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
3465 		     proc->pid, vma->vm_start, vma->vm_end,
3466 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3467 		     (unsigned long)pgprot_val(vma->vm_page_prot));
3468 }
3469 
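/*
 * binder_alloc_vma_close() - invalidate the allocator's cached vma.
 * Called when the userspace mapping goes away; clearing alloc->vma
 * prevents further buffer allocations against the stale mapping.
 */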
3470 void binder_alloc_vma_close(struct binder_alloc *alloc)
3471 {
3472 	WRITE_ONCE(alloc->vma, NULL);
3473 	WRITE_ONCE(alloc->vma_vm_mm, NULL);
3474 }
3475 
3476 static void binder_vma_close(struct vm_area_struct *vma)
3477 {
3478 	struct binder_proc *proc = vma->vm_private_data;
3479 
3480 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3481 		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
3482 		     proc->pid, vma->vm_start, vma->vm_end,
3483 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3484 		     (unsigned long)pgprot_val(vma->vm_page_prot));
3485 	binder_alloc_vma_close(&proc->alloc);
3486 	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
3487 }
3488 
3489 static int binder_vm_fault(struct vm_fault *vmf)
3490 {
3491 	return VM_FAULT_SIGBUS;
3492 }
3493 
3494 static const struct vm_operations_struct binder_vm_ops = {
3495 	.open = binder_vma_open,
3496 	.close = binder_vma_close,
3497 	.fault = binder_vm_fault,
3498 };
3499 
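/*
 * binder_alloc_mmap_handler() - set up the buffer space for a proc.
 * Reserves a kernel virtual area matching the userspace vma, records
 * the constant user/kernel address offset, and maps only the first
 * page eagerly; the rest are populated on demand as buffers are
 * allocated.
 */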
3500 int binder_alloc_mmap_handler(struct binder_alloc *alloc,
3501 			      struct vm_area_struct *vma)
3502 {
3503 	int ret;
3504 	struct vm_struct *area;
3505 	const char *failure_string;
3506 	struct binder_buffer *buffer;
3507 
3508 	mutex_lock(&binder_alloc_mmap_lock);
3509 	if (alloc->buffer) {
3510 		ret = -EBUSY;
3511 		failure_string = "already mapped";
3512 		goto err_already_mapped;
3513 	}
3514 
3515 	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
3516 	if (area == NULL) {
3517 		ret = -ENOMEM;
3518 		failure_string = "get_vm_area";
3519 		goto err_get_vm_area_failed;
3520 	}
3521 	alloc->buffer = area->addr;
3522 	alloc->user_buffer_offset =
3523 			vma->vm_start - (uintptr_t)alloc->buffer;
3524 	mutex_unlock(&binder_alloc_mmap_lock);
3525 
3526 #ifdef CONFIG_CPU_CACHE_VIPT
3527 	if (cache_is_vipt_aliasing()) {
3528 		while (CACHE_COLOUR(
3529 				(vma->vm_start ^ (uint32_t)alloc->buffer))) {
3530 			pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
3531 				__func__, alloc->pid, vma->vm_start,
3532 				vma->vm_end, alloc->buffer);
3533 			vma->vm_start += PAGE_SIZE;
3534 		}
3535 	}
3536 #endif
3537 	alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
3538 				   ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
3539 			       GFP_KERNEL);
3540 	if (alloc->pages == NULL) {
3541 		ret = -ENOMEM;
3542 		failure_string = "alloc page array";
3543 		goto err_alloc_pages_failed;
3544 	}
3545 	alloc->buffer_size = vma->vm_end - vma->vm_start;
3546 
3547 	if (binder_update_page_range(alloc, 1, alloc->buffer,
3548 				     alloc->buffer + PAGE_SIZE, vma)) {
3549 		ret = -ENOMEM;
3550 		failure_string = "alloc small buf";
3551 		goto err_alloc_small_buf_failed;
3552 	}
3553 	buffer = alloc->buffer;
3554 	INIT_LIST_HEAD(&alloc->buffers);
3555 	list_add(&buffer->entry, &alloc->buffers);
3556 	buffer->free = 1;
3557 	binder_insert_free_buffer(alloc, buffer);
3558 	alloc->free_async_space = alloc->buffer_size / 2;
3559 	barrier();
3560 	alloc->vma = vma;
3561 	alloc->vma_vm_mm = vma->vm_mm;
3562 
3563 	return 0;
3564 
3565 err_alloc_small_buf_failed:
3566 	kfree(alloc->pages);
3567 	alloc->pages = NULL;
3568 err_alloc_pages_failed:
3569 	mutex_lock(&binder_alloc_mmap_lock);
3570 	vfree(alloc->buffer);
3571 	alloc->buffer = NULL;
3572 err_get_vm_area_failed:
3573 err_already_mapped:
3574 	mutex_unlock(&binder_alloc_mmap_lock);
3575 	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
3576 	       alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
3577 	return ret;
3578 }
3579 
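/*
 * mmap handler. Only the group leader that opened the device may map
 * it, the mapping is capped at 4MB, and writable mappings are refused
 * (VM_WRITE is forbidden and VM_MAYWRITE is cleared).
 */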
3580 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
3581 {
3582 	int ret;
3583 	struct binder_proc *proc = filp->private_data;
3584 	const char *failure_string;
3585 
3586 	if (proc->tsk != current->group_leader)
3587 		return -EINVAL;
3588 
3589 	if ((vma->vm_end - vma->vm_start) > SZ_4M)
3590 		vma->vm_end = vma->vm_start + SZ_4M;
3591 
3592 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3593 		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
3594 		     __func__, proc->pid, vma->vm_start, vma->vm_end,
3595 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3596 		     (unsigned long)pgprot_val(vma->vm_page_prot));
3597 
3598 	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
3599 		ret = -EPERM;
3600 		failure_string = "bad vm_flags";
3601 		goto err_bad_arg;
3602 	}
3603 	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
3604 	vma->vm_ops = &binder_vm_ops;
3605 	vma->vm_private_data = proc;
3606 
3607 	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
3608 	if (ret)
3609 		return ret;
3610 	proc->files = get_files_struct(current);
3611 	return 0;
3612 
3613 err_bad_arg:
3614 	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
3615 	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
3616 	return ret;
3617 }
3618 
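/*
 * binder_alloc_init() - record the owning task and pid and initialize
 * the allocator mutex.
 */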
3619 void binder_alloc_init(struct binder_alloc *alloc)
3620 {
3621 	alloc->tsk = current->group_leader;
3622 	alloc->pid = current->group_leader->pid;
3623 	mutex_init(&alloc->mutex);
3624 }
3625 
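/*
 * Open handler. Allocates and initializes a binder_proc for the
 * opening process, binds it to the device's context, links it into
 * binder_procs and creates a per-pid debugfs entry.
 */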
3626 static int binder_open(struct inode *nodp, struct file *filp)
3627 {
3628 	struct binder_proc *proc;
3629 	struct binder_device *binder_dev;
3630 
3631 	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
3632 		     current->group_leader->pid, current->pid);
3633 
3634 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
3635 	if (proc == NULL)
3636 		return -ENOMEM;
3637 	get_task_struct(current->group_leader);
3638 	proc->tsk = current->group_leader;
3639 	INIT_LIST_HEAD(&proc->todo);
3640 	init_waitqueue_head(&proc->wait);
3641 	proc->default_priority = task_nice(current);
3642 	binder_dev = container_of(filp->private_data, struct binder_device,
3643 				  miscdev);
3644 	proc->context = &binder_dev->context;
3645 	binder_alloc_init(&proc->alloc);
3646 
3647 	binder_lock(__func__);
3648 
3649 	binder_stats_created(BINDER_STAT_PROC);
3650 	hlist_add_head(&proc->proc_node, &binder_procs);
3651 	proc->pid = current->group_leader->pid;
3652 	INIT_LIST_HEAD(&proc->delivered_death);
3653 	filp->private_data = proc;
3654 
3655 	binder_unlock(__func__);
3656 
3657 	if (binder_debugfs_dir_entry_proc) {
3658 		char strbuf[11];
3659 
3660 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
3661 		/*
3662 		 * proc debug entries are shared between contexts, so
3663 		 * this will fail if the process tries to open the driver
3664 		 * again with a different context. The printing code will
3665 		 * print all contexts that a given PID has anyway, so this
3666 		 * is not a problem.
3667 		 */
3668 		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
3669 			binder_debugfs_dir_entry_proc,
3670 			(void *)(unsigned long)proc->pid,
3671 			&binder_proc_fops);
3672 	}
3673 
3674 	return 0;
3675 }
3676 
3677 static int binder_flush(struct file *filp, fl_owner_t id)
3678 {
3679 	struct binder_proc *proc = filp->private_data;
3680 
3681 	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
3682 
3683 	return 0;
3684 }
3685 
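/*
 * Deferred flush: flag every thread with NEED_RETURN and wake all
 * waiters so that blocked reads return to userspace.
 */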
3686 static void binder_deferred_flush(struct binder_proc *proc)
3687 {
3688 	struct rb_node *n;
3689 	int wake_count = 0;
3690 
3691 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
3692 		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
3693 
3694 		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
3695 		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
3696 			wake_up_interruptible(&thread->wait);
3697 			wake_count++;
3698 		}
3699 	}
3700 	wake_up_interruptible_all(&proc->wait);
3701 
3702 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3703 		     "binder_flush: %d woke %d threads\n", proc->pid,
3704 		     wake_count);
3705 }
3706 
3707 static int binder_release(struct inode *nodp, struct file *filp)
3708 {
3709 	struct binder_proc *proc = filp->private_data;
3710 
3711 	debugfs_remove(proc->debugfs_entry);
3712 	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
3713 
3714 	return 0;
3715 }
3716 
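/*
 * Release a node whose owning proc is going away. A node with no
 * remaining refs is freed immediately; otherwise it is moved to
 * binder_dead_nodes and a dead-binder work item is queued for every
 * ref that registered a death notification. Returns the updated
 * incoming-ref count.
 */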
3717 static int binder_node_release(struct binder_node *node, int refs)
3718 {
3719 	struct binder_ref *ref;
3720 	int death = 0;
3721 
3722 	list_del_init(&node->work.entry);
3723 	binder_release_work(&node->async_todo);
3724 
3725 	if (hlist_empty(&node->refs)) {
3726 		kfree(node);
3727 		binder_stats_deleted(BINDER_STAT_NODE);
3728 
3729 		return refs;
3730 	}
3731 
3732 	node->proc = NULL;
3733 	node->local_strong_refs = 0;
3734 	node->local_weak_refs = 0;
3735 	hlist_add_head(&node->dead_node, &binder_dead_nodes);
3736 
3737 	hlist_for_each_entry(ref, &node->refs, node_entry) {
3738 		refs++;
3739 
3740 		if (!ref->death)
3741 			continue;
3742 
3743 		death++;
3744 
3745 		if (list_empty(&ref->death->work.entry)) {
3746 			ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3747 			list_add_tail(&ref->death->work.entry,
3748 				      &ref->proc->todo);
3749 			wake_up_interruptible(&ref->proc->wait);
3750 		} else
3751 			BUG();
3752 	}
3753 
3754 	binder_debug(BINDER_DEBUG_DEAD_BINDER,
3755 		     "node %d now dead, refs %d, death %d\n",
3756 		     node->debug_id, refs, death);
3757 
3758 	return refs;
3759 }
3760 
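/*
 * binder_alloc_deferred_release() - tear down the allocator. Frees any
 * buffers still allocated (their transactions must already be gone),
 * unmaps and frees resident pages, then releases the page array and
 * the kernel buffer area.
 */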
3761 void binder_alloc_deferred_release(struct binder_alloc *alloc)
3762 {
3763 	struct rb_node *n;
3764 	int buffers, page_count;
3765 
3766 	BUG_ON(alloc->vma);
3767 
3768 	buffers = 0;
3769 	mutex_lock(&alloc->mutex);
3770 	while ((n = rb_first(&alloc->allocated_buffers))) {
3771 		struct binder_buffer *buffer;
3772 
3773 		buffer = rb_entry(n, struct binder_buffer, rb_node);
3774 
3775 		/* Transaction should already have been freed */
3776 		BUG_ON(buffer->transaction);
3777 
3778 		binder_free_buf_locked(alloc, buffer);
3779 		buffers++;
3780 	}
3781 
3782 	page_count = 0;
3783 	if (alloc->pages) {
3784 		int i;
3785 
3786 		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
3787 			void *page_addr;
3788 
3789 			if (!alloc->pages[i])
3790 				continue;
3791 
3792 			page_addr = alloc->buffer + i * PAGE_SIZE;
3793 			binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC,
3794 				     "%s: %d: page %d at %pK not freed\n",
3795 				     __func__, alloc->pid, i, page_addr);
3796 			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
3797 			__free_page(alloc->pages[i]);
3798 			page_count++;
3799 		}
3800 		kfree(alloc->pages);
3801 		vfree(alloc->buffer);
3802 	}
3803 	mutex_unlock(&alloc->mutex);
3804 
3805 	binder_alloc_debug(BINDER_ALLOC_DEBUG_OPEN_CLOSE,
3806 		     "%s: %d buffers %d, pages %d\n",
3807 		     __func__, alloc->pid, buffers, page_count);
3808 }
3809 
3810 static void binder_deferred_release(struct binder_proc *proc)
3811 {
3812 	struct binder_context *context = proc->context;
3813 	struct rb_node *n;
3814 	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
3815 
3816 	BUG_ON(proc->files);
3817 
3818 	hlist_del(&proc->proc_node);
3819 
3820 	if (context->binder_context_mgr_node &&
3821 	    context->binder_context_mgr_node->proc == proc) {
3822 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
3823 			     "%s: %d context_mgr_node gone\n",
3824 			     __func__, proc->pid);
3825 		context->binder_context_mgr_node = NULL;
3826 	}
3827 
3828 	threads = 0;
3829 	active_transactions = 0;
3830 	while ((n = rb_first(&proc->threads))) {
3831 		struct binder_thread *thread;
3832 
3833 		thread = rb_entry(n, struct binder_thread, rb_node);
3834 		threads++;
3835 		active_transactions += binder_free_thread(proc, thread);
3836 	}
3837 
3838 	nodes = 0;
3839 	incoming_refs = 0;
3840 	while ((n = rb_first(&proc->nodes))) {
3841 		struct binder_node *node;
3842 
3843 		node = rb_entry(n, struct binder_node, rb_node);
3844 		nodes++;
3845 		rb_erase(&node->rb_node, &proc->nodes);
3846 		incoming_refs = binder_node_release(node, incoming_refs);
3847 	}
3848 
3849 	outgoing_refs = 0;
3850 	while ((n = rb_first(&proc->refs_by_desc))) {
3851 		struct binder_ref *ref;
3852 
3853 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
3854 		outgoing_refs++;
3855 		binder_delete_ref(ref);
3856 	}
3857 
3858 	binder_release_work(&proc->todo);
3859 	binder_release_work(&proc->delivered_death);
3860 
3861 	binder_alloc_deferred_release(&proc->alloc);
3862 	binder_stats_deleted(BINDER_STAT_PROC);
3863 
3864 	put_task_struct(proc->tsk);
3865 
3866 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3867 		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
3868 		     __func__, proc->pid, threads, nodes, incoming_refs,
3869 		     outgoing_refs, active_transactions);
3870 
3871 	kfree(proc);
3872 }
3873 
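/*
 * Workqueue function that drains binder_deferred_list, handling the
 * PUT_FILES, FLUSH and RELEASE requests for one proc per iteration.
 * put_files_struct() is deliberately called after dropping the global
 * binder lock.
 */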
3874 static void binder_deferred_func(struct work_struct *work)
3875 {
3876 	struct binder_proc *proc;
3877 	struct files_struct *files;
3878 
3879 	int defer;
3880 
3881 	do {
3882 		binder_lock(__func__);
3883 		mutex_lock(&binder_deferred_lock);
3884 		if (!hlist_empty(&binder_deferred_list)) {
3885 			proc = hlist_entry(binder_deferred_list.first,
3886 					struct binder_proc, deferred_work_node);
3887 			hlist_del_init(&proc->deferred_work_node);
3888 			defer = proc->deferred_work;
3889 			proc->deferred_work = 0;
3890 		} else {
3891 			proc = NULL;
3892 			defer = 0;
3893 		}
3894 		mutex_unlock(&binder_deferred_lock);
3895 
3896 		files = NULL;
3897 		if (defer & BINDER_DEFERRED_PUT_FILES) {
3898 			files = proc->files;
3899 			if (files)
3900 				proc->files = NULL;
3901 		}
3902 
3903 		if (defer & BINDER_DEFERRED_FLUSH)
3904 			binder_deferred_flush(proc);
3905 
3906 		if (defer & BINDER_DEFERRED_RELEASE)
3907 			binder_deferred_release(proc); /* frees proc */
3908 
3909 		binder_unlock(__func__);
3910 		if (files)
3911 			put_files_struct(files);
3912 	} while (proc);
3913 }
3914 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
3915 
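/*
 * Record deferred work for a proc and schedule the shared work item,
 * unless the proc is already queued on the deferred list.
 */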
3916 static void
3917 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
3918 {
3919 	mutex_lock(&binder_deferred_lock);
3920 	proc->deferred_work |= defer;
3921 	if (hlist_unhashed(&proc->deferred_work_node)) {
3922 		hlist_add_head(&proc->deferred_work_node,
3923 				&binder_deferred_list);
3924 		schedule_work(&binder_deferred_work);
3925 	}
3926 	mutex_unlock(&binder_deferred_lock);
3927 }
3928 
3929 static void print_binder_transaction(struct seq_file *m, const char *prefix,
3930 				     struct binder_transaction *t)
3931 {
3932 	seq_printf(m,
3933 		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
3934 		   prefix, t->debug_id, t,
3935 		   t->from ? t->from->proc->pid : 0,
3936 		   t->from ? t->from->pid : 0,
3937 		   t->to_proc ? t->to_proc->pid : 0,
3938 		   t->to_thread ? t->to_thread->pid : 0,
3939 		   t->code, t->flags, t->priority, t->need_reply);
3940 	if (t->buffer == NULL) {
3941 		seq_puts(m, " buffer free\n");
3942 		return;
3943 	}
3944 	if (t->buffer->target_node)
3945 		seq_printf(m, " node %d",
3946 			   t->buffer->target_node->debug_id);
3947 	seq_printf(m, " size %zd:%zd data %p\n",
3948 		   t->buffer->data_size, t->buffer->offsets_size,
3949 		   t->buffer->data);
3950 }
3951 
3952 static void print_binder_work(struct seq_file *m, const char *prefix,
3953 			      const char *transaction_prefix,
3954 			      struct binder_work *w)
3955 {
3956 	struct binder_node *node;
3957 	struct binder_transaction *t;
3958 
3959 	switch (w->type) {
3960 	case BINDER_WORK_TRANSACTION:
3961 		t = container_of(w, struct binder_transaction, work);
3962 		print_binder_transaction(m, transaction_prefix, t);
3963 		break;
3964 	case BINDER_WORK_TRANSACTION_COMPLETE:
3965 		seq_printf(m, "%stransaction complete\n", prefix);
3966 		break;
3967 	case BINDER_WORK_NODE:
3968 		node = container_of(w, struct binder_node, work);
3969 		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
3970 			   prefix, node->debug_id,
3971 			   (u64)node->ptr, (u64)node->cookie);
3972 		break;
3973 	case BINDER_WORK_DEAD_BINDER:
3974 		seq_printf(m, "%shas dead binder\n", prefix);
3975 		break;
3976 	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3977 		seq_printf(m, "%shas cleared dead binder\n", prefix);
3978 		break;
3979 	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
3980 		seq_printf(m, "%shas cleared death notification\n", prefix);
3981 		break;
3982 	default:
3983 		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
3984 		break;
3985 	}
3986 }
3987 
3988 static void print_binder_thread(struct seq_file *m,
3989 				struct binder_thread *thread,
3990 				int print_always)
3991 {
3992 	struct binder_transaction *t;
3993 	struct binder_work *w;
3994 	size_t start_pos = m->count;
3995 	size_t header_pos;
3996 
3997 	seq_printf(m, "  thread %d: l %02x\n", thread->pid, thread->looper);
3998 	header_pos = m->count;
3999 	t = thread->transaction_stack;
4000 	while (t) {
4001 		if (t->from == thread) {
4002 			print_binder_transaction(m,
4003 						 "    outgoing transaction", t);
4004 			t = t->from_parent;
4005 		} else if (t->to_thread == thread) {
4006 			print_binder_transaction(m,
4007 						 "    incoming transaction", t);
4008 			t = t->to_parent;
4009 		} else {
4010 			print_binder_transaction(m, "    bad transaction", t);
4011 			t = NULL;
4012 		}
4013 	}
4014 	list_for_each_entry(w, &thread->todo, entry) {
4015 		print_binder_work(m, "    ", "    pending transaction", w);
4016 	}
4017 	if (!print_always && m->count == header_pos)
4018 		m->count = start_pos;
4019 }
4020 
4021 static void print_binder_node(struct seq_file *m, struct binder_node *node)
4022 {
4023 	struct binder_ref *ref;
4024 	struct binder_work *w;
4025 	int count;
4026 
4027 	count = 0;
4028 	hlist_for_each_entry(ref, &node->refs, node_entry)
4029 		count++;
4030 
4031 	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
4032 		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
4033 		   node->has_strong_ref, node->has_weak_ref,
4034 		   node->local_strong_refs, node->local_weak_refs,
4035 		   node->internal_strong_refs, count);
4036 	if (count) {
4037 		seq_puts(m, " proc");
4038 		hlist_for_each_entry(ref, &node->refs, node_entry)
4039 			seq_printf(m, " %d", ref->proc->pid);
4040 	}
4041 	seq_puts(m, "\n");
4042 	list_for_each_entry(w, &node->async_todo, entry)
4043 		print_binder_work(m, "    ",
4044 				  "    pending async transaction", w);
4045 }
4046 
4047 static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
4048 {
4049 	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %p\n",
4050 		   ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
4051 		   ref->node->debug_id, ref->strong, ref->weak, ref->death);
4052 }
4053 
4054 static void print_binder_buffer(struct seq_file *m, const char *prefix,
4055 				struct binder_buffer *buffer)
4056 {
4057 	seq_printf(m, "%s %d: %pK size %zd:%zd %s\n",
4058 		   prefix, buffer->debug_id, buffer->data,
4059 		   buffer->data_size, buffer->offsets_size,
4060 		   buffer->transaction ? "active" : "delivered");
4061 }
4062 
4063 void binder_alloc_print_allocated(struct seq_file *m,
4064 				  struct binder_alloc *alloc)
4065 {
4066 	struct rb_node *n;
4067 
4068 	mutex_lock(&alloc->mutex);
4069 	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
4070 		print_binder_buffer(m, "  buffer",
4071 				    rb_entry(n, struct binder_buffer, rb_node));
4072 	mutex_unlock(&alloc->mutex);
4073 }
4074 
4075 static void print_binder_proc(struct seq_file *m,
4076 			      struct binder_proc *proc, int print_all)
4077 {
4078 	struct binder_work *w;
4079 	struct rb_node *n;
4080 	size_t start_pos = m->count;
4081 	size_t header_pos;
4082 
4083 	seq_printf(m, "proc %d\n", proc->pid);
4084 	seq_printf(m, "context %s\n", proc->context->name);
4085 	header_pos = m->count;
4086 
4087 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
4088 		print_binder_thread(m, rb_entry(n, struct binder_thread,
4089 						rb_node), print_all);
4090 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4091 		struct binder_node *node = rb_entry(n, struct binder_node,
4092 						    rb_node);
4093 		if (print_all || node->has_async_transaction)
4094 			print_binder_node(m, node);
4095 	}
4096 	if (print_all) {
4097 		for (n = rb_first(&proc->refs_by_desc);
4098 		     n != NULL;
4099 		     n = rb_next(n))
4100 			print_binder_ref(m, rb_entry(n, struct binder_ref,
4101 						     rb_node_desc));
4102 	}
4103 	binder_alloc_print_allocated(m, &proc->alloc);
4104 	list_for_each_entry(w, &proc->todo, entry)
4105 		print_binder_work(m, "  ", "  pending transaction", w);
4106 	list_for_each_entry(w, &proc->delivered_death, entry) {
4107 		seq_puts(m, "  has delivered dead binder\n");
4108 		break;
4109 	}
4110 	if (!print_all && m->count == header_pos)
4111 		m->count = start_pos;
4112 }
4113 
4114 static const char * const binder_return_strings[] = {
4115 	"BR_ERROR",
4116 	"BR_OK",
4117 	"BR_TRANSACTION",
4118 	"BR_REPLY",
4119 	"BR_ACQUIRE_RESULT",
4120 	"BR_DEAD_REPLY",
4121 	"BR_TRANSACTION_COMPLETE",
4122 	"BR_INCREFS",
4123 	"BR_ACQUIRE",
4124 	"BR_RELEASE",
4125 	"BR_DECREFS",
4126 	"BR_ATTEMPT_ACQUIRE",
4127 	"BR_NOOP",
4128 	"BR_SPAWN_LOOPER",
4129 	"BR_FINISHED",
4130 	"BR_DEAD_BINDER",
4131 	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
4132 	"BR_FAILED_REPLY"
4133 };
4134 
4135 static const char * const binder_command_strings[] = {
4136 	"BC_TRANSACTION",
4137 	"BC_REPLY",
4138 	"BC_ACQUIRE_RESULT",
4139 	"BC_FREE_BUFFER",
4140 	"BC_INCREFS",
4141 	"BC_ACQUIRE",
4142 	"BC_RELEASE",
4143 	"BC_DECREFS",
4144 	"BC_INCREFS_DONE",
4145 	"BC_ACQUIRE_DONE",
4146 	"BC_ATTEMPT_ACQUIRE",
4147 	"BC_REGISTER_LOOPER",
4148 	"BC_ENTER_LOOPER",
4149 	"BC_EXIT_LOOPER",
4150 	"BC_REQUEST_DEATH_NOTIFICATION",
4151 	"BC_CLEAR_DEATH_NOTIFICATION",
4152 	"BC_DEAD_BINDER_DONE",
4153 	"BC_TRANSACTION_SG",
4154 	"BC_REPLY_SG",
4155 };
4156 
4157 static const char * const binder_objstat_strings[] = {
4158 	"proc",
4159 	"thread",
4160 	"node",
4161 	"ref",
4162 	"death",
4163 	"transaction",
4164 	"transaction_complete"
4165 };
4166 
4167 static void print_binder_stats(struct seq_file *m, const char *prefix,
4168 			       struct binder_stats *stats)
4169 {
4170 	int i;
4171 
4172 	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
4173 		     ARRAY_SIZE(binder_command_strings));
4174 	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
4175 		if (stats->bc[i])
4176 			seq_printf(m, "%s%s: %d\n", prefix,
4177 				   binder_command_strings[i], stats->bc[i]);
4178 	}
4179 
4180 	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
4181 		     ARRAY_SIZE(binder_return_strings));
4182 	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
4183 		if (stats->br[i])
4184 			seq_printf(m, "%s%s: %d\n", prefix,
4185 				   binder_return_strings[i], stats->br[i]);
4186 	}
4187 
4188 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
4189 		     ARRAY_SIZE(binder_objstat_strings));
4190 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
4191 		     ARRAY_SIZE(stats->obj_deleted));
4192 	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
4193 		if (stats->obj_created[i] || stats->obj_deleted[i])
4194 			seq_printf(m, "%s%s: active %d total %d\n", prefix,
4195 				binder_objstat_strings[i],
4196 				stats->obj_created[i] - stats->obj_deleted[i],
4197 				stats->obj_created[i]);
4198 	}
4199 }
4200 
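/*
 * binder_alloc_get_allocated_count() - number of in-use buffers,
 * counted under the allocator mutex.
 */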
4201 int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
4202 {
4203 	struct rb_node *n;
4204 	int count = 0;
4205 
4206 	mutex_lock(&alloc->mutex);
4207 	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
4208 		count++;
4209 	mutex_unlock(&alloc->mutex);
4210 	return count;
4211 }
4212 
4213 static void print_binder_proc_stats(struct seq_file *m,
4214 				    struct binder_proc *proc)
4215 {
4216 	struct binder_work *w;
4217 	struct rb_node *n;
4218 	int count, strong, weak;
4219 
4220 	seq_printf(m, "proc %d\n", proc->pid);
4221 	seq_printf(m, "context %s\n", proc->context->name);
4222 	count = 0;
4223 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
4224 		count++;
4225 	seq_printf(m, "  threads: %d\n", count);
4226 	seq_printf(m, "  requested threads: %d+%d/%d\n"
4227 			"  ready threads %d\n"
4228 			"  free async space %zd\n", proc->requested_threads,
4229 			proc->requested_threads_started, proc->max_threads,
4230 			proc->ready_threads,
4231 			binder_alloc_get_free_async_space(&proc->alloc));
4232 	count = 0;
4233 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
4234 		count++;
4235 	seq_printf(m, "  nodes: %d\n", count);
4236 	count = 0;
4237 	strong = 0;
4238 	weak = 0;
4239 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
4240 		struct binder_ref *ref = rb_entry(n, struct binder_ref,
4241 						  rb_node_desc);
4242 		count++;
4243 		strong += ref->strong;
4244 		weak += ref->weak;
4245 	}
4246 	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
4247 
4248 	count = binder_alloc_get_allocated_count(&proc->alloc);
4249 	seq_printf(m, "  buffers: %d\n", count);
4250 
4251 	count = 0;
4252 	list_for_each_entry(w, &proc->todo, entry) {
4253 		switch (w->type) {
4254 		case BINDER_WORK_TRANSACTION:
4255 			count++;
4256 			break;
4257 		default:
4258 			break;
4259 		}
4260 	}
4261 	seq_printf(m, "  pending transactions: %d\n", count);
4262 
4263 	print_binder_stats(m, "  ", &proc->stats);
4264 }
4265 
4266 
4267 static int binder_state_show(struct seq_file *m, void *unused)
4268 {
4269 	struct binder_proc *proc;
4270 	struct binder_node *node;
4271 	int do_lock = !binder_debug_no_lock;
4272 
4273 	if (do_lock)
4274 		binder_lock(__func__);
4275 
4276 	seq_puts(m, "binder state:\n");
4277 
4278 	if (!hlist_empty(&binder_dead_nodes))
4279 		seq_puts(m, "dead nodes:\n");
4280 	hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
4281 		print_binder_node(m, node);
4282 
4283 	hlist_for_each_entry(proc, &binder_procs, proc_node)
4284 		print_binder_proc(m, proc, 1);
4285 	if (do_lock)
4286 		binder_unlock(__func__);
4287 	return 0;
4288 }
4289 
4290 static int binder_stats_show(struct seq_file *m, void *unused)
4291 {
4292 	struct binder_proc *proc;
4293 	int do_lock = !binder_debug_no_lock;
4294 
4295 	if (do_lock)
4296 		binder_lock(__func__);
4297 
4298 	seq_puts(m, "binder stats:\n");
4299 
4300 	print_binder_stats(m, "", &binder_stats);
4301 
4302 	hlist_for_each_entry(proc, &binder_procs, proc_node)
4303 		print_binder_proc_stats(m, proc);
4304 	if (do_lock)
4305 		binder_unlock(__func__);
4306 	return 0;
4307 }
4308 
4309 static int binder_transactions_show(struct seq_file *m, void *unused)
4310 {
4311 	struct binder_proc *proc;
4312 	int do_lock = !binder_debug_no_lock;
4313 
4314 	if (do_lock)
4315 		binder_lock(__func__);
4316 
4317 	seq_puts(m, "binder transactions:\n");
4318 	hlist_for_each_entry(proc, &binder_procs, proc_node)
4319 		print_binder_proc(m, proc, 0);
4320 	if (do_lock)
4321 		binder_unlock(__func__);
4322 	return 0;
4323 }
4324 
4325 static int binder_proc_show(struct seq_file *m, void *unused)
4326 {
4327 	struct binder_proc *itr;
4328 	int pid = (unsigned long)m->private;
4329 	int do_lock = !binder_debug_no_lock;
4330 
4331 	if (do_lock)
4332 		binder_lock(__func__);
4333 
4334 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
4335 		if (itr->pid == pid) {
4336 			seq_puts(m, "binder proc state:\n");
4337 			print_binder_proc(m, itr, 1);
4338 		}
4339 	}
4340 	if (do_lock)
4341 		binder_unlock(__func__);
4342 	return 0;
4343 }
4344 
4345 static void print_binder_transaction_log_entry(struct seq_file *m,
4346 					struct binder_transaction_log_entry *e)
4347 {
4348 	seq_printf(m,
4349 		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n",
4350 		   e->debug_id, (e->call_type == 2) ? "reply" :
4351 		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
4352 		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
4353 		   e->to_node, e->target_handle, e->data_size, e->offsets_size);
4354 }
4355 
4356 static int binder_transaction_log_show(struct seq_file *m, void *unused)
4357 {
4358 	struct binder_transaction_log *log = m->private;
4359 	int i;
4360 
4361 	if (log->full) {
4362 		for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
4363 			print_binder_transaction_log_entry(m, &log->entry[i]);
4364 	}
4365 	for (i = 0; i < log->next; i++)
4366 		print_binder_transaction_log_entry(m, &log->entry[i]);
4367 	return 0;
4368 }
4369 
4370 static const struct file_operations binder_fops = {
4371 	.owner = THIS_MODULE,
4372 	.poll = binder_poll,
4373 	.unlocked_ioctl = binder_ioctl,
4374 	.compat_ioctl = binder_ioctl,
4375 	.mmap = binder_mmap,
4376 	.open = binder_open,
4377 	.flush = binder_flush,
4378 	.release = binder_release,
4379 };
4380 
4381 BINDER_DEBUG_ENTRY(state);
4382 BINDER_DEBUG_ENTRY(stats);
4383 BINDER_DEBUG_ENTRY(transactions);
4384 BINDER_DEBUG_ENTRY(transaction_log);
4385 
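/*
 * Register one binder misc device under the given name and add it to
 * binder_devices. Called once per name parsed out of the
 * binder_devices_param string.
 */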
4386 static int __init init_binder_device(const char *name)
4387 {
4388 	int ret;
4389 	struct binder_device *binder_device;
4390 
4391 	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
4392 	if (!binder_device)
4393 		return -ENOMEM;
4394 
4395 	binder_device->miscdev.fops = &binder_fops;
4396 	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
4397 	binder_device->miscdev.name = name;
4398 
4399 	binder_device->context.binder_context_mgr_uid = INVALID_UID;
4400 	binder_device->context.name = name;
4401 
4402 	ret = misc_register(&binder_device->miscdev);
4403 	if (ret < 0) {
4404 		kfree(binder_device);
4405 		return ret;
4406 	}
4407 
4408 	hlist_add_head(&binder_device->hlist, &binder_devices);
4409 
4410 	return ret;
4411 }
4412 
4413 static int __init binder_init(void)
4414 {
4415 	int ret;
4416 	char *device_name, *device_names;
4417 	struct binder_device *device;
4418 	struct hlist_node *tmp;
4419 
4420 	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
4421 	if (binder_debugfs_dir_entry_root)
4422 		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
4423 						 binder_debugfs_dir_entry_root);
4424 
4425 	if (binder_debugfs_dir_entry_root) {
4426 		debugfs_create_file("state",
4427 				    S_IRUGO,
4428 				    binder_debugfs_dir_entry_root,
4429 				    NULL,
4430 				    &binder_state_fops);
4431 		debugfs_create_file("stats",
4432 				    S_IRUGO,
4433 				    binder_debugfs_dir_entry_root,
4434 				    NULL,
4435 				    &binder_stats_fops);
4436 		debugfs_create_file("transactions",
4437 				    S_IRUGO,
4438 				    binder_debugfs_dir_entry_root,
4439 				    NULL,
4440 				    &binder_transactions_fops);
4441 		debugfs_create_file("transaction_log",
4442 				    S_IRUGO,
4443 				    binder_debugfs_dir_entry_root,
4444 				    &binder_transaction_log,
4445 				    &binder_transaction_log_fops);
4446 		debugfs_create_file("failed_transaction_log",
4447 				    S_IRUGO,
4448 				    binder_debugfs_dir_entry_root,
4449 				    &binder_transaction_log_failed,
4450 				    &binder_transaction_log_fops);
4451 	}
4452 
4453 	/*
4454 	 * Copy the module parameter string, because we don't want to
4455 	 * tokenize it in-place.
4456 	 */
4457 	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
4458 	if (!device_names) {
4459 		ret = -ENOMEM;
4460 		goto err_alloc_device_names_failed;
4461 	}
4462 	strcpy(device_names, binder_devices_param);
4463 
4464 	while ((device_name = strsep(&device_names, ","))) {
4465 		ret = init_binder_device(device_name);
4466 		if (ret)
4467 			goto err_init_binder_device_failed;
4468 	}
4469 
4470 	return ret;
4471 
4472 err_init_binder_device_failed:
4473 	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
4474 		misc_deregister(&device->miscdev);
4475 		hlist_del(&device->hlist);
4476 		kfree(device);
4477 	}
4478 err_alloc_device_names_failed:
4479 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
4480 
4481 	return ret;
4482 }
4483 
4484 device_initcall(binder_init);
4485 
4486 #define CREATE_TRACE_POINTS
4487 #include "binder_trace.h"
4488 
4489 MODULE_LICENSE("GPL v2");
4490