xref: /openbmc/linux/drivers/android/binder.c (revision 8730046c)
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_trace.h"

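/*
 * NOTE: this revision predates the fine-grained locking rework; the
 * single coarse-grained binder_main_lock below serializes almost every
 * operation in the driver (see binder_lock()/binder_unlock()).
 */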
static DEFINE_MUTEX(binder_main_lock);
static DEFINE_MUTEX(binder_deferred_lock);
static DEFINE_MUTEX(binder_mmap_lock);

static HLIST_HEAD(binder_procs);
static HLIST_HEAD(binder_deferred_list);
static HLIST_HEAD(binder_dead_nodes);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static struct binder_node *binder_context_mgr_node;
static kuid_t binder_context_mgr_uid = INVALID_UID;
static int binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K                               0x400
#endif

#ifndef SZ_4M
#define SZ_4M                               0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static bool binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
	param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
	int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
	int obj_created[BINDER_STAT_COUNT];
	int obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	binder_stats.obj_deleted[type]++;
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	binder_stats.obj_created[type]++;
}

struct binder_transaction_log_entry {
	int debug_id;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
};
struct binder_transaction_log {
	int next;
	int full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

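/*
 * Append a fresh entry to a transaction log.  The log is a fixed-size
 * ring buffer: once all 32 slots have been used, "next" wraps around
 * and the oldest entries are overwritten, with "full" recording that
 * the wrap has happened.
 */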
static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;

	e = &log->entry[log->next];
	memset(e, 0, sizeof(*e));
	log->next++;
	if (log->next == ARRAY_SIZE(log->entry)) {
		log->next = 0;
		log->full = 1;
	}
	return e;
}

struct binder_work {
	struct list_head entry;
	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

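/*
 * A binder_node is the kernel-side object backing a binder object that
 * lives in "proc".  It is keyed by the object's userspace address
 * (ptr) in the per-process nodes rbtree.  While the owning process is
 * alive the node sits in that rbtree; once the process dies it moves
 * to the global binder_dead_nodes list, hence the union below.
 */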
struct binder_node {
	int debug_id;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	unsigned has_strong_ref:1;
	unsigned pending_strong_ref:1;
	unsigned has_weak_ref:1;
	unsigned pending_weak_ref:1;
	unsigned has_async_transaction:1;
	unsigned accept_fds:1;
	unsigned min_priority:8;
	struct list_head async_todo;
};

struct binder_ref_death {
	struct binder_work work;
	binder_uintptr_t cookie;
};

struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	uint32_t desc;
	int strong;
	int weak;
	struct binder_ref_death *death;
};

struct binder_buffer {
	struct list_head entry; /* free and allocated entries by address */
	struct rb_node rb_node; /* free entry by size or allocated entry */
				/* by address */
	unsigned free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned debug_id:29;

	struct binder_transaction *transaction;

	struct binder_node *target_node;
	size_t data_size;
	size_t offsets_size;
	uint8_t data[0];
};

enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};

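/*
 * Per-process state, created at open() time.  The four rbtrees index
 * this process's threads, the nodes it owns, and its references to
 * remote nodes (by descriptor and by node).  The buffer/pages fields
 * describe the mmap'ed transaction area that binder_alloc_buf() carves
 * transaction buffers out of.
 */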
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct vm_area_struct *vma;
	struct mm_struct *vma_vm_mm;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;
	void *buffer;
	ptrdiff_t user_buffer_offset;

	struct list_head buffers;
	struct rb_root free_buffers;
	struct rb_root allocated_buffers;
	size_t free_async_space;

	struct page **pages;
	size_t buffer_size;
	uint32_t buffer_free;
	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	long default_priority;
	struct dentry *debugfs_entry;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};

struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	int pid;
	int looper;
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	uint32_t return_error; /* Write failed, return error code in read buf */
	uint32_t return_error2; /* Write failed, return error code in read */
		/* buffer. Used when sending a reply to a dead process that */
		/* we are also waiting on */
	wait_queue_head_t wait;
	struct binder_stats stats;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int	code;
	unsigned int	flags;
	long	priority;
	long	saved_priority;
	kuid_t	sender_euid;
};

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);

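/*
 * Reserve an unused fd in the *target* process's file table (not the
 * caller's), bounded by the target's RLIMIT_NOFILE.  Used when a
 * BINDER_TYPE_FD object is transferred in a transaction.
 */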
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}

static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}

static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed, using %ld instead\n",
		      current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

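/*
 * Buffers are laid out back to back in the mmap area, so a buffer's
 * usable size is simply the distance from its data[] to the start of
 * the next buffer, or to the end of the area for the last buffer.
 */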
static size_t binder_buffer_size(struct binder_proc *proc,
				 struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &proc->buffers))
		return proc->buffer + proc->buffer_size - (void *)buffer->data;
	return (size_t)list_entry(buffer->entry.next,
			  struct binder_buffer, entry) - (size_t)buffer->data;
}

static void binder_insert_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_buffer_size(proc, new_buffer);

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: add free buffer, size %zd, at %p\n",
		      proc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_buffer_size(proc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
}

static void binder_insert_allocated_buffer(struct binder_proc *proc,
					   struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer < buffer)
			p = &parent->rb_left;
		else if (new_buffer > buffer)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
}

static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
						  uintptr_t user_ptr)
{
	struct rb_node *n = proc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *kern_ptr;

	kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
		- offsetof(struct binder_buffer, data));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer)
			n = n->rb_left;
		else if (kern_ptr > buffer)
			n = n->rb_right;
		else
			return buffer;
	}
	return NULL;
}

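/*
 * Allocate (allocate != 0) or free (allocate == 0) the physical pages
 * backing [start, end) of the binder buffer area.  Each page is mapped
 * twice: into the kernel via map_kernel_range_noflush() and into the
 * target process's address space via vm_insert_page(), offset by
 * proc->user_buffer_offset.
 */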
static int binder_update_page_range(struct binder_proc *proc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct page **page;
	struct mm_struct *mm;

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: %s pages %p-%p\n", proc->pid,
		     allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(proc, allocate, start, end);

	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(proc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = proc->vma;
		if (vma && mm != proc->vma_vm_mm) {
			pr_err("%d: vma mm and task mm mismatch\n",
				proc->pid);
			vma = NULL;
		}
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
			proc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;

		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (*page == NULL) {
			pr_err("%d: binder_alloc_buf failed for page at %p\n",
				proc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		ret = map_kernel_range_noflush((unsigned long)page_addr,
					PAGE_SIZE, PAGE_KERNEL, page);
		flush_cache_vmap((unsigned long)page_addr,
				(unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
			       proc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + proc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       proc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return -ENOMEM;
}

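/*
 * Best-fit allocation from the free_buffers rbtree, which is ordered
 * by size: walk down to the smallest free buffer that still fits, map
 * pages for it, and split off the tail as a new free buffer when the
 * remainder is big enough to be useful.
 */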
static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
					      size_t data_size,
					      size_t offsets_size, int is_async)
{
	struct rb_node *n = proc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size;

	if (proc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       proc->pid);
		return NULL;
	}

	size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (size < data_size || size < offsets_size) {
		binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
				proc->pid, data_size, offsets_size);
		return NULL;
	}

	if (is_async &&
	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			      proc->pid, size);
		return NULL;
	}

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_buffer_size(proc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
			proc->pid, size);
		return NULL;
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_buffer_size(proc, buffer);
	}

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
		      proc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	if (n == NULL) {
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
			buffer_size = size; /* no room for other buffers */
		else
			buffer_size = size + sizeof(struct binder_buffer);
	}
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	if (binder_update_page_range(proc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
		return NULL;

	rb_erase(best_fit, &proc->free_buffers);
	buffer->free = 0;
	binder_insert_allocated_buffer(proc, buffer);
	if (buffer_size != size) {
		struct binder_buffer *new_buffer = (void *)buffer->data + size;

		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(proc, new_buffer);
	}
	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got %p\n",
		      proc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	if (is_async) {
		proc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
			      proc->pid, size, proc->free_async_space);
	}

	return buffer;
}

static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer & PAGE_MASK);
}

static void *buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}

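/*
 * Unlink a free buffer whose neighbour is also free so the two can be
 * coalesced.  Backing pages are only released for the parts of the
 * buffer that do not share a page with the previous or next buffer.
 */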
static void binder_delete_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	int free_page_end = 1;
	int free_page_start = 1;

	BUG_ON(proc->buffers.next == &buffer->entry);
	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
	BUG_ON(!prev->free);
	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
		free_page_start = 0;
		if (buffer_end_page(prev) == buffer_end_page(buffer))
			free_page_end = 0;
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: merge free, buffer %p share page with %p\n",
			      proc->pid, buffer, prev);
	}

	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		next = list_entry(buffer->entry.next,
				  struct binder_buffer, entry);
		if (buffer_start_page(next) == buffer_end_page(buffer)) {
			free_page_end = 0;
			if (buffer_start_page(next) ==
			    buffer_start_page(buffer))
				free_page_start = 0;
			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%d: merge free, buffer %p share page with %p\n",
				      proc->pid, buffer, next);
		}
	}
	list_del(&buffer->entry);
	if (free_page_start || free_page_end) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
			     proc->pid, buffer, free_page_start ? "" : " end",
			     free_page_end ? "" : " start", prev, next);
		binder_update_page_range(proc, 0, free_page_start ?
			buffer_start_page(buffer) : buffer_end_page(buffer),
			(free_page_end ? buffer_end_page(buffer) :
			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
	}
}

static void binder_free_buf(struct binder_proc *proc,
			    struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_buffer_size(proc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *));

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_free_buf %p size %zd buffer_size %zd\n",
		      proc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < proc->buffer);
	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);

	if (buffer->async_transaction) {
		proc->free_async_space += size + sizeof(struct binder_buffer);

		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			      proc->pid, size, proc->free_async_space);
	}

	binder_update_page_range(proc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);
	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		struct binder_buffer *next = list_entry(buffer->entry.next,
						struct binder_buffer, entry);

		if (next->free) {
			rb_erase(&next->rb_node, &proc->free_buffers);
			binder_delete_free_buffer(proc, next);
		}
	}
	if (proc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer, entry);

		if (prev->free) {
			binder_delete_free_buffer(proc, buffer);
			rb_erase(&prev->rb_node, &proc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(proc, buffer);
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   binder_uintptr_t ptr,
					   binder_uintptr_t cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = ++binder_last_id;
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);
	return node;
}

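/*
 * Node reference accounting.  "internal" refs are those held on behalf
 * of binder_ref objects in other processes; "local" refs track the
 * strong and weak counts pinned by the owning process itself.  When a
 * node gains its first strong or weak ref, its work entry is queued on
 * target_list so the owner can be told on the read side.
 */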
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node == binder_context_mgr_node &&
			    node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}

static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				hlist_del(&node->dead_node);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}

	return 0;
}


static struct binder_ref *binder_get_ref(struct binder_proc *proc,
					 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->desc) {
			n = n->rb_left;
		} else if (desc > ref->desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

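/*
 * Look up, or create, this process's binder_ref for "node".  A new ref
 * gets the lowest unused descriptor: desc 0 is reserved for the
 * context manager, so ordinary refs start at 1 and the refs_by_desc
 * walk below finds the first gap.
 */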
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	if (node) {
		hlist_add_head(&new_ref->node_entry, &node->refs);

		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for node %d\n",
			      proc->pid, new_ref->debug_id, new_ref->desc,
			      node->debug_id);
	} else {
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for dead node\n",
			      proc->pid, new_ref->debug_id, new_ref->desc);
	}
	return new_ref;
}

static void binder_delete_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->debug_id, ref->desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	hlist_del(&ref->node_entry);
	binder_dec_node(ref->node, 0, 1);
	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	kfree(ref);
	binder_stats_deleted(BINDER_STAT_REF);
}

static int binder_inc_ref(struct binder_ref *ref, int strong,
			  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->strong++;
	} else {
		if (ref->weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->weak++;
	}
	return 0;
}


static int binder_dec_ref(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->strong--;
		if (ref->strong == 0) {
			int ret;

			ret = binder_dec_node(ref->node, strong, 1);
			if (ret)
				return ret;
		}
	} else {
		if (ref->weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->weak--;
	}
	if (ref->strong == 0 && ref->weak == 0)
		binder_delete_ref(ref);
	return 0;
}

static void binder_pop_transaction(struct binder_thread *target_thread,
				   struct binder_transaction *t)
{
	if (target_thread) {
		BUG_ON(target_thread->transaction_stack != t);
		BUG_ON(target_thread->transaction_stack->from != target_thread);
		target_thread->transaction_stack =
			target_thread->transaction_stack->from_parent;
		t->from = NULL;
	}
	t->need_reply = 0;
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

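/*
 * Deliver error_code to the sender of transaction t.  If the sending
 * thread is gone, walk up the from_parent chain and fail each nested
 * transaction in turn until a live thread, or the root of the stack,
 * is found.
 */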
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			if (target_thread->return_error != BR_OK &&
			    target_thread->return_error2 == BR_OK) {
				target_thread->return_error2 =
					target_thread->return_error;
				target_thread->return_error = BR_OK;
			}
			if (target_thread->return_error == BR_OK) {
				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
					     "send failed reply for transaction %d to %d:%d\n",
					      t->debug_id,
					      target_thread->proc->pid,
					      target_thread->pid);

				binder_pop_transaction(target_thread, t);
				target_thread->return_error = error_code;
				wake_up_interruptible(&target_thread->wait);
			} else {
				pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
					target_thread->proc->pid,
					target_thread->pid,
					target_thread->return_error);
			}
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_pop_transaction(target_thread, t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			      t->debug_id);
	}
}

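/*
 * Undo the side effects of binder_transaction() for every object
 * embedded in "buffer": drop the node and ref counts taken for binder
 * objects and handles.  failed_at bounds the walk when only part of
 * the offsets array had been processed before a failure; in that case
 * fds already installed in the target are closed again too.
 */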
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	offp = (binder_size_t *)(buffer->data +
				 ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)offp + buffer->offsets_size;
	for (; offp < off_end; offp++) {
		struct flat_binder_object *fp;

		if (*offp > buffer->data_size - sizeof(*fp) ||
		    buffer->data_size < sizeof(*fp) ||
		    !IS_ALIGNED(*offp, sizeof(u32))) {
			pr_err("transaction release %d bad offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		fp = (struct flat_binder_object *)(buffer->data + *offp);
		switch (fp->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct binder_node *node = binder_get_node(proc, fp->binder);

			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct binder_ref *ref;

			ref = binder_get_ref(proc, fp->handle,
					     fp->type == BINDER_TYPE_HANDLE);

			if (ref == NULL) {
				pr_err("transaction release %d bad handle %d\n",
				       debug_id, fp->handle);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d (node %d)\n",
				     ref->debug_id, ref->desc, ref->node->debug_id);
			binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
		} break;

		case BINDER_TYPE_FD:
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->handle);
			if (failed_at)
				task_close_fd(proc, fp->handle);
			break;

		default:
			pr_err("transaction release %d bad object type %x\n",
				debug_id, fp->type);
			break;
		}
	}
}

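/*
 * The heart of the driver: carry out a BC_TRANSACTION or BC_REPLY from
 * "thread".  Find the target node/proc/thread, allocate a buffer in
 * the target, copy the payload across, and translate every flattened
 * object in the offsets array (binder -> handle, handle -> binder or
 * new handle, fd -> fd in the target), then queue the work and wake
 * the target.
 */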
1323 static void binder_transaction(struct binder_proc *proc,
1324 			       struct binder_thread *thread,
1325 			       struct binder_transaction_data *tr, int reply)
1326 {
1327 	struct binder_transaction *t;
1328 	struct binder_work *tcomplete;
1329 	binder_size_t *offp, *off_end;
1330 	binder_size_t off_min;
1331 	struct binder_proc *target_proc;
1332 	struct binder_thread *target_thread = NULL;
1333 	struct binder_node *target_node = NULL;
1334 	struct list_head *target_list;
1335 	wait_queue_head_t *target_wait;
1336 	struct binder_transaction *in_reply_to = NULL;
1337 	struct binder_transaction_log_entry *e;
1338 	uint32_t return_error;
1339 
1340 	e = binder_transaction_log_add(&binder_transaction_log);
1341 	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
1342 	e->from_proc = proc->pid;
1343 	e->from_thread = thread->pid;
1344 	e->target_handle = tr->target.handle;
1345 	e->data_size = tr->data_size;
1346 	e->offsets_size = tr->offsets_size;
1347 
1348 	if (reply) {
1349 		in_reply_to = thread->transaction_stack;
1350 		if (in_reply_to == NULL) {
1351 			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
1352 					  proc->pid, thread->pid);
1353 			return_error = BR_FAILED_REPLY;
1354 			goto err_empty_call_stack;
1355 		}
1356 		binder_set_nice(in_reply_to->saved_priority);
1357 		if (in_reply_to->to_thread != thread) {
1358 			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
1359 				proc->pid, thread->pid, in_reply_to->debug_id,
1360 				in_reply_to->to_proc ?
1361 				in_reply_to->to_proc->pid : 0,
1362 				in_reply_to->to_thread ?
1363 				in_reply_to->to_thread->pid : 0);
1364 			return_error = BR_FAILED_REPLY;
1365 			in_reply_to = NULL;
1366 			goto err_bad_call_stack;
1367 		}
1368 		thread->transaction_stack = in_reply_to->to_parent;
1369 		target_thread = in_reply_to->from;
1370 		if (target_thread == NULL) {
1371 			return_error = BR_DEAD_REPLY;
1372 			goto err_dead_binder;
1373 		}
1374 		if (target_thread->transaction_stack != in_reply_to) {
1375 			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
1376 				proc->pid, thread->pid,
1377 				target_thread->transaction_stack ?
1378 				target_thread->transaction_stack->debug_id : 0,
1379 				in_reply_to->debug_id);
1380 			return_error = BR_FAILED_REPLY;
1381 			in_reply_to = NULL;
1382 			target_thread = NULL;
1383 			goto err_dead_binder;
1384 		}
1385 		target_proc = target_thread->proc;
1386 	} else {
1387 		if (tr->target.handle) {
1388 			struct binder_ref *ref;
1389 
1390 			ref = binder_get_ref(proc, tr->target.handle, true);
1391 			if (ref == NULL) {
1392 				binder_user_error("%d:%d got transaction to invalid handle\n",
1393 					proc->pid, thread->pid);
1394 				return_error = BR_FAILED_REPLY;
1395 				goto err_invalid_target_handle;
1396 			}
1397 			target_node = ref->node;
1398 		} else {
1399 			target_node = binder_context_mgr_node;
1400 			if (target_node == NULL) {
1401 				return_error = BR_DEAD_REPLY;
1402 				goto err_no_context_mgr_node;
1403 			}
1404 		}
1405 		e->to_node = target_node->debug_id;
1406 		target_proc = target_node->proc;
1407 		if (target_proc == NULL) {
1408 			return_error = BR_DEAD_REPLY;
1409 			goto err_dead_binder;
1410 		}
1411 		if (security_binder_transaction(proc->tsk,
1412 						target_proc->tsk) < 0) {
1413 			return_error = BR_FAILED_REPLY;
1414 			goto err_invalid_target_handle;
1415 		}
1416 		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
1417 			struct binder_transaction *tmp;
1418 
1419 			tmp = thread->transaction_stack;
1420 			if (tmp->to_thread != thread) {
1421 				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
1422 					proc->pid, thread->pid, tmp->debug_id,
1423 					tmp->to_proc ? tmp->to_proc->pid : 0,
1424 					tmp->to_thread ?
1425 					tmp->to_thread->pid : 0);
1426 				return_error = BR_FAILED_REPLY;
1427 				goto err_bad_call_stack;
1428 			}
1429 			while (tmp) {
1430 				if (tmp->from && tmp->from->proc == target_proc)
1431 					target_thread = tmp->from;
1432 				tmp = tmp->from_parent;
1433 			}
1434 		}
1435 	}
1436 	if (target_thread) {
1437 		e->to_thread = target_thread->pid;
1438 		target_list = &target_thread->todo;
1439 		target_wait = &target_thread->wait;
1440 	} else {
1441 		target_list = &target_proc->todo;
1442 		target_wait = &target_proc->wait;
1443 	}
1444 	e->to_proc = target_proc->pid;
1445 
1446 	/* TODO: reuse incoming transaction for reply */
1447 	t = kzalloc(sizeof(*t), GFP_KERNEL);
1448 	if (t == NULL) {
1449 		return_error = BR_FAILED_REPLY;
1450 		goto err_alloc_t_failed;
1451 	}
1452 	binder_stats_created(BINDER_STAT_TRANSACTION);
1453 
1454 	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
1455 	if (tcomplete == NULL) {
1456 		return_error = BR_FAILED_REPLY;
1457 		goto err_alloc_tcomplete_failed;
1458 	}
1459 	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
1460 
1461 	t->debug_id = ++binder_last_id;
1462 	e->debug_id = t->debug_id;
1463 
1464 	if (reply)
1465 		binder_debug(BINDER_DEBUG_TRANSACTION,
1466 			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
1467 			     proc->pid, thread->pid, t->debug_id,
1468 			     target_proc->pid, target_thread->pid,
1469 			     (u64)tr->data.ptr.buffer,
1470 			     (u64)tr->data.ptr.offsets,
1471 			     (u64)tr->data_size, (u64)tr->offsets_size);
1472 	else
1473 		binder_debug(BINDER_DEBUG_TRANSACTION,
1474 			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
1475 			     proc->pid, thread->pid, t->debug_id,
1476 			     target_proc->pid, target_node->debug_id,
1477 			     (u64)tr->data.ptr.buffer,
1478 			     (u64)tr->data.ptr.offsets,
1479 			     (u64)tr->data_size, (u64)tr->offsets_size);
1480 
1481 	if (!reply && !(tr->flags & TF_ONE_WAY))
1482 		t->from = thread;
1483 	else
1484 		t->from = NULL;
1485 	t->sender_euid = task_euid(proc->tsk);
1486 	t->to_proc = target_proc;
1487 	t->to_thread = target_thread;
1488 	t->code = tr->code;
1489 	t->flags = tr->flags;
1490 	t->priority = task_nice(current);
1491 
1492 	trace_binder_transaction(reply, t, target_node);
1493 
1494 	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
1495 		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
1496 	if (t->buffer == NULL) {
1497 		return_error = BR_FAILED_REPLY;
1498 		goto err_binder_alloc_buf_failed;
1499 	}
1500 	t->buffer->allow_user_free = 0;
1501 	t->buffer->debug_id = t->debug_id;
1502 	t->buffer->transaction = t;
1503 	t->buffer->target_node = target_node;
1504 	trace_binder_transaction_alloc_buf(t->buffer);
1505 	if (target_node)
1506 		binder_inc_node(target_node, 1, 0, NULL);
1507 
1508 	offp = (binder_size_t *)(t->buffer->data +
1509 				 ALIGN(tr->data_size, sizeof(void *)));
1510 
1511 	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
1512 			   tr->data.ptr.buffer, tr->data_size)) {
1513 		binder_user_error("%d:%d got transaction with invalid data ptr\n",
1514 				proc->pid, thread->pid);
1515 		return_error = BR_FAILED_REPLY;
1516 		goto err_copy_data_failed;
1517 	}
1518 	if (copy_from_user(offp, (const void __user *)(uintptr_t)
1519 			   tr->data.ptr.offsets, tr->offsets_size)) {
1520 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
1521 				proc->pid, thread->pid);
1522 		return_error = BR_FAILED_REPLY;
1523 		goto err_copy_data_failed;
1524 	}
1525 	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
1526 		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
1527 				proc->pid, thread->pid, (u64)tr->offsets_size);
1528 		return_error = BR_FAILED_REPLY;
1529 		goto err_bad_offset;
1530 	}
1531 	off_end = (void *)offp + tr->offsets_size;
1532 	off_min = 0;
1533 	for (; offp < off_end; offp++) {
1534 		struct flat_binder_object *fp;
1535 
1536 		if (*offp > t->buffer->data_size - sizeof(*fp) ||
1537 		    *offp < off_min ||
1538 		    t->buffer->data_size < sizeof(*fp) ||
1539 		    !IS_ALIGNED(*offp, sizeof(u32))) {
1540 			binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
1541 					  proc->pid, thread->pid, (u64)*offp,
1542 					  (u64)off_min,
1543 					  (u64)(t->buffer->data_size -
1544 					  sizeof(*fp)));
1545 			return_error = BR_FAILED_REPLY;
1546 			goto err_bad_offset;
1547 		}
1548 		fp = (struct flat_binder_object *)(t->buffer->data + *offp);
1549 		off_min = *offp + sizeof(struct flat_binder_object);
1550 		switch (fp->type) {
1551 		case BINDER_TYPE_BINDER:
1552 		case BINDER_TYPE_WEAK_BINDER: {
1553 			struct binder_ref *ref;
1554 			struct binder_node *node = binder_get_node(proc, fp->binder);
1555 
1556 			if (node == NULL) {
1557 				node = binder_new_node(proc, fp->binder, fp->cookie);
1558 				if (node == NULL) {
1559 					return_error = BR_FAILED_REPLY;
1560 					goto err_binder_new_node_failed;
1561 				}
1562 				node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1563 				node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1564 			}
1565 			if (fp->cookie != node->cookie) {
1566 				binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
1567 					proc->pid, thread->pid,
1568 					(u64)fp->binder, node->debug_id,
1569 					(u64)fp->cookie, (u64)node->cookie);
1570 				return_error = BR_FAILED_REPLY;
1571 				goto err_binder_get_ref_for_node_failed;
1572 			}
1573 			if (security_binder_transfer_binder(proc->tsk,
1574 							    target_proc->tsk)) {
1575 				return_error = BR_FAILED_REPLY;
1576 				goto err_binder_get_ref_for_node_failed;
1577 			}
1578 			ref = binder_get_ref_for_node(target_proc, node);
1579 			if (ref == NULL) {
1580 				return_error = BR_FAILED_REPLY;
1581 				goto err_binder_get_ref_for_node_failed;
1582 			}
1583 			if (fp->type == BINDER_TYPE_BINDER)
1584 				fp->type = BINDER_TYPE_HANDLE;
1585 			else
1586 				fp->type = BINDER_TYPE_WEAK_HANDLE;
1587 			fp->binder = 0;
1588 			fp->handle = ref->desc;
1589 			fp->cookie = 0;
1590 			binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
1591 				       &thread->todo);
1592 
1593 			trace_binder_transaction_node_to_ref(t, node, ref);
1594 			binder_debug(BINDER_DEBUG_TRANSACTION,
1595 				     "        node %d u%016llx -> ref %d desc %d\n",
1596 				     node->debug_id, (u64)node->ptr,
1597 				     ref->debug_id, ref->desc);
1598 		} break;
1599 		case BINDER_TYPE_HANDLE:
1600 		case BINDER_TYPE_WEAK_HANDLE: {
1601 			struct binder_ref *ref;
1602 
1603 			ref = binder_get_ref(proc, fp->handle,
1604 					     fp->type == BINDER_TYPE_HANDLE);
1605 
1606 			if (ref == NULL) {
1607 				binder_user_error("%d:%d got transaction with invalid handle, %d\n",
1608 						proc->pid,
1609 						thread->pid, fp->handle);
1610 				return_error = BR_FAILED_REPLY;
1611 				goto err_binder_get_ref_failed;
1612 			}
1613 			if (security_binder_transfer_binder(proc->tsk,
1614 							    target_proc->tsk)) {
1615 				return_error = BR_FAILED_REPLY;
1616 				goto err_binder_get_ref_failed;
1617 			}
1618 			if (ref->node->proc == target_proc) {
1619 				if (fp->type == BINDER_TYPE_HANDLE)
1620 					fp->type = BINDER_TYPE_BINDER;
1621 				else
1622 					fp->type = BINDER_TYPE_WEAK_BINDER;
1623 				fp->binder = ref->node->ptr;
1624 				fp->cookie = ref->node->cookie;
1625 				binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
1626 				trace_binder_transaction_ref_to_node(t, ref);
1627 				binder_debug(BINDER_DEBUG_TRANSACTION,
1628 					     "        ref %d desc %d -> node %d u%016llx\n",
1629 					     ref->debug_id, ref->desc, ref->node->debug_id,
1630 					     (u64)ref->node->ptr);
1631 			} else {
1632 				struct binder_ref *new_ref;
1633 
1634 				new_ref = binder_get_ref_for_node(target_proc, ref->node);
1635 				if (new_ref == NULL) {
1636 					return_error = BR_FAILED_REPLY;
1637 					goto err_binder_get_ref_for_node_failed;
1638 				}
1639 				fp->binder = 0;
1640 				fp->handle = new_ref->desc;
1641 				fp->cookie = 0;
1642 				binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
1643 				trace_binder_transaction_ref_to_ref(t, ref,
1644 								    new_ref);
1645 				binder_debug(BINDER_DEBUG_TRANSACTION,
1646 					     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
1647 					     ref->debug_id, ref->desc, new_ref->debug_id,
1648 					     new_ref->desc, ref->node->debug_id);
1649 			}
1650 		} break;
1651 
1652 		case BINDER_TYPE_FD: {
1653 			int target_fd;
1654 			struct file *file;
1655 
1656 			if (reply) {
1657 				if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
1658 					binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
1659 						proc->pid, thread->pid, fp->handle);
1660 					return_error = BR_FAILED_REPLY;
1661 					goto err_fd_not_allowed;
1662 				}
1663 			} else if (!target_node->accept_fds) {
1664 				binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
1665 					proc->pid, thread->pid, fp->handle);
1666 				return_error = BR_FAILED_REPLY;
1667 				goto err_fd_not_allowed;
1668 			}
1669 
1670 			file = fget(fp->handle);
1671 			if (file == NULL) {
1672 				binder_user_error("%d:%d got transaction with invalid fd, %d\n",
1673 					proc->pid, thread->pid, fp->handle);
1674 				return_error = BR_FAILED_REPLY;
1675 				goto err_fget_failed;
1676 			}
1677 			if (security_binder_transfer_file(proc->tsk,
1678 							  target_proc->tsk,
1679 							  file) < 0) {
1680 				fput(file);
1681 				return_error = BR_FAILED_REPLY;
1682 				goto err_get_unused_fd_failed;
1683 			}
1684 			target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
1685 			if (target_fd < 0) {
1686 				fput(file);
1687 				return_error = BR_FAILED_REPLY;
1688 				goto err_get_unused_fd_failed;
1689 			}
1690 			task_fd_install(target_proc, target_fd, file);
1691 			trace_binder_transaction_fd(t, fp->handle, target_fd);
1692 			binder_debug(BINDER_DEBUG_TRANSACTION,
1693 				     "        fd %d -> %d\n", fp->handle, target_fd);
1694 			/* TODO: fput? */
1695 			fp->binder = 0;
1696 			fp->handle = target_fd;
1697 		} break;
1698 
1699 		default:
1700 			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
1701 				proc->pid, thread->pid, fp->type);
1702 			return_error = BR_FAILED_REPLY;
1703 			goto err_bad_object_type;
1704 		}
1705 	}
1706 	if (reply) {
1707 		BUG_ON(t->buffer->async_transaction != 0);
1708 		binder_pop_transaction(target_thread, in_reply_to);
1709 	} else if (!(t->flags & TF_ONE_WAY)) {
1710 		BUG_ON(t->buffer->async_transaction != 0);
1711 		t->need_reply = 1;
1712 		t->from_parent = thread->transaction_stack;
1713 		thread->transaction_stack = t;
1714 	} else {
1715 		BUG_ON(target_node == NULL);
1716 		BUG_ON(t->buffer->async_transaction != 1);
1717 		if (target_node->has_async_transaction) {
1718 			target_list = &target_node->async_todo;
1719 			target_wait = NULL;
1720 		} else
1721 			target_node->has_async_transaction = 1;
1722 	}
1723 	t->work.type = BINDER_WORK_TRANSACTION;
1724 	list_add_tail(&t->work.entry, target_list);
1725 	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
1726 	list_add_tail(&tcomplete->entry, &thread->todo);
1727 	if (target_wait)
1728 		wake_up_interruptible(target_wait);
1729 	return;
1730 
1731 err_get_unused_fd_failed:
1732 err_fget_failed:
1733 err_fd_not_allowed:
1734 err_binder_get_ref_for_node_failed:
1735 err_binder_get_ref_failed:
1736 err_binder_new_node_failed:
1737 err_bad_object_type:
1738 err_bad_offset:
1739 err_copy_data_failed:
1740 	trace_binder_transaction_failed_buffer_release(t->buffer);
1741 	binder_transaction_buffer_release(target_proc, t->buffer, offp);
1742 	t->buffer->transaction = NULL;
1743 	binder_free_buf(target_proc, t->buffer);
1744 err_binder_alloc_buf_failed:
1745 	kfree(tcomplete);
1746 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
1747 err_alloc_tcomplete_failed:
1748 	kfree(t);
1749 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
1750 err_alloc_t_failed:
1751 err_bad_call_stack:
1752 err_empty_call_stack:
1753 err_dead_binder:
1754 err_invalid_target_handle:
1755 err_no_context_mgr_node:
1756 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1757 		     "%d:%d transaction failed %d, size %lld-%lld\n",
1758 		     proc->pid, thread->pid, return_error,
1759 		     (u64)tr->data_size, (u64)tr->offsets_size);
1760 
1761 	{
1762 		struct binder_transaction_log_entry *fe;
1763 
1764 		fe = binder_transaction_log_add(&binder_transaction_log_failed);
1765 		*fe = *e;
1766 	}
1767 
1768 	BUG_ON(thread->return_error != BR_OK);
1769 	if (in_reply_to) {
1770 		thread->return_error = BR_TRANSACTION_COMPLETE;
1771 		binder_send_failed_reply(in_reply_to, return_error);
1772 	} else
1773 		thread->return_error = return_error;
1774 }
1775 
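/*
 * Drains the packed stream of BC_* commands in the caller's write
 * buffer: each entry is a 32-bit command word followed by that
 * command's payload.  A minimal, hypothetical userspace sketch for
 * queueing a single payload-less command (variable names are
 * illustrative only):
 *
 *	uint32_t cmd = BC_ENTER_LOOPER;
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(cmd),
 *		.write_buffer = (binder_uintptr_t)&cmd,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */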
1776 static int binder_thread_write(struct binder_proc *proc,
1777 			struct binder_thread *thread,
1778 			binder_uintptr_t binder_buffer, size_t size,
1779 			binder_size_t *consumed)
1780 {
1781 	uint32_t cmd;
1782 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
1783 	void __user *ptr = buffer + *consumed;
1784 	void __user *end = buffer + size;
1785 
1786 	while (ptr < end && thread->return_error == BR_OK) {
1787 		if (get_user(cmd, (uint32_t __user *)ptr))
1788 			return -EFAULT;
1789 		ptr += sizeof(uint32_t);
1790 		trace_binder_command(cmd);
1791 		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
1792 			binder_stats.bc[_IOC_NR(cmd)]++;
1793 			proc->stats.bc[_IOC_NR(cmd)]++;
1794 			thread->stats.bc[_IOC_NR(cmd)]++;
1795 		}
1796 		switch (cmd) {
1797 		case BC_INCREFS:
1798 		case BC_ACQUIRE:
1799 		case BC_RELEASE:
1800 		case BC_DECREFS: {
1801 			uint32_t target;
1802 			struct binder_ref *ref;
1803 			const char *debug_string;
1804 
1805 			if (get_user(target, (uint32_t __user *)ptr))
1806 				return -EFAULT;
1807 			ptr += sizeof(uint32_t);
1808 			if (target == 0 && binder_context_mgr_node &&
1809 			    (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
1810 				ref = binder_get_ref_for_node(proc,
1811 					       binder_context_mgr_node);
1812 				if (ref && ref->desc != target) {
1813 					binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
1814 						proc->pid, thread->pid,
1815 						ref->desc);
1816 				}
1817 			} else
1818 				ref = binder_get_ref(proc, target,
1819 						     cmd == BC_ACQUIRE ||
1820 						     cmd == BC_RELEASE);
1821 			if (ref == NULL) {
1822 				binder_user_error("%d:%d refcount change on invalid ref %d\n",
1823 					proc->pid, thread->pid, target);
1824 				break;
1825 			}
1826 			switch (cmd) {
1827 			case BC_INCREFS:
1828 				debug_string = "IncRefs";
1829 				binder_inc_ref(ref, 0, NULL);
1830 				break;
1831 			case BC_ACQUIRE:
1832 				debug_string = "Acquire";
1833 				binder_inc_ref(ref, 1, NULL);
1834 				break;
1835 			case BC_RELEASE:
1836 				debug_string = "Release";
1837 				binder_dec_ref(ref, 1);
1838 				break;
1839 			case BC_DECREFS:
1840 			default:
1841 				debug_string = "DecRefs";
1842 				binder_dec_ref(ref, 0);
1843 				break;
1844 			}
1845 			binder_debug(BINDER_DEBUG_USER_REFS,
1846 				     "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
1847 				     proc->pid, thread->pid, debug_string, ref->debug_id,
1848 				     ref->desc, ref->strong, ref->weak, ref->node->debug_id);
1849 			break;
1850 		}
1851 		case BC_INCREFS_DONE:
1852 		case BC_ACQUIRE_DONE: {
1853 			binder_uintptr_t node_ptr;
1854 			binder_uintptr_t cookie;
1855 			struct binder_node *node;
1856 
1857 			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
1858 				return -EFAULT;
1859 			ptr += sizeof(binder_uintptr_t);
1860 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
1861 				return -EFAULT;
1862 			ptr += sizeof(binder_uintptr_t);
1863 			node = binder_get_node(proc, node_ptr);
1864 			if (node == NULL) {
1865 				binder_user_error("%d:%d %s u%016llx no match\n",
1866 					proc->pid, thread->pid,
1867 					cmd == BC_INCREFS_DONE ?
1868 					"BC_INCREFS_DONE" :
1869 					"BC_ACQUIRE_DONE",
1870 					(u64)node_ptr);
1871 				break;
1872 			}
1873 			if (cookie != node->cookie) {
1874 				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
1875 					proc->pid, thread->pid,
1876 					cmd == BC_INCREFS_DONE ?
1877 					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1878 					(u64)node_ptr, node->debug_id,
1879 					(u64)cookie, (u64)node->cookie);
1880 				break;
1881 			}
1882 			if (cmd == BC_ACQUIRE_DONE) {
1883 				if (node->pending_strong_ref == 0) {
1884 					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
1885 						proc->pid, thread->pid,
1886 						node->debug_id);
1887 					break;
1888 				}
1889 				node->pending_strong_ref = 0;
1890 			} else {
1891 				if (node->pending_weak_ref == 0) {
1892 					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
1893 						proc->pid, thread->pid,
1894 						node->debug_id);
1895 					break;
1896 				}
1897 				node->pending_weak_ref = 0;
1898 			}
1899 			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
1900 			binder_debug(BINDER_DEBUG_USER_REFS,
1901 				     "%d:%d %s node %d ls %d lw %d\n",
1902 				     proc->pid, thread->pid,
1903 				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1904 				     node->debug_id, node->local_strong_refs, node->local_weak_refs);
1905 			break;
1906 		}
1907 		case BC_ATTEMPT_ACQUIRE:
1908 			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
1909 			return -EINVAL;
1910 		case BC_ACQUIRE_RESULT:
1911 			pr_err("BC_ACQUIRE_RESULT not supported\n");
1912 			return -EINVAL;
1913 
1914 		case BC_FREE_BUFFER: {
1915 			binder_uintptr_t data_ptr;
1916 			struct binder_buffer *buffer;
1917 
1918 			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
1919 				return -EFAULT;
1920 			ptr += sizeof(binder_uintptr_t);
1921 
1922 			buffer = binder_buffer_lookup(proc, data_ptr);
1923 			if (buffer == NULL) {
1924 				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
1925 					proc->pid, thread->pid, (u64)data_ptr);
1926 				break;
1927 			}
1928 			if (!buffer->allow_user_free) {
1929 				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
1930 					proc->pid, thread->pid, (u64)data_ptr);
1931 				break;
1932 			}
1933 			binder_debug(BINDER_DEBUG_FREE_BUFFER,
1934 				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
1935 				     proc->pid, thread->pid, (u64)data_ptr,
1936 				     buffer->debug_id,
1937 				     buffer->transaction ? "active" : "finished");
1938 
1939 			if (buffer->transaction) {
1940 				buffer->transaction->buffer = NULL;
1941 				buffer->transaction = NULL;
1942 			}
1943 			if (buffer->async_transaction && buffer->target_node) {
1944 				BUG_ON(!buffer->target_node->has_async_transaction);
1945 				if (list_empty(&buffer->target_node->async_todo))
1946 					buffer->target_node->has_async_transaction = 0;
1947 				else
1948 					list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
1949 			}
1950 			trace_binder_transaction_buffer_release(buffer);
1951 			binder_transaction_buffer_release(proc, buffer, NULL);
1952 			binder_free_buf(proc, buffer);
1953 			break;
1954 		}
1955 
1956 		case BC_TRANSACTION:
1957 		case BC_REPLY: {
1958 			struct binder_transaction_data tr;
1959 
1960 			if (copy_from_user(&tr, ptr, sizeof(tr)))
1961 				return -EFAULT;
1962 			ptr += sizeof(tr);
1963 			binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
1964 			break;
1965 		}
1966 
1967 		case BC_REGISTER_LOOPER:
1968 			binder_debug(BINDER_DEBUG_THREADS,
1969 				     "%d:%d BC_REGISTER_LOOPER\n",
1970 				     proc->pid, thread->pid);
1971 			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
1972 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
1973 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
1974 					proc->pid, thread->pid);
1975 			} else if (proc->requested_threads == 0) {
1976 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
1977 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
1978 					proc->pid, thread->pid);
1979 			} else {
1980 				proc->requested_threads--;
1981 				proc->requested_threads_started++;
1982 			}
1983 			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
1984 			break;
1985 		case BC_ENTER_LOOPER:
1986 			binder_debug(BINDER_DEBUG_THREADS,
1987 				     "%d:%d BC_ENTER_LOOPER\n",
1988 				     proc->pid, thread->pid);
1989 			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
1990 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
1991 				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
1992 					proc->pid, thread->pid);
1993 			}
1994 			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
1995 			break;
1996 		case BC_EXIT_LOOPER:
1997 			binder_debug(BINDER_DEBUG_THREADS,
1998 				     "%d:%d BC_EXIT_LOOPER\n",
1999 				     proc->pid, thread->pid);
2000 			thread->looper |= BINDER_LOOPER_STATE_EXITED;
2001 			break;
2002 
2003 		case BC_REQUEST_DEATH_NOTIFICATION:
2004 		case BC_CLEAR_DEATH_NOTIFICATION: {
2005 			uint32_t target;
2006 			binder_uintptr_t cookie;
2007 			struct binder_ref *ref;
2008 			struct binder_ref_death *death;
2009 
2010 			if (get_user(target, (uint32_t __user *)ptr))
2011 				return -EFAULT;
2012 			ptr += sizeof(uint32_t);
2013 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2014 				return -EFAULT;
2015 			ptr += sizeof(binder_uintptr_t);
2016 			ref = binder_get_ref(proc, target, false);
2017 			if (ref == NULL) {
2018 				binder_user_error("%d:%d %s invalid ref %d\n",
2019 					proc->pid, thread->pid,
2020 					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2021 					"BC_REQUEST_DEATH_NOTIFICATION" :
2022 					"BC_CLEAR_DEATH_NOTIFICATION",
2023 					target);
2024 				break;
2025 			}
2026 
2027 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2028 				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
2029 				     proc->pid, thread->pid,
2030 				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2031 				     "BC_REQUEST_DEATH_NOTIFICATION" :
2032 				     "BC_CLEAR_DEATH_NOTIFICATION",
2033 				     (u64)cookie, ref->debug_id, ref->desc,
2034 				     ref->strong, ref->weak, ref->node->debug_id);
2035 
2036 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
2037 				if (ref->death) {
2038 					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
2039 						proc->pid, thread->pid);
2040 					break;
2041 				}
2042 				death = kzalloc(sizeof(*death), GFP_KERNEL);
2043 				if (death == NULL) {
2044 					thread->return_error = BR_ERROR;
2045 					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2046 						     "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
2047 						     proc->pid, thread->pid);
2048 					break;
2049 				}
2050 				binder_stats_created(BINDER_STAT_DEATH);
2051 				INIT_LIST_HEAD(&death->work.entry);
2052 				death->cookie = cookie;
2053 				ref->death = death;
2054 				if (ref->node->proc == NULL) {
2055 					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2056 					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2057 						list_add_tail(&ref->death->work.entry, &thread->todo);
2058 					} else {
2059 						list_add_tail(&ref->death->work.entry, &proc->todo);
2060 						wake_up_interruptible(&proc->wait);
2061 					}
2062 				}
2063 			} else {
2064 				if (ref->death == NULL) {
2065 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
2066 						proc->pid, thread->pid);
2067 					break;
2068 				}
2069 				death = ref->death;
2070 				if (death->cookie != cookie) {
2071 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
2072 						proc->pid, thread->pid,
2073 						(u64)death->cookie,
2074 						(u64)cookie);
2075 					break;
2076 				}
2077 				ref->death = NULL;
2078 				if (list_empty(&death->work.entry)) {
2079 					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2080 					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2081 						list_add_tail(&death->work.entry, &thread->todo);
2082 					} else {
2083 						list_add_tail(&death->work.entry, &proc->todo);
2084 						wake_up_interruptible(&proc->wait);
2085 					}
2086 				} else {
2087 					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2088 					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2089 				}
2090 			}
2091 		} break;
2092 		case BC_DEAD_BINDER_DONE: {
2093 			struct binder_work *w;
2094 			binder_uintptr_t cookie;
2095 			struct binder_ref_death *death = NULL;
2096 
2097 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2098 				return -EFAULT;
2099 
2100 			ptr += sizeof(cookie);
2101 			list_for_each_entry(w, &proc->delivered_death, entry) {
2102 				struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
2103 
2104 				if (tmp_death->cookie == cookie) {
2105 					death = tmp_death;
2106 					break;
2107 				}
2108 			}
2109 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
2110 				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
2111 				     proc->pid, thread->pid, (u64)cookie,
2112 				     death);
2113 			if (death == NULL) {
2114 				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
2115 					proc->pid, thread->pid, (u64)cookie);
2116 				break;
2117 			}
2118 
2119 			list_del_init(&death->work.entry);
2120 			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2121 				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2122 				if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2123 					list_add_tail(&death->work.entry, &thread->todo);
2124 				} else {
2125 					list_add_tail(&death->work.entry, &proc->todo);
2126 					wake_up_interruptible(&proc->wait);
2127 				}
2128 			}
2129 		} break;
2130 
2131 		default:
2132 			pr_err("%d:%d unknown command %d\n",
2133 			       proc->pid, thread->pid, cmd);
2134 			return -EINVAL;
2135 		}
2136 		*consumed = ptr - buffer;
2137 	}
2138 	return 0;
2139 }
2140 
2141 static void binder_stat_br(struct binder_proc *proc,
2142 			   struct binder_thread *thread, uint32_t cmd)
2143 {
2144 	trace_binder_return(cmd);
2145 	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
2146 		binder_stats.br[_IOC_NR(cmd)]++;
2147 		proc->stats.br[_IOC_NR(cmd)]++;
2148 		thread->stats.br[_IOC_NR(cmd)]++;
2149 	}
2150 }
2151 
2152 static int binder_has_proc_work(struct binder_proc *proc,
2153 				struct binder_thread *thread)
2154 {
2155 	return !list_empty(&proc->todo) ||
2156 		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2157 }
2158 
2159 static int binder_has_thread_work(struct binder_thread *thread)
2160 {
2161 	return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
2162 		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2163 }
2164 
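/*
 * Fills the caller's read buffer with a stream of BR_* return words,
 * each optionally followed by a payload (a struct
 * binder_transaction_data after BR_TRANSACTION/BR_REPLY, a cookie
 * after the death-notification returns).  A hypothetical sketch of the
 * matching userspace consumer (handle_txn is illustrative only):
 *
 *	void *p = (void *)(uintptr_t)bwr.read_buffer;
 *	void *end = p + bwr.read_consumed;
 *	while (p < end) {
 *		uint32_t cmd = *(uint32_t *)p;
 *		p += sizeof(uint32_t);
 *		switch (cmd) {
 *		case BR_NOOP:
 *			break;
 *		case BR_TRANSACTION:
 *		case BR_REPLY:
 *			handle_txn(p);
 *			p += sizeof(struct binder_transaction_data);
 *			break;
 *		// ... remaining BR_* codes ...
 *		}
 *	}
 */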
2165 static int binder_thread_read(struct binder_proc *proc,
2166 			      struct binder_thread *thread,
2167 			      binder_uintptr_t binder_buffer, size_t size,
2168 			      binder_size_t *consumed, int non_block)
2169 {
2170 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
2171 	void __user *ptr = buffer + *consumed;
2172 	void __user *end = buffer + size;
2173 
2174 	int ret = 0;
2175 	int wait_for_proc_work;
2176 
2177 	if (*consumed == 0) {
2178 		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
2179 			return -EFAULT;
2180 		ptr += sizeof(uint32_t);
2181 	}
2182 
2183 retry:
2184 	wait_for_proc_work = thread->transaction_stack == NULL &&
2185 				list_empty(&thread->todo);
2186 
2187 	if (thread->return_error != BR_OK && ptr < end) {
2188 		if (thread->return_error2 != BR_OK) {
2189 			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
2190 				return -EFAULT;
2191 			ptr += sizeof(uint32_t);
2192 			binder_stat_br(proc, thread, thread->return_error2);
2193 			thread->return_error2 = BR_OK; /* clear now so a full buffer can't redeliver it */
2194 			if (ptr == end)
2195 				goto done;
2196 		}
2197 		if (put_user(thread->return_error, (uint32_t __user *)ptr))
2198 			return -EFAULT;
2199 		ptr += sizeof(uint32_t);
2200 		binder_stat_br(proc, thread, thread->return_error);
2201 		thread->return_error = BR_OK;
2202 		goto done;
2203 	}
2204 
2206 	thread->looper |= BINDER_LOOPER_STATE_WAITING;
2207 	if (wait_for_proc_work)
2208 		proc->ready_threads++;
2209 
2210 	binder_unlock(__func__);
2211 
2212 	trace_binder_wait_for_work(wait_for_proc_work,
2213 				   !!thread->transaction_stack,
2214 				   !list_empty(&thread->todo));
2215 	if (wait_for_proc_work) {
2216 		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2217 					BINDER_LOOPER_STATE_ENTERED))) {
2218 			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
2219 				proc->pid, thread->pid, thread->looper);
2220 			wait_event_interruptible(binder_user_error_wait,
2221 						 binder_stop_on_user_error < 2);
2222 		}
2223 		binder_set_nice(proc->default_priority);
2224 		if (non_block) {
2225 			if (!binder_has_proc_work(proc, thread))
2226 				ret = -EAGAIN;
2227 		} else
2228 			ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
2229 	} else {
2230 		if (non_block) {
2231 			if (!binder_has_thread_work(thread))
2232 				ret = -EAGAIN;
2233 		} else
2234 			ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
2235 	}
2236 
2237 	binder_lock(__func__);
2238 
2239 	if (wait_for_proc_work)
2240 		proc->ready_threads--;
2241 	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
2242 
2243 	if (ret)
2244 		return ret;
2245 
2246 	while (1) {
2247 		uint32_t cmd;
2248 		struct binder_transaction_data tr;
2249 		struct binder_work *w;
2250 		struct binder_transaction *t = NULL;
2251 
2252 		if (!list_empty(&thread->todo)) {
2253 			w = list_first_entry(&thread->todo, struct binder_work,
2254 					     entry);
2255 		} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
2256 			w = list_first_entry(&proc->todo, struct binder_work,
2257 					     entry);
2258 		} else {
2259 			/* no data added */
2260 			if (ptr - buffer == 4 &&
2261 			    !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
2262 				goto retry;
2263 			break;
2264 		}
2265 
2266 		if (end - ptr < sizeof(tr) + 4)
2267 			break;
2268 
2269 		switch (w->type) {
2270 		case BINDER_WORK_TRANSACTION: {
2271 			t = container_of(w, struct binder_transaction, work);
2272 		} break;
2273 		case BINDER_WORK_TRANSACTION_COMPLETE: {
2274 			cmd = BR_TRANSACTION_COMPLETE;
2275 			if (put_user(cmd, (uint32_t __user *)ptr))
2276 				return -EFAULT;
2277 			ptr += sizeof(uint32_t);
2278 
2279 			binder_stat_br(proc, thread, cmd);
2280 			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
2281 				     "%d:%d BR_TRANSACTION_COMPLETE\n",
2282 				     proc->pid, thread->pid);
2283 
2284 			list_del(&w->entry);
2285 			kfree(w);
2286 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2287 		} break;
2288 		case BINDER_WORK_NODE: {
2289 			struct binder_node *node = container_of(w, struct binder_node, work);
2290 			uint32_t cmd = BR_NOOP;
2291 			const char *cmd_name;
2292 			int strong = node->internal_strong_refs || node->local_strong_refs;
2293 			int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
2294 
2295 			if (weak && !node->has_weak_ref) {
2296 				cmd = BR_INCREFS;
2297 				cmd_name = "BR_INCREFS";
2298 				node->has_weak_ref = 1;
2299 				node->pending_weak_ref = 1;
2300 				node->local_weak_refs++;
2301 			} else if (strong && !node->has_strong_ref) {
2302 				cmd = BR_ACQUIRE;
2303 				cmd_name = "BR_ACQUIRE";
2304 				node->has_strong_ref = 1;
2305 				node->pending_strong_ref = 1;
2306 				node->local_strong_refs++;
2307 			} else if (!strong && node->has_strong_ref) {
2308 				cmd = BR_RELEASE;
2309 				cmd_name = "BR_RELEASE";
2310 				node->has_strong_ref = 0;
2311 			} else if (!weak && node->has_weak_ref) {
2312 				cmd = BR_DECREFS;
2313 				cmd_name = "BR_DECREFS";
2314 				node->has_weak_ref = 0;
2315 			}
2316 			if (cmd != BR_NOOP) {
2317 				if (put_user(cmd, (uint32_t __user *)ptr))
2318 					return -EFAULT;
2319 				ptr += sizeof(uint32_t);
2320 				if (put_user(node->ptr,
2321 					     (binder_uintptr_t __user *)ptr))
2322 					return -EFAULT;
2323 				ptr += sizeof(binder_uintptr_t);
2324 				if (put_user(node->cookie,
2325 					     (binder_uintptr_t __user *)ptr))
2326 					return -EFAULT;
2327 				ptr += sizeof(binder_uintptr_t);
2328 
2329 				binder_stat_br(proc, thread, cmd);
2330 				binder_debug(BINDER_DEBUG_USER_REFS,
2331 					     "%d:%d %s %d u%016llx c%016llx\n",
2332 					     proc->pid, thread->pid, cmd_name,
2333 					     node->debug_id,
2334 					     (u64)node->ptr, (u64)node->cookie);
2335 			} else {
2336 				list_del_init(&w->entry);
2337 				if (!weak && !strong) {
2338 					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2339 						     "%d:%d node %d u%016llx c%016llx deleted\n",
2340 						     proc->pid, thread->pid,
2341 						     node->debug_id,
2342 						     (u64)node->ptr,
2343 						     (u64)node->cookie);
2344 					rb_erase(&node->rb_node, &proc->nodes);
2345 					kfree(node);
2346 					binder_stats_deleted(BINDER_STAT_NODE);
2347 				} else {
2348 					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2349 						     "%d:%d node %d u%016llx c%016llx state unchanged\n",
2350 						     proc->pid, thread->pid,
2351 						     node->debug_id,
2352 						     (u64)node->ptr,
2353 						     (u64)node->cookie);
2354 				}
2355 			}
2356 		} break;
2357 		case BINDER_WORK_DEAD_BINDER:
2358 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2359 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2360 			struct binder_ref_death *death;
2361 			uint32_t cmd;
2362 
2363 			death = container_of(w, struct binder_ref_death, work);
2364 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
2365 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
2366 			else
2367 				cmd = BR_DEAD_BINDER;
2368 			if (put_user(cmd, (uint32_t __user *)ptr))
2369 				return -EFAULT;
2370 			ptr += sizeof(uint32_t);
2371 			if (put_user(death->cookie,
2372 				     (binder_uintptr_t __user *)ptr))
2373 				return -EFAULT;
2374 			ptr += sizeof(binder_uintptr_t);
2375 			binder_stat_br(proc, thread, cmd);
2376 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2377 				     "%d:%d %s %016llx\n",
2378 				      proc->pid, thread->pid,
2379 				      cmd == BR_DEAD_BINDER ?
2380 				      "BR_DEAD_BINDER" :
2381 				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
2382 				      (u64)death->cookie);
2383 
2384 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
2385 				list_del(&w->entry);
2386 				kfree(death);
2387 				binder_stats_deleted(BINDER_STAT_DEATH);
2388 			} else
2389 				list_move(&w->entry, &proc->delivered_death);
2390 			if (cmd == BR_DEAD_BINDER)
2391 				goto done; /* DEAD_BINDER notifications can cause transactions */
2392 		} break;
2393 		}
2394 
2395 		if (!t)
2396 			continue;
2397 
2398 		BUG_ON(t->buffer == NULL);
2399 		if (t->buffer->target_node) {
2400 			struct binder_node *target_node = t->buffer->target_node;
2401 
2402 			tr.target.ptr = target_node->ptr;
2403 			tr.cookie = target_node->cookie;
2404 			t->saved_priority = task_nice(current);
2405 			if (t->priority < target_node->min_priority &&
2406 			    !(t->flags & TF_ONE_WAY))
2407 				binder_set_nice(t->priority);
2408 			else if (!(t->flags & TF_ONE_WAY) ||
2409 				 t->saved_priority > target_node->min_priority)
2410 				binder_set_nice(target_node->min_priority);
2411 			cmd = BR_TRANSACTION;
2412 		} else {
2413 			tr.target.ptr = 0;
2414 			tr.cookie = 0;
2415 			cmd = BR_REPLY;
2416 		}
2417 		tr.code = t->code;
2418 		tr.flags = t->flags;
2419 		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
2420 
2421 		if (t->from) {
2422 			struct task_struct *sender = t->from->proc->tsk;
2423 
2424 			tr.sender_pid = task_tgid_nr_ns(sender,
2425 							task_active_pid_ns(current));
2426 		} else {
2427 			tr.sender_pid = 0;
2428 		}
2429 
2430 		tr.data_size = t->buffer->data_size;
2431 		tr.offsets_size = t->buffer->offsets_size;
2432 		tr.data.ptr.buffer = (binder_uintptr_t)(
2433 					(uintptr_t)t->buffer->data +
2434 					proc->user_buffer_offset);
2435 		tr.data.ptr.offsets = tr.data.ptr.buffer +
2436 					ALIGN(t->buffer->data_size,
2437 					    sizeof(void *));
2438 
2439 		if (put_user(cmd, (uint32_t __user *)ptr))
2440 			return -EFAULT;
2441 		ptr += sizeof(uint32_t);
2442 		if (copy_to_user(ptr, &tr, sizeof(tr)))
2443 			return -EFAULT;
2444 		ptr += sizeof(tr);
2445 
2446 		trace_binder_transaction_received(t);
2447 		binder_stat_br(proc, thread, cmd);
2448 		binder_debug(BINDER_DEBUG_TRANSACTION,
2449 			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
2450 			     proc->pid, thread->pid,
2451 			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
2452 			     "BR_REPLY",
2453 			     t->debug_id, t->from ? t->from->proc->pid : 0,
2454 			     t->from ? t->from->pid : 0, cmd,
2455 			     t->buffer->data_size, t->buffer->offsets_size,
2456 			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
2457 
2458 		list_del(&t->work.entry);
2459 		t->buffer->allow_user_free = 1;
2460 		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
2461 			t->to_parent = thread->transaction_stack;
2462 			t->to_thread = thread;
2463 			thread->transaction_stack = t;
2464 		} else {
2465 			t->buffer->transaction = NULL;
2466 			kfree(t);
2467 			binder_stats_deleted(BINDER_STAT_TRANSACTION);
2468 		}
2469 		break;
2470 	}
2471 
2472 done:
2473 
2474 	*consumed = ptr - buffer;
2475 	if (proc->requested_threads + proc->ready_threads == 0 &&
2476 	    proc->requested_threads_started < proc->max_threads &&
2477 	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2478 	     BINDER_LOOPER_STATE_ENTERED))
2479 	    /* without this check user-space fails to spawn a new thread */) {
2480 		proc->requested_threads++;
2481 		binder_debug(BINDER_DEBUG_THREADS,
2482 			     "%d:%d BR_SPAWN_LOOPER\n",
2483 			     proc->pid, thread->pid);
2484 		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
2485 			return -EFAULT;
2486 		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
2487 	}
2488 	return 0;
2489 }
2490 
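/*
 * Frees work items that can no longer be delivered because their
 * thread or proc is being torn down.  Undelivered two-way transactions
 * are failed with BR_DEAD_REPLY so the sender is unblocked rather than
 * left waiting forever.
 */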
2491 static void binder_release_work(struct list_head *list)
2492 {
2493 	struct binder_work *w;
2494 
2495 	while (!list_empty(list)) {
2496 		w = list_first_entry(list, struct binder_work, entry);
2497 		list_del_init(&w->entry);
2498 		switch (w->type) {
2499 		case BINDER_WORK_TRANSACTION: {
2500 			struct binder_transaction *t;
2501 
2502 			t = container_of(w, struct binder_transaction, work);
2503 			if (t->buffer->target_node &&
2504 			    !(t->flags & TF_ONE_WAY)) {
2505 				binder_send_failed_reply(t, BR_DEAD_REPLY);
2506 			} else {
2507 				binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2508 					"undelivered transaction %d\n",
2509 					t->debug_id);
2510 				t->buffer->transaction = NULL;
2511 				kfree(t);
2512 				binder_stats_deleted(BINDER_STAT_TRANSACTION);
2513 			}
2514 		} break;
2515 		case BINDER_WORK_TRANSACTION_COMPLETE: {
2516 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2517 				"undelivered TRANSACTION_COMPLETE\n");
2518 			kfree(w);
2519 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2520 		} break;
2521 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2522 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2523 			struct binder_ref_death *death;
2524 
2525 			death = container_of(w, struct binder_ref_death, work);
2526 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2527 				"undelivered death notification, %016llx\n",
2528 				(u64)death->cookie);
2529 			kfree(death);
2530 			binder_stats_deleted(BINDER_STAT_DEATH);
2531 		} break;
2532 		default:
2533 			pr_err("unexpected work type, %d, not freed\n",
2534 			       w->type);
2535 			break;
2536 		}
2537 	}
2539 }
2540 
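/*
 * Looks up, or lazily creates, the per-thread state for the calling
 * thread.  proc->threads is an rbtree keyed by pid, so every userspace
 * thread that touches the fd gets its own binder_thread on first use.
 */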
2541 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
2542 {
2543 	struct binder_thread *thread = NULL;
2544 	struct rb_node *parent = NULL;
2545 	struct rb_node **p = &proc->threads.rb_node;
2546 
2547 	while (*p) {
2548 		parent = *p;
2549 		thread = rb_entry(parent, struct binder_thread, rb_node);
2550 
2551 		if (current->pid < thread->pid)
2552 			p = &(*p)->rb_left;
2553 		else if (current->pid > thread->pid)
2554 			p = &(*p)->rb_right;
2555 		else
2556 			break;
2557 	}
2558 	if (*p == NULL) {
2559 		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
2560 		if (thread == NULL)
2561 			return NULL;
2562 		binder_stats_created(BINDER_STAT_THREAD);
2563 		thread->proc = proc;
2564 		thread->pid = current->pid;
2565 		init_waitqueue_head(&thread->wait);
2566 		INIT_LIST_HEAD(&thread->todo);
2567 		rb_link_node(&thread->rb_node, parent, p);
2568 		rb_insert_color(&thread->rb_node, &proc->threads);
2569 		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
2570 		thread->return_error = BR_OK;
2571 		thread->return_error2 = BR_OK;
2572 	}
2573 	return thread;
2574 }
2575 
2576 static int binder_free_thread(struct binder_proc *proc,
2577 			      struct binder_thread *thread)
2578 {
2579 	struct binder_transaction *t;
2580 	struct binder_transaction *send_reply = NULL;
2581 	int active_transactions = 0;
2582 
2583 	rb_erase(&thread->rb_node, &proc->threads);
2584 	t = thread->transaction_stack;
2585 	if (t && t->to_thread == thread)
2586 		send_reply = t;
2587 	while (t) {
2588 		active_transactions++;
2589 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2590 			     "release %d:%d transaction %d %s, still active\n",
2591 			      proc->pid, thread->pid,
2592 			     t->debug_id,
2593 			     (t->to_thread == thread) ? "in" : "out");
2594 
2595 		if (t->to_thread == thread) {
2596 			t->to_proc = NULL;
2597 			t->to_thread = NULL;
2598 			if (t->buffer) {
2599 				t->buffer->transaction = NULL;
2600 				t->buffer = NULL;
2601 			}
2602 			t = t->to_parent;
2603 		} else if (t->from == thread) {
2604 			t->from = NULL;
2605 			t = t->from_parent;
2606 		} else
2607 			BUG();
2608 	}
2609 	if (send_reply)
2610 		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
2611 	binder_release_work(&thread->todo);
2612 	kfree(thread);
2613 	binder_stats_deleted(BINDER_STAT_THREAD);
2614 	return active_transactions;
2615 }
2616 
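/*
 * A hypothetical userspace caller waiting for incoming work without
 * blocking inside BINDER_WRITE_READ (read_work is illustrative only):
 *
 *	struct pollfd pfd = { .fd = binder_fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read_work();	// BINDER_WRITE_READ with a read buffer
 */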
2617 static unsigned int binder_poll(struct file *filp,
2618 				struct poll_table_struct *wait)
2619 {
2620 	struct binder_proc *proc = filp->private_data;
2621 	struct binder_thread *thread = NULL;
2622 	int wait_for_proc_work;
2623 
2624 	binder_lock(__func__);
2625 
2626 	thread = binder_get_thread(proc);
2627 
2628 	wait_for_proc_work = thread->transaction_stack == NULL &&
2629 		list_empty(&thread->todo) && thread->return_error == BR_OK;
2630 
2631 	binder_unlock(__func__);
2632 
2633 	if (wait_for_proc_work) {
2634 		if (binder_has_proc_work(proc, thread))
2635 			return POLLIN;
2636 		poll_wait(filp, &proc->wait, wait);
2637 		if (binder_has_proc_work(proc, thread))
2638 			return POLLIN;
2639 	} else {
2640 		if (binder_has_thread_work(thread))
2641 			return POLLIN;
2642 		poll_wait(filp, &thread->wait, wait);
2643 		if (binder_has_thread_work(thread))
2644 			return POLLIN;
2645 	}
2646 	return 0;
2647 }
2648 
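/*
 * Services BINDER_WRITE_READ: the write buffer is drained first, then
 * the read buffer is filled.  bwr.write_consumed and bwr.read_consumed
 * are updated in place and copied back to userspace, so a caller can
 * resume a partially consumed buffer on its next ioctl.
 */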
2649 static int binder_ioctl_write_read(struct file *filp,
2650 				unsigned int cmd, unsigned long arg,
2651 				struct binder_thread *thread)
2652 {
2653 	int ret = 0;
2654 	struct binder_proc *proc = filp->private_data;
2655 	unsigned int size = _IOC_SIZE(cmd);
2656 	void __user *ubuf = (void __user *)arg;
2657 	struct binder_write_read bwr;
2658 
2659 	if (size != sizeof(struct binder_write_read)) {
2660 		ret = -EINVAL;
2661 		goto out;
2662 	}
2663 	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
2664 		ret = -EFAULT;
2665 		goto out;
2666 	}
2667 	binder_debug(BINDER_DEBUG_READ_WRITE,
2668 		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
2669 		     proc->pid, thread->pid,
2670 		     (u64)bwr.write_size, (u64)bwr.write_buffer,
2671 		     (u64)bwr.read_size, (u64)bwr.read_buffer);
2672 
2673 	if (bwr.write_size > 0) {
2674 		ret = binder_thread_write(proc, thread,
2675 					  bwr.write_buffer,
2676 					  bwr.write_size,
2677 					  &bwr.write_consumed);
2678 		trace_binder_write_done(ret);
2679 		if (ret < 0) {
2680 			bwr.read_consumed = 0;
2681 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2682 				ret = -EFAULT;
2683 			goto out;
2684 		}
2685 	}
2686 	if (bwr.read_size > 0) {
2687 		ret = binder_thread_read(proc, thread, bwr.read_buffer,
2688 					 bwr.read_size,
2689 					 &bwr.read_consumed,
2690 					 filp->f_flags & O_NONBLOCK);
2691 		trace_binder_read_done(ret);
2692 		if (!list_empty(&proc->todo))
2693 			wake_up_interruptible(&proc->wait);
2694 		if (ret < 0) {
2695 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2696 				ret = -EFAULT;
2697 			goto out;
2698 		}
2699 	}
2700 	binder_debug(BINDER_DEBUG_READ_WRITE,
2701 		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
2702 		     proc->pid, thread->pid,
2703 		     (u64)bwr.write_consumed, (u64)bwr.write_size,
2704 		     (u64)bwr.read_consumed, (u64)bwr.read_size);
2705 	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
2706 		ret = -EFAULT;
2707 		goto out;
2708 	}
2709 out:
2710 	return ret;
2711 }
2712 
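/*
 * Only one node may ever be the context manager (handle 0); on Android
 * it is claimed once by servicemanager.  Hypothetical caller sketch:
 *
 *	if (ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0) < 0)
 *		;	// already taken (-EBUSY) or denied by the LSM
 */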
2713 static int binder_ioctl_set_ctx_mgr(struct file *filp)
2714 {
2715 	int ret = 0;
2716 	struct binder_proc *proc = filp->private_data;
2717 	kuid_t curr_euid = current_euid();
2718 
2719 	if (binder_context_mgr_node != NULL) {
2720 		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
2721 		ret = -EBUSY;
2722 		goto out;
2723 	}
2724 	ret = security_binder_set_context_mgr(proc->tsk);
2725 	if (ret < 0)
2726 		goto out;
2727 	if (uid_valid(binder_context_mgr_uid)) {
2728 		if (!uid_eq(binder_context_mgr_uid, curr_euid)) {
2729 			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
2730 			       from_kuid(&init_user_ns, curr_euid),
2731 			       from_kuid(&init_user_ns,
2732 					binder_context_mgr_uid));
2733 			ret = -EPERM;
2734 			goto out;
2735 		}
2736 	} else {
2737 		binder_context_mgr_uid = curr_euid;
2738 	}
2739 	binder_context_mgr_node = binder_new_node(proc, 0, 0);
2740 	if (binder_context_mgr_node == NULL) {
2741 		ret = -ENOMEM;
2742 		goto out;
2743 	}
2744 	binder_context_mgr_node->local_weak_refs++;
2745 	binder_context_mgr_node->local_strong_refs++;
2746 	binder_context_mgr_node->has_strong_ref = 1;
2747 	binder_context_mgr_node->has_weak_ref = 1;
2748 out:
2749 	return ret;
2750 }
2751 
2752 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2753 {
2754 	int ret;
2755 	struct binder_proc *proc = filp->private_data;
2756 	struct binder_thread *thread;
2757 	unsigned int size = _IOC_SIZE(cmd);
2758 	void __user *ubuf = (void __user *)arg;
2759 
2760 	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
2761 			proc->pid, current->pid, cmd, arg);*/
2762 
2763 	if (unlikely(current->mm != proc->vma_vm_mm)) {
2764 		pr_err("current mm mismatch proc mm\n");
2765 		return -EINVAL;
2766 	}
2767 	trace_binder_ioctl(cmd, arg);
2768 
2769 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2770 	if (ret)
2771 		goto err_unlocked;
2772 
2773 	binder_lock(__func__);
2774 	thread = binder_get_thread(proc);
2775 	if (thread == NULL) {
2776 		ret = -ENOMEM;
2777 		goto err;
2778 	}
2779 
2780 	switch (cmd) {
2781 	case BINDER_WRITE_READ:
2782 		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
2783 		if (ret)
2784 			goto err;
2785 		break;
2786 	case BINDER_SET_MAX_THREADS:
2787 		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
2788 			ret = -EINVAL;
2789 			goto err;
2790 		}
2791 		break;
2792 	case BINDER_SET_CONTEXT_MGR:
2793 		ret = binder_ioctl_set_ctx_mgr(filp);
2794 		if (ret)
2795 			goto err;
2796 		break;
2797 	case BINDER_THREAD_EXIT:
2798 		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
2799 			     proc->pid, thread->pid);
2800 		binder_free_thread(proc, thread);
2801 		thread = NULL;
2802 		break;
2803 	case BINDER_VERSION: {
2804 		struct binder_version __user *ver = ubuf;
2805 
2806 		if (size != sizeof(struct binder_version)) {
2807 			ret = -EINVAL;
2808 			goto err;
2809 		}
2810 		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
2811 			     &ver->protocol_version)) {
2812 			ret = -EINVAL;
2813 			goto err;
2814 		}
2815 		break;
2816 	}
2817 	default:
2818 		ret = -EINVAL;
2819 		goto err;
2820 	}
2821 	ret = 0;
2822 err:
2823 	if (thread)
2824 		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
2825 	binder_unlock(__func__);
2826 	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2827 	if (ret && ret != -ERESTARTSYS)
2828 		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
2829 err_unlocked:
2830 	trace_binder_ioctl_done(ret);
2831 	return ret;
2832 }
2833 
2834 static void binder_vma_open(struct vm_area_struct *vma)
2835 {
2836 	struct binder_proc *proc = vma->vm_private_data;
2837 
2838 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2839 		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2840 		     proc->pid, vma->vm_start, vma->vm_end,
2841 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2842 		     (unsigned long)pgprot_val(vma->vm_page_prot));
2843 }
2844 
2845 static void binder_vma_close(struct vm_area_struct *vma)
2846 {
2847 	struct binder_proc *proc = vma->vm_private_data;
2848 
2849 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2850 		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2851 		     proc->pid, vma->vm_start, vma->vm_end,
2852 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2853 		     (unsigned long)pgprot_val(vma->vm_page_prot));
2854 	proc->vma = NULL;
2855 	proc->vma_vm_mm = NULL;
2856 	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
2857 }
2858 
2859 static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2860 {
2861 	return VM_FAULT_SIGBUS;
2862 }
2863 
2864 static const struct vm_operations_struct binder_vm_ops = {
2865 	.open = binder_vma_open,
2866 	.close = binder_vma_close,
2867 	.fault = binder_vm_fault,
2868 };
2869 
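/*
 * Userspace maps the transaction buffer read-only (VM_WRITE is in
 * FORBIDDEN_MMAP_FLAGS) and the driver caps the mapping at 4MB.  A
 * hypothetical caller, with an illustrative size:
 *
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ,
 *			 MAP_PRIVATE | MAP_NORESERVE, binder_fd, 0);
 */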
2870 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
2871 {
2872 	int ret;
2873 	struct vm_struct *area;
2874 	struct binder_proc *proc = filp->private_data;
2875 	const char *failure_string;
2876 	struct binder_buffer *buffer;
2877 
2878 	if (proc->tsk != current)
2879 		return -EINVAL;
2880 
2881 	if ((vma->vm_end - vma->vm_start) > SZ_4M)
2882 		vma->vm_end = vma->vm_start + SZ_4M;
2883 
2884 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2885 		     "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
2886 		     proc->pid, vma->vm_start, vma->vm_end,
2887 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2888 		     (unsigned long)pgprot_val(vma->vm_page_prot));
2889 
2890 	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
2891 		ret = -EPERM;
2892 		failure_string = "bad vm_flags";
2893 		goto err_bad_arg;
2894 	}
2895 	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
2896 
2897 	mutex_lock(&binder_mmap_lock);
2898 	if (proc->buffer) {
2899 		ret = -EBUSY;
2900 		failure_string = "already mapped";
2901 		goto err_already_mapped;
2902 	}
2903 
2904 	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
2905 	if (area == NULL) {
2906 		ret = -ENOMEM;
2907 		failure_string = "get_vm_area";
2908 		goto err_get_vm_area_failed;
2909 	}
2910 	proc->buffer = area->addr;
2911 	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
2912 	mutex_unlock(&binder_mmap_lock);
2913 
2914 #ifdef CONFIG_CPU_CACHE_VIPT
2915 	if (cache_is_vipt_aliasing()) {
2916 		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
2917 			pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
2918 			vma->vm_start += PAGE_SIZE;
2919 		}
2920 	}
2921 #endif
2922 	proc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE, sizeof(proc->pages[0]), GFP_KERNEL);
2923 	if (proc->pages == NULL) {
2924 		ret = -ENOMEM;
2925 		failure_string = "alloc page array";
2926 		goto err_alloc_pages_failed;
2927 	}
2928 	proc->buffer_size = vma->vm_end - vma->vm_start;
2929 
2930 	vma->vm_ops = &binder_vm_ops;
2931 	vma->vm_private_data = proc;
2932 
2933 	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
2934 		ret = -ENOMEM;
2935 		failure_string = "alloc small buf";
2936 		goto err_alloc_small_buf_failed;
2937 	}
2938 	buffer = proc->buffer;
2939 	INIT_LIST_HEAD(&proc->buffers);
2940 	list_add(&buffer->entry, &proc->buffers);
2941 	buffer->free = 1;
2942 	binder_insert_free_buffer(proc, buffer);
2943 	proc->free_async_space = proc->buffer_size / 2;
2944 	barrier();
2945 	proc->files = get_files_struct(current);
2946 	proc->vma = vma;
2947 	proc->vma_vm_mm = vma->vm_mm;
2948 
2949 	/*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
2950 		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
2951 	return 0;
2952 
2953 err_alloc_small_buf_failed:
2954 	kfree(proc->pages);
2955 	proc->pages = NULL;
2956 err_alloc_pages_failed:
2957 	mutex_lock(&binder_mmap_lock);
2958 	vfree(proc->buffer);
2959 	proc->buffer = NULL;
2960 err_get_vm_area_failed:
2961 err_already_mapped:
2962 	mutex_unlock(&binder_mmap_lock);
2963 err_bad_arg:
2964 	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
2965 	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
2966 	return ret;
2967 }
2968 
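/*
 * Reached via open(2) on the misc device.  The usual userspace
 * bring-up order is open, version check, then mmap; a hypothetical
 * sketch of the first two steps:
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	struct binder_version vers;
 *	if (fd < 0 || ioctl(fd, BINDER_VERSION, &vers) < 0 ||
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		;	// bail out
 */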
2969 static int binder_open(struct inode *nodp, struct file *filp)
2970 {
2971 	struct binder_proc *proc;
2972 
2973 	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
2974 		     current->group_leader->pid, current->pid);
2975 
2976 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
2977 	if (proc == NULL)
2978 		return -ENOMEM;
2979 	get_task_struct(current);
2980 	proc->tsk = current;
2981 	proc->vma_vm_mm = current->mm;
2982 	INIT_LIST_HEAD(&proc->todo);
2983 	init_waitqueue_head(&proc->wait);
2984 	proc->default_priority = task_nice(current);
2985 
2986 	binder_lock(__func__);
2987 
2988 	binder_stats_created(BINDER_STAT_PROC);
2989 	hlist_add_head(&proc->proc_node, &binder_procs);
2990 	proc->pid = current->group_leader->pid;
2991 	INIT_LIST_HEAD(&proc->delivered_death);
2992 	filp->private_data = proc;
2993 
2994 	binder_unlock(__func__);
2995 
2996 	if (binder_debugfs_dir_entry_proc) {
2997 		char strbuf[11];
2998 
2999 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
3000 		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
3001 			binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
3002 	}
3003 
3004 	return 0;
3005 }
3006 
3007 static int binder_flush(struct file *filp, fl_owner_t id)
3008 {
3009 	struct binder_proc *proc = filp->private_data;
3010 
3011 	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
3012 
3013 	return 0;
3014 }
3015 
3016 static void binder_deferred_flush(struct binder_proc *proc)
3017 {
3018 	struct rb_node *n;
3019 	int wake_count = 0;
3020 
3021 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
3022 		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
3023 
3024 		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
3025 		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
3026 			wake_up_interruptible(&thread->wait);
3027 			wake_count++;
3028 		}
3029 	}
3030 	wake_up_interruptible_all(&proc->wait);
3031 
3032 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3033 		     "binder_flush: %d woke %d threads\n", proc->pid,
3034 		     wake_count);
3035 }
3036 
3037 static int binder_release(struct inode *nodp, struct file *filp)
3038 {
3039 	struct binder_proc *proc = filp->private_data;
3040 
3041 	debugfs_remove(proc->debugfs_entry);
3042 	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
3043 
3044 	return 0;
3045 }
3046 
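/*
 * Tears down one node of an exiting proc.  Nodes that other processes
 * still reference are parked on binder_dead_nodes, and every ref with
 * a registered death notification gets BINDER_WORK_DEAD_BINDER queued
 * on its owner, which that process later reads back as BR_DEAD_BINDER.
 */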
3047 static int binder_node_release(struct binder_node *node, int refs)
3048 {
3049 	struct binder_ref *ref;
3050 	int death = 0;
3051 
3052 	list_del_init(&node->work.entry);
3053 	binder_release_work(&node->async_todo);
3054 
3055 	if (hlist_empty(&node->refs)) {
3056 		kfree(node);
3057 		binder_stats_deleted(BINDER_STAT_NODE);
3058 
3059 		return refs;
3060 	}
3061 
3062 	node->proc = NULL;
3063 	node->local_strong_refs = 0;
3064 	node->local_weak_refs = 0;
3065 	hlist_add_head(&node->dead_node, &binder_dead_nodes);
3066 
3067 	hlist_for_each_entry(ref, &node->refs, node_entry) {
3068 		refs++;
3069 
3070 		if (!ref->death)
3071 			continue;
3072 
3073 		death++;
3074 
3075 		if (list_empty(&ref->death->work.entry)) {
3076 			ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3077 			list_add_tail(&ref->death->work.entry,
3078 				      &ref->proc->todo);
3079 			wake_up_interruptible(&ref->proc->wait);
3080 		} else
3081 			BUG();
3082 	}
3083 
3084 	binder_debug(BINDER_DEBUG_DEAD_BINDER,
3085 		     "node %d now dead, refs %d, death %d\n",
3086 		     node->debug_id, refs, death);
3087 
3088 	return refs;
3089 }
3090 
3091 static void binder_deferred_release(struct binder_proc *proc)
3092 {
3093 	struct binder_transaction *t;
3094 	struct rb_node *n;
3095 	int threads, nodes, incoming_refs, outgoing_refs, buffers,
3096 		active_transactions, page_count;
3097 
3098 	BUG_ON(proc->vma);
3099 	BUG_ON(proc->files);
3100 
3101 	hlist_del(&proc->proc_node);
3102 
3103 	if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
3104 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
3105 			     "%s: %d context_mgr_node gone\n",
3106 			     __func__, proc->pid);
3107 		binder_context_mgr_node = NULL;
3108 	}
3109 
3110 	threads = 0;
3111 	active_transactions = 0;
3112 	while ((n = rb_first(&proc->threads))) {
3113 		struct binder_thread *thread;
3114 
3115 		thread = rb_entry(n, struct binder_thread, rb_node);
3116 		threads++;
3117 		active_transactions += binder_free_thread(proc, thread);
3118 	}
3119 
3120 	nodes = 0;
3121 	incoming_refs = 0;
3122 	while ((n = rb_first(&proc->nodes))) {
3123 		struct binder_node *node;
3124 
3125 		node = rb_entry(n, struct binder_node, rb_node);
3126 		nodes++;
3127 		rb_erase(&node->rb_node, &proc->nodes);
3128 		incoming_refs = binder_node_release(node, incoming_refs);
3129 	}
3130 
3131 	outgoing_refs = 0;
3132 	while ((n = rb_first(&proc->refs_by_desc))) {
3133 		struct binder_ref *ref;
3134 
3135 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
3136 		outgoing_refs++;
3137 		binder_delete_ref(ref);
3138 	}
3139 
3140 	binder_release_work(&proc->todo);
3141 	binder_release_work(&proc->delivered_death);
3142 
3143 	buffers = 0;
3144 	while ((n = rb_first(&proc->allocated_buffers))) {
3145 		struct binder_buffer *buffer;
3146 
3147 		buffer = rb_entry(n, struct binder_buffer, rb_node);
3148 
3149 		t = buffer->transaction;
3150 		if (t) {
3151 			t->buffer = NULL;
3152 			buffer->transaction = NULL;
3153 			pr_err("release proc %d, transaction %d, not freed\n",
3154 			       proc->pid, t->debug_id);
3155 			/*BUG();*/
3156 		}
3157 
3158 		binder_free_buf(proc, buffer);
3159 		buffers++;
3160 	}
3161 
3162 	binder_stats_deleted(BINDER_STAT_PROC);
3163 
3164 	page_count = 0;
3165 	if (proc->pages) {
3166 		int i;
3167 
3168 		for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
3169 			void *page_addr;
3170 
3171 			if (!proc->pages[i])
3172 				continue;
3173 
3174 			page_addr = proc->buffer + i * PAGE_SIZE;
3175 			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
3176 				     "%s: %d: page %d at %p not freed\n",
3177 				     __func__, proc->pid, i, page_addr);
3178 			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
3179 			__free_page(proc->pages[i]);
3180 			page_count++;
3181 		}
3182 		kfree(proc->pages);
3183 		vfree(proc->buffer);
3184 	}
3185 
3186 	put_task_struct(proc->tsk);
3187 
3188 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3189 		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
3190 		     __func__, proc->pid, threads, nodes, incoming_refs,
3191 		     outgoing_refs, active_transactions, buffers, page_count);
3192 
3193 	kfree(proc);
3194 }
3195 
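/*
 * Workqueue callback that drains binder_deferred_list one proc at a
 * time.  binder_defer_work() below queues a proc at most once by
 * testing hlist_unhashed() under binder_deferred_lock.
 */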
3196 static void binder_deferred_func(struct work_struct *work)
3197 {
3198 	struct binder_proc *proc;
3199 	struct files_struct *files;
3200 
3201 	int defer;
3202 
3203 	do {
3204 		binder_lock(__func__);
3205 		mutex_lock(&binder_deferred_lock);
3206 		if (!hlist_empty(&binder_deferred_list)) {
3207 			proc = hlist_entry(binder_deferred_list.first,
3208 					struct binder_proc, deferred_work_node);
3209 			hlist_del_init(&proc->deferred_work_node);
3210 			defer = proc->deferred_work;
3211 			proc->deferred_work = 0;
3212 		} else {
3213 			proc = NULL;
3214 			defer = 0;
3215 		}
3216 		mutex_unlock(&binder_deferred_lock);
3217 
3218 		files = NULL;
3219 		if (defer & BINDER_DEFERRED_PUT_FILES) {
3220 			files = proc->files;
3221 			if (files)
3222 				proc->files = NULL;
3223 		}
3224 
3225 		if (defer & BINDER_DEFERRED_FLUSH)
3226 			binder_deferred_flush(proc);
3227 
3228 		if (defer & BINDER_DEFERRED_RELEASE)
3229 			binder_deferred_release(proc); /* frees proc */
3230 
3231 		binder_unlock(__func__);
3232 		if (files)
3233 			put_files_struct(files);
3234 	} while (proc);
3235 }
3236 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
3237 
3238 static void
3239 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
3240 {
3241 	mutex_lock(&binder_deferred_lock);
3242 	proc->deferred_work |= defer;
3243 	if (hlist_unhashed(&proc->deferred_work_node)) {
3244 		hlist_add_head(&proc->deferred_work_node,
3245 				&binder_deferred_list);
3246 		schedule_work(&binder_deferred_work);
3247 	}
3248 	mutex_unlock(&binder_deferred_lock);
3249 }
3250 
3251 static void print_binder_transaction(struct seq_file *m, const char *prefix,
3252 				     struct binder_transaction *t)
3253 {
3254 	seq_printf(m,
3255 		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
3256 		   prefix, t->debug_id, t,
3257 		   t->from ? t->from->proc->pid : 0,
3258 		   t->from ? t->from->pid : 0,
3259 		   t->to_proc ? t->to_proc->pid : 0,
3260 		   t->to_thread ? t->to_thread->pid : 0,
3261 		   t->code, t->flags, t->priority, t->need_reply);
3262 	if (t->buffer == NULL) {
3263 		seq_puts(m, " buffer free\n");
3264 		return;
3265 	}
3266 	if (t->buffer->target_node)
3267 		seq_printf(m, " node %d",
3268 			   t->buffer->target_node->debug_id);
3269 	seq_printf(m, " size %zd:%zd data %p\n",
3270 		   t->buffer->data_size, t->buffer->offsets_size,
3271 		   t->buffer->data);
3272 }
3273 
3274 static void print_binder_buffer(struct seq_file *m, const char *prefix,
3275 				struct binder_buffer *buffer)
3276 {
3277 	seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
3278 		   prefix, buffer->debug_id, buffer->data,
3279 		   buffer->data_size, buffer->offsets_size,
3280 		   buffer->transaction ? "active" : "delivered");
3281 }
3282 
3283 static void print_binder_work(struct seq_file *m, const char *prefix,
3284 			      const char *transaction_prefix,
3285 			      struct binder_work *w)
3286 {
3287 	struct binder_node *node;
3288 	struct binder_transaction *t;
3289 
3290 	switch (w->type) {
3291 	case BINDER_WORK_TRANSACTION:
3292 		t = container_of(w, struct binder_transaction, work);
3293 		print_binder_transaction(m, transaction_prefix, t);
3294 		break;
3295 	case BINDER_WORK_TRANSACTION_COMPLETE:
3296 		seq_printf(m, "%stransaction complete\n", prefix);
3297 		break;
3298 	case BINDER_WORK_NODE:
3299 		node = container_of(w, struct binder_node, work);
3300 		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
3301 			   prefix, node->debug_id,
3302 			   (u64)node->ptr, (u64)node->cookie);
3303 		break;
3304 	case BINDER_WORK_DEAD_BINDER:
3305 		seq_printf(m, "%shas dead binder\n", prefix);
3306 		break;
3307 	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3308 		seq_printf(m, "%shas cleared dead binder\n", prefix);
3309 		break;
3310 	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
3311 		seq_printf(m, "%shas cleared death notification\n", prefix);
3312 		break;
3313 	default:
3314 		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
3315 		break;
3316 	}
3317 }
3318 
3319 static void print_binder_thread(struct seq_file *m,
3320 				struct binder_thread *thread,
3321 				int print_always)
3322 {
3323 	struct binder_transaction *t;
3324 	struct binder_work *w;
3325 	size_t start_pos = m->count;
3326 	size_t header_pos;
3327 
3328 	seq_printf(m, "  thread %d: l %02x\n", thread->pid, thread->looper);
3329 	header_pos = m->count;
3330 	t = thread->transaction_stack;
3331 	while (t) {
3332 		if (t->from == thread) {
3333 			print_binder_transaction(m,
3334 						 "    outgoing transaction", t);
3335 			t = t->from_parent;
3336 		} else if (t->to_thread == thread) {
3337 			print_binder_transaction(m,
3338 						 "    incoming transaction", t);
3339 			t = t->to_parent;
3340 		} else {
3341 			print_binder_transaction(m, "    bad transaction", t);
3342 			t = NULL;
3343 		}
3344 	}
3345 	list_for_each_entry(w, &thread->todo, entry) {
3346 		print_binder_work(m, "    ", "    pending transaction", w);
3347 	}
3348 	if (!print_always && m->count == header_pos)
3349 		m->count = start_pos;
3350 }
3351 
3352 static void print_binder_node(struct seq_file *m, struct binder_node *node)
3353 {
3354 	struct binder_ref *ref;
3355 	struct binder_work *w;
3356 	int count;
3357 
3358 	count = 0;
3359 	hlist_for_each_entry(ref, &node->refs, node_entry)
3360 		count++;
3361 
3362 	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
3363 		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
3364 		   node->has_strong_ref, node->has_weak_ref,
3365 		   node->local_strong_refs, node->local_weak_refs,
3366 		   node->internal_strong_refs, count);
3367 	if (count) {
3368 		seq_puts(m, " proc");
3369 		hlist_for_each_entry(ref, &node->refs, node_entry)
3370 			seq_printf(m, " %d", ref->proc->pid);
3371 	}
3372 	seq_puts(m, "\n");
3373 	list_for_each_entry(w, &node->async_todo, entry)
3374 		print_binder_work(m, "    ",
3375 				  "    pending async transaction", w);
3376 }
3377 
3378 static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
3379 {
3380 	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %p\n",
3381 		   ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
3382 		   ref->node->debug_id, ref->strong, ref->weak, ref->death);
3383 }
3384 
3385 static void print_binder_proc(struct seq_file *m,
3386 			      struct binder_proc *proc, int print_all)
3387 {
3388 	struct binder_work *w;
3389 	struct rb_node *n;
3390 	size_t start_pos = m->count;
3391 	size_t header_pos;
3392 
3393 	seq_printf(m, "proc %d\n", proc->pid);
3394 	header_pos = m->count;
3395 
3396 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3397 		print_binder_thread(m, rb_entry(n, struct binder_thread,
3398 						rb_node), print_all);
3399 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
3400 		struct binder_node *node = rb_entry(n, struct binder_node,
3401 						    rb_node);
3402 		if (print_all || node->has_async_transaction)
3403 			print_binder_node(m, node);
3404 	}
3405 	if (print_all) {
3406 		for (n = rb_first(&proc->refs_by_desc);
3407 		     n != NULL;
3408 		     n = rb_next(n))
3409 			print_binder_ref(m, rb_entry(n, struct binder_ref,
3410 						     rb_node_desc));
3411 	}
3412 	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
3413 		print_binder_buffer(m, "  buffer",
3414 				    rb_entry(n, struct binder_buffer, rb_node));
3415 	list_for_each_entry(w, &proc->todo, entry)
3416 		print_binder_work(m, "  ", "  pending transaction", w);
3417 	list_for_each_entry(w, &proc->delivered_death, entry) {
3418 		seq_puts(m, "  has delivered dead binder\n");
3419 		break;
3420 	}
3421 	if (!print_all && m->count == header_pos)
3422 		m->count = start_pos;
3423 }
3424 
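/*
 * The string tables below are indexed by _IOC_NR(cmd); the
 * BUILD_BUG_ON()s in print_binder_stats() keep each table the same
 * length as the corresponding counter array in struct binder_stats.
 */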
3425 static const char * const binder_return_strings[] = {
3426 	"BR_ERROR",
3427 	"BR_OK",
3428 	"BR_TRANSACTION",
3429 	"BR_REPLY",
3430 	"BR_ACQUIRE_RESULT",
3431 	"BR_DEAD_REPLY",
3432 	"BR_TRANSACTION_COMPLETE",
3433 	"BR_INCREFS",
3434 	"BR_ACQUIRE",
3435 	"BR_RELEASE",
3436 	"BR_DECREFS",
3437 	"BR_ATTEMPT_ACQUIRE",
3438 	"BR_NOOP",
3439 	"BR_SPAWN_LOOPER",
3440 	"BR_FINISHED",
3441 	"BR_DEAD_BINDER",
3442 	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
3443 	"BR_FAILED_REPLY"
3444 };
3445 
3446 static const char * const binder_command_strings[] = {
3447 	"BC_TRANSACTION",
3448 	"BC_REPLY",
3449 	"BC_ACQUIRE_RESULT",
3450 	"BC_FREE_BUFFER",
3451 	"BC_INCREFS",
3452 	"BC_ACQUIRE",
3453 	"BC_RELEASE",
3454 	"BC_DECREFS",
3455 	"BC_INCREFS_DONE",
3456 	"BC_ACQUIRE_DONE",
3457 	"BC_ATTEMPT_ACQUIRE",
3458 	"BC_REGISTER_LOOPER",
3459 	"BC_ENTER_LOOPER",
3460 	"BC_EXIT_LOOPER",
3461 	"BC_REQUEST_DEATH_NOTIFICATION",
3462 	"BC_CLEAR_DEATH_NOTIFICATION",
3463 	"BC_DEAD_BINDER_DONE"
3464 };
3465 
3466 static const char * const binder_objstat_strings[] = {
3467 	"proc",
3468 	"thread",
3469 	"node",
3470 	"ref",
3471 	"death",
3472 	"transaction",
3473 	"transaction_complete"
3474 };
3475 
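/*
 * Print every counter that is non-zero.  Object stats report both the
 * number still active (created minus deleted) and the lifetime total,
 * which makes leaked nodes, refs or transactions stand out.
 */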
3476 static void print_binder_stats(struct seq_file *m, const char *prefix,
3477 			       struct binder_stats *stats)
3478 {
3479 	int i;
3480 
3481 	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
3482 		     ARRAY_SIZE(binder_command_strings));
3483 	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
3484 		if (stats->bc[i])
3485 			seq_printf(m, "%s%s: %d\n", prefix,
3486 				   binder_command_strings[i], stats->bc[i]);
3487 	}
3488 
3489 	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
3490 		     ARRAY_SIZE(binder_return_strings));
3491 	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
3492 		if (stats->br[i])
3493 			seq_printf(m, "%s%s: %d\n", prefix,
3494 				   binder_return_strings[i], stats->br[i]);
3495 	}
3496 
3497 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3498 		     ARRAY_SIZE(binder_objstat_strings));
3499 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3500 		     ARRAY_SIZE(stats->obj_deleted));
3501 	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
3502 		if (stats->obj_created[i] || stats->obj_deleted[i])
3503 			seq_printf(m, "%s%s: active %d total %d\n", prefix,
3504 				binder_objstat_strings[i],
3505 				stats->obj_created[i] - stats->obj_deleted[i],
3506 				stats->obj_created[i]);
3507 	}
3508 }
3509 
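/*
 * Per-process summary for the "stats" file: thread, node, ref and
 * buffer counts, thread-pool state, and how many
 * BINDER_WORK_TRANSACTION items are still queued on the proc's todo
 * list.
 */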
3510 static void print_binder_proc_stats(struct seq_file *m,
3511 				    struct binder_proc *proc)
3512 {
3513 	struct binder_work *w;
3514 	struct rb_node *n;
3515 	int count, strong, weak;
3516 
3517 	seq_printf(m, "proc %d\n", proc->pid);
3518 	count = 0;
3519 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3520 		count++;
3521 	seq_printf(m, "  threads: %d\n", count);
3522 	seq_printf(m, "  requested threads: %d+%d/%d\n"
3523 			"  ready threads %d\n"
3524 			"  free async space %zd\n", proc->requested_threads,
3525 			proc->requested_threads_started, proc->max_threads,
3526 			proc->ready_threads, proc->free_async_space);
3527 	count = 0;
3528 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
3529 		count++;
3530 	seq_printf(m, "  nodes: %d\n", count);
3531 	count = 0;
3532 	strong = 0;
3533 	weak = 0;
3534 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
3535 		struct binder_ref *ref = rb_entry(n, struct binder_ref,
3536 						  rb_node_desc);
3537 		count++;
3538 		strong += ref->strong;
3539 		weak += ref->weak;
3540 	}
3541 	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
3542 
3543 	count = 0;
3544 	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
3545 		count++;
3546 	seq_printf(m, "  buffers: %d\n", count);
3547 
3548 	count = 0;
3549 	list_for_each_entry(w, &proc->todo, entry) {
3550 		switch (w->type) {
3551 		case BINDER_WORK_TRANSACTION:
3552 			count++;
3553 			break;
3554 		default:
3555 			break;
3556 		}
3557 	}
3558 	seq_printf(m, "  pending transactions: %d\n", count);
3559 
3560 	print_binder_stats(m, "  ", &proc->stats);
3561 }
3562 
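/*
 * Implements /sys/kernel/debug/binder/state: dead nodes first, then a
 * full dump of every live proc.  The global binder lock is taken
 * unless binder_debug_no_lock is set, which keeps the file readable on
 * a wedged system at the cost of possibly racy output.
 */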
3564 static int binder_state_show(struct seq_file *m, void *unused)
3565 {
3566 	struct binder_proc *proc;
3567 	struct binder_node *node;
3568 	int do_lock = !binder_debug_no_lock;
3569 
3570 	if (do_lock)
3571 		binder_lock(__func__);
3572 
3573 	seq_puts(m, "binder state:\n");
3574 
3575 	if (!hlist_empty(&binder_dead_nodes))
3576 		seq_puts(m, "dead nodes:\n");
3577 	hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
3578 		print_binder_node(m, node);
3579 
3580 	hlist_for_each_entry(proc, &binder_procs, proc_node)
3581 		print_binder_proc(m, proc, 1);
3582 	if (do_lock)
3583 		binder_unlock(__func__);
3584 	return 0;
3585 }
3586 
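/* /sys/kernel/debug/binder/stats: global counters, then per-proc ones. */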
3587 static int binder_stats_show(struct seq_file *m, void *unused)
3588 {
3589 	struct binder_proc *proc;
3590 	int do_lock = !binder_debug_no_lock;
3591 
3592 	if (do_lock)
3593 		binder_lock(__func__);
3594 
3595 	seq_puts(m, "binder stats:\n");
3596 
3597 	print_binder_stats(m, "", &binder_stats);
3598 
3599 	hlist_for_each_entry(proc, &binder_procs, proc_node)
3600 		print_binder_proc_stats(m, proc);
3601 	if (do_lock)
3602 		binder_unlock(__func__);
3603 	return 0;
3604 }
3605 
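/*
 * /sys/kernel/debug/binder/transactions: same walk as "state" but with
 * print_all == 0, so only threads and nodes that actually have a
 * transaction in flight survive the output rewind.
 */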
3606 static int binder_transactions_show(struct seq_file *m, void *unused)
3607 {
3608 	struct binder_proc *proc;
3609 	int do_lock = !binder_debug_no_lock;
3610 
3611 	if (do_lock)
3612 		binder_lock(__func__);
3613 
3614 	seq_puts(m, "binder transactions:\n");
3615 	hlist_for_each_entry(proc, &binder_procs, proc_node)
3616 		print_binder_proc(m, proc, 0);
3617 	if (do_lock)
3618 		binder_unlock(__func__);
3619 	return 0;
3620 }
3621 
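/*
 * Backs the per-pid files under /sys/kernel/debug/binder/proc/.  The
 * binder_proc stashed in m->private may belong to a process that has
 * since exited, so it is only dereferenced after confirming it is
 * still on the global binder_procs list.
 */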
3622 static int binder_proc_show(struct seq_file *m, void *unused)
3623 {
3624 	struct binder_proc *itr;
3625 	struct binder_proc *proc = m->private;
3626 	int do_lock = !binder_debug_no_lock;
3627 	bool valid_proc = false;
3628 
3629 	if (do_lock)
3630 		binder_lock(__func__);
3631 
3632 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
3633 		if (itr == proc) {
3634 			valid_proc = true;
3635 			break;
3636 		}
3637 	}
3638 	if (valid_proc) {
3639 		seq_puts(m, "binder proc state:\n");
3640 		print_binder_proc(m, proc, 1);
3641 	}
3642 	if (do_lock)
3643 		binder_unlock(__func__);
3644 	return 0;
3645 }
3646 
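/*
 * One log line per transaction; call_type is 2 for a reply, 1 for an
 * async (one-way) call and anything else a synchronous call, e.g.
 *   "217: call  from 1471:1471 to 612:0 node 5 handle 3 size 96:8"
 * (the trailing space in "call " keeps the columns aligned).
 */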
3647 static void print_binder_transaction_log_entry(struct seq_file *m,
3648 					struct binder_transaction_log_entry *e)
3649 {
3650 	seq_printf(m,
3651 		   "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
3652 		   e->debug_id, (e->call_type == 2) ? "reply" :
3653 		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
3654 		   e->from_thread, e->to_proc, e->to_thread, e->to_node,
3655 		   e->target_handle, e->data_size, e->offsets_size);
3656 }
3657 
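/*
 * The transaction log is a fixed-size ring buffer: log->next is the
 * slot that will be overwritten next.  Once the log has wrapped
 * (log->full), the oldest entries live in [next, ARRAY_SIZE), so print
 * those first, then the [0, next) range.
 */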
3658 static int binder_transaction_log_show(struct seq_file *m, void *unused)
3659 {
3660 	struct binder_transaction_log *log = m->private;
3661 	int i;
3662 
3663 	if (log->full) {
3664 		for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
3665 			print_binder_transaction_log_entry(m, &log->entry[i]);
3666 	}
3667 	for (i = 0; i < log->next; i++)
3668 		print_binder_transaction_log_entry(m, &log->entry[i]);
3669 	return 0;
3670 }
3671 
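/*
 * No read() or write(): all data transfer goes through
 * BINDER_WRITE_READ ioctls on buffers set up by binder_mmap().  The
 * same handler can serve compat_ioctl because the binder UAPI
 * structures use fixed-width fields.
 */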
3672 static const struct file_operations binder_fops = {
3673 	.owner = THIS_MODULE,
3674 	.poll = binder_poll,
3675 	.unlocked_ioctl = binder_ioctl,
3676 	.compat_ioctl = binder_ioctl,
3677 	.mmap = binder_mmap,
3678 	.open = binder_open,
3679 	.flush = binder_flush,
3680 	.release = binder_release,
3681 };
3682 
3683 static struct miscdevice binder_miscdev = {
3684 	.minor = MISC_DYNAMIC_MINOR,
3685 	.name = "binder",
3686 	.fops = &binder_fops
3687 };
3688 
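/*
 * Generate the single_open() file_operations boilerplate for each
 * debugfs file.  binder_transaction_log_fops is shared by the
 * transaction_log and failed_transaction_log files, which differ only
 * in the log pointer passed via i_private.
 */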
3689 BINDER_DEBUG_ENTRY(state);
3690 BINDER_DEBUG_ENTRY(stats);
3691 BINDER_DEBUG_ENTRY(transactions);
3692 BINDER_DEBUG_ENTRY(transaction_log);
3693 
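/*
 * Register the /dev/binder misc device and, if debugfs is available,
 * create binder/{state,stats,transactions,transaction_log,
 * failed_transaction_log} plus the "proc" directory used for the
 * per-pid state files.  debugfs failures are deliberately non-fatal.
 */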
3694 static int __init binder_init(void)
3695 {
3696 	int ret;
3697 
3698 	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
3699 	if (binder_debugfs_dir_entry_root)
3700 		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
3701 						 binder_debugfs_dir_entry_root);
3702 	ret = misc_register(&binder_miscdev);
	if (ret < 0) {
		/* No /dev/binder: unwind the debugfs dirs created above. */
		debugfs_remove_recursive(binder_debugfs_dir_entry_root);
		binder_debugfs_dir_entry_root = NULL;
		return ret;
	}
3703 	if (binder_debugfs_dir_entry_root) {
3704 		debugfs_create_file("state",
3705 				    S_IRUGO,
3706 				    binder_debugfs_dir_entry_root,
3707 				    NULL,
3708 				    &binder_state_fops);
3709 		debugfs_create_file("stats",
3710 				    S_IRUGO,
3711 				    binder_debugfs_dir_entry_root,
3712 				    NULL,
3713 				    &binder_stats_fops);
3714 		debugfs_create_file("transactions",
3715 				    S_IRUGO,
3716 				    binder_debugfs_dir_entry_root,
3717 				    NULL,
3718 				    &binder_transactions_fops);
3719 		debugfs_create_file("transaction_log",
3720 				    S_IRUGO,
3721 				    binder_debugfs_dir_entry_root,
3722 				    &binder_transaction_log,
3723 				    &binder_transaction_log_fops);
3724 		debugfs_create_file("failed_transaction_log",
3725 				    S_IRUGO,
3726 				    binder_debugfs_dir_entry_root,
3727 				    &binder_transaction_log_failed,
3728 				    &binder_transaction_log_fops);
3729 	}
3730 	return ret;
3731 }
3732 
3733 device_initcall(binder_init);
3734 
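/*
 * CREATE_TRACE_POINTS may only be defined in one compilation unit;
 * re-including binder_trace.h with it set emits the tracepoint
 * definitions here.
 */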
3735 #define CREATE_TRACE_POINTS
3736 #include "binder_trace.h"
3737 
3738 MODULE_LICENSE("GPL v2");
3739