1 /* binder.c 2 * 3 * Android IPC Subsystem 4 * 5 * Copyright (C) 2007-2008 Google, Inc. 6 * 7 * This software is licensed under the terms of the GNU General Public 8 * License version 2, as published by the Free Software Foundation, and 9 * may be copied, distributed, and modified under those terms. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 */ 17 18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 19 20 #include <asm/cacheflush.h> 21 #include <linux/fdtable.h> 22 #include <linux/file.h> 23 #include <linux/freezer.h> 24 #include <linux/fs.h> 25 #include <linux/list.h> 26 #include <linux/miscdevice.h> 27 #include <linux/mm.h> 28 #include <linux/module.h> 29 #include <linux/mutex.h> 30 #include <linux/nsproxy.h> 31 #include <linux/poll.h> 32 #include <linux/debugfs.h> 33 #include <linux/rbtree.h> 34 #include <linux/sched.h> 35 #include <linux/seq_file.h> 36 #include <linux/uaccess.h> 37 #include <linux/vmalloc.h> 38 #include <linux/slab.h> 39 #include <linux/pid_namespace.h> 40 #include <linux/security.h> 41 42 #ifdef CONFIG_ANDROID_BINDER_IPC_32BIT 43 #define BINDER_IPC_32BIT 1 44 #endif 45 46 #include <uapi/linux/android/binder.h> 47 #include "binder_trace.h" 48 49 static DEFINE_MUTEX(binder_main_lock); 50 static DEFINE_MUTEX(binder_deferred_lock); 51 static DEFINE_MUTEX(binder_mmap_lock); 52 53 static HLIST_HEAD(binder_devices); 54 static HLIST_HEAD(binder_procs); 55 static HLIST_HEAD(binder_deferred_list); 56 static HLIST_HEAD(binder_dead_nodes); 57 58 static struct dentry *binder_debugfs_dir_entry_root; 59 static struct dentry *binder_debugfs_dir_entry_proc; 60 static int binder_last_id; 61 62 #define BINDER_DEBUG_ENTRY(name) \ 63 static int binder_##name##_open(struct inode *inode, struct file *file) \ 64 { \ 65 return single_open(file, binder_##name##_show, inode->i_private); \ 66 } \ 67 \ 68 static const struct file_operations binder_##name##_fops = { \ 69 .owner = THIS_MODULE, \ 70 .open = binder_##name##_open, \ 71 .read = seq_read, \ 72 .llseek = seq_lseek, \ 73 .release = single_release, \ 74 } 75 76 static int binder_proc_show(struct seq_file *m, void *unused); 77 BINDER_DEBUG_ENTRY(proc); 78 79 /* This is only defined in include/asm-arm/sizes.h */ 80 #ifndef SZ_1K 81 #define SZ_1K 0x400 82 #endif 83 84 #ifndef SZ_4M 85 #define SZ_4M 0x400000 86 #endif 87 88 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE) 89 90 #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64) 91 92 enum { 93 BINDER_DEBUG_USER_ERROR = 1U << 0, 94 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1, 95 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2, 96 BINDER_DEBUG_OPEN_CLOSE = 1U << 3, 97 BINDER_DEBUG_DEAD_BINDER = 1U << 4, 98 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5, 99 BINDER_DEBUG_READ_WRITE = 1U << 6, 100 BINDER_DEBUG_USER_REFS = 1U << 7, 101 BINDER_DEBUG_THREADS = 1U << 8, 102 BINDER_DEBUG_TRANSACTION = 1U << 9, 103 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10, 104 BINDER_DEBUG_FREE_BUFFER = 1U << 11, 105 BINDER_DEBUG_INTERNAL_REFS = 1U << 12, 106 BINDER_DEBUG_BUFFER_ALLOC = 1U << 13, 107 BINDER_DEBUG_PRIORITY_CAP = 1U << 14, 108 BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15, 109 }; 110 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | 111 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; 112 module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO); 113 114 static bool binder_debug_no_lock; 115 module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO); 116 117 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES; 118 module_param_named(devices, binder_devices_param, charp, 0444); 119 120 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); 121 static int binder_stop_on_user_error; 122 123 static int binder_set_stop_on_user_error(const char *val, 124 struct kernel_param *kp) 125 { 126 int ret; 127 128 ret = param_set_int(val, kp); 129 if (binder_stop_on_user_error < 2) 130 wake_up(&binder_user_error_wait); 131 return ret; 132 } 133 module_param_call(stop_on_user_error, binder_set_stop_on_user_error, 134 param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO); 135 136 #define binder_debug(mask, x...) \ 137 do { \ 138 if (binder_debug_mask & mask) \ 139 pr_info(x); \ 140 } while (0) 141 142 #define binder_user_error(x...) \ 143 do { \ 144 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \ 145 pr_info(x); \ 146 if (binder_stop_on_user_error) \ 147 binder_stop_on_user_error = 2; \ 148 } while (0) 149 150 #define to_flat_binder_object(hdr) \ 151 container_of(hdr, struct flat_binder_object, hdr) 152 153 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr) 154 155 enum binder_stat_types { 156 BINDER_STAT_PROC, 157 BINDER_STAT_THREAD, 158 BINDER_STAT_NODE, 159 BINDER_STAT_REF, 160 BINDER_STAT_DEATH, 161 BINDER_STAT_TRANSACTION, 162 BINDER_STAT_TRANSACTION_COMPLETE, 163 BINDER_STAT_COUNT 164 }; 165 166 struct binder_stats { 167 int br[_IOC_NR(BR_FAILED_REPLY) + 1]; 168 int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1]; 169 int obj_created[BINDER_STAT_COUNT]; 170 int obj_deleted[BINDER_STAT_COUNT]; 171 }; 172 173 static struct binder_stats binder_stats; 174 175 static inline void binder_stats_deleted(enum binder_stat_types type) 176 { 177 binder_stats.obj_deleted[type]++; 178 } 179 180 static inline void binder_stats_created(enum binder_stat_types type) 181 { 182 binder_stats.obj_created[type]++; 183 } 184 185 struct binder_transaction_log_entry { 186 int debug_id; 187 int call_type; 188 int from_proc; 189 int from_thread; 190 int target_handle; 191 int to_proc; 192 int to_thread; 193 int to_node; 194 int data_size; 195 int offsets_size; 196 const char *context_name; 197 }; 198 struct binder_transaction_log { 199 int next; 200 int full; 201 struct binder_transaction_log_entry entry[32]; 202 }; 203 static struct binder_transaction_log binder_transaction_log; 204 static struct binder_transaction_log binder_transaction_log_failed; 205 206 static struct binder_transaction_log_entry *binder_transaction_log_add( 207 struct binder_transaction_log *log) 208 { 209 struct binder_transaction_log_entry *e; 210 211 e = &log->entry[log->next]; 212 memset(e, 0, sizeof(*e)); 213 log->next++; 214 if (log->next == ARRAY_SIZE(log->entry)) { 215 log->next = 0; 216 log->full = 1; 217 } 218 return e; 219 } 220 221 struct binder_context { 222 struct binder_node *binder_context_mgr_node; 223 kuid_t binder_context_mgr_uid; 224 const char *name; 225 }; 226 227 struct binder_device { 228 struct hlist_node hlist; 229 struct miscdevice miscdev; 230 struct binder_context context; 231 }; 232 233 struct binder_work { 234 struct list_head entry; 235 enum { 236 BINDER_WORK_TRANSACTION = 1, 237 BINDER_WORK_TRANSACTION_COMPLETE, 238 BINDER_WORK_NODE, 239 BINDER_WORK_DEAD_BINDER, 240 BINDER_WORK_DEAD_BINDER_AND_CLEAR, 241 BINDER_WORK_CLEAR_DEATH_NOTIFICATION, 242 } type; 243 }; 244 245 struct binder_node { 246 int debug_id; 247 struct binder_work work; 248 union { 249 struct rb_node rb_node; 250 struct hlist_node dead_node; 251 }; 252 struct binder_proc *proc; 253 struct hlist_head refs; 254 int internal_strong_refs; 255 int local_weak_refs; 256 int local_strong_refs; 257 binder_uintptr_t ptr; 258 binder_uintptr_t cookie; 259 unsigned has_strong_ref:1; 260 unsigned pending_strong_ref:1; 261 unsigned has_weak_ref:1; 262 unsigned pending_weak_ref:1; 263 unsigned has_async_transaction:1; 264 unsigned accept_fds:1; 265 unsigned min_priority:8; 266 struct list_head async_todo; 267 }; 268 269 struct binder_ref_death { 270 struct binder_work work; 271 binder_uintptr_t cookie; 272 }; 273 274 struct binder_ref { 275 /* Lookups needed: */ 276 /* node + proc => ref (transaction) */ 277 /* desc + proc => ref (transaction, inc/dec ref) */ 278 /* node => refs + procs (proc exit) */ 279 int debug_id; 280 struct rb_node rb_node_desc; 281 struct rb_node rb_node_node; 282 struct hlist_node node_entry; 283 struct binder_proc *proc; 284 struct binder_node *node; 285 uint32_t desc; 286 int strong; 287 int weak; 288 struct binder_ref_death *death; 289 }; 290 291 struct binder_buffer { 292 struct list_head entry; /* free and allocated entries by address */ 293 struct rb_node rb_node; /* free entry by size or allocated entry */ 294 /* by address */ 295 unsigned free:1; 296 unsigned allow_user_free:1; 297 unsigned async_transaction:1; 298 unsigned debug_id:29; 299 300 struct binder_transaction *transaction; 301 302 struct binder_node *target_node; 303 size_t data_size; 304 size_t offsets_size; 305 uint8_t data[0]; 306 }; 307 308 enum binder_deferred_state { 309 BINDER_DEFERRED_PUT_FILES = 0x01, 310 BINDER_DEFERRED_FLUSH = 0x02, 311 BINDER_DEFERRED_RELEASE = 0x04, 312 }; 313 314 struct binder_proc { 315 struct hlist_node proc_node; 316 struct rb_root threads; 317 struct rb_root nodes; 318 struct rb_root refs_by_desc; 319 struct rb_root refs_by_node; 320 int pid; 321 struct vm_area_struct *vma; 322 struct mm_struct *vma_vm_mm; 323 struct task_struct *tsk; 324 struct files_struct *files; 325 struct hlist_node deferred_work_node; 326 int deferred_work; 327 void *buffer; 328 ptrdiff_t user_buffer_offset; 329 330 struct list_head buffers; 331 struct rb_root free_buffers; 332 struct rb_root allocated_buffers; 333 size_t free_async_space; 334 335 struct page **pages; 336 size_t buffer_size; 337 uint32_t buffer_free; 338 struct list_head todo; 339 wait_queue_head_t wait; 340 struct binder_stats stats; 341 struct list_head delivered_death; 342 int max_threads; 343 int requested_threads; 344 int requested_threads_started; 345 int ready_threads; 346 long default_priority; 347 struct dentry *debugfs_entry; 348 struct binder_context *context; 349 }; 350 351 enum { 352 BINDER_LOOPER_STATE_REGISTERED = 0x01, 353 BINDER_LOOPER_STATE_ENTERED = 0x02, 354 BINDER_LOOPER_STATE_EXITED = 0x04, 355 BINDER_LOOPER_STATE_INVALID = 0x08, 356 BINDER_LOOPER_STATE_WAITING = 0x10, 357 BINDER_LOOPER_STATE_NEED_RETURN = 0x20 358 }; 359 360 struct binder_thread { 361 struct binder_proc *proc; 362 struct rb_node rb_node; 363 int pid; 364 int looper; 365 struct binder_transaction *transaction_stack; 366 struct list_head todo; 367 uint32_t return_error; /* Write failed, return error code in read buf */ 368 uint32_t return_error2; /* Write failed, return error code in read */ 369 /* buffer. Used when sending a reply to a dead process that */ 370 /* we are also waiting on */ 371 wait_queue_head_t wait; 372 struct binder_stats stats; 373 }; 374 375 struct binder_transaction { 376 int debug_id; 377 struct binder_work work; 378 struct binder_thread *from; 379 struct binder_transaction *from_parent; 380 struct binder_proc *to_proc; 381 struct binder_thread *to_thread; 382 struct binder_transaction *to_parent; 383 unsigned need_reply:1; 384 /* unsigned is_dead:1; */ /* not used at the moment */ 385 386 struct binder_buffer *buffer; 387 unsigned int code; 388 unsigned int flags; 389 long priority; 390 long saved_priority; 391 kuid_t sender_euid; 392 }; 393 394 static void 395 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); 396 397 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) 398 { 399 struct files_struct *files = proc->files; 400 unsigned long rlim_cur; 401 unsigned long irqs; 402 403 if (files == NULL) 404 return -ESRCH; 405 406 if (!lock_task_sighand(proc->tsk, &irqs)) 407 return -EMFILE; 408 409 rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE); 410 unlock_task_sighand(proc->tsk, &irqs); 411 412 return __alloc_fd(files, 0, rlim_cur, flags); 413 } 414 415 /* 416 * copied from fd_install 417 */ 418 static void task_fd_install( 419 struct binder_proc *proc, unsigned int fd, struct file *file) 420 { 421 if (proc->files) 422 __fd_install(proc->files, fd, file); 423 } 424 425 /* 426 * copied from sys_close 427 */ 428 static long task_close_fd(struct binder_proc *proc, unsigned int fd) 429 { 430 int retval; 431 432 if (proc->files == NULL) 433 return -ESRCH; 434 435 retval = __close_fd(proc->files, fd); 436 /* can't restart close syscall because file table entry was cleared */ 437 if (unlikely(retval == -ERESTARTSYS || 438 retval == -ERESTARTNOINTR || 439 retval == -ERESTARTNOHAND || 440 retval == -ERESTART_RESTARTBLOCK)) 441 retval = -EINTR; 442 443 return retval; 444 } 445 446 static inline void binder_lock(const char *tag) 447 { 448 trace_binder_lock(tag); 449 mutex_lock(&binder_main_lock); 450 trace_binder_locked(tag); 451 } 452 453 static inline void binder_unlock(const char *tag) 454 { 455 trace_binder_unlock(tag); 456 mutex_unlock(&binder_main_lock); 457 } 458 459 static void binder_set_nice(long nice) 460 { 461 long min_nice; 462 463 if (can_nice(current, nice)) { 464 set_user_nice(current, nice); 465 return; 466 } 467 min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur); 468 binder_debug(BINDER_DEBUG_PRIORITY_CAP, 469 "%d: nice value %ld not allowed use %ld instead\n", 470 current->pid, nice, min_nice); 471 set_user_nice(current, min_nice); 472 if (min_nice <= MAX_NICE) 473 return; 474 binder_user_error("%d RLIMIT_NICE not set\n", current->pid); 475 } 476 477 static size_t binder_buffer_size(struct binder_proc *proc, 478 struct binder_buffer *buffer) 479 { 480 if (list_is_last(&buffer->entry, &proc->buffers)) 481 return proc->buffer + proc->buffer_size - (void *)buffer->data; 482 return (size_t)list_entry(buffer->entry.next, 483 struct binder_buffer, entry) - (size_t)buffer->data; 484 } 485 486 static void binder_insert_free_buffer(struct binder_proc *proc, 487 struct binder_buffer *new_buffer) 488 { 489 struct rb_node **p = &proc->free_buffers.rb_node; 490 struct rb_node *parent = NULL; 491 struct binder_buffer *buffer; 492 size_t buffer_size; 493 size_t new_buffer_size; 494 495 BUG_ON(!new_buffer->free); 496 497 new_buffer_size = binder_buffer_size(proc, new_buffer); 498 499 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 500 "%d: add free buffer, size %zd, at %p\n", 501 proc->pid, new_buffer_size, new_buffer); 502 503 while (*p) { 504 parent = *p; 505 buffer = rb_entry(parent, struct binder_buffer, rb_node); 506 BUG_ON(!buffer->free); 507 508 buffer_size = binder_buffer_size(proc, buffer); 509 510 if (new_buffer_size < buffer_size) 511 p = &parent->rb_left; 512 else 513 p = &parent->rb_right; 514 } 515 rb_link_node(&new_buffer->rb_node, parent, p); 516 rb_insert_color(&new_buffer->rb_node, &proc->free_buffers); 517 } 518 519 static void binder_insert_allocated_buffer(struct binder_proc *proc, 520 struct binder_buffer *new_buffer) 521 { 522 struct rb_node **p = &proc->allocated_buffers.rb_node; 523 struct rb_node *parent = NULL; 524 struct binder_buffer *buffer; 525 526 BUG_ON(new_buffer->free); 527 528 while (*p) { 529 parent = *p; 530 buffer = rb_entry(parent, struct binder_buffer, rb_node); 531 BUG_ON(buffer->free); 532 533 if (new_buffer < buffer) 534 p = &parent->rb_left; 535 else if (new_buffer > buffer) 536 p = &parent->rb_right; 537 else 538 BUG(); 539 } 540 rb_link_node(&new_buffer->rb_node, parent, p); 541 rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers); 542 } 543 544 static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc, 545 uintptr_t user_ptr) 546 { 547 struct rb_node *n = proc->allocated_buffers.rb_node; 548 struct binder_buffer *buffer; 549 struct binder_buffer *kern_ptr; 550 551 kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset 552 - offsetof(struct binder_buffer, data)); 553 554 while (n) { 555 buffer = rb_entry(n, struct binder_buffer, rb_node); 556 BUG_ON(buffer->free); 557 558 if (kern_ptr < buffer) 559 n = n->rb_left; 560 else if (kern_ptr > buffer) 561 n = n->rb_right; 562 else 563 return buffer; 564 } 565 return NULL; 566 } 567 568 static int binder_update_page_range(struct binder_proc *proc, int allocate, 569 void *start, void *end, 570 struct vm_area_struct *vma) 571 { 572 void *page_addr; 573 unsigned long user_page_addr; 574 struct page **page; 575 struct mm_struct *mm; 576 577 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 578 "%d: %s pages %p-%p\n", proc->pid, 579 allocate ? "allocate" : "free", start, end); 580 581 if (end <= start) 582 return 0; 583 584 trace_binder_update_page_range(proc, allocate, start, end); 585 586 if (vma) 587 mm = NULL; 588 else 589 mm = get_task_mm(proc->tsk); 590 591 if (mm) { 592 down_write(&mm->mmap_sem); 593 vma = proc->vma; 594 if (vma && mm != proc->vma_vm_mm) { 595 pr_err("%d: vma mm and task mm mismatch\n", 596 proc->pid); 597 vma = NULL; 598 } 599 } 600 601 if (allocate == 0) 602 goto free_range; 603 604 if (vma == NULL) { 605 pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n", 606 proc->pid); 607 goto err_no_vma; 608 } 609 610 for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { 611 int ret; 612 613 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; 614 615 BUG_ON(*page); 616 *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); 617 if (*page == NULL) { 618 pr_err("%d: binder_alloc_buf failed for page at %p\n", 619 proc->pid, page_addr); 620 goto err_alloc_page_failed; 621 } 622 ret = map_kernel_range_noflush((unsigned long)page_addr, 623 PAGE_SIZE, PAGE_KERNEL, page); 624 flush_cache_vmap((unsigned long)page_addr, 625 (unsigned long)page_addr + PAGE_SIZE); 626 if (ret != 1) { 627 pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n", 628 proc->pid, page_addr); 629 goto err_map_kernel_failed; 630 } 631 user_page_addr = 632 (uintptr_t)page_addr + proc->user_buffer_offset; 633 ret = vm_insert_page(vma, user_page_addr, page[0]); 634 if (ret) { 635 pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n", 636 proc->pid, user_page_addr); 637 goto err_vm_insert_page_failed; 638 } 639 /* vm_insert_page does not seem to increment the refcount */ 640 } 641 if (mm) { 642 up_write(&mm->mmap_sem); 643 mmput(mm); 644 } 645 return 0; 646 647 free_range: 648 for (page_addr = end - PAGE_SIZE; page_addr >= start; 649 page_addr -= PAGE_SIZE) { 650 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; 651 if (vma) 652 zap_page_range(vma, (uintptr_t)page_addr + 653 proc->user_buffer_offset, PAGE_SIZE, NULL); 654 err_vm_insert_page_failed: 655 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); 656 err_map_kernel_failed: 657 __free_page(*page); 658 *page = NULL; 659 err_alloc_page_failed: 660 ; 661 } 662 err_no_vma: 663 if (mm) { 664 up_write(&mm->mmap_sem); 665 mmput(mm); 666 } 667 return -ENOMEM; 668 } 669 670 static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc, 671 size_t data_size, 672 size_t offsets_size, int is_async) 673 { 674 struct rb_node *n = proc->free_buffers.rb_node; 675 struct binder_buffer *buffer; 676 size_t buffer_size; 677 struct rb_node *best_fit = NULL; 678 void *has_page_addr; 679 void *end_page_addr; 680 size_t size; 681 682 if (proc->vma == NULL) { 683 pr_err("%d: binder_alloc_buf, no vma\n", 684 proc->pid); 685 return NULL; 686 } 687 688 size = ALIGN(data_size, sizeof(void *)) + 689 ALIGN(offsets_size, sizeof(void *)); 690 691 if (size < data_size || size < offsets_size) { 692 binder_user_error("%d: got transaction with invalid size %zd-%zd\n", 693 proc->pid, data_size, offsets_size); 694 return NULL; 695 } 696 697 if (is_async && 698 proc->free_async_space < size + sizeof(struct binder_buffer)) { 699 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 700 "%d: binder_alloc_buf size %zd failed, no async space left\n", 701 proc->pid, size); 702 return NULL; 703 } 704 705 while (n) { 706 buffer = rb_entry(n, struct binder_buffer, rb_node); 707 BUG_ON(!buffer->free); 708 buffer_size = binder_buffer_size(proc, buffer); 709 710 if (size < buffer_size) { 711 best_fit = n; 712 n = n->rb_left; 713 } else if (size > buffer_size) 714 n = n->rb_right; 715 else { 716 best_fit = n; 717 break; 718 } 719 } 720 if (best_fit == NULL) { 721 pr_err("%d: binder_alloc_buf size %zd failed, no address space\n", 722 proc->pid, size); 723 return NULL; 724 } 725 if (n == NULL) { 726 buffer = rb_entry(best_fit, struct binder_buffer, rb_node); 727 buffer_size = binder_buffer_size(proc, buffer); 728 } 729 730 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 731 "%d: binder_alloc_buf size %zd got buffer %p size %zd\n", 732 proc->pid, size, buffer, buffer_size); 733 734 has_page_addr = 735 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); 736 if (n == NULL) { 737 if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) 738 buffer_size = size; /* no room for other buffers */ 739 else 740 buffer_size = size + sizeof(struct binder_buffer); 741 } 742 end_page_addr = 743 (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size); 744 if (end_page_addr > has_page_addr) 745 end_page_addr = has_page_addr; 746 if (binder_update_page_range(proc, 1, 747 (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL)) 748 return NULL; 749 750 rb_erase(best_fit, &proc->free_buffers); 751 buffer->free = 0; 752 binder_insert_allocated_buffer(proc, buffer); 753 if (buffer_size != size) { 754 struct binder_buffer *new_buffer = (void *)buffer->data + size; 755 756 list_add(&new_buffer->entry, &buffer->entry); 757 new_buffer->free = 1; 758 binder_insert_free_buffer(proc, new_buffer); 759 } 760 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 761 "%d: binder_alloc_buf size %zd got %p\n", 762 proc->pid, size, buffer); 763 buffer->data_size = data_size; 764 buffer->offsets_size = offsets_size; 765 buffer->async_transaction = is_async; 766 if (is_async) { 767 proc->free_async_space -= size + sizeof(struct binder_buffer); 768 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, 769 "%d: binder_alloc_buf size %zd async free %zd\n", 770 proc->pid, size, proc->free_async_space); 771 } 772 773 return buffer; 774 } 775 776 static void *buffer_start_page(struct binder_buffer *buffer) 777 { 778 return (void *)((uintptr_t)buffer & PAGE_MASK); 779 } 780 781 static void *buffer_end_page(struct binder_buffer *buffer) 782 { 783 return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK); 784 } 785 786 static void binder_delete_free_buffer(struct binder_proc *proc, 787 struct binder_buffer *buffer) 788 { 789 struct binder_buffer *prev, *next = NULL; 790 int free_page_end = 1; 791 int free_page_start = 1; 792 793 BUG_ON(proc->buffers.next == &buffer->entry); 794 prev = list_entry(buffer->entry.prev, struct binder_buffer, entry); 795 BUG_ON(!prev->free); 796 if (buffer_end_page(prev) == buffer_start_page(buffer)) { 797 free_page_start = 0; 798 if (buffer_end_page(prev) == buffer_end_page(buffer)) 799 free_page_end = 0; 800 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 801 "%d: merge free, buffer %p share page with %p\n", 802 proc->pid, buffer, prev); 803 } 804 805 if (!list_is_last(&buffer->entry, &proc->buffers)) { 806 next = list_entry(buffer->entry.next, 807 struct binder_buffer, entry); 808 if (buffer_start_page(next) == buffer_end_page(buffer)) { 809 free_page_end = 0; 810 if (buffer_start_page(next) == 811 buffer_start_page(buffer)) 812 free_page_start = 0; 813 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 814 "%d: merge free, buffer %p share page with %p\n", 815 proc->pid, buffer, prev); 816 } 817 } 818 list_del(&buffer->entry); 819 if (free_page_start || free_page_end) { 820 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 821 "%d: merge free, buffer %p do not share page%s%s with %p or %p\n", 822 proc->pid, buffer, free_page_start ? "" : " end", 823 free_page_end ? "" : " start", prev, next); 824 binder_update_page_range(proc, 0, free_page_start ? 825 buffer_start_page(buffer) : buffer_end_page(buffer), 826 (free_page_end ? buffer_end_page(buffer) : 827 buffer_start_page(buffer)) + PAGE_SIZE, NULL); 828 } 829 } 830 831 static void binder_free_buf(struct binder_proc *proc, 832 struct binder_buffer *buffer) 833 { 834 size_t size, buffer_size; 835 836 buffer_size = binder_buffer_size(proc, buffer); 837 838 size = ALIGN(buffer->data_size, sizeof(void *)) + 839 ALIGN(buffer->offsets_size, sizeof(void *)); 840 841 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 842 "%d: binder_free_buf %p size %zd buffer_size %zd\n", 843 proc->pid, buffer, size, buffer_size); 844 845 BUG_ON(buffer->free); 846 BUG_ON(size > buffer_size); 847 BUG_ON(buffer->transaction != NULL); 848 BUG_ON((void *)buffer < proc->buffer); 849 BUG_ON((void *)buffer > proc->buffer + proc->buffer_size); 850 851 if (buffer->async_transaction) { 852 proc->free_async_space += size + sizeof(struct binder_buffer); 853 854 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, 855 "%d: binder_free_buf size %zd async free %zd\n", 856 proc->pid, size, proc->free_async_space); 857 } 858 859 binder_update_page_range(proc, 0, 860 (void *)PAGE_ALIGN((uintptr_t)buffer->data), 861 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), 862 NULL); 863 rb_erase(&buffer->rb_node, &proc->allocated_buffers); 864 buffer->free = 1; 865 if (!list_is_last(&buffer->entry, &proc->buffers)) { 866 struct binder_buffer *next = list_entry(buffer->entry.next, 867 struct binder_buffer, entry); 868 869 if (next->free) { 870 rb_erase(&next->rb_node, &proc->free_buffers); 871 binder_delete_free_buffer(proc, next); 872 } 873 } 874 if (proc->buffers.next != &buffer->entry) { 875 struct binder_buffer *prev = list_entry(buffer->entry.prev, 876 struct binder_buffer, entry); 877 878 if (prev->free) { 879 binder_delete_free_buffer(proc, buffer); 880 rb_erase(&prev->rb_node, &proc->free_buffers); 881 buffer = prev; 882 } 883 } 884 binder_insert_free_buffer(proc, buffer); 885 } 886 887 static struct binder_node *binder_get_node(struct binder_proc *proc, 888 binder_uintptr_t ptr) 889 { 890 struct rb_node *n = proc->nodes.rb_node; 891 struct binder_node *node; 892 893 while (n) { 894 node = rb_entry(n, struct binder_node, rb_node); 895 896 if (ptr < node->ptr) 897 n = n->rb_left; 898 else if (ptr > node->ptr) 899 n = n->rb_right; 900 else 901 return node; 902 } 903 return NULL; 904 } 905 906 static struct binder_node *binder_new_node(struct binder_proc *proc, 907 binder_uintptr_t ptr, 908 binder_uintptr_t cookie) 909 { 910 struct rb_node **p = &proc->nodes.rb_node; 911 struct rb_node *parent = NULL; 912 struct binder_node *node; 913 914 while (*p) { 915 parent = *p; 916 node = rb_entry(parent, struct binder_node, rb_node); 917 918 if (ptr < node->ptr) 919 p = &(*p)->rb_left; 920 else if (ptr > node->ptr) 921 p = &(*p)->rb_right; 922 else 923 return NULL; 924 } 925 926 node = kzalloc(sizeof(*node), GFP_KERNEL); 927 if (node == NULL) 928 return NULL; 929 binder_stats_created(BINDER_STAT_NODE); 930 rb_link_node(&node->rb_node, parent, p); 931 rb_insert_color(&node->rb_node, &proc->nodes); 932 node->debug_id = ++binder_last_id; 933 node->proc = proc; 934 node->ptr = ptr; 935 node->cookie = cookie; 936 node->work.type = BINDER_WORK_NODE; 937 INIT_LIST_HEAD(&node->work.entry); 938 INIT_LIST_HEAD(&node->async_todo); 939 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 940 "%d:%d node %d u%016llx c%016llx created\n", 941 proc->pid, current->pid, node->debug_id, 942 (u64)node->ptr, (u64)node->cookie); 943 return node; 944 } 945 946 static int binder_inc_node(struct binder_node *node, int strong, int internal, 947 struct list_head *target_list) 948 { 949 if (strong) { 950 if (internal) { 951 if (target_list == NULL && 952 node->internal_strong_refs == 0 && 953 !(node->proc && 954 node == node->proc->context->binder_context_mgr_node && 955 node->has_strong_ref)) { 956 pr_err("invalid inc strong node for %d\n", 957 node->debug_id); 958 return -EINVAL; 959 } 960 node->internal_strong_refs++; 961 } else 962 node->local_strong_refs++; 963 if (!node->has_strong_ref && target_list) { 964 list_del_init(&node->work.entry); 965 list_add_tail(&node->work.entry, target_list); 966 } 967 } else { 968 if (!internal) 969 node->local_weak_refs++; 970 if (!node->has_weak_ref && list_empty(&node->work.entry)) { 971 if (target_list == NULL) { 972 pr_err("invalid inc weak node for %d\n", 973 node->debug_id); 974 return -EINVAL; 975 } 976 list_add_tail(&node->work.entry, target_list); 977 } 978 } 979 return 0; 980 } 981 982 static int binder_dec_node(struct binder_node *node, int strong, int internal) 983 { 984 if (strong) { 985 if (internal) 986 node->internal_strong_refs--; 987 else 988 node->local_strong_refs--; 989 if (node->local_strong_refs || node->internal_strong_refs) 990 return 0; 991 } else { 992 if (!internal) 993 node->local_weak_refs--; 994 if (node->local_weak_refs || !hlist_empty(&node->refs)) 995 return 0; 996 } 997 if (node->proc && (node->has_strong_ref || node->has_weak_ref)) { 998 if (list_empty(&node->work.entry)) { 999 list_add_tail(&node->work.entry, &node->proc->todo); 1000 wake_up_interruptible(&node->proc->wait); 1001 } 1002 } else { 1003 if (hlist_empty(&node->refs) && !node->local_strong_refs && 1004 !node->local_weak_refs) { 1005 list_del_init(&node->work.entry); 1006 if (node->proc) { 1007 rb_erase(&node->rb_node, &node->proc->nodes); 1008 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1009 "refless node %d deleted\n", 1010 node->debug_id); 1011 } else { 1012 hlist_del(&node->dead_node); 1013 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1014 "dead node %d deleted\n", 1015 node->debug_id); 1016 } 1017 kfree(node); 1018 binder_stats_deleted(BINDER_STAT_NODE); 1019 } 1020 } 1021 1022 return 0; 1023 } 1024 1025 1026 static struct binder_ref *binder_get_ref(struct binder_proc *proc, 1027 u32 desc, bool need_strong_ref) 1028 { 1029 struct rb_node *n = proc->refs_by_desc.rb_node; 1030 struct binder_ref *ref; 1031 1032 while (n) { 1033 ref = rb_entry(n, struct binder_ref, rb_node_desc); 1034 1035 if (desc < ref->desc) { 1036 n = n->rb_left; 1037 } else if (desc > ref->desc) { 1038 n = n->rb_right; 1039 } else if (need_strong_ref && !ref->strong) { 1040 binder_user_error("tried to use weak ref as strong ref\n"); 1041 return NULL; 1042 } else { 1043 return ref; 1044 } 1045 } 1046 return NULL; 1047 } 1048 1049 static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc, 1050 struct binder_node *node) 1051 { 1052 struct rb_node *n; 1053 struct rb_node **p = &proc->refs_by_node.rb_node; 1054 struct rb_node *parent = NULL; 1055 struct binder_ref *ref, *new_ref; 1056 struct binder_context *context = proc->context; 1057 1058 while (*p) { 1059 parent = *p; 1060 ref = rb_entry(parent, struct binder_ref, rb_node_node); 1061 1062 if (node < ref->node) 1063 p = &(*p)->rb_left; 1064 else if (node > ref->node) 1065 p = &(*p)->rb_right; 1066 else 1067 return ref; 1068 } 1069 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL); 1070 if (new_ref == NULL) 1071 return NULL; 1072 binder_stats_created(BINDER_STAT_REF); 1073 new_ref->debug_id = ++binder_last_id; 1074 new_ref->proc = proc; 1075 new_ref->node = node; 1076 rb_link_node(&new_ref->rb_node_node, parent, p); 1077 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node); 1078 1079 new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1; 1080 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { 1081 ref = rb_entry(n, struct binder_ref, rb_node_desc); 1082 if (ref->desc > new_ref->desc) 1083 break; 1084 new_ref->desc = ref->desc + 1; 1085 } 1086 1087 p = &proc->refs_by_desc.rb_node; 1088 while (*p) { 1089 parent = *p; 1090 ref = rb_entry(parent, struct binder_ref, rb_node_desc); 1091 1092 if (new_ref->desc < ref->desc) 1093 p = &(*p)->rb_left; 1094 else if (new_ref->desc > ref->desc) 1095 p = &(*p)->rb_right; 1096 else 1097 BUG(); 1098 } 1099 rb_link_node(&new_ref->rb_node_desc, parent, p); 1100 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc); 1101 if (node) { 1102 hlist_add_head(&new_ref->node_entry, &node->refs); 1103 1104 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1105 "%d new ref %d desc %d for node %d\n", 1106 proc->pid, new_ref->debug_id, new_ref->desc, 1107 node->debug_id); 1108 } else { 1109 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1110 "%d new ref %d desc %d for dead node\n", 1111 proc->pid, new_ref->debug_id, new_ref->desc); 1112 } 1113 return new_ref; 1114 } 1115 1116 static void binder_delete_ref(struct binder_ref *ref) 1117 { 1118 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1119 "%d delete ref %d desc %d for node %d\n", 1120 ref->proc->pid, ref->debug_id, ref->desc, 1121 ref->node->debug_id); 1122 1123 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc); 1124 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node); 1125 if (ref->strong) 1126 binder_dec_node(ref->node, 1, 1); 1127 hlist_del(&ref->node_entry); 1128 binder_dec_node(ref->node, 0, 1); 1129 if (ref->death) { 1130 binder_debug(BINDER_DEBUG_DEAD_BINDER, 1131 "%d delete ref %d desc %d has death notification\n", 1132 ref->proc->pid, ref->debug_id, ref->desc); 1133 list_del(&ref->death->work.entry); 1134 kfree(ref->death); 1135 binder_stats_deleted(BINDER_STAT_DEATH); 1136 } 1137 kfree(ref); 1138 binder_stats_deleted(BINDER_STAT_REF); 1139 } 1140 1141 static int binder_inc_ref(struct binder_ref *ref, int strong, 1142 struct list_head *target_list) 1143 { 1144 int ret; 1145 1146 if (strong) { 1147 if (ref->strong == 0) { 1148 ret = binder_inc_node(ref->node, 1, 1, target_list); 1149 if (ret) 1150 return ret; 1151 } 1152 ref->strong++; 1153 } else { 1154 if (ref->weak == 0) { 1155 ret = binder_inc_node(ref->node, 0, 1, target_list); 1156 if (ret) 1157 return ret; 1158 } 1159 ref->weak++; 1160 } 1161 return 0; 1162 } 1163 1164 1165 static int binder_dec_ref(struct binder_ref *ref, int strong) 1166 { 1167 if (strong) { 1168 if (ref->strong == 0) { 1169 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n", 1170 ref->proc->pid, ref->debug_id, 1171 ref->desc, ref->strong, ref->weak); 1172 return -EINVAL; 1173 } 1174 ref->strong--; 1175 if (ref->strong == 0) { 1176 int ret; 1177 1178 ret = binder_dec_node(ref->node, strong, 1); 1179 if (ret) 1180 return ret; 1181 } 1182 } else { 1183 if (ref->weak == 0) { 1184 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n", 1185 ref->proc->pid, ref->debug_id, 1186 ref->desc, ref->strong, ref->weak); 1187 return -EINVAL; 1188 } 1189 ref->weak--; 1190 } 1191 if (ref->strong == 0 && ref->weak == 0) 1192 binder_delete_ref(ref); 1193 return 0; 1194 } 1195 1196 static void binder_pop_transaction(struct binder_thread *target_thread, 1197 struct binder_transaction *t) 1198 { 1199 if (target_thread) { 1200 BUG_ON(target_thread->transaction_stack != t); 1201 BUG_ON(target_thread->transaction_stack->from != target_thread); 1202 target_thread->transaction_stack = 1203 target_thread->transaction_stack->from_parent; 1204 t->from = NULL; 1205 } 1206 t->need_reply = 0; 1207 if (t->buffer) 1208 t->buffer->transaction = NULL; 1209 kfree(t); 1210 binder_stats_deleted(BINDER_STAT_TRANSACTION); 1211 } 1212 1213 static void binder_send_failed_reply(struct binder_transaction *t, 1214 uint32_t error_code) 1215 { 1216 struct binder_thread *target_thread; 1217 struct binder_transaction *next; 1218 1219 BUG_ON(t->flags & TF_ONE_WAY); 1220 while (1) { 1221 target_thread = t->from; 1222 if (target_thread) { 1223 if (target_thread->return_error != BR_OK && 1224 target_thread->return_error2 == BR_OK) { 1225 target_thread->return_error2 = 1226 target_thread->return_error; 1227 target_thread->return_error = BR_OK; 1228 } 1229 if (target_thread->return_error == BR_OK) { 1230 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 1231 "send failed reply for transaction %d to %d:%d\n", 1232 t->debug_id, 1233 target_thread->proc->pid, 1234 target_thread->pid); 1235 1236 binder_pop_transaction(target_thread, t); 1237 target_thread->return_error = error_code; 1238 wake_up_interruptible(&target_thread->wait); 1239 } else { 1240 pr_err("reply failed, target thread, %d:%d, has error code %d already\n", 1241 target_thread->proc->pid, 1242 target_thread->pid, 1243 target_thread->return_error); 1244 } 1245 return; 1246 } 1247 next = t->from_parent; 1248 1249 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 1250 "send failed reply for transaction %d, target dead\n", 1251 t->debug_id); 1252 1253 binder_pop_transaction(target_thread, t); 1254 if (next == NULL) { 1255 binder_debug(BINDER_DEBUG_DEAD_BINDER, 1256 "reply failed, no target thread at root\n"); 1257 return; 1258 } 1259 t = next; 1260 binder_debug(BINDER_DEBUG_DEAD_BINDER, 1261 "reply failed, no target thread -- retry %d\n", 1262 t->debug_id); 1263 } 1264 } 1265 1266 /** 1267 * binder_validate_object() - checks for a valid metadata object in a buffer. 1268 * @buffer: binder_buffer that we're parsing. 1269 * @offset: offset in the buffer at which to validate an object. 1270 * 1271 * Return: If there's a valid metadata object at @offset in @buffer, the 1272 * size of that object. Otherwise, it returns zero. 1273 */ 1274 static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset) 1275 { 1276 /* Check if we can read a header first */ 1277 struct binder_object_header *hdr; 1278 size_t object_size = 0; 1279 1280 if (offset > buffer->data_size - sizeof(*hdr) || 1281 buffer->data_size < sizeof(*hdr) || 1282 !IS_ALIGNED(offset, sizeof(u32))) 1283 return 0; 1284 1285 /* Ok, now see if we can read a complete object. */ 1286 hdr = (struct binder_object_header *)(buffer->data + offset); 1287 switch (hdr->type) { 1288 case BINDER_TYPE_BINDER: 1289 case BINDER_TYPE_WEAK_BINDER: 1290 case BINDER_TYPE_HANDLE: 1291 case BINDER_TYPE_WEAK_HANDLE: 1292 object_size = sizeof(struct flat_binder_object); 1293 break; 1294 case BINDER_TYPE_FD: 1295 object_size = sizeof(struct binder_fd_object); 1296 break; 1297 default: 1298 return 0; 1299 } 1300 if (offset <= buffer->data_size - object_size && 1301 buffer->data_size >= object_size) 1302 return object_size; 1303 else 1304 return 0; 1305 } 1306 1307 static void binder_transaction_buffer_release(struct binder_proc *proc, 1308 struct binder_buffer *buffer, 1309 binder_size_t *failed_at) 1310 { 1311 binder_size_t *offp, *off_end; 1312 int debug_id = buffer->debug_id; 1313 1314 binder_debug(BINDER_DEBUG_TRANSACTION, 1315 "%d buffer release %d, size %zd-%zd, failed at %p\n", 1316 proc->pid, buffer->debug_id, 1317 buffer->data_size, buffer->offsets_size, failed_at); 1318 1319 if (buffer->target_node) 1320 binder_dec_node(buffer->target_node, 1, 0); 1321 1322 offp = (binder_size_t *)(buffer->data + 1323 ALIGN(buffer->data_size, sizeof(void *))); 1324 if (failed_at) 1325 off_end = failed_at; 1326 else 1327 off_end = (void *)offp + buffer->offsets_size; 1328 for (; offp < off_end; offp++) { 1329 struct binder_object_header *hdr; 1330 size_t object_size = binder_validate_object(buffer, *offp); 1331 1332 if (object_size == 0) { 1333 pr_err("transaction release %d bad object at offset %lld, size %zd\n", 1334 debug_id, (u64)*offp, buffer->data_size); 1335 continue; 1336 } 1337 hdr = (struct binder_object_header *)(buffer->data + *offp); 1338 switch (hdr->type) { 1339 case BINDER_TYPE_BINDER: 1340 case BINDER_TYPE_WEAK_BINDER: { 1341 struct flat_binder_object *fp; 1342 struct binder_node *node; 1343 1344 fp = to_flat_binder_object(hdr); 1345 node = binder_get_node(proc, fp->binder); 1346 if (node == NULL) { 1347 pr_err("transaction release %d bad node %016llx\n", 1348 debug_id, (u64)fp->binder); 1349 break; 1350 } 1351 binder_debug(BINDER_DEBUG_TRANSACTION, 1352 " node %d u%016llx\n", 1353 node->debug_id, (u64)node->ptr); 1354 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER, 1355 0); 1356 } break; 1357 case BINDER_TYPE_HANDLE: 1358 case BINDER_TYPE_WEAK_HANDLE: { 1359 struct flat_binder_object *fp; 1360 struct binder_ref *ref; 1361 1362 fp = to_flat_binder_object(hdr); 1363 ref = binder_get_ref(proc, fp->handle, 1364 hdr->type == BINDER_TYPE_HANDLE); 1365 if (ref == NULL) { 1366 pr_err("transaction release %d bad handle %d\n", 1367 debug_id, fp->handle); 1368 break; 1369 } 1370 binder_debug(BINDER_DEBUG_TRANSACTION, 1371 " ref %d desc %d (node %d)\n", 1372 ref->debug_id, ref->desc, ref->node->debug_id); 1373 binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE); 1374 } break; 1375 1376 case BINDER_TYPE_FD: { 1377 struct binder_fd_object *fp = to_binder_fd_object(hdr); 1378 1379 binder_debug(BINDER_DEBUG_TRANSACTION, 1380 " fd %d\n", fp->fd); 1381 if (failed_at) 1382 task_close_fd(proc, fp->fd); 1383 } break; 1384 1385 default: 1386 pr_err("transaction release %d bad object type %x\n", 1387 debug_id, hdr->type); 1388 break; 1389 } 1390 } 1391 } 1392 1393 static void binder_transaction(struct binder_proc *proc, 1394 struct binder_thread *thread, 1395 struct binder_transaction_data *tr, int reply) 1396 { 1397 struct binder_transaction *t; 1398 struct binder_work *tcomplete; 1399 binder_size_t *offp, *off_end; 1400 binder_size_t off_min; 1401 struct binder_proc *target_proc; 1402 struct binder_thread *target_thread = NULL; 1403 struct binder_node *target_node = NULL; 1404 struct list_head *target_list; 1405 wait_queue_head_t *target_wait; 1406 struct binder_transaction *in_reply_to = NULL; 1407 struct binder_transaction_log_entry *e; 1408 uint32_t return_error; 1409 struct binder_context *context = proc->context; 1410 1411 e = binder_transaction_log_add(&binder_transaction_log); 1412 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); 1413 e->from_proc = proc->pid; 1414 e->from_thread = thread->pid; 1415 e->target_handle = tr->target.handle; 1416 e->data_size = tr->data_size; 1417 e->offsets_size = tr->offsets_size; 1418 e->context_name = proc->context->name; 1419 1420 if (reply) { 1421 in_reply_to = thread->transaction_stack; 1422 if (in_reply_to == NULL) { 1423 binder_user_error("%d:%d got reply transaction with no transaction stack\n", 1424 proc->pid, thread->pid); 1425 return_error = BR_FAILED_REPLY; 1426 goto err_empty_call_stack; 1427 } 1428 binder_set_nice(in_reply_to->saved_priority); 1429 if (in_reply_to->to_thread != thread) { 1430 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n", 1431 proc->pid, thread->pid, in_reply_to->debug_id, 1432 in_reply_to->to_proc ? 1433 in_reply_to->to_proc->pid : 0, 1434 in_reply_to->to_thread ? 1435 in_reply_to->to_thread->pid : 0); 1436 return_error = BR_FAILED_REPLY; 1437 in_reply_to = NULL; 1438 goto err_bad_call_stack; 1439 } 1440 thread->transaction_stack = in_reply_to->to_parent; 1441 target_thread = in_reply_to->from; 1442 if (target_thread == NULL) { 1443 return_error = BR_DEAD_REPLY; 1444 goto err_dead_binder; 1445 } 1446 if (target_thread->transaction_stack != in_reply_to) { 1447 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n", 1448 proc->pid, thread->pid, 1449 target_thread->transaction_stack ? 1450 target_thread->transaction_stack->debug_id : 0, 1451 in_reply_to->debug_id); 1452 return_error = BR_FAILED_REPLY; 1453 in_reply_to = NULL; 1454 target_thread = NULL; 1455 goto err_dead_binder; 1456 } 1457 target_proc = target_thread->proc; 1458 } else { 1459 if (tr->target.handle) { 1460 struct binder_ref *ref; 1461 1462 ref = binder_get_ref(proc, tr->target.handle, true); 1463 if (ref == NULL) { 1464 binder_user_error("%d:%d got transaction to invalid handle\n", 1465 proc->pid, thread->pid); 1466 return_error = BR_FAILED_REPLY; 1467 goto err_invalid_target_handle; 1468 } 1469 target_node = ref->node; 1470 } else { 1471 target_node = context->binder_context_mgr_node; 1472 if (target_node == NULL) { 1473 return_error = BR_DEAD_REPLY; 1474 goto err_no_context_mgr_node; 1475 } 1476 } 1477 e->to_node = target_node->debug_id; 1478 target_proc = target_node->proc; 1479 if (target_proc == NULL) { 1480 return_error = BR_DEAD_REPLY; 1481 goto err_dead_binder; 1482 } 1483 if (security_binder_transaction(proc->tsk, 1484 target_proc->tsk) < 0) { 1485 return_error = BR_FAILED_REPLY; 1486 goto err_invalid_target_handle; 1487 } 1488 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { 1489 struct binder_transaction *tmp; 1490 1491 tmp = thread->transaction_stack; 1492 if (tmp->to_thread != thread) { 1493 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", 1494 proc->pid, thread->pid, tmp->debug_id, 1495 tmp->to_proc ? tmp->to_proc->pid : 0, 1496 tmp->to_thread ? 1497 tmp->to_thread->pid : 0); 1498 return_error = BR_FAILED_REPLY; 1499 goto err_bad_call_stack; 1500 } 1501 while (tmp) { 1502 if (tmp->from && tmp->from->proc == target_proc) 1503 target_thread = tmp->from; 1504 tmp = tmp->from_parent; 1505 } 1506 } 1507 } 1508 if (target_thread) { 1509 e->to_thread = target_thread->pid; 1510 target_list = &target_thread->todo; 1511 target_wait = &target_thread->wait; 1512 } else { 1513 target_list = &target_proc->todo; 1514 target_wait = &target_proc->wait; 1515 } 1516 e->to_proc = target_proc->pid; 1517 1518 /* TODO: reuse incoming transaction for reply */ 1519 t = kzalloc(sizeof(*t), GFP_KERNEL); 1520 if (t == NULL) { 1521 return_error = BR_FAILED_REPLY; 1522 goto err_alloc_t_failed; 1523 } 1524 binder_stats_created(BINDER_STAT_TRANSACTION); 1525 1526 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); 1527 if (tcomplete == NULL) { 1528 return_error = BR_FAILED_REPLY; 1529 goto err_alloc_tcomplete_failed; 1530 } 1531 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); 1532 1533 t->debug_id = ++binder_last_id; 1534 e->debug_id = t->debug_id; 1535 1536 if (reply) 1537 binder_debug(BINDER_DEBUG_TRANSACTION, 1538 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n", 1539 proc->pid, thread->pid, t->debug_id, 1540 target_proc->pid, target_thread->pid, 1541 (u64)tr->data.ptr.buffer, 1542 (u64)tr->data.ptr.offsets, 1543 (u64)tr->data_size, (u64)tr->offsets_size); 1544 else 1545 binder_debug(BINDER_DEBUG_TRANSACTION, 1546 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n", 1547 proc->pid, thread->pid, t->debug_id, 1548 target_proc->pid, target_node->debug_id, 1549 (u64)tr->data.ptr.buffer, 1550 (u64)tr->data.ptr.offsets, 1551 (u64)tr->data_size, (u64)tr->offsets_size); 1552 1553 if (!reply && !(tr->flags & TF_ONE_WAY)) 1554 t->from = thread; 1555 else 1556 t->from = NULL; 1557 t->sender_euid = task_euid(proc->tsk); 1558 t->to_proc = target_proc; 1559 t->to_thread = target_thread; 1560 t->code = tr->code; 1561 t->flags = tr->flags; 1562 t->priority = task_nice(current); 1563 1564 trace_binder_transaction(reply, t, target_node); 1565 1566 t->buffer = binder_alloc_buf(target_proc, tr->data_size, 1567 tr->offsets_size, !reply && (t->flags & TF_ONE_WAY)); 1568 if (t->buffer == NULL) { 1569 return_error = BR_FAILED_REPLY; 1570 goto err_binder_alloc_buf_failed; 1571 } 1572 t->buffer->allow_user_free = 0; 1573 t->buffer->debug_id = t->debug_id; 1574 t->buffer->transaction = t; 1575 t->buffer->target_node = target_node; 1576 trace_binder_transaction_alloc_buf(t->buffer); 1577 if (target_node) 1578 binder_inc_node(target_node, 1, 0, NULL); 1579 1580 offp = (binder_size_t *)(t->buffer->data + 1581 ALIGN(tr->data_size, sizeof(void *))); 1582 1583 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t) 1584 tr->data.ptr.buffer, tr->data_size)) { 1585 binder_user_error("%d:%d got transaction with invalid data ptr\n", 1586 proc->pid, thread->pid); 1587 return_error = BR_FAILED_REPLY; 1588 goto err_copy_data_failed; 1589 } 1590 if (copy_from_user(offp, (const void __user *)(uintptr_t) 1591 tr->data.ptr.offsets, tr->offsets_size)) { 1592 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 1593 proc->pid, thread->pid); 1594 return_error = BR_FAILED_REPLY; 1595 goto err_copy_data_failed; 1596 } 1597 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) { 1598 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n", 1599 proc->pid, thread->pid, (u64)tr->offsets_size); 1600 return_error = BR_FAILED_REPLY; 1601 goto err_bad_offset; 1602 } 1603 off_end = (void *)offp + tr->offsets_size; 1604 off_min = 0; 1605 for (; offp < off_end; offp++) { 1606 struct binder_object_header *hdr; 1607 size_t object_size = binder_validate_object(t->buffer, *offp); 1608 1609 if (object_size == 0 || *offp < off_min) { 1610 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n", 1611 proc->pid, thread->pid, (u64)*offp, 1612 (u64)off_min, 1613 (u64)t->buffer->data_size); 1614 return_error = BR_FAILED_REPLY; 1615 goto err_bad_offset; 1616 } 1617 1618 hdr = (struct binder_object_header *)(t->buffer->data + *offp); 1619 off_min = *offp + object_size; 1620 switch (hdr->type) { 1621 case BINDER_TYPE_BINDER: 1622 case BINDER_TYPE_WEAK_BINDER: { 1623 struct flat_binder_object *fp; 1624 struct binder_node *node; 1625 struct binder_ref *ref; 1626 1627 fp = to_flat_binder_object(hdr); 1628 node = binder_get_node(proc, fp->binder); 1629 if (node == NULL) { 1630 node = binder_new_node(proc, fp->binder, fp->cookie); 1631 if (node == NULL) { 1632 return_error = BR_FAILED_REPLY; 1633 goto err_binder_new_node_failed; 1634 } 1635 node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK; 1636 node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); 1637 } 1638 if (fp->cookie != node->cookie) { 1639 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n", 1640 proc->pid, thread->pid, 1641 (u64)fp->binder, node->debug_id, 1642 (u64)fp->cookie, (u64)node->cookie); 1643 return_error = BR_FAILED_REPLY; 1644 goto err_binder_get_ref_for_node_failed; 1645 } 1646 if (security_binder_transfer_binder(proc->tsk, 1647 target_proc->tsk)) { 1648 return_error = BR_FAILED_REPLY; 1649 goto err_binder_get_ref_for_node_failed; 1650 } 1651 ref = binder_get_ref_for_node(target_proc, node); 1652 if (ref == NULL) { 1653 return_error = BR_FAILED_REPLY; 1654 goto err_binder_get_ref_for_node_failed; 1655 } 1656 if (hdr->type == BINDER_TYPE_BINDER) 1657 hdr->type = BINDER_TYPE_HANDLE; 1658 else 1659 hdr->type = BINDER_TYPE_WEAK_HANDLE; 1660 fp->binder = 0; 1661 fp->handle = ref->desc; 1662 fp->cookie = 0; 1663 binder_inc_ref(ref, hdr->type == BINDER_TYPE_HANDLE, 1664 &thread->todo); 1665 1666 trace_binder_transaction_node_to_ref(t, node, ref); 1667 binder_debug(BINDER_DEBUG_TRANSACTION, 1668 " node %d u%016llx -> ref %d desc %d\n", 1669 node->debug_id, (u64)node->ptr, 1670 ref->debug_id, ref->desc); 1671 } break; 1672 case BINDER_TYPE_HANDLE: 1673 case BINDER_TYPE_WEAK_HANDLE: { 1674 struct flat_binder_object *fp; 1675 struct binder_ref *ref; 1676 1677 fp = to_flat_binder_object(hdr); 1678 ref = binder_get_ref(proc, fp->handle, 1679 hdr->type == BINDER_TYPE_HANDLE); 1680 if (ref == NULL) { 1681 binder_user_error("%d:%d got transaction with invalid handle, %d\n", 1682 proc->pid, 1683 thread->pid, fp->handle); 1684 return_error = BR_FAILED_REPLY; 1685 goto err_binder_get_ref_failed; 1686 } 1687 if (security_binder_transfer_binder(proc->tsk, 1688 target_proc->tsk)) { 1689 return_error = BR_FAILED_REPLY; 1690 goto err_binder_get_ref_failed; 1691 } 1692 if (ref->node->proc == target_proc) { 1693 if (hdr->type == BINDER_TYPE_HANDLE) 1694 hdr->type = BINDER_TYPE_BINDER; 1695 else 1696 hdr->type = BINDER_TYPE_WEAK_BINDER; 1697 fp->binder = ref->node->ptr; 1698 fp->cookie = ref->node->cookie; 1699 binder_inc_node(ref->node, 1700 hdr->type == BINDER_TYPE_BINDER, 1701 0, NULL); 1702 trace_binder_transaction_ref_to_node(t, ref); 1703 binder_debug(BINDER_DEBUG_TRANSACTION, 1704 " ref %d desc %d -> node %d u%016llx\n", 1705 ref->debug_id, ref->desc, ref->node->debug_id, 1706 (u64)ref->node->ptr); 1707 } else { 1708 struct binder_ref *new_ref; 1709 1710 new_ref = binder_get_ref_for_node(target_proc, ref->node); 1711 if (new_ref == NULL) { 1712 return_error = BR_FAILED_REPLY; 1713 goto err_binder_get_ref_for_node_failed; 1714 } 1715 fp->binder = 0; 1716 fp->handle = new_ref->desc; 1717 fp->cookie = 0; 1718 binder_inc_ref(new_ref, 1719 hdr->type == BINDER_TYPE_HANDLE, 1720 NULL); 1721 trace_binder_transaction_ref_to_ref(t, ref, 1722 new_ref); 1723 binder_debug(BINDER_DEBUG_TRANSACTION, 1724 " ref %d desc %d -> ref %d desc %d (node %d)\n", 1725 ref->debug_id, ref->desc, new_ref->debug_id, 1726 new_ref->desc, ref->node->debug_id); 1727 } 1728 } break; 1729 1730 case BINDER_TYPE_FD: { 1731 int target_fd; 1732 struct file *file; 1733 struct binder_fd_object *fp = to_binder_fd_object(hdr); 1734 1735 if (reply) { 1736 if (!(in_reply_to->flags & TF_ACCEPT_FDS)) { 1737 binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n", 1738 proc->pid, thread->pid, fp->fd); 1739 return_error = BR_FAILED_REPLY; 1740 goto err_fd_not_allowed; 1741 } 1742 } else if (!target_node->accept_fds) { 1743 binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n", 1744 proc->pid, thread->pid, fp->fd); 1745 return_error = BR_FAILED_REPLY; 1746 goto err_fd_not_allowed; 1747 } 1748 1749 file = fget(fp->fd); 1750 if (file == NULL) { 1751 binder_user_error("%d:%d got transaction with invalid fd, %d\n", 1752 proc->pid, thread->pid, fp->fd); 1753 return_error = BR_FAILED_REPLY; 1754 goto err_fget_failed; 1755 } 1756 if (security_binder_transfer_file(proc->tsk, 1757 target_proc->tsk, 1758 file) < 0) { 1759 fput(file); 1760 return_error = BR_FAILED_REPLY; 1761 goto err_get_unused_fd_failed; 1762 } 1763 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); 1764 if (target_fd < 0) { 1765 fput(file); 1766 return_error = BR_FAILED_REPLY; 1767 goto err_get_unused_fd_failed; 1768 } 1769 task_fd_install(target_proc, target_fd, file); 1770 trace_binder_transaction_fd(t, fp->fd, target_fd); 1771 binder_debug(BINDER_DEBUG_TRANSACTION, 1772 " fd %d -> %d\n", fp->fd, 1773 target_fd); 1774 /* TODO: fput? */ 1775 fp->pad_binder = 0; 1776 fp->fd = target_fd; 1777 } break; 1778 1779 default: 1780 binder_user_error("%d:%d got transaction with invalid object type, %x\n", 1781 proc->pid, thread->pid, hdr->type); 1782 return_error = BR_FAILED_REPLY; 1783 goto err_bad_object_type; 1784 } 1785 } 1786 if (reply) { 1787 BUG_ON(t->buffer->async_transaction != 0); 1788 binder_pop_transaction(target_thread, in_reply_to); 1789 } else if (!(t->flags & TF_ONE_WAY)) { 1790 BUG_ON(t->buffer->async_transaction != 0); 1791 t->need_reply = 1; 1792 t->from_parent = thread->transaction_stack; 1793 thread->transaction_stack = t; 1794 } else { 1795 BUG_ON(target_node == NULL); 1796 BUG_ON(t->buffer->async_transaction != 1); 1797 if (target_node->has_async_transaction) { 1798 target_list = &target_node->async_todo; 1799 target_wait = NULL; 1800 } else 1801 target_node->has_async_transaction = 1; 1802 } 1803 t->work.type = BINDER_WORK_TRANSACTION; 1804 list_add_tail(&t->work.entry, target_list); 1805 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; 1806 list_add_tail(&tcomplete->entry, &thread->todo); 1807 if (target_wait) 1808 wake_up_interruptible(target_wait); 1809 return; 1810 1811 err_get_unused_fd_failed: 1812 err_fget_failed: 1813 err_fd_not_allowed: 1814 err_binder_get_ref_for_node_failed: 1815 err_binder_get_ref_failed: 1816 err_binder_new_node_failed: 1817 err_bad_object_type: 1818 err_bad_offset: 1819 err_copy_data_failed: 1820 trace_binder_transaction_failed_buffer_release(t->buffer); 1821 binder_transaction_buffer_release(target_proc, t->buffer, offp); 1822 t->buffer->transaction = NULL; 1823 binder_free_buf(target_proc, t->buffer); 1824 err_binder_alloc_buf_failed: 1825 kfree(tcomplete); 1826 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 1827 err_alloc_tcomplete_failed: 1828 kfree(t); 1829 binder_stats_deleted(BINDER_STAT_TRANSACTION); 1830 err_alloc_t_failed: 1831 err_bad_call_stack: 1832 err_empty_call_stack: 1833 err_dead_binder: 1834 err_invalid_target_handle: 1835 err_no_context_mgr_node: 1836 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 1837 "%d:%d transaction failed %d, size %lld-%lld\n", 1838 proc->pid, thread->pid, return_error, 1839 (u64)tr->data_size, (u64)tr->offsets_size); 1840 1841 { 1842 struct binder_transaction_log_entry *fe; 1843 1844 fe = binder_transaction_log_add(&binder_transaction_log_failed); 1845 *fe = *e; 1846 } 1847 1848 BUG_ON(thread->return_error != BR_OK); 1849 if (in_reply_to) { 1850 thread->return_error = BR_TRANSACTION_COMPLETE; 1851 binder_send_failed_reply(in_reply_to, return_error); 1852 } else 1853 thread->return_error = return_error; 1854 } 1855 1856 static int binder_thread_write(struct binder_proc *proc, 1857 struct binder_thread *thread, 1858 binder_uintptr_t binder_buffer, size_t size, 1859 binder_size_t *consumed) 1860 { 1861 uint32_t cmd; 1862 struct binder_context *context = proc->context; 1863 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 1864 void __user *ptr = buffer + *consumed; 1865 void __user *end = buffer + size; 1866 1867 while (ptr < end && thread->return_error == BR_OK) { 1868 if (get_user(cmd, (uint32_t __user *)ptr)) 1869 return -EFAULT; 1870 ptr += sizeof(uint32_t); 1871 trace_binder_command(cmd); 1872 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { 1873 binder_stats.bc[_IOC_NR(cmd)]++; 1874 proc->stats.bc[_IOC_NR(cmd)]++; 1875 thread->stats.bc[_IOC_NR(cmd)]++; 1876 } 1877 switch (cmd) { 1878 case BC_INCREFS: 1879 case BC_ACQUIRE: 1880 case BC_RELEASE: 1881 case BC_DECREFS: { 1882 uint32_t target; 1883 struct binder_ref *ref; 1884 const char *debug_string; 1885 1886 if (get_user(target, (uint32_t __user *)ptr)) 1887 return -EFAULT; 1888 ptr += sizeof(uint32_t); 1889 if (target == 0 && context->binder_context_mgr_node && 1890 (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) { 1891 ref = binder_get_ref_for_node(proc, 1892 context->binder_context_mgr_node); 1893 if (ref->desc != target) { 1894 binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n", 1895 proc->pid, thread->pid, 1896 ref->desc); 1897 } 1898 } else 1899 ref = binder_get_ref(proc, target, 1900 cmd == BC_ACQUIRE || 1901 cmd == BC_RELEASE); 1902 if (ref == NULL) { 1903 binder_user_error("%d:%d refcount change on invalid ref %d\n", 1904 proc->pid, thread->pid, target); 1905 break; 1906 } 1907 switch (cmd) { 1908 case BC_INCREFS: 1909 debug_string = "IncRefs"; 1910 binder_inc_ref(ref, 0, NULL); 1911 break; 1912 case BC_ACQUIRE: 1913 debug_string = "Acquire"; 1914 binder_inc_ref(ref, 1, NULL); 1915 break; 1916 case BC_RELEASE: 1917 debug_string = "Release"; 1918 binder_dec_ref(ref, 1); 1919 break; 1920 case BC_DECREFS: 1921 default: 1922 debug_string = "DecRefs"; 1923 binder_dec_ref(ref, 0); 1924 break; 1925 } 1926 binder_debug(BINDER_DEBUG_USER_REFS, 1927 "%d:%d %s ref %d desc %d s %d w %d for node %d\n", 1928 proc->pid, thread->pid, debug_string, ref->debug_id, 1929 ref->desc, ref->strong, ref->weak, ref->node->debug_id); 1930 break; 1931 } 1932 case BC_INCREFS_DONE: 1933 case BC_ACQUIRE_DONE: { 1934 binder_uintptr_t node_ptr; 1935 binder_uintptr_t cookie; 1936 struct binder_node *node; 1937 1938 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr)) 1939 return -EFAULT; 1940 ptr += sizeof(binder_uintptr_t); 1941 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 1942 return -EFAULT; 1943 ptr += sizeof(binder_uintptr_t); 1944 node = binder_get_node(proc, node_ptr); 1945 if (node == NULL) { 1946 binder_user_error("%d:%d %s u%016llx no match\n", 1947 proc->pid, thread->pid, 1948 cmd == BC_INCREFS_DONE ? 1949 "BC_INCREFS_DONE" : 1950 "BC_ACQUIRE_DONE", 1951 (u64)node_ptr); 1952 break; 1953 } 1954 if (cookie != node->cookie) { 1955 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n", 1956 proc->pid, thread->pid, 1957 cmd == BC_INCREFS_DONE ? 1958 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 1959 (u64)node_ptr, node->debug_id, 1960 (u64)cookie, (u64)node->cookie); 1961 break; 1962 } 1963 if (cmd == BC_ACQUIRE_DONE) { 1964 if (node->pending_strong_ref == 0) { 1965 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", 1966 proc->pid, thread->pid, 1967 node->debug_id); 1968 break; 1969 } 1970 node->pending_strong_ref = 0; 1971 } else { 1972 if (node->pending_weak_ref == 0) { 1973 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", 1974 proc->pid, thread->pid, 1975 node->debug_id); 1976 break; 1977 } 1978 node->pending_weak_ref = 0; 1979 } 1980 binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0); 1981 binder_debug(BINDER_DEBUG_USER_REFS, 1982 "%d:%d %s node %d ls %d lw %d\n", 1983 proc->pid, thread->pid, 1984 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 1985 node->debug_id, node->local_strong_refs, node->local_weak_refs); 1986 break; 1987 } 1988 case BC_ATTEMPT_ACQUIRE: 1989 pr_err("BC_ATTEMPT_ACQUIRE not supported\n"); 1990 return -EINVAL; 1991 case BC_ACQUIRE_RESULT: 1992 pr_err("BC_ACQUIRE_RESULT not supported\n"); 1993 return -EINVAL; 1994 1995 case BC_FREE_BUFFER: { 1996 binder_uintptr_t data_ptr; 1997 struct binder_buffer *buffer; 1998 1999 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr)) 2000 return -EFAULT; 2001 ptr += sizeof(binder_uintptr_t); 2002 2003 buffer = binder_buffer_lookup(proc, data_ptr); 2004 if (buffer == NULL) { 2005 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n", 2006 proc->pid, thread->pid, (u64)data_ptr); 2007 break; 2008 } 2009 if (!buffer->allow_user_free) { 2010 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n", 2011 proc->pid, thread->pid, (u64)data_ptr); 2012 break; 2013 } 2014 binder_debug(BINDER_DEBUG_FREE_BUFFER, 2015 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n", 2016 proc->pid, thread->pid, (u64)data_ptr, 2017 buffer->debug_id, 2018 buffer->transaction ? "active" : "finished"); 2019 2020 if (buffer->transaction) { 2021 buffer->transaction->buffer = NULL; 2022 buffer->transaction = NULL; 2023 } 2024 if (buffer->async_transaction && buffer->target_node) { 2025 BUG_ON(!buffer->target_node->has_async_transaction); 2026 if (list_empty(&buffer->target_node->async_todo)) 2027 buffer->target_node->has_async_transaction = 0; 2028 else 2029 list_move_tail(buffer->target_node->async_todo.next, &thread->todo); 2030 } 2031 trace_binder_transaction_buffer_release(buffer); 2032 binder_transaction_buffer_release(proc, buffer, NULL); 2033 binder_free_buf(proc, buffer); 2034 break; 2035 } 2036 2037 case BC_TRANSACTION: 2038 case BC_REPLY: { 2039 struct binder_transaction_data tr; 2040 2041 if (copy_from_user(&tr, ptr, sizeof(tr))) 2042 return -EFAULT; 2043 ptr += sizeof(tr); 2044 binder_transaction(proc, thread, &tr, cmd == BC_REPLY); 2045 break; 2046 } 2047 2048 case BC_REGISTER_LOOPER: 2049 binder_debug(BINDER_DEBUG_THREADS, 2050 "%d:%d BC_REGISTER_LOOPER\n", 2051 proc->pid, thread->pid); 2052 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { 2053 thread->looper |= BINDER_LOOPER_STATE_INVALID; 2054 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", 2055 proc->pid, thread->pid); 2056 } else if (proc->requested_threads == 0) { 2057 thread->looper |= BINDER_LOOPER_STATE_INVALID; 2058 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n", 2059 proc->pid, thread->pid); 2060 } else { 2061 proc->requested_threads--; 2062 proc->requested_threads_started++; 2063 } 2064 thread->looper |= BINDER_LOOPER_STATE_REGISTERED; 2065 break; 2066 case BC_ENTER_LOOPER: 2067 binder_debug(BINDER_DEBUG_THREADS, 2068 "%d:%d BC_ENTER_LOOPER\n", 2069 proc->pid, thread->pid); 2070 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { 2071 thread->looper |= BINDER_LOOPER_STATE_INVALID; 2072 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n", 2073 proc->pid, thread->pid); 2074 } 2075 thread->looper |= BINDER_LOOPER_STATE_ENTERED; 2076 break; 2077 case BC_EXIT_LOOPER: 2078 binder_debug(BINDER_DEBUG_THREADS, 2079 "%d:%d BC_EXIT_LOOPER\n", 2080 proc->pid, thread->pid); 2081 thread->looper |= BINDER_LOOPER_STATE_EXITED; 2082 break; 2083 2084 case BC_REQUEST_DEATH_NOTIFICATION: 2085 case BC_CLEAR_DEATH_NOTIFICATION: { 2086 uint32_t target; 2087 binder_uintptr_t cookie; 2088 struct binder_ref *ref; 2089 struct binder_ref_death *death; 2090 2091 if (get_user(target, (uint32_t __user *)ptr)) 2092 return -EFAULT; 2093 ptr += sizeof(uint32_t); 2094 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 2095 return -EFAULT; 2096 ptr += sizeof(binder_uintptr_t); 2097 ref = binder_get_ref(proc, target, false); 2098 if (ref == NULL) { 2099 binder_user_error("%d:%d %s invalid ref %d\n", 2100 proc->pid, thread->pid, 2101 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 2102 "BC_REQUEST_DEATH_NOTIFICATION" : 2103 "BC_CLEAR_DEATH_NOTIFICATION", 2104 target); 2105 break; 2106 } 2107 2108 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 2109 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n", 2110 proc->pid, thread->pid, 2111 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 2112 "BC_REQUEST_DEATH_NOTIFICATION" : 2113 "BC_CLEAR_DEATH_NOTIFICATION", 2114 (u64)cookie, ref->debug_id, ref->desc, 2115 ref->strong, ref->weak, ref->node->debug_id); 2116 2117 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 2118 if (ref->death) { 2119 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", 2120 proc->pid, thread->pid); 2121 break; 2122 } 2123 death = kzalloc(sizeof(*death), GFP_KERNEL); 2124 if (death == NULL) { 2125 thread->return_error = BR_ERROR; 2126 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 2127 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", 2128 proc->pid, thread->pid); 2129 break; 2130 } 2131 binder_stats_created(BINDER_STAT_DEATH); 2132 INIT_LIST_HEAD(&death->work.entry); 2133 death->cookie = cookie; 2134 ref->death = death; 2135 if (ref->node->proc == NULL) { 2136 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 2137 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { 2138 list_add_tail(&ref->death->work.entry, &thread->todo); 2139 } else { 2140 list_add_tail(&ref->death->work.entry, &proc->todo); 2141 wake_up_interruptible(&proc->wait); 2142 } 2143 } 2144 } else { 2145 if (ref->death == NULL) { 2146 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", 2147 proc->pid, thread->pid); 2148 break; 2149 } 2150 death = ref->death; 2151 if (death->cookie != cookie) { 2152 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n", 2153 proc->pid, thread->pid, 2154 (u64)death->cookie, 2155 (u64)cookie); 2156 break; 2157 } 2158 ref->death = NULL; 2159 if (list_empty(&death->work.entry)) { 2160 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 2161 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { 2162 list_add_tail(&death->work.entry, &thread->todo); 2163 } else { 2164 list_add_tail(&death->work.entry, &proc->todo); 2165 wake_up_interruptible(&proc->wait); 2166 } 2167 } else { 2168 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); 2169 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; 2170 } 2171 } 2172 } break; 2173 case BC_DEAD_BINDER_DONE: { 2174 struct binder_work *w; 2175 binder_uintptr_t cookie; 2176 struct binder_ref_death *death = NULL; 2177 2178 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 2179 return -EFAULT; 2180 2181 ptr += sizeof(cookie); 2182 list_for_each_entry(w, &proc->delivered_death, entry) { 2183 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); 2184 2185 if (tmp_death->cookie == cookie) { 2186 death = tmp_death; 2187 break; 2188 } 2189 } 2190 binder_debug(BINDER_DEBUG_DEAD_BINDER, 2191 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n", 2192 proc->pid, thread->pid, (u64)cookie, 2193 death); 2194 if (death == NULL) { 2195 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n", 2196 proc->pid, thread->pid, (u64)cookie); 2197 break; 2198 } 2199 2200 list_del_init(&death->work.entry); 2201 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { 2202 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 2203 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { 2204 list_add_tail(&death->work.entry, &thread->todo); 2205 } else { 2206 list_add_tail(&death->work.entry, &proc->todo); 2207 wake_up_interruptible(&proc->wait); 2208 } 2209 } 2210 } break; 2211 2212 default: 2213 pr_err("%d:%d unknown command %d\n", 2214 proc->pid, thread->pid, cmd); 2215 return -EINVAL; 2216 } 2217 *consumed = ptr - buffer; 2218 } 2219 return 0; 2220 } 2221 2222 static void binder_stat_br(struct binder_proc *proc, 2223 struct binder_thread *thread, uint32_t cmd) 2224 { 2225 trace_binder_return(cmd); 2226 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { 2227 binder_stats.br[_IOC_NR(cmd)]++; 2228 proc->stats.br[_IOC_NR(cmd)]++; 2229 thread->stats.br[_IOC_NR(cmd)]++; 2230 } 2231 } 2232 2233 static int binder_has_proc_work(struct binder_proc *proc, 2234 struct binder_thread *thread) 2235 { 2236 return !list_empty(&proc->todo) || 2237 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); 2238 } 2239 2240 static int binder_has_thread_work(struct binder_thread *thread) 2241 { 2242 return !list_empty(&thread->todo) || thread->return_error != BR_OK || 2243 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); 2244 } 2245 2246 static int binder_thread_read(struct binder_proc *proc, 2247 struct binder_thread *thread, 2248 binder_uintptr_t binder_buffer, size_t size, 2249 binder_size_t *consumed, int non_block) 2250 { 2251 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 2252 void __user *ptr = buffer + *consumed; 2253 void __user *end = buffer + size; 2254 2255 int ret = 0; 2256 int wait_for_proc_work; 2257 2258 if (*consumed == 0) { 2259 if (put_user(BR_NOOP, (uint32_t __user *)ptr)) 2260 return -EFAULT; 2261 ptr += sizeof(uint32_t); 2262 } 2263 2264 retry: 2265 wait_for_proc_work = thread->transaction_stack == NULL && 2266 list_empty(&thread->todo); 2267 2268 if (thread->return_error != BR_OK && ptr < end) { 2269 if (thread->return_error2 != BR_OK) { 2270 if (put_user(thread->return_error2, (uint32_t __user *)ptr)) 2271 return -EFAULT; 2272 ptr += sizeof(uint32_t); 2273 binder_stat_br(proc, thread, thread->return_error2); 2274 if (ptr == end) 2275 goto done; 2276 thread->return_error2 = BR_OK; 2277 } 2278 if (put_user(thread->return_error, (uint32_t __user *)ptr)) 2279 return -EFAULT; 2280 ptr += sizeof(uint32_t); 2281 binder_stat_br(proc, thread, thread->return_error); 2282 thread->return_error = BR_OK; 2283 goto done; 2284 } 2285 2286 2287 thread->looper |= BINDER_LOOPER_STATE_WAITING; 2288 if (wait_for_proc_work) 2289 proc->ready_threads++; 2290 2291 binder_unlock(__func__); 2292 2293 trace_binder_wait_for_work(wait_for_proc_work, 2294 !!thread->transaction_stack, 2295 !list_empty(&thread->todo)); 2296 if (wait_for_proc_work) { 2297 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 2298 BINDER_LOOPER_STATE_ENTERED))) { 2299 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n", 2300 proc->pid, thread->pid, thread->looper); 2301 wait_event_interruptible(binder_user_error_wait, 2302 binder_stop_on_user_error < 2); 2303 } 2304 binder_set_nice(proc->default_priority); 2305 if (non_block) { 2306 if (!binder_has_proc_work(proc, thread)) 2307 ret = -EAGAIN; 2308 } else 2309 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread)); 2310 } else { 2311 if (non_block) { 2312 if (!binder_has_thread_work(thread)) 2313 ret = -EAGAIN; 2314 } else 2315 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread)); 2316 } 2317 2318 binder_lock(__func__); 2319 2320 if (wait_for_proc_work) 2321 proc->ready_threads--; 2322 thread->looper &= ~BINDER_LOOPER_STATE_WAITING; 2323 2324 if (ret) 2325 return ret; 2326 2327 while (1) { 2328 uint32_t cmd; 2329 struct binder_transaction_data tr; 2330 struct binder_work *w; 2331 struct binder_transaction *t = NULL; 2332 2333 if (!list_empty(&thread->todo)) { 2334 w = list_first_entry(&thread->todo, struct binder_work, 2335 entry); 2336 } else if (!list_empty(&proc->todo) && wait_for_proc_work) { 2337 w = list_first_entry(&proc->todo, struct binder_work, 2338 entry); 2339 } else { 2340 /* no data added */ 2341 if (ptr - buffer == 4 && 2342 !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) 2343 goto retry; 2344 break; 2345 } 2346 2347 if (end - ptr < sizeof(tr) + 4) 2348 break; 2349 2350 switch (w->type) { 2351 case BINDER_WORK_TRANSACTION: { 2352 t = container_of(w, struct binder_transaction, work); 2353 } break; 2354 case BINDER_WORK_TRANSACTION_COMPLETE: { 2355 cmd = BR_TRANSACTION_COMPLETE; 2356 if (put_user(cmd, (uint32_t __user *)ptr)) 2357 return -EFAULT; 2358 ptr += sizeof(uint32_t); 2359 2360 binder_stat_br(proc, thread, cmd); 2361 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, 2362 "%d:%d BR_TRANSACTION_COMPLETE\n", 2363 proc->pid, thread->pid); 2364 2365 list_del(&w->entry); 2366 kfree(w); 2367 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 2368 } break; 2369 case BINDER_WORK_NODE: { 2370 struct binder_node *node = container_of(w, struct binder_node, work); 2371 uint32_t cmd = BR_NOOP; 2372 const char *cmd_name; 2373 int strong = node->internal_strong_refs || node->local_strong_refs; 2374 int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong; 2375 2376 if (weak && !node->has_weak_ref) { 2377 cmd = BR_INCREFS; 2378 cmd_name = "BR_INCREFS"; 2379 node->has_weak_ref = 1; 2380 node->pending_weak_ref = 1; 2381 node->local_weak_refs++; 2382 } else if (strong && !node->has_strong_ref) { 2383 cmd = BR_ACQUIRE; 2384 cmd_name = "BR_ACQUIRE"; 2385 node->has_strong_ref = 1; 2386 node->pending_strong_ref = 1; 2387 node->local_strong_refs++; 2388 } else if (!strong && node->has_strong_ref) { 2389 cmd = BR_RELEASE; 2390 cmd_name = "BR_RELEASE"; 2391 node->has_strong_ref = 0; 2392 } else if (!weak && node->has_weak_ref) { 2393 cmd = BR_DECREFS; 2394 cmd_name = "BR_DECREFS"; 2395 node->has_weak_ref = 0; 2396 } 2397 if (cmd != BR_NOOP) { 2398 if (put_user(cmd, (uint32_t __user *)ptr)) 2399 return -EFAULT; 2400 ptr += sizeof(uint32_t); 2401 if (put_user(node->ptr, 2402 (binder_uintptr_t __user *)ptr)) 2403 return -EFAULT; 2404 ptr += sizeof(binder_uintptr_t); 2405 if (put_user(node->cookie, 2406 (binder_uintptr_t __user *)ptr)) 2407 return -EFAULT; 2408 ptr += sizeof(binder_uintptr_t); 2409 2410 binder_stat_br(proc, thread, cmd); 2411 binder_debug(BINDER_DEBUG_USER_REFS, 2412 "%d:%d %s %d u%016llx c%016llx\n", 2413 proc->pid, thread->pid, cmd_name, 2414 node->debug_id, 2415 (u64)node->ptr, (u64)node->cookie); 2416 } else { 2417 list_del_init(&w->entry); 2418 if (!weak && !strong) { 2419 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 2420 "%d:%d node %d u%016llx c%016llx deleted\n", 2421 proc->pid, thread->pid, 2422 node->debug_id, 2423 (u64)node->ptr, 2424 (u64)node->cookie); 2425 rb_erase(&node->rb_node, &proc->nodes); 2426 kfree(node); 2427 binder_stats_deleted(BINDER_STAT_NODE); 2428 } else { 2429 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 2430 "%d:%d node %d u%016llx c%016llx state unchanged\n", 2431 proc->pid, thread->pid, 2432 node->debug_id, 2433 (u64)node->ptr, 2434 (u64)node->cookie); 2435 } 2436 } 2437 } break; 2438 case BINDER_WORK_DEAD_BINDER: 2439 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 2440 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 2441 struct binder_ref_death *death; 2442 uint32_t cmd; 2443 2444 death = container_of(w, struct binder_ref_death, work); 2445 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) 2446 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; 2447 else 2448 cmd = BR_DEAD_BINDER; 2449 if (put_user(cmd, (uint32_t __user *)ptr)) 2450 return -EFAULT; 2451 ptr += sizeof(uint32_t); 2452 if (put_user(death->cookie, 2453 (binder_uintptr_t __user *)ptr)) 2454 return -EFAULT; 2455 ptr += sizeof(binder_uintptr_t); 2456 binder_stat_br(proc, thread, cmd); 2457 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 2458 "%d:%d %s %016llx\n", 2459 proc->pid, thread->pid, 2460 cmd == BR_DEAD_BINDER ? 2461 "BR_DEAD_BINDER" : 2462 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 2463 (u64)death->cookie); 2464 2465 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { 2466 list_del(&w->entry); 2467 kfree(death); 2468 binder_stats_deleted(BINDER_STAT_DEATH); 2469 } else 2470 list_move(&w->entry, &proc->delivered_death); 2471 if (cmd == BR_DEAD_BINDER) 2472 goto done; /* DEAD_BINDER notifications can cause transactions */ 2473 } break; 2474 } 2475 2476 if (!t) 2477 continue; 2478 2479 BUG_ON(t->buffer == NULL); 2480 if (t->buffer->target_node) { 2481 struct binder_node *target_node = t->buffer->target_node; 2482 2483 tr.target.ptr = target_node->ptr; 2484 tr.cookie = target_node->cookie; 2485 t->saved_priority = task_nice(current); 2486 if (t->priority < target_node->min_priority && 2487 !(t->flags & TF_ONE_WAY)) 2488 binder_set_nice(t->priority); 2489 else if (!(t->flags & TF_ONE_WAY) || 2490 t->saved_priority > target_node->min_priority) 2491 binder_set_nice(target_node->min_priority); 2492 cmd = BR_TRANSACTION; 2493 } else { 2494 tr.target.ptr = 0; 2495 tr.cookie = 0; 2496 cmd = BR_REPLY; 2497 } 2498 tr.code = t->code; 2499 tr.flags = t->flags; 2500 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); 2501 2502 if (t->from) { 2503 struct task_struct *sender = t->from->proc->tsk; 2504 2505 tr.sender_pid = task_tgid_nr_ns(sender, 2506 task_active_pid_ns(current)); 2507 } else { 2508 tr.sender_pid = 0; 2509 } 2510 2511 tr.data_size = t->buffer->data_size; 2512 tr.offsets_size = t->buffer->offsets_size; 2513 tr.data.ptr.buffer = (binder_uintptr_t)( 2514 (uintptr_t)t->buffer->data + 2515 proc->user_buffer_offset); 2516 tr.data.ptr.offsets = tr.data.ptr.buffer + 2517 ALIGN(t->buffer->data_size, 2518 sizeof(void *)); 2519 2520 if (put_user(cmd, (uint32_t __user *)ptr)) 2521 return -EFAULT; 2522 ptr += sizeof(uint32_t); 2523 if (copy_to_user(ptr, &tr, sizeof(tr))) 2524 return -EFAULT; 2525 ptr += sizeof(tr); 2526 2527 trace_binder_transaction_received(t); 2528 binder_stat_br(proc, thread, cmd); 2529 binder_debug(BINDER_DEBUG_TRANSACTION, 2530 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", 2531 proc->pid, thread->pid, 2532 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : 2533 "BR_REPLY", 2534 t->debug_id, t->from ? t->from->proc->pid : 0, 2535 t->from ? t->from->pid : 0, cmd, 2536 t->buffer->data_size, t->buffer->offsets_size, 2537 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets); 2538 2539 list_del(&t->work.entry); 2540 t->buffer->allow_user_free = 1; 2541 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { 2542 t->to_parent = thread->transaction_stack; 2543 t->to_thread = thread; 2544 thread->transaction_stack = t; 2545 } else { 2546 t->buffer->transaction = NULL; 2547 kfree(t); 2548 binder_stats_deleted(BINDER_STAT_TRANSACTION); 2549 } 2550 break; 2551 } 2552 2553 done: 2554 2555 *consumed = ptr - buffer; 2556 if (proc->requested_threads + proc->ready_threads == 0 && 2557 proc->requested_threads_started < proc->max_threads && 2558 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 2559 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ 2560 /*spawn a new thread if we leave this out */) { 2561 proc->requested_threads++; 2562 binder_debug(BINDER_DEBUG_THREADS, 2563 "%d:%d BR_SPAWN_LOOPER\n", 2564 proc->pid, thread->pid); 2565 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) 2566 return -EFAULT; 2567 binder_stat_br(proc, thread, BR_SPAWN_LOOPER); 2568 } 2569 return 0; 2570 } 2571 2572 static void binder_release_work(struct list_head *list) 2573 { 2574 struct binder_work *w; 2575 2576 while (!list_empty(list)) { 2577 w = list_first_entry(list, struct binder_work, entry); 2578 list_del_init(&w->entry); 2579 switch (w->type) { 2580 case BINDER_WORK_TRANSACTION: { 2581 struct binder_transaction *t; 2582 2583 t = container_of(w, struct binder_transaction, work); 2584 if (t->buffer->target_node && 2585 !(t->flags & TF_ONE_WAY)) { 2586 binder_send_failed_reply(t, BR_DEAD_REPLY); 2587 } else { 2588 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 2589 "undelivered transaction %d\n", 2590 t->debug_id); 2591 t->buffer->transaction = NULL; 2592 kfree(t); 2593 binder_stats_deleted(BINDER_STAT_TRANSACTION); 2594 } 2595 } break; 2596 case BINDER_WORK_TRANSACTION_COMPLETE: { 2597 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 2598 "undelivered TRANSACTION_COMPLETE\n"); 2599 kfree(w); 2600 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 2601 } break; 2602 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 2603 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 2604 struct binder_ref_death *death; 2605 2606 death = container_of(w, struct binder_ref_death, work); 2607 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 2608 "undelivered death notification, %016llx\n", 2609 (u64)death->cookie); 2610 kfree(death); 2611 binder_stats_deleted(BINDER_STAT_DEATH); 2612 } break; 2613 default: 2614 pr_err("unexpected work type, %d, not freed\n", 2615 w->type); 2616 break; 2617 } 2618 } 2619 2620 } 2621 2622 static struct binder_thread *binder_get_thread(struct binder_proc *proc) 2623 { 2624 struct binder_thread *thread = NULL; 2625 struct rb_node *parent = NULL; 2626 struct rb_node **p = &proc->threads.rb_node; 2627 2628 while (*p) { 2629 parent = *p; 2630 thread = rb_entry(parent, struct binder_thread, rb_node); 2631 2632 if (current->pid < thread->pid) 2633 p = &(*p)->rb_left; 2634 else if (current->pid > thread->pid) 2635 p = &(*p)->rb_right; 2636 else 2637 break; 2638 } 2639 if (*p == NULL) { 2640 thread = kzalloc(sizeof(*thread), GFP_KERNEL); 2641 if (thread == NULL) 2642 return NULL; 2643 binder_stats_created(BINDER_STAT_THREAD); 2644 thread->proc = proc; 2645 thread->pid = current->pid; 2646 init_waitqueue_head(&thread->wait); 2647 INIT_LIST_HEAD(&thread->todo); 2648 rb_link_node(&thread->rb_node, parent, p); 2649 rb_insert_color(&thread->rb_node, &proc->threads); 2650 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; 2651 thread->return_error = BR_OK; 2652 thread->return_error2 = BR_OK; 2653 } 2654 return thread; 2655 } 2656 2657 static int binder_free_thread(struct binder_proc *proc, 2658 struct binder_thread *thread) 2659 { 2660 struct binder_transaction *t; 2661 struct binder_transaction *send_reply = NULL; 2662 int active_transactions = 0; 2663 2664 rb_erase(&thread->rb_node, &proc->threads); 2665 t = thread->transaction_stack; 2666 if (t && t->to_thread == thread) 2667 send_reply = t; 2668 while (t) { 2669 active_transactions++; 2670 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 2671 "release %d:%d transaction %d %s, still active\n", 2672 proc->pid, thread->pid, 2673 t->debug_id, 2674 (t->to_thread == thread) ? "in" : "out"); 2675 2676 if (t->to_thread == thread) { 2677 t->to_proc = NULL; 2678 t->to_thread = NULL; 2679 if (t->buffer) { 2680 t->buffer->transaction = NULL; 2681 t->buffer = NULL; 2682 } 2683 t = t->to_parent; 2684 } else if (t->from == thread) { 2685 t->from = NULL; 2686 t = t->from_parent; 2687 } else 2688 BUG(); 2689 } 2690 if (send_reply) 2691 binder_send_failed_reply(send_reply, BR_DEAD_REPLY); 2692 binder_release_work(&thread->todo); 2693 kfree(thread); 2694 binder_stats_deleted(BINDER_STAT_THREAD); 2695 return active_transactions; 2696 } 2697 2698 static unsigned int binder_poll(struct file *filp, 2699 struct poll_table_struct *wait) 2700 { 2701 struct binder_proc *proc = filp->private_data; 2702 struct binder_thread *thread = NULL; 2703 int wait_for_proc_work; 2704 2705 binder_lock(__func__); 2706 2707 thread = binder_get_thread(proc); 2708 2709 wait_for_proc_work = thread->transaction_stack == NULL && 2710 list_empty(&thread->todo) && thread->return_error == BR_OK; 2711 2712 binder_unlock(__func__); 2713 2714 if (wait_for_proc_work) { 2715 if (binder_has_proc_work(proc, thread)) 2716 return POLLIN; 2717 poll_wait(filp, &proc->wait, wait); 2718 if (binder_has_proc_work(proc, thread)) 2719 return POLLIN; 2720 } else { 2721 if (binder_has_thread_work(thread)) 2722 return POLLIN; 2723 poll_wait(filp, &thread->wait, wait); 2724 if (binder_has_thread_work(thread)) 2725 return POLLIN; 2726 } 2727 return 0; 2728 } 2729 2730 static int binder_ioctl_write_read(struct file *filp, 2731 unsigned int cmd, unsigned long arg, 2732 struct binder_thread *thread) 2733 { 2734 int ret = 0; 2735 struct binder_proc *proc = filp->private_data; 2736 unsigned int size = _IOC_SIZE(cmd); 2737 void __user *ubuf = (void __user *)arg; 2738 struct binder_write_read bwr; 2739 2740 if (size != sizeof(struct binder_write_read)) { 2741 ret = -EINVAL; 2742 goto out; 2743 } 2744 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { 2745 ret = -EFAULT; 2746 goto out; 2747 } 2748 binder_debug(BINDER_DEBUG_READ_WRITE, 2749 "%d:%d write %lld at %016llx, read %lld at %016llx\n", 2750 proc->pid, thread->pid, 2751 (u64)bwr.write_size, (u64)bwr.write_buffer, 2752 (u64)bwr.read_size, (u64)bwr.read_buffer); 2753 2754 if (bwr.write_size > 0) { 2755 ret = binder_thread_write(proc, thread, 2756 bwr.write_buffer, 2757 bwr.write_size, 2758 &bwr.write_consumed); 2759 trace_binder_write_done(ret); 2760 if (ret < 0) { 2761 bwr.read_consumed = 0; 2762 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 2763 ret = -EFAULT; 2764 goto out; 2765 } 2766 } 2767 if (bwr.read_size > 0) { 2768 ret = binder_thread_read(proc, thread, bwr.read_buffer, 2769 bwr.read_size, 2770 &bwr.read_consumed, 2771 filp->f_flags & O_NONBLOCK); 2772 trace_binder_read_done(ret); 2773 if (!list_empty(&proc->todo)) 2774 wake_up_interruptible(&proc->wait); 2775 if (ret < 0) { 2776 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 2777 ret = -EFAULT; 2778 goto out; 2779 } 2780 } 2781 binder_debug(BINDER_DEBUG_READ_WRITE, 2782 "%d:%d wrote %lld of %lld, read return %lld of %lld\n", 2783 proc->pid, thread->pid, 2784 (u64)bwr.write_consumed, (u64)bwr.write_size, 2785 (u64)bwr.read_consumed, (u64)bwr.read_size); 2786 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { 2787 ret = -EFAULT; 2788 goto out; 2789 } 2790 out: 2791 return ret; 2792 } 2793 2794 static int binder_ioctl_set_ctx_mgr(struct file *filp) 2795 { 2796 int ret = 0; 2797 struct binder_proc *proc = filp->private_data; 2798 struct binder_context *context = proc->context; 2799 2800 kuid_t curr_euid = current_euid(); 2801 2802 if (context->binder_context_mgr_node) { 2803 pr_err("BINDER_SET_CONTEXT_MGR already set\n"); 2804 ret = -EBUSY; 2805 goto out; 2806 } 2807 ret = security_binder_set_context_mgr(proc->tsk); 2808 if (ret < 0) 2809 goto out; 2810 if (uid_valid(context->binder_context_mgr_uid)) { 2811 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) { 2812 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", 2813 from_kuid(&init_user_ns, curr_euid), 2814 from_kuid(&init_user_ns, 2815 context->binder_context_mgr_uid)); 2816 ret = -EPERM; 2817 goto out; 2818 } 2819 } else { 2820 context->binder_context_mgr_uid = curr_euid; 2821 } 2822 context->binder_context_mgr_node = binder_new_node(proc, 0, 0); 2823 if (!context->binder_context_mgr_node) { 2824 ret = -ENOMEM; 2825 goto out; 2826 } 2827 context->binder_context_mgr_node->local_weak_refs++; 2828 context->binder_context_mgr_node->local_strong_refs++; 2829 context->binder_context_mgr_node->has_strong_ref = 1; 2830 context->binder_context_mgr_node->has_weak_ref = 1; 2831 out: 2832 return ret; 2833 } 2834 2835 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 2836 { 2837 int ret; 2838 struct binder_proc *proc = filp->private_data; 2839 struct binder_thread *thread; 2840 unsigned int size = _IOC_SIZE(cmd); 2841 void __user *ubuf = (void __user *)arg; 2842 2843 /*pr_info("binder_ioctl: %d:%d %x %lx\n", 2844 proc->pid, current->pid, cmd, arg);*/ 2845 2846 if (unlikely(current->mm != proc->vma_vm_mm)) { 2847 pr_err("current mm mismatch proc mm\n"); 2848 return -EINVAL; 2849 } 2850 trace_binder_ioctl(cmd, arg); 2851 2852 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 2853 if (ret) 2854 goto err_unlocked; 2855 2856 binder_lock(__func__); 2857 thread = binder_get_thread(proc); 2858 if (thread == NULL) { 2859 ret = -ENOMEM; 2860 goto err; 2861 } 2862 2863 switch (cmd) { 2864 case BINDER_WRITE_READ: 2865 ret = binder_ioctl_write_read(filp, cmd, arg, thread); 2866 if (ret) 2867 goto err; 2868 break; 2869 case BINDER_SET_MAX_THREADS: 2870 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) { 2871 ret = -EINVAL; 2872 goto err; 2873 } 2874 break; 2875 case BINDER_SET_CONTEXT_MGR: 2876 ret = binder_ioctl_set_ctx_mgr(filp); 2877 if (ret) 2878 goto err; 2879 break; 2880 case BINDER_THREAD_EXIT: 2881 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", 2882 proc->pid, thread->pid); 2883 binder_free_thread(proc, thread); 2884 thread = NULL; 2885 break; 2886 case BINDER_VERSION: { 2887 struct binder_version __user *ver = ubuf; 2888 2889 if (size != sizeof(struct binder_version)) { 2890 ret = -EINVAL; 2891 goto err; 2892 } 2893 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, 2894 &ver->protocol_version)) { 2895 ret = -EINVAL; 2896 goto err; 2897 } 2898 break; 2899 } 2900 default: 2901 ret = -EINVAL; 2902 goto err; 2903 } 2904 ret = 0; 2905 err: 2906 if (thread) 2907 thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN; 2908 binder_unlock(__func__); 2909 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 2910 if (ret && ret != -ERESTARTSYS) 2911 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); 2912 err_unlocked: 2913 trace_binder_ioctl_done(ret); 2914 return ret; 2915 } 2916 2917 static void binder_vma_open(struct vm_area_struct *vma) 2918 { 2919 struct binder_proc *proc = vma->vm_private_data; 2920 2921 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 2922 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 2923 proc->pid, vma->vm_start, vma->vm_end, 2924 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 2925 (unsigned long)pgprot_val(vma->vm_page_prot)); 2926 } 2927 2928 static void binder_vma_close(struct vm_area_struct *vma) 2929 { 2930 struct binder_proc *proc = vma->vm_private_data; 2931 2932 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 2933 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 2934 proc->pid, vma->vm_start, vma->vm_end, 2935 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 2936 (unsigned long)pgprot_val(vma->vm_page_prot)); 2937 proc->vma = NULL; 2938 proc->vma_vm_mm = NULL; 2939 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); 2940 } 2941 2942 static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 2943 { 2944 return VM_FAULT_SIGBUS; 2945 } 2946 2947 static const struct vm_operations_struct binder_vm_ops = { 2948 .open = binder_vma_open, 2949 .close = binder_vma_close, 2950 .fault = binder_vm_fault, 2951 }; 2952 2953 static int binder_mmap(struct file *filp, struct vm_area_struct *vma) 2954 { 2955 int ret; 2956 struct vm_struct *area; 2957 struct binder_proc *proc = filp->private_data; 2958 const char *failure_string; 2959 struct binder_buffer *buffer; 2960 2961 if (proc->tsk != current) 2962 return -EINVAL; 2963 2964 if ((vma->vm_end - vma->vm_start) > SZ_4M) 2965 vma->vm_end = vma->vm_start + SZ_4M; 2966 2967 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 2968 "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", 2969 proc->pid, vma->vm_start, vma->vm_end, 2970 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 2971 (unsigned long)pgprot_val(vma->vm_page_prot)); 2972 2973 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { 2974 ret = -EPERM; 2975 failure_string = "bad vm_flags"; 2976 goto err_bad_arg; 2977 } 2978 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; 2979 2980 mutex_lock(&binder_mmap_lock); 2981 if (proc->buffer) { 2982 ret = -EBUSY; 2983 failure_string = "already mapped"; 2984 goto err_already_mapped; 2985 } 2986 2987 area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); 2988 if (area == NULL) { 2989 ret = -ENOMEM; 2990 failure_string = "get_vm_area"; 2991 goto err_get_vm_area_failed; 2992 } 2993 proc->buffer = area->addr; 2994 proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; 2995 mutex_unlock(&binder_mmap_lock); 2996 2997 #ifdef CONFIG_CPU_CACHE_VIPT 2998 if (cache_is_vipt_aliasing()) { 2999 while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) { 3000 pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer); 3001 vma->vm_start += PAGE_SIZE; 3002 } 3003 } 3004 #endif 3005 proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL); 3006 if (proc->pages == NULL) { 3007 ret = -ENOMEM; 3008 failure_string = "alloc page array"; 3009 goto err_alloc_pages_failed; 3010 } 3011 proc->buffer_size = vma->vm_end - vma->vm_start; 3012 3013 vma->vm_ops = &binder_vm_ops; 3014 vma->vm_private_data = proc; 3015 3016 if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) { 3017 ret = -ENOMEM; 3018 failure_string = "alloc small buf"; 3019 goto err_alloc_small_buf_failed; 3020 } 3021 buffer = proc->buffer; 3022 INIT_LIST_HEAD(&proc->buffers); 3023 list_add(&buffer->entry, &proc->buffers); 3024 buffer->free = 1; 3025 binder_insert_free_buffer(proc, buffer); 3026 proc->free_async_space = proc->buffer_size / 2; 3027 barrier(); 3028 proc->files = get_files_struct(current); 3029 proc->vma = vma; 3030 proc->vma_vm_mm = vma->vm_mm; 3031 3032 /*pr_info("binder_mmap: %d %lx-%lx maps %p\n", 3033 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/ 3034 return 0; 3035 3036 err_alloc_small_buf_failed: 3037 kfree(proc->pages); 3038 proc->pages = NULL; 3039 err_alloc_pages_failed: 3040 mutex_lock(&binder_mmap_lock); 3041 vfree(proc->buffer); 3042 proc->buffer = NULL; 3043 err_get_vm_area_failed: 3044 err_already_mapped: 3045 mutex_unlock(&binder_mmap_lock); 3046 err_bad_arg: 3047 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n", 3048 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); 3049 return ret; 3050 } 3051 3052 static int binder_open(struct inode *nodp, struct file *filp) 3053 { 3054 struct binder_proc *proc; 3055 struct binder_device *binder_dev; 3056 3057 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n", 3058 current->group_leader->pid, current->pid); 3059 3060 proc = kzalloc(sizeof(*proc), GFP_KERNEL); 3061 if (proc == NULL) 3062 return -ENOMEM; 3063 get_task_struct(current); 3064 proc->tsk = current; 3065 proc->vma_vm_mm = current->mm; 3066 INIT_LIST_HEAD(&proc->todo); 3067 init_waitqueue_head(&proc->wait); 3068 proc->default_priority = task_nice(current); 3069 binder_dev = container_of(filp->private_data, struct binder_device, 3070 miscdev); 3071 proc->context = &binder_dev->context; 3072 3073 binder_lock(__func__); 3074 3075 binder_stats_created(BINDER_STAT_PROC); 3076 hlist_add_head(&proc->proc_node, &binder_procs); 3077 proc->pid = current->group_leader->pid; 3078 INIT_LIST_HEAD(&proc->delivered_death); 3079 filp->private_data = proc; 3080 3081 binder_unlock(__func__); 3082 3083 if (binder_debugfs_dir_entry_proc) { 3084 char strbuf[11]; 3085 3086 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 3087 /* 3088 * proc debug entries are shared between contexts, so 3089 * this will fail if the process tries to open the driver 3090 * again with a different context. The priting code will 3091 * anyway print all contexts that a given PID has, so this 3092 * is not a problem. 3093 */ 3094 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO, 3095 binder_debugfs_dir_entry_proc, 3096 (void *)(unsigned long)proc->pid, 3097 &binder_proc_fops); 3098 } 3099 3100 return 0; 3101 } 3102 3103 static int binder_flush(struct file *filp, fl_owner_t id) 3104 { 3105 struct binder_proc *proc = filp->private_data; 3106 3107 binder_defer_work(proc, BINDER_DEFERRED_FLUSH); 3108 3109 return 0; 3110 } 3111 3112 static void binder_deferred_flush(struct binder_proc *proc) 3113 { 3114 struct rb_node *n; 3115 int wake_count = 0; 3116 3117 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { 3118 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); 3119 3120 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; 3121 if (thread->looper & BINDER_LOOPER_STATE_WAITING) { 3122 wake_up_interruptible(&thread->wait); 3123 wake_count++; 3124 } 3125 } 3126 wake_up_interruptible_all(&proc->wait); 3127 3128 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 3129 "binder_flush: %d woke %d threads\n", proc->pid, 3130 wake_count); 3131 } 3132 3133 static int binder_release(struct inode *nodp, struct file *filp) 3134 { 3135 struct binder_proc *proc = filp->private_data; 3136 3137 debugfs_remove(proc->debugfs_entry); 3138 binder_defer_work(proc, BINDER_DEFERRED_RELEASE); 3139 3140 return 0; 3141 } 3142 3143 static int binder_node_release(struct binder_node *node, int refs) 3144 { 3145 struct binder_ref *ref; 3146 int death = 0; 3147 3148 list_del_init(&node->work.entry); 3149 binder_release_work(&node->async_todo); 3150 3151 if (hlist_empty(&node->refs)) { 3152 kfree(node); 3153 binder_stats_deleted(BINDER_STAT_NODE); 3154 3155 return refs; 3156 } 3157 3158 node->proc = NULL; 3159 node->local_strong_refs = 0; 3160 node->local_weak_refs = 0; 3161 hlist_add_head(&node->dead_node, &binder_dead_nodes); 3162 3163 hlist_for_each_entry(ref, &node->refs, node_entry) { 3164 refs++; 3165 3166 if (!ref->death) 3167 continue; 3168 3169 death++; 3170 3171 if (list_empty(&ref->death->work.entry)) { 3172 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 3173 list_add_tail(&ref->death->work.entry, 3174 &ref->proc->todo); 3175 wake_up_interruptible(&ref->proc->wait); 3176 } else 3177 BUG(); 3178 } 3179 3180 binder_debug(BINDER_DEBUG_DEAD_BINDER, 3181 "node %d now dead, refs %d, death %d\n", 3182 node->debug_id, refs, death); 3183 3184 return refs; 3185 } 3186 3187 static void binder_deferred_release(struct binder_proc *proc) 3188 { 3189 struct binder_transaction *t; 3190 struct binder_context *context = proc->context; 3191 struct rb_node *n; 3192 int threads, nodes, incoming_refs, outgoing_refs, buffers, 3193 active_transactions, page_count; 3194 3195 BUG_ON(proc->vma); 3196 BUG_ON(proc->files); 3197 3198 hlist_del(&proc->proc_node); 3199 3200 if (context->binder_context_mgr_node && 3201 context->binder_context_mgr_node->proc == proc) { 3202 binder_debug(BINDER_DEBUG_DEAD_BINDER, 3203 "%s: %d context_mgr_node gone\n", 3204 __func__, proc->pid); 3205 context->binder_context_mgr_node = NULL; 3206 } 3207 3208 threads = 0; 3209 active_transactions = 0; 3210 while ((n = rb_first(&proc->threads))) { 3211 struct binder_thread *thread; 3212 3213 thread = rb_entry(n, struct binder_thread, rb_node); 3214 threads++; 3215 active_transactions += binder_free_thread(proc, thread); 3216 } 3217 3218 nodes = 0; 3219 incoming_refs = 0; 3220 while ((n = rb_first(&proc->nodes))) { 3221 struct binder_node *node; 3222 3223 node = rb_entry(n, struct binder_node, rb_node); 3224 nodes++; 3225 rb_erase(&node->rb_node, &proc->nodes); 3226 incoming_refs = binder_node_release(node, incoming_refs); 3227 } 3228 3229 outgoing_refs = 0; 3230 while ((n = rb_first(&proc->refs_by_desc))) { 3231 struct binder_ref *ref; 3232 3233 ref = rb_entry(n, struct binder_ref, rb_node_desc); 3234 outgoing_refs++; 3235 binder_delete_ref(ref); 3236 } 3237 3238 binder_release_work(&proc->todo); 3239 binder_release_work(&proc->delivered_death); 3240 3241 buffers = 0; 3242 while ((n = rb_first(&proc->allocated_buffers))) { 3243 struct binder_buffer *buffer; 3244 3245 buffer = rb_entry(n, struct binder_buffer, rb_node); 3246 3247 t = buffer->transaction; 3248 if (t) { 3249 t->buffer = NULL; 3250 buffer->transaction = NULL; 3251 pr_err("release proc %d, transaction %d, not freed\n", 3252 proc->pid, t->debug_id); 3253 /*BUG();*/ 3254 } 3255 3256 binder_free_buf(proc, buffer); 3257 buffers++; 3258 } 3259 3260 binder_stats_deleted(BINDER_STAT_PROC); 3261 3262 page_count = 0; 3263 if (proc->pages) { 3264 int i; 3265 3266 for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) { 3267 void *page_addr; 3268 3269 if (!proc->pages[i]) 3270 continue; 3271 3272 page_addr = proc->buffer + i * PAGE_SIZE; 3273 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 3274 "%s: %d: page %d at %p not freed\n", 3275 __func__, proc->pid, i, page_addr); 3276 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); 3277 __free_page(proc->pages[i]); 3278 page_count++; 3279 } 3280 kfree(proc->pages); 3281 vfree(proc->buffer); 3282 } 3283 3284 put_task_struct(proc->tsk); 3285 3286 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 3287 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n", 3288 __func__, proc->pid, threads, nodes, incoming_refs, 3289 outgoing_refs, active_transactions, buffers, page_count); 3290 3291 kfree(proc); 3292 } 3293 3294 static void binder_deferred_func(struct work_struct *work) 3295 { 3296 struct binder_proc *proc; 3297 struct files_struct *files; 3298 3299 int defer; 3300 3301 do { 3302 binder_lock(__func__); 3303 mutex_lock(&binder_deferred_lock); 3304 if (!hlist_empty(&binder_deferred_list)) { 3305 proc = hlist_entry(binder_deferred_list.first, 3306 struct binder_proc, deferred_work_node); 3307 hlist_del_init(&proc->deferred_work_node); 3308 defer = proc->deferred_work; 3309 proc->deferred_work = 0; 3310 } else { 3311 proc = NULL; 3312 defer = 0; 3313 } 3314 mutex_unlock(&binder_deferred_lock); 3315 3316 files = NULL; 3317 if (defer & BINDER_DEFERRED_PUT_FILES) { 3318 files = proc->files; 3319 if (files) 3320 proc->files = NULL; 3321 } 3322 3323 if (defer & BINDER_DEFERRED_FLUSH) 3324 binder_deferred_flush(proc); 3325 3326 if (defer & BINDER_DEFERRED_RELEASE) 3327 binder_deferred_release(proc); /* frees proc */ 3328 3329 binder_unlock(__func__); 3330 if (files) 3331 put_files_struct(files); 3332 } while (proc); 3333 } 3334 static DECLARE_WORK(binder_deferred_work, binder_deferred_func); 3335 3336 static void 3337 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) 3338 { 3339 mutex_lock(&binder_deferred_lock); 3340 proc->deferred_work |= defer; 3341 if (hlist_unhashed(&proc->deferred_work_node)) { 3342 hlist_add_head(&proc->deferred_work_node, 3343 &binder_deferred_list); 3344 schedule_work(&binder_deferred_work); 3345 } 3346 mutex_unlock(&binder_deferred_lock); 3347 } 3348 3349 static void print_binder_transaction(struct seq_file *m, const char *prefix, 3350 struct binder_transaction *t) 3351 { 3352 seq_printf(m, 3353 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", 3354 prefix, t->debug_id, t, 3355 t->from ? t->from->proc->pid : 0, 3356 t->from ? t->from->pid : 0, 3357 t->to_proc ? t->to_proc->pid : 0, 3358 t->to_thread ? t->to_thread->pid : 0, 3359 t->code, t->flags, t->priority, t->need_reply); 3360 if (t->buffer == NULL) { 3361 seq_puts(m, " buffer free\n"); 3362 return; 3363 } 3364 if (t->buffer->target_node) 3365 seq_printf(m, " node %d", 3366 t->buffer->target_node->debug_id); 3367 seq_printf(m, " size %zd:%zd data %p\n", 3368 t->buffer->data_size, t->buffer->offsets_size, 3369 t->buffer->data); 3370 } 3371 3372 static void print_binder_buffer(struct seq_file *m, const char *prefix, 3373 struct binder_buffer *buffer) 3374 { 3375 seq_printf(m, "%s %d: %p size %zd:%zd %s\n", 3376 prefix, buffer->debug_id, buffer->data, 3377 buffer->data_size, buffer->offsets_size, 3378 buffer->transaction ? "active" : "delivered"); 3379 } 3380 3381 static void print_binder_work(struct seq_file *m, const char *prefix, 3382 const char *transaction_prefix, 3383 struct binder_work *w) 3384 { 3385 struct binder_node *node; 3386 struct binder_transaction *t; 3387 3388 switch (w->type) { 3389 case BINDER_WORK_TRANSACTION: 3390 t = container_of(w, struct binder_transaction, work); 3391 print_binder_transaction(m, transaction_prefix, t); 3392 break; 3393 case BINDER_WORK_TRANSACTION_COMPLETE: 3394 seq_printf(m, "%stransaction complete\n", prefix); 3395 break; 3396 case BINDER_WORK_NODE: 3397 node = container_of(w, struct binder_node, work); 3398 seq_printf(m, "%snode work %d: u%016llx c%016llx\n", 3399 prefix, node->debug_id, 3400 (u64)node->ptr, (u64)node->cookie); 3401 break; 3402 case BINDER_WORK_DEAD_BINDER: 3403 seq_printf(m, "%shas dead binder\n", prefix); 3404 break; 3405 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 3406 seq_printf(m, "%shas cleared dead binder\n", prefix); 3407 break; 3408 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: 3409 seq_printf(m, "%shas cleared death notification\n", prefix); 3410 break; 3411 default: 3412 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); 3413 break; 3414 } 3415 } 3416 3417 static void print_binder_thread(struct seq_file *m, 3418 struct binder_thread *thread, 3419 int print_always) 3420 { 3421 struct binder_transaction *t; 3422 struct binder_work *w; 3423 size_t start_pos = m->count; 3424 size_t header_pos; 3425 3426 seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper); 3427 header_pos = m->count; 3428 t = thread->transaction_stack; 3429 while (t) { 3430 if (t->from == thread) { 3431 print_binder_transaction(m, 3432 " outgoing transaction", t); 3433 t = t->from_parent; 3434 } else if (t->to_thread == thread) { 3435 print_binder_transaction(m, 3436 " incoming transaction", t); 3437 t = t->to_parent; 3438 } else { 3439 print_binder_transaction(m, " bad transaction", t); 3440 t = NULL; 3441 } 3442 } 3443 list_for_each_entry(w, &thread->todo, entry) { 3444 print_binder_work(m, " ", " pending transaction", w); 3445 } 3446 if (!print_always && m->count == header_pos) 3447 m->count = start_pos; 3448 } 3449 3450 static void print_binder_node(struct seq_file *m, struct binder_node *node) 3451 { 3452 struct binder_ref *ref; 3453 struct binder_work *w; 3454 int count; 3455 3456 count = 0; 3457 hlist_for_each_entry(ref, &node->refs, node_entry) 3458 count++; 3459 3460 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d", 3461 node->debug_id, (u64)node->ptr, (u64)node->cookie, 3462 node->has_strong_ref, node->has_weak_ref, 3463 node->local_strong_refs, node->local_weak_refs, 3464 node->internal_strong_refs, count); 3465 if (count) { 3466 seq_puts(m, " proc"); 3467 hlist_for_each_entry(ref, &node->refs, node_entry) 3468 seq_printf(m, " %d", ref->proc->pid); 3469 } 3470 seq_puts(m, "\n"); 3471 list_for_each_entry(w, &node->async_todo, entry) 3472 print_binder_work(m, " ", 3473 " pending async transaction", w); 3474 } 3475 3476 static void print_binder_ref(struct seq_file *m, struct binder_ref *ref) 3477 { 3478 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n", 3479 ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ", 3480 ref->node->debug_id, ref->strong, ref->weak, ref->death); 3481 } 3482 3483 static void print_binder_proc(struct seq_file *m, 3484 struct binder_proc *proc, int print_all) 3485 { 3486 struct binder_work *w; 3487 struct rb_node *n; 3488 size_t start_pos = m->count; 3489 size_t header_pos; 3490 3491 seq_printf(m, "proc %d\n", proc->pid); 3492 seq_printf(m, "context %s\n", proc->context->name); 3493 header_pos = m->count; 3494 3495 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 3496 print_binder_thread(m, rb_entry(n, struct binder_thread, 3497 rb_node), print_all); 3498 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 3499 struct binder_node *node = rb_entry(n, struct binder_node, 3500 rb_node); 3501 if (print_all || node->has_async_transaction) 3502 print_binder_node(m, node); 3503 } 3504 if (print_all) { 3505 for (n = rb_first(&proc->refs_by_desc); 3506 n != NULL; 3507 n = rb_next(n)) 3508 print_binder_ref(m, rb_entry(n, struct binder_ref, 3509 rb_node_desc)); 3510 } 3511 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) 3512 print_binder_buffer(m, " buffer", 3513 rb_entry(n, struct binder_buffer, rb_node)); 3514 list_for_each_entry(w, &proc->todo, entry) 3515 print_binder_work(m, " ", " pending transaction", w); 3516 list_for_each_entry(w, &proc->delivered_death, entry) { 3517 seq_puts(m, " has delivered dead binder\n"); 3518 break; 3519 } 3520 if (!print_all && m->count == header_pos) 3521 m->count = start_pos; 3522 } 3523 3524 static const char * const binder_return_strings[] = { 3525 "BR_ERROR", 3526 "BR_OK", 3527 "BR_TRANSACTION", 3528 "BR_REPLY", 3529 "BR_ACQUIRE_RESULT", 3530 "BR_DEAD_REPLY", 3531 "BR_TRANSACTION_COMPLETE", 3532 "BR_INCREFS", 3533 "BR_ACQUIRE", 3534 "BR_RELEASE", 3535 "BR_DECREFS", 3536 "BR_ATTEMPT_ACQUIRE", 3537 "BR_NOOP", 3538 "BR_SPAWN_LOOPER", 3539 "BR_FINISHED", 3540 "BR_DEAD_BINDER", 3541 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 3542 "BR_FAILED_REPLY" 3543 }; 3544 3545 static const char * const binder_command_strings[] = { 3546 "BC_TRANSACTION", 3547 "BC_REPLY", 3548 "BC_ACQUIRE_RESULT", 3549 "BC_FREE_BUFFER", 3550 "BC_INCREFS", 3551 "BC_ACQUIRE", 3552 "BC_RELEASE", 3553 "BC_DECREFS", 3554 "BC_INCREFS_DONE", 3555 "BC_ACQUIRE_DONE", 3556 "BC_ATTEMPT_ACQUIRE", 3557 "BC_REGISTER_LOOPER", 3558 "BC_ENTER_LOOPER", 3559 "BC_EXIT_LOOPER", 3560 "BC_REQUEST_DEATH_NOTIFICATION", 3561 "BC_CLEAR_DEATH_NOTIFICATION", 3562 "BC_DEAD_BINDER_DONE" 3563 }; 3564 3565 static const char * const binder_objstat_strings[] = { 3566 "proc", 3567 "thread", 3568 "node", 3569 "ref", 3570 "death", 3571 "transaction", 3572 "transaction_complete" 3573 }; 3574 3575 static void print_binder_stats(struct seq_file *m, const char *prefix, 3576 struct binder_stats *stats) 3577 { 3578 int i; 3579 3580 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != 3581 ARRAY_SIZE(binder_command_strings)); 3582 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { 3583 if (stats->bc[i]) 3584 seq_printf(m, "%s%s: %d\n", prefix, 3585 binder_command_strings[i], stats->bc[i]); 3586 } 3587 3588 BUILD_BUG_ON(ARRAY_SIZE(stats->br) != 3589 ARRAY_SIZE(binder_return_strings)); 3590 for (i = 0; i < ARRAY_SIZE(stats->br); i++) { 3591 if (stats->br[i]) 3592 seq_printf(m, "%s%s: %d\n", prefix, 3593 binder_return_strings[i], stats->br[i]); 3594 } 3595 3596 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 3597 ARRAY_SIZE(binder_objstat_strings)); 3598 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 3599 ARRAY_SIZE(stats->obj_deleted)); 3600 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { 3601 if (stats->obj_created[i] || stats->obj_deleted[i]) 3602 seq_printf(m, "%s%s: active %d total %d\n", prefix, 3603 binder_objstat_strings[i], 3604 stats->obj_created[i] - stats->obj_deleted[i], 3605 stats->obj_created[i]); 3606 } 3607 } 3608 3609 static void print_binder_proc_stats(struct seq_file *m, 3610 struct binder_proc *proc) 3611 { 3612 struct binder_work *w; 3613 struct rb_node *n; 3614 int count, strong, weak; 3615 3616 seq_printf(m, "proc %d\n", proc->pid); 3617 seq_printf(m, "context %s\n", proc->context->name); 3618 count = 0; 3619 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 3620 count++; 3621 seq_printf(m, " threads: %d\n", count); 3622 seq_printf(m, " requested threads: %d+%d/%d\n" 3623 " ready threads %d\n" 3624 " free async space %zd\n", proc->requested_threads, 3625 proc->requested_threads_started, proc->max_threads, 3626 proc->ready_threads, proc->free_async_space); 3627 count = 0; 3628 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) 3629 count++; 3630 seq_printf(m, " nodes: %d\n", count); 3631 count = 0; 3632 strong = 0; 3633 weak = 0; 3634 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { 3635 struct binder_ref *ref = rb_entry(n, struct binder_ref, 3636 rb_node_desc); 3637 count++; 3638 strong += ref->strong; 3639 weak += ref->weak; 3640 } 3641 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); 3642 3643 count = 0; 3644 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) 3645 count++; 3646 seq_printf(m, " buffers: %d\n", count); 3647 3648 count = 0; 3649 list_for_each_entry(w, &proc->todo, entry) { 3650 switch (w->type) { 3651 case BINDER_WORK_TRANSACTION: 3652 count++; 3653 break; 3654 default: 3655 break; 3656 } 3657 } 3658 seq_printf(m, " pending transactions: %d\n", count); 3659 3660 print_binder_stats(m, " ", &proc->stats); 3661 } 3662 3663 3664 static int binder_state_show(struct seq_file *m, void *unused) 3665 { 3666 struct binder_proc *proc; 3667 struct binder_node *node; 3668 int do_lock = !binder_debug_no_lock; 3669 3670 if (do_lock) 3671 binder_lock(__func__); 3672 3673 seq_puts(m, "binder state:\n"); 3674 3675 if (!hlist_empty(&binder_dead_nodes)) 3676 seq_puts(m, "dead nodes:\n"); 3677 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) 3678 print_binder_node(m, node); 3679 3680 hlist_for_each_entry(proc, &binder_procs, proc_node) 3681 print_binder_proc(m, proc, 1); 3682 if (do_lock) 3683 binder_unlock(__func__); 3684 return 0; 3685 } 3686 3687 static int binder_stats_show(struct seq_file *m, void *unused) 3688 { 3689 struct binder_proc *proc; 3690 int do_lock = !binder_debug_no_lock; 3691 3692 if (do_lock) 3693 binder_lock(__func__); 3694 3695 seq_puts(m, "binder stats:\n"); 3696 3697 print_binder_stats(m, "", &binder_stats); 3698 3699 hlist_for_each_entry(proc, &binder_procs, proc_node) 3700 print_binder_proc_stats(m, proc); 3701 if (do_lock) 3702 binder_unlock(__func__); 3703 return 0; 3704 } 3705 3706 static int binder_transactions_show(struct seq_file *m, void *unused) 3707 { 3708 struct binder_proc *proc; 3709 int do_lock = !binder_debug_no_lock; 3710 3711 if (do_lock) 3712 binder_lock(__func__); 3713 3714 seq_puts(m, "binder transactions:\n"); 3715 hlist_for_each_entry(proc, &binder_procs, proc_node) 3716 print_binder_proc(m, proc, 0); 3717 if (do_lock) 3718 binder_unlock(__func__); 3719 return 0; 3720 } 3721 3722 static int binder_proc_show(struct seq_file *m, void *unused) 3723 { 3724 struct binder_proc *itr; 3725 int pid = (unsigned long)m->private; 3726 int do_lock = !binder_debug_no_lock; 3727 3728 if (do_lock) 3729 binder_lock(__func__); 3730 3731 hlist_for_each_entry(itr, &binder_procs, proc_node) { 3732 if (itr->pid == pid) { 3733 seq_puts(m, "binder proc state:\n"); 3734 print_binder_proc(m, itr, 1); 3735 } 3736 } 3737 if (do_lock) 3738 binder_unlock(__func__); 3739 return 0; 3740 } 3741 3742 static void print_binder_transaction_log_entry(struct seq_file *m, 3743 struct binder_transaction_log_entry *e) 3744 { 3745 seq_printf(m, 3746 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n", 3747 e->debug_id, (e->call_type == 2) ? "reply" : 3748 ((e->call_type == 1) ? "async" : "call "), e->from_proc, 3749 e->from_thread, e->to_proc, e->to_thread, e->context_name, 3750 e->to_node, e->target_handle, e->data_size, e->offsets_size); 3751 } 3752 3753 static int binder_transaction_log_show(struct seq_file *m, void *unused) 3754 { 3755 struct binder_transaction_log *log = m->private; 3756 int i; 3757 3758 if (log->full) { 3759 for (i = log->next; i < ARRAY_SIZE(log->entry); i++) 3760 print_binder_transaction_log_entry(m, &log->entry[i]); 3761 } 3762 for (i = 0; i < log->next; i++) 3763 print_binder_transaction_log_entry(m, &log->entry[i]); 3764 return 0; 3765 } 3766 3767 static const struct file_operations binder_fops = { 3768 .owner = THIS_MODULE, 3769 .poll = binder_poll, 3770 .unlocked_ioctl = binder_ioctl, 3771 .compat_ioctl = binder_ioctl, 3772 .mmap = binder_mmap, 3773 .open = binder_open, 3774 .flush = binder_flush, 3775 .release = binder_release, 3776 }; 3777 3778 BINDER_DEBUG_ENTRY(state); 3779 BINDER_DEBUG_ENTRY(stats); 3780 BINDER_DEBUG_ENTRY(transactions); 3781 BINDER_DEBUG_ENTRY(transaction_log); 3782 3783 static int __init init_binder_device(const char *name) 3784 { 3785 int ret; 3786 struct binder_device *binder_device; 3787 3788 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL); 3789 if (!binder_device) 3790 return -ENOMEM; 3791 3792 binder_device->miscdev.fops = &binder_fops; 3793 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR; 3794 binder_device->miscdev.name = name; 3795 3796 binder_device->context.binder_context_mgr_uid = INVALID_UID; 3797 binder_device->context.name = name; 3798 3799 ret = misc_register(&binder_device->miscdev); 3800 if (ret < 0) { 3801 kfree(binder_device); 3802 return ret; 3803 } 3804 3805 hlist_add_head(&binder_device->hlist, &binder_devices); 3806 3807 return ret; 3808 } 3809 3810 static int __init binder_init(void) 3811 { 3812 int ret; 3813 char *device_name, *device_names; 3814 struct binder_device *device; 3815 struct hlist_node *tmp; 3816 3817 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); 3818 if (binder_debugfs_dir_entry_root) 3819 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", 3820 binder_debugfs_dir_entry_root); 3821 3822 if (binder_debugfs_dir_entry_root) { 3823 debugfs_create_file("state", 3824 S_IRUGO, 3825 binder_debugfs_dir_entry_root, 3826 NULL, 3827 &binder_state_fops); 3828 debugfs_create_file("stats", 3829 S_IRUGO, 3830 binder_debugfs_dir_entry_root, 3831 NULL, 3832 &binder_stats_fops); 3833 debugfs_create_file("transactions", 3834 S_IRUGO, 3835 binder_debugfs_dir_entry_root, 3836 NULL, 3837 &binder_transactions_fops); 3838 debugfs_create_file("transaction_log", 3839 S_IRUGO, 3840 binder_debugfs_dir_entry_root, 3841 &binder_transaction_log, 3842 &binder_transaction_log_fops); 3843 debugfs_create_file("failed_transaction_log", 3844 S_IRUGO, 3845 binder_debugfs_dir_entry_root, 3846 &binder_transaction_log_failed, 3847 &binder_transaction_log_fops); 3848 } 3849 3850 /* 3851 * Copy the module_parameter string, because we don't want to 3852 * tokenize it in-place. 3853 */ 3854 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL); 3855 if (!device_names) { 3856 ret = -ENOMEM; 3857 goto err_alloc_device_names_failed; 3858 } 3859 strcpy(device_names, binder_devices_param); 3860 3861 while ((device_name = strsep(&device_names, ","))) { 3862 ret = init_binder_device(device_name); 3863 if (ret) 3864 goto err_init_binder_device_failed; 3865 } 3866 3867 return ret; 3868 3869 err_init_binder_device_failed: 3870 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) { 3871 misc_deregister(&device->miscdev); 3872 hlist_del(&device->hlist); 3873 kfree(device); 3874 } 3875 err_alloc_device_names_failed: 3876 debugfs_remove_recursive(binder_debugfs_dir_entry_root); 3877 3878 return ret; 3879 } 3880 3881 device_initcall(binder_init); 3882 3883 #define CREATE_TRACE_POINTS 3884 #include "binder_trace.h" 3885 3886 MODULE_LICENSE("GPL v2"); 3887