1 /* binder.c 2 * 3 * Android IPC Subsystem 4 * 5 * Copyright (C) 2007-2008 Google, Inc. 6 * 7 * This software is licensed under the terms of the GNU General Public 8 * License version 2, as published by the Free Software Foundation, and 9 * may be copied, distributed, and modified under those terms. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 */ 17 18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 19 20 #include <asm/cacheflush.h> 21 #include <linux/fdtable.h> 22 #include <linux/file.h> 23 #include <linux/freezer.h> 24 #include <linux/fs.h> 25 #include <linux/list.h> 26 #include <linux/miscdevice.h> 27 #include <linux/mm.h> 28 #include <linux/module.h> 29 #include <linux/mutex.h> 30 #include <linux/nsproxy.h> 31 #include <linux/poll.h> 32 #include <linux/debugfs.h> 33 #include <linux/rbtree.h> 34 #include <linux/sched.h> 35 #include <linux/seq_file.h> 36 #include <linux/uaccess.h> 37 #include <linux/vmalloc.h> 38 #include <linux/slab.h> 39 #include <linux/pid_namespace.h> 40 #include <linux/security.h> 41 42 #ifdef CONFIG_ANDROID_BINDER_IPC_32BIT 43 #define BINDER_IPC_32BIT 1 44 #endif 45 46 #include <uapi/linux/android/binder.h> 47 #include "binder_trace.h" 48 49 static DEFINE_MUTEX(binder_main_lock); 50 static DEFINE_MUTEX(binder_deferred_lock); 51 static DEFINE_MUTEX(binder_mmap_lock); 52 53 static HLIST_HEAD(binder_devices); 54 static HLIST_HEAD(binder_procs); 55 static HLIST_HEAD(binder_deferred_list); 56 static HLIST_HEAD(binder_dead_nodes); 57 58 static struct dentry *binder_debugfs_dir_entry_root; 59 static struct dentry *binder_debugfs_dir_entry_proc; 60 static int binder_last_id; 61 62 #define BINDER_DEBUG_ENTRY(name) \ 63 static int binder_##name##_open(struct inode *inode, struct file *file) \ 64 { \ 65 return single_open(file, binder_##name##_show, inode->i_private); \ 66 } \ 67 \ 68 static const struct file_operations binder_##name##_fops = { \ 69 .owner = THIS_MODULE, \ 70 .open = binder_##name##_open, \ 71 .read = seq_read, \ 72 .llseek = seq_lseek, \ 73 .release = single_release, \ 74 } 75 76 static int binder_proc_show(struct seq_file *m, void *unused); 77 BINDER_DEBUG_ENTRY(proc); 78 79 /* This is only defined in include/asm-arm/sizes.h */ 80 #ifndef SZ_1K 81 #define SZ_1K 0x400 82 #endif 83 84 #ifndef SZ_4M 85 #define SZ_4M 0x400000 86 #endif 87 88 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE) 89 90 #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64) 91 92 enum { 93 BINDER_DEBUG_USER_ERROR = 1U << 0, 94 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1, 95 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2, 96 BINDER_DEBUG_OPEN_CLOSE = 1U << 3, 97 BINDER_DEBUG_DEAD_BINDER = 1U << 4, 98 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5, 99 BINDER_DEBUG_READ_WRITE = 1U << 6, 100 BINDER_DEBUG_USER_REFS = 1U << 7, 101 BINDER_DEBUG_THREADS = 1U << 8, 102 BINDER_DEBUG_TRANSACTION = 1U << 9, 103 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10, 104 BINDER_DEBUG_FREE_BUFFER = 1U << 11, 105 BINDER_DEBUG_INTERNAL_REFS = 1U << 12, 106 BINDER_DEBUG_BUFFER_ALLOC = 1U << 13, 107 BINDER_DEBUG_PRIORITY_CAP = 1U << 14, 108 BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15, 109 }; 110 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | 111 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; 112 module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO); 113 114 static bool binder_debug_no_lock; 115 module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO); 116 117 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES; 118 module_param_named(devices, binder_devices_param, charp, 0444); 119 120 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); 121 static int binder_stop_on_user_error; 122 123 static int binder_set_stop_on_user_error(const char *val, 124 struct kernel_param *kp) 125 { 126 int ret; 127 128 ret = param_set_int(val, kp); 129 if (binder_stop_on_user_error < 2) 130 wake_up(&binder_user_error_wait); 131 return ret; 132 } 133 module_param_call(stop_on_user_error, binder_set_stop_on_user_error, 134 param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO); 135 136 #define binder_debug(mask, x...) \ 137 do { \ 138 if (binder_debug_mask & mask) \ 139 pr_info(x); \ 140 } while (0) 141 142 #define binder_user_error(x...) \ 143 do { \ 144 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \ 145 pr_info(x); \ 146 if (binder_stop_on_user_error) \ 147 binder_stop_on_user_error = 2; \ 148 } while (0) 149 150 #define to_flat_binder_object(hdr) \ 151 container_of(hdr, struct flat_binder_object, hdr) 152 153 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr) 154 155 #define to_binder_buffer_object(hdr) \ 156 container_of(hdr, struct binder_buffer_object, hdr) 157 158 #define to_binder_fd_array_object(hdr) \ 159 container_of(hdr, struct binder_fd_array_object, hdr) 160 161 enum binder_stat_types { 162 BINDER_STAT_PROC, 163 BINDER_STAT_THREAD, 164 BINDER_STAT_NODE, 165 BINDER_STAT_REF, 166 BINDER_STAT_DEATH, 167 BINDER_STAT_TRANSACTION, 168 BINDER_STAT_TRANSACTION_COMPLETE, 169 BINDER_STAT_COUNT 170 }; 171 172 struct binder_stats { 173 int br[_IOC_NR(BR_FAILED_REPLY) + 1]; 174 int bc[_IOC_NR(BC_REPLY_SG) + 1]; 175 int obj_created[BINDER_STAT_COUNT]; 176 int obj_deleted[BINDER_STAT_COUNT]; 177 }; 178 179 static struct binder_stats binder_stats; 180 181 static inline void binder_stats_deleted(enum binder_stat_types type) 182 { 183 binder_stats.obj_deleted[type]++; 184 } 185 186 static inline void binder_stats_created(enum binder_stat_types type) 187 { 188 binder_stats.obj_created[type]++; 189 } 190 191 struct binder_transaction_log_entry { 192 int debug_id; 193 int call_type; 194 int from_proc; 195 int from_thread; 196 int target_handle; 197 int to_proc; 198 int to_thread; 199 int to_node; 200 int data_size; 201 int offsets_size; 202 const char *context_name; 203 }; 204 struct binder_transaction_log { 205 int next; 206 int full; 207 struct binder_transaction_log_entry entry[32]; 208 }; 209 static struct binder_transaction_log binder_transaction_log; 210 static struct binder_transaction_log binder_transaction_log_failed; 211 212 static struct binder_transaction_log_entry *binder_transaction_log_add( 213 struct binder_transaction_log *log) 214 { 215 struct binder_transaction_log_entry *e; 216 217 e = &log->entry[log->next]; 218 memset(e, 0, sizeof(*e)); 219 log->next++; 220 if (log->next == ARRAY_SIZE(log->entry)) { 221 log->next = 0; 222 log->full = 1; 223 } 224 return e; 225 } 226 227 struct binder_context { 228 struct binder_node *binder_context_mgr_node; 229 kuid_t binder_context_mgr_uid; 230 const char *name; 231 }; 232 233 struct binder_device { 234 struct hlist_node hlist; 235 struct miscdevice miscdev; 236 struct binder_context context; 237 }; 238 239 struct binder_work { 240 struct list_head entry; 241 enum { 242 BINDER_WORK_TRANSACTION = 1, 243 BINDER_WORK_TRANSACTION_COMPLETE, 244 BINDER_WORK_NODE, 245 BINDER_WORK_DEAD_BINDER, 246 BINDER_WORK_DEAD_BINDER_AND_CLEAR, 247 BINDER_WORK_CLEAR_DEATH_NOTIFICATION, 248 } type; 249 }; 250 251 struct binder_node { 252 int debug_id; 253 struct binder_work work; 254 union { 255 struct rb_node rb_node; 256 struct hlist_node dead_node; 257 }; 258 struct binder_proc *proc; 259 struct hlist_head refs; 260 int internal_strong_refs; 261 int local_weak_refs; 262 int local_strong_refs; 263 binder_uintptr_t ptr; 264 binder_uintptr_t cookie; 265 unsigned has_strong_ref:1; 266 unsigned pending_strong_ref:1; 267 unsigned has_weak_ref:1; 268 unsigned pending_weak_ref:1; 269 unsigned has_async_transaction:1; 270 unsigned accept_fds:1; 271 unsigned min_priority:8; 272 struct list_head async_todo; 273 }; 274 275 struct binder_ref_death { 276 struct binder_work work; 277 binder_uintptr_t cookie; 278 }; 279 280 struct binder_ref { 281 /* Lookups needed: */ 282 /* node + proc => ref (transaction) */ 283 /* desc + proc => ref (transaction, inc/dec ref) */ 284 /* node => refs + procs (proc exit) */ 285 int debug_id; 286 struct rb_node rb_node_desc; 287 struct rb_node rb_node_node; 288 struct hlist_node node_entry; 289 struct binder_proc *proc; 290 struct binder_node *node; 291 uint32_t desc; 292 int strong; 293 int weak; 294 struct binder_ref_death *death; 295 }; 296 297 struct binder_buffer { 298 struct list_head entry; /* free and allocated entries by address */ 299 struct rb_node rb_node; /* free entry by size or allocated entry */ 300 /* by address */ 301 unsigned free:1; 302 unsigned allow_user_free:1; 303 unsigned async_transaction:1; 304 unsigned debug_id:29; 305 306 struct binder_transaction *transaction; 307 308 struct binder_node *target_node; 309 size_t data_size; 310 size_t offsets_size; 311 size_t extra_buffers_size; 312 uint8_t data[0]; 313 }; 314 315 enum binder_deferred_state { 316 BINDER_DEFERRED_PUT_FILES = 0x01, 317 BINDER_DEFERRED_FLUSH = 0x02, 318 BINDER_DEFERRED_RELEASE = 0x04, 319 }; 320 321 struct binder_proc { 322 struct hlist_node proc_node; 323 struct rb_root threads; 324 struct rb_root nodes; 325 struct rb_root refs_by_desc; 326 struct rb_root refs_by_node; 327 int pid; 328 struct vm_area_struct *vma; 329 struct mm_struct *vma_vm_mm; 330 struct task_struct *tsk; 331 struct files_struct *files; 332 struct hlist_node deferred_work_node; 333 int deferred_work; 334 void *buffer; 335 ptrdiff_t user_buffer_offset; 336 337 struct list_head buffers; 338 struct rb_root free_buffers; 339 struct rb_root allocated_buffers; 340 size_t free_async_space; 341 342 struct page **pages; 343 size_t buffer_size; 344 uint32_t buffer_free; 345 struct list_head todo; 346 wait_queue_head_t wait; 347 struct binder_stats stats; 348 struct list_head delivered_death; 349 int max_threads; 350 int requested_threads; 351 int requested_threads_started; 352 int ready_threads; 353 long default_priority; 354 struct dentry *debugfs_entry; 355 struct binder_context *context; 356 }; 357 358 enum { 359 BINDER_LOOPER_STATE_REGISTERED = 0x01, 360 BINDER_LOOPER_STATE_ENTERED = 0x02, 361 BINDER_LOOPER_STATE_EXITED = 0x04, 362 BINDER_LOOPER_STATE_INVALID = 0x08, 363 BINDER_LOOPER_STATE_WAITING = 0x10, 364 BINDER_LOOPER_STATE_NEED_RETURN = 0x20 365 }; 366 367 struct binder_thread { 368 struct binder_proc *proc; 369 struct rb_node rb_node; 370 int pid; 371 int looper; 372 struct binder_transaction *transaction_stack; 373 struct list_head todo; 374 uint32_t return_error; /* Write failed, return error code in read buf */ 375 uint32_t return_error2; /* Write failed, return error code in read */ 376 /* buffer. Used when sending a reply to a dead process that */ 377 /* we are also waiting on */ 378 wait_queue_head_t wait; 379 struct binder_stats stats; 380 }; 381 382 struct binder_transaction { 383 int debug_id; 384 struct binder_work work; 385 struct binder_thread *from; 386 struct binder_transaction *from_parent; 387 struct binder_proc *to_proc; 388 struct binder_thread *to_thread; 389 struct binder_transaction *to_parent; 390 unsigned need_reply:1; 391 /* unsigned is_dead:1; */ /* not used at the moment */ 392 393 struct binder_buffer *buffer; 394 unsigned int code; 395 unsigned int flags; 396 long priority; 397 long saved_priority; 398 kuid_t sender_euid; 399 }; 400 401 static void 402 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); 403 404 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) 405 { 406 struct files_struct *files = proc->files; 407 unsigned long rlim_cur; 408 unsigned long irqs; 409 410 if (files == NULL) 411 return -ESRCH; 412 413 if (!lock_task_sighand(proc->tsk, &irqs)) 414 return -EMFILE; 415 416 rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE); 417 unlock_task_sighand(proc->tsk, &irqs); 418 419 return __alloc_fd(files, 0, rlim_cur, flags); 420 } 421 422 /* 423 * copied from fd_install 424 */ 425 static void task_fd_install( 426 struct binder_proc *proc, unsigned int fd, struct file *file) 427 { 428 if (proc->files) 429 __fd_install(proc->files, fd, file); 430 } 431 432 /* 433 * copied from sys_close 434 */ 435 static long task_close_fd(struct binder_proc *proc, unsigned int fd) 436 { 437 int retval; 438 439 if (proc->files == NULL) 440 return -ESRCH; 441 442 retval = __close_fd(proc->files, fd); 443 /* can't restart close syscall because file table entry was cleared */ 444 if (unlikely(retval == -ERESTARTSYS || 445 retval == -ERESTARTNOINTR || 446 retval == -ERESTARTNOHAND || 447 retval == -ERESTART_RESTARTBLOCK)) 448 retval = -EINTR; 449 450 return retval; 451 } 452 453 static inline void binder_lock(const char *tag) 454 { 455 trace_binder_lock(tag); 456 mutex_lock(&binder_main_lock); 457 trace_binder_locked(tag); 458 } 459 460 static inline void binder_unlock(const char *tag) 461 { 462 trace_binder_unlock(tag); 463 mutex_unlock(&binder_main_lock); 464 } 465 466 static void binder_set_nice(long nice) 467 { 468 long min_nice; 469 470 if (can_nice(current, nice)) { 471 set_user_nice(current, nice); 472 return; 473 } 474 min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur); 475 binder_debug(BINDER_DEBUG_PRIORITY_CAP, 476 "%d: nice value %ld not allowed use %ld instead\n", 477 current->pid, nice, min_nice); 478 set_user_nice(current, min_nice); 479 if (min_nice <= MAX_NICE) 480 return; 481 binder_user_error("%d RLIMIT_NICE not set\n", current->pid); 482 } 483 484 static size_t binder_buffer_size(struct binder_proc *proc, 485 struct binder_buffer *buffer) 486 { 487 if (list_is_last(&buffer->entry, &proc->buffers)) 488 return proc->buffer + proc->buffer_size - (void *)buffer->data; 489 return (size_t)list_entry(buffer->entry.next, 490 struct binder_buffer, entry) - (size_t)buffer->data; 491 } 492 493 static void binder_insert_free_buffer(struct binder_proc *proc, 494 struct binder_buffer *new_buffer) 495 { 496 struct rb_node **p = &proc->free_buffers.rb_node; 497 struct rb_node *parent = NULL; 498 struct binder_buffer *buffer; 499 size_t buffer_size; 500 size_t new_buffer_size; 501 502 BUG_ON(!new_buffer->free); 503 504 new_buffer_size = binder_buffer_size(proc, new_buffer); 505 506 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 507 "%d: add free buffer, size %zd, at %p\n", 508 proc->pid, new_buffer_size, new_buffer); 509 510 while (*p) { 511 parent = *p; 512 buffer = rb_entry(parent, struct binder_buffer, rb_node); 513 BUG_ON(!buffer->free); 514 515 buffer_size = binder_buffer_size(proc, buffer); 516 517 if (new_buffer_size < buffer_size) 518 p = &parent->rb_left; 519 else 520 p = &parent->rb_right; 521 } 522 rb_link_node(&new_buffer->rb_node, parent, p); 523 rb_insert_color(&new_buffer->rb_node, &proc->free_buffers); 524 } 525 526 static void binder_insert_allocated_buffer(struct binder_proc *proc, 527 struct binder_buffer *new_buffer) 528 { 529 struct rb_node **p = &proc->allocated_buffers.rb_node; 530 struct rb_node *parent = NULL; 531 struct binder_buffer *buffer; 532 533 BUG_ON(new_buffer->free); 534 535 while (*p) { 536 parent = *p; 537 buffer = rb_entry(parent, struct binder_buffer, rb_node); 538 BUG_ON(buffer->free); 539 540 if (new_buffer < buffer) 541 p = &parent->rb_left; 542 else if (new_buffer > buffer) 543 p = &parent->rb_right; 544 else 545 BUG(); 546 } 547 rb_link_node(&new_buffer->rb_node, parent, p); 548 rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers); 549 } 550 551 static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc, 552 uintptr_t user_ptr) 553 { 554 struct rb_node *n = proc->allocated_buffers.rb_node; 555 struct binder_buffer *buffer; 556 struct binder_buffer *kern_ptr; 557 558 kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset 559 - offsetof(struct binder_buffer, data)); 560 561 while (n) { 562 buffer = rb_entry(n, struct binder_buffer, rb_node); 563 BUG_ON(buffer->free); 564 565 if (kern_ptr < buffer) 566 n = n->rb_left; 567 else if (kern_ptr > buffer) 568 n = n->rb_right; 569 else 570 return buffer; 571 } 572 return NULL; 573 } 574 575 static int binder_update_page_range(struct binder_proc *proc, int allocate, 576 void *start, void *end, 577 struct vm_area_struct *vma) 578 { 579 void *page_addr; 580 unsigned long user_page_addr; 581 struct page **page; 582 struct mm_struct *mm; 583 584 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 585 "%d: %s pages %p-%p\n", proc->pid, 586 allocate ? "allocate" : "free", start, end); 587 588 if (end <= start) 589 return 0; 590 591 trace_binder_update_page_range(proc, allocate, start, end); 592 593 if (vma) 594 mm = NULL; 595 else 596 mm = get_task_mm(proc->tsk); 597 598 if (mm) { 599 down_write(&mm->mmap_sem); 600 vma = proc->vma; 601 if (vma && mm != proc->vma_vm_mm) { 602 pr_err("%d: vma mm and task mm mismatch\n", 603 proc->pid); 604 vma = NULL; 605 } 606 } 607 608 if (allocate == 0) 609 goto free_range; 610 611 if (vma == NULL) { 612 pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n", 613 proc->pid); 614 goto err_no_vma; 615 } 616 617 for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { 618 int ret; 619 620 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; 621 622 BUG_ON(*page); 623 *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); 624 if (*page == NULL) { 625 pr_err("%d: binder_alloc_buf failed for page at %p\n", 626 proc->pid, page_addr); 627 goto err_alloc_page_failed; 628 } 629 ret = map_kernel_range_noflush((unsigned long)page_addr, 630 PAGE_SIZE, PAGE_KERNEL, page); 631 flush_cache_vmap((unsigned long)page_addr, 632 (unsigned long)page_addr + PAGE_SIZE); 633 if (ret != 1) { 634 pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n", 635 proc->pid, page_addr); 636 goto err_map_kernel_failed; 637 } 638 user_page_addr = 639 (uintptr_t)page_addr + proc->user_buffer_offset; 640 ret = vm_insert_page(vma, user_page_addr, page[0]); 641 if (ret) { 642 pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n", 643 proc->pid, user_page_addr); 644 goto err_vm_insert_page_failed; 645 } 646 /* vm_insert_page does not seem to increment the refcount */ 647 } 648 if (mm) { 649 up_write(&mm->mmap_sem); 650 mmput(mm); 651 } 652 return 0; 653 654 free_range: 655 for (page_addr = end - PAGE_SIZE; page_addr >= start; 656 page_addr -= PAGE_SIZE) { 657 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; 658 if (vma) 659 zap_page_range(vma, (uintptr_t)page_addr + 660 proc->user_buffer_offset, PAGE_SIZE); 661 err_vm_insert_page_failed: 662 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); 663 err_map_kernel_failed: 664 __free_page(*page); 665 *page = NULL; 666 err_alloc_page_failed: 667 ; 668 } 669 err_no_vma: 670 if (mm) { 671 up_write(&mm->mmap_sem); 672 mmput(mm); 673 } 674 return -ENOMEM; 675 } 676 677 static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc, 678 size_t data_size, 679 size_t offsets_size, 680 size_t extra_buffers_size, 681 int is_async) 682 { 683 struct rb_node *n = proc->free_buffers.rb_node; 684 struct binder_buffer *buffer; 685 size_t buffer_size; 686 struct rb_node *best_fit = NULL; 687 void *has_page_addr; 688 void *end_page_addr; 689 size_t size, data_offsets_size; 690 691 if (proc->vma == NULL) { 692 pr_err("%d: binder_alloc_buf, no vma\n", 693 proc->pid); 694 return NULL; 695 } 696 697 data_offsets_size = ALIGN(data_size, sizeof(void *)) + 698 ALIGN(offsets_size, sizeof(void *)); 699 700 if (data_offsets_size < data_size || data_offsets_size < offsets_size) { 701 binder_user_error("%d: got transaction with invalid size %zd-%zd\n", 702 proc->pid, data_size, offsets_size); 703 return NULL; 704 } 705 size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *)); 706 if (size < data_offsets_size || size < extra_buffers_size) { 707 binder_user_error("%d: got transaction with invalid extra_buffers_size %zd\n", 708 proc->pid, extra_buffers_size); 709 return NULL; 710 } 711 if (is_async && 712 proc->free_async_space < size + sizeof(struct binder_buffer)) { 713 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 714 "%d: binder_alloc_buf size %zd failed, no async space left\n", 715 proc->pid, size); 716 return NULL; 717 } 718 719 while (n) { 720 buffer = rb_entry(n, struct binder_buffer, rb_node); 721 BUG_ON(!buffer->free); 722 buffer_size = binder_buffer_size(proc, buffer); 723 724 if (size < buffer_size) { 725 best_fit = n; 726 n = n->rb_left; 727 } else if (size > buffer_size) 728 n = n->rb_right; 729 else { 730 best_fit = n; 731 break; 732 } 733 } 734 if (best_fit == NULL) { 735 pr_err("%d: binder_alloc_buf size %zd failed, no address space\n", 736 proc->pid, size); 737 return NULL; 738 } 739 if (n == NULL) { 740 buffer = rb_entry(best_fit, struct binder_buffer, rb_node); 741 buffer_size = binder_buffer_size(proc, buffer); 742 } 743 744 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 745 "%d: binder_alloc_buf size %zd got buffer %p size %zd\n", 746 proc->pid, size, buffer, buffer_size); 747 748 has_page_addr = 749 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); 750 if (n == NULL) { 751 if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) 752 buffer_size = size; /* no room for other buffers */ 753 else 754 buffer_size = size + sizeof(struct binder_buffer); 755 } 756 end_page_addr = 757 (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size); 758 if (end_page_addr > has_page_addr) 759 end_page_addr = has_page_addr; 760 if (binder_update_page_range(proc, 1, 761 (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL)) 762 return NULL; 763 764 rb_erase(best_fit, &proc->free_buffers); 765 buffer->free = 0; 766 binder_insert_allocated_buffer(proc, buffer); 767 if (buffer_size != size) { 768 struct binder_buffer *new_buffer = (void *)buffer->data + size; 769 770 list_add(&new_buffer->entry, &buffer->entry); 771 new_buffer->free = 1; 772 binder_insert_free_buffer(proc, new_buffer); 773 } 774 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 775 "%d: binder_alloc_buf size %zd got %p\n", 776 proc->pid, size, buffer); 777 buffer->data_size = data_size; 778 buffer->offsets_size = offsets_size; 779 buffer->extra_buffers_size = extra_buffers_size; 780 buffer->async_transaction = is_async; 781 if (is_async) { 782 proc->free_async_space -= size + sizeof(struct binder_buffer); 783 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, 784 "%d: binder_alloc_buf size %zd async free %zd\n", 785 proc->pid, size, proc->free_async_space); 786 } 787 788 return buffer; 789 } 790 791 static void *buffer_start_page(struct binder_buffer *buffer) 792 { 793 return (void *)((uintptr_t)buffer & PAGE_MASK); 794 } 795 796 static void *buffer_end_page(struct binder_buffer *buffer) 797 { 798 return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK); 799 } 800 801 static void binder_delete_free_buffer(struct binder_proc *proc, 802 struct binder_buffer *buffer) 803 { 804 struct binder_buffer *prev, *next = NULL; 805 int free_page_end = 1; 806 int free_page_start = 1; 807 808 BUG_ON(proc->buffers.next == &buffer->entry); 809 prev = list_entry(buffer->entry.prev, struct binder_buffer, entry); 810 BUG_ON(!prev->free); 811 if (buffer_end_page(prev) == buffer_start_page(buffer)) { 812 free_page_start = 0; 813 if (buffer_end_page(prev) == buffer_end_page(buffer)) 814 free_page_end = 0; 815 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 816 "%d: merge free, buffer %p share page with %p\n", 817 proc->pid, buffer, prev); 818 } 819 820 if (!list_is_last(&buffer->entry, &proc->buffers)) { 821 next = list_entry(buffer->entry.next, 822 struct binder_buffer, entry); 823 if (buffer_start_page(next) == buffer_end_page(buffer)) { 824 free_page_end = 0; 825 if (buffer_start_page(next) == 826 buffer_start_page(buffer)) 827 free_page_start = 0; 828 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 829 "%d: merge free, buffer %p share page with %p\n", 830 proc->pid, buffer, prev); 831 } 832 } 833 list_del(&buffer->entry); 834 if (free_page_start || free_page_end) { 835 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 836 "%d: merge free, buffer %p do not share page%s%s with %p or %p\n", 837 proc->pid, buffer, free_page_start ? "" : " end", 838 free_page_end ? "" : " start", prev, next); 839 binder_update_page_range(proc, 0, free_page_start ? 840 buffer_start_page(buffer) : buffer_end_page(buffer), 841 (free_page_end ? buffer_end_page(buffer) : 842 buffer_start_page(buffer)) + PAGE_SIZE, NULL); 843 } 844 } 845 846 static void binder_free_buf(struct binder_proc *proc, 847 struct binder_buffer *buffer) 848 { 849 size_t size, buffer_size; 850 851 buffer_size = binder_buffer_size(proc, buffer); 852 853 size = ALIGN(buffer->data_size, sizeof(void *)) + 854 ALIGN(buffer->offsets_size, sizeof(void *)) + 855 ALIGN(buffer->extra_buffers_size, sizeof(void *)); 856 857 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 858 "%d: binder_free_buf %p size %zd buffer_size %zd\n", 859 proc->pid, buffer, size, buffer_size); 860 861 BUG_ON(buffer->free); 862 BUG_ON(size > buffer_size); 863 BUG_ON(buffer->transaction != NULL); 864 BUG_ON((void *)buffer < proc->buffer); 865 BUG_ON((void *)buffer > proc->buffer + proc->buffer_size); 866 867 if (buffer->async_transaction) { 868 proc->free_async_space += size + sizeof(struct binder_buffer); 869 870 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, 871 "%d: binder_free_buf size %zd async free %zd\n", 872 proc->pid, size, proc->free_async_space); 873 } 874 875 binder_update_page_range(proc, 0, 876 (void *)PAGE_ALIGN((uintptr_t)buffer->data), 877 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), 878 NULL); 879 rb_erase(&buffer->rb_node, &proc->allocated_buffers); 880 buffer->free = 1; 881 if (!list_is_last(&buffer->entry, &proc->buffers)) { 882 struct binder_buffer *next = list_entry(buffer->entry.next, 883 struct binder_buffer, entry); 884 885 if (next->free) { 886 rb_erase(&next->rb_node, &proc->free_buffers); 887 binder_delete_free_buffer(proc, next); 888 } 889 } 890 if (proc->buffers.next != &buffer->entry) { 891 struct binder_buffer *prev = list_entry(buffer->entry.prev, 892 struct binder_buffer, entry); 893 894 if (prev->free) { 895 binder_delete_free_buffer(proc, buffer); 896 rb_erase(&prev->rb_node, &proc->free_buffers); 897 buffer = prev; 898 } 899 } 900 binder_insert_free_buffer(proc, buffer); 901 } 902 903 static struct binder_node *binder_get_node(struct binder_proc *proc, 904 binder_uintptr_t ptr) 905 { 906 struct rb_node *n = proc->nodes.rb_node; 907 struct binder_node *node; 908 909 while (n) { 910 node = rb_entry(n, struct binder_node, rb_node); 911 912 if (ptr < node->ptr) 913 n = n->rb_left; 914 else if (ptr > node->ptr) 915 n = n->rb_right; 916 else 917 return node; 918 } 919 return NULL; 920 } 921 922 static struct binder_node *binder_new_node(struct binder_proc *proc, 923 binder_uintptr_t ptr, 924 binder_uintptr_t cookie) 925 { 926 struct rb_node **p = &proc->nodes.rb_node; 927 struct rb_node *parent = NULL; 928 struct binder_node *node; 929 930 while (*p) { 931 parent = *p; 932 node = rb_entry(parent, struct binder_node, rb_node); 933 934 if (ptr < node->ptr) 935 p = &(*p)->rb_left; 936 else if (ptr > node->ptr) 937 p = &(*p)->rb_right; 938 else 939 return NULL; 940 } 941 942 node = kzalloc(sizeof(*node), GFP_KERNEL); 943 if (node == NULL) 944 return NULL; 945 binder_stats_created(BINDER_STAT_NODE); 946 rb_link_node(&node->rb_node, parent, p); 947 rb_insert_color(&node->rb_node, &proc->nodes); 948 node->debug_id = ++binder_last_id; 949 node->proc = proc; 950 node->ptr = ptr; 951 node->cookie = cookie; 952 node->work.type = BINDER_WORK_NODE; 953 INIT_LIST_HEAD(&node->work.entry); 954 INIT_LIST_HEAD(&node->async_todo); 955 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 956 "%d:%d node %d u%016llx c%016llx created\n", 957 proc->pid, current->pid, node->debug_id, 958 (u64)node->ptr, (u64)node->cookie); 959 return node; 960 } 961 962 static int binder_inc_node(struct binder_node *node, int strong, int internal, 963 struct list_head *target_list) 964 { 965 if (strong) { 966 if (internal) { 967 if (target_list == NULL && 968 node->internal_strong_refs == 0 && 969 !(node->proc && 970 node == node->proc->context->binder_context_mgr_node && 971 node->has_strong_ref)) { 972 pr_err("invalid inc strong node for %d\n", 973 node->debug_id); 974 return -EINVAL; 975 } 976 node->internal_strong_refs++; 977 } else 978 node->local_strong_refs++; 979 if (!node->has_strong_ref && target_list) { 980 list_del_init(&node->work.entry); 981 list_add_tail(&node->work.entry, target_list); 982 } 983 } else { 984 if (!internal) 985 node->local_weak_refs++; 986 if (!node->has_weak_ref && list_empty(&node->work.entry)) { 987 if (target_list == NULL) { 988 pr_err("invalid inc weak node for %d\n", 989 node->debug_id); 990 return -EINVAL; 991 } 992 list_add_tail(&node->work.entry, target_list); 993 } 994 } 995 return 0; 996 } 997 998 static int binder_dec_node(struct binder_node *node, int strong, int internal) 999 { 1000 if (strong) { 1001 if (internal) 1002 node->internal_strong_refs--; 1003 else 1004 node->local_strong_refs--; 1005 if (node->local_strong_refs || node->internal_strong_refs) 1006 return 0; 1007 } else { 1008 if (!internal) 1009 node->local_weak_refs--; 1010 if (node->local_weak_refs || !hlist_empty(&node->refs)) 1011 return 0; 1012 } 1013 if (node->proc && (node->has_strong_ref || node->has_weak_ref)) { 1014 if (list_empty(&node->work.entry)) { 1015 list_add_tail(&node->work.entry, &node->proc->todo); 1016 wake_up_interruptible(&node->proc->wait); 1017 } 1018 } else { 1019 if (hlist_empty(&node->refs) && !node->local_strong_refs && 1020 !node->local_weak_refs) { 1021 list_del_init(&node->work.entry); 1022 if (node->proc) { 1023 rb_erase(&node->rb_node, &node->proc->nodes); 1024 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1025 "refless node %d deleted\n", 1026 node->debug_id); 1027 } else { 1028 hlist_del(&node->dead_node); 1029 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1030 "dead node %d deleted\n", 1031 node->debug_id); 1032 } 1033 kfree(node); 1034 binder_stats_deleted(BINDER_STAT_NODE); 1035 } 1036 } 1037 1038 return 0; 1039 } 1040 1041 1042 static struct binder_ref *binder_get_ref(struct binder_proc *proc, 1043 u32 desc, bool need_strong_ref) 1044 { 1045 struct rb_node *n = proc->refs_by_desc.rb_node; 1046 struct binder_ref *ref; 1047 1048 while (n) { 1049 ref = rb_entry(n, struct binder_ref, rb_node_desc); 1050 1051 if (desc < ref->desc) { 1052 n = n->rb_left; 1053 } else if (desc > ref->desc) { 1054 n = n->rb_right; 1055 } else if (need_strong_ref && !ref->strong) { 1056 binder_user_error("tried to use weak ref as strong ref\n"); 1057 return NULL; 1058 } else { 1059 return ref; 1060 } 1061 } 1062 return NULL; 1063 } 1064 1065 static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc, 1066 struct binder_node *node) 1067 { 1068 struct rb_node *n; 1069 struct rb_node **p = &proc->refs_by_node.rb_node; 1070 struct rb_node *parent = NULL; 1071 struct binder_ref *ref, *new_ref; 1072 struct binder_context *context = proc->context; 1073 1074 while (*p) { 1075 parent = *p; 1076 ref = rb_entry(parent, struct binder_ref, rb_node_node); 1077 1078 if (node < ref->node) 1079 p = &(*p)->rb_left; 1080 else if (node > ref->node) 1081 p = &(*p)->rb_right; 1082 else 1083 return ref; 1084 } 1085 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL); 1086 if (new_ref == NULL) 1087 return NULL; 1088 binder_stats_created(BINDER_STAT_REF); 1089 new_ref->debug_id = ++binder_last_id; 1090 new_ref->proc = proc; 1091 new_ref->node = node; 1092 rb_link_node(&new_ref->rb_node_node, parent, p); 1093 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node); 1094 1095 new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1; 1096 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { 1097 ref = rb_entry(n, struct binder_ref, rb_node_desc); 1098 if (ref->desc > new_ref->desc) 1099 break; 1100 new_ref->desc = ref->desc + 1; 1101 } 1102 1103 p = &proc->refs_by_desc.rb_node; 1104 while (*p) { 1105 parent = *p; 1106 ref = rb_entry(parent, struct binder_ref, rb_node_desc); 1107 1108 if (new_ref->desc < ref->desc) 1109 p = &(*p)->rb_left; 1110 else if (new_ref->desc > ref->desc) 1111 p = &(*p)->rb_right; 1112 else 1113 BUG(); 1114 } 1115 rb_link_node(&new_ref->rb_node_desc, parent, p); 1116 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc); 1117 if (node) { 1118 hlist_add_head(&new_ref->node_entry, &node->refs); 1119 1120 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1121 "%d new ref %d desc %d for node %d\n", 1122 proc->pid, new_ref->debug_id, new_ref->desc, 1123 node->debug_id); 1124 } else { 1125 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1126 "%d new ref %d desc %d for dead node\n", 1127 proc->pid, new_ref->debug_id, new_ref->desc); 1128 } 1129 return new_ref; 1130 } 1131 1132 static void binder_delete_ref(struct binder_ref *ref) 1133 { 1134 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1135 "%d delete ref %d desc %d for node %d\n", 1136 ref->proc->pid, ref->debug_id, ref->desc, 1137 ref->node->debug_id); 1138 1139 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc); 1140 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node); 1141 if (ref->strong) 1142 binder_dec_node(ref->node, 1, 1); 1143 hlist_del(&ref->node_entry); 1144 binder_dec_node(ref->node, 0, 1); 1145 if (ref->death) { 1146 binder_debug(BINDER_DEBUG_DEAD_BINDER, 1147 "%d delete ref %d desc %d has death notification\n", 1148 ref->proc->pid, ref->debug_id, ref->desc); 1149 list_del(&ref->death->work.entry); 1150 kfree(ref->death); 1151 binder_stats_deleted(BINDER_STAT_DEATH); 1152 } 1153 kfree(ref); 1154 binder_stats_deleted(BINDER_STAT_REF); 1155 } 1156 1157 static int binder_inc_ref(struct binder_ref *ref, int strong, 1158 struct list_head *target_list) 1159 { 1160 int ret; 1161 1162 if (strong) { 1163 if (ref->strong == 0) { 1164 ret = binder_inc_node(ref->node, 1, 1, target_list); 1165 if (ret) 1166 return ret; 1167 } 1168 ref->strong++; 1169 } else { 1170 if (ref->weak == 0) { 1171 ret = binder_inc_node(ref->node, 0, 1, target_list); 1172 if (ret) 1173 return ret; 1174 } 1175 ref->weak++; 1176 } 1177 return 0; 1178 } 1179 1180 1181 static int binder_dec_ref(struct binder_ref *ref, int strong) 1182 { 1183 if (strong) { 1184 if (ref->strong == 0) { 1185 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n", 1186 ref->proc->pid, ref->debug_id, 1187 ref->desc, ref->strong, ref->weak); 1188 return -EINVAL; 1189 } 1190 ref->strong--; 1191 if (ref->strong == 0) { 1192 int ret; 1193 1194 ret = binder_dec_node(ref->node, strong, 1); 1195 if (ret) 1196 return ret; 1197 } 1198 } else { 1199 if (ref->weak == 0) { 1200 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n", 1201 ref->proc->pid, ref->debug_id, 1202 ref->desc, ref->strong, ref->weak); 1203 return -EINVAL; 1204 } 1205 ref->weak--; 1206 } 1207 if (ref->strong == 0 && ref->weak == 0) 1208 binder_delete_ref(ref); 1209 return 0; 1210 } 1211 1212 static void binder_pop_transaction(struct binder_thread *target_thread, 1213 struct binder_transaction *t) 1214 { 1215 if (target_thread) { 1216 BUG_ON(target_thread->transaction_stack != t); 1217 BUG_ON(target_thread->transaction_stack->from != target_thread); 1218 target_thread->transaction_stack = 1219 target_thread->transaction_stack->from_parent; 1220 t->from = NULL; 1221 } 1222 t->need_reply = 0; 1223 if (t->buffer) 1224 t->buffer->transaction = NULL; 1225 kfree(t); 1226 binder_stats_deleted(BINDER_STAT_TRANSACTION); 1227 } 1228 1229 static void binder_send_failed_reply(struct binder_transaction *t, 1230 uint32_t error_code) 1231 { 1232 struct binder_thread *target_thread; 1233 struct binder_transaction *next; 1234 1235 BUG_ON(t->flags & TF_ONE_WAY); 1236 while (1) { 1237 target_thread = t->from; 1238 if (target_thread) { 1239 if (target_thread->return_error != BR_OK && 1240 target_thread->return_error2 == BR_OK) { 1241 target_thread->return_error2 = 1242 target_thread->return_error; 1243 target_thread->return_error = BR_OK; 1244 } 1245 if (target_thread->return_error == BR_OK) { 1246 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 1247 "send failed reply for transaction %d to %d:%d\n", 1248 t->debug_id, 1249 target_thread->proc->pid, 1250 target_thread->pid); 1251 1252 binder_pop_transaction(target_thread, t); 1253 target_thread->return_error = error_code; 1254 wake_up_interruptible(&target_thread->wait); 1255 } else { 1256 pr_err("reply failed, target thread, %d:%d, has error code %d already\n", 1257 target_thread->proc->pid, 1258 target_thread->pid, 1259 target_thread->return_error); 1260 } 1261 return; 1262 } 1263 next = t->from_parent; 1264 1265 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 1266 "send failed reply for transaction %d, target dead\n", 1267 t->debug_id); 1268 1269 binder_pop_transaction(target_thread, t); 1270 if (next == NULL) { 1271 binder_debug(BINDER_DEBUG_DEAD_BINDER, 1272 "reply failed, no target thread at root\n"); 1273 return; 1274 } 1275 t = next; 1276 binder_debug(BINDER_DEBUG_DEAD_BINDER, 1277 "reply failed, no target thread -- retry %d\n", 1278 t->debug_id); 1279 } 1280 } 1281 1282 /** 1283 * binder_validate_object() - checks for a valid metadata object in a buffer. 1284 * @buffer: binder_buffer that we're parsing. 1285 * @offset: offset in the buffer at which to validate an object. 1286 * 1287 * Return: If there's a valid metadata object at @offset in @buffer, the 1288 * size of that object. Otherwise, it returns zero. 1289 */ 1290 static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset) 1291 { 1292 /* Check if we can read a header first */ 1293 struct binder_object_header *hdr; 1294 size_t object_size = 0; 1295 1296 if (offset > buffer->data_size - sizeof(*hdr) || 1297 buffer->data_size < sizeof(*hdr) || 1298 !IS_ALIGNED(offset, sizeof(u32))) 1299 return 0; 1300 1301 /* Ok, now see if we can read a complete object. */ 1302 hdr = (struct binder_object_header *)(buffer->data + offset); 1303 switch (hdr->type) { 1304 case BINDER_TYPE_BINDER: 1305 case BINDER_TYPE_WEAK_BINDER: 1306 case BINDER_TYPE_HANDLE: 1307 case BINDER_TYPE_WEAK_HANDLE: 1308 object_size = sizeof(struct flat_binder_object); 1309 break; 1310 case BINDER_TYPE_FD: 1311 object_size = sizeof(struct binder_fd_object); 1312 break; 1313 case BINDER_TYPE_PTR: 1314 object_size = sizeof(struct binder_buffer_object); 1315 break; 1316 case BINDER_TYPE_FDA: 1317 object_size = sizeof(struct binder_fd_array_object); 1318 break; 1319 default: 1320 return 0; 1321 } 1322 if (offset <= buffer->data_size - object_size && 1323 buffer->data_size >= object_size) 1324 return object_size; 1325 else 1326 return 0; 1327 } 1328 1329 /** 1330 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer. 1331 * @b: binder_buffer containing the object 1332 * @index: index in offset array at which the binder_buffer_object is 1333 * located 1334 * @start: points to the start of the offset array 1335 * @num_valid: the number of valid offsets in the offset array 1336 * 1337 * Return: If @index is within the valid range of the offset array 1338 * described by @start and @num_valid, and if there's a valid 1339 * binder_buffer_object at the offset found in index @index 1340 * of the offset array, that object is returned. Otherwise, 1341 * %NULL is returned. 1342 * Note that the offset found in index @index itself is not 1343 * verified; this function assumes that @num_valid elements 1344 * from @start were previously verified to have valid offsets. 1345 */ 1346 static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b, 1347 binder_size_t index, 1348 binder_size_t *start, 1349 binder_size_t num_valid) 1350 { 1351 struct binder_buffer_object *buffer_obj; 1352 binder_size_t *offp; 1353 1354 if (index >= num_valid) 1355 return NULL; 1356 1357 offp = start + index; 1358 buffer_obj = (struct binder_buffer_object *)(b->data + *offp); 1359 if (buffer_obj->hdr.type != BINDER_TYPE_PTR) 1360 return NULL; 1361 1362 return buffer_obj; 1363 } 1364 1365 /** 1366 * binder_validate_fixup() - validates pointer/fd fixups happen in order. 1367 * @b: transaction buffer 1368 * @objects_start start of objects buffer 1369 * @buffer: binder_buffer_object in which to fix up 1370 * @offset: start offset in @buffer to fix up 1371 * @last_obj: last binder_buffer_object that we fixed up in 1372 * @last_min_offset: minimum fixup offset in @last_obj 1373 * 1374 * Return: %true if a fixup in buffer @buffer at offset @offset is 1375 * allowed. 1376 * 1377 * For safety reasons, we only allow fixups inside a buffer to happen 1378 * at increasing offsets; additionally, we only allow fixup on the last 1379 * buffer object that was verified, or one of its parents. 1380 * 1381 * Example of what is allowed: 1382 * 1383 * A 1384 * B (parent = A, offset = 0) 1385 * C (parent = A, offset = 16) 1386 * D (parent = C, offset = 0) 1387 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset) 1388 * 1389 * Examples of what is not allowed: 1390 * 1391 * Decreasing offsets within the same parent: 1392 * A 1393 * C (parent = A, offset = 16) 1394 * B (parent = A, offset = 0) // decreasing offset within A 1395 * 1396 * Referring to a parent that wasn't the last object or any of its parents: 1397 * A 1398 * B (parent = A, offset = 0) 1399 * C (parent = A, offset = 0) 1400 * C (parent = A, offset = 16) 1401 * D (parent = B, offset = 0) // B is not A or any of A's parents 1402 */ 1403 static bool binder_validate_fixup(struct binder_buffer *b, 1404 binder_size_t *objects_start, 1405 struct binder_buffer_object *buffer, 1406 binder_size_t fixup_offset, 1407 struct binder_buffer_object *last_obj, 1408 binder_size_t last_min_offset) 1409 { 1410 if (!last_obj) { 1411 /* Nothing to fix up in */ 1412 return false; 1413 } 1414 1415 while (last_obj != buffer) { 1416 /* 1417 * Safe to retrieve the parent of last_obj, since it 1418 * was already previously verified by the driver. 1419 */ 1420 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0) 1421 return false; 1422 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t); 1423 last_obj = (struct binder_buffer_object *) 1424 (b->data + *(objects_start + last_obj->parent)); 1425 } 1426 return (fixup_offset >= last_min_offset); 1427 } 1428 1429 static void binder_transaction_buffer_release(struct binder_proc *proc, 1430 struct binder_buffer *buffer, 1431 binder_size_t *failed_at) 1432 { 1433 binder_size_t *offp, *off_start, *off_end; 1434 int debug_id = buffer->debug_id; 1435 1436 binder_debug(BINDER_DEBUG_TRANSACTION, 1437 "%d buffer release %d, size %zd-%zd, failed at %p\n", 1438 proc->pid, buffer->debug_id, 1439 buffer->data_size, buffer->offsets_size, failed_at); 1440 1441 if (buffer->target_node) 1442 binder_dec_node(buffer->target_node, 1, 0); 1443 1444 off_start = (binder_size_t *)(buffer->data + 1445 ALIGN(buffer->data_size, sizeof(void *))); 1446 if (failed_at) 1447 off_end = failed_at; 1448 else 1449 off_end = (void *)off_start + buffer->offsets_size; 1450 for (offp = off_start; offp < off_end; offp++) { 1451 struct binder_object_header *hdr; 1452 size_t object_size = binder_validate_object(buffer, *offp); 1453 1454 if (object_size == 0) { 1455 pr_err("transaction release %d bad object at offset %lld, size %zd\n", 1456 debug_id, (u64)*offp, buffer->data_size); 1457 continue; 1458 } 1459 hdr = (struct binder_object_header *)(buffer->data + *offp); 1460 switch (hdr->type) { 1461 case BINDER_TYPE_BINDER: 1462 case BINDER_TYPE_WEAK_BINDER: { 1463 struct flat_binder_object *fp; 1464 struct binder_node *node; 1465 1466 fp = to_flat_binder_object(hdr); 1467 node = binder_get_node(proc, fp->binder); 1468 if (node == NULL) { 1469 pr_err("transaction release %d bad node %016llx\n", 1470 debug_id, (u64)fp->binder); 1471 break; 1472 } 1473 binder_debug(BINDER_DEBUG_TRANSACTION, 1474 " node %d u%016llx\n", 1475 node->debug_id, (u64)node->ptr); 1476 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER, 1477 0); 1478 } break; 1479 case BINDER_TYPE_HANDLE: 1480 case BINDER_TYPE_WEAK_HANDLE: { 1481 struct flat_binder_object *fp; 1482 struct binder_ref *ref; 1483 1484 fp = to_flat_binder_object(hdr); 1485 ref = binder_get_ref(proc, fp->handle, 1486 hdr->type == BINDER_TYPE_HANDLE); 1487 if (ref == NULL) { 1488 pr_err("transaction release %d bad handle %d\n", 1489 debug_id, fp->handle); 1490 break; 1491 } 1492 binder_debug(BINDER_DEBUG_TRANSACTION, 1493 " ref %d desc %d (node %d)\n", 1494 ref->debug_id, ref->desc, ref->node->debug_id); 1495 binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE); 1496 } break; 1497 1498 case BINDER_TYPE_FD: { 1499 struct binder_fd_object *fp = to_binder_fd_object(hdr); 1500 1501 binder_debug(BINDER_DEBUG_TRANSACTION, 1502 " fd %d\n", fp->fd); 1503 if (failed_at) 1504 task_close_fd(proc, fp->fd); 1505 } break; 1506 case BINDER_TYPE_PTR: 1507 /* 1508 * Nothing to do here, this will get cleaned up when the 1509 * transaction buffer gets freed 1510 */ 1511 break; 1512 case BINDER_TYPE_FDA: { 1513 struct binder_fd_array_object *fda; 1514 struct binder_buffer_object *parent; 1515 uintptr_t parent_buffer; 1516 u32 *fd_array; 1517 size_t fd_index; 1518 binder_size_t fd_buf_size; 1519 1520 fda = to_binder_fd_array_object(hdr); 1521 parent = binder_validate_ptr(buffer, fda->parent, 1522 off_start, 1523 offp - off_start); 1524 if (!parent) { 1525 pr_err("transaction release %d bad parent offset", 1526 debug_id); 1527 continue; 1528 } 1529 /* 1530 * Since the parent was already fixed up, convert it 1531 * back to kernel address space to access it 1532 */ 1533 parent_buffer = parent->buffer - 1534 proc->user_buffer_offset; 1535 1536 fd_buf_size = sizeof(u32) * fda->num_fds; 1537 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 1538 pr_err("transaction release %d invalid number of fds (%lld)\n", 1539 debug_id, (u64)fda->num_fds); 1540 continue; 1541 } 1542 if (fd_buf_size > parent->length || 1543 fda->parent_offset > parent->length - fd_buf_size) { 1544 /* No space for all file descriptors here. */ 1545 pr_err("transaction release %d not enough space for %lld fds in buffer\n", 1546 debug_id, (u64)fda->num_fds); 1547 continue; 1548 } 1549 fd_array = (u32 *)(parent_buffer + fda->parent_offset); 1550 for (fd_index = 0; fd_index < fda->num_fds; fd_index++) 1551 task_close_fd(proc, fd_array[fd_index]); 1552 } break; 1553 default: 1554 pr_err("transaction release %d bad object type %x\n", 1555 debug_id, hdr->type); 1556 break; 1557 } 1558 } 1559 } 1560 1561 static int binder_translate_binder(struct flat_binder_object *fp, 1562 struct binder_transaction *t, 1563 struct binder_thread *thread) 1564 { 1565 struct binder_node *node; 1566 struct binder_ref *ref; 1567 struct binder_proc *proc = thread->proc; 1568 struct binder_proc *target_proc = t->to_proc; 1569 1570 node = binder_get_node(proc, fp->binder); 1571 if (!node) { 1572 node = binder_new_node(proc, fp->binder, fp->cookie); 1573 if (!node) 1574 return -ENOMEM; 1575 1576 node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK; 1577 node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); 1578 } 1579 if (fp->cookie != node->cookie) { 1580 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n", 1581 proc->pid, thread->pid, (u64)fp->binder, 1582 node->debug_id, (u64)fp->cookie, 1583 (u64)node->cookie); 1584 return -EINVAL; 1585 } 1586 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) 1587 return -EPERM; 1588 1589 ref = binder_get_ref_for_node(target_proc, node); 1590 if (!ref) 1591 return -EINVAL; 1592 1593 if (fp->hdr.type == BINDER_TYPE_BINDER) 1594 fp->hdr.type = BINDER_TYPE_HANDLE; 1595 else 1596 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE; 1597 fp->binder = 0; 1598 fp->handle = ref->desc; 1599 fp->cookie = 0; 1600 binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo); 1601 1602 trace_binder_transaction_node_to_ref(t, node, ref); 1603 binder_debug(BINDER_DEBUG_TRANSACTION, 1604 " node %d u%016llx -> ref %d desc %d\n", 1605 node->debug_id, (u64)node->ptr, 1606 ref->debug_id, ref->desc); 1607 1608 return 0; 1609 } 1610 1611 static int binder_translate_handle(struct flat_binder_object *fp, 1612 struct binder_transaction *t, 1613 struct binder_thread *thread) 1614 { 1615 struct binder_ref *ref; 1616 struct binder_proc *proc = thread->proc; 1617 struct binder_proc *target_proc = t->to_proc; 1618 1619 ref = binder_get_ref(proc, fp->handle, 1620 fp->hdr.type == BINDER_TYPE_HANDLE); 1621 if (!ref) { 1622 binder_user_error("%d:%d got transaction with invalid handle, %d\n", 1623 proc->pid, thread->pid, fp->handle); 1624 return -EINVAL; 1625 } 1626 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) 1627 return -EPERM; 1628 1629 if (ref->node->proc == target_proc) { 1630 if (fp->hdr.type == BINDER_TYPE_HANDLE) 1631 fp->hdr.type = BINDER_TYPE_BINDER; 1632 else 1633 fp->hdr.type = BINDER_TYPE_WEAK_BINDER; 1634 fp->binder = ref->node->ptr; 1635 fp->cookie = ref->node->cookie; 1636 binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER, 1637 0, NULL); 1638 trace_binder_transaction_ref_to_node(t, ref); 1639 binder_debug(BINDER_DEBUG_TRANSACTION, 1640 " ref %d desc %d -> node %d u%016llx\n", 1641 ref->debug_id, ref->desc, ref->node->debug_id, 1642 (u64)ref->node->ptr); 1643 } else { 1644 struct binder_ref *new_ref; 1645 1646 new_ref = binder_get_ref_for_node(target_proc, ref->node); 1647 if (!new_ref) 1648 return -EINVAL; 1649 1650 fp->binder = 0; 1651 fp->handle = new_ref->desc; 1652 fp->cookie = 0; 1653 binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE, 1654 NULL); 1655 trace_binder_transaction_ref_to_ref(t, ref, new_ref); 1656 binder_debug(BINDER_DEBUG_TRANSACTION, 1657 " ref %d desc %d -> ref %d desc %d (node %d)\n", 1658 ref->debug_id, ref->desc, new_ref->debug_id, 1659 new_ref->desc, ref->node->debug_id); 1660 } 1661 return 0; 1662 } 1663 1664 static int binder_translate_fd(int fd, 1665 struct binder_transaction *t, 1666 struct binder_thread *thread, 1667 struct binder_transaction *in_reply_to) 1668 { 1669 struct binder_proc *proc = thread->proc; 1670 struct binder_proc *target_proc = t->to_proc; 1671 int target_fd; 1672 struct file *file; 1673 int ret; 1674 bool target_allows_fd; 1675 1676 if (in_reply_to) 1677 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS); 1678 else 1679 target_allows_fd = t->buffer->target_node->accept_fds; 1680 if (!target_allows_fd) { 1681 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n", 1682 proc->pid, thread->pid, 1683 in_reply_to ? "reply" : "transaction", 1684 fd); 1685 ret = -EPERM; 1686 goto err_fd_not_accepted; 1687 } 1688 1689 file = fget(fd); 1690 if (!file) { 1691 binder_user_error("%d:%d got transaction with invalid fd, %d\n", 1692 proc->pid, thread->pid, fd); 1693 ret = -EBADF; 1694 goto err_fget; 1695 } 1696 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file); 1697 if (ret < 0) { 1698 ret = -EPERM; 1699 goto err_security; 1700 } 1701 1702 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); 1703 if (target_fd < 0) { 1704 ret = -ENOMEM; 1705 goto err_get_unused_fd; 1706 } 1707 task_fd_install(target_proc, target_fd, file); 1708 trace_binder_transaction_fd(t, fd, target_fd); 1709 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n", 1710 fd, target_fd); 1711 1712 return target_fd; 1713 1714 err_get_unused_fd: 1715 err_security: 1716 fput(file); 1717 err_fget: 1718 err_fd_not_accepted: 1719 return ret; 1720 } 1721 1722 static int binder_translate_fd_array(struct binder_fd_array_object *fda, 1723 struct binder_buffer_object *parent, 1724 struct binder_transaction *t, 1725 struct binder_thread *thread, 1726 struct binder_transaction *in_reply_to) 1727 { 1728 binder_size_t fdi, fd_buf_size, num_installed_fds; 1729 int target_fd; 1730 uintptr_t parent_buffer; 1731 u32 *fd_array; 1732 struct binder_proc *proc = thread->proc; 1733 struct binder_proc *target_proc = t->to_proc; 1734 1735 fd_buf_size = sizeof(u32) * fda->num_fds; 1736 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 1737 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n", 1738 proc->pid, thread->pid, (u64)fda->num_fds); 1739 return -EINVAL; 1740 } 1741 if (fd_buf_size > parent->length || 1742 fda->parent_offset > parent->length - fd_buf_size) { 1743 /* No space for all file descriptors here. */ 1744 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n", 1745 proc->pid, thread->pid, (u64)fda->num_fds); 1746 return -EINVAL; 1747 } 1748 /* 1749 * Since the parent was already fixed up, convert it 1750 * back to the kernel address space to access it 1751 */ 1752 parent_buffer = parent->buffer - target_proc->user_buffer_offset; 1753 fd_array = (u32 *)(parent_buffer + fda->parent_offset); 1754 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) { 1755 binder_user_error("%d:%d parent offset not aligned correctly.\n", 1756 proc->pid, thread->pid); 1757 return -EINVAL; 1758 } 1759 for (fdi = 0; fdi < fda->num_fds; fdi++) { 1760 target_fd = binder_translate_fd(fd_array[fdi], t, thread, 1761 in_reply_to); 1762 if (target_fd < 0) 1763 goto err_translate_fd_failed; 1764 fd_array[fdi] = target_fd; 1765 } 1766 return 0; 1767 1768 err_translate_fd_failed: 1769 /* 1770 * Failed to allocate fd or security error, free fds 1771 * installed so far. 1772 */ 1773 num_installed_fds = fdi; 1774 for (fdi = 0; fdi < num_installed_fds; fdi++) 1775 task_close_fd(target_proc, fd_array[fdi]); 1776 return target_fd; 1777 } 1778 1779 static int binder_fixup_parent(struct binder_transaction *t, 1780 struct binder_thread *thread, 1781 struct binder_buffer_object *bp, 1782 binder_size_t *off_start, 1783 binder_size_t num_valid, 1784 struct binder_buffer_object *last_fixup_obj, 1785 binder_size_t last_fixup_min_off) 1786 { 1787 struct binder_buffer_object *parent; 1788 u8 *parent_buffer; 1789 struct binder_buffer *b = t->buffer; 1790 struct binder_proc *proc = thread->proc; 1791 struct binder_proc *target_proc = t->to_proc; 1792 1793 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT)) 1794 return 0; 1795 1796 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid); 1797 if (!parent) { 1798 binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 1799 proc->pid, thread->pid); 1800 return -EINVAL; 1801 } 1802 1803 if (!binder_validate_fixup(b, off_start, 1804 parent, bp->parent_offset, 1805 last_fixup_obj, 1806 last_fixup_min_off)) { 1807 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", 1808 proc->pid, thread->pid); 1809 return -EINVAL; 1810 } 1811 1812 if (parent->length < sizeof(binder_uintptr_t) || 1813 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) { 1814 /* No space for a pointer here! */ 1815 binder_user_error("%d:%d got transaction with invalid parent offset\n", 1816 proc->pid, thread->pid); 1817 return -EINVAL; 1818 } 1819 parent_buffer = (u8 *)(parent->buffer - 1820 target_proc->user_buffer_offset); 1821 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer; 1822 1823 return 0; 1824 } 1825 1826 static void binder_transaction(struct binder_proc *proc, 1827 struct binder_thread *thread, 1828 struct binder_transaction_data *tr, int reply, 1829 binder_size_t extra_buffers_size) 1830 { 1831 int ret; 1832 struct binder_transaction *t; 1833 struct binder_work *tcomplete; 1834 binder_size_t *offp, *off_end, *off_start; 1835 binder_size_t off_min; 1836 u8 *sg_bufp, *sg_buf_end; 1837 struct binder_proc *target_proc; 1838 struct binder_thread *target_thread = NULL; 1839 struct binder_node *target_node = NULL; 1840 struct list_head *target_list; 1841 wait_queue_head_t *target_wait; 1842 struct binder_transaction *in_reply_to = NULL; 1843 struct binder_transaction_log_entry *e; 1844 uint32_t return_error; 1845 struct binder_buffer_object *last_fixup_obj = NULL; 1846 binder_size_t last_fixup_min_off = 0; 1847 struct binder_context *context = proc->context; 1848 1849 e = binder_transaction_log_add(&binder_transaction_log); 1850 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); 1851 e->from_proc = proc->pid; 1852 e->from_thread = thread->pid; 1853 e->target_handle = tr->target.handle; 1854 e->data_size = tr->data_size; 1855 e->offsets_size = tr->offsets_size; 1856 e->context_name = proc->context->name; 1857 1858 if (reply) { 1859 in_reply_to = thread->transaction_stack; 1860 if (in_reply_to == NULL) { 1861 binder_user_error("%d:%d got reply transaction with no transaction stack\n", 1862 proc->pid, thread->pid); 1863 return_error = BR_FAILED_REPLY; 1864 goto err_empty_call_stack; 1865 } 1866 binder_set_nice(in_reply_to->saved_priority); 1867 if (in_reply_to->to_thread != thread) { 1868 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n", 1869 proc->pid, thread->pid, in_reply_to->debug_id, 1870 in_reply_to->to_proc ? 1871 in_reply_to->to_proc->pid : 0, 1872 in_reply_to->to_thread ? 1873 in_reply_to->to_thread->pid : 0); 1874 return_error = BR_FAILED_REPLY; 1875 in_reply_to = NULL; 1876 goto err_bad_call_stack; 1877 } 1878 thread->transaction_stack = in_reply_to->to_parent; 1879 target_thread = in_reply_to->from; 1880 if (target_thread == NULL) { 1881 return_error = BR_DEAD_REPLY; 1882 goto err_dead_binder; 1883 } 1884 if (target_thread->transaction_stack != in_reply_to) { 1885 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n", 1886 proc->pid, thread->pid, 1887 target_thread->transaction_stack ? 1888 target_thread->transaction_stack->debug_id : 0, 1889 in_reply_to->debug_id); 1890 return_error = BR_FAILED_REPLY; 1891 in_reply_to = NULL; 1892 target_thread = NULL; 1893 goto err_dead_binder; 1894 } 1895 target_proc = target_thread->proc; 1896 } else { 1897 if (tr->target.handle) { 1898 struct binder_ref *ref; 1899 1900 ref = binder_get_ref(proc, tr->target.handle, true); 1901 if (ref == NULL) { 1902 binder_user_error("%d:%d got transaction to invalid handle\n", 1903 proc->pid, thread->pid); 1904 return_error = BR_FAILED_REPLY; 1905 goto err_invalid_target_handle; 1906 } 1907 target_node = ref->node; 1908 } else { 1909 target_node = context->binder_context_mgr_node; 1910 if (target_node == NULL) { 1911 return_error = BR_DEAD_REPLY; 1912 goto err_no_context_mgr_node; 1913 } 1914 } 1915 e->to_node = target_node->debug_id; 1916 target_proc = target_node->proc; 1917 if (target_proc == NULL) { 1918 return_error = BR_DEAD_REPLY; 1919 goto err_dead_binder; 1920 } 1921 if (security_binder_transaction(proc->tsk, 1922 target_proc->tsk) < 0) { 1923 return_error = BR_FAILED_REPLY; 1924 goto err_invalid_target_handle; 1925 } 1926 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { 1927 struct binder_transaction *tmp; 1928 1929 tmp = thread->transaction_stack; 1930 if (tmp->to_thread != thread) { 1931 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", 1932 proc->pid, thread->pid, tmp->debug_id, 1933 tmp->to_proc ? tmp->to_proc->pid : 0, 1934 tmp->to_thread ? 1935 tmp->to_thread->pid : 0); 1936 return_error = BR_FAILED_REPLY; 1937 goto err_bad_call_stack; 1938 } 1939 while (tmp) { 1940 if (tmp->from && tmp->from->proc == target_proc) 1941 target_thread = tmp->from; 1942 tmp = tmp->from_parent; 1943 } 1944 } 1945 } 1946 if (target_thread) { 1947 e->to_thread = target_thread->pid; 1948 target_list = &target_thread->todo; 1949 target_wait = &target_thread->wait; 1950 } else { 1951 target_list = &target_proc->todo; 1952 target_wait = &target_proc->wait; 1953 } 1954 e->to_proc = target_proc->pid; 1955 1956 /* TODO: reuse incoming transaction for reply */ 1957 t = kzalloc(sizeof(*t), GFP_KERNEL); 1958 if (t == NULL) { 1959 return_error = BR_FAILED_REPLY; 1960 goto err_alloc_t_failed; 1961 } 1962 binder_stats_created(BINDER_STAT_TRANSACTION); 1963 1964 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); 1965 if (tcomplete == NULL) { 1966 return_error = BR_FAILED_REPLY; 1967 goto err_alloc_tcomplete_failed; 1968 } 1969 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); 1970 1971 t->debug_id = ++binder_last_id; 1972 e->debug_id = t->debug_id; 1973 1974 if (reply) 1975 binder_debug(BINDER_DEBUG_TRANSACTION, 1976 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n", 1977 proc->pid, thread->pid, t->debug_id, 1978 target_proc->pid, target_thread->pid, 1979 (u64)tr->data.ptr.buffer, 1980 (u64)tr->data.ptr.offsets, 1981 (u64)tr->data_size, (u64)tr->offsets_size, 1982 (u64)extra_buffers_size); 1983 else 1984 binder_debug(BINDER_DEBUG_TRANSACTION, 1985 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n", 1986 proc->pid, thread->pid, t->debug_id, 1987 target_proc->pid, target_node->debug_id, 1988 (u64)tr->data.ptr.buffer, 1989 (u64)tr->data.ptr.offsets, 1990 (u64)tr->data_size, (u64)tr->offsets_size, 1991 (u64)extra_buffers_size); 1992 1993 if (!reply && !(tr->flags & TF_ONE_WAY)) 1994 t->from = thread; 1995 else 1996 t->from = NULL; 1997 t->sender_euid = task_euid(proc->tsk); 1998 t->to_proc = target_proc; 1999 t->to_thread = target_thread; 2000 t->code = tr->code; 2001 t->flags = tr->flags; 2002 t->priority = task_nice(current); 2003 2004 trace_binder_transaction(reply, t, target_node); 2005 2006 t->buffer = binder_alloc_buf(target_proc, tr->data_size, 2007 tr->offsets_size, extra_buffers_size, 2008 !reply && (t->flags & TF_ONE_WAY)); 2009 if (t->buffer == NULL) { 2010 return_error = BR_FAILED_REPLY; 2011 goto err_binder_alloc_buf_failed; 2012 } 2013 t->buffer->allow_user_free = 0; 2014 t->buffer->debug_id = t->debug_id; 2015 t->buffer->transaction = t; 2016 t->buffer->target_node = target_node; 2017 trace_binder_transaction_alloc_buf(t->buffer); 2018 if (target_node) 2019 binder_inc_node(target_node, 1, 0, NULL); 2020 2021 off_start = (binder_size_t *)(t->buffer->data + 2022 ALIGN(tr->data_size, sizeof(void *))); 2023 offp = off_start; 2024 2025 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t) 2026 tr->data.ptr.buffer, tr->data_size)) { 2027 binder_user_error("%d:%d got transaction with invalid data ptr\n", 2028 proc->pid, thread->pid); 2029 return_error = BR_FAILED_REPLY; 2030 goto err_copy_data_failed; 2031 } 2032 if (copy_from_user(offp, (const void __user *)(uintptr_t) 2033 tr->data.ptr.offsets, tr->offsets_size)) { 2034 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 2035 proc->pid, thread->pid); 2036 return_error = BR_FAILED_REPLY; 2037 goto err_copy_data_failed; 2038 } 2039 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) { 2040 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n", 2041 proc->pid, thread->pid, (u64)tr->offsets_size); 2042 return_error = BR_FAILED_REPLY; 2043 goto err_bad_offset; 2044 } 2045 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) { 2046 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n", 2047 proc->pid, thread->pid, 2048 (u64)extra_buffers_size); 2049 return_error = BR_FAILED_REPLY; 2050 goto err_bad_offset; 2051 } 2052 off_end = (void *)off_start + tr->offsets_size; 2053 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *))); 2054 sg_buf_end = sg_bufp + extra_buffers_size; 2055 off_min = 0; 2056 for (; offp < off_end; offp++) { 2057 struct binder_object_header *hdr; 2058 size_t object_size = binder_validate_object(t->buffer, *offp); 2059 2060 if (object_size == 0 || *offp < off_min) { 2061 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n", 2062 proc->pid, thread->pid, (u64)*offp, 2063 (u64)off_min, 2064 (u64)t->buffer->data_size); 2065 return_error = BR_FAILED_REPLY; 2066 goto err_bad_offset; 2067 } 2068 2069 hdr = (struct binder_object_header *)(t->buffer->data + *offp); 2070 off_min = *offp + object_size; 2071 switch (hdr->type) { 2072 case BINDER_TYPE_BINDER: 2073 case BINDER_TYPE_WEAK_BINDER: { 2074 struct flat_binder_object *fp; 2075 2076 fp = to_flat_binder_object(hdr); 2077 ret = binder_translate_binder(fp, t, thread); 2078 if (ret < 0) { 2079 return_error = BR_FAILED_REPLY; 2080 goto err_translate_failed; 2081 } 2082 } break; 2083 case BINDER_TYPE_HANDLE: 2084 case BINDER_TYPE_WEAK_HANDLE: { 2085 struct flat_binder_object *fp; 2086 2087 fp = to_flat_binder_object(hdr); 2088 ret = binder_translate_handle(fp, t, thread); 2089 if (ret < 0) { 2090 return_error = BR_FAILED_REPLY; 2091 goto err_translate_failed; 2092 } 2093 } break; 2094 2095 case BINDER_TYPE_FD: { 2096 struct binder_fd_object *fp = to_binder_fd_object(hdr); 2097 int target_fd = binder_translate_fd(fp->fd, t, thread, 2098 in_reply_to); 2099 2100 if (target_fd < 0) { 2101 return_error = BR_FAILED_REPLY; 2102 goto err_translate_failed; 2103 } 2104 fp->pad_binder = 0; 2105 fp->fd = target_fd; 2106 } break; 2107 case BINDER_TYPE_FDA: { 2108 struct binder_fd_array_object *fda = 2109 to_binder_fd_array_object(hdr); 2110 struct binder_buffer_object *parent = 2111 binder_validate_ptr(t->buffer, fda->parent, 2112 off_start, 2113 offp - off_start); 2114 if (!parent) { 2115 binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 2116 proc->pid, thread->pid); 2117 return_error = BR_FAILED_REPLY; 2118 goto err_bad_parent; 2119 } 2120 if (!binder_validate_fixup(t->buffer, off_start, 2121 parent, fda->parent_offset, 2122 last_fixup_obj, 2123 last_fixup_min_off)) { 2124 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", 2125 proc->pid, thread->pid); 2126 return_error = BR_FAILED_REPLY; 2127 goto err_bad_parent; 2128 } 2129 ret = binder_translate_fd_array(fda, parent, t, thread, 2130 in_reply_to); 2131 if (ret < 0) { 2132 return_error = BR_FAILED_REPLY; 2133 goto err_translate_failed; 2134 } 2135 last_fixup_obj = parent; 2136 last_fixup_min_off = 2137 fda->parent_offset + sizeof(u32) * fda->num_fds; 2138 } break; 2139 case BINDER_TYPE_PTR: { 2140 struct binder_buffer_object *bp = 2141 to_binder_buffer_object(hdr); 2142 size_t buf_left = sg_buf_end - sg_bufp; 2143 2144 if (bp->length > buf_left) { 2145 binder_user_error("%d:%d got transaction with too large buffer\n", 2146 proc->pid, thread->pid); 2147 return_error = BR_FAILED_REPLY; 2148 goto err_bad_offset; 2149 } 2150 if (copy_from_user(sg_bufp, 2151 (const void __user *)(uintptr_t) 2152 bp->buffer, bp->length)) { 2153 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 2154 proc->pid, thread->pid); 2155 return_error = BR_FAILED_REPLY; 2156 goto err_copy_data_failed; 2157 } 2158 /* Fixup buffer pointer to target proc address space */ 2159 bp->buffer = (uintptr_t)sg_bufp + 2160 target_proc->user_buffer_offset; 2161 sg_bufp += ALIGN(bp->length, sizeof(u64)); 2162 2163 ret = binder_fixup_parent(t, thread, bp, off_start, 2164 offp - off_start, 2165 last_fixup_obj, 2166 last_fixup_min_off); 2167 if (ret < 0) { 2168 return_error = BR_FAILED_REPLY; 2169 goto err_translate_failed; 2170 } 2171 last_fixup_obj = bp; 2172 last_fixup_min_off = 0; 2173 } break; 2174 default: 2175 binder_user_error("%d:%d got transaction with invalid object type, %x\n", 2176 proc->pid, thread->pid, hdr->type); 2177 return_error = BR_FAILED_REPLY; 2178 goto err_bad_object_type; 2179 } 2180 } 2181 if (reply) { 2182 BUG_ON(t->buffer->async_transaction != 0); 2183 binder_pop_transaction(target_thread, in_reply_to); 2184 } else if (!(t->flags & TF_ONE_WAY)) { 2185 BUG_ON(t->buffer->async_transaction != 0); 2186 t->need_reply = 1; 2187 t->from_parent = thread->transaction_stack; 2188 thread->transaction_stack = t; 2189 } else { 2190 BUG_ON(target_node == NULL); 2191 BUG_ON(t->buffer->async_transaction != 1); 2192 if (target_node->has_async_transaction) { 2193 target_list = &target_node->async_todo; 2194 target_wait = NULL; 2195 } else 2196 target_node->has_async_transaction = 1; 2197 } 2198 t->work.type = BINDER_WORK_TRANSACTION; 2199 list_add_tail(&t->work.entry, target_list); 2200 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; 2201 list_add_tail(&tcomplete->entry, &thread->todo); 2202 if (target_wait) 2203 wake_up_interruptible(target_wait); 2204 return; 2205 2206 err_translate_failed: 2207 err_bad_object_type: 2208 err_bad_offset: 2209 err_bad_parent: 2210 err_copy_data_failed: 2211 trace_binder_transaction_failed_buffer_release(t->buffer); 2212 binder_transaction_buffer_release(target_proc, t->buffer, offp); 2213 t->buffer->transaction = NULL; 2214 binder_free_buf(target_proc, t->buffer); 2215 err_binder_alloc_buf_failed: 2216 kfree(tcomplete); 2217 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 2218 err_alloc_tcomplete_failed: 2219 kfree(t); 2220 binder_stats_deleted(BINDER_STAT_TRANSACTION); 2221 err_alloc_t_failed: 2222 err_bad_call_stack: 2223 err_empty_call_stack: 2224 err_dead_binder: 2225 err_invalid_target_handle: 2226 err_no_context_mgr_node: 2227 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 2228 "%d:%d transaction failed %d, size %lld-%lld\n", 2229 proc->pid, thread->pid, return_error, 2230 (u64)tr->data_size, (u64)tr->offsets_size); 2231 2232 { 2233 struct binder_transaction_log_entry *fe; 2234 2235 fe = binder_transaction_log_add(&binder_transaction_log_failed); 2236 *fe = *e; 2237 } 2238 2239 BUG_ON(thread->return_error != BR_OK); 2240 if (in_reply_to) { 2241 thread->return_error = BR_TRANSACTION_COMPLETE; 2242 binder_send_failed_reply(in_reply_to, return_error); 2243 } else 2244 thread->return_error = return_error; 2245 } 2246 2247 static int binder_thread_write(struct binder_proc *proc, 2248 struct binder_thread *thread, 2249 binder_uintptr_t binder_buffer, size_t size, 2250 binder_size_t *consumed) 2251 { 2252 uint32_t cmd; 2253 struct binder_context *context = proc->context; 2254 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 2255 void __user *ptr = buffer + *consumed; 2256 void __user *end = buffer + size; 2257 2258 while (ptr < end && thread->return_error == BR_OK) { 2259 if (get_user(cmd, (uint32_t __user *)ptr)) 2260 return -EFAULT; 2261 ptr += sizeof(uint32_t); 2262 trace_binder_command(cmd); 2263 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { 2264 binder_stats.bc[_IOC_NR(cmd)]++; 2265 proc->stats.bc[_IOC_NR(cmd)]++; 2266 thread->stats.bc[_IOC_NR(cmd)]++; 2267 } 2268 switch (cmd) { 2269 case BC_INCREFS: 2270 case BC_ACQUIRE: 2271 case BC_RELEASE: 2272 case BC_DECREFS: { 2273 uint32_t target; 2274 struct binder_ref *ref; 2275 const char *debug_string; 2276 2277 if (get_user(target, (uint32_t __user *)ptr)) 2278 return -EFAULT; 2279 ptr += sizeof(uint32_t); 2280 if (target == 0 && context->binder_context_mgr_node && 2281 (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) { 2282 ref = binder_get_ref_for_node(proc, 2283 context->binder_context_mgr_node); 2284 if (ref->desc != target) { 2285 binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n", 2286 proc->pid, thread->pid, 2287 ref->desc); 2288 } 2289 } else 2290 ref = binder_get_ref(proc, target, 2291 cmd == BC_ACQUIRE || 2292 cmd == BC_RELEASE); 2293 if (ref == NULL) { 2294 binder_user_error("%d:%d refcount change on invalid ref %d\n", 2295 proc->pid, thread->pid, target); 2296 break; 2297 } 2298 switch (cmd) { 2299 case BC_INCREFS: 2300 debug_string = "IncRefs"; 2301 binder_inc_ref(ref, 0, NULL); 2302 break; 2303 case BC_ACQUIRE: 2304 debug_string = "Acquire"; 2305 binder_inc_ref(ref, 1, NULL); 2306 break; 2307 case BC_RELEASE: 2308 debug_string = "Release"; 2309 binder_dec_ref(ref, 1); 2310 break; 2311 case BC_DECREFS: 2312 default: 2313 debug_string = "DecRefs"; 2314 binder_dec_ref(ref, 0); 2315 break; 2316 } 2317 binder_debug(BINDER_DEBUG_USER_REFS, 2318 "%d:%d %s ref %d desc %d s %d w %d for node %d\n", 2319 proc->pid, thread->pid, debug_string, ref->debug_id, 2320 ref->desc, ref->strong, ref->weak, ref->node->debug_id); 2321 break; 2322 } 2323 case BC_INCREFS_DONE: 2324 case BC_ACQUIRE_DONE: { 2325 binder_uintptr_t node_ptr; 2326 binder_uintptr_t cookie; 2327 struct binder_node *node; 2328 2329 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr)) 2330 return -EFAULT; 2331 ptr += sizeof(binder_uintptr_t); 2332 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 2333 return -EFAULT; 2334 ptr += sizeof(binder_uintptr_t); 2335 node = binder_get_node(proc, node_ptr); 2336 if (node == NULL) { 2337 binder_user_error("%d:%d %s u%016llx no match\n", 2338 proc->pid, thread->pid, 2339 cmd == BC_INCREFS_DONE ? 2340 "BC_INCREFS_DONE" : 2341 "BC_ACQUIRE_DONE", 2342 (u64)node_ptr); 2343 break; 2344 } 2345 if (cookie != node->cookie) { 2346 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n", 2347 proc->pid, thread->pid, 2348 cmd == BC_INCREFS_DONE ? 2349 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 2350 (u64)node_ptr, node->debug_id, 2351 (u64)cookie, (u64)node->cookie); 2352 break; 2353 } 2354 if (cmd == BC_ACQUIRE_DONE) { 2355 if (node->pending_strong_ref == 0) { 2356 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", 2357 proc->pid, thread->pid, 2358 node->debug_id); 2359 break; 2360 } 2361 node->pending_strong_ref = 0; 2362 } else { 2363 if (node->pending_weak_ref == 0) { 2364 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", 2365 proc->pid, thread->pid, 2366 node->debug_id); 2367 break; 2368 } 2369 node->pending_weak_ref = 0; 2370 } 2371 binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0); 2372 binder_debug(BINDER_DEBUG_USER_REFS, 2373 "%d:%d %s node %d ls %d lw %d\n", 2374 proc->pid, thread->pid, 2375 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 2376 node->debug_id, node->local_strong_refs, node->local_weak_refs); 2377 break; 2378 } 2379 case BC_ATTEMPT_ACQUIRE: 2380 pr_err("BC_ATTEMPT_ACQUIRE not supported\n"); 2381 return -EINVAL; 2382 case BC_ACQUIRE_RESULT: 2383 pr_err("BC_ACQUIRE_RESULT not supported\n"); 2384 return -EINVAL; 2385 2386 case BC_FREE_BUFFER: { 2387 binder_uintptr_t data_ptr; 2388 struct binder_buffer *buffer; 2389 2390 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr)) 2391 return -EFAULT; 2392 ptr += sizeof(binder_uintptr_t); 2393 2394 buffer = binder_buffer_lookup(proc, data_ptr); 2395 if (buffer == NULL) { 2396 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n", 2397 proc->pid, thread->pid, (u64)data_ptr); 2398 break; 2399 } 2400 if (!buffer->allow_user_free) { 2401 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n", 2402 proc->pid, thread->pid, (u64)data_ptr); 2403 break; 2404 } 2405 binder_debug(BINDER_DEBUG_FREE_BUFFER, 2406 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n", 2407 proc->pid, thread->pid, (u64)data_ptr, 2408 buffer->debug_id, 2409 buffer->transaction ? "active" : "finished"); 2410 2411 if (buffer->transaction) { 2412 buffer->transaction->buffer = NULL; 2413 buffer->transaction = NULL; 2414 } 2415 if (buffer->async_transaction && buffer->target_node) { 2416 BUG_ON(!buffer->target_node->has_async_transaction); 2417 if (list_empty(&buffer->target_node->async_todo)) 2418 buffer->target_node->has_async_transaction = 0; 2419 else 2420 list_move_tail(buffer->target_node->async_todo.next, &thread->todo); 2421 } 2422 trace_binder_transaction_buffer_release(buffer); 2423 binder_transaction_buffer_release(proc, buffer, NULL); 2424 binder_free_buf(proc, buffer); 2425 break; 2426 } 2427 2428 case BC_TRANSACTION_SG: 2429 case BC_REPLY_SG: { 2430 struct binder_transaction_data_sg tr; 2431 2432 if (copy_from_user(&tr, ptr, sizeof(tr))) 2433 return -EFAULT; 2434 ptr += sizeof(tr); 2435 binder_transaction(proc, thread, &tr.transaction_data, 2436 cmd == BC_REPLY_SG, tr.buffers_size); 2437 break; 2438 } 2439 case BC_TRANSACTION: 2440 case BC_REPLY: { 2441 struct binder_transaction_data tr; 2442 2443 if (copy_from_user(&tr, ptr, sizeof(tr))) 2444 return -EFAULT; 2445 ptr += sizeof(tr); 2446 binder_transaction(proc, thread, &tr, 2447 cmd == BC_REPLY, 0); 2448 break; 2449 } 2450 2451 case BC_REGISTER_LOOPER: 2452 binder_debug(BINDER_DEBUG_THREADS, 2453 "%d:%d BC_REGISTER_LOOPER\n", 2454 proc->pid, thread->pid); 2455 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { 2456 thread->looper |= BINDER_LOOPER_STATE_INVALID; 2457 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", 2458 proc->pid, thread->pid); 2459 } else if (proc->requested_threads == 0) { 2460 thread->looper |= BINDER_LOOPER_STATE_INVALID; 2461 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n", 2462 proc->pid, thread->pid); 2463 } else { 2464 proc->requested_threads--; 2465 proc->requested_threads_started++; 2466 } 2467 thread->looper |= BINDER_LOOPER_STATE_REGISTERED; 2468 break; 2469 case BC_ENTER_LOOPER: 2470 binder_debug(BINDER_DEBUG_THREADS, 2471 "%d:%d BC_ENTER_LOOPER\n", 2472 proc->pid, thread->pid); 2473 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { 2474 thread->looper |= BINDER_LOOPER_STATE_INVALID; 2475 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n", 2476 proc->pid, thread->pid); 2477 } 2478 thread->looper |= BINDER_LOOPER_STATE_ENTERED; 2479 break; 2480 case BC_EXIT_LOOPER: 2481 binder_debug(BINDER_DEBUG_THREADS, 2482 "%d:%d BC_EXIT_LOOPER\n", 2483 proc->pid, thread->pid); 2484 thread->looper |= BINDER_LOOPER_STATE_EXITED; 2485 break; 2486 2487 case BC_REQUEST_DEATH_NOTIFICATION: 2488 case BC_CLEAR_DEATH_NOTIFICATION: { 2489 uint32_t target; 2490 binder_uintptr_t cookie; 2491 struct binder_ref *ref; 2492 struct binder_ref_death *death; 2493 2494 if (get_user(target, (uint32_t __user *)ptr)) 2495 return -EFAULT; 2496 ptr += sizeof(uint32_t); 2497 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 2498 return -EFAULT; 2499 ptr += sizeof(binder_uintptr_t); 2500 ref = binder_get_ref(proc, target, false); 2501 if (ref == NULL) { 2502 binder_user_error("%d:%d %s invalid ref %d\n", 2503 proc->pid, thread->pid, 2504 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 2505 "BC_REQUEST_DEATH_NOTIFICATION" : 2506 "BC_CLEAR_DEATH_NOTIFICATION", 2507 target); 2508 break; 2509 } 2510 2511 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 2512 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n", 2513 proc->pid, thread->pid, 2514 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 2515 "BC_REQUEST_DEATH_NOTIFICATION" : 2516 "BC_CLEAR_DEATH_NOTIFICATION", 2517 (u64)cookie, ref->debug_id, ref->desc, 2518 ref->strong, ref->weak, ref->node->debug_id); 2519 2520 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 2521 if (ref->death) { 2522 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", 2523 proc->pid, thread->pid); 2524 break; 2525 } 2526 death = kzalloc(sizeof(*death), GFP_KERNEL); 2527 if (death == NULL) { 2528 thread->return_error = BR_ERROR; 2529 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 2530 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", 2531 proc->pid, thread->pid); 2532 break; 2533 } 2534 binder_stats_created(BINDER_STAT_DEATH); 2535 INIT_LIST_HEAD(&death->work.entry); 2536 death->cookie = cookie; 2537 ref->death = death; 2538 if (ref->node->proc == NULL) { 2539 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 2540 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { 2541 list_add_tail(&ref->death->work.entry, &thread->todo); 2542 } else { 2543 list_add_tail(&ref->death->work.entry, &proc->todo); 2544 wake_up_interruptible(&proc->wait); 2545 } 2546 } 2547 } else { 2548 if (ref->death == NULL) { 2549 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", 2550 proc->pid, thread->pid); 2551 break; 2552 } 2553 death = ref->death; 2554 if (death->cookie != cookie) { 2555 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n", 2556 proc->pid, thread->pid, 2557 (u64)death->cookie, 2558 (u64)cookie); 2559 break; 2560 } 2561 ref->death = NULL; 2562 if (list_empty(&death->work.entry)) { 2563 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 2564 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { 2565 list_add_tail(&death->work.entry, &thread->todo); 2566 } else { 2567 list_add_tail(&death->work.entry, &proc->todo); 2568 wake_up_interruptible(&proc->wait); 2569 } 2570 } else { 2571 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); 2572 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; 2573 } 2574 } 2575 } break; 2576 case BC_DEAD_BINDER_DONE: { 2577 struct binder_work *w; 2578 binder_uintptr_t cookie; 2579 struct binder_ref_death *death = NULL; 2580 2581 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 2582 return -EFAULT; 2583 2584 ptr += sizeof(cookie); 2585 list_for_each_entry(w, &proc->delivered_death, entry) { 2586 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); 2587 2588 if (tmp_death->cookie == cookie) { 2589 death = tmp_death; 2590 break; 2591 } 2592 } 2593 binder_debug(BINDER_DEBUG_DEAD_BINDER, 2594 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n", 2595 proc->pid, thread->pid, (u64)cookie, 2596 death); 2597 if (death == NULL) { 2598 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n", 2599 proc->pid, thread->pid, (u64)cookie); 2600 break; 2601 } 2602 2603 list_del_init(&death->work.entry); 2604 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { 2605 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 2606 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { 2607 list_add_tail(&death->work.entry, &thread->todo); 2608 } else { 2609 list_add_tail(&death->work.entry, &proc->todo); 2610 wake_up_interruptible(&proc->wait); 2611 } 2612 } 2613 } break; 2614 2615 default: 2616 pr_err("%d:%d unknown command %d\n", 2617 proc->pid, thread->pid, cmd); 2618 return -EINVAL; 2619 } 2620 *consumed = ptr - buffer; 2621 } 2622 return 0; 2623 } 2624 2625 static void binder_stat_br(struct binder_proc *proc, 2626 struct binder_thread *thread, uint32_t cmd) 2627 { 2628 trace_binder_return(cmd); 2629 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { 2630 binder_stats.br[_IOC_NR(cmd)]++; 2631 proc->stats.br[_IOC_NR(cmd)]++; 2632 thread->stats.br[_IOC_NR(cmd)]++; 2633 } 2634 } 2635 2636 static int binder_has_proc_work(struct binder_proc *proc, 2637 struct binder_thread *thread) 2638 { 2639 return !list_empty(&proc->todo) || 2640 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); 2641 } 2642 2643 static int binder_has_thread_work(struct binder_thread *thread) 2644 { 2645 return !list_empty(&thread->todo) || thread->return_error != BR_OK || 2646 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); 2647 } 2648 2649 static int binder_thread_read(struct binder_proc *proc, 2650 struct binder_thread *thread, 2651 binder_uintptr_t binder_buffer, size_t size, 2652 binder_size_t *consumed, int non_block) 2653 { 2654 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 2655 void __user *ptr = buffer + *consumed; 2656 void __user *end = buffer + size; 2657 2658 int ret = 0; 2659 int wait_for_proc_work; 2660 2661 if (*consumed == 0) { 2662 if (put_user(BR_NOOP, (uint32_t __user *)ptr)) 2663 return -EFAULT; 2664 ptr += sizeof(uint32_t); 2665 } 2666 2667 retry: 2668 wait_for_proc_work = thread->transaction_stack == NULL && 2669 list_empty(&thread->todo); 2670 2671 if (thread->return_error != BR_OK && ptr < end) { 2672 if (thread->return_error2 != BR_OK) { 2673 if (put_user(thread->return_error2, (uint32_t __user *)ptr)) 2674 return -EFAULT; 2675 ptr += sizeof(uint32_t); 2676 binder_stat_br(proc, thread, thread->return_error2); 2677 if (ptr == end) 2678 goto done; 2679 thread->return_error2 = BR_OK; 2680 } 2681 if (put_user(thread->return_error, (uint32_t __user *)ptr)) 2682 return -EFAULT; 2683 ptr += sizeof(uint32_t); 2684 binder_stat_br(proc, thread, thread->return_error); 2685 thread->return_error = BR_OK; 2686 goto done; 2687 } 2688 2689 2690 thread->looper |= BINDER_LOOPER_STATE_WAITING; 2691 if (wait_for_proc_work) 2692 proc->ready_threads++; 2693 2694 binder_unlock(__func__); 2695 2696 trace_binder_wait_for_work(wait_for_proc_work, 2697 !!thread->transaction_stack, 2698 !list_empty(&thread->todo)); 2699 if (wait_for_proc_work) { 2700 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 2701 BINDER_LOOPER_STATE_ENTERED))) { 2702 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n", 2703 proc->pid, thread->pid, thread->looper); 2704 wait_event_interruptible(binder_user_error_wait, 2705 binder_stop_on_user_error < 2); 2706 } 2707 binder_set_nice(proc->default_priority); 2708 if (non_block) { 2709 if (!binder_has_proc_work(proc, thread)) 2710 ret = -EAGAIN; 2711 } else 2712 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread)); 2713 } else { 2714 if (non_block) { 2715 if (!binder_has_thread_work(thread)) 2716 ret = -EAGAIN; 2717 } else 2718 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread)); 2719 } 2720 2721 binder_lock(__func__); 2722 2723 if (wait_for_proc_work) 2724 proc->ready_threads--; 2725 thread->looper &= ~BINDER_LOOPER_STATE_WAITING; 2726 2727 if (ret) 2728 return ret; 2729 2730 while (1) { 2731 uint32_t cmd; 2732 struct binder_transaction_data tr; 2733 struct binder_work *w; 2734 struct binder_transaction *t = NULL; 2735 2736 if (!list_empty(&thread->todo)) { 2737 w = list_first_entry(&thread->todo, struct binder_work, 2738 entry); 2739 } else if (!list_empty(&proc->todo) && wait_for_proc_work) { 2740 w = list_first_entry(&proc->todo, struct binder_work, 2741 entry); 2742 } else { 2743 /* no data added */ 2744 if (ptr - buffer == 4 && 2745 !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) 2746 goto retry; 2747 break; 2748 } 2749 2750 if (end - ptr < sizeof(tr) + 4) 2751 break; 2752 2753 switch (w->type) { 2754 case BINDER_WORK_TRANSACTION: { 2755 t = container_of(w, struct binder_transaction, work); 2756 } break; 2757 case BINDER_WORK_TRANSACTION_COMPLETE: { 2758 cmd = BR_TRANSACTION_COMPLETE; 2759 if (put_user(cmd, (uint32_t __user *)ptr)) 2760 return -EFAULT; 2761 ptr += sizeof(uint32_t); 2762 2763 binder_stat_br(proc, thread, cmd); 2764 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, 2765 "%d:%d BR_TRANSACTION_COMPLETE\n", 2766 proc->pid, thread->pid); 2767 2768 list_del(&w->entry); 2769 kfree(w); 2770 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 2771 } break; 2772 case BINDER_WORK_NODE: { 2773 struct binder_node *node = container_of(w, struct binder_node, work); 2774 uint32_t cmd = BR_NOOP; 2775 const char *cmd_name; 2776 int strong = node->internal_strong_refs || node->local_strong_refs; 2777 int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong; 2778 2779 if (weak && !node->has_weak_ref) { 2780 cmd = BR_INCREFS; 2781 cmd_name = "BR_INCREFS"; 2782 node->has_weak_ref = 1; 2783 node->pending_weak_ref = 1; 2784 node->local_weak_refs++; 2785 } else if (strong && !node->has_strong_ref) { 2786 cmd = BR_ACQUIRE; 2787 cmd_name = "BR_ACQUIRE"; 2788 node->has_strong_ref = 1; 2789 node->pending_strong_ref = 1; 2790 node->local_strong_refs++; 2791 } else if (!strong && node->has_strong_ref) { 2792 cmd = BR_RELEASE; 2793 cmd_name = "BR_RELEASE"; 2794 node->has_strong_ref = 0; 2795 } else if (!weak && node->has_weak_ref) { 2796 cmd = BR_DECREFS; 2797 cmd_name = "BR_DECREFS"; 2798 node->has_weak_ref = 0; 2799 } 2800 if (cmd != BR_NOOP) { 2801 if (put_user(cmd, (uint32_t __user *)ptr)) 2802 return -EFAULT; 2803 ptr += sizeof(uint32_t); 2804 if (put_user(node->ptr, 2805 (binder_uintptr_t __user *)ptr)) 2806 return -EFAULT; 2807 ptr += sizeof(binder_uintptr_t); 2808 if (put_user(node->cookie, 2809 (binder_uintptr_t __user *)ptr)) 2810 return -EFAULT; 2811 ptr += sizeof(binder_uintptr_t); 2812 2813 binder_stat_br(proc, thread, cmd); 2814 binder_debug(BINDER_DEBUG_USER_REFS, 2815 "%d:%d %s %d u%016llx c%016llx\n", 2816 proc->pid, thread->pid, cmd_name, 2817 node->debug_id, 2818 (u64)node->ptr, (u64)node->cookie); 2819 } else { 2820 list_del_init(&w->entry); 2821 if (!weak && !strong) { 2822 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 2823 "%d:%d node %d u%016llx c%016llx deleted\n", 2824 proc->pid, thread->pid, 2825 node->debug_id, 2826 (u64)node->ptr, 2827 (u64)node->cookie); 2828 rb_erase(&node->rb_node, &proc->nodes); 2829 kfree(node); 2830 binder_stats_deleted(BINDER_STAT_NODE); 2831 } else { 2832 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 2833 "%d:%d node %d u%016llx c%016llx state unchanged\n", 2834 proc->pid, thread->pid, 2835 node->debug_id, 2836 (u64)node->ptr, 2837 (u64)node->cookie); 2838 } 2839 } 2840 } break; 2841 case BINDER_WORK_DEAD_BINDER: 2842 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 2843 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 2844 struct binder_ref_death *death; 2845 uint32_t cmd; 2846 2847 death = container_of(w, struct binder_ref_death, work); 2848 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) 2849 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; 2850 else 2851 cmd = BR_DEAD_BINDER; 2852 if (put_user(cmd, (uint32_t __user *)ptr)) 2853 return -EFAULT; 2854 ptr += sizeof(uint32_t); 2855 if (put_user(death->cookie, 2856 (binder_uintptr_t __user *)ptr)) 2857 return -EFAULT; 2858 ptr += sizeof(binder_uintptr_t); 2859 binder_stat_br(proc, thread, cmd); 2860 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 2861 "%d:%d %s %016llx\n", 2862 proc->pid, thread->pid, 2863 cmd == BR_DEAD_BINDER ? 2864 "BR_DEAD_BINDER" : 2865 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 2866 (u64)death->cookie); 2867 2868 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { 2869 list_del(&w->entry); 2870 kfree(death); 2871 binder_stats_deleted(BINDER_STAT_DEATH); 2872 } else 2873 list_move(&w->entry, &proc->delivered_death); 2874 if (cmd == BR_DEAD_BINDER) 2875 goto done; /* DEAD_BINDER notifications can cause transactions */ 2876 } break; 2877 } 2878 2879 if (!t) 2880 continue; 2881 2882 BUG_ON(t->buffer == NULL); 2883 if (t->buffer->target_node) { 2884 struct binder_node *target_node = t->buffer->target_node; 2885 2886 tr.target.ptr = target_node->ptr; 2887 tr.cookie = target_node->cookie; 2888 t->saved_priority = task_nice(current); 2889 if (t->priority < target_node->min_priority && 2890 !(t->flags & TF_ONE_WAY)) 2891 binder_set_nice(t->priority); 2892 else if (!(t->flags & TF_ONE_WAY) || 2893 t->saved_priority > target_node->min_priority) 2894 binder_set_nice(target_node->min_priority); 2895 cmd = BR_TRANSACTION; 2896 } else { 2897 tr.target.ptr = 0; 2898 tr.cookie = 0; 2899 cmd = BR_REPLY; 2900 } 2901 tr.code = t->code; 2902 tr.flags = t->flags; 2903 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); 2904 2905 if (t->from) { 2906 struct task_struct *sender = t->from->proc->tsk; 2907 2908 tr.sender_pid = task_tgid_nr_ns(sender, 2909 task_active_pid_ns(current)); 2910 } else { 2911 tr.sender_pid = 0; 2912 } 2913 2914 tr.data_size = t->buffer->data_size; 2915 tr.offsets_size = t->buffer->offsets_size; 2916 tr.data.ptr.buffer = (binder_uintptr_t)( 2917 (uintptr_t)t->buffer->data + 2918 proc->user_buffer_offset); 2919 tr.data.ptr.offsets = tr.data.ptr.buffer + 2920 ALIGN(t->buffer->data_size, 2921 sizeof(void *)); 2922 2923 if (put_user(cmd, (uint32_t __user *)ptr)) 2924 return -EFAULT; 2925 ptr += sizeof(uint32_t); 2926 if (copy_to_user(ptr, &tr, sizeof(tr))) 2927 return -EFAULT; 2928 ptr += sizeof(tr); 2929 2930 trace_binder_transaction_received(t); 2931 binder_stat_br(proc, thread, cmd); 2932 binder_debug(BINDER_DEBUG_TRANSACTION, 2933 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", 2934 proc->pid, thread->pid, 2935 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : 2936 "BR_REPLY", 2937 t->debug_id, t->from ? t->from->proc->pid : 0, 2938 t->from ? t->from->pid : 0, cmd, 2939 t->buffer->data_size, t->buffer->offsets_size, 2940 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets); 2941 2942 list_del(&t->work.entry); 2943 t->buffer->allow_user_free = 1; 2944 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { 2945 t->to_parent = thread->transaction_stack; 2946 t->to_thread = thread; 2947 thread->transaction_stack = t; 2948 } else { 2949 t->buffer->transaction = NULL; 2950 kfree(t); 2951 binder_stats_deleted(BINDER_STAT_TRANSACTION); 2952 } 2953 break; 2954 } 2955 2956 done: 2957 2958 *consumed = ptr - buffer; 2959 if (proc->requested_threads + proc->ready_threads == 0 && 2960 proc->requested_threads_started < proc->max_threads && 2961 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 2962 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ 2963 /*spawn a new thread if we leave this out */) { 2964 proc->requested_threads++; 2965 binder_debug(BINDER_DEBUG_THREADS, 2966 "%d:%d BR_SPAWN_LOOPER\n", 2967 proc->pid, thread->pid); 2968 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) 2969 return -EFAULT; 2970 binder_stat_br(proc, thread, BR_SPAWN_LOOPER); 2971 } 2972 return 0; 2973 } 2974 2975 static void binder_release_work(struct list_head *list) 2976 { 2977 struct binder_work *w; 2978 2979 while (!list_empty(list)) { 2980 w = list_first_entry(list, struct binder_work, entry); 2981 list_del_init(&w->entry); 2982 switch (w->type) { 2983 case BINDER_WORK_TRANSACTION: { 2984 struct binder_transaction *t; 2985 2986 t = container_of(w, struct binder_transaction, work); 2987 if (t->buffer->target_node && 2988 !(t->flags & TF_ONE_WAY)) { 2989 binder_send_failed_reply(t, BR_DEAD_REPLY); 2990 } else { 2991 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 2992 "undelivered transaction %d\n", 2993 t->debug_id); 2994 t->buffer->transaction = NULL; 2995 kfree(t); 2996 binder_stats_deleted(BINDER_STAT_TRANSACTION); 2997 } 2998 } break; 2999 case BINDER_WORK_TRANSACTION_COMPLETE: { 3000 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 3001 "undelivered TRANSACTION_COMPLETE\n"); 3002 kfree(w); 3003 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 3004 } break; 3005 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 3006 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 3007 struct binder_ref_death *death; 3008 3009 death = container_of(w, struct binder_ref_death, work); 3010 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 3011 "undelivered death notification, %016llx\n", 3012 (u64)death->cookie); 3013 kfree(death); 3014 binder_stats_deleted(BINDER_STAT_DEATH); 3015 } break; 3016 default: 3017 pr_err("unexpected work type, %d, not freed\n", 3018 w->type); 3019 break; 3020 } 3021 } 3022 3023 } 3024 3025 static struct binder_thread *binder_get_thread(struct binder_proc *proc) 3026 { 3027 struct binder_thread *thread = NULL; 3028 struct rb_node *parent = NULL; 3029 struct rb_node **p = &proc->threads.rb_node; 3030 3031 while (*p) { 3032 parent = *p; 3033 thread = rb_entry(parent, struct binder_thread, rb_node); 3034 3035 if (current->pid < thread->pid) 3036 p = &(*p)->rb_left; 3037 else if (current->pid > thread->pid) 3038 p = &(*p)->rb_right; 3039 else 3040 break; 3041 } 3042 if (*p == NULL) { 3043 thread = kzalloc(sizeof(*thread), GFP_KERNEL); 3044 if (thread == NULL) 3045 return NULL; 3046 binder_stats_created(BINDER_STAT_THREAD); 3047 thread->proc = proc; 3048 thread->pid = current->pid; 3049 init_waitqueue_head(&thread->wait); 3050 INIT_LIST_HEAD(&thread->todo); 3051 rb_link_node(&thread->rb_node, parent, p); 3052 rb_insert_color(&thread->rb_node, &proc->threads); 3053 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; 3054 thread->return_error = BR_OK; 3055 thread->return_error2 = BR_OK; 3056 } 3057 return thread; 3058 } 3059 3060 static int binder_free_thread(struct binder_proc *proc, 3061 struct binder_thread *thread) 3062 { 3063 struct binder_transaction *t; 3064 struct binder_transaction *send_reply = NULL; 3065 int active_transactions = 0; 3066 3067 rb_erase(&thread->rb_node, &proc->threads); 3068 t = thread->transaction_stack; 3069 if (t && t->to_thread == thread) 3070 send_reply = t; 3071 while (t) { 3072 active_transactions++; 3073 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 3074 "release %d:%d transaction %d %s, still active\n", 3075 proc->pid, thread->pid, 3076 t->debug_id, 3077 (t->to_thread == thread) ? "in" : "out"); 3078 3079 if (t->to_thread == thread) { 3080 t->to_proc = NULL; 3081 t->to_thread = NULL; 3082 if (t->buffer) { 3083 t->buffer->transaction = NULL; 3084 t->buffer = NULL; 3085 } 3086 t = t->to_parent; 3087 } else if (t->from == thread) { 3088 t->from = NULL; 3089 t = t->from_parent; 3090 } else 3091 BUG(); 3092 } 3093 if (send_reply) 3094 binder_send_failed_reply(send_reply, BR_DEAD_REPLY); 3095 binder_release_work(&thread->todo); 3096 kfree(thread); 3097 binder_stats_deleted(BINDER_STAT_THREAD); 3098 return active_transactions; 3099 } 3100 3101 static unsigned int binder_poll(struct file *filp, 3102 struct poll_table_struct *wait) 3103 { 3104 struct binder_proc *proc = filp->private_data; 3105 struct binder_thread *thread = NULL; 3106 int wait_for_proc_work; 3107 3108 binder_lock(__func__); 3109 3110 thread = binder_get_thread(proc); 3111 3112 wait_for_proc_work = thread->transaction_stack == NULL && 3113 list_empty(&thread->todo) && thread->return_error == BR_OK; 3114 3115 binder_unlock(__func__); 3116 3117 if (wait_for_proc_work) { 3118 if (binder_has_proc_work(proc, thread)) 3119 return POLLIN; 3120 poll_wait(filp, &proc->wait, wait); 3121 if (binder_has_proc_work(proc, thread)) 3122 return POLLIN; 3123 } else { 3124 if (binder_has_thread_work(thread)) 3125 return POLLIN; 3126 poll_wait(filp, &thread->wait, wait); 3127 if (binder_has_thread_work(thread)) 3128 return POLLIN; 3129 } 3130 return 0; 3131 } 3132 3133 static int binder_ioctl_write_read(struct file *filp, 3134 unsigned int cmd, unsigned long arg, 3135 struct binder_thread *thread) 3136 { 3137 int ret = 0; 3138 struct binder_proc *proc = filp->private_data; 3139 unsigned int size = _IOC_SIZE(cmd); 3140 void __user *ubuf = (void __user *)arg; 3141 struct binder_write_read bwr; 3142 3143 if (size != sizeof(struct binder_write_read)) { 3144 ret = -EINVAL; 3145 goto out; 3146 } 3147 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { 3148 ret = -EFAULT; 3149 goto out; 3150 } 3151 binder_debug(BINDER_DEBUG_READ_WRITE, 3152 "%d:%d write %lld at %016llx, read %lld at %016llx\n", 3153 proc->pid, thread->pid, 3154 (u64)bwr.write_size, (u64)bwr.write_buffer, 3155 (u64)bwr.read_size, (u64)bwr.read_buffer); 3156 3157 if (bwr.write_size > 0) { 3158 ret = binder_thread_write(proc, thread, 3159 bwr.write_buffer, 3160 bwr.write_size, 3161 &bwr.write_consumed); 3162 trace_binder_write_done(ret); 3163 if (ret < 0) { 3164 bwr.read_consumed = 0; 3165 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 3166 ret = -EFAULT; 3167 goto out; 3168 } 3169 } 3170 if (bwr.read_size > 0) { 3171 ret = binder_thread_read(proc, thread, bwr.read_buffer, 3172 bwr.read_size, 3173 &bwr.read_consumed, 3174 filp->f_flags & O_NONBLOCK); 3175 trace_binder_read_done(ret); 3176 if (!list_empty(&proc->todo)) 3177 wake_up_interruptible(&proc->wait); 3178 if (ret < 0) { 3179 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 3180 ret = -EFAULT; 3181 goto out; 3182 } 3183 } 3184 binder_debug(BINDER_DEBUG_READ_WRITE, 3185 "%d:%d wrote %lld of %lld, read return %lld of %lld\n", 3186 proc->pid, thread->pid, 3187 (u64)bwr.write_consumed, (u64)bwr.write_size, 3188 (u64)bwr.read_consumed, (u64)bwr.read_size); 3189 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { 3190 ret = -EFAULT; 3191 goto out; 3192 } 3193 out: 3194 return ret; 3195 } 3196 3197 static int binder_ioctl_set_ctx_mgr(struct file *filp) 3198 { 3199 int ret = 0; 3200 struct binder_proc *proc = filp->private_data; 3201 struct binder_context *context = proc->context; 3202 3203 kuid_t curr_euid = current_euid(); 3204 3205 if (context->binder_context_mgr_node) { 3206 pr_err("BINDER_SET_CONTEXT_MGR already set\n"); 3207 ret = -EBUSY; 3208 goto out; 3209 } 3210 ret = security_binder_set_context_mgr(proc->tsk); 3211 if (ret < 0) 3212 goto out; 3213 if (uid_valid(context->binder_context_mgr_uid)) { 3214 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) { 3215 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", 3216 from_kuid(&init_user_ns, curr_euid), 3217 from_kuid(&init_user_ns, 3218 context->binder_context_mgr_uid)); 3219 ret = -EPERM; 3220 goto out; 3221 } 3222 } else { 3223 context->binder_context_mgr_uid = curr_euid; 3224 } 3225 context->binder_context_mgr_node = binder_new_node(proc, 0, 0); 3226 if (!context->binder_context_mgr_node) { 3227 ret = -ENOMEM; 3228 goto out; 3229 } 3230 context->binder_context_mgr_node->local_weak_refs++; 3231 context->binder_context_mgr_node->local_strong_refs++; 3232 context->binder_context_mgr_node->has_strong_ref = 1; 3233 context->binder_context_mgr_node->has_weak_ref = 1; 3234 out: 3235 return ret; 3236 } 3237 3238 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 3239 { 3240 int ret; 3241 struct binder_proc *proc = filp->private_data; 3242 struct binder_thread *thread; 3243 unsigned int size = _IOC_SIZE(cmd); 3244 void __user *ubuf = (void __user *)arg; 3245 3246 /*pr_info("binder_ioctl: %d:%d %x %lx\n", 3247 proc->pid, current->pid, cmd, arg);*/ 3248 3249 if (unlikely(current->mm != proc->vma_vm_mm)) { 3250 pr_err("current mm mismatch proc mm\n"); 3251 return -EINVAL; 3252 } 3253 trace_binder_ioctl(cmd, arg); 3254 3255 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 3256 if (ret) 3257 goto err_unlocked; 3258 3259 binder_lock(__func__); 3260 thread = binder_get_thread(proc); 3261 if (thread == NULL) { 3262 ret = -ENOMEM; 3263 goto err; 3264 } 3265 3266 switch (cmd) { 3267 case BINDER_WRITE_READ: 3268 ret = binder_ioctl_write_read(filp, cmd, arg, thread); 3269 if (ret) 3270 goto err; 3271 break; 3272 case BINDER_SET_MAX_THREADS: 3273 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) { 3274 ret = -EINVAL; 3275 goto err; 3276 } 3277 break; 3278 case BINDER_SET_CONTEXT_MGR: 3279 ret = binder_ioctl_set_ctx_mgr(filp); 3280 if (ret) 3281 goto err; 3282 break; 3283 case BINDER_THREAD_EXIT: 3284 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", 3285 proc->pid, thread->pid); 3286 binder_free_thread(proc, thread); 3287 thread = NULL; 3288 break; 3289 case BINDER_VERSION: { 3290 struct binder_version __user *ver = ubuf; 3291 3292 if (size != sizeof(struct binder_version)) { 3293 ret = -EINVAL; 3294 goto err; 3295 } 3296 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, 3297 &ver->protocol_version)) { 3298 ret = -EINVAL; 3299 goto err; 3300 } 3301 break; 3302 } 3303 default: 3304 ret = -EINVAL; 3305 goto err; 3306 } 3307 ret = 0; 3308 err: 3309 if (thread) 3310 thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN; 3311 binder_unlock(__func__); 3312 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 3313 if (ret && ret != -ERESTARTSYS) 3314 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); 3315 err_unlocked: 3316 trace_binder_ioctl_done(ret); 3317 return ret; 3318 } 3319 3320 static void binder_vma_open(struct vm_area_struct *vma) 3321 { 3322 struct binder_proc *proc = vma->vm_private_data; 3323 3324 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 3325 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 3326 proc->pid, vma->vm_start, vma->vm_end, 3327 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 3328 (unsigned long)pgprot_val(vma->vm_page_prot)); 3329 } 3330 3331 static void binder_vma_close(struct vm_area_struct *vma) 3332 { 3333 struct binder_proc *proc = vma->vm_private_data; 3334 3335 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 3336 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 3337 proc->pid, vma->vm_start, vma->vm_end, 3338 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 3339 (unsigned long)pgprot_val(vma->vm_page_prot)); 3340 proc->vma = NULL; 3341 proc->vma_vm_mm = NULL; 3342 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); 3343 } 3344 3345 static int binder_vm_fault(struct vm_fault *vmf) 3346 { 3347 return VM_FAULT_SIGBUS; 3348 } 3349 3350 static const struct vm_operations_struct binder_vm_ops = { 3351 .open = binder_vma_open, 3352 .close = binder_vma_close, 3353 .fault = binder_vm_fault, 3354 }; 3355 3356 static int binder_mmap(struct file *filp, struct vm_area_struct *vma) 3357 { 3358 int ret; 3359 struct vm_struct *area; 3360 struct binder_proc *proc = filp->private_data; 3361 const char *failure_string; 3362 struct binder_buffer *buffer; 3363 3364 if (proc->tsk != current) 3365 return -EINVAL; 3366 3367 if ((vma->vm_end - vma->vm_start) > SZ_4M) 3368 vma->vm_end = vma->vm_start + SZ_4M; 3369 3370 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 3371 "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", 3372 proc->pid, vma->vm_start, vma->vm_end, 3373 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 3374 (unsigned long)pgprot_val(vma->vm_page_prot)); 3375 3376 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { 3377 ret = -EPERM; 3378 failure_string = "bad vm_flags"; 3379 goto err_bad_arg; 3380 } 3381 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; 3382 3383 mutex_lock(&binder_mmap_lock); 3384 if (proc->buffer) { 3385 ret = -EBUSY; 3386 failure_string = "already mapped"; 3387 goto err_already_mapped; 3388 } 3389 3390 area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); 3391 if (area == NULL) { 3392 ret = -ENOMEM; 3393 failure_string = "get_vm_area"; 3394 goto err_get_vm_area_failed; 3395 } 3396 proc->buffer = area->addr; 3397 proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; 3398 mutex_unlock(&binder_mmap_lock); 3399 3400 #ifdef CONFIG_CPU_CACHE_VIPT 3401 if (cache_is_vipt_aliasing()) { 3402 while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) { 3403 pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer); 3404 vma->vm_start += PAGE_SIZE; 3405 } 3406 } 3407 #endif 3408 proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL); 3409 if (proc->pages == NULL) { 3410 ret = -ENOMEM; 3411 failure_string = "alloc page array"; 3412 goto err_alloc_pages_failed; 3413 } 3414 proc->buffer_size = vma->vm_end - vma->vm_start; 3415 3416 vma->vm_ops = &binder_vm_ops; 3417 vma->vm_private_data = proc; 3418 3419 if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) { 3420 ret = -ENOMEM; 3421 failure_string = "alloc small buf"; 3422 goto err_alloc_small_buf_failed; 3423 } 3424 buffer = proc->buffer; 3425 INIT_LIST_HEAD(&proc->buffers); 3426 list_add(&buffer->entry, &proc->buffers); 3427 buffer->free = 1; 3428 binder_insert_free_buffer(proc, buffer); 3429 proc->free_async_space = proc->buffer_size / 2; 3430 barrier(); 3431 proc->files = get_files_struct(current); 3432 proc->vma = vma; 3433 proc->vma_vm_mm = vma->vm_mm; 3434 3435 /*pr_info("binder_mmap: %d %lx-%lx maps %p\n", 3436 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/ 3437 return 0; 3438 3439 err_alloc_small_buf_failed: 3440 kfree(proc->pages); 3441 proc->pages = NULL; 3442 err_alloc_pages_failed: 3443 mutex_lock(&binder_mmap_lock); 3444 vfree(proc->buffer); 3445 proc->buffer = NULL; 3446 err_get_vm_area_failed: 3447 err_already_mapped: 3448 mutex_unlock(&binder_mmap_lock); 3449 err_bad_arg: 3450 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n", 3451 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); 3452 return ret; 3453 } 3454 3455 static int binder_open(struct inode *nodp, struct file *filp) 3456 { 3457 struct binder_proc *proc; 3458 struct binder_device *binder_dev; 3459 3460 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n", 3461 current->group_leader->pid, current->pid); 3462 3463 proc = kzalloc(sizeof(*proc), GFP_KERNEL); 3464 if (proc == NULL) 3465 return -ENOMEM; 3466 get_task_struct(current); 3467 proc->tsk = current; 3468 proc->vma_vm_mm = current->mm; 3469 INIT_LIST_HEAD(&proc->todo); 3470 init_waitqueue_head(&proc->wait); 3471 proc->default_priority = task_nice(current); 3472 binder_dev = container_of(filp->private_data, struct binder_device, 3473 miscdev); 3474 proc->context = &binder_dev->context; 3475 3476 binder_lock(__func__); 3477 3478 binder_stats_created(BINDER_STAT_PROC); 3479 hlist_add_head(&proc->proc_node, &binder_procs); 3480 proc->pid = current->group_leader->pid; 3481 INIT_LIST_HEAD(&proc->delivered_death); 3482 filp->private_data = proc; 3483 3484 binder_unlock(__func__); 3485 3486 if (binder_debugfs_dir_entry_proc) { 3487 char strbuf[11]; 3488 3489 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 3490 /* 3491 * proc debug entries are shared between contexts, so 3492 * this will fail if the process tries to open the driver 3493 * again with a different context. The priting code will 3494 * anyway print all contexts that a given PID has, so this 3495 * is not a problem. 3496 */ 3497 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO, 3498 binder_debugfs_dir_entry_proc, 3499 (void *)(unsigned long)proc->pid, 3500 &binder_proc_fops); 3501 } 3502 3503 return 0; 3504 } 3505 3506 static int binder_flush(struct file *filp, fl_owner_t id) 3507 { 3508 struct binder_proc *proc = filp->private_data; 3509 3510 binder_defer_work(proc, BINDER_DEFERRED_FLUSH); 3511 3512 return 0; 3513 } 3514 3515 static void binder_deferred_flush(struct binder_proc *proc) 3516 { 3517 struct rb_node *n; 3518 int wake_count = 0; 3519 3520 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { 3521 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); 3522 3523 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; 3524 if (thread->looper & BINDER_LOOPER_STATE_WAITING) { 3525 wake_up_interruptible(&thread->wait); 3526 wake_count++; 3527 } 3528 } 3529 wake_up_interruptible_all(&proc->wait); 3530 3531 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 3532 "binder_flush: %d woke %d threads\n", proc->pid, 3533 wake_count); 3534 } 3535 3536 static int binder_release(struct inode *nodp, struct file *filp) 3537 { 3538 struct binder_proc *proc = filp->private_data; 3539 3540 debugfs_remove(proc->debugfs_entry); 3541 binder_defer_work(proc, BINDER_DEFERRED_RELEASE); 3542 3543 return 0; 3544 } 3545 3546 static int binder_node_release(struct binder_node *node, int refs) 3547 { 3548 struct binder_ref *ref; 3549 int death = 0; 3550 3551 list_del_init(&node->work.entry); 3552 binder_release_work(&node->async_todo); 3553 3554 if (hlist_empty(&node->refs)) { 3555 kfree(node); 3556 binder_stats_deleted(BINDER_STAT_NODE); 3557 3558 return refs; 3559 } 3560 3561 node->proc = NULL; 3562 node->local_strong_refs = 0; 3563 node->local_weak_refs = 0; 3564 hlist_add_head(&node->dead_node, &binder_dead_nodes); 3565 3566 hlist_for_each_entry(ref, &node->refs, node_entry) { 3567 refs++; 3568 3569 if (!ref->death) 3570 continue; 3571 3572 death++; 3573 3574 if (list_empty(&ref->death->work.entry)) { 3575 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 3576 list_add_tail(&ref->death->work.entry, 3577 &ref->proc->todo); 3578 wake_up_interruptible(&ref->proc->wait); 3579 } else 3580 BUG(); 3581 } 3582 3583 binder_debug(BINDER_DEBUG_DEAD_BINDER, 3584 "node %d now dead, refs %d, death %d\n", 3585 node->debug_id, refs, death); 3586 3587 return refs; 3588 } 3589 3590 static void binder_deferred_release(struct binder_proc *proc) 3591 { 3592 struct binder_transaction *t; 3593 struct binder_context *context = proc->context; 3594 struct rb_node *n; 3595 int threads, nodes, incoming_refs, outgoing_refs, buffers, 3596 active_transactions, page_count; 3597 3598 BUG_ON(proc->vma); 3599 BUG_ON(proc->files); 3600 3601 hlist_del(&proc->proc_node); 3602 3603 if (context->binder_context_mgr_node && 3604 context->binder_context_mgr_node->proc == proc) { 3605 binder_debug(BINDER_DEBUG_DEAD_BINDER, 3606 "%s: %d context_mgr_node gone\n", 3607 __func__, proc->pid); 3608 context->binder_context_mgr_node = NULL; 3609 } 3610 3611 threads = 0; 3612 active_transactions = 0; 3613 while ((n = rb_first(&proc->threads))) { 3614 struct binder_thread *thread; 3615 3616 thread = rb_entry(n, struct binder_thread, rb_node); 3617 threads++; 3618 active_transactions += binder_free_thread(proc, thread); 3619 } 3620 3621 nodes = 0; 3622 incoming_refs = 0; 3623 while ((n = rb_first(&proc->nodes))) { 3624 struct binder_node *node; 3625 3626 node = rb_entry(n, struct binder_node, rb_node); 3627 nodes++; 3628 rb_erase(&node->rb_node, &proc->nodes); 3629 incoming_refs = binder_node_release(node, incoming_refs); 3630 } 3631 3632 outgoing_refs = 0; 3633 while ((n = rb_first(&proc->refs_by_desc))) { 3634 struct binder_ref *ref; 3635 3636 ref = rb_entry(n, struct binder_ref, rb_node_desc); 3637 outgoing_refs++; 3638 binder_delete_ref(ref); 3639 } 3640 3641 binder_release_work(&proc->todo); 3642 binder_release_work(&proc->delivered_death); 3643 3644 buffers = 0; 3645 while ((n = rb_first(&proc->allocated_buffers))) { 3646 struct binder_buffer *buffer; 3647 3648 buffer = rb_entry(n, struct binder_buffer, rb_node); 3649 3650 t = buffer->transaction; 3651 if (t) { 3652 t->buffer = NULL; 3653 buffer->transaction = NULL; 3654 pr_err("release proc %d, transaction %d, not freed\n", 3655 proc->pid, t->debug_id); 3656 /*BUG();*/ 3657 } 3658 3659 binder_free_buf(proc, buffer); 3660 buffers++; 3661 } 3662 3663 binder_stats_deleted(BINDER_STAT_PROC); 3664 3665 page_count = 0; 3666 if (proc->pages) { 3667 int i; 3668 3669 for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) { 3670 void *page_addr; 3671 3672 if (!proc->pages[i]) 3673 continue; 3674 3675 page_addr = proc->buffer + i * PAGE_SIZE; 3676 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 3677 "%s: %d: page %d at %p not freed\n", 3678 __func__, proc->pid, i, page_addr); 3679 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); 3680 __free_page(proc->pages[i]); 3681 page_count++; 3682 } 3683 kfree(proc->pages); 3684 vfree(proc->buffer); 3685 } 3686 3687 put_task_struct(proc->tsk); 3688 3689 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 3690 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n", 3691 __func__, proc->pid, threads, nodes, incoming_refs, 3692 outgoing_refs, active_transactions, buffers, page_count); 3693 3694 kfree(proc); 3695 } 3696 3697 static void binder_deferred_func(struct work_struct *work) 3698 { 3699 struct binder_proc *proc; 3700 struct files_struct *files; 3701 3702 int defer; 3703 3704 do { 3705 binder_lock(__func__); 3706 mutex_lock(&binder_deferred_lock); 3707 if (!hlist_empty(&binder_deferred_list)) { 3708 proc = hlist_entry(binder_deferred_list.first, 3709 struct binder_proc, deferred_work_node); 3710 hlist_del_init(&proc->deferred_work_node); 3711 defer = proc->deferred_work; 3712 proc->deferred_work = 0; 3713 } else { 3714 proc = NULL; 3715 defer = 0; 3716 } 3717 mutex_unlock(&binder_deferred_lock); 3718 3719 files = NULL; 3720 if (defer & BINDER_DEFERRED_PUT_FILES) { 3721 files = proc->files; 3722 if (files) 3723 proc->files = NULL; 3724 } 3725 3726 if (defer & BINDER_DEFERRED_FLUSH) 3727 binder_deferred_flush(proc); 3728 3729 if (defer & BINDER_DEFERRED_RELEASE) 3730 binder_deferred_release(proc); /* frees proc */ 3731 3732 binder_unlock(__func__); 3733 if (files) 3734 put_files_struct(files); 3735 } while (proc); 3736 } 3737 static DECLARE_WORK(binder_deferred_work, binder_deferred_func); 3738 3739 static void 3740 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) 3741 { 3742 mutex_lock(&binder_deferred_lock); 3743 proc->deferred_work |= defer; 3744 if (hlist_unhashed(&proc->deferred_work_node)) { 3745 hlist_add_head(&proc->deferred_work_node, 3746 &binder_deferred_list); 3747 schedule_work(&binder_deferred_work); 3748 } 3749 mutex_unlock(&binder_deferred_lock); 3750 } 3751 3752 static void print_binder_transaction(struct seq_file *m, const char *prefix, 3753 struct binder_transaction *t) 3754 { 3755 seq_printf(m, 3756 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", 3757 prefix, t->debug_id, t, 3758 t->from ? t->from->proc->pid : 0, 3759 t->from ? t->from->pid : 0, 3760 t->to_proc ? t->to_proc->pid : 0, 3761 t->to_thread ? t->to_thread->pid : 0, 3762 t->code, t->flags, t->priority, t->need_reply); 3763 if (t->buffer == NULL) { 3764 seq_puts(m, " buffer free\n"); 3765 return; 3766 } 3767 if (t->buffer->target_node) 3768 seq_printf(m, " node %d", 3769 t->buffer->target_node->debug_id); 3770 seq_printf(m, " size %zd:%zd data %p\n", 3771 t->buffer->data_size, t->buffer->offsets_size, 3772 t->buffer->data); 3773 } 3774 3775 static void print_binder_buffer(struct seq_file *m, const char *prefix, 3776 struct binder_buffer *buffer) 3777 { 3778 seq_printf(m, "%s %d: %p size %zd:%zd %s\n", 3779 prefix, buffer->debug_id, buffer->data, 3780 buffer->data_size, buffer->offsets_size, 3781 buffer->transaction ? "active" : "delivered"); 3782 } 3783 3784 static void print_binder_work(struct seq_file *m, const char *prefix, 3785 const char *transaction_prefix, 3786 struct binder_work *w) 3787 { 3788 struct binder_node *node; 3789 struct binder_transaction *t; 3790 3791 switch (w->type) { 3792 case BINDER_WORK_TRANSACTION: 3793 t = container_of(w, struct binder_transaction, work); 3794 print_binder_transaction(m, transaction_prefix, t); 3795 break; 3796 case BINDER_WORK_TRANSACTION_COMPLETE: 3797 seq_printf(m, "%stransaction complete\n", prefix); 3798 break; 3799 case BINDER_WORK_NODE: 3800 node = container_of(w, struct binder_node, work); 3801 seq_printf(m, "%snode work %d: u%016llx c%016llx\n", 3802 prefix, node->debug_id, 3803 (u64)node->ptr, (u64)node->cookie); 3804 break; 3805 case BINDER_WORK_DEAD_BINDER: 3806 seq_printf(m, "%shas dead binder\n", prefix); 3807 break; 3808 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 3809 seq_printf(m, "%shas cleared dead binder\n", prefix); 3810 break; 3811 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: 3812 seq_printf(m, "%shas cleared death notification\n", prefix); 3813 break; 3814 default: 3815 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); 3816 break; 3817 } 3818 } 3819 3820 static void print_binder_thread(struct seq_file *m, 3821 struct binder_thread *thread, 3822 int print_always) 3823 { 3824 struct binder_transaction *t; 3825 struct binder_work *w; 3826 size_t start_pos = m->count; 3827 size_t header_pos; 3828 3829 seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper); 3830 header_pos = m->count; 3831 t = thread->transaction_stack; 3832 while (t) { 3833 if (t->from == thread) { 3834 print_binder_transaction(m, 3835 " outgoing transaction", t); 3836 t = t->from_parent; 3837 } else if (t->to_thread == thread) { 3838 print_binder_transaction(m, 3839 " incoming transaction", t); 3840 t = t->to_parent; 3841 } else { 3842 print_binder_transaction(m, " bad transaction", t); 3843 t = NULL; 3844 } 3845 } 3846 list_for_each_entry(w, &thread->todo, entry) { 3847 print_binder_work(m, " ", " pending transaction", w); 3848 } 3849 if (!print_always && m->count == header_pos) 3850 m->count = start_pos; 3851 } 3852 3853 static void print_binder_node(struct seq_file *m, struct binder_node *node) 3854 { 3855 struct binder_ref *ref; 3856 struct binder_work *w; 3857 int count; 3858 3859 count = 0; 3860 hlist_for_each_entry(ref, &node->refs, node_entry) 3861 count++; 3862 3863 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d", 3864 node->debug_id, (u64)node->ptr, (u64)node->cookie, 3865 node->has_strong_ref, node->has_weak_ref, 3866 node->local_strong_refs, node->local_weak_refs, 3867 node->internal_strong_refs, count); 3868 if (count) { 3869 seq_puts(m, " proc"); 3870 hlist_for_each_entry(ref, &node->refs, node_entry) 3871 seq_printf(m, " %d", ref->proc->pid); 3872 } 3873 seq_puts(m, "\n"); 3874 list_for_each_entry(w, &node->async_todo, entry) 3875 print_binder_work(m, " ", 3876 " pending async transaction", w); 3877 } 3878 3879 static void print_binder_ref(struct seq_file *m, struct binder_ref *ref) 3880 { 3881 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n", 3882 ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ", 3883 ref->node->debug_id, ref->strong, ref->weak, ref->death); 3884 } 3885 3886 static void print_binder_proc(struct seq_file *m, 3887 struct binder_proc *proc, int print_all) 3888 { 3889 struct binder_work *w; 3890 struct rb_node *n; 3891 size_t start_pos = m->count; 3892 size_t header_pos; 3893 3894 seq_printf(m, "proc %d\n", proc->pid); 3895 seq_printf(m, "context %s\n", proc->context->name); 3896 header_pos = m->count; 3897 3898 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 3899 print_binder_thread(m, rb_entry(n, struct binder_thread, 3900 rb_node), print_all); 3901 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 3902 struct binder_node *node = rb_entry(n, struct binder_node, 3903 rb_node); 3904 if (print_all || node->has_async_transaction) 3905 print_binder_node(m, node); 3906 } 3907 if (print_all) { 3908 for (n = rb_first(&proc->refs_by_desc); 3909 n != NULL; 3910 n = rb_next(n)) 3911 print_binder_ref(m, rb_entry(n, struct binder_ref, 3912 rb_node_desc)); 3913 } 3914 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) 3915 print_binder_buffer(m, " buffer", 3916 rb_entry(n, struct binder_buffer, rb_node)); 3917 list_for_each_entry(w, &proc->todo, entry) 3918 print_binder_work(m, " ", " pending transaction", w); 3919 list_for_each_entry(w, &proc->delivered_death, entry) { 3920 seq_puts(m, " has delivered dead binder\n"); 3921 break; 3922 } 3923 if (!print_all && m->count == header_pos) 3924 m->count = start_pos; 3925 } 3926 3927 static const char * const binder_return_strings[] = { 3928 "BR_ERROR", 3929 "BR_OK", 3930 "BR_TRANSACTION", 3931 "BR_REPLY", 3932 "BR_ACQUIRE_RESULT", 3933 "BR_DEAD_REPLY", 3934 "BR_TRANSACTION_COMPLETE", 3935 "BR_INCREFS", 3936 "BR_ACQUIRE", 3937 "BR_RELEASE", 3938 "BR_DECREFS", 3939 "BR_ATTEMPT_ACQUIRE", 3940 "BR_NOOP", 3941 "BR_SPAWN_LOOPER", 3942 "BR_FINISHED", 3943 "BR_DEAD_BINDER", 3944 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 3945 "BR_FAILED_REPLY" 3946 }; 3947 3948 static const char * const binder_command_strings[] = { 3949 "BC_TRANSACTION", 3950 "BC_REPLY", 3951 "BC_ACQUIRE_RESULT", 3952 "BC_FREE_BUFFER", 3953 "BC_INCREFS", 3954 "BC_ACQUIRE", 3955 "BC_RELEASE", 3956 "BC_DECREFS", 3957 "BC_INCREFS_DONE", 3958 "BC_ACQUIRE_DONE", 3959 "BC_ATTEMPT_ACQUIRE", 3960 "BC_REGISTER_LOOPER", 3961 "BC_ENTER_LOOPER", 3962 "BC_EXIT_LOOPER", 3963 "BC_REQUEST_DEATH_NOTIFICATION", 3964 "BC_CLEAR_DEATH_NOTIFICATION", 3965 "BC_DEAD_BINDER_DONE", 3966 "BC_TRANSACTION_SG", 3967 "BC_REPLY_SG", 3968 }; 3969 3970 static const char * const binder_objstat_strings[] = { 3971 "proc", 3972 "thread", 3973 "node", 3974 "ref", 3975 "death", 3976 "transaction", 3977 "transaction_complete" 3978 }; 3979 3980 static void print_binder_stats(struct seq_file *m, const char *prefix, 3981 struct binder_stats *stats) 3982 { 3983 int i; 3984 3985 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != 3986 ARRAY_SIZE(binder_command_strings)); 3987 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { 3988 if (stats->bc[i]) 3989 seq_printf(m, "%s%s: %d\n", prefix, 3990 binder_command_strings[i], stats->bc[i]); 3991 } 3992 3993 BUILD_BUG_ON(ARRAY_SIZE(stats->br) != 3994 ARRAY_SIZE(binder_return_strings)); 3995 for (i = 0; i < ARRAY_SIZE(stats->br); i++) { 3996 if (stats->br[i]) 3997 seq_printf(m, "%s%s: %d\n", prefix, 3998 binder_return_strings[i], stats->br[i]); 3999 } 4000 4001 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 4002 ARRAY_SIZE(binder_objstat_strings)); 4003 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 4004 ARRAY_SIZE(stats->obj_deleted)); 4005 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { 4006 if (stats->obj_created[i] || stats->obj_deleted[i]) 4007 seq_printf(m, "%s%s: active %d total %d\n", prefix, 4008 binder_objstat_strings[i], 4009 stats->obj_created[i] - stats->obj_deleted[i], 4010 stats->obj_created[i]); 4011 } 4012 } 4013 4014 static void print_binder_proc_stats(struct seq_file *m, 4015 struct binder_proc *proc) 4016 { 4017 struct binder_work *w; 4018 struct rb_node *n; 4019 int count, strong, weak; 4020 4021 seq_printf(m, "proc %d\n", proc->pid); 4022 seq_printf(m, "context %s\n", proc->context->name); 4023 count = 0; 4024 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 4025 count++; 4026 seq_printf(m, " threads: %d\n", count); 4027 seq_printf(m, " requested threads: %d+%d/%d\n" 4028 " ready threads %d\n" 4029 " free async space %zd\n", proc->requested_threads, 4030 proc->requested_threads_started, proc->max_threads, 4031 proc->ready_threads, proc->free_async_space); 4032 count = 0; 4033 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) 4034 count++; 4035 seq_printf(m, " nodes: %d\n", count); 4036 count = 0; 4037 strong = 0; 4038 weak = 0; 4039 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { 4040 struct binder_ref *ref = rb_entry(n, struct binder_ref, 4041 rb_node_desc); 4042 count++; 4043 strong += ref->strong; 4044 weak += ref->weak; 4045 } 4046 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); 4047 4048 count = 0; 4049 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) 4050 count++; 4051 seq_printf(m, " buffers: %d\n", count); 4052 4053 count = 0; 4054 list_for_each_entry(w, &proc->todo, entry) { 4055 switch (w->type) { 4056 case BINDER_WORK_TRANSACTION: 4057 count++; 4058 break; 4059 default: 4060 break; 4061 } 4062 } 4063 seq_printf(m, " pending transactions: %d\n", count); 4064 4065 print_binder_stats(m, " ", &proc->stats); 4066 } 4067 4068 4069 static int binder_state_show(struct seq_file *m, void *unused) 4070 { 4071 struct binder_proc *proc; 4072 struct binder_node *node; 4073 int do_lock = !binder_debug_no_lock; 4074 4075 if (do_lock) 4076 binder_lock(__func__); 4077 4078 seq_puts(m, "binder state:\n"); 4079 4080 if (!hlist_empty(&binder_dead_nodes)) 4081 seq_puts(m, "dead nodes:\n"); 4082 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) 4083 print_binder_node(m, node); 4084 4085 hlist_for_each_entry(proc, &binder_procs, proc_node) 4086 print_binder_proc(m, proc, 1); 4087 if (do_lock) 4088 binder_unlock(__func__); 4089 return 0; 4090 } 4091 4092 static int binder_stats_show(struct seq_file *m, void *unused) 4093 { 4094 struct binder_proc *proc; 4095 int do_lock = !binder_debug_no_lock; 4096 4097 if (do_lock) 4098 binder_lock(__func__); 4099 4100 seq_puts(m, "binder stats:\n"); 4101 4102 print_binder_stats(m, "", &binder_stats); 4103 4104 hlist_for_each_entry(proc, &binder_procs, proc_node) 4105 print_binder_proc_stats(m, proc); 4106 if (do_lock) 4107 binder_unlock(__func__); 4108 return 0; 4109 } 4110 4111 static int binder_transactions_show(struct seq_file *m, void *unused) 4112 { 4113 struct binder_proc *proc; 4114 int do_lock = !binder_debug_no_lock; 4115 4116 if (do_lock) 4117 binder_lock(__func__); 4118 4119 seq_puts(m, "binder transactions:\n"); 4120 hlist_for_each_entry(proc, &binder_procs, proc_node) 4121 print_binder_proc(m, proc, 0); 4122 if (do_lock) 4123 binder_unlock(__func__); 4124 return 0; 4125 } 4126 4127 static int binder_proc_show(struct seq_file *m, void *unused) 4128 { 4129 struct binder_proc *itr; 4130 int pid = (unsigned long)m->private; 4131 int do_lock = !binder_debug_no_lock; 4132 4133 if (do_lock) 4134 binder_lock(__func__); 4135 4136 hlist_for_each_entry(itr, &binder_procs, proc_node) { 4137 if (itr->pid == pid) { 4138 seq_puts(m, "binder proc state:\n"); 4139 print_binder_proc(m, itr, 1); 4140 } 4141 } 4142 if (do_lock) 4143 binder_unlock(__func__); 4144 return 0; 4145 } 4146 4147 static void print_binder_transaction_log_entry(struct seq_file *m, 4148 struct binder_transaction_log_entry *e) 4149 { 4150 seq_printf(m, 4151 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n", 4152 e->debug_id, (e->call_type == 2) ? "reply" : 4153 ((e->call_type == 1) ? "async" : "call "), e->from_proc, 4154 e->from_thread, e->to_proc, e->to_thread, e->context_name, 4155 e->to_node, e->target_handle, e->data_size, e->offsets_size); 4156 } 4157 4158 static int binder_transaction_log_show(struct seq_file *m, void *unused) 4159 { 4160 struct binder_transaction_log *log = m->private; 4161 int i; 4162 4163 if (log->full) { 4164 for (i = log->next; i < ARRAY_SIZE(log->entry); i++) 4165 print_binder_transaction_log_entry(m, &log->entry[i]); 4166 } 4167 for (i = 0; i < log->next; i++) 4168 print_binder_transaction_log_entry(m, &log->entry[i]); 4169 return 0; 4170 } 4171 4172 static const struct file_operations binder_fops = { 4173 .owner = THIS_MODULE, 4174 .poll = binder_poll, 4175 .unlocked_ioctl = binder_ioctl, 4176 .compat_ioctl = binder_ioctl, 4177 .mmap = binder_mmap, 4178 .open = binder_open, 4179 .flush = binder_flush, 4180 .release = binder_release, 4181 }; 4182 4183 BINDER_DEBUG_ENTRY(state); 4184 BINDER_DEBUG_ENTRY(stats); 4185 BINDER_DEBUG_ENTRY(transactions); 4186 BINDER_DEBUG_ENTRY(transaction_log); 4187 4188 static int __init init_binder_device(const char *name) 4189 { 4190 int ret; 4191 struct binder_device *binder_device; 4192 4193 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL); 4194 if (!binder_device) 4195 return -ENOMEM; 4196 4197 binder_device->miscdev.fops = &binder_fops; 4198 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR; 4199 binder_device->miscdev.name = name; 4200 4201 binder_device->context.binder_context_mgr_uid = INVALID_UID; 4202 binder_device->context.name = name; 4203 4204 ret = misc_register(&binder_device->miscdev); 4205 if (ret < 0) { 4206 kfree(binder_device); 4207 return ret; 4208 } 4209 4210 hlist_add_head(&binder_device->hlist, &binder_devices); 4211 4212 return ret; 4213 } 4214 4215 static int __init binder_init(void) 4216 { 4217 int ret; 4218 char *device_name, *device_names; 4219 struct binder_device *device; 4220 struct hlist_node *tmp; 4221 4222 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); 4223 if (binder_debugfs_dir_entry_root) 4224 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", 4225 binder_debugfs_dir_entry_root); 4226 4227 if (binder_debugfs_dir_entry_root) { 4228 debugfs_create_file("state", 4229 S_IRUGO, 4230 binder_debugfs_dir_entry_root, 4231 NULL, 4232 &binder_state_fops); 4233 debugfs_create_file("stats", 4234 S_IRUGO, 4235 binder_debugfs_dir_entry_root, 4236 NULL, 4237 &binder_stats_fops); 4238 debugfs_create_file("transactions", 4239 S_IRUGO, 4240 binder_debugfs_dir_entry_root, 4241 NULL, 4242 &binder_transactions_fops); 4243 debugfs_create_file("transaction_log", 4244 S_IRUGO, 4245 binder_debugfs_dir_entry_root, 4246 &binder_transaction_log, 4247 &binder_transaction_log_fops); 4248 debugfs_create_file("failed_transaction_log", 4249 S_IRUGO, 4250 binder_debugfs_dir_entry_root, 4251 &binder_transaction_log_failed, 4252 &binder_transaction_log_fops); 4253 } 4254 4255 /* 4256 * Copy the module_parameter string, because we don't want to 4257 * tokenize it in-place. 4258 */ 4259 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL); 4260 if (!device_names) { 4261 ret = -ENOMEM; 4262 goto err_alloc_device_names_failed; 4263 } 4264 strcpy(device_names, binder_devices_param); 4265 4266 while ((device_name = strsep(&device_names, ","))) { 4267 ret = init_binder_device(device_name); 4268 if (ret) 4269 goto err_init_binder_device_failed; 4270 } 4271 4272 return ret; 4273 4274 err_init_binder_device_failed: 4275 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) { 4276 misc_deregister(&device->miscdev); 4277 hlist_del(&device->hlist); 4278 kfree(device); 4279 } 4280 err_alloc_device_names_failed: 4281 debugfs_remove_recursive(binder_debugfs_dir_entry_root); 4282 4283 return ret; 4284 } 4285 4286 device_initcall(binder_init); 4287 4288 #define CREATE_TRACE_POINTS 4289 #include "binder_trace.h" 4290 4291 MODULE_LICENSE("GPL v2"); 4292