/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static DEFINE_MUTEX(binder_main_lock);

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
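/*
 * Illustrative note (not part of the original source): since debug_mask is
 * exposed as a module parameter with S_IWUSR | S_IRUGO, root should be able
 * to raise the log verbosity at runtime without reloading the driver, e.g.:
 *
 *	echo 0x3ff > /sys/module/binder/parameters/debug_mask
 *
 * which would enable everything up to BINDER_DEBUG_TRANSACTION above.
 */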
static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};
struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = 1;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the remaining fields are zeroed by the memset() below.
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
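/*
 * Illustrative note (not part of the original source): the barrier above is
 * one half of a pair. A reader of the log (the debugfs show side, outside
 * this excerpt) would be expected to sample debug_id_done and issue the
 * matching smp_rmb() before trusting the other fields, along the lines of:
 *
 *	int done = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();
 *	// ...read the remaining fields, treating the entry as complete
 *	// only if done is non-zero and matches e->debug_id...
 */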
struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};

struct binder_work {
	struct list_head entry;
	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_node {
	int debug_id;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	unsigned has_strong_ref:1;
	unsigned pending_strong_ref:1;
	unsigned has_weak_ref:1;
	unsigned pending_weak_ref:1;
	unsigned has_async_transaction:1;
	unsigned accept_fds:1;
	unsigned min_priority:8;
	struct list_head async_todo;
};

struct binder_ref_death {
	struct binder_work work;
	binder_uintptr_t cookie;
};

struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	uint32_t desc;
	int strong;
	int weak;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES = 0x01,
	BINDER_DEFERRED_FLUSH     = 0x02,
	BINDER_DEFERRED_RELEASE   = 0x04,
};

struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;

	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED    = 0x02,
	BINDER_LOOPER_STATE_EXITED     = 0x04,
	BINDER_LOOPER_STATE_INVALID    = 0x08,
	BINDER_LOOPER_STATE_WAITING    = 0x10,
};

struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	uint32_t return_error;  /* Write failed, return error code in read buf */
	uint32_t return_error2; /* Write failed, return error code in read
				 * buffer. Used when sending a reply to a dead
				 * process that we are also waiting on.
				 */
	wait_queue_head_t wait;
	struct binder_stats stats;
};
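/*
 * Illustrative note (not part of the original source): transaction_stack
 * chains nested synchronous calls. If process A calls into B and B, while
 * handling that call, calls back into A, the stacks roughly look like:
 *
 *	A's thread: T1 (waiting for the reply to T1)
 *	B's thread: T2, from_parent = T1 (T2 targets A)
 *
 * so completing T2 pops back to T1. binder_transaction() below maintains
 * these chains via the from_parent/to_parent fields defined next.
 */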
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */ /* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
};

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}

static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}

static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed, using %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
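/*
 * Illustrative note (not part of the original source): rlimit_to_nice()
 * maps the RLIMIT_NICE ceiling into nice units as 20 - rlim_cur. So a soft
 * limit of 30 caps the thread at nice -10, while a limit of 0 yields
 * min_nice = 20, which is above MAX_NICE (19) and triggers the
 * binder_user_error() in binder_set_nice() above.
 */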
static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   binder_uintptr_t ptr,
					   binder_uintptr_t cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);
	return node;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}

static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				spin_lock(&binder_dead_nodes_lock);
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}

	return 0;
}
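/*
 * Illustrative note (not part of the original source): a node tracks two
 * flavors of strong reference. internal_strong_refs counts remote
 * binder_refs (one per process holding a strong handle), while
 * local_strong_refs counts uses within the owning process, such as a
 * transaction buffer targeting the node. For example, if process B holds
 * one strong handle to a node of process A while A handles one transaction
 * addressed to it, internal_strong_refs == 1 and local_strong_refs == 1;
 * the node is only freed once both drop to zero and no refs remain.
 */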
static struct binder_ref *binder_get_ref(struct binder_proc *proc,
					 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->desc) {
			n = n->rb_left;
		} else if (desc > ref->desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;
	struct binder_context *context = proc->context;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->debug_id, new_ref->desc,
		     node->debug_id);
	return new_ref;
}

static void binder_delete_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->debug_id, ref->desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	hlist_del(&ref->node_entry);
	binder_dec_node(ref->node, 0, 1);
	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	kfree(ref);
	binder_stats_deleted(BINDER_STAT_REF);
}

static int binder_inc_ref(struct binder_ref *ref, int strong,
			  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->strong++;
	} else {
		if (ref->weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->weak++;
	}
	return 0;
}
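/*
 * Illustrative note (not part of the original source): handle values (desc)
 * are allocated per process in ascending order by the scan in
 * binder_get_ref_for_node() above, with 0 reserved for the context manager.
 * So the first three refs a process takes on ordinary nodes get desc 1, 2
 * and 3; if desc 2 is later released, the next new ref reuses 2.
 */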
static int binder_dec_ref(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->strong--;
		if (ref->strong == 0) {
			int ret;

			ret = binder_dec_node(ref->node, strong, 1);
			if (ret)
				return ret;
		}
	} else {
		if (ref->weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->weak--;
	}
	if (ref->strong == 0 && ref->weak == 0)
		binder_delete_ref(ref);
	return 0;
}

static void binder_pop_transaction(struct binder_thread *target_thread,
				   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

static void binder_free_transaction(struct binder_transaction *t)
{
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			if (target_thread->return_error != BR_OK &&
			    target_thread->return_error2 == BR_OK) {
				target_thread->return_error2 =
					target_thread->return_error;
				target_thread->return_error = BR_OK;
			}
			if (target_thread->return_error == BR_OK) {
				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
					     "send failed reply for transaction %d to %d:%d\n",
					     t->debug_id,
					     target_thread->proc->pid,
					     target_thread->pid);

				binder_pop_transaction(target_thread, t);
				target_thread->return_error = error_code;
				wake_up_interruptible(&target_thread->wait);
				binder_free_transaction(t);
			} else {
				pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
				       target_thread->proc->pid,
				       target_thread->pid,
				       target_thread->return_error);
			}
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}
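/*
 * Illustrative note (not part of the original source): the retry loop above
 * matters for nested calls. With a chain A -> B -> C where B's thread died
 * after forwarding, failing the reply from C first finds t->from == NULL
 * on the B-bound transaction, frees it, and retries with from_parent so
 * that A, the original and still-waiting caller, receives the error code.
 */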
/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the buffer at which to validate an object.
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero.
 */
static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
{
	/* Check if we can read a header first */
	struct binder_object_header *hdr;
	size_t object_size = 0;

	if (offset > buffer->data_size - sizeof(*hdr) ||
	    buffer->data_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	/* Ok, now see if we can read a complete object. */
	hdr = (struct binder_object_header *)(buffer->data + offset);
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}

/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b:		binder_buffer containing the object
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start:	points to the start of the offset array
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
							binder_size_t index,
							binder_size_t *start,
							binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}
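/*
 * Illustrative note (not part of the original source): a transaction buffer
 * is laid out as the raw data area followed by an array of offsets, and
 * each offset must point at a binder_object_header inside the data area:
 *
 *	data:    [ ... | flat_binder_object | ... | binder_fd_object | ... ]
 *	offsets: [ off0, off1 ]   // off0 -> the binder, off1 -> the fd
 *
 * binder_validate_object() above enforces that each offset is aligned and
 * leaves room for the full object it claims to contain.
 */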
/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b:			transaction buffer
 * @objects_start:	start of objects buffer
 * @buffer:		binder_buffer_object in which to fix up
 * @offset:		start offset in @buffer to fix up
 * @last_obj:		last binder_buffer_object that we fixed up in
 * @last_min_offset:	minimum fixup offset in @last_obj
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = C, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_buffer *b,
				  binder_size_t *objects_start,
				  struct binder_buffer_object *buffer,
				  binder_size_t fixup_offset,
				  struct binder_buffer_object *last_obj,
				  binder_size_t last_min_offset)
{
	if (!last_obj) {
		/* No object verified yet, so there is nothing to fix up in */
		return false;
	}

	while (last_obj != buffer) {
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
		last_obj = (struct binder_buffer_object *)
			(b->data + *(objects_start + last_obj->parent));
	}
	return (fixup_offset >= last_min_offset);
}
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_start, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start = (binder_size_t *)(buffer->data +
				      ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)off_start + buffer->offsets_size;
	for (offp = off_start; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		hdr = (struct binder_object_header *)(buffer->data + *offp);
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref *ref;

			fp = to_flat_binder_object(hdr);
			ref = binder_get_ref(proc, fp->handle,
					     hdr->type == BINDER_TYPE_HANDLE);
			if (ref == NULL) {
				pr_err("transaction release %d bad handle %d\n",
				       debug_id, fp->handle);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d (node %d)\n",
				     ref->debug_id, ref->desc, ref->node->debug_id);
			binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);

			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->fd);
			if (failed_at)
				task_close_fd(proc, fp->fd);
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			uintptr_t parent_buffer;
			u32 *fd_array;
			size_t fd_index;
			binder_size_t fd_buf_size;

			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(buffer, fda->parent,
						     off_start,
						     offp - off_start);
			if (!parent) {
				pr_err("transaction release %d bad parent offset",
				       debug_id);
				continue;
			}
			/*
			 * Since the parent was already fixed up, convert it
			 * back to kernel address space to access it
			 */
			parent_buffer = parent->buffer -
				binder_alloc_get_user_buffer_offset(
						&proc->alloc);

			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			fd_array = (u32 *)(parent_buffer + fda->parent_offset);
			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
				task_close_fd(proc, fd_array[fd_index]);
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}
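/*
 * Illustrative note (not part of the original source): object translation
 * rewrites each flat object in place for the receiver. Sending a local
 * binder to another process roughly transforms:
 *
 *	{ .hdr.type = BINDER_TYPE_BINDER, .binder = 0xb000..., .cookie = c }
 * into
 *	{ .hdr.type = BINDER_TYPE_HANDLE, .handle = 3, .binder = 0, .cookie = 0 }
 *
 * where 3 is the desc of the (possibly newly created) ref in the target
 * proc, as done by binder_translate_binder() below.
 */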
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_ref *ref;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp->binder, fp->cookie);
		if (!node)
			return -ENOMEM;

		node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
		node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
		return -EPERM;

	ref = binder_get_ref_for_node(target_proc, node);
	if (!ref)
		return -ENOMEM;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = ref->desc;
	fp->cookie = 0;
	binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);

	trace_binder_transaction_node_to_ref(t, node, ref);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     ref->debug_id, ref->desc);

	return 0;
}

static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_ref *ref;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	ref = binder_get_ref(proc, fp->handle,
			     fp->hdr.type == BINDER_TYPE_HANDLE);
	if (!ref) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
		return -EPERM;

	if (ref->node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = ref->node->ptr;
		fp->cookie = ref->node->cookie;
		binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
				0, NULL);
		trace_binder_transaction_ref_to_node(t, ref);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     ref->debug_id, ref->desc, ref->node->debug_id,
			     (u64)ref->node->ptr);
	} else {
		struct binder_ref *new_ref;

		new_ref = binder_get_ref_for_node(target_proc, ref->node);
		if (!new_ref)
			return -ENOMEM;

		fp->binder = 0;
		fp->handle = new_ref->desc;
		fp->cookie = 0;
		binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
			       NULL);
		trace_binder_transaction_ref_to_ref(t, ref, new_ref);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     ref->debug_id, ref->desc, new_ref->debug_id,
			     new_ref->desc, ref->node->debug_id);
	}
	return 0;
}
"reply" : "transaction", 1244 fd); 1245 ret = -EPERM; 1246 goto err_fd_not_accepted; 1247 } 1248 1249 file = fget(fd); 1250 if (!file) { 1251 binder_user_error("%d:%d got transaction with invalid fd, %d\n", 1252 proc->pid, thread->pid, fd); 1253 ret = -EBADF; 1254 goto err_fget; 1255 } 1256 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file); 1257 if (ret < 0) { 1258 ret = -EPERM; 1259 goto err_security; 1260 } 1261 1262 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); 1263 if (target_fd < 0) { 1264 ret = -ENOMEM; 1265 goto err_get_unused_fd; 1266 } 1267 task_fd_install(target_proc, target_fd, file); 1268 trace_binder_transaction_fd(t, fd, target_fd); 1269 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n", 1270 fd, target_fd); 1271 1272 return target_fd; 1273 1274 err_get_unused_fd: 1275 err_security: 1276 fput(file); 1277 err_fget: 1278 err_fd_not_accepted: 1279 return ret; 1280 } 1281 1282 static int binder_translate_fd_array(struct binder_fd_array_object *fda, 1283 struct binder_buffer_object *parent, 1284 struct binder_transaction *t, 1285 struct binder_thread *thread, 1286 struct binder_transaction *in_reply_to) 1287 { 1288 binder_size_t fdi, fd_buf_size, num_installed_fds; 1289 int target_fd; 1290 uintptr_t parent_buffer; 1291 u32 *fd_array; 1292 struct binder_proc *proc = thread->proc; 1293 struct binder_proc *target_proc = t->to_proc; 1294 1295 fd_buf_size = sizeof(u32) * fda->num_fds; 1296 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 1297 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n", 1298 proc->pid, thread->pid, (u64)fda->num_fds); 1299 return -EINVAL; 1300 } 1301 if (fd_buf_size > parent->length || 1302 fda->parent_offset > parent->length - fd_buf_size) { 1303 /* No space for all file descriptors here. */ 1304 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n", 1305 proc->pid, thread->pid, (u64)fda->num_fds); 1306 return -EINVAL; 1307 } 1308 /* 1309 * Since the parent was already fixed up, convert it 1310 * back to the kernel address space to access it 1311 */ 1312 parent_buffer = parent->buffer - 1313 binder_alloc_get_user_buffer_offset(&target_proc->alloc); 1314 fd_array = (u32 *)(parent_buffer + fda->parent_offset); 1315 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) { 1316 binder_user_error("%d:%d parent offset not aligned correctly.\n", 1317 proc->pid, thread->pid); 1318 return -EINVAL; 1319 } 1320 for (fdi = 0; fdi < fda->num_fds; fdi++) { 1321 target_fd = binder_translate_fd(fd_array[fdi], t, thread, 1322 in_reply_to); 1323 if (target_fd < 0) 1324 goto err_translate_fd_failed; 1325 fd_array[fdi] = target_fd; 1326 } 1327 return 0; 1328 1329 err_translate_fd_failed: 1330 /* 1331 * Failed to allocate fd or security error, free fds 1332 * installed so far. 
static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size, num_installed_fds;
	int target_fd;
	uintptr_t parent_buffer;
	u32 *fd_array;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * Since the parent was already fixed up, convert it
	 * back to the kernel address space to access it
	 */
	parent_buffer = parent->buffer -
		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
	fd_array = (u32 *)(parent_buffer + fda->parent_offset);
	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
						in_reply_to);
		if (target_fd < 0)
			goto err_translate_fd_failed;
		fd_array[fdi] = target_fd;
	}
	return 0;

err_translate_fd_failed:
	/*
	 * Failed to allocate fd or security error, free fds
	 * installed so far.
	 */
	num_installed_fds = fdi;
	for (fdi = 0; fdi < num_installed_fds; fdi++)
		task_close_fd(target_proc, fd_array[fdi]);
	return target_fd;
}

static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t *off_start,
			       binder_size_t num_valid,
			       struct binder_buffer_object *last_fixup_obj,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	u8 *parent_buffer;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(b, off_start,
				   parent, bp->parent_offset,
				   last_fixup_obj,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	parent_buffer = (u8 *)(parent->buffer -
			       binder_alloc_get_user_buffer_offset(
					       &target_proc->alloc));
	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

	return 0;
}
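/*
 * Illustrative note (not part of the original source): binder_transaction()
 * below is the heart of the driver. In rough order it:
 *
 *  1. resolves the target (reply stack, handle, or context manager);
 *  2. allocates a binder_buffer in the target and copies data + offsets;
 *  3. walks the offsets, translating each object via the helpers above;
 *  4. queues the work on the target and a TRANSACTION_COMPLETE locally.
 *
 * Any failure after step 2 unwinds through
 * binder_transaction_buffer_release(), passing the partially-processed
 * offset pointer as failed_at.
 */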
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end, *off_start;
	binder_size_t off_min;
	u8 *sg_bufp, *sg_buf_end;
	struct binder_proc *target_proc;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	struct binder_buffer_object *last_fixup_obj = NULL;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);

	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	e->context_name = proc->context->name;

	if (reply) {
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_empty_call_stack;
		}
		binder_set_nice(in_reply_to->saved_priority);
		if (in_reply_to->to_thread != thread) {
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
					  proc->pid, thread->pid, in_reply_to->debug_id,
					  in_reply_to->to_proc ?
					  in_reply_to->to_proc->pid : 0,
					  in_reply_to->to_thread ?
					  in_reply_to->to_thread->pid : 0);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		target_thread = in_reply_to->from;
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
					  proc->pid, thread->pid,
					  target_thread->transaction_stack ?
					  target_thread->transaction_stack->debug_id : 0,
					  in_reply_to->debug_id);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			ref = binder_get_ref(proc, tr->target.handle, true);
			if (ref == NULL) {
				binder_user_error("%d:%d got transaction to invalid handle\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_invalid_target_handle;
			}
			target_node = ref->node;
		} else {
			mutex_lock(&context->context_mgr_node_lock);
			target_node = context->binder_context_mgr_node;
			if (target_node == NULL) {
				return_error = BR_DEAD_REPLY;
				mutex_unlock(&context->context_mgr_node_lock);
				return_error_line = __LINE__;
				goto err_no_context_mgr_node;
			}
			mutex_unlock(&context->context_mgr_node_lock);
		}
		e->to_node = target_node->debug_id;
		target_proc = target_node->proc;
		if (target_proc == NULL) {
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPERM;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
						  proc->pid, thread->pid, tmp->debug_id,
						  tmp->to_proc ? tmp->to_proc->pid : 0,
						  tmp->to_thread ?
						  tmp->to_thread->pid : 0);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EPROTO;
				return_error_line = __LINE__;
				goto err_bad_call_stack;
			}
			while (tmp) {
				if (tmp->from && tmp->from->proc == target_proc)
					target_thread = tmp->from;
				tmp = tmp->from_parent;
			}
		}
	}
	if (target_thread) {
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	trace_binder_transaction(reply, t, target_node);
	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
					 tr->offsets_size, extra_buffers_size,
					 !reply && (t->flags & TF_ONE_WAY));
	if (IS_ERR(t->buffer)) {
		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */
		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	if (target_node)
		binder_inc_node(target_node, 1, 0, NULL);

	off_start = (binder_size_t *)(t->buffer->data +
				      ALIGN(tr->data_size, sizeof(void *)));
	offp = off_start;

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				  proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				  proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				  proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_end = (void *)off_start + tr->offsets_size;
	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
	sg_buf_end = sg_bufp + extra_buffers_size;
	off_min = 0;
	for (; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(t->buffer, *offp);

		if (object_size == 0 || *offp < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid, (u64)*offp,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
		off_min = *offp + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			int target_fd = binder_translate_fd(fp->fd, t, thread,
							    in_reply_to);

			if (target_fd < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = target_fd;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			fp->pad_binder = 0;
			fp->fd = target_fd;
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			struct binder_buffer_object *parent =
				binder_validate_ptr(t->buffer, fda->parent,
						    off_start,
						    offp - off_start);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(t->buffer, off_start,
						   parent, fda->parent_offset,
						   last_fixup_obj,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(fda, parent, t, thread,
							in_reply_to);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = parent;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end - sg_bufp;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			if (copy_from_user(sg_bufp,
					   (const void __user *)(uintptr_t)
					   bp->buffer, bp->length)) {
				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
						  proc->pid, thread->pid);
				return_error_param = -EFAULT;
				return_error = BR_FAILED_REPLY;
				return_error_line = __LINE__;
				goto err_copy_data_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)sg_bufp +
				binder_alloc_get_user_buffer_offset(
						&target_proc->alloc);
			sg_bufp += ALIGN(bp->length, sizeof(u64));

			ret = binder_fixup_parent(t, thread, bp, off_start,
						  offp - off_start,
						  last_fixup_obj,
						  last_fixup_min_off);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = bp;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
					  proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	list_add_tail(&tcomplete->entry, &thread->todo);
	if (reply) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction(target_thread, in_reply_to);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		if (target_node->has_async_transaction) {
			target_list = &target_node->async_todo;
			target_wait = NULL;
		} else
			target_node->has_async_transaction = 1;
	}
	t->work.type = BINDER_WORK_TRANSACTION;
	list_add_tail(&t->work.entry, target_list);
	if (target_wait) {
		if (reply || !(tr->flags & TF_ONE_WAY))
			wake_up_interruptible_sync(target_wait);
		else
			wake_up_interruptible(target_wait);
	}
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;

err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
		     proc->pid, thread->pid, return_error, return_error_param,
		     (u64)tr->data_size, (u64)tr->offsets_size,
		     return_error_line);

	{
		struct binder_transaction_log_entry *fe;

		e->return_error = return_error;
		e->return_error_param = return_error_param;
		e->return_error_line = return_error_line;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

	BUG_ON(thread->return_error != BR_OK);
	if (in_reply_to) {
		thread->return_error = BR_TRANSACTION_COMPLETE;
		binder_send_failed_reply(in_reply_to, return_error);
	} else
		thread->return_error = return_error;
}
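/*
 * Illustrative note (not part of the original source): the write buffer
 * parsed by binder_thread_write() below is a packed command stream. A
 * client issuing one call might hand the driver, back to back:
 *
 *	u32 cmd = BC_TRANSACTION;
 *	struct binder_transaction_data tr = { ... };
 *
 * optionally followed by further BC_ commands; *consumed reports how much
 * of the stream was processed.
 */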
switch (cmd) { 1917 case BC_INCREFS: 1918 case BC_ACQUIRE: 1919 case BC_RELEASE: 1920 case BC_DECREFS: { 1921 uint32_t target; 1922 struct binder_ref *ref = NULL; 1923 const char *debug_string; 1924 1925 if (get_user(target, (uint32_t __user *)ptr)) 1926 return -EFAULT; 1927 1928 ptr += sizeof(uint32_t); 1929 if (target == 0 && 1930 (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) { 1931 struct binder_node *ctx_mgr_node; 1932 1933 mutex_lock(&context->context_mgr_node_lock); 1934 ctx_mgr_node = context->binder_context_mgr_node; 1935 if (ctx_mgr_node) { 1936 ref = binder_get_ref_for_node(proc, 1937 ctx_mgr_node); 1938 if (ref && ref->desc != target) { 1939 binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n", 1940 proc->pid, thread->pid, 1941 ref->desc); 1942 } 1943 } 1944 mutex_unlock(&context->context_mgr_node_lock); 1945 } 1946 if (ref == NULL) 1947 ref = binder_get_ref(proc, target, 1948 cmd == BC_ACQUIRE || 1949 cmd == BC_RELEASE); 1950 if (ref == NULL) { 1951 binder_user_error("%d:%d refcount change on invalid ref %d\n", 1952 proc->pid, thread->pid, target); 1953 break; 1954 } 1955 switch (cmd) { 1956 case BC_INCREFS: 1957 debug_string = "IncRefs"; 1958 binder_inc_ref(ref, 0, NULL); 1959 break; 1960 case BC_ACQUIRE: 1961 debug_string = "Acquire"; 1962 binder_inc_ref(ref, 1, NULL); 1963 break; 1964 case BC_RELEASE: 1965 debug_string = "Release"; 1966 binder_dec_ref(ref, 1); 1967 break; 1968 case BC_DECREFS: 1969 default: 1970 debug_string = "DecRefs"; 1971 binder_dec_ref(ref, 0); 1972 break; 1973 } 1974 binder_debug(BINDER_DEBUG_USER_REFS, 1975 "%d:%d %s ref %d desc %d s %d w %d for node %d\n", 1976 proc->pid, thread->pid, debug_string, ref->debug_id, 1977 ref->desc, ref->strong, ref->weak, ref->node->debug_id); 1978 break; 1979 } 1980 case BC_INCREFS_DONE: 1981 case BC_ACQUIRE_DONE: { 1982 binder_uintptr_t node_ptr; 1983 binder_uintptr_t cookie; 1984 struct binder_node *node; 1985 1986 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr)) 1987 return -EFAULT; 1988 ptr += sizeof(binder_uintptr_t); 1989 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 1990 return -EFAULT; 1991 ptr += sizeof(binder_uintptr_t); 1992 node = binder_get_node(proc, node_ptr); 1993 if (node == NULL) { 1994 binder_user_error("%d:%d %s u%016llx no match\n", 1995 proc->pid, thread->pid, 1996 cmd == BC_INCREFS_DONE ? 1997 "BC_INCREFS_DONE" : 1998 "BC_ACQUIRE_DONE", 1999 (u64)node_ptr); 2000 break; 2001 } 2002 if (cookie != node->cookie) { 2003 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n", 2004 proc->pid, thread->pid, 2005 cmd == BC_INCREFS_DONE ? 2006 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 2007 (u64)node_ptr, node->debug_id, 2008 (u64)cookie, (u64)node->cookie); 2009 break; 2010 } 2011 if (cmd == BC_ACQUIRE_DONE) { 2012 if (node->pending_strong_ref == 0) { 2013 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", 2014 proc->pid, thread->pid, 2015 node->debug_id); 2016 break; 2017 } 2018 node->pending_strong_ref = 0; 2019 } else { 2020 if (node->pending_weak_ref == 0) { 2021 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", 2022 proc->pid, thread->pid, 2023 node->debug_id); 2024 break; 2025 } 2026 node->pending_weak_ref = 0; 2027 } 2028 binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0); 2029 binder_debug(BINDER_DEBUG_USER_REFS, 2030 "%d:%d %s node %d ls %d lw %d\n", 2031 proc->pid, thread->pid, 2032 cmd == BC_INCREFS_DONE ? 
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 2033 node->debug_id, node->local_strong_refs, node->local_weak_refs); 2034 break; 2035 } 2036 case BC_ATTEMPT_ACQUIRE: 2037 pr_err("BC_ATTEMPT_ACQUIRE not supported\n"); 2038 return -EINVAL; 2039 case BC_ACQUIRE_RESULT: 2040 pr_err("BC_ACQUIRE_RESULT not supported\n"); 2041 return -EINVAL; 2042 2043 case BC_FREE_BUFFER: { 2044 binder_uintptr_t data_ptr; 2045 struct binder_buffer *buffer; 2046 2047 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr)) 2048 return -EFAULT; 2049 ptr += sizeof(binder_uintptr_t); 2050 2051 buffer = binder_alloc_prepare_to_free(&proc->alloc, 2052 data_ptr); 2053 if (buffer == NULL) { 2054 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n", 2055 proc->pid, thread->pid, (u64)data_ptr); 2056 break; 2057 } 2058 if (!buffer->allow_user_free) { 2059 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n", 2060 proc->pid, thread->pid, (u64)data_ptr); 2061 break; 2062 } 2063 binder_debug(BINDER_DEBUG_FREE_BUFFER, 2064 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n", 2065 proc->pid, thread->pid, (u64)data_ptr, 2066 buffer->debug_id, 2067 buffer->transaction ? "active" : "finished"); 2068 2069 if (buffer->transaction) { 2070 buffer->transaction->buffer = NULL; 2071 buffer->transaction = NULL; 2072 } 2073 if (buffer->async_transaction && buffer->target_node) { 2074 BUG_ON(!buffer->target_node->has_async_transaction); 2075 if (list_empty(&buffer->target_node->async_todo)) 2076 buffer->target_node->has_async_transaction = 0; 2077 else 2078 list_move_tail(buffer->target_node->async_todo.next, &thread->todo); 2079 } 2080 trace_binder_transaction_buffer_release(buffer); 2081 binder_transaction_buffer_release(proc, buffer, NULL); 2082 binder_alloc_free_buf(&proc->alloc, buffer); 2083 break; 2084 } 2085 2086 case BC_TRANSACTION_SG: 2087 case BC_REPLY_SG: { 2088 struct binder_transaction_data_sg tr; 2089 2090 if (copy_from_user(&tr, ptr, sizeof(tr))) 2091 return -EFAULT; 2092 ptr += sizeof(tr); 2093 binder_transaction(proc, thread, &tr.transaction_data, 2094 cmd == BC_REPLY_SG, tr.buffers_size); 2095 break; 2096 } 2097 case BC_TRANSACTION: 2098 case BC_REPLY: { 2099 struct binder_transaction_data tr; 2100 2101 if (copy_from_user(&tr, ptr, sizeof(tr))) 2102 return -EFAULT; 2103 ptr += sizeof(tr); 2104 binder_transaction(proc, thread, &tr, 2105 cmd == BC_REPLY, 0); 2106 break; 2107 } 2108 2109 case BC_REGISTER_LOOPER: 2110 binder_debug(BINDER_DEBUG_THREADS, 2111 "%d:%d BC_REGISTER_LOOPER\n", 2112 proc->pid, thread->pid); 2113 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { 2114 thread->looper |= BINDER_LOOPER_STATE_INVALID; 2115 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", 2116 proc->pid, thread->pid); 2117 } else if (proc->requested_threads == 0) { 2118 thread->looper |= BINDER_LOOPER_STATE_INVALID; 2119 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n", 2120 proc->pid, thread->pid); 2121 } else { 2122 proc->requested_threads--; 2123 proc->requested_threads_started++; 2124 } 2125 thread->looper |= BINDER_LOOPER_STATE_REGISTERED; 2126 break; 2127 case BC_ENTER_LOOPER: 2128 binder_debug(BINDER_DEBUG_THREADS, 2129 "%d:%d BC_ENTER_LOOPER\n", 2130 proc->pid, thread->pid); 2131 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { 2132 thread->looper |= BINDER_LOOPER_STATE_INVALID; 2133 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n", 2134 proc->pid, thread->pid); 2135 } 2136 thread->looper 
|= BINDER_LOOPER_STATE_ENTERED; 2137 break; 2138 case BC_EXIT_LOOPER: 2139 binder_debug(BINDER_DEBUG_THREADS, 2140 "%d:%d BC_EXIT_LOOPER\n", 2141 proc->pid, thread->pid); 2142 thread->looper |= BINDER_LOOPER_STATE_EXITED; 2143 break; 2144 2145 case BC_REQUEST_DEATH_NOTIFICATION: 2146 case BC_CLEAR_DEATH_NOTIFICATION: { 2147 uint32_t target; 2148 binder_uintptr_t cookie; 2149 struct binder_ref *ref; 2150 struct binder_ref_death *death; 2151 2152 if (get_user(target, (uint32_t __user *)ptr)) 2153 return -EFAULT; 2154 ptr += sizeof(uint32_t); 2155 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 2156 return -EFAULT; 2157 ptr += sizeof(binder_uintptr_t); 2158 ref = binder_get_ref(proc, target, false); 2159 if (ref == NULL) { 2160 binder_user_error("%d:%d %s invalid ref %d\n", 2161 proc->pid, thread->pid, 2162 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 2163 "BC_REQUEST_DEATH_NOTIFICATION" : 2164 "BC_CLEAR_DEATH_NOTIFICATION", 2165 target); 2166 break; 2167 } 2168 2169 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 2170 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n", 2171 proc->pid, thread->pid, 2172 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 2173 "BC_REQUEST_DEATH_NOTIFICATION" : 2174 "BC_CLEAR_DEATH_NOTIFICATION", 2175 (u64)cookie, ref->debug_id, ref->desc, 2176 ref->strong, ref->weak, ref->node->debug_id); 2177 2178 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 2179 if (ref->death) { 2180 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", 2181 proc->pid, thread->pid); 2182 break; 2183 } 2184 death = kzalloc(sizeof(*death), GFP_KERNEL); 2185 if (death == NULL) { 2186 thread->return_error = BR_ERROR; 2187 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 2188 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", 2189 proc->pid, thread->pid); 2190 break; 2191 } 2192 binder_stats_created(BINDER_STAT_DEATH); 2193 INIT_LIST_HEAD(&death->work.entry); 2194 death->cookie = cookie; 2195 ref->death = death; 2196 if (ref->node->proc == NULL) { 2197 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 2198 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { 2199 list_add_tail(&ref->death->work.entry, &thread->todo); 2200 } else { 2201 list_add_tail(&ref->death->work.entry, &proc->todo); 2202 wake_up_interruptible(&proc->wait); 2203 } 2204 } 2205 } else { 2206 if (ref->death == NULL) { 2207 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", 2208 proc->pid, thread->pid); 2209 break; 2210 } 2211 death = ref->death; 2212 if (death->cookie != cookie) { 2213 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n", 2214 proc->pid, thread->pid, 2215 (u64)death->cookie, 2216 (u64)cookie); 2217 break; 2218 } 2219 ref->death = NULL; 2220 if (list_empty(&death->work.entry)) { 2221 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 2222 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { 2223 list_add_tail(&death->work.entry, &thread->todo); 2224 } else { 2225 list_add_tail(&death->work.entry, &proc->todo); 2226 wake_up_interruptible(&proc->wait); 2227 } 2228 } else { 2229 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); 2230 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; 2231 } 2232 } 2233 } break; 2234 case BC_DEAD_BINDER_DONE: { 2235 struct binder_work *w; 2236 binder_uintptr_t cookie; 2237 struct binder_ref_death *death = NULL; 2238 2239 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 2240 return -EFAULT; 
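/*
 * Userspace has acknowledged a BR_DEAD_BINDER it received earlier:
 * find the matching entry on proc->delivered_death by cookie and,
 * if a clear request arrived in the meantime, requeue the work as
 * a clear notification.
 */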
2241 2242 ptr += sizeof(cookie); 2243 list_for_each_entry(w, &proc->delivered_death, entry) { 2244 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); 2245 2246 if (tmp_death->cookie == cookie) { 2247 death = tmp_death; 2248 break; 2249 } 2250 } 2251 binder_debug(BINDER_DEBUG_DEAD_BINDER, 2252 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n", 2253 proc->pid, thread->pid, (u64)cookie, 2254 death); 2255 if (death == NULL) { 2256 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n", 2257 proc->pid, thread->pid, (u64)cookie); 2258 break; 2259 } 2260 2261 list_del_init(&death->work.entry); 2262 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { 2263 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 2264 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { 2265 list_add_tail(&death->work.entry, &thread->todo); 2266 } else { 2267 list_add_tail(&death->work.entry, &proc->todo); 2268 wake_up_interruptible(&proc->wait); 2269 } 2270 } 2271 } break; 2272 2273 default: 2274 pr_err("%d:%d unknown command %d\n", 2275 proc->pid, thread->pid, cmd); 2276 return -EINVAL; 2277 } 2278 *consumed = ptr - buffer; 2279 } 2280 return 0; 2281 } 2282 2283 static void binder_stat_br(struct binder_proc *proc, 2284 struct binder_thread *thread, uint32_t cmd) 2285 { 2286 trace_binder_return(cmd); 2287 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { 2288 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]); 2289 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]); 2290 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]); 2291 } 2292 } 2293 2294 static int binder_has_proc_work(struct binder_proc *proc, 2295 struct binder_thread *thread) 2296 { 2297 return !list_empty(&proc->todo) || thread->looper_need_return; 2298 } 2299 2300 static int binder_has_thread_work(struct binder_thread *thread) 2301 { 2302 return !list_empty(&thread->todo) || thread->return_error != BR_OK || 2303 thread->looper_need_return; 2304 } 2305 2306 static int binder_put_node_cmd(struct binder_proc *proc, 2307 struct binder_thread *thread, 2308 void __user **ptrp, 2309 binder_uintptr_t node_ptr, 2310 binder_uintptr_t node_cookie, 2311 int node_debug_id, 2312 uint32_t cmd, const char *cmd_name) 2313 { 2314 void __user *ptr = *ptrp; 2315 2316 if (put_user(cmd, (uint32_t __user *)ptr)) 2317 return -EFAULT; 2318 ptr += sizeof(uint32_t); 2319 2320 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr)) 2321 return -EFAULT; 2322 ptr += sizeof(binder_uintptr_t); 2323 2324 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr)) 2325 return -EFAULT; 2326 ptr += sizeof(binder_uintptr_t); 2327 2328 binder_stat_br(proc, thread, cmd); 2329 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n", 2330 proc->pid, thread->pid, cmd_name, node_debug_id, 2331 (u64)node_ptr, (u64)node_cookie); 2332 2333 *ptrp = ptr; 2334 return 0; 2335 } 2336 2337 static int binder_thread_read(struct binder_proc *proc, 2338 struct binder_thread *thread, 2339 binder_uintptr_t binder_buffer, size_t size, 2340 binder_size_t *consumed, int non_block) 2341 { 2342 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 2343 void __user *ptr = buffer + *consumed; 2344 void __user *end = buffer + size; 2345 2346 int ret = 0; 2347 int wait_for_proc_work; 2348 2349 if (*consumed == 0) { 2350 if (put_user(BR_NOOP, (uint32_t __user *)ptr)) 2351 return -EFAULT; 2352 ptr += sizeof(uint32_t); 2353 } 2354 2355 retry: 2356 wait_for_proc_work = thread->transaction_stack == NULL && 2357 list_empty(&thread->todo); 
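/*
 * A thread with a transaction stack or pending thread-local work must
 * service that first, so it does not sleep on the shared proc->todo
 * queue; only idle looper threads compete for process-wide work.
 */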
2358 2359 if (thread->return_error != BR_OK && ptr < end) { 2360 if (thread->return_error2 != BR_OK) { 2361 if (put_user(thread->return_error2, (uint32_t __user *)ptr)) 2362 return -EFAULT; 2363 ptr += sizeof(uint32_t); 2364 binder_stat_br(proc, thread, thread->return_error2); 2365 if (ptr == end) 2366 goto done; 2367 thread->return_error2 = BR_OK; 2368 } 2369 if (put_user(thread->return_error, (uint32_t __user *)ptr)) 2370 return -EFAULT; 2371 ptr += sizeof(uint32_t); 2372 binder_stat_br(proc, thread, thread->return_error); 2373 thread->return_error = BR_OK; 2374 goto done; 2375 } 2376 2377 2378 thread->looper |= BINDER_LOOPER_STATE_WAITING; 2379 if (wait_for_proc_work) 2380 proc->ready_threads++; 2381 2382 binder_unlock(__func__); 2383 2384 trace_binder_wait_for_work(wait_for_proc_work, 2385 !!thread->transaction_stack, 2386 !list_empty(&thread->todo)); 2387 if (wait_for_proc_work) { 2388 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 2389 BINDER_LOOPER_STATE_ENTERED))) { 2390 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n", 2391 proc->pid, thread->pid, thread->looper); 2392 wait_event_interruptible(binder_user_error_wait, 2393 binder_stop_on_user_error < 2); 2394 } 2395 binder_set_nice(proc->default_priority); 2396 if (non_block) { 2397 if (!binder_has_proc_work(proc, thread)) 2398 ret = -EAGAIN; 2399 } else 2400 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread)); 2401 } else { 2402 if (non_block) { 2403 if (!binder_has_thread_work(thread)) 2404 ret = -EAGAIN; 2405 } else 2406 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread)); 2407 } 2408 2409 binder_lock(__func__); 2410 2411 if (wait_for_proc_work) 2412 proc->ready_threads--; 2413 thread->looper &= ~BINDER_LOOPER_STATE_WAITING; 2414 2415 if (ret) 2416 return ret; 2417 2418 while (1) { 2419 uint32_t cmd; 2420 struct binder_transaction_data tr; 2421 struct binder_work *w; 2422 struct binder_transaction *t = NULL; 2423 2424 if (!list_empty(&thread->todo)) { 2425 w = list_first_entry(&thread->todo, struct binder_work, 2426 entry); 2427 } else if (!list_empty(&proc->todo) && wait_for_proc_work) { 2428 w = list_first_entry(&proc->todo, struct binder_work, 2429 entry); 2430 } else { 2431 /* no data added */ 2432 if (ptr - buffer == 4 && !thread->looper_need_return) 2433 goto retry; 2434 break; 2435 } 2436 2437 if (end - ptr < sizeof(tr) + 4) 2438 break; 2439 2440 switch (w->type) { 2441 case BINDER_WORK_TRANSACTION: { 2442 t = container_of(w, struct binder_transaction, work); 2443 } break; 2444 case BINDER_WORK_TRANSACTION_COMPLETE: { 2445 cmd = BR_TRANSACTION_COMPLETE; 2446 if (put_user(cmd, (uint32_t __user *)ptr)) 2447 return -EFAULT; 2448 ptr += sizeof(uint32_t); 2449 2450 binder_stat_br(proc, thread, cmd); 2451 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, 2452 "%d:%d BR_TRANSACTION_COMPLETE\n", 2453 proc->pid, thread->pid); 2454 2455 list_del(&w->entry); 2456 kfree(w); 2457 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 2458 } break; 2459 case BINDER_WORK_NODE: { 2460 struct binder_node *node = container_of(w, struct binder_node, work); 2461 int strong, weak; 2462 binder_uintptr_t node_ptr = node->ptr; 2463 binder_uintptr_t node_cookie = node->cookie; 2464 int node_debug_id = node->debug_id; 2465 int has_weak_ref; 2466 int has_strong_ref; 2467 void __user *orig_ptr = ptr; 2468 2469 BUG_ON(proc != node->proc); 2470 strong = node->internal_strong_refs || 2471 
node->local_strong_refs; 2472 weak = !hlist_empty(&node->refs) || 2473 node->local_weak_refs || strong; 2474 has_strong_ref = node->has_strong_ref; 2475 has_weak_ref = node->has_weak_ref; 2476 2477 if (weak && !has_weak_ref) { 2478 node->has_weak_ref = 1; 2479 node->pending_weak_ref = 1; 2480 node->local_weak_refs++; 2481 } 2482 if (strong && !has_strong_ref) { 2483 node->has_strong_ref = 1; 2484 node->pending_strong_ref = 1; 2485 node->local_strong_refs++; 2486 } 2487 if (!strong && has_strong_ref) 2488 node->has_strong_ref = 0; 2489 if (!weak && has_weak_ref) 2490 node->has_weak_ref = 0; 2491 list_del(&w->entry); 2492 2493 if (!weak && !strong) { 2494 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 2495 "%d:%d node %d u%016llx c%016llx deleted\n", 2496 proc->pid, thread->pid, 2497 node_debug_id, 2498 (u64)node_ptr, 2499 (u64)node_cookie); 2500 rb_erase(&node->rb_node, &proc->nodes); 2501 kfree(node); 2502 binder_stats_deleted(BINDER_STAT_NODE); 2503 } 2504 if (weak && !has_weak_ref) 2505 ret = binder_put_node_cmd( 2506 proc, thread, &ptr, node_ptr, 2507 node_cookie, node_debug_id, 2508 BR_INCREFS, "BR_INCREFS"); 2509 if (!ret && strong && !has_strong_ref) 2510 ret = binder_put_node_cmd( 2511 proc, thread, &ptr, node_ptr, 2512 node_cookie, node_debug_id, 2513 BR_ACQUIRE, "BR_ACQUIRE"); 2514 if (!ret && !strong && has_strong_ref) 2515 ret = binder_put_node_cmd( 2516 proc, thread, &ptr, node_ptr, 2517 node_cookie, node_debug_id, 2518 BR_RELEASE, "BR_RELEASE"); 2519 if (!ret && !weak && has_weak_ref) 2520 ret = binder_put_node_cmd( 2521 proc, thread, &ptr, node_ptr, 2522 node_cookie, node_debug_id, 2523 BR_DECREFS, "BR_DECREFS"); 2524 if (orig_ptr == ptr) 2525 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 2526 "%d:%d node %d u%016llx c%016llx state unchanged\n", 2527 proc->pid, thread->pid, 2528 node_debug_id, 2529 (u64)node_ptr, 2530 (u64)node_cookie); 2531 if (ret) 2532 return ret; 2533 } break; 2534 case BINDER_WORK_DEAD_BINDER: 2535 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 2536 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 2537 struct binder_ref_death *death; 2538 uint32_t cmd; 2539 2540 death = container_of(w, struct binder_ref_death, work); 2541 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) 2542 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; 2543 else 2544 cmd = BR_DEAD_BINDER; 2545 if (put_user(cmd, (uint32_t __user *)ptr)) 2546 return -EFAULT; 2547 ptr += sizeof(uint32_t); 2548 if (put_user(death->cookie, 2549 (binder_uintptr_t __user *)ptr)) 2550 return -EFAULT; 2551 ptr += sizeof(binder_uintptr_t); 2552 binder_stat_br(proc, thread, cmd); 2553 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 2554 "%d:%d %s %016llx\n", 2555 proc->pid, thread->pid, 2556 cmd == BR_DEAD_BINDER ? 
2557 "BR_DEAD_BINDER" : 2558 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 2559 (u64)death->cookie); 2560 2561 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { 2562 list_del(&w->entry); 2563 kfree(death); 2564 binder_stats_deleted(BINDER_STAT_DEATH); 2565 } else 2566 list_move(&w->entry, &proc->delivered_death); 2567 if (cmd == BR_DEAD_BINDER) 2568 goto done; /* DEAD_BINDER notifications can cause transactions */ 2569 } break; 2570 } 2571 2572 if (!t) 2573 continue; 2574 2575 BUG_ON(t->buffer == NULL); 2576 if (t->buffer->target_node) { 2577 struct binder_node *target_node = t->buffer->target_node; 2578 2579 tr.target.ptr = target_node->ptr; 2580 tr.cookie = target_node->cookie; 2581 t->saved_priority = task_nice(current); 2582 if (t->priority < target_node->min_priority && 2583 !(t->flags & TF_ONE_WAY)) 2584 binder_set_nice(t->priority); 2585 else if (!(t->flags & TF_ONE_WAY) || 2586 t->saved_priority > target_node->min_priority) 2587 binder_set_nice(target_node->min_priority); 2588 cmd = BR_TRANSACTION; 2589 } else { 2590 tr.target.ptr = 0; 2591 tr.cookie = 0; 2592 cmd = BR_REPLY; 2593 } 2594 tr.code = t->code; 2595 tr.flags = t->flags; 2596 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); 2597 2598 if (t->from) { 2599 struct task_struct *sender = t->from->proc->tsk; 2600 2601 tr.sender_pid = task_tgid_nr_ns(sender, 2602 task_active_pid_ns(current)); 2603 } else { 2604 tr.sender_pid = 0; 2605 } 2606 2607 tr.data_size = t->buffer->data_size; 2608 tr.offsets_size = t->buffer->offsets_size; 2609 tr.data.ptr.buffer = (binder_uintptr_t) 2610 ((uintptr_t)t->buffer->data + 2611 binder_alloc_get_user_buffer_offset(&proc->alloc)); 2612 tr.data.ptr.offsets = tr.data.ptr.buffer + 2613 ALIGN(t->buffer->data_size, 2614 sizeof(void *)); 2615 2616 if (put_user(cmd, (uint32_t __user *)ptr)) 2617 return -EFAULT; 2618 ptr += sizeof(uint32_t); 2619 if (copy_to_user(ptr, &tr, sizeof(tr))) 2620 return -EFAULT; 2621 ptr += sizeof(tr); 2622 2623 trace_binder_transaction_received(t); 2624 binder_stat_br(proc, thread, cmd); 2625 binder_debug(BINDER_DEBUG_TRANSACTION, 2626 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", 2627 proc->pid, thread->pid, 2628 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : 2629 "BR_REPLY", 2630 t->debug_id, t->from ? t->from->proc->pid : 0, 2631 t->from ? 
t->from->pid : 0, cmd, 2632 t->buffer->data_size, t->buffer->offsets_size, 2633 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets); 2634 2635 list_del(&t->work.entry); 2636 t->buffer->allow_user_free = 1; 2637 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { 2638 t->to_parent = thread->transaction_stack; 2639 t->to_thread = thread; 2640 thread->transaction_stack = t; 2641 } else { 2642 binder_free_transaction(t); 2643 } 2644 break; 2645 } 2646 2647 done: 2648 2649 *consumed = ptr - buffer; 2650 if (proc->requested_threads + proc->ready_threads == 0 && 2651 proc->requested_threads_started < proc->max_threads && 2652 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 2653 BINDER_LOOPER_STATE_ENTERED)) 2654 /* the user-space code fails to spawn a new thread if we leave this out */) { 2655 proc->requested_threads++; 2656 binder_debug(BINDER_DEBUG_THREADS, 2657 "%d:%d BR_SPAWN_LOOPER\n", 2658 proc->pid, thread->pid); 2659 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) 2660 return -EFAULT; 2661 binder_stat_br(proc, thread, BR_SPAWN_LOOPER); 2662 } 2663 return 0; 2664 } 2665 2666 static void binder_release_work(struct list_head *list) 2667 { 2668 struct binder_work *w; 2669 2670 while (!list_empty(list)) { 2671 w = list_first_entry(list, struct binder_work, entry); 2672 list_del_init(&w->entry); 2673 switch (w->type) { 2674 case BINDER_WORK_TRANSACTION: { 2675 struct binder_transaction *t; 2676 2677 t = container_of(w, struct binder_transaction, work); 2678 if (t->buffer->target_node && 2679 !(t->flags & TF_ONE_WAY)) { 2680 binder_send_failed_reply(t, BR_DEAD_REPLY); 2681 } else { 2682 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 2683 "undelivered transaction %d\n", 2684 t->debug_id); 2685 binder_free_transaction(t); 2686 } 2687 } break; 2688 case BINDER_WORK_TRANSACTION_COMPLETE: { 2689 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 2690 "undelivered TRANSACTION_COMPLETE\n"); 2691 kfree(w); 2692 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 2693 } break; 2694 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 2695 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 2696 struct binder_ref_death *death; 2697 2698 death = container_of(w, struct binder_ref_death, work); 2699 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 2700 "undelivered death notification, %016llx\n", 2701 (u64)death->cookie); 2702 kfree(death); 2703 binder_stats_deleted(BINDER_STAT_DEATH); 2704 } break; 2705 default: 2706 pr_err("unexpected work type, %d, not freed\n", 2707 w->type); 2708 break; 2709 } 2710 } 2711 2712 } 2713 2714 static struct binder_thread *binder_get_thread(struct binder_proc *proc) 2715 { 2716 struct binder_thread *thread = NULL; 2717 struct rb_node *parent = NULL; 2718 struct rb_node **p = &proc->threads.rb_node; 2719 2720 while (*p) { 2721 parent = *p; 2722 thread = rb_entry(parent, struct binder_thread, rb_node); 2723 2724 if (current->pid < thread->pid) 2725 p = &(*p)->rb_left; 2726 else if (current->pid > thread->pid) 2727 p = &(*p)->rb_right; 2728 else 2729 break; 2730 } 2731 if (*p == NULL) { 2732 thread = kzalloc(sizeof(*thread), GFP_KERNEL); 2733 if (thread == NULL) 2734 return NULL; 2735 binder_stats_created(BINDER_STAT_THREAD); 2736 thread->proc = proc; 2737 thread->pid = current->pid; 2738 init_waitqueue_head(&thread->wait); 2739 INIT_LIST_HEAD(&thread->todo); 2740 rb_link_node(&thread->rb_node, parent, p); 2741 rb_insert_color(&thread->rb_node, &proc->threads); 2742 thread->looper_need_return = true; 2743 thread->return_error = BR_OK; 2744 thread->return_error2 = BR_OK; 2745 } 2746
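/*
 * Either the rb-tree walk above found an existing entry for
 * current->pid, or a fresh binder_thread was inserted for it;
 * NULL is returned only on allocation failure.
 */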
return thread; 2747 } 2748 2749 static int binder_free_thread(struct binder_proc *proc, 2750 struct binder_thread *thread) 2751 { 2752 struct binder_transaction *t; 2753 struct binder_transaction *send_reply = NULL; 2754 int active_transactions = 0; 2755 2756 rb_erase(&thread->rb_node, &proc->threads); 2757 t = thread->transaction_stack; 2758 if (t && t->to_thread == thread) 2759 send_reply = t; 2760 while (t) { 2761 active_transactions++; 2762 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 2763 "release %d:%d transaction %d %s, still active\n", 2764 proc->pid, thread->pid, 2765 t->debug_id, 2766 (t->to_thread == thread) ? "in" : "out"); 2767 2768 if (t->to_thread == thread) { 2769 t->to_proc = NULL; 2770 t->to_thread = NULL; 2771 if (t->buffer) { 2772 t->buffer->transaction = NULL; 2773 t->buffer = NULL; 2774 } 2775 t = t->to_parent; 2776 } else if (t->from == thread) { 2777 t->from = NULL; 2778 t = t->from_parent; 2779 } else 2780 BUG(); 2781 } 2782 if (send_reply) 2783 binder_send_failed_reply(send_reply, BR_DEAD_REPLY); 2784 binder_release_work(&thread->todo); 2785 kfree(thread); 2786 binder_stats_deleted(BINDER_STAT_THREAD); 2787 return active_transactions; 2788 } 2789 2790 static unsigned int binder_poll(struct file *filp, 2791 struct poll_table_struct *wait) 2792 { 2793 struct binder_proc *proc = filp->private_data; 2794 struct binder_thread *thread = NULL; 2795 int wait_for_proc_work; 2796 2797 binder_lock(__func__); 2798 2799 thread = binder_get_thread(proc); 2800 2801 wait_for_proc_work = thread->transaction_stack == NULL && 2802 list_empty(&thread->todo) && thread->return_error == BR_OK; 2803 2804 binder_unlock(__func__); 2805 2806 if (wait_for_proc_work) { 2807 if (binder_has_proc_work(proc, thread)) 2808 return POLLIN; 2809 poll_wait(filp, &proc->wait, wait); 2810 if (binder_has_proc_work(proc, thread)) 2811 return POLLIN; 2812 } else { 2813 if (binder_has_thread_work(thread)) 2814 return POLLIN; 2815 poll_wait(filp, &thread->wait, wait); 2816 if (binder_has_thread_work(thread)) 2817 return POLLIN; 2818 } 2819 return 0; 2820 } 2821 2822 static int binder_ioctl_write_read(struct file *filp, 2823 unsigned int cmd, unsigned long arg, 2824 struct binder_thread *thread) 2825 { 2826 int ret = 0; 2827 struct binder_proc *proc = filp->private_data; 2828 unsigned int size = _IOC_SIZE(cmd); 2829 void __user *ubuf = (void __user *)arg; 2830 struct binder_write_read bwr; 2831 2832 if (size != sizeof(struct binder_write_read)) { 2833 ret = -EINVAL; 2834 goto out; 2835 } 2836 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { 2837 ret = -EFAULT; 2838 goto out; 2839 } 2840 binder_debug(BINDER_DEBUG_READ_WRITE, 2841 "%d:%d write %lld at %016llx, read %lld at %016llx\n", 2842 proc->pid, thread->pid, 2843 (u64)bwr.write_size, (u64)bwr.write_buffer, 2844 (u64)bwr.read_size, (u64)bwr.read_buffer); 2845 2846 if (bwr.write_size > 0) { 2847 ret = binder_thread_write(proc, thread, 2848 bwr.write_buffer, 2849 bwr.write_size, 2850 &bwr.write_consumed); 2851 trace_binder_write_done(ret); 2852 if (ret < 0) { 2853 bwr.read_consumed = 0; 2854 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 2855 ret = -EFAULT; 2856 goto out; 2857 } 2858 } 2859 if (bwr.read_size > 0) { 2860 ret = binder_thread_read(proc, thread, bwr.read_buffer, 2861 bwr.read_size, 2862 &bwr.read_consumed, 2863 filp->f_flags & O_NONBLOCK); 2864 trace_binder_read_done(ret); 2865 if (!list_empty(&proc->todo)) 2866 wake_up_interruptible(&proc->wait); 2867 if (ret < 0) { 2868 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 2869 ret = -EFAULT; 2870 goto out; 2871 
} 2872 } 2873 binder_debug(BINDER_DEBUG_READ_WRITE, 2874 "%d:%d wrote %lld of %lld, read return %lld of %lld\n", 2875 proc->pid, thread->pid, 2876 (u64)bwr.write_consumed, (u64)bwr.write_size, 2877 (u64)bwr.read_consumed, (u64)bwr.read_size); 2878 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { 2879 ret = -EFAULT; 2880 goto out; 2881 } 2882 out: 2883 return ret; 2884 } 2885 2886 static int binder_ioctl_set_ctx_mgr(struct file *filp) 2887 { 2888 int ret = 0; 2889 struct binder_proc *proc = filp->private_data; 2890 struct binder_context *context = proc->context; 2891 struct binder_node *new_node; 2892 kuid_t curr_euid = current_euid(); 2893 2894 mutex_lock(&context->context_mgr_node_lock); 2895 if (context->binder_context_mgr_node) { 2896 pr_err("BINDER_SET_CONTEXT_MGR already set\n"); 2897 ret = -EBUSY; 2898 goto out; 2899 } 2900 ret = security_binder_set_context_mgr(proc->tsk); 2901 if (ret < 0) 2902 goto out; 2903 if (uid_valid(context->binder_context_mgr_uid)) { 2904 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) { 2905 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", 2906 from_kuid(&init_user_ns, curr_euid), 2907 from_kuid(&init_user_ns, 2908 context->binder_context_mgr_uid)); 2909 ret = -EPERM; 2910 goto out; 2911 } 2912 } else { 2913 context->binder_context_mgr_uid = curr_euid; 2914 } 2915 new_node = binder_new_node(proc, 0, 0); 2916 if (!new_node) { 2917 ret = -ENOMEM; 2918 goto out; 2919 } 2920 new_node->local_weak_refs++; 2921 new_node->local_strong_refs++; 2922 new_node->has_strong_ref = 1; 2923 new_node->has_weak_ref = 1; 2924 context->binder_context_mgr_node = new_node; 2925 out: 2926 mutex_unlock(&context->context_mgr_node_lock); 2927 return ret; 2928 } 2929 2930 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 2931 { 2932 int ret; 2933 struct binder_proc *proc = filp->private_data; 2934 struct binder_thread *thread; 2935 unsigned int size = _IOC_SIZE(cmd); 2936 void __user *ubuf = (void __user *)arg; 2937 2938 /*pr_info("binder_ioctl: %d:%d %x %lx\n", 2939 proc->pid, current->pid, cmd, arg);*/ 2940 2941 trace_binder_ioctl(cmd, arg); 2942 2943 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 2944 if (ret) 2945 goto err_unlocked; 2946 2947 binder_lock(__func__); 2948 thread = binder_get_thread(proc); 2949 if (thread == NULL) { 2950 ret = -ENOMEM; 2951 goto err; 2952 } 2953 2954 switch (cmd) { 2955 case BINDER_WRITE_READ: 2956 ret = binder_ioctl_write_read(filp, cmd, arg, thread); 2957 if (ret) 2958 goto err; 2959 break; 2960 case BINDER_SET_MAX_THREADS: 2961 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) { 2962 ret = -EINVAL; 2963 goto err; 2964 } 2965 break; 2966 case BINDER_SET_CONTEXT_MGR: 2967 ret = binder_ioctl_set_ctx_mgr(filp); 2968 if (ret) 2969 goto err; 2970 break; 2971 case BINDER_THREAD_EXIT: 2972 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", 2973 proc->pid, thread->pid); 2974 binder_free_thread(proc, thread); 2975 thread = NULL; 2976 break; 2977 case BINDER_VERSION: { 2978 struct binder_version __user *ver = ubuf; 2979 2980 if (size != sizeof(struct binder_version)) { 2981 ret = -EINVAL; 2982 goto err; 2983 } 2984 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, 2985 &ver->protocol_version)) { 2986 ret = -EINVAL; 2987 goto err; 2988 } 2989 break; 2990 } 2991 default: 2992 ret = -EINVAL; 2993 goto err; 2994 } 2995 ret = 0; 2996 err: 2997 if (thread) 2998 thread->looper_need_return = false; 2999 binder_unlock(__func__); 3000 
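/*
 * If a userspace error has stopped the driver
 * (binder_stop_on_user_error >= 2), park here until the
 * stop_on_user_error module parameter is rewritten; this is a
 * debugging aid, not part of the normal ioctl path.
 */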
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 3001 if (ret && ret != -ERESTARTSYS) 3002 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); 3003 err_unlocked: 3004 trace_binder_ioctl_done(ret); 3005 return ret; 3006 } 3007 3008 static void binder_vma_open(struct vm_area_struct *vma) 3009 { 3010 struct binder_proc *proc = vma->vm_private_data; 3011 3012 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 3013 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 3014 proc->pid, vma->vm_start, vma->vm_end, 3015 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 3016 (unsigned long)pgprot_val(vma->vm_page_prot)); 3017 } 3018 3019 static void binder_vma_close(struct vm_area_struct *vma) 3020 { 3021 struct binder_proc *proc = vma->vm_private_data; 3022 3023 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 3024 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 3025 proc->pid, vma->vm_start, vma->vm_end, 3026 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 3027 (unsigned long)pgprot_val(vma->vm_page_prot)); 3028 binder_alloc_vma_close(&proc->alloc); 3029 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); 3030 } 3031 3032 static int binder_vm_fault(struct vm_fault *vmf) 3033 { 3034 return VM_FAULT_SIGBUS; 3035 } 3036 3037 static const struct vm_operations_struct binder_vm_ops = { 3038 .open = binder_vma_open, 3039 .close = binder_vma_close, 3040 .fault = binder_vm_fault, 3041 }; 3042 3043 static int binder_mmap(struct file *filp, struct vm_area_struct *vma) 3044 { 3045 int ret; 3046 struct binder_proc *proc = filp->private_data; 3047 const char *failure_string; 3048 3049 if (proc->tsk != current->group_leader) 3050 return -EINVAL; 3051 3052 if ((vma->vm_end - vma->vm_start) > SZ_4M) 3053 vma->vm_end = vma->vm_start + SZ_4M; 3054 3055 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 3056 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", 3057 __func__, proc->pid, vma->vm_start, vma->vm_end, 3058 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 3059 (unsigned long)pgprot_val(vma->vm_page_prot)); 3060 3061 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { 3062 ret = -EPERM; 3063 failure_string = "bad vm_flags"; 3064 goto err_bad_arg; 3065 } 3066 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; 3067 vma->vm_ops = &binder_vm_ops; 3068 vma->vm_private_data = proc; 3069 3070 ret = binder_alloc_mmap_handler(&proc->alloc, vma); 3071 if (ret) 3072 return ret; 3073 proc->files = get_files_struct(current); 3074 return 0; 3075 3076 err_bad_arg: 3077 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n", 3078 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); 3079 return ret; 3080 } 3081 3082 static int binder_open(struct inode *nodp, struct file *filp) 3083 { 3084 struct binder_proc *proc; 3085 struct binder_device *binder_dev; 3086 3087 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n", 3088 current->group_leader->pid, current->pid); 3089 3090 proc = kzalloc(sizeof(*proc), GFP_KERNEL); 3091 if (proc == NULL) 3092 return -ENOMEM; 3093 get_task_struct(current->group_leader); 3094 proc->tsk = current->group_leader; 3095 INIT_LIST_HEAD(&proc->todo); 3096 init_waitqueue_head(&proc->wait); 3097 proc->default_priority = task_nice(current); 3098 binder_dev = container_of(filp->private_data, struct binder_device, 3099 miscdev); 3100 proc->context = &binder_dev->context; 3101 binder_alloc_init(&proc->alloc); 3102 3103 binder_lock(__func__); 3104 3105 binder_stats_created(BINDER_STAT_PROC); 3106 proc->pid = current->group_leader->pid; 3107 
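/*
 * One binder_proc exists per open of the device; it is keyed by the
 * thread-group leader so every thread using this fd shares the same
 * work queues, node table and allocator.
 */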
INIT_LIST_HEAD(&proc->delivered_death); 3108 filp->private_data = proc; 3109 3110 binder_unlock(__func__); 3111 3112 mutex_lock(&binder_procs_lock); 3113 hlist_add_head(&proc->proc_node, &binder_procs); 3114 mutex_unlock(&binder_procs_lock); 3115 3116 if (binder_debugfs_dir_entry_proc) { 3117 char strbuf[11]; 3118 3119 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 3120 /* 3121 * proc debug entries are shared between contexts, so 3122 * this will fail if the process tries to open the driver 3123 * again with a different context. The printing code will 3124 * print all contexts that a given PID has anyway, so this 3125 * is not a problem. 3126 */ 3127 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO, 3128 binder_debugfs_dir_entry_proc, 3129 (void *)(unsigned long)proc->pid, 3130 &binder_proc_fops); 3131 } 3132 3133 return 0; 3134 } 3135 3136 static int binder_flush(struct file *filp, fl_owner_t id) 3137 { 3138 struct binder_proc *proc = filp->private_data; 3139 3140 binder_defer_work(proc, BINDER_DEFERRED_FLUSH); 3141 3142 return 0; 3143 } 3144 3145 static void binder_deferred_flush(struct binder_proc *proc) 3146 { 3147 struct rb_node *n; 3148 int wake_count = 0; 3149 3150 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { 3151 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); 3152 3153 thread->looper_need_return = true; 3154 if (thread->looper & BINDER_LOOPER_STATE_WAITING) { 3155 wake_up_interruptible(&thread->wait); 3156 wake_count++; 3157 } 3158 } 3159 wake_up_interruptible_all(&proc->wait); 3160 3161 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 3162 "binder_flush: %d woke %d threads\n", proc->pid, 3163 wake_count); 3164 } 3165 3166 static int binder_release(struct inode *nodp, struct file *filp) 3167 { 3168 struct binder_proc *proc = filp->private_data; 3169 3170 debugfs_remove(proc->debugfs_entry); 3171 binder_defer_work(proc, BINDER_DEFERRED_RELEASE); 3172 3173 return 0; 3174 } 3175 3176 static int binder_node_release(struct binder_node *node, int refs) 3177 { 3178 struct binder_ref *ref; 3179 int death = 0; 3180 3181 list_del_init(&node->work.entry); 3182 binder_release_work(&node->async_todo); 3183 3184 if (hlist_empty(&node->refs)) { 3185 kfree(node); 3186 binder_stats_deleted(BINDER_STAT_NODE); 3187 3188 return refs; 3189 } 3190 3191 node->proc = NULL; 3192 node->local_strong_refs = 0; 3193 node->local_weak_refs = 0; 3194 3195 spin_lock(&binder_dead_nodes_lock); 3196 hlist_add_head(&node->dead_node, &binder_dead_nodes); 3197 spin_unlock(&binder_dead_nodes_lock); 3198 3199 hlist_for_each_entry(ref, &node->refs, node_entry) { 3200 refs++; 3201 3202 if (!ref->death) 3203 continue; 3204 3205 death++; 3206 3207 if (list_empty(&ref->death->work.entry)) { 3208 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 3209 list_add_tail(&ref->death->work.entry, 3210 &ref->proc->todo); 3211 wake_up_interruptible(&ref->proc->wait); 3212 } else 3213 BUG(); 3214 } 3215 3216 binder_debug(BINDER_DEBUG_DEAD_BINDER, 3217 "node %d now dead, refs %d, death %d\n", 3218 node->debug_id, refs, death); 3219 3220 return refs; 3221 } 3222 3223 static void binder_deferred_release(struct binder_proc *proc) 3224 { 3225 struct binder_context *context = proc->context; 3226 struct rb_node *n; 3227 int threads, nodes, incoming_refs, outgoing_refs, active_transactions; 3228 3229 BUG_ON(proc->files); 3230 3231 mutex_lock(&binder_procs_lock); 3232 hlist_del(&proc->proc_node); 3233 mutex_unlock(&binder_procs_lock); 3234 3235 mutex_lock(&context->context_mgr_node_lock); 3236 if
(context->binder_context_mgr_node && 3237 context->binder_context_mgr_node->proc == proc) { 3238 binder_debug(BINDER_DEBUG_DEAD_BINDER, 3239 "%s: %d context_mgr_node gone\n", 3240 __func__, proc->pid); 3241 context->binder_context_mgr_node = NULL; 3242 } 3243 mutex_unlock(&context->context_mgr_node_lock); 3244 3245 threads = 0; 3246 active_transactions = 0; 3247 while ((n = rb_first(&proc->threads))) { 3248 struct binder_thread *thread; 3249 3250 thread = rb_entry(n, struct binder_thread, rb_node); 3251 threads++; 3252 active_transactions += binder_free_thread(proc, thread); 3253 } 3254 3255 nodes = 0; 3256 incoming_refs = 0; 3257 while ((n = rb_first(&proc->nodes))) { 3258 struct binder_node *node; 3259 3260 node = rb_entry(n, struct binder_node, rb_node); 3261 nodes++; 3262 rb_erase(&node->rb_node, &proc->nodes); 3263 incoming_refs = binder_node_release(node, incoming_refs); 3264 } 3265 3266 outgoing_refs = 0; 3267 while ((n = rb_first(&proc->refs_by_desc))) { 3268 struct binder_ref *ref; 3269 3270 ref = rb_entry(n, struct binder_ref, rb_node_desc); 3271 outgoing_refs++; 3272 binder_delete_ref(ref); 3273 } 3274 3275 binder_release_work(&proc->todo); 3276 binder_release_work(&proc->delivered_death); 3277 3278 binder_alloc_deferred_release(&proc->alloc); 3279 binder_stats_deleted(BINDER_STAT_PROC); 3280 3281 put_task_struct(proc->tsk); 3282 3283 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 3284 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n", 3285 __func__, proc->pid, threads, nodes, incoming_refs, 3286 outgoing_refs, active_transactions); 3287 3288 kfree(proc); 3289 } 3290 3291 static void binder_deferred_func(struct work_struct *work) 3292 { 3293 struct binder_proc *proc; 3294 struct files_struct *files; 3295 3296 int defer; 3297 3298 do { 3299 binder_lock(__func__); 3300 mutex_lock(&binder_deferred_lock); 3301 if (!hlist_empty(&binder_deferred_list)) { 3302 proc = hlist_entry(binder_deferred_list.first, 3303 struct binder_proc, deferred_work_node); 3304 hlist_del_init(&proc->deferred_work_node); 3305 defer = proc->deferred_work; 3306 proc->deferred_work = 0; 3307 } else { 3308 proc = NULL; 3309 defer = 0; 3310 } 3311 mutex_unlock(&binder_deferred_lock); 3312 3313 files = NULL; 3314 if (defer & BINDER_DEFERRED_PUT_FILES) { 3315 files = proc->files; 3316 if (files) 3317 proc->files = NULL; 3318 } 3319 3320 if (defer & BINDER_DEFERRED_FLUSH) 3321 binder_deferred_flush(proc); 3322 3323 if (defer & BINDER_DEFERRED_RELEASE) 3324 binder_deferred_release(proc); /* frees proc */ 3325 3326 binder_unlock(__func__); 3327 if (files) 3328 put_files_struct(files); 3329 } while (proc); 3330 } 3331 static DECLARE_WORK(binder_deferred_work, binder_deferred_func); 3332 3333 static void 3334 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) 3335 { 3336 mutex_lock(&binder_deferred_lock); 3337 proc->deferred_work |= defer; 3338 if (hlist_unhashed(&proc->deferred_work_node)) { 3339 hlist_add_head(&proc->deferred_work_node, 3340 &binder_deferred_list); 3341 schedule_work(&binder_deferred_work); 3342 } 3343 mutex_unlock(&binder_deferred_lock); 3344 } 3345 3346 static void print_binder_transaction(struct seq_file *m, const char *prefix, 3347 struct binder_transaction *t) 3348 { 3349 seq_printf(m, 3350 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", 3351 prefix, t->debug_id, t, 3352 t->from ? t->from->proc->pid : 0, 3353 t->from ? t->from->pid : 0, 3354 t->to_proc ? t->to_proc->pid : 0, 3355 t->to_thread ? 
t->to_thread->pid : 0, 3356 t->code, t->flags, t->priority, t->need_reply); 3357 if (t->buffer == NULL) { 3358 seq_puts(m, " buffer free\n"); 3359 return; 3360 } 3361 if (t->buffer->target_node) 3362 seq_printf(m, " node %d", 3363 t->buffer->target_node->debug_id); 3364 seq_printf(m, " size %zd:%zd data %p\n", 3365 t->buffer->data_size, t->buffer->offsets_size, 3366 t->buffer->data); 3367 } 3368 3369 static void print_binder_work(struct seq_file *m, const char *prefix, 3370 const char *transaction_prefix, 3371 struct binder_work *w) 3372 { 3373 struct binder_node *node; 3374 struct binder_transaction *t; 3375 3376 switch (w->type) { 3377 case BINDER_WORK_TRANSACTION: 3378 t = container_of(w, struct binder_transaction, work); 3379 print_binder_transaction(m, transaction_prefix, t); 3380 break; 3381 case BINDER_WORK_TRANSACTION_COMPLETE: 3382 seq_printf(m, "%stransaction complete\n", prefix); 3383 break; 3384 case BINDER_WORK_NODE: 3385 node = container_of(w, struct binder_node, work); 3386 seq_printf(m, "%snode work %d: u%016llx c%016llx\n", 3387 prefix, node->debug_id, 3388 (u64)node->ptr, (u64)node->cookie); 3389 break; 3390 case BINDER_WORK_DEAD_BINDER: 3391 seq_printf(m, "%shas dead binder\n", prefix); 3392 break; 3393 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 3394 seq_printf(m, "%shas cleared dead binder\n", prefix); 3395 break; 3396 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: 3397 seq_printf(m, "%shas cleared death notification\n", prefix); 3398 break; 3399 default: 3400 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); 3401 break; 3402 } 3403 } 3404 3405 static void print_binder_thread(struct seq_file *m, 3406 struct binder_thread *thread, 3407 int print_always) 3408 { 3409 struct binder_transaction *t; 3410 struct binder_work *w; 3411 size_t start_pos = m->count; 3412 size_t header_pos; 3413 3414 seq_printf(m, " thread %d: l %02x need_return %d\n", 3415 thread->pid, thread->looper, 3416 thread->looper_need_return); 3417 header_pos = m->count; 3418 t = thread->transaction_stack; 3419 while (t) { 3420 if (t->from == thread) { 3421 print_binder_transaction(m, 3422 " outgoing transaction", t); 3423 t = t->from_parent; 3424 } else if (t->to_thread == thread) { 3425 print_binder_transaction(m, 3426 " incoming transaction", t); 3427 t = t->to_parent; 3428 } else { 3429 print_binder_transaction(m, " bad transaction", t); 3430 t = NULL; 3431 } 3432 } 3433 list_for_each_entry(w, &thread->todo, entry) { 3434 print_binder_work(m, " ", " pending transaction", w); 3435 } 3436 if (!print_always && m->count == header_pos) 3437 m->count = start_pos; 3438 } 3439 3440 static void print_binder_node(struct seq_file *m, struct binder_node *node) 3441 { 3442 struct binder_ref *ref; 3443 struct binder_work *w; 3444 int count; 3445 3446 count = 0; 3447 hlist_for_each_entry(ref, &node->refs, node_entry) 3448 count++; 3449 3450 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d", 3451 node->debug_id, (u64)node->ptr, (u64)node->cookie, 3452 node->has_strong_ref, node->has_weak_ref, 3453 node->local_strong_refs, node->local_weak_refs, 3454 node->internal_strong_refs, count); 3455 if (count) { 3456 seq_puts(m, " proc"); 3457 hlist_for_each_entry(ref, &node->refs, node_entry) 3458 seq_printf(m, " %d", ref->proc->pid); 3459 } 3460 seq_puts(m, "\n"); 3461 list_for_each_entry(w, &node->async_todo, entry) 3462 print_binder_work(m, " ", 3463 " pending async transaction", w); 3464 } 3465 3466 static void print_binder_ref(struct seq_file *m, struct binder_ref *ref) 3467 { 3468 
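/* One line per ref: debug id, descriptor, liveness of the target
 * node, strong/weak counts and the death notification pointer (d),
 * if any. */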
seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n", 3469 ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ", 3470 ref->node->debug_id, ref->strong, ref->weak, ref->death); 3471 } 3472 3473 static void print_binder_proc(struct seq_file *m, 3474 struct binder_proc *proc, int print_all) 3475 { 3476 struct binder_work *w; 3477 struct rb_node *n; 3478 size_t start_pos = m->count; 3479 size_t header_pos; 3480 3481 seq_printf(m, "proc %d\n", proc->pid); 3482 seq_printf(m, "context %s\n", proc->context->name); 3483 header_pos = m->count; 3484 3485 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 3486 print_binder_thread(m, rb_entry(n, struct binder_thread, 3487 rb_node), print_all); 3488 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 3489 struct binder_node *node = rb_entry(n, struct binder_node, 3490 rb_node); 3491 if (print_all || node->has_async_transaction) 3492 print_binder_node(m, node); 3493 } 3494 if (print_all) { 3495 for (n = rb_first(&proc->refs_by_desc); 3496 n != NULL; 3497 n = rb_next(n)) 3498 print_binder_ref(m, rb_entry(n, struct binder_ref, 3499 rb_node_desc)); 3500 } 3501 binder_alloc_print_allocated(m, &proc->alloc); 3502 list_for_each_entry(w, &proc->todo, entry) 3503 print_binder_work(m, " ", " pending transaction", w); 3504 list_for_each_entry(w, &proc->delivered_death, entry) { 3505 seq_puts(m, " has delivered dead binder\n"); 3506 break; 3507 } 3508 if (!print_all && m->count == header_pos) 3509 m->count = start_pos; 3510 } 3511 3512 static const char * const binder_return_strings[] = { 3513 "BR_ERROR", 3514 "BR_OK", 3515 "BR_TRANSACTION", 3516 "BR_REPLY", 3517 "BR_ACQUIRE_RESULT", 3518 "BR_DEAD_REPLY", 3519 "BR_TRANSACTION_COMPLETE", 3520 "BR_INCREFS", 3521 "BR_ACQUIRE", 3522 "BR_RELEASE", 3523 "BR_DECREFS", 3524 "BR_ATTEMPT_ACQUIRE", 3525 "BR_NOOP", 3526 "BR_SPAWN_LOOPER", 3527 "BR_FINISHED", 3528 "BR_DEAD_BINDER", 3529 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 3530 "BR_FAILED_REPLY" 3531 }; 3532 3533 static const char * const binder_command_strings[] = { 3534 "BC_TRANSACTION", 3535 "BC_REPLY", 3536 "BC_ACQUIRE_RESULT", 3537 "BC_FREE_BUFFER", 3538 "BC_INCREFS", 3539 "BC_ACQUIRE", 3540 "BC_RELEASE", 3541 "BC_DECREFS", 3542 "BC_INCREFS_DONE", 3543 "BC_ACQUIRE_DONE", 3544 "BC_ATTEMPT_ACQUIRE", 3545 "BC_REGISTER_LOOPER", 3546 "BC_ENTER_LOOPER", 3547 "BC_EXIT_LOOPER", 3548 "BC_REQUEST_DEATH_NOTIFICATION", 3549 "BC_CLEAR_DEATH_NOTIFICATION", 3550 "BC_DEAD_BINDER_DONE", 3551 "BC_TRANSACTION_SG", 3552 "BC_REPLY_SG", 3553 }; 3554 3555 static const char * const binder_objstat_strings[] = { 3556 "proc", 3557 "thread", 3558 "node", 3559 "ref", 3560 "death", 3561 "transaction", 3562 "transaction_complete" 3563 }; 3564 3565 static void print_binder_stats(struct seq_file *m, const char *prefix, 3566 struct binder_stats *stats) 3567 { 3568 int i; 3569 3570 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != 3571 ARRAY_SIZE(binder_command_strings)); 3572 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { 3573 int temp = atomic_read(&stats->bc[i]); 3574 3575 if (temp) 3576 seq_printf(m, "%s%s: %d\n", prefix, 3577 binder_command_strings[i], temp); 3578 } 3579 3580 BUILD_BUG_ON(ARRAY_SIZE(stats->br) != 3581 ARRAY_SIZE(binder_return_strings)); 3582 for (i = 0; i < ARRAY_SIZE(stats->br); i++) { 3583 int temp = atomic_read(&stats->br[i]); 3584 3585 if (temp) 3586 seq_printf(m, "%s%s: %d\n", prefix, 3587 binder_return_strings[i], temp); 3588 } 3589 3590 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 3591 ARRAY_SIZE(binder_objstat_strings)); 3592 
BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 3593 ARRAY_SIZE(stats->obj_deleted)); 3594 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { 3595 int created = atomic_read(&stats->obj_created[i]); 3596 int deleted = atomic_read(&stats->obj_deleted[i]); 3597 3598 if (created || deleted) 3599 seq_printf(m, "%s%s: active %d total %d\n", 3600 prefix, 3601 binder_objstat_strings[i], 3602 created - deleted, 3603 created); 3604 } 3605 } 3606 3607 static void print_binder_proc_stats(struct seq_file *m, 3608 struct binder_proc *proc) 3609 { 3610 struct binder_work *w; 3611 struct rb_node *n; 3612 int count, strong, weak; 3613 3614 seq_printf(m, "proc %d\n", proc->pid); 3615 seq_printf(m, "context %s\n", proc->context->name); 3616 count = 0; 3617 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 3618 count++; 3619 seq_printf(m, " threads: %d\n", count); 3620 seq_printf(m, " requested threads: %d+%d/%d\n" 3621 " ready threads %d\n" 3622 " free async space %zd\n", proc->requested_threads, 3623 proc->requested_threads_started, proc->max_threads, 3624 proc->ready_threads, 3625 binder_alloc_get_free_async_space(&proc->alloc)); 3626 count = 0; 3627 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) 3628 count++; 3629 seq_printf(m, " nodes: %d\n", count); 3630 count = 0; 3631 strong = 0; 3632 weak = 0; 3633 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { 3634 struct binder_ref *ref = rb_entry(n, struct binder_ref, 3635 rb_node_desc); 3636 count++; 3637 strong += ref->strong; 3638 weak += ref->weak; 3639 } 3640 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); 3641 3642 count = binder_alloc_get_allocated_count(&proc->alloc); 3643 seq_printf(m, " buffers: %d\n", count); 3644 3645 count = 0; 3646 list_for_each_entry(w, &proc->todo, entry) { 3647 switch (w->type) { 3648 case BINDER_WORK_TRANSACTION: 3649 count++; 3650 break; 3651 default: 3652 break; 3653 } 3654 } 3655 seq_printf(m, " pending transactions: %d\n", count); 3656 3657 print_binder_stats(m, " ", &proc->stats); 3658 } 3659 3660 3661 static int binder_state_show(struct seq_file *m, void *unused) 3662 { 3663 struct binder_proc *proc; 3664 struct binder_node *node; 3665 3666 binder_lock(__func__); 3667 3668 seq_puts(m, "binder state:\n"); 3669 3670 spin_lock(&binder_dead_nodes_lock); 3671 if (!hlist_empty(&binder_dead_nodes)) 3672 seq_puts(m, "dead nodes:\n"); 3673 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) 3674 print_binder_node(m, node); 3675 spin_unlock(&binder_dead_nodes_lock); 3676 3677 mutex_lock(&binder_procs_lock); 3678 hlist_for_each_entry(proc, &binder_procs, proc_node) 3679 print_binder_proc(m, proc, 1); 3680 mutex_unlock(&binder_procs_lock); 3681 binder_unlock(__func__); 3682 return 0; 3683 } 3684 3685 static int binder_stats_show(struct seq_file *m, void *unused) 3686 { 3687 struct binder_proc *proc; 3688 3689 binder_lock(__func__); 3690 3691 seq_puts(m, "binder stats:\n"); 3692 3693 print_binder_stats(m, "", &binder_stats); 3694 3695 mutex_lock(&binder_procs_lock); 3696 hlist_for_each_entry(proc, &binder_procs, proc_node) 3697 print_binder_proc_stats(m, proc); 3698 mutex_unlock(&binder_procs_lock); 3699 binder_unlock(__func__); 3700 return 0; 3701 } 3702 3703 static int binder_transactions_show(struct seq_file *m, void *unused) 3704 { 3705 struct binder_proc *proc; 3706 3707 binder_lock(__func__); 3708 3709 seq_puts(m, "binder transactions:\n"); 3710 mutex_lock(&binder_procs_lock); 3711 hlist_for_each_entry(proc, &binder_procs, proc_node) 3712 print_binder_proc(m, 
proc, 0); 3713 mutex_unlock(&binder_procs_lock); 3714 binder_unlock(__func__); 3715 return 0; 3716 } 3717 3718 static int binder_proc_show(struct seq_file *m, void *unused) 3719 { 3720 struct binder_proc *itr; 3721 int pid = (unsigned long)m->private; 3722 3723 binder_lock(__func__); 3724 3725 mutex_lock(&binder_procs_lock); 3726 hlist_for_each_entry(itr, &binder_procs, proc_node) { 3727 if (itr->pid == pid) { 3728 seq_puts(m, "binder proc state:\n"); 3729 print_binder_proc(m, itr, 1); 3730 } 3731 } 3732 mutex_unlock(&binder_procs_lock); 3733 3734 binder_unlock(__func__); 3735 return 0; 3736 } 3737 3738 static void print_binder_transaction_log_entry(struct seq_file *m, 3739 struct binder_transaction_log_entry *e) 3740 { 3741 int debug_id = READ_ONCE(e->debug_id_done); 3742 /* 3743 * read barrier to guarantee debug_id_done read before 3744 * we print the log values 3745 */ 3746 smp_rmb(); 3747 seq_printf(m, 3748 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d", 3749 e->debug_id, (e->call_type == 2) ? "reply" : 3750 ((e->call_type == 1) ? "async" : "call "), e->from_proc, 3751 e->from_thread, e->to_proc, e->to_thread, e->context_name, 3752 e->to_node, e->target_handle, e->data_size, e->offsets_size, 3753 e->return_error, e->return_error_param, 3754 e->return_error_line); 3755 /* 3756 * read-barrier to guarantee read of debug_id_done after 3757 * done printing the fields of the entry 3758 */ 3759 smp_rmb(); 3760 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ? 3761 "\n" : " (incomplete)\n"); 3762 } 3763 3764 static int binder_transaction_log_show(struct seq_file *m, void *unused) 3765 { 3766 struct binder_transaction_log *log = m->private; 3767 unsigned int log_cur = atomic_read(&log->cur); 3768 unsigned int count; 3769 unsigned int cur; 3770 int i; 3771 3772 count = log_cur + 1; 3773 cur = count < ARRAY_SIZE(log->entry) && !log->full ? 
3774 0 : count % ARRAY_SIZE(log->entry); 3775 if (count > ARRAY_SIZE(log->entry) || log->full) 3776 count = ARRAY_SIZE(log->entry); 3777 for (i = 0; i < count; i++) { 3778 unsigned int index = cur++ % ARRAY_SIZE(log->entry); 3779 3780 print_binder_transaction_log_entry(m, &log->entry[index]); 3781 } 3782 return 0; 3783 } 3784 3785 static const struct file_operations binder_fops = { 3786 .owner = THIS_MODULE, 3787 .poll = binder_poll, 3788 .unlocked_ioctl = binder_ioctl, 3789 .compat_ioctl = binder_ioctl, 3790 .mmap = binder_mmap, 3791 .open = binder_open, 3792 .flush = binder_flush, 3793 .release = binder_release, 3794 }; 3795 3796 BINDER_DEBUG_ENTRY(state); 3797 BINDER_DEBUG_ENTRY(stats); 3798 BINDER_DEBUG_ENTRY(transactions); 3799 BINDER_DEBUG_ENTRY(transaction_log); 3800 3801 static int __init init_binder_device(const char *name) 3802 { 3803 int ret; 3804 struct binder_device *binder_device; 3805 3806 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL); 3807 if (!binder_device) 3808 return -ENOMEM; 3809 3810 binder_device->miscdev.fops = &binder_fops; 3811 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR; 3812 binder_device->miscdev.name = name; 3813 3814 binder_device->context.binder_context_mgr_uid = INVALID_UID; 3815 binder_device->context.name = name; 3816 mutex_init(&binder_device->context.context_mgr_node_lock); 3817 3818 ret = misc_register(&binder_device->miscdev); 3819 if (ret < 0) { 3820 kfree(binder_device); 3821 return ret; 3822 } 3823 3824 hlist_add_head(&binder_device->hlist, &binder_devices); 3825 3826 return ret; 3827 } 3828 3829 static int __init binder_init(void) 3830 { 3831 int ret; 3832 char *device_name, *device_names; 3833 struct binder_device *device; 3834 struct hlist_node *tmp; 3835 3836 atomic_set(&binder_transaction_log.cur, ~0U); 3837 atomic_set(&binder_transaction_log_failed.cur, ~0U); 3838 3839 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); 3840 if (binder_debugfs_dir_entry_root) 3841 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", 3842 binder_debugfs_dir_entry_root); 3843 3844 if (binder_debugfs_dir_entry_root) { 3845 debugfs_create_file("state", 3846 S_IRUGO, 3847 binder_debugfs_dir_entry_root, 3848 NULL, 3849 &binder_state_fops); 3850 debugfs_create_file("stats", 3851 S_IRUGO, 3852 binder_debugfs_dir_entry_root, 3853 NULL, 3854 &binder_stats_fops); 3855 debugfs_create_file("transactions", 3856 S_IRUGO, 3857 binder_debugfs_dir_entry_root, 3858 NULL, 3859 &binder_transactions_fops); 3860 debugfs_create_file("transaction_log", 3861 S_IRUGO, 3862 binder_debugfs_dir_entry_root, 3863 &binder_transaction_log, 3864 &binder_transaction_log_fops); 3865 debugfs_create_file("failed_transaction_log", 3866 S_IRUGO, 3867 binder_debugfs_dir_entry_root, 3868 &binder_transaction_log_failed, 3869 &binder_transaction_log_fops); 3870 } 3871 3872 /* 3873 * Copy the module_parameter string, because we don't want to 3874 * tokenize it in-place. 
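 * strsep() below tokenizes the copy in place; the original
 * parameter string is left intact so later reads of the module
 * parameter still see it.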
3875 */ 3876 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL); 3877 if (!device_names) { 3878 ret = -ENOMEM; 3879 goto err_alloc_device_names_failed; 3880 } 3881 strcpy(device_names, binder_devices_param); 3882 3883 while ((device_name = strsep(&device_names, ","))) { 3884 ret = init_binder_device(device_name); 3885 if (ret) 3886 goto err_init_binder_device_failed; 3887 } 3888 3889 return ret; 3890 3891 err_init_binder_device_failed: 3892 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) { 3893 misc_deregister(&device->miscdev); 3894 hlist_del(&device->hlist); 3895 kfree(device); 3896 } 3897 err_alloc_device_names_failed: 3898 debugfs_remove_recursive(binder_debugfs_dir_entry_root); 3899 3900 return ret; 3901 } 3902 3903 device_initcall(binder_init); 3904 3905 #define CREATE_TRACE_POINTS 3906 #include "binder_trace.h" 3907 3908 MODULE_LICENSE("GPL v2"); 3909
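/*
 * Illustrative userspace usage (a documentation sketch, not code
 * shipped with this driver): a minimal client opens the device, maps
 * the read-only transaction buffer and registers a looper thread
 * before exchanging data via BINDER_WRITE_READ. The device path and
 * mapping size below are assumptions, not values mandated here.
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	void *map = mmap(NULL, 128 * 1024, PROT_READ,
 *			 MAP_PRIVATE, fd, 0);
 *	uint32_t enter = BC_ENTER_LOOPER;
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)&enter,
 *		.write_size = sizeof(enter),
 *	};
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 *
 * The mapping must be read-only: binder_mmap() rejects VM_WRITE via
 * FORBIDDEN_MMAP_FLAGS and clears VM_MAYWRITE.
 */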