// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack.
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>

#include <uapi/linux/android/binder.h>

#include <asm/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
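/*
 * Example: the flags above combine as a bitmask. To log transactions
 * and ioctl read/write traffic in addition to the defaults:
 *
 *	BINDER_DEBUG_READ_WRITE | BINDER_DEBUG_TRANSACTION == 0x240
 *
 * With the debug_mask module parameter below, this can typically be
 * set at runtime via /sys/module/binder/parameters/debug_mask.
 */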
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info_ratelimited(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log binder_transaction_log;
struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH        = 0x01,
	BINDER_DEFERRED_RELEASE      = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:	struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:	struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:	struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:	struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:	struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:	struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}
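/*
 * Illustrative sketch (hypothetical helper, not called by the driver):
 * the acquisition order mandated by the "Locking overview" comment at
 * the top of this file, assuming @node belongs to @proc. Locks are
 * taken in ascending order and released in reverse.
 */
static void __maybe_unused binder_lock_order_example(struct binder_proc *proc,
						     struct binder_node *node)
{
	binder_proc_lock(proc);		/* 1) proc->outer_lock */
	binder_node_lock(node);		/* 2) node->lock */
	binder_inner_proc_lock(proc);	/* 3) proc->inner_lock */

	/* ... touch state protected by these locks ... */

	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);
	binder_proc_unlock(proc);
}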
/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:	struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:	struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:	binder_proc associated with list
 * @list:	list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:	struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}
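/*
 * Hedged usage note (based on callers elsewhere in this driver, not
 * shown in this excerpt): the deferred variant above is used for work
 * such as the TRANSACTION_COMPLETE of a one-way transaction. Because
 * process_todo stays unset, the queueing does not by itself make the
 * thread return from read; the item is consumed the next time the
 * thread processes its todo list for another reason.
 */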
/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:	binder_proc associated with list
 * @work:	struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}
/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread_ilocked() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}
static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
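/*
 * Worked example for the capping above (a sketch, assuming the usual
 * rlimit_to_nice() mapping of MAX_NICE - prio + 1): with RLIMIT_NICE
 * set to 20, an unprivileged caller asking for nice -10 fails
 * can_nice() and is capped to min_nice = 19 - 20 + 1 = 0. With
 * RLIMIT_NICE below 1, min_nice exceeds MAX_NICE (19) and the
 * RLIMIT_NICE error above is logged after the nice value is set.
 */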
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {

		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}
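/*
 * Note on the pattern above: the new node is allocated with GFP_KERNEL
 * (which may sleep) *before* proc->inner_lock is taken, because a
 * spinlock must not be held across a sleeping allocation. If two
 * threads race to create the same node, binder_init_node_ilocked()
 * returns the node that won and the loser's allocation is freed.
 */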
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						   struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}
/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
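/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * the lookup/use/put pattern implied by the comments above. The
 * temporary reference taken inside binder_get_node() keeps the node
 * alive until binder_put_node() drops it.
 */
static void __maybe_unused binder_node_tmpref_example(struct binder_proc *proc,
						      binder_uintptr_t ptr)
{
	struct binder_node *node = binder_get_node(proc, ptr);

	if (!node)
		return;
	/* node->tmp_refs pins the node here, even if all other refs drop */
	binder_put_node(node);
}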
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}
/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:	ref to be incremented
 * @strong:	if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: %true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
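/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * taking and then dropping a strong user reference on a handle through
 * binder_update_ref_for_handle(), mirroring how BC_ACQUIRE/BC_RELEASE
 * style operations use these helpers.
 */
static int __maybe_unused binder_ref_update_example(struct binder_proc *proc,
						    uint32_t desc)
{
	struct binder_ref_data rdata;
	int ret;

	/* increment = true, strong = true */
	ret = binder_update_ref_for_handle(proc, desc, true, true, &rdata);
	if (ret)
		return ret;
	/* strong decrement; frees the ref if both counts drop to zero */
	return binder_dec_ref_for_handle(proc, desc, true, &rdata);
}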
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
				   struct binder_node *node,
				   bool strong,
				   struct list_head *target_list,
				   struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * tmp_ref is an atomic so binder_get_txn_from() can take a
	 * reference without holding the inner lock; the inner lock is
	 * held here so the is_dead/tmp_ref check and the decision to
	 * free are consistent.
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}
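/*
 * Illustrative sketch (hypothetical caller, not part of the driver):
 * the unlock/tmpref discipline required of callers of
 * binder_get_txn_from_and_acq_inner(), matching its kernel-doc above.
 */
static void __maybe_unused binder_txn_from_example(struct binder_transaction *t)
{
	struct binder_thread *from;

	from = binder_get_txn_from_and_acq_inner(t);
	if (!from) {
		/* annotation for sparse; no lock is actually held */
		__release(&t->from->proc->inner_lock);
		return;
	}
	/* from->proc->inner_lock is held: 'from' cannot be freed here */
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
}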
/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction for t->from
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}

static void binder_txn_latency_free(struct binder_transaction *t)
{
	int from_proc, from_thread, to_proc, to_thread;

	spin_lock(&t->lock);
	from_proc = t->from ? t->from->proc->pid : 0;
	from_thread = t->from ? t->from->pid : 0;
	to_proc = t->to_proc ? t->to_proc->pid : 0;
	to_thread = t->to_thread ? t->to_thread->pid : 0;
	spin_unlock(&t->lock);

	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
}

static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		target_proc->outstanding_txns--;
		if (target_proc->outstanding_txns < 0)
			pr_warn("%s: Unexpected outstanding_txns %d\n",
				__func__, target_proc->outstanding_txns);
		if (!target_proc->outstanding_txns && target_proc->is_frozen)
			wake_up_interruptible_all(&target_proc->freeze_wait);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			"undelivered transaction %d, %s\n",
			t->debug_id, reason);
		binder_free_transaction(t);
	}
}

/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:	binder_proc owning the buffer
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the @buffer at which to validate an object.
 * @object:	struct binder_object to read into
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero. The object
 *		is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
	    binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
					  offset, read_size))
		return 0;

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
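/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * reading one object out of a transaction buffer with
 * binder_get_object() and dispatching on the header type. A zero
 * return means the object was truncated or had an unknown type.
 */
static bool __maybe_unused binder_object_is_fd_example(struct binder_proc *proc,
						       struct binder_buffer *buffer,
						       unsigned long offset)
{
	struct binder_object object;

	if (!binder_get_object(proc, buffer, offset, &object))
		return false;
	return object.hdr.type == BINDER_TYPE_FD;
}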
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @proc:	binder_proc owning the buffer
 * @b:		binder_buffer containing the object
 * @object:	struct binder_object to read into
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start_offset: points to the start of the offset array
 * @object_offsetp: offset of @object read from @b
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start_offset and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start_offset were previously verified to have valid offsets.
 *		If @object_offsetp is non-NULL, then the offset within
 *		@b is written to it.
 */
static struct binder_buffer_object *binder_validate_ptr(
						struct binder_proc *proc,
						struct binder_buffer *b,
						struct binder_object *object,
						binder_size_t index,
						binder_size_t start_offset,
						binder_size_t *object_offsetp,
						binder_size_t num_valid)
{
	size_t object_size;
	binder_size_t object_offset;
	unsigned long buffer_offset;

	if (index >= num_valid)
		return NULL;

	buffer_offset = start_offset + sizeof(binder_size_t) * index;
	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
					  b, buffer_offset,
					  sizeof(object_offset)))
		return NULL;
	object_size = binder_get_object(proc, b, object_offset, object);
	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
		return NULL;
	if (object_offsetp)
		*object_offsetp = object_offset;

	return &object->bbo;
}

/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @proc:		binder_proc owning the buffer
 * @b:			transaction buffer
 * @objects_start_offset: offset to start of objects buffer
 * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @buffer to fix up
 * @last_obj_offset:	offset to last binder_buffer_object that we fixed
 * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_proc *proc,
				  struct binder_buffer *b,
				  binder_size_t objects_start_offset,
				  binder_size_t buffer_obj_offset,
				  binder_size_t fixup_offset,
				  binder_size_t last_obj_offset,
				  binder_size_t last_min_offset)
{
	if (!last_obj_offset) {
		/* No previously verified object to fix up into */
		return false;
	}

	while (last_obj_offset != buffer_obj_offset) {
		unsigned long buffer_offset;
		struct binder_object last_object;
		struct binder_buffer_object *last_bbo;
		size_t object_size = binder_get_object(proc, b, last_obj_offset,
						       &last_object);
		if (object_size != sizeof(*last_bbo))
			return false;

		last_bbo = &last_object.bbo;
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
		buffer_offset = objects_start_offset +
			sizeof(binder_size_t) * last_bbo->parent;
		if (binder_alloc_copy_from_buffer(&proc->alloc,
						  &last_obj_offset,
						  b, buffer_offset,
						  sizeof(last_obj_offset)))
			return false;
	}
	return (fixup_offset >= last_min_offset);
}

/**
 * struct binder_task_work_cb - for deferred close
 *
 * @twork:	callback_head for task work
 * @file:	file to close via fput()
 *
 * Structure to pass task work to be handled after
 * returning from binder_ioctl() via task_work_add().
 */
struct binder_task_work_cb {
	struct callback_head twork;
	struct file *file;
};

/**
 * binder_do_fd_close() - close list of file descriptors
 * @twork:	callback head for task work
 *
 * It is not safe to call ksys_close() during the binder_ioctl()
 * function if there is a chance that binder's own file descriptor
 * might be closed. This is to meet the requirements for using
 * fdget() (see comments for __fget_light()). Therefore use
 * task_work_add() to schedule the close operation once we have
 * returned from binder_ioctl(). This function is a callback
 * for that mechanism and does the actual fput() on the
 * file scheduled for closing.
 */
static void binder_do_fd_close(struct callback_head *twork)
{
	struct binder_task_work_cb *twcb = container_of(twork,
			struct binder_task_work_cb, twork);

	fput(twcb->file);
	kfree(twcb);
}

/**
 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
 * @fd:		file-descriptor to close
 *
 * See comments in binder_do_fd_close(). This function is used to schedule
 * a file-descriptor to be closed after returning from binder_ioctl().
 */
static void binder_deferred_fd_close(int fd)
{
	struct binder_task_work_cb *twcb;

	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
	if (!twcb)
		return;
	init_task_work(&twcb->twork, binder_do_fd_close);
	close_fd_get_file(fd, &twcb->file);
	if (twcb->file) {
		filp_close(twcb->file, current->files);
		task_work_add(current, &twcb->twork, TWA_RESUME);
	} else {
		kfree(twcb);
	}
}
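/*
 * Illustrative sketch (hypothetical caller, not part of this excerpt):
 * any path that must close a userspace fd while binder_ioctl() may
 * still be on the call stack goes through the deferred helper rather
 * than closing the fd directly:
 *
 *	if (fixup_failed)
 *		binder_deferred_fd_close(fd);
 *
 * The fd is then released via task_work once the task returns to
 * userspace.
 */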
/**
 * struct binder_task_work_cb - for deferred close
 *
 * @twork:	callback_head for task work
 * @file:	file to close
 *
 * Structure to pass task work to be handled after
 * returning from binder_ioctl() via task_work_add().
 */
struct binder_task_work_cb {
	struct callback_head twork;
	struct file *file;
};

/**
 * binder_do_fd_close() - close a file scheduled for deferred close
 * @twork:	callback head for task work
 *
 * It is not safe to call ksys_close() during the binder_ioctl()
 * function if there is a chance that binder's own file descriptor
 * might be closed. This is to meet the requirements for using
 * fdget() (see comments for __fget_light()). Therefore use
 * task_work_add() to schedule the close operation once we have
 * returned from binder_ioctl(). This function is a callback
 * for that mechanism and drops the final reference on the
 * given file.
 */
static void binder_do_fd_close(struct callback_head *twork)
{
	struct binder_task_work_cb *twcb = container_of(twork,
			struct binder_task_work_cb, twork);

	fput(twcb->file);
	kfree(twcb);
}

/**
 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
 * @fd:		file-descriptor to close
 *
 * See comments in binder_do_fd_close(). This function is used to schedule
 * a file-descriptor to be closed after returning from binder_ioctl().
 */
static void binder_deferred_fd_close(int fd)
{
	struct binder_task_work_cb *twcb;

	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
	if (!twcb)
		return;
	init_task_work(&twcb->twork, binder_do_fd_close);
	close_fd_get_file(fd, &twcb->file);
	if (twcb->file) {
		filp_close(twcb->file, current->files);
		task_work_add(current, &twcb->twork, TWA_RESUME);
	} else {
		kfree(twcb);
	}
}
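/*
 * Sequence sketch: when a transaction carrying fd fixups is torn down,
 * binder_transaction_buffer_release() calls binder_deferred_fd_close(),
 * which detaches the fd from the table immediately but defers the final
 * fput() to binder_do_fd_close() via task_work_add(), i.e. until the
 * current thread is about to return to user space. The caller also nudges
 * the looper back out of the kernel (thread->looper_need_return) so the
 * deferred work runs promptly.
 */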
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_thread *thread,
					      struct binder_buffer *buffer,
					      binder_size_t failed_at,
					      bool is_failure)
{
	int debug_id = buffer->debug_id;
	binder_size_t off_start_offset, buffer_offset, off_end_offset;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size,
		     (unsigned long long)failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
	off_end_offset = is_failure ? failed_at :
				off_start_offset + buffer->offsets_size;
	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size = 0;
		struct binder_object object;
		binder_size_t object_offset;

		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
						   buffer, buffer_offset,
						   sizeof(object_offset)))
			object_size = binder_get_object(proc, buffer,
							object_offset, &object);
		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)object_offset, buffer->data_size);
			continue;
		}
		hdr = &object.hdr;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			/*
			 * No need to close the file here since user-space
			 * closes it for successfully delivered
			 * transactions. For transactions that weren't
			 * delivered, the new fd was never allocated so
			 * there is no need to close and the fput on the
			 * file is done when the transaction is torn
			 * down.
			 */
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			struct binder_object ptr_object;
			binder_size_t fda_offset;
			size_t fd_index;
			binder_size_t fd_buf_size;
			binder_size_t num_valid;

			if (proc->tsk != current->group_leader) {
				/*
				 * Nothing to do if running in sender context.
				 * The fd fixups have not been applied so no
				 * fds need to be closed.
				 */
				continue;
			}

			num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(proc, buffer, &ptr_object,
						     fda->parent,
						     off_start_offset,
						     NULL,
						     num_valid);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			/*
			 * the source data for binder_buffer_object is visible
			 * to user-space and the @buffer element is the user
			 * pointer to the buffer_object containing the fd_array.
			 * Convert the address to an offset relative to
			 * the base of the transaction buffer.
			 */
			fda_offset =
			    (parent->buffer - (uintptr_t)buffer->user_data) +
			    fda->parent_offset;
			for (fd_index = 0; fd_index < fda->num_fds;
			     fd_index++) {
				u32 fd;
				int err;
				binder_size_t offset = fda_offset +
					fd_index * sizeof(fd);

				err = binder_alloc_copy_from_buffer(
						&proc->alloc, &fd, buffer,
						offset, sizeof(fd));
				WARN_ON(err);
				if (!err) {
					binder_deferred_fd_close(fd);
					/*
					 * Need to make sure the thread goes
					 * back to userspace to complete the
					 * deferred close
					 */
					if (thread)
						thread->looper_need_return = true;
				}
			}
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}
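/*
 * Transaction buffer layout assumed throughout this file (a sketch,
 * derived from the offset math above and in binder_transaction()):
 *
 *   | data (tr->data_size)  | offset array (tr->offsets_size) | sg/extra |
 *   ^                        ^                                 ^
 *   user_data                off_start_offset =                sg_buf_offset
 *                            ALIGN(data_size, sizeof(void *))
 *
 * Each offset-array entry points back into the data area at a
 * binder_object; scatter-gather buffers (and the fd arrays they
 * contain) live in the trailing extra area.
 */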
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}

static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		else
			__acquire(&node->proc->inner_lock);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		else
			__release(&node->proc->inner_lock);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}
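/*
 * Example of the two translation directions (illustrative): if process A
 * owns a node N and sends it to B, binder_translate_binder() rewrites the
 * BINDER_TYPE_BINDER object into a BINDER_TYPE_HANDLE naming B's new ref.
 * If B later passes that handle back in a transaction to A,
 * binder_translate_handle() sees node->proc == target_proc and converts
 * the handle back into A's original BINDER_TYPE_BINDER object (ptr and
 * cookie restored); for any third process C it instead creates or reuses
 * a ref in C and rewrites the handle to C's descriptor.
 */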
static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_txn_fd_fixup *fixup;
	struct file *file;
	int ret = 0;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	/*
	 * Add fixup record for this transaction. The allocation
	 * of the fd in the target needs to be done from a
	 * target thread.
	 */
	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
	if (!fixup) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	fixup->file = file;
	fixup->offset = fd_offset;
	trace_binder_transaction_fd_send(t, fd, fixup->offset);
	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);

	return ret;

err_alloc:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}

static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size;
	binder_size_t fda_offset;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * the source data for binder_buffer_object is visible
	 * to user-space and the @buffer element is the user
	 * pointer to the buffer_object containing the fd_array.
	 * Convert the address to an offset relative to
	 * the base of the transaction buffer.
	 */
	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
		fda->parent_offset;
	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		u32 fd;
		int ret;
		binder_size_t offset = fda_offset + fdi * sizeof(fd);

		ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
						    &fd, t->buffer,
						    offset, sizeof(fd));
		if (!ret)
			ret = binder_translate_fd(fd, offset, t, thread,
						  in_reply_to);
		if (ret < 0)
			return ret;
	}
	return 0;
}
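/*
 * Worked example for the fda_offset math (illustrative numbers): if the
 * parent scatter-gather buffer was copied to user_data + 0x100 (so
 * parent->buffer, after its own fixup, holds the target-space address of
 * that spot) and fda->parent_offset is 0x20, then
 *
 *   fda_offset = (parent->buffer - user_data) + 0x20 = 0x120
 *
 * i.e. the fd array starts 0x120 bytes into the transaction buffer, and
 * entry i sits at fda_offset + i * sizeof(u32).
 */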
static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t off_start_offset,
			       binder_size_t num_valid,
			       binder_size_t last_fixup_obj_off,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_object object;
	binder_size_t buffer_offset;
	binder_size_t parent_offset;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
				     off_start_offset, &parent_offset,
				     num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(target_proc, b, off_start_offset,
				   parent_offset, bp->parent_offset,
				   last_fixup_obj_off,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	buffer_offset = bp->parent_offset +
			(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
	if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
					&bp->buffer, sizeof(bp->buffer))) {
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	return 0;
}

/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t:		transaction to send
 * @proc:	process to send the transaction to
 * @thread:	thread in @proc to send the transaction to (may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the waitlist of that specific thread.
 *
 * Return:	0 if the transaction was successfully queued
 *		BR_DEAD_REPLY if the target process or thread is dead
 *		BR_FROZEN_REPLY if the target process or thread is frozen
 */
static int binder_proc_transaction(struct binder_transaction *t,
				   struct binder_proc *proc,
				   struct binder_thread *thread)
{
	struct binder_node *node = t->buffer->target_node;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool pending_async = false;

	BUG_ON(!node);
	binder_node_lock(node);
	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction)
			pending_async = true;
		else
			node->has_async_transaction = true;
	}

	binder_inner_proc_lock(proc);
	if (proc->is_frozen) {
		proc->sync_recv |= !oneway;
		proc->async_recv |= oneway;
	}

	if ((proc->is_frozen && !oneway) || proc->is_dead ||
	    (thread && thread->is_dead)) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
	}

	if (!thread && !pending_async)
		thread = binder_select_thread_ilocked(proc);

	if (thread)
		binder_enqueue_thread_work_ilocked(thread, &t->work);
	else if (!pending_async)
		binder_enqueue_work_ilocked(&t->work, &proc->todo);
	else
		binder_enqueue_work_ilocked(&t->work, &node->async_todo);

	if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

	proc->outstanding_txns++;
	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	return 0;
}
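/*
 * Queueing decision summary (follows directly from the code above):
 *
 *   thread != NULL                 -> that thread's todo list
 *   sync, or first oneway to node  -> a waiting thread, else proc->todo
 *   oneway with one already queued -> node->async_todo (throttled; the
 *                                     next one is released by
 *                                     binder_free_buf())
 *
 * Only the non-throttled cases wake the target.
 */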
/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:	struct binder_node for which to get refs
 * @procp:	returns @node->proc if valid
 * @error:	set to BR_DEAD_REPLY if @node->proc is NULL
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
 * target proc has died, @error is set to BR_DEAD_REPLY.
 */
static struct binder_node *binder_get_node_refs_for_txn(
		struct binder_node *node,
		struct binder_proc **procp,
		uint32_t *error)
{
	struct binder_node *target_node = NULL;

	binder_node_inner_lock(node);
	if (node->proc) {
		target_node = node;
		binder_inc_node_nilocked(node, 1, 0, NULL);
		binder_inc_node_tmpref_ilocked(node);
		node->proc->tmp_ref++;
		*procp = node->proc;
	} else
		*error = BR_DEAD_REPLY;
	binder_node_inner_unlock(node);

	return target_node;
}

static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *w;
	struct binder_work *tcomplete;
	binder_size_t buffer_offset = 0;
	binder_size_t off_start_offset, off_end_offset;
	binder_size_t off_min;
	binder_size_t sg_buf_offset, sg_buf_end_offset;
	struct binder_proc *target_proc = NULL;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	binder_size_t last_fixup_obj_off = 0;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);
	char *secctx = NULL;
	u32 secctx_sz = 0;

	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);

	if (reply) {
		binder_inner_proc_lock(proc);
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_inner_proc_unlock(proc);
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_empty_call_stack;
		}
		if (in_reply_to->to_thread != thread) {
			spin_lock(&in_reply_to->lock);
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			spin_unlock(&in_reply_to->lock);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		binder_inner_proc_unlock(proc);
		binder_set_nice(in_reply_to->saved_priority);
		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
		if (target_thread == NULL) {
			/* annotation for sparse */
			__release(&target_thread->proc->inner_lock);
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
					  proc->pid, thread->pid,
					  target_thread->transaction_stack ?
					  target_thread->transaction_stack->debug_id : 0,
					  in_reply_to->debug_id);
			binder_inner_proc_unlock(target_thread->proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
		target_proc->tmp_ref++;
		binder_inner_proc_unlock(target_thread->proc);
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, tr->target.handle,
						     true);
			if (ref) {
				target_node = binder_get_node_refs_for_txn(
						ref->node, &target_proc,
						&return_error);
			} else {
				binder_user_error("%d:%d got transaction to invalid handle, %u\n",
						  proc->pid, thread->pid, tr->target.handle);
				return_error = BR_FAILED_REPLY;
			}
			binder_proc_unlock(proc);
		} else {
			mutex_lock(&context->context_mgr_node_lock);
			target_node = context->binder_context_mgr_node;
			if (target_node)
				target_node = binder_get_node_refs_for_txn(
						target_node, &target_proc,
						&return_error);
			else
				return_error = BR_DEAD_REPLY;
			mutex_unlock(&context->context_mgr_node_lock);
			if (target_node && target_proc->pid == proc->pid) {
				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_invalid_target_handle;
			}
		}
		if (!target_node) {
			/*
			 * return_error is set above
			 */
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		e->to_node = target_node->debug_id;
		if (WARN_ON(proc == target_proc)) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPERM;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		binder_inner_proc_lock(proc);
		w = list_first_entry_or_null(&thread->todo,
					     struct binder_work, entry);
		if (!(tr->flags & TF_ONE_WAY) && w &&
		    w->type == BINDER_WORK_TRANSACTION) {
			/*
			 * Do not allow new outgoing transaction from a
			 * thread that has a transaction at the head of
			 * its todo list. Only need to check the head
			 * because binder_select_thread_ilocked picks a
			 * thread from proc->waiting_threads to enqueue
			 * the transaction, and nothing is queued to the
			 * todo list while the thread is on waiting_threads.
			 */
			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
					  proc->pid, thread->pid);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_bad_todo_list;
		}

		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				spin_lock(&tmp->lock);
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				spin_unlock(&tmp->lock);
				binder_inner_proc_unlock(proc);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EPROTO;
				return_error_line = __LINE__;
				goto err_bad_call_stack;
			}
			while (tmp) {
				struct binder_thread *from;

				spin_lock(&tmp->lock);
				from = tmp->from;
				if (from && from->proc == target_proc) {
					atomic_inc(&from->tmp_ref);
					target_thread = from;
					spin_unlock(&tmp->lock);
					break;
				}
				spin_unlock(&tmp->lock);
				tmp = tmp->from_parent;
			}
		}
		binder_inner_proc_unlock(proc);
	}
	if (target_thread)
		e->to_thread = target_thread->pid;
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	INIT_LIST_HEAD(&t->fd_fixups);
	binder_stats_created(BINDER_STAT_TRANSACTION);
	spin_lock_init(&t->lock);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	if (target_node && target_node->txn_security_ctx) {
		u32 secid;
		size_t added_size;

		/*
		 * Arguably this should be the task's subjective LSM secid but
		 * we can't reliably access the subjective creds of a task
		 * other than our own so we must use the objective creds, which
		 * are safe to access. The downside is that if a task is
		 * temporarily overriding its creds it will not be reflected
		 * here; however, it isn't clear that binder would handle that
		 * case well anyway.
		 */
		security_task_getsecid_obj(proc->tsk, &secid);
		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
		if (ret) {
			return_error = BR_FAILED_REPLY;
			return_error_param = ret;
			return_error_line = __LINE__;
			goto err_get_secctx_failed;
		}
		added_size = ALIGN(secctx_sz, sizeof(u64));
		extra_buffers_size += added_size;
		if (extra_buffers_size < added_size) {
			/* integer overflow of extra_buffers_size */
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_extra_size;
		}
	}

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY), current->tgid);
	if (IS_ERR(t->buffer)) {
		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */
		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	if (secctx) {
		int err;
		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
				    ALIGN(tr->offsets_size, sizeof(void *)) +
				    ALIGN(extra_buffers_size, sizeof(void *)) -
				    ALIGN(secctx_sz, sizeof(u64));

		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
						  t->buffer, buf_offset,
						  secctx, secctx_sz);
		if (err) {
			t->security_ctx = 0;
			WARN_ON(1);
		}
		security_release_secctx(secctx, secctx_sz);
		secctx = NULL;
	}
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
	trace_binder_transaction_alloc_buf(t->buffer);

	if (binder_alloc_copy_user_to_buffer(
				&target_proc->alloc,
				t->buffer, 0,
				(const void __user *)
					(uintptr_t)tr->data.ptr.buffer,
				tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				  proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (binder_alloc_copy_user_to_buffer(
				&target_proc->alloc,
				t->buffer,
				ALIGN(tr->data_size, sizeof(void *)),
				(const void __user *)
					(uintptr_t)tr->data.ptr.offsets,
				tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				  proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				  proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
	buffer_offset = off_start_offset;
	off_end_offset = off_start_offset + tr->offsets_size;
	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
		ALIGN(secctx_sz, sizeof(u64));
	off_min = 0;
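	/*
	 * Walk the offset array and translate every object in place.
	 * Two invariants are maintained as we go (both enforced below):
	 * off_min advances past each object, so offsets must be increasing
	 * and non-overlapping, and num_valid counts only the entries
	 * already validated, so parent lookups can never reach forward
	 * into unverified objects.
	 */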
	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size;
		struct binder_object object;
		binder_size_t object_offset;

		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
						  &object_offset,
						  t->buffer,
						  buffer_offset,
						  sizeof(object_offset))) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}
		object_size = binder_get_object(target_proc, t->buffer,
						object_offset, &object);
		if (object_size == 0 || object_offset < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid,
					  (u64)object_offset,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		hdr = &object.hdr;
		off_min = object_offset + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);

			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			binder_size_t fd_offset = object_offset +
				(uintptr_t)&fp->fd - (uintptr_t)fp;
			int ret = binder_translate_fd(fp->fd, fd_offset, t,
						      thread, in_reply_to);

			fp->pad_binder = 0;
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_object ptr_object;
			binder_size_t parent_offset;
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			size_t num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
			struct binder_buffer_object *parent =
				binder_validate_ptr(target_proc, t->buffer,
						    &ptr_object, fda->parent,
						    off_start_offset,
						    &parent_offset,
						    num_valid);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(target_proc, t->buffer,
						   off_start_offset,
						   parent_offset,
						   fda->parent_offset,
						   last_fixup_obj_off,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(fda, parent, t, thread,
							in_reply_to);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj_off = parent_offset;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
			size_t num_valid;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			if (binder_alloc_copy_user_to_buffer(
						&target_proc->alloc,
						t->buffer,
						sg_buf_offset,
						(const void __user *)
							(uintptr_t)bp->buffer,
						bp->length)) {
				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
						  proc->pid, thread->pid);
				return_error_param = -EFAULT;
				return_error = BR_FAILED_REPLY;
				return_error_line = __LINE__;
				goto err_copy_data_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)
				t->buffer->user_data + sg_buf_offset;
			sg_buf_offset += ALIGN(bp->length, sizeof(u64));

			num_valid = (buffer_offset - off_start_offset) /
					sizeof(binder_size_t);
			ret = binder_fixup_parent(t, thread, bp,
						  off_start_offset,
						  num_valid,
						  last_fixup_obj_off,
						  last_fixup_min_off);
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							bp, sizeof(*bp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj_off = object_offset;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
					  proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
	if (t->buffer->oneway_spam_suspect)
		tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
	else
		tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	t->work.type = BINDER_WORK_TRANSACTION;

	if (reply) {
		binder_enqueue_thread_work(thread, tcomplete);
		binder_inner_proc_lock(target_proc);
		if (target_thread->is_dead) {
			return_error = BR_DEAD_REPLY;
			binder_inner_proc_unlock(target_proc);
			goto err_dead_proc_or_thread;
		}
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);
		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
		target_proc->outstanding_txns++;
		binder_inner_proc_unlock(target_proc);
		wake_up_interruptible_sync(&target_thread->wait);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_inner_proc_lock(proc);
		/*
		 * Defer the TRANSACTION_COMPLETE, so we don't return to
		 * userspace immediately; this allows the target process to
		 * immediately start processing this transaction, reducing
		 * latency. We will then return the TRANSACTION_COMPLETE when
		 * the target replies (or there is an error).
		 */
		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
		binder_inner_proc_unlock(proc);
		return_error = binder_proc_transaction(t,
				target_proc, target_thread);
		if (return_error) {
			binder_inner_proc_lock(proc);
			binder_pop_transaction_ilocked(thread, t);
			binder_inner_proc_unlock(proc);
			goto err_dead_proc_or_thread;
		}
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		binder_enqueue_thread_work(thread, tcomplete);
		return_error = binder_proc_transaction(t, target_proc, NULL);
		if (return_error)
			goto err_dead_proc_or_thread;
	}
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	binder_proc_dec_tmpref(target_proc);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;

err_dead_proc_or_thread:
	return_error_line = __LINE__;
	binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	binder_free_txn_fixups(t);
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, NULL, t->buffer,
					  buffer_offset, true);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	target_node = NULL;
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
err_bad_extra_size:
	if (secctx)
		security_release_secctx(secctx, secctx_sz);
err_get_secctx_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_todo_list:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	if (target_proc)
		binder_proc_dec_tmpref(target_proc);
	if (target_node) {
		binder_dec_node(target_node, 1, 0);
		binder_dec_node_tmpref(target_node);
	}

	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
		     proc->pid, thread->pid, return_error, return_error_param,
		     (u64)tr->data_size, (u64)tr->offsets_size,
		     return_error_line);

	{
		struct binder_transaction_log_entry *fe;

		e->return_error = return_error;
		e->return_error_param = return_error_param;
		e->return_error_line = return_error_line;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		thread->return_error.cmd = return_error;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
	}
}
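/*
 * User-space view of the above, as a hedged sketch (standard uapi
 * structures; names like binder_fd, FUNC_CODE, args and rbuf are
 * illustrative, and error handling is omitted). A synchronous call
 * amounts to:
 *
 *	struct binder_transaction_data tr = {
 *		.target.handle = handle,
 *		.code = FUNC_CODE,	// protocol-defined, illustrative
 *		.data_size = sizeof(args),
 *		.data.ptr.buffer = (binder_uintptr_t)&args,
 *	};
 *	uint8_t wbuf[sizeof(uint32_t) + sizeof(tr)];
 *	uint8_t rbuf[256];
 *	memcpy(wbuf, &(uint32_t){ BC_TRANSACTION }, sizeof(uint32_t));
 *	memcpy(wbuf + sizeof(uint32_t), &tr, sizeof(tr));
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(wbuf),
 *		.write_buffer = (binder_uintptr_t)wbuf,
 *		.read_size = sizeof(rbuf),
 *		.read_buffer = (binder_uintptr_t)rbuf,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 * The sender then parses rbuf for BR_TRANSACTION_COMPLETE (deferred by
 * the code above until the reply is on its way) followed by BR_REPLY,
 * and must eventually issue BC_FREE_BUFFER for the reply buffer.
 */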
/**
 * binder_free_buf() - free the specified buffer
 * @proc:	binder proc that owns buffer
 * @thread:	binder thread performing the buffer release
 * @buffer:	buffer to be freed
 *
 * If buffer for an async transaction, enqueue the next async
 * transaction from the node.
 *
 * Cleanup buffer and free it.
 */
static void
binder_free_buf(struct binder_proc *proc,
		struct binder_thread *thread,
		struct binder_buffer *buffer)
{
	binder_inner_proc_lock(proc);
	if (buffer->transaction) {
		buffer->transaction->buffer = NULL;
		buffer->transaction = NULL;
	}
	binder_inner_proc_unlock(proc);
	if (buffer->async_transaction && buffer->target_node) {
		struct binder_node *buf_node;
		struct binder_work *w;

		buf_node = buffer->target_node;
		binder_node_inner_lock(buf_node);
		BUG_ON(!buf_node->has_async_transaction);
		BUG_ON(buf_node->proc != proc);
		w = binder_dequeue_work_head_ilocked(
				&buf_node->async_todo);
		if (!w) {
			buf_node->has_async_transaction = false;
		} else {
			binder_enqueue_work_ilocked(
					w, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
		binder_node_inner_unlock(buf_node);
	}
	trace_binder_transaction_buffer_release(buffer);
	binder_transaction_buffer_release(proc, thread, buffer, 0, false);
	binder_alloc_free_buf(&proc->alloc, buffer);
}
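/*
 * Illustrative consequence of the async_todo handling above: if a client
 * fires three oneway transactions at the same node back to back, only the
 * first is delivered immediately; the second and third wait on
 * node->async_todo and are released one at a time, here, as the target
 * frees each buffer with BC_FREE_BUFFER. Oneway traffic to a node is thus
 * paced by buffer consumption rather than delivered unbounded.
 */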
static int binder_thread_write(struct binder_proc *proc,
			       struct binder_thread *thread,
			       binder_uintptr_t binder_buffer, size_t size,
			       binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error.cmd == BR_OK) {
		int ret;

		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
			ret = -1;
			if (increment && !target) {
				struct binder_node *ctx_mgr_node;

				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node) {
					if (ctx_mgr_node->proc == proc) {
						binder_user_error("%d:%d context manager tried to acquire desc 0\n",
								  proc->pid, thread->pid);
						mutex_unlock(&context->context_mgr_node_lock);
						return -EINVAL;
					}
					ret = binder_inc_ref_for_node(
							proc, ctx_mgr_node,
							strong, NULL, &rdata);
				}
				mutex_unlock(&context->context_mgr_node_lock);
			}
			if (ret)
				ret = binder_update_ref_for_handle(
						proc, target, increment, strong,
						&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
						  proc->pid, thread->pid,
						  target, rdata.desc);
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				break;
			case BC_RELEASE:
				debug_string = "Release";
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				break;
			}
			if (ret) {
				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
						  proc->pid, thread->pid, debug_string,
						  strong, target, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d\n",
				     proc->pid, thread->pid, debug_string,
				     rdata.debug_id, rdata.desc, rdata.strong,
				     rdata.weak);
			break;
		}
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 3385 node->debug_id, node->local_strong_refs, 3386 node->local_weak_refs, node->tmp_refs); 3387 binder_node_inner_unlock(node); 3388 binder_put_node(node); 3389 break; 3390 } 3391 case BC_ATTEMPT_ACQUIRE: 3392 pr_err("BC_ATTEMPT_ACQUIRE not supported\n"); 3393 return -EINVAL; 3394 case BC_ACQUIRE_RESULT: 3395 pr_err("BC_ACQUIRE_RESULT not supported\n"); 3396 return -EINVAL; 3397 3398 case BC_FREE_BUFFER: { 3399 binder_uintptr_t data_ptr; 3400 struct binder_buffer *buffer; 3401 3402 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr)) 3403 return -EFAULT; 3404 ptr += sizeof(binder_uintptr_t); 3405 3406 buffer = binder_alloc_prepare_to_free(&proc->alloc, 3407 data_ptr); 3408 if (IS_ERR_OR_NULL(buffer)) { 3409 if (PTR_ERR(buffer) == -EPERM) { 3410 binder_user_error( 3411 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n", 3412 proc->pid, thread->pid, 3413 (u64)data_ptr); 3414 } else { 3415 binder_user_error( 3416 "%d:%d BC_FREE_BUFFER u%016llx no match\n", 3417 proc->pid, thread->pid, 3418 (u64)data_ptr); 3419 } 3420 break; 3421 } 3422 binder_debug(BINDER_DEBUG_FREE_BUFFER, 3423 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n", 3424 proc->pid, thread->pid, (u64)data_ptr, 3425 buffer->debug_id, 3426 buffer->transaction ? "active" : "finished"); 3427 binder_free_buf(proc, thread, buffer); 3428 break; 3429 } 3430 3431 case BC_TRANSACTION_SG: 3432 case BC_REPLY_SG: { 3433 struct binder_transaction_data_sg tr; 3434 3435 if (copy_from_user(&tr, ptr, sizeof(tr))) 3436 return -EFAULT; 3437 ptr += sizeof(tr); 3438 binder_transaction(proc, thread, &tr.transaction_data, 3439 cmd == BC_REPLY_SG, tr.buffers_size); 3440 break; 3441 } 3442 case BC_TRANSACTION: 3443 case BC_REPLY: { 3444 struct binder_transaction_data tr; 3445 3446 if (copy_from_user(&tr, ptr, sizeof(tr))) 3447 return -EFAULT; 3448 ptr += sizeof(tr); 3449 binder_transaction(proc, thread, &tr, 3450 cmd == BC_REPLY, 0); 3451 break; 3452 } 3453 3454 case BC_REGISTER_LOOPER: 3455 binder_debug(BINDER_DEBUG_THREADS, 3456 "%d:%d BC_REGISTER_LOOPER\n", 3457 proc->pid, thread->pid); 3458 binder_inner_proc_lock(proc); 3459 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { 3460 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3461 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", 3462 proc->pid, thread->pid); 3463 } else if (proc->requested_threads == 0) { 3464 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3465 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n", 3466 proc->pid, thread->pid); 3467 } else { 3468 proc->requested_threads--; 3469 proc->requested_threads_started++; 3470 } 3471 thread->looper |= BINDER_LOOPER_STATE_REGISTERED; 3472 binder_inner_proc_unlock(proc); 3473 break; 3474 case BC_ENTER_LOOPER: 3475 binder_debug(BINDER_DEBUG_THREADS, 3476 "%d:%d BC_ENTER_LOOPER\n", 3477 proc->pid, thread->pid); 3478 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { 3479 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3480 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n", 3481 proc->pid, thread->pid); 3482 } 3483 thread->looper |= BINDER_LOOPER_STATE_ENTERED; 3484 break; 3485 case BC_EXIT_LOOPER: 3486 binder_debug(BINDER_DEBUG_THREADS, 3487 "%d:%d BC_EXIT_LOOPER\n", 3488 proc->pid, thread->pid); 3489 thread->looper |= BINDER_LOOPER_STATE_EXITED; 3490 break; 3491 3492 case BC_REQUEST_DEATH_NOTIFICATION: 3493 case BC_CLEAR_DEATH_NOTIFICATION: { 3494 uint32_t target; 
		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death = NULL;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					binder_enqueue_thread_work(
						thread,
						&thread->return_error.work);
					binder_debug(
						BINDER_DEBUG_FAILED_TRANSACTION,
						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						proc->pid, thread->pid);
					break;
				}
			}
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
						  proc->pid, thread->pid,
						  cmd == BC_REQUEST_DEATH_NOTIFICATION ?
						  "BC_REQUEST_DEATH_NOTIFICATION" :
						  "BC_CLEAR_DEATH_NOTIFICATION",
						  target);
				binder_proc_unlock(proc);
				kfree(death);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->data.debug_id,
				     ref->data.desc, ref->data.strong,
				     ref->data.weak, ref->node->debug_id);

			binder_node_lock(ref->node);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
							  proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					kfree(death);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;

					binder_inner_proc_lock(proc);
					binder_enqueue_work_ilocked(
						&ref->death->work, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
					binder_inner_proc_unlock(proc);
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
							  proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
							  proc->pid, thread->pid,
							  (u64)death->cookie,
							  (u64)cookie);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				ref->death = NULL;
				binder_inner_proc_lock(proc);
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_thread_work_ilocked(
								thread,
								&death->work);
					else {
						binder_enqueue_work_ilocked(
								&death->work,
								&proc->todo);
						binder_wakeup_proc_ilocked(
								proc);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
				binder_inner_proc_unlock(proc);
			}
			binder_node_unlock(ref->node);
			binder_proc_unlock(proc);
		} break;
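		/*
		 * Death notification lifecycle, in brief: user-space arms a
		 * cookie with BC_REQUEST_DEATH_NOTIFICATION above; when the
		 * node's owner dies, the driver delivers BR_DEAD_BINDER and
		 * parks the work on proc->delivered_death; the client then
		 * acknowledges with BC_DEAD_BINDER_DONE (handled below) so
		 * the death record can be retired or, if a clear raced with
		 * the death, converted to a clear-notification work item.
		 */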
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			binder_inner_proc_lock(proc);
			list_for_each_entry(w, &proc->delivered_death,
					    entry) {
				struct binder_ref_death *tmp_death =
					container_of(w,
						     struct binder_ref_death,
						     work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
						  proc->pid, thread->pid, (u64)cookie);
				binder_inner_proc_unlock(proc);
				break;
			}
			binder_dequeue_work_ilocked(&death->work);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper &
				    (BINDER_LOOPER_STATE_REGISTERED |
				     BINDER_LOOPER_STATE_ENTERED))
					binder_enqueue_thread_work_ilocked(
						thread, &death->work);
				else {
					binder_enqueue_work_ilocked(
						&death->work,
						&proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
			}
			binder_inner_proc_unlock(proc);
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}
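/*
 * Illustration (not driver code): the userspace half of the
 * death-notification handshake handled above. The local names are
 * hypothetical; the commands, field order and acknowledgment follow
 * the BC_REQUEST_DEATH_NOTIFICATION and BC_DEAD_BINDER_DONE cases in
 * binder_thread_write().
 *
 *	struct {
 *		uint32_t cmd;
 *		uint32_t handle;
 *		binder_uintptr_t cookie;
 *	} __attribute__((packed)) req = {
 *		.cmd = BC_REQUEST_DEATH_NOTIFICATION,
 *		.handle = target_handle,
 *		.cookie = (binder_uintptr_t)my_cookie,
 *	};
 *	// submit via the write buffer of a BINDER_WRITE_READ ioctl
 *
 * The kernel later delivers BR_DEAD_BINDER with the same cookie in
 * the read buffer; userspace acknowledges with BC_DEAD_BINDER_DONE
 * (same cookie), which is matched against proc->delivered_death in
 * the BC_DEAD_BINDER_DONE case above.
 */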
static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}

static int binder_put_node_cmd(struct binder_proc *proc,
			       struct binder_thread *thread,
			       void __user **ptrp,
			       binder_uintptr_t node_ptr,
			       binder_uintptr_t node_cookie,
			       int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{
	void __user *ptr = *ptrp;

	if (put_user(cmd, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);

	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	binder_stat_br(proc, thread, cmd);
	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
		     proc->pid, thread->pid, cmd_name, node_debug_id,
		     (u64)node_ptr, (u64)node_cookie);

	*ptrp = ptr;
	return 0;
}

static int binder_wait_for_work(struct binder_thread *thread,
				bool do_proc_work)
{
	DEFINE_WAIT(wait);
	struct binder_proc *proc = thread->proc;
	int ret = 0;

	freezer_do_not_count();
	binder_inner_proc_lock(proc);
	for (;;) {
		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
		if (binder_has_work_ilocked(thread, do_proc_work))
			break;
		if (do_proc_work)
			list_add(&thread->waiting_thread_node,
				 &proc->waiting_threads);
		binder_inner_proc_unlock(proc);
		schedule();
		binder_inner_proc_lock(proc);
		list_del_init(&thread->waiting_thread_node);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
	}
	finish_wait(&thread->wait, &wait);
	binder_inner_proc_unlock(proc);
	freezer_count();

	return ret;
}

/**
 * binder_apply_fd_fixups() - finish fd translation
 * @proc:	binder_proc associated with @t->buffer
 * @t:		binder transaction with list of fd fixups
 *
 * Now that we are in the context of the transaction target
 * process, we can allocate and install fds. Process the
 * list of fds to translate and fixup the buffer with the
 * new fds.
 *
 * If we fail to allocate an fd, then free the resources by
 * fput'ing files that have not been processed and ksys_close'ing
 * any fds that have already been allocated.
 */
static int binder_apply_fd_fixups(struct binder_proc *proc,
				  struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;
	int ret = 0;

	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
		int fd = get_unused_fd_flags(O_CLOEXEC);

		if (fd < 0) {
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "failed fd fixup txn %d fd %d\n",
				     t->debug_id, fd);
			ret = -ENOMEM;
			break;
		}
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "fd fixup txn %d fd %d\n",
			     t->debug_id, fd);
		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
		fd_install(fd, fixup->file);
		fixup->file = NULL;
		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
						fixup->offset, &fd,
						sizeof(u32))) {
			ret = -EINVAL;
			break;
		}
	}
	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		if (fixup->file) {
			fput(fixup->file);
		} else if (ret) {
			u32 fd;
			int err;

			err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
							    t->buffer,
							    fixup->offset,
							    sizeof(fd));
			WARN_ON(err);
			if (!err)
				binder_deferred_fd_close(fd);
		}
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}

	return ret;
}
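/*
 * Illustration (not driver code): after the fixups above run, a
 * BINDER_TYPE_FD object embedded in the received buffer carries an
 * fd that is valid in the receiving process. A receiver walking its
 * offsets could read it roughly like this ("buf" and "off" are
 * hypothetical locals):
 *
 *	struct binder_fd_object *obj =
 *		(struct binder_fd_object *)(buf + off);
 *
 *	if (obj->hdr.type == BINDER_TYPE_FD)
 *		use_fd(obj->fd);	// installed by fd_install() above
 */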
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
					  proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
	}

	if (non_block) {
		if (!binder_has_work(thread, wait_for_proc_work))
			ret = -EAGAIN;
	} else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}

	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data_secctx tr;
		struct binder_transaction_data *trd = &tr.transaction_data;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;
		size_t trsize = sizeof(*trd);

		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			 wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);
		if (binder_worklist_empty_ilocked(&thread->todo))
			thread->process_todo = false;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			WARN_ON(e->cmd == BR_OK);
			binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			cmd = e->cmd;
			e->cmd = BR_OK;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE:
		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
			if (proc->oneway_spam_detection_enabled &&
			    w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
				cmd = BR_ONEWAY_SPAM_SUSPECT;
			else
				cmd = BR_TRANSACTION_COMPLETE;
			binder_inner_proc_unlock(proc);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			int strong, weak;
			binder_uintptr_t node_ptr = node->ptr;
			binder_uintptr_t node_cookie = node->cookie;
			int node_debug_id = node->debug_id;
			int has_weak_ref;
			int has_strong_ref;
			void __user *orig_ptr = ptr;

			BUG_ON(proc != node->proc);
			strong = node->internal_strong_refs ||
					node->local_strong_refs;
			weak = !hlist_empty(&node->refs) ||
					node->local_weak_refs ||
					node->tmp_refs || strong;
			has_strong_ref = node->has_strong_ref;
			has_weak_ref = node->has_weak_ref;

			if (weak && !has_weak_ref) {
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			}
			if (strong && !has_strong_ref) {
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			}
			if (!strong && has_strong_ref)
				node->has_strong_ref = 0;
			if (!weak && has_weak_ref)
				node->has_weak_ref = 0;
			if (!weak && !strong) {
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx deleted\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_inner_proc_unlock(proc);
				binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
				binder_node_unlock(node);
				binder_free_node(node);
			} else
				binder_inner_proc_unlock(proc);

			if (weak && !has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_INCREFS, "BR_INCREFS");
			if (!ret && strong && !has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_ACQUIRE, "BR_ACQUIRE");
			if (!ret && !strong && has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_RELEASE, "BR_RELEASE");
			if (!ret && !weak && has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_DECREFS, "BR_DECREFS");
			if (orig_ptr == ptr)
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
4042 "BR_DEAD_BINDER" : 4043 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 4044 (u64)cookie); 4045 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { 4046 binder_inner_proc_unlock(proc); 4047 kfree(death); 4048 binder_stats_deleted(BINDER_STAT_DEATH); 4049 } else { 4050 binder_enqueue_work_ilocked( 4051 w, &proc->delivered_death); 4052 binder_inner_proc_unlock(proc); 4053 } 4054 if (put_user(cmd, (uint32_t __user *)ptr)) 4055 return -EFAULT; 4056 ptr += sizeof(uint32_t); 4057 if (put_user(cookie, 4058 (binder_uintptr_t __user *)ptr)) 4059 return -EFAULT; 4060 ptr += sizeof(binder_uintptr_t); 4061 binder_stat_br(proc, thread, cmd); 4062 if (cmd == BR_DEAD_BINDER) 4063 goto done; /* DEAD_BINDER notifications can cause transactions */ 4064 } break; 4065 default: 4066 binder_inner_proc_unlock(proc); 4067 pr_err("%d:%d: bad work type %d\n", 4068 proc->pid, thread->pid, w->type); 4069 break; 4070 } 4071 4072 if (!t) 4073 continue; 4074 4075 BUG_ON(t->buffer == NULL); 4076 if (t->buffer->target_node) { 4077 struct binder_node *target_node = t->buffer->target_node; 4078 4079 trd->target.ptr = target_node->ptr; 4080 trd->cookie = target_node->cookie; 4081 t->saved_priority = task_nice(current); 4082 if (t->priority < target_node->min_priority && 4083 !(t->flags & TF_ONE_WAY)) 4084 binder_set_nice(t->priority); 4085 else if (!(t->flags & TF_ONE_WAY) || 4086 t->saved_priority > target_node->min_priority) 4087 binder_set_nice(target_node->min_priority); 4088 cmd = BR_TRANSACTION; 4089 } else { 4090 trd->target.ptr = 0; 4091 trd->cookie = 0; 4092 cmd = BR_REPLY; 4093 } 4094 trd->code = t->code; 4095 trd->flags = t->flags; 4096 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid); 4097 4098 t_from = binder_get_txn_from(t); 4099 if (t_from) { 4100 struct task_struct *sender = t_from->proc->tsk; 4101 4102 trd->sender_pid = 4103 task_tgid_nr_ns(sender, 4104 task_active_pid_ns(current)); 4105 } else { 4106 trd->sender_pid = 0; 4107 } 4108 4109 ret = binder_apply_fd_fixups(proc, t); 4110 if (ret) { 4111 struct binder_buffer *buffer = t->buffer; 4112 bool oneway = !!(t->flags & TF_ONE_WAY); 4113 int tid = t->debug_id; 4114 4115 if (t_from) 4116 binder_thread_dec_tmpref(t_from); 4117 buffer->transaction = NULL; 4118 binder_cleanup_transaction(t, "fd fixups failed", 4119 BR_FAILED_REPLY); 4120 binder_free_buf(proc, thread, buffer); 4121 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 4122 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n", 4123 proc->pid, thread->pid, 4124 oneway ? "async " : 4125 (cmd == BR_REPLY ? 
"reply " : ""), 4126 tid, BR_FAILED_REPLY, ret, __LINE__); 4127 if (cmd == BR_REPLY) { 4128 cmd = BR_FAILED_REPLY; 4129 if (put_user(cmd, (uint32_t __user *)ptr)) 4130 return -EFAULT; 4131 ptr += sizeof(uint32_t); 4132 binder_stat_br(proc, thread, cmd); 4133 break; 4134 } 4135 continue; 4136 } 4137 trd->data_size = t->buffer->data_size; 4138 trd->offsets_size = t->buffer->offsets_size; 4139 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data; 4140 trd->data.ptr.offsets = trd->data.ptr.buffer + 4141 ALIGN(t->buffer->data_size, 4142 sizeof(void *)); 4143 4144 tr.secctx = t->security_ctx; 4145 if (t->security_ctx) { 4146 cmd = BR_TRANSACTION_SEC_CTX; 4147 trsize = sizeof(tr); 4148 } 4149 if (put_user(cmd, (uint32_t __user *)ptr)) { 4150 if (t_from) 4151 binder_thread_dec_tmpref(t_from); 4152 4153 binder_cleanup_transaction(t, "put_user failed", 4154 BR_FAILED_REPLY); 4155 4156 return -EFAULT; 4157 } 4158 ptr += sizeof(uint32_t); 4159 if (copy_to_user(ptr, &tr, trsize)) { 4160 if (t_from) 4161 binder_thread_dec_tmpref(t_from); 4162 4163 binder_cleanup_transaction(t, "copy_to_user failed", 4164 BR_FAILED_REPLY); 4165 4166 return -EFAULT; 4167 } 4168 ptr += trsize; 4169 4170 trace_binder_transaction_received(t); 4171 binder_stat_br(proc, thread, cmd); 4172 binder_debug(BINDER_DEBUG_TRANSACTION, 4173 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", 4174 proc->pid, thread->pid, 4175 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : 4176 (cmd == BR_TRANSACTION_SEC_CTX) ? 4177 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY", 4178 t->debug_id, t_from ? t_from->proc->pid : 0, 4179 t_from ? t_from->pid : 0, cmd, 4180 t->buffer->data_size, t->buffer->offsets_size, 4181 (u64)trd->data.ptr.buffer, 4182 (u64)trd->data.ptr.offsets); 4183 4184 if (t_from) 4185 binder_thread_dec_tmpref(t_from); 4186 t->buffer->allow_user_free = 1; 4187 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) { 4188 binder_inner_proc_lock(thread->proc); 4189 t->to_parent = thread->transaction_stack; 4190 t->to_thread = thread; 4191 thread->transaction_stack = t; 4192 binder_inner_proc_unlock(thread->proc); 4193 } else { 4194 binder_free_transaction(t); 4195 } 4196 break; 4197 } 4198 4199 done: 4200 4201 *consumed = ptr - buffer; 4202 binder_inner_proc_lock(proc); 4203 if (proc->requested_threads == 0 && 4204 list_empty(&thread->proc->waiting_threads) && 4205 proc->requested_threads_started < proc->max_threads && 4206 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 4207 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ 4208 /*spawn a new thread if we leave this out */) { 4209 proc->requested_threads++; 4210 binder_inner_proc_unlock(proc); 4211 binder_debug(BINDER_DEBUG_THREADS, 4212 "%d:%d BR_SPAWN_LOOPER\n", 4213 proc->pid, thread->pid); 4214 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) 4215 return -EFAULT; 4216 binder_stat_br(proc, thread, BR_SPAWN_LOOPER); 4217 } else 4218 binder_inner_proc_unlock(proc); 4219 return 0; 4220 } 4221 4222 static void binder_release_work(struct binder_proc *proc, 4223 struct list_head *list) 4224 { 4225 struct binder_work *w; 4226 enum binder_work_type wtype; 4227 4228 while (1) { 4229 binder_inner_proc_lock(proc); 4230 w = binder_dequeue_work_head_ilocked(list); 4231 wtype = w ? 
static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;
	enum binder_work_type wtype;

	while (1) {
		binder_inner_proc_lock(proc);
		w = binder_dequeue_work_head_ilocked(list);
		wtype = w ? w->type : 0;
		binder_inner_proc_unlock(proc);
		if (!w)
			return;

		switch (wtype) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);

			binder_cleanup_transaction(t, "process died.",
						   BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				     "undelivered TRANSACTION_ERROR: %u\n",
				     e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				     "undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				     "undelivered death notification, %016llx\n",
				     (u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		case BINDER_WORK_NODE:
			break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       wtype);
			break;
		}
	}
}

static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}
static void binder_free_proc(struct binder_proc *proc)
{
	struct binder_device *device;

	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	if (proc->outstanding_txns)
		pr_warn("%s: Unexpected outstanding_txns %d\n",
			__func__, proc->outstanding_txns);
	device = container_of(proc->context, struct binder_device, context);
	if (refcount_dec_and_test(&device->ref)) {
		kfree(proc->context->name);
		kfree(device);
	}
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}

static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	kfree(thread);
}

static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	} else {
		__acquire(&t->lock);
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			thread->proc->outstanding_txns--;
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
		else
			__acquire(&t->lock);
	}
	/* annotation for sparse, lock not acquired in last iteration above */
	__release(&t->lock);

	/*
	 * If this thread used poll, make sure we remove the waitqueue
	 * from any epoll data structures holding it with POLLFREE.
	 * waitqueue_active() is safe to use here because we're holding
	 * the inner lock.
	 */
	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
	    waitqueue_active(&thread->wait)) {
		wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
	}

	binder_inner_proc_unlock(thread->proc);

	/*
	 * This is needed to avoid races between wake_up_poll() above and
	 * ep_remove_waitqueue() called for other reasons (eg the epoll file
	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
	 * lock, so we can be sure it's done after calling synchronize_rcu().
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		synchronize_rcu();

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}

static __poll_t binder_poll(struct file *filp,
			    struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);
	if (!thread)
		return POLLERR;

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);

	binder_inner_proc_unlock(thread->proc);

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
		return EPOLLIN;

	return 0;
}

static int binder_ioctl_write_read(struct file *filp,
				   unsigned int cmd, unsigned long arg,
				   struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
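/*
 * Illustration (not driver code): how userspace typically drives
 * binder_ioctl_write_read() above. A single BINDER_WRITE_READ call
 * can both submit BC_* commands and pull back BR_* results; the
 * *_consumed fields report how far the kernel got in each buffer.
 * "fd", "wbuf", "wlen" and "rbuf" are hypothetical locals.
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)wbuf,
 *		.write_size = wlen,
 *		.read_buffer = (binder_uintptr_t)rbuf,
 *		.read_size = sizeof(rbuf),
 *	};
 *
 *	if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
 *		handle_error(errno);
 *	// bwr.write_consumed and bwr.read_consumed are now valid
 */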
static int binder_ioctl_set_ctx_mgr(struct file *filp,
				    struct flat_binder_object *fbo)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, fbo);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}
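/*
 * Illustration (not driver code): the context manager (servicemanager
 * on Android) claims handle 0 right after opening the device. Passing
 * a zero argument takes the BINDER_SET_CONTEXT_MGR path in
 * binder_ioctl(), which calls the function above with fbo == NULL:
 *
 *	if (ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0) < 0)
 *		fail("cannot become context manager");
 */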
static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
					      struct binder_node_info_for_ref *info)
{
	struct binder_node *node;
	struct binder_context *context = proc->context;
	__u32 handle = info->handle;

	if (info->strong_count || info->weak_count || info->reserved1 ||
	    info->reserved2 || info->reserved3) {
		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
				  proc->pid);
		return -EINVAL;
	}

	/* This ioctl may only be used by the context manager */
	mutex_lock(&context->context_mgr_node_lock);
	if (!context->binder_context_mgr_node ||
	    context->binder_context_mgr_node->proc != proc) {
		mutex_unlock(&context->context_mgr_node_lock);
		return -EPERM;
	}
	mutex_unlock(&context->context_mgr_node_lock);

	node = binder_get_node_from_ref(proc, handle, true, NULL);
	if (!node)
		return -EINVAL;

	info->strong_count = node->local_strong_refs +
		node->internal_strong_refs;
	info->weak_count = node->local_weak_refs;

	binder_put_node(node);

	return 0;
}

static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
					    struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;

	memset(info, 0, sizeof(*info));

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}

static bool binder_txns_pending_ilocked(struct binder_proc *proc)
{
	struct rb_node *n;
	struct binder_thread *thread;

	if (proc->outstanding_txns > 0)
		return true;

	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->transaction_stack)
			return true;
	}
	return false;
}

static int binder_ioctl_freeze(struct binder_freeze_info *info,
			       struct binder_proc *target_proc)
{
	int ret = 0;

	if (!info->enable) {
		binder_inner_proc_lock(target_proc);
		target_proc->sync_recv = false;
		target_proc->async_recv = false;
		target_proc->is_frozen = false;
		binder_inner_proc_unlock(target_proc);
		return 0;
	}

	/*
	 * Freezing the target. Prevent new transactions by
	 * setting frozen state. If timeout specified, wait
	 * for transactions to drain.
	 */
	binder_inner_proc_lock(target_proc);
	target_proc->sync_recv = false;
	target_proc->async_recv = false;
	target_proc->is_frozen = true;
	binder_inner_proc_unlock(target_proc);

	if (info->timeout_ms > 0)
		ret = wait_event_interruptible_timeout(
			target_proc->freeze_wait,
			(!target_proc->outstanding_txns),
			msecs_to_jiffies(info->timeout_ms));

	/* Check pending transactions that wait for reply */
	if (ret >= 0) {
		binder_inner_proc_lock(target_proc);
		if (binder_txns_pending_ilocked(target_proc))
			ret = -EAGAIN;
		binder_inner_proc_unlock(target_proc);
	}

	if (ret < 0) {
		binder_inner_proc_lock(target_proc);
		target_proc->is_frozen = false;
		binder_inner_proc_unlock(target_proc);
	}

	return ret;
}

static int binder_ioctl_get_freezer_info(
				struct binder_frozen_status_info *info)
{
	struct binder_proc *target_proc;
	bool found = false;
	__u32 txns_pending;

	info->sync_recv = 0;
	info->async_recv = 0;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
		if (target_proc->pid == info->pid) {
			found = true;
			binder_inner_proc_lock(target_proc);
			txns_pending = binder_txns_pending_ilocked(target_proc);
			info->sync_recv |= target_proc->sync_recv |
					(txns_pending << 1);
			info->async_recv |= target_proc->async_recv;
			binder_inner_proc_unlock(target_proc);
		}
	}
	mutex_unlock(&binder_procs_lock);

	if (!found)
		return -EINVAL;

	return 0;
}
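/*
 * Illustration (not driver code): how a freezer daemon might use the
 * two paths above. struct binder_freeze_info is the UAPI type
 * consumed by binder_ioctl_freeze(); EAGAIN signals transactions
 * still pending after the timeout.
 *
 *	struct binder_freeze_info info = {
 *		.pid = target_pid,	// hypothetical target
 *		.enable = 1,
 *		.timeout_ms = 100,	// wait for outstanding txns
 *	};
 *
 *	if (ioctl(binder_fd, BINDER_FREEZE, &info) < 0 &&
 *	    errno == EAGAIN) {
 *		// transactions still pending: thaw (enable = 0) or retry
 *	}
 */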
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR_EXT: {
		struct flat_binder_object fbo;

		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
			ret = -EINVAL;
			goto err;
		}
		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
		if (ret)
			goto err;
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_INFO_FOR_REF: {
		struct binder_node_info_for_ref info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		break;
	}
	case BINDER_GET_NODE_DEBUG_INFO: {
		struct binder_node_debug_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_FREEZE: {
		struct binder_freeze_info info;
		struct binder_proc **target_procs = NULL, *target_proc;
		int target_procs_count = 0, i = 0;

		ret = 0;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		mutex_lock(&binder_procs_lock);
		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
			if (target_proc->pid == info.pid)
				target_procs_count++;
		}

		if (target_procs_count == 0) {
			mutex_unlock(&binder_procs_lock);
			ret = -EINVAL;
			goto err;
		}

		target_procs = kcalloc(target_procs_count,
				       sizeof(struct binder_proc *),
				       GFP_KERNEL);

		if (!target_procs) {
			mutex_unlock(&binder_procs_lock);
			ret = -ENOMEM;
			goto err;
		}

		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
			if (target_proc->pid != info.pid)
				continue;

			binder_inner_proc_lock(target_proc);
			target_proc->tmp_ref++;
			binder_inner_proc_unlock(target_proc);

			target_procs[i++] = target_proc;
		}
		mutex_unlock(&binder_procs_lock);

		for (i = 0; i < target_procs_count; i++) {
			if (ret >= 0)
				ret = binder_ioctl_freeze(&info,
							  target_procs[i]);

			binder_proc_dec_tmpref(target_procs[i]);
		}

		kfree(target_procs);

		if (ret < 0)
			goto err;
		break;
	}
	case BINDER_GET_FROZEN_INFO: {
		struct binder_frozen_status_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_freezer_info(&info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
		uint32_t enable;

		if (copy_from_user(&enable, ubuf, sizeof(enable))) {
			ret = -EFAULT;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->oneway_spam_detection_enabled = (bool)enable;
		binder_inner_proc_unlock(proc);
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -EINTR)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid,
			current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}

static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
}

static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};

static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct binder_proc *proc = filp->private_data;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
		return -EPERM;
	}
	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
	vma->vm_flags &= ~VM_MAYWRITE;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	return binder_alloc_mmap_handler(&proc->alloc, vma);
}
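/*
 * Illustration (not driver code): the standard setup sequence from
 * userspace. The mapping must be read-only because binder_mmap()
 * above rejects VM_WRITE via FORBIDDEN_MMAP_FLAGS; "map_size" is a
 * hypothetical constant (libbinder uses roughly 1 MB).
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	void *map = mmap(NULL, map_size, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 * Transaction payloads received with BR_TRANSACTION point into this
 * mapping and are returned to the kernel with BC_FREE_BUFFER.
 */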
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc, *itr;
	struct binder_device *binder_dev;
	struct binderfs_info *info;
	struct dentry *binder_binderfs_dir_entry_proc = NULL;
	bool existing_pid = false;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->freeze_wait);
	proc->default_priority = task_nice(current);
	/* binderfs stashes devices in i_private */
	if (is_binderfs_device(nodp)) {
		binder_dev = nodp->i_private;
		info = nodp->i_sb->s_fs_info;
		binder_binderfs_dir_entry_proc = info->proc_log_dir;
	} else {
		binder_dev = container_of(filp->private_data,
					  struct binder_device, miscdev);
	}
	refcount_inc(&binder_dev->ref);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->waiting_threads);
	filp->private_data = proc;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == proc->pid) {
			existing_pid = true;
			break;
		}
	}
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc && !existing_pid) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts.
		 * Only create for the first PID to avoid debugfs log spamming
		 * The printing code will anyway print all contexts for a given
		 * PID so this is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&proc_fops);
	}

	if (binder_binderfs_dir_entry_proc && !existing_pid) {
		char strbuf[11];
		struct dentry *binderfs_entry;

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * Similar to debugfs, the process specific log file is shared
		 * between contexts. Only create for the first PID.
		 * This is ok since same as debugfs, the log file will contain
		 * information on all contexts of a given PID.
		 */
		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
		if (!IS_ERR(binderfs_entry)) {
			proc->binderfs_entry = binderfs_entry;
		} else {
			int error;

			error = PTR_ERR(binderfs_entry);
			pr_warn("Unable to create file %s in binderfs (error %d)\n",
				strbuf, error);
		}
	}

	return 0;
}

static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}

static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);

	if (proc->binderfs_entry) {
		binderfs_remove_file(proc->binderfs_entry);
		proc->binderfs_entry = NULL;
	}

	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node.
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;

		BUG_ON(!list_empty(&ref->death->work.entry));
		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	proc->is_frozen = false;
	proc->sync_recv = false;
	proc->async_recv = false;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}

static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					   struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
			       &binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}
static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;

	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %pK\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->user_data);
}

static void print_binder_work_ilocked(struct seq_file *m,
				      struct binder_proc *proc,
				      const char *prefix,
				      const char *transaction_prefix,
				      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					"    pending async transaction", w);
	}
}

static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}

static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (!print_all && !node->has_async_transaction)
			continue;

		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (!print_all && !node->has_async_transaction)
			continue;

		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							     struct binder_ref,
							     rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}

static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY",
	"BR_FROZEN_REPLY",
	"BR_ONEWAY_SPAM_SUSPECT",
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};

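/*
 * Dump every non-zero command, return and object counter. The
 * BUILD_BUG_ON()s tie the string tables above to the counter arrays in
 * struct binder_stats, so a new BC_ or BR_ code cannot be added without
 * updating both. For objects, "active" is created minus deleted.
 */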
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}

static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}

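/*
 * Handler for the debugfs "state" file: dump every dead node, then the
 * full state of every binder proc. Dead nodes have no owning proc, so
 * only the node lock is taken while printing them; bumping tmp_refs
 * keeps each node alive across the drop of binder_dead_nodes_lock.
 */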
int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}

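/*
 * Handler for the debugfs "transaction_log" and "failed_transaction_log"
 * files. log->cur is a free-running index into a fixed-size ring: once
 * the ring has wrapped (log->full), start from the oldest slot and print
 * all ARRAY_SIZE(log->entry) entries; otherwise print from slot 0 up to
 * the last one written.
 */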
int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	refcount_set(&binder_device->ref, 1);
	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}

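/*
 * Module init: set up the allocator shrinker, create the debugfs
 * hierarchy, register one misc device per name in the "devices" module
 * parameter (skipped when CONFIG_ANDROID_BINDERFS supplies the devices
 * instead), then initialize binderfs. On failure, any devices already
 * registered and the debugfs tree are torn down.
 */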
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;
	char *device_names = NULL;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
	    strcmp(binder_devices_param, "") != 0) {
		/*
		 * Copy the module_parameter string, because we don't want to
		 * tokenize it in-place.
		 */
		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
		if (!device_names) {
			ret = -ENOMEM;
			goto err_alloc_device_names_failed;
		}

		device_tmp = device_names;
		while ((device_name = strsep(&device_tmp, ","))) {
			ret = init_binder_device(device_name);
			if (ret)
				goto err_init_binder_device_failed;
		}
	}

	ret = init_binderfs();
	if (ret)
		goto err_init_binder_device_failed;

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");