// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * is required via the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
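
/*
 * Illustrative sketch (not driver code): when more than one of the
 * locks above is needed, they must be taken in the documented order
 * and released in reverse order. Using the helpers defined later in
 * this file:
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_node_lock(node);		// 2) node->lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	... touch state protected by all three locks ...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */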

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
        BINDER_DEBUG_USER_ERROR             = 1U << 0,
        BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
        BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
        BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
        BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
        BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
        BINDER_DEBUG_READ_WRITE             = 1U << 6,
        BINDER_DEBUG_USER_REFS              = 1U << 7,
        BINDER_DEBUG_THREADS                = 1U << 8,
        BINDER_DEBUG_TRANSACTION            = 1U << 9,
        BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
        BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
        BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
        BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
        BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
        BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
                                         const struct kernel_param *kp)
{
        int ret;

        ret = param_set_int(val, kp);
        if (binder_stop_on_user_error < 2)
                wake_up(&binder_user_error_wait);
        return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
                  param_get_int, &binder_stop_on_user_error, 0644);
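
/*
 * Usage note (illustrative, not driver code): debug_mask is a writable
 * module parameter, so the mask can be changed at runtime. Assuming
 * the usual module name "binder", something like
 *
 *	echo 0x247 > /sys/module/binder/parameters/debug_mask
 *
 * keeps the default bits (USER_ERROR | FAILED_TRANSACTION |
 * DEAD_TRANSACTION = 0x7) and additionally enables READ_WRITE (0x40)
 * and TRANSACTION (0x200) logging.
 */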

static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
{
        struct va_format vaf;
        va_list args;

        if (binder_debug_mask & mask) {
                va_start(args, format);
                vaf.va = &args;
                vaf.fmt = format;
                pr_info_ratelimited("%pV", &vaf);
                va_end(args);
        }
}

#define binder_txn_error(x...) \
        binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)

static __printf(1, 2) void binder_user_error(const char *format, ...)
{
        struct va_format vaf;
        va_list args;

        if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
                va_start(args, format);
                vaf.va = &args;
                vaf.fmt = format;
                pr_info_ratelimited("%pV", &vaf);
                va_end(args);
        }

        if (binder_stop_on_user_error)
                binder_stop_on_user_error = 2;
}

#define binder_set_extended_error(ee, _id, _command, _param) \
        do { \
                (ee)->id = _id; \
                (ee)->command = _command; \
                (ee)->param = _param; \
        } while (0)

#define to_flat_binder_object(hdr) \
        container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
        container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
        container_of(hdr, struct binder_fd_array_object, hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log binder_transaction_log;
struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
        struct binder_transaction_log *log)
{
        struct binder_transaction_log_entry *e;
        unsigned int cur = atomic_inc_return(&log->cur);

        if (cur >= ARRAY_SIZE(log->entry))
                log->full = true;
        e = &log->entry[cur % ARRAY_SIZE(log->entry)];
        WRITE_ONCE(e->debug_id_done, 0);
        /*
         * write-barrier to synchronize access to e->debug_id_done.
         * We make sure the initialized 0 value is seen before
         * the other fields are zeroed by memset().
         */
        smp_wmb();
        memset(e, 0, sizeof(*e));
        return e;
}
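
/*
 * Sketch of the matching reader side (assumed consumer, e.g. the
 * debugfs log printer): the smp_wmb() above pairs with an smp_rmb()
 * on the reader so that the zeroed debug_id_done is observed before
 * any of the re-zeroed fields:
 *
 *	unsigned int debug_id_done = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();
 *	// copy out *e; the copy is stable only if debug_id_done was
 *	// non-zero and is unchanged after the copy
 */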

enum binder_deferred_state {
        BINDER_DEFERRED_FLUSH = 0x01,
        BINDER_DEFERRED_RELEASE = 0x02,
};

enum {
        BINDER_LOOPER_STATE_REGISTERED  = 0x01,
        BINDER_LOOPER_STATE_ENTERED     = 0x02,
        BINDER_LOOPER_STATE_EXITED      = 0x04,
        BINDER_LOOPER_STATE_INVALID     = 0x08,
        BINDER_LOOPER_STATE_WAITING     = 0x10,
        BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:	struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
        __acquires(&proc->outer_lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:	struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
        __releases(&proc->outer_lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:	struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
        __acquires(&proc->inner_lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:	struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
        __releases(&proc->inner_lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:	struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
        __acquires(&node->lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:	struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
        __releases(&node->lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:	struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
        __acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
        if (node->proc)
                binder_inner_proc_lock(node->proc);
        else
                /* annotation for sparse */
                __acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:	struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
        __releases(&node->lock) __releases(&node->proc->inner_lock)
{
        struct binder_proc *proc = node->proc;

        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        if (proc)
                binder_inner_proc_unlock(proc);
        else
                /* annotation for sparse */
                __release(&node->proc->inner_lock);
        spin_unlock(&node->lock);
}
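
/*
 * Illustrative sketch (not driver code) of the *_nilocked naming
 * convention from the locking overview: a caller takes node->lock and,
 * when the node is attached to a proc, proc->inner_lock before calling
 * a _nilocked() helper, exactly as binder_inc_node() does further
 * below:
 *
 *	binder_node_inner_lock(node);
 *	ret = binder_inc_node_nilocked(node, strong, internal, NULL);
 *	binder_node_inner_unlock(node);
 */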

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
        return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:	binder_proc associated with list
 * @list:	list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
                                  struct list_head *list)
{
        bool ret;

        binder_inner_proc_lock(proc);
        ret = binder_worklist_empty_ilocked(list);
        binder_inner_proc_unlock(proc);
        return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:	struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
                            struct list_head *target_list)
{
        BUG_ON(target_list == NULL);
        BUG_ON(work->entry.next && !list_empty(&work->entry));
        list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
                                            struct binder_work *work)
{
        WARN_ON(!list_empty(&thread->waiting_thread_node));
        binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
                                   struct binder_work *work)
{
        WARN_ON(!list_empty(&thread->waiting_thread_node));
        binder_enqueue_work_ilocked(work, &thread->todo);
        thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
                           struct binder_work *work)
{
        binder_inner_proc_lock(thread->proc);
        binder_enqueue_thread_work_ilocked(thread, work);
        binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
        list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:	binder_proc associated with list
 * @work:	struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
        binder_inner_proc_lock(proc);
        binder_dequeue_work_ilocked(work);
        binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
                                        struct list_head *list)
{
        struct binder_work *w;

        w = list_first_entry_or_null(list, struct binder_work, entry);
        if (w)
                list_del_init(&w->entry);
        return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
                                    bool do_proc_work)
{
        return thread->process_todo ||
                thread->looper_need_return ||
                (do_proc_work &&
                 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
        bool has_work;

        binder_inner_proc_lock(thread->proc);
        has_work = binder_has_work_ilocked(thread, do_proc_work);
        binder_inner_proc_unlock(thread->proc);

        return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
        return !thread->transaction_stack &&
                binder_worklist_empty_ilocked(&thread->todo) &&
                (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
                                   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
                                               bool sync)
{
        struct rb_node *n;
        struct binder_thread *thread;

        for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
                thread = rb_entry(n, struct binder_thread, rb_node);
                if (thread->looper & BINDER_LOOPER_STATE_POLL &&
                    binder_available_for_proc_work_ilocked(thread)) {
                        if (sync)
                                wake_up_interruptible_sync(&thread->wait);
                        else
                                wake_up_interruptible(&thread->wait);
                }
        }
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
        struct binder_thread *thread;

        assert_spin_locked(&proc->inner_lock);
        thread = list_first_entry_or_null(&proc->waiting_threads,
                                          struct binder_thread,
                                          waiting_thread_node);

        if (thread)
                list_del_init(&thread->waiting_thread_node);

        return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread_ilocked() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
                                         struct binder_thread *thread,
                                         bool sync)
{
        assert_spin_locked(&proc->inner_lock);

        if (thread) {
                if (sync)
                        wake_up_interruptible_sync(&thread->wait);
                else
                        wake_up_interruptible(&thread->wait);
                return;
        }

        /* Didn't find a thread waiting for proc work; this can happen
         * in two scenarios:
         * 1. All threads are busy handling transactions
         *    In that case, one of those threads should call back into
         *    the kernel driver soon and pick up this work.
         * 2. Threads are using the (e)poll interface, in which case
         *    they may be blocked on the waitqueue without having been
         *    added to waiting_threads. For this case, we just iterate
         *    over all threads not handling transaction work, and
         *    wake them all up. We wake all because we don't know whether
         *    a thread that called into (e)poll is handling non-binder
         *    work currently.
         */
        binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
        struct binder_thread *thread = binder_select_thread_ilocked(proc);

        binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
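
/*
 * Illustrative sketch (not driver code) of the select-then-wake
 * pattern described in the kerneldoc above, as a caller queueing new
 * proc work under the inner lock might use it:
 *
 *	binder_inner_proc_lock(proc);
 *	thread = binder_select_thread_ilocked(proc);
 *	binder_enqueue_work_ilocked(work, &proc->todo);
 *	binder_wakeup_thread_ilocked(proc, thread, true);
 *	binder_inner_proc_unlock(proc);
 *
 * binder_wakeup_proc_ilocked() above is the shorthand for the common
 * case where no specific thread or synchronous wake-up is needed.
 */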

static void binder_set_nice(long nice)
{
        long min_nice;

        if (can_nice(current, nice)) {
                set_user_nice(current, nice);
                return;
        }
        min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
        binder_debug(BINDER_DEBUG_PRIORITY_CAP,
                     "%d: nice value %ld not allowed use %ld instead\n",
                     current->pid, nice, min_nice);
        set_user_nice(current, min_nice);
        if (min_nice <= MAX_NICE)
                return;
        binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
                                                   binder_uintptr_t ptr)
{
        struct rb_node *n = proc->nodes.rb_node;
        struct binder_node *node;

        assert_spin_locked(&proc->inner_lock);

        while (n) {
                node = rb_entry(n, struct binder_node, rb_node);

                if (ptr < node->ptr)
                        n = n->rb_left;
                else if (ptr > node->ptr)
                        n = n->rb_right;
                else {
                        /*
                         * take an implicit weak reference
                         * to ensure node stays alive until
                         * call to binder_put_node()
                         */
                        binder_inc_node_tmpref_ilocked(node);
                        return node;
                }
        }
        return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
                                           binder_uintptr_t ptr)
{
        struct binder_node *node;

        binder_inner_proc_lock(proc);
        node = binder_get_node_ilocked(proc, ptr);
        binder_inner_proc_unlock(proc);
        return node;
}
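
/*
 * Usage sketch (not driver code): every successful lookup above takes
 * an implicit temporary reference, so callers must pair it with
 * binder_put_node() (defined further below):
 *
 *	node = binder_get_node(proc, ptr);
 *	if (node) {
 *		... use node; it cannot be freed here ...
 *		binder_put_node(node);
 *	}
 */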

static struct binder_node *binder_init_node_ilocked(
                                                struct binder_proc *proc,
                                                struct binder_node *new_node,
                                                struct flat_binder_object *fp)
{
        struct rb_node **p = &proc->nodes.rb_node;
        struct rb_node *parent = NULL;
        struct binder_node *node;
        binder_uintptr_t ptr = fp ? fp->binder : 0;
        binder_uintptr_t cookie = fp ? fp->cookie : 0;
        __u32 flags = fp ? fp->flags : 0;

        assert_spin_locked(&proc->inner_lock);

        while (*p) {

                parent = *p;
                node = rb_entry(parent, struct binder_node, rb_node);

                if (ptr < node->ptr)
                        p = &(*p)->rb_left;
                else if (ptr > node->ptr)
                        p = &(*p)->rb_right;
                else {
                        /*
                         * A matching node is already in
                         * the rb tree. Abandon the init
                         * and return it.
                         */
                        binder_inc_node_tmpref_ilocked(node);
                        return node;
                }
        }
        node = new_node;
        binder_stats_created(BINDER_STAT_NODE);
        node->tmp_refs++;
        rb_link_node(&node->rb_node, parent, p);
        rb_insert_color(&node->rb_node, &proc->nodes);
        node->debug_id = atomic_inc_return(&binder_last_id);
        node->proc = proc;
        node->ptr = ptr;
        node->cookie = cookie;
        node->work.type = BINDER_WORK_NODE;
        node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
        node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
        node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
        spin_lock_init(&node->lock);
        INIT_LIST_HEAD(&node->work.entry);
        INIT_LIST_HEAD(&node->async_todo);
        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                     "%d:%d node %d u%016llx c%016llx created\n",
                     proc->pid, current->pid, node->debug_id,
                     (u64)node->ptr, (u64)node->cookie);

        return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
                                           struct flat_binder_object *fp)
{
        struct binder_node *node;
        struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

        if (!new_node)
                return NULL;
        binder_inner_proc_lock(proc);
        node = binder_init_node_ilocked(proc, new_node, fp);
        binder_inner_proc_unlock(proc);
        if (node != new_node)
                /*
                 * The node was already added by another thread
                 */
                kfree(new_node);

        return node;
}

static void binder_free_node(struct binder_node *node)
{
        kfree(node);
        binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
                                    int internal,
                                    struct list_head *target_list)
{
        struct binder_proc *proc = node->proc;

        assert_spin_locked(&node->lock);
        if (proc)
                assert_spin_locked(&proc->inner_lock);
        if (strong) {
                if (internal) {
                        if (target_list == NULL &&
                            node->internal_strong_refs == 0 &&
                            !(node->proc &&
                              node == node->proc->context->binder_context_mgr_node &&
                              node->has_strong_ref)) {
                                pr_err("invalid inc strong node for %d\n",
                                        node->debug_id);
                                return -EINVAL;
                        }
                        node->internal_strong_refs++;
                } else
                        node->local_strong_refs++;
                if (!node->has_strong_ref && target_list) {
                        struct binder_thread *thread = container_of(target_list,
                                        struct binder_thread, todo);
                        binder_dequeue_work_ilocked(&node->work);
                        BUG_ON(&thread->todo != target_list);
                        binder_enqueue_deferred_thread_work_ilocked(thread,
                                                                    &node->work);
                }
        } else {
                if (!internal)
                        node->local_weak_refs++;
                if (!node->has_weak_ref && list_empty(&node->work.entry)) {
                        if (target_list == NULL) {
                                pr_err("invalid inc weak node for %d\n",
                                        node->debug_id);
                                return -EINVAL;
                        }
                        /*
                         * See comment above
                         */
                        binder_enqueue_work_ilocked(&node->work, target_list);
                }
        }
        return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
                           struct list_head *target_list)
{
        int ret;

        binder_node_inner_lock(node);
        ret = binder_inc_node_nilocked(node, strong, internal, target_list);
        binder_node_inner_unlock(node);

        return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
                                     int strong, int internal)
{
        struct binder_proc *proc = node->proc;

        assert_spin_locked(&node->lock);
        if (proc)
                assert_spin_locked(&proc->inner_lock);
        if (strong) {
                if (internal)
                        node->internal_strong_refs--;
                else
                        node->local_strong_refs--;
                if (node->local_strong_refs || node->internal_strong_refs)
                        return false;
        } else {
                if (!internal)
                        node->local_weak_refs--;
                if (node->local_weak_refs || node->tmp_refs ||
                    !hlist_empty(&node->refs))
                        return false;
        }

        if (proc && (node->has_strong_ref || node->has_weak_ref)) {
                if (list_empty(&node->work.entry)) {
                        binder_enqueue_work_ilocked(&node->work, &proc->todo);
                        binder_wakeup_proc_ilocked(proc);
                }
        } else {
                if (hlist_empty(&node->refs) && !node->local_strong_refs &&
                    !node->local_weak_refs && !node->tmp_refs) {
                        if (proc) {
                                binder_dequeue_work_ilocked(&node->work);
                                rb_erase(&node->rb_node, &proc->nodes);
                                binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                                             "refless node %d deleted\n",
                                             node->debug_id);
                        } else {
                                BUG_ON(!list_empty(&node->work.entry));
                                spin_lock(&binder_dead_nodes_lock);
                                /*
                                 * tmp_refs could have changed so
                                 * check it again
                                 */
                                if (node->tmp_refs) {
                                        spin_unlock(&binder_dead_nodes_lock);
                                        return false;
                                }
                                hlist_del(&node->dead_node);
                                spin_unlock(&binder_dead_nodes_lock);
                                binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                                             "dead node %d deleted\n",
                                             node->debug_id);
                        }
                        return true;
                }
        }
        return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
        bool free_node;

        binder_node_inner_lock(node);
        free_node = binder_dec_node_nilocked(node, strong, internal);
        binder_node_inner_unlock(node);
        if (free_node)
                binder_free_node(node);
}
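
/*
 * Note on the pattern above: binder_dec_node_nilocked() only *reports*
 * that the node should be freed; the actual binder_free_node() call
 * happens after the locks are dropped, since freeing the node while
 * its spinlock is still held would be a use-after-free. The same
 * decide-under-lock, free-outside-lock pattern recurs throughout this
 * file (see binder_dec_node_tmpref() below).
 */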

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
        /*
         * No call to binder_inc_node() is needed since we
         * don't need to inform userspace of any changes to
         * tmp_refs
         */
        node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
        binder_node_lock(node);
        if (node->proc)
                binder_inner_proc_lock(node->proc);
        else
                spin_lock(&binder_dead_nodes_lock);
        binder_inc_node_tmpref_ilocked(node);
        if (node->proc)
                binder_inner_proc_unlock(node->proc);
        else
                spin_unlock(&binder_dead_nodes_lock);
        binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
        bool free_node;

        binder_node_inner_lock(node);
        if (!node->proc)
                spin_lock(&binder_dead_nodes_lock);
        else
                __acquire(&binder_dead_nodes_lock);
        node->tmp_refs--;
        BUG_ON(node->tmp_refs < 0);
        if (!node->proc)
                spin_unlock(&binder_dead_nodes_lock);
        else
                __release(&binder_dead_nodes_lock);
        /*
         * Call binder_dec_node_nilocked() to check if all refcounts are 0
         * and cleanup is needed. Calling with strong=0 and internal=1
         * causes no actual reference to be released in
         * binder_dec_node_nilocked(). If that changes, a change is
         * needed here too.
         */
        free_node = binder_dec_node_nilocked(node, 0, 1);
        binder_node_inner_unlock(node);
        if (free_node)
                binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
        binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
                                                 u32 desc, bool need_strong_ref)
{
        struct rb_node *n = proc->refs_by_desc.rb_node;
        struct binder_ref *ref;

        while (n) {
                ref = rb_entry(n, struct binder_ref, rb_node_desc);

                if (desc < ref->data.desc) {
                        n = n->rb_left;
                } else if (desc > ref->data.desc) {
                        n = n->rb_right;
                } else if (need_strong_ref && !ref->data.strong) {
                        binder_user_error("tried to use weak ref as strong ref\n");
                        return NULL;
                } else {
                        return ref;
                }
        }
        return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
                                        struct binder_proc *proc,
                                        struct binder_node *node,
                                        struct binder_ref *new_ref)
{
        struct binder_context *context = proc->context;
        struct rb_node **p = &proc->refs_by_node.rb_node;
        struct rb_node *parent = NULL;
        struct binder_ref *ref;
        struct rb_node *n;

        while (*p) {
                parent = *p;
                ref = rb_entry(parent, struct binder_ref, rb_node_node);

                if (node < ref->node)
                        p = &(*p)->rb_left;
                else if (node > ref->node)
                        p = &(*p)->rb_right;
                else
                        return ref;
        }
        if (!new_ref)
                return NULL;

        binder_stats_created(BINDER_STAT_REF);
        new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
        new_ref->proc = proc;
        new_ref->node = node;
        rb_link_node(&new_ref->rb_node_node, parent, p);
        rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

        new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
        for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
                ref = rb_entry(n, struct binder_ref, rb_node_desc);
                if (ref->data.desc > new_ref->data.desc)
                        break;
                new_ref->data.desc = ref->data.desc + 1;
        }

        p = &proc->refs_by_desc.rb_node;
        while (*p) {
                parent = *p;
                ref = rb_entry(parent, struct binder_ref, rb_node_desc);

                if (new_ref->data.desc < ref->data.desc)
                        p = &(*p)->rb_left;
                else if (new_ref->data.desc > ref->data.desc)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }
        rb_link_node(&new_ref->rb_node_desc, parent, p);
        rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

        binder_node_lock(node);
        hlist_add_head(&new_ref->node_entry, &node->refs);

        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                     "%d new ref %d desc %d for node %d\n",
                     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
                     node->debug_id);
        binder_node_unlock(node);
        return new_ref;
}
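
/*
 * Worked example (illustrative): descriptor assignment above walks
 * refs_by_desc in ascending order and settles on the lowest unused
 * value. With existing descriptors {0, 1, 3} and a starting value of
 * 1 (handle 0 is reserved for the context manager), the loop visits
 * 0 (desc stays 1), then 1 (desc becomes 2), then breaks at 3, so the
 * new ref gets desc = 2.
 */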

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
        bool delete_node = false;

        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                     "%d delete ref %d desc %d for node %d\n",
                     ref->proc->pid, ref->data.debug_id, ref->data.desc,
                     ref->node->debug_id);

        rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
        rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

        binder_node_inner_lock(ref->node);
        if (ref->data.strong)
                binder_dec_node_nilocked(ref->node, 1, 1);

        hlist_del(&ref->node_entry);
        delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
        binder_node_inner_unlock(ref->node);
        /*
         * Clear ref->node unless we want the caller to free the node
         */
        if (!delete_node) {
                /*
                 * The caller uses ref->node to determine
                 * whether the node needs to be freed. Clear
                 * it since the node is still alive.
                 */
                ref->node = NULL;
        }

        if (ref->death) {
                binder_debug(BINDER_DEBUG_DEAD_BINDER,
                             "%d delete ref %d desc %d has death notification\n",
                             ref->proc->pid, ref->data.debug_id,
                             ref->data.desc);
                binder_dequeue_work(ref->proc, &ref->death->work);
                binder_stats_deleted(BINDER_STAT_DEATH);
        }
        binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:	ref to be incremented
 * @strong:	if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
                                  struct list_head *target_list)
{
        int ret;

        if (strong) {
                if (ref->data.strong == 0) {
                        ret = binder_inc_node(ref->node, 1, 1, target_list);
                        if (ret)
                                return ret;
                }
                ref->data.strong++;
        } else {
                if (ref->data.weak == 0) {
                        ret = binder_inc_node(ref->node, 0, 1, target_list);
                        if (ret)
                                return ret;
                }
                ref->data.weak++;
        }
        return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
        if (strong) {
                if (ref->data.strong == 0) {
                        binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
                                          ref->proc->pid, ref->data.debug_id,
                                          ref->data.desc, ref->data.strong,
                                          ref->data.weak);
                        return false;
                }
                ref->data.strong--;
                if (ref->data.strong == 0)
                        binder_dec_node(ref->node, strong, 1);
        } else {
                if (ref->data.weak == 0) {
                        binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
                                          ref->proc->pid, ref->data.debug_id,
                                          ref->data.desc, ref->data.strong,
                                          ref->data.weak);
                        return false;
                }
                ref->data.weak--;
        }
        if (ref->data.strong == 0 && ref->data.weak == 0) {
                binder_cleanup_ref_olocked(ref);
                return true;
        }
        return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
                struct binder_proc *proc,
                u32 desc, bool need_strong_ref,
                struct binder_ref_data *rdata)
{
        struct binder_node *node;
        struct binder_ref *ref;

        binder_proc_lock(proc);
        ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
        if (!ref)
                goto err_no_ref;
        node = ref->node;
        /*
         * Take an implicit reference on the node to ensure
         * it stays alive until the call to binder_put_node()
         */
        binder_inc_node_tmpref(node);
        if (rdata)
                *rdata = ref->data;
        binder_proc_unlock(proc);

        return node;

err_no_ref:
        binder_proc_unlock(proc);
        return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
        if (ref->node)
                binder_free_node(ref->node);
        kfree(ref->death);
        kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
                uint32_t desc, bool increment, bool strong,
                struct binder_ref_data *rdata)
{
        int ret = 0;
        struct binder_ref *ref;
        bool delete_ref = false;

        binder_proc_lock(proc);
        ref = binder_get_ref_olocked(proc, desc, strong);
        if (!ref) {
                ret = -EINVAL;
                goto err_no_ref;
        }
        if (increment)
                ret = binder_inc_ref_olocked(ref, strong, NULL);
        else
                delete_ref = binder_dec_ref_olocked(ref, strong);

        if (rdata)
                *rdata = ref->data;
        binder_proc_unlock(proc);

        if (delete_ref)
                binder_free_ref(ref);
        return ret;

err_no_ref:
        binder_proc_unlock(proc);
        return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
                uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
        return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}

/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
                                   struct binder_node *node,
                                   bool strong,
                                   struct list_head *target_list,
                                   struct binder_ref_data *rdata)
{
        struct binder_ref *ref;
        struct binder_ref *new_ref = NULL;
        int ret = 0;

        binder_proc_lock(proc);
        ref = binder_get_ref_for_node_olocked(proc, node, NULL);
        if (!ref) {
                binder_proc_unlock(proc);
                new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
                if (!new_ref)
                        return -ENOMEM;
                binder_proc_lock(proc);
                ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
        }
        ret = binder_inc_ref_olocked(ref, strong, target_list);
        *rdata = ref->data;
        binder_proc_unlock(proc);
        if (new_ref && ref != new_ref)
                /*
                 * Another thread created the ref first so
                 * free the one we allocated
                 */
                kfree(new_ref);
        return ret;
}
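
/*
 * Note on the pattern above: kzalloc(GFP_KERNEL) may sleep, so the
 * allocation cannot happen while proc->outer_lock (a spinlock) is
 * held. The lock is therefore dropped, the ref allocated, and the
 * lookup retried under the lock; if another thread won the race in
 * the meantime, the surplus allocation is freed.
 */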

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
                                           struct binder_transaction *t)
{
        BUG_ON(!target_thread);
        assert_spin_locked(&target_thread->proc->inner_lock);
        BUG_ON(target_thread->transaction_stack != t);
        BUG_ON(target_thread->transaction_stack->from != target_thread);
        target_thread->transaction_stack =
                target_thread->transaction_stack->from_parent;
        t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
        /*
         * tmp_ref is atomic so it can be incremented without a lock
         * (see binder_get_txn_from()); the inner lock is taken here
         * so that the is_dead check and the final zero test cannot
         * race with thread release.
         */
        binder_inner_proc_lock(thread->proc);
        atomic_dec(&thread->tmp_ref);
        if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
                binder_inner_proc_unlock(thread->proc);
                binder_free_thread(thread);
                return;
        }
        binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
        binder_inner_proc_lock(proc);
        proc->tmp_ref--;
        if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
            !proc->tmp_ref) {
                binder_inner_proc_unlock(proc);
                binder_free_proc(proc);
                return;
        }
        binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
                struct binder_transaction *t)
{
        struct binder_thread *from;

        spin_lock(&t->lock);
        from = t->from;
        if (from)
                atomic_inc(&from->tmp_ref);
        spin_unlock(&t->lock);
        return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
                struct binder_transaction *t)
        __acquires(&t->from->proc->inner_lock)
{
        struct binder_thread *from;

        from = binder_get_txn_from(t);
        if (!from) {
                __acquire(&from->proc->inner_lock);
                return NULL;
        }
        binder_inner_proc_lock(from->proc);
        if (t->from) {
                BUG_ON(from != t->from);
                return from;
        }
        binder_inner_proc_unlock(from->proc);
        __acquire(&from->proc->inner_lock);
        binder_thread_dec_tmpref(from);
        return NULL;
}
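
/*
 * Usage sketch (not driver code) of the contract documented above;
 * binder_send_failed_reply() below is a real caller:
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		... operate on target_thread under the inner lock ...
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 */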

/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	transaction whose fd fixups are to be freed
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
        struct binder_txn_fd_fixup *fixup, *tmp;

        list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
                fput(fixup->file);
                if (fixup->target_fd >= 0)
                        put_unused_fd(fixup->target_fd);
                list_del(&fixup->fixup_entry);
                kfree(fixup);
        }
}

static void binder_txn_latency_free(struct binder_transaction *t)
{
        int from_proc, from_thread, to_proc, to_thread;

        spin_lock(&t->lock);
        from_proc = t->from ? t->from->proc->pid : 0;
        from_thread = t->from ? t->from->pid : 0;
        to_proc = t->to_proc ? t->to_proc->pid : 0;
        to_thread = t->to_thread ? t->to_thread->pid : 0;
        spin_unlock(&t->lock);

        trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
}

static void binder_free_transaction(struct binder_transaction *t)
{
        struct binder_proc *target_proc = t->to_proc;

        if (target_proc) {
                binder_inner_proc_lock(target_proc);
                target_proc->outstanding_txns--;
                if (target_proc->outstanding_txns < 0)
                        pr_warn("%s: Unexpected outstanding_txns %d\n",
                                __func__, target_proc->outstanding_txns);
                if (!target_proc->outstanding_txns && target_proc->is_frozen)
                        wake_up_interruptible_all(&target_proc->freeze_wait);
                if (t->buffer)
                        t->buffer->transaction = NULL;
                binder_inner_proc_unlock(target_proc);
        }
        if (trace_binder_txn_latency_free_enabled())
                binder_txn_latency_free(t);
        /*
         * If the transaction has no target_proc, then
         * t->buffer->transaction has already been cleared.
         */
        binder_free_txn_fixups(t);
        kfree(t);
        binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
                                     uint32_t error_code)
{
        struct binder_thread *target_thread;
        struct binder_transaction *next;

        BUG_ON(t->flags & TF_ONE_WAY);
        while (1) {
                target_thread = binder_get_txn_from_and_acq_inner(t);
                if (target_thread) {
                        binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
                                     "send failed reply for transaction %d to %d:%d\n",
                                     t->debug_id,
                                     target_thread->proc->pid,
                                     target_thread->pid);

                        binder_pop_transaction_ilocked(target_thread, t);
                        if (target_thread->reply_error.cmd == BR_OK) {
                                target_thread->reply_error.cmd = error_code;
                                binder_enqueue_thread_work_ilocked(
                                        target_thread,
                                        &target_thread->reply_error.work);
                                wake_up_interruptible(&target_thread->wait);
                        } else {
                                /*
                                 * Cannot get here for normal operation, but
                                 * we can if multiple synchronous transactions
                                 * are sent without blocking for responses.
                                 * Just ignore the 2nd error in this case.
                                 */
                                pr_warn("Unexpected reply error: %u\n",
                                        target_thread->reply_error.cmd);
                        }
                        binder_inner_proc_unlock(target_thread->proc);
                        binder_thread_dec_tmpref(target_thread);
                        binder_free_transaction(t);
                        return;
                }
                __release(&target_thread->proc->inner_lock);
                next = t->from_parent;

                binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
                             "send failed reply for transaction %d, target dead\n",
                             t->debug_id);

                binder_free_transaction(t);
                if (next == NULL) {
                        binder_debug(BINDER_DEBUG_DEAD_BINDER,
                                     "reply failed, no target thread at root\n");
                        return;
                }
                t = next;
                binder_debug(BINDER_DEBUG_DEAD_BINDER,
                             "reply failed, no target thread -- retry %d\n",
                             t->debug_id);
        }
}

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
                                       const char *reason,
                                       uint32_t error_code)
{
        if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
                binder_send_failed_reply(t, error_code);
        } else {
                binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
                             "undelivered transaction %d, %s\n",
                             t->debug_id, reason);
                binder_free_transaction(t);
        }
}

/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:	binder_proc owning the buffer
 * @u:		sender's user pointer to base of buffer
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the @buffer at which to validate an object.
 * @object:	struct binder_object to read into
 *
 * Copy the binder object at the given offset into @object. If @u is
 * provided then the copy is from the sender's buffer. If not, then
 * it is copied from the target's @buffer.
 *
 * Return:	If there's a valid metadata object at @offset, the
 *		size of that object. Otherwise, it returns zero. The object
 *		is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
                                const void __user *u,
                                struct binder_buffer *buffer,
                                unsigned long offset,
                                struct binder_object *object)
{
        size_t read_size;
        struct binder_object_header *hdr;
        size_t object_size = 0;

        read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
        if (offset > buffer->data_size || read_size < sizeof(*hdr))
                return 0;
        if (u) {
                if (copy_from_user(object, u + offset, read_size))
                        return 0;
        } else {
                if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
                                                  offset, read_size))
                        return 0;
        }

        /* Ok, now see if we read a complete object. */
        hdr = &object->hdr;
        switch (hdr->type) {
        case BINDER_TYPE_BINDER:
        case BINDER_TYPE_WEAK_BINDER:
        case BINDER_TYPE_HANDLE:
        case BINDER_TYPE_WEAK_HANDLE:
                object_size = sizeof(struct flat_binder_object);
                break;
        case BINDER_TYPE_FD:
                object_size = sizeof(struct binder_fd_object);
                break;
        case BINDER_TYPE_PTR:
                object_size = sizeof(struct binder_buffer_object);
                break;
        case BINDER_TYPE_FDA:
                object_size = sizeof(struct binder_fd_array_object);
                break;
        default:
                return 0;
        }
        if (offset <= buffer->data_size - object_size &&
            buffer->data_size >= object_size)
                return object_size;
        else
                return 0;
}
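
/*
 * Worked example (illustrative numbers, assuming the 24-byte uapi
 * layout of struct flat_binder_object): with buffer->data_size = 64,
 * an object at offset 40 passes the final check (40 <= 64 - 24 = 40),
 * while one at offset 48 is rejected because its tail would run past
 * the end of the buffer. The second clause guards the unsigned
 * subtraction when data_size < object_size.
 */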

/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @proc:	binder_proc owning the buffer
 * @b:		binder_buffer containing the object
 * @object:	struct binder_object to read into
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start_offset: points to the start of the offset array
 * @object_offsetp: offset of @object read from @b
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start_offset and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start_offset were previously verified to have valid offsets.
 *		If @object_offsetp is non-NULL, then the offset within
 *		@b is written to it.
 */
static struct binder_buffer_object *binder_validate_ptr(
                                                struct binder_proc *proc,
                                                struct binder_buffer *b,
                                                struct binder_object *object,
                                                binder_size_t index,
                                                binder_size_t start_offset,
                                                binder_size_t *object_offsetp,
                                                binder_size_t num_valid)
{
        size_t object_size;
        binder_size_t object_offset;
        unsigned long buffer_offset;

        if (index >= num_valid)
                return NULL;

        buffer_offset = start_offset + sizeof(binder_size_t) * index;
        if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
                                          b, buffer_offset,
                                          sizeof(object_offset)))
                return NULL;
        object_size = binder_get_object(proc, NULL, b, object_offset, object);
        if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
                return NULL;
        if (object_offsetp)
                *object_offsetp = object_offset;

        return &object->bbo;
}

/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @proc:		binder_proc owning the buffer
 * @b:			transaction buffer
 * @objects_start_offset: offset to start of objects buffer
 * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @b to fix up
 * @last_obj_offset:	offset to last binder_buffer_object that we fixed
 * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
 *
 * Return:		%true if a fixup in buffer @b at offset @fixup_offset
 *			is allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_proc *proc,
                                  struct binder_buffer *b,
                                  binder_size_t objects_start_offset,
                                  binder_size_t buffer_obj_offset,
                                  binder_size_t fixup_offset,
                                  binder_size_t last_obj_offset,
                                  binder_size_t last_min_offset)
{
        if (!last_obj_offset) {
                /* Nothing to fix up */
                return false;
        }

        while (last_obj_offset != buffer_obj_offset) {
                unsigned long buffer_offset;
                struct binder_object last_object;
                struct binder_buffer_object *last_bbo;
                size_t object_size = binder_get_object(proc, NULL, b,
                                                       last_obj_offset,
                                                       &last_object);
                if (object_size != sizeof(*last_bbo))
                        return false;

                last_bbo = &last_object.bbo;
                /*
                 * Safe to retrieve the parent of last_obj, since it
                 * was already previously verified by the driver.
                 */
                if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
                        return false;
                last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
                buffer_offset = objects_start_offset +
                        sizeof(binder_size_t) * last_bbo->parent;
                if (binder_alloc_copy_from_buffer(&proc->alloc,
                                                  &last_obj_offset,
                                                  b, buffer_offset,
                                                  sizeof(last_obj_offset)))
                        return false;
        }
        return (fixup_offset >= last_min_offset);
}
This function is used to schedule 1877 * a file-descriptor to be closed after returning from binder_ioctl(). 1878 */ 1879 static void binder_deferred_fd_close(int fd) 1880 { 1881 struct binder_task_work_cb *twcb; 1882 1883 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL); 1884 if (!twcb) 1885 return; 1886 init_task_work(&twcb->twork, binder_do_fd_close); 1887 close_fd_get_file(fd, &twcb->file); 1888 if (twcb->file) { 1889 filp_close(twcb->file, current->files); 1890 task_work_add(current, &twcb->twork, TWA_RESUME); 1891 } else { 1892 kfree(twcb); 1893 } 1894 } 1895 1896 static void binder_transaction_buffer_release(struct binder_proc *proc, 1897 struct binder_thread *thread, 1898 struct binder_buffer *buffer, 1899 binder_size_t failed_at, 1900 bool is_failure) 1901 { 1902 int debug_id = buffer->debug_id; 1903 binder_size_t off_start_offset, buffer_offset, off_end_offset; 1904 1905 binder_debug(BINDER_DEBUG_TRANSACTION, 1906 "%d buffer release %d, size %zd-%zd, failed at %llx\n", 1907 proc->pid, buffer->debug_id, 1908 buffer->data_size, buffer->offsets_size, 1909 (unsigned long long)failed_at); 1910 1911 if (buffer->target_node) 1912 binder_dec_node(buffer->target_node, 1, 0); 1913 1914 off_start_offset = ALIGN(buffer->data_size, sizeof(void *)); 1915 off_end_offset = is_failure && failed_at ? failed_at : 1916 off_start_offset + buffer->offsets_size; 1917 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; 1918 buffer_offset += sizeof(binder_size_t)) { 1919 struct binder_object_header *hdr; 1920 size_t object_size = 0; 1921 struct binder_object object; 1922 binder_size_t object_offset; 1923 1924 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, 1925 buffer, buffer_offset, 1926 sizeof(object_offset))) 1927 object_size = binder_get_object(proc, NULL, buffer, 1928 object_offset, &object); 1929 if (object_size == 0) { 1930 pr_err("transaction release %d bad object at offset %lld, size %zd\n", 1931 debug_id, (u64)object_offset, buffer->data_size); 1932 continue; 1933 } 1934 hdr = &object.hdr; 1935 switch (hdr->type) { 1936 case BINDER_TYPE_BINDER: 1937 case BINDER_TYPE_WEAK_BINDER: { 1938 struct flat_binder_object *fp; 1939 struct binder_node *node; 1940 1941 fp = to_flat_binder_object(hdr); 1942 node = binder_get_node(proc, fp->binder); 1943 if (node == NULL) { 1944 pr_err("transaction release %d bad node %016llx\n", 1945 debug_id, (u64)fp->binder); 1946 break; 1947 } 1948 binder_debug(BINDER_DEBUG_TRANSACTION, 1949 " node %d u%016llx\n", 1950 node->debug_id, (u64)node->ptr); 1951 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER, 1952 0); 1953 binder_put_node(node); 1954 } break; 1955 case BINDER_TYPE_HANDLE: 1956 case BINDER_TYPE_WEAK_HANDLE: { 1957 struct flat_binder_object *fp; 1958 struct binder_ref_data rdata; 1959 int ret; 1960 1961 fp = to_flat_binder_object(hdr); 1962 ret = binder_dec_ref_for_handle(proc, fp->handle, 1963 hdr->type == BINDER_TYPE_HANDLE, &rdata); 1964 1965 if (ret) { 1966 pr_err("transaction release %d bad handle %d, ret = %d\n", 1967 debug_id, fp->handle, ret); 1968 break; 1969 } 1970 binder_debug(BINDER_DEBUG_TRANSACTION, 1971 " ref %d desc %d\n", 1972 rdata.debug_id, rdata.desc); 1973 } break; 1974 1975 case BINDER_TYPE_FD: { 1976 /* 1977 * No need to close the file here since user-space 1978 * closes it for successfully delivered 1979 * transactions. 
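 * (The fd itself was installed in the target process when the
 * recipient thread picked up the transaction and applied the fd
 * fixups; see binder_apply_fd_fixups().)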
For transactions that weren't 1980 * delivered, the new fd was never allocated so 1981 * there is no need to close and the fput on the 1982 * file is done when the transaction is torn 1983 * down. 1984 */ 1985 } break; 1986 case BINDER_TYPE_PTR: 1987 /* 1988 * Nothing to do here, this will get cleaned up when the 1989 * transaction buffer gets freed 1990 */ 1991 break; 1992 case BINDER_TYPE_FDA: { 1993 struct binder_fd_array_object *fda; 1994 struct binder_buffer_object *parent; 1995 struct binder_object ptr_object; 1996 binder_size_t fda_offset; 1997 size_t fd_index; 1998 binder_size_t fd_buf_size; 1999 binder_size_t num_valid; 2000 2001 if (is_failure) { 2002 /* 2003 * The fd fixups have not been applied so no 2004 * fds need to be closed. 2005 */ 2006 continue; 2007 } 2008 2009 num_valid = (buffer_offset - off_start_offset) / 2010 sizeof(binder_size_t); 2011 fda = to_binder_fd_array_object(hdr); 2012 parent = binder_validate_ptr(proc, buffer, &ptr_object, 2013 fda->parent, 2014 off_start_offset, 2015 NULL, 2016 num_valid); 2017 if (!parent) { 2018 pr_err("transaction release %d bad parent offset\n", 2019 debug_id); 2020 continue; 2021 } 2022 fd_buf_size = sizeof(u32) * fda->num_fds; 2023 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 2024 pr_err("transaction release %d invalid number of fds (%lld)\n", 2025 debug_id, (u64)fda->num_fds); 2026 continue; 2027 } 2028 if (fd_buf_size > parent->length || 2029 fda->parent_offset > parent->length - fd_buf_size) { 2030 /* No space for all file descriptors here. */ 2031 pr_err("transaction release %d not enough space for %lld fds in buffer\n", 2032 debug_id, (u64)fda->num_fds); 2033 continue; 2034 } 2035 /* 2036 * the source data for binder_buffer_object is visible 2037 * to user-space and the @buffer element is the user 2038 * pointer to the buffer_object containing the fd_array. 2039 * Convert the address to an offset relative to 2040 * the base of the transaction buffer. 
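 * For example (hypothetical values): with buffer->user_data at 0x7000,
 * parent->buffer at 0x7100 and fda->parent_offset of 8, the fd array
 * lives at offset 0x108 within the transaction buffer.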
2041 */ 2042 fda_offset = 2043 (parent->buffer - (uintptr_t)buffer->user_data) + 2044 fda->parent_offset; 2045 for (fd_index = 0; fd_index < fda->num_fds; 2046 fd_index++) { 2047 u32 fd; 2048 int err; 2049 binder_size_t offset = fda_offset + 2050 fd_index * sizeof(fd); 2051 2052 err = binder_alloc_copy_from_buffer( 2053 &proc->alloc, &fd, buffer, 2054 offset, sizeof(fd)); 2055 WARN_ON(err); 2056 if (!err) { 2057 binder_deferred_fd_close(fd); 2058 /* 2059 * Need to make sure the thread goes 2060 * back to userspace to complete the 2061 * deferred close 2062 */ 2063 if (thread) 2064 thread->looper_need_return = true; 2065 } 2066 } 2067 } break; 2068 default: 2069 pr_err("transaction release %d bad object type %x\n", 2070 debug_id, hdr->type); 2071 break; 2072 } 2073 } 2074 } 2075 2076 static int binder_translate_binder(struct flat_binder_object *fp, 2077 struct binder_transaction *t, 2078 struct binder_thread *thread) 2079 { 2080 struct binder_node *node; 2081 struct binder_proc *proc = thread->proc; 2082 struct binder_proc *target_proc = t->to_proc; 2083 struct binder_ref_data rdata; 2084 int ret = 0; 2085 2086 node = binder_get_node(proc, fp->binder); 2087 if (!node) { 2088 node = binder_new_node(proc, fp); 2089 if (!node) 2090 return -ENOMEM; 2091 } 2092 if (fp->cookie != node->cookie) { 2093 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n", 2094 proc->pid, thread->pid, (u64)fp->binder, 2095 node->debug_id, (u64)fp->cookie, 2096 (u64)node->cookie); 2097 ret = -EINVAL; 2098 goto done; 2099 } 2100 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) { 2101 ret = -EPERM; 2102 goto done; 2103 } 2104 2105 ret = binder_inc_ref_for_node(target_proc, node, 2106 fp->hdr.type == BINDER_TYPE_BINDER, 2107 &thread->todo, &rdata); 2108 if (ret) 2109 goto done; 2110 2111 if (fp->hdr.type == BINDER_TYPE_BINDER) 2112 fp->hdr.type = BINDER_TYPE_HANDLE; 2113 else 2114 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE; 2115 fp->binder = 0; 2116 fp->handle = rdata.desc; 2117 fp->cookie = 0; 2118 2119 trace_binder_transaction_node_to_ref(t, node, &rdata); 2120 binder_debug(BINDER_DEBUG_TRANSACTION, 2121 " node %d u%016llx -> ref %d desc %d\n", 2122 node->debug_id, (u64)node->ptr, 2123 rdata.debug_id, rdata.desc); 2124 done: 2125 binder_put_node(node); 2126 return ret; 2127 } 2128 2129 static int binder_translate_handle(struct flat_binder_object *fp, 2130 struct binder_transaction *t, 2131 struct binder_thread *thread) 2132 { 2133 struct binder_proc *proc = thread->proc; 2134 struct binder_proc *target_proc = t->to_proc; 2135 struct binder_node *node; 2136 struct binder_ref_data src_rdata; 2137 int ret = 0; 2138 2139 node = binder_get_node_from_ref(proc, fp->handle, 2140 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata); 2141 if (!node) { 2142 binder_user_error("%d:%d got transaction with invalid handle, %d\n", 2143 proc->pid, thread->pid, fp->handle); 2144 return -EINVAL; 2145 } 2146 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) { 2147 ret = -EPERM; 2148 goto done; 2149 } 2150 2151 binder_node_lock(node); 2152 if (node->proc == target_proc) { 2153 if (fp->hdr.type == BINDER_TYPE_HANDLE) 2154 fp->hdr.type = BINDER_TYPE_BINDER; 2155 else 2156 fp->hdr.type = BINDER_TYPE_WEAK_BINDER; 2157 fp->binder = node->ptr; 2158 fp->cookie = node->cookie; 2159 if (node->proc) 2160 binder_inner_proc_lock(node->proc); 2161 else 2162 __acquire(&node->proc->inner_lock); 2163 binder_inc_node_nilocked(node, 2164 fp->hdr.type == BINDER_TYPE_BINDER, 2165 0, NULL); 2166 
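/*
 * The __acquire()/__release() calls above and below are only sparse
 * annotations to keep the lock balance consistent for the
 * node->proc == NULL case (a dead node); no inner lock exists to
 * take then.
 */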
if (node->proc) 2167 binder_inner_proc_unlock(node->proc); 2168 else 2169 __release(&node->proc->inner_lock); 2170 trace_binder_transaction_ref_to_node(t, node, &src_rdata); 2171 binder_debug(BINDER_DEBUG_TRANSACTION, 2172 " ref %d desc %d -> node %d u%016llx\n", 2173 src_rdata.debug_id, src_rdata.desc, node->debug_id, 2174 (u64)node->ptr); 2175 binder_node_unlock(node); 2176 } else { 2177 struct binder_ref_data dest_rdata; 2178 2179 binder_node_unlock(node); 2180 ret = binder_inc_ref_for_node(target_proc, node, 2181 fp->hdr.type == BINDER_TYPE_HANDLE, 2182 NULL, &dest_rdata); 2183 if (ret) 2184 goto done; 2185 2186 fp->binder = 0; 2187 fp->handle = dest_rdata.desc; 2188 fp->cookie = 0; 2189 trace_binder_transaction_ref_to_ref(t, node, &src_rdata, 2190 &dest_rdata); 2191 binder_debug(BINDER_DEBUG_TRANSACTION, 2192 " ref %d desc %d -> ref %d desc %d (node %d)\n", 2193 src_rdata.debug_id, src_rdata.desc, 2194 dest_rdata.debug_id, dest_rdata.desc, 2195 node->debug_id); 2196 } 2197 done: 2198 binder_put_node(node); 2199 return ret; 2200 } 2201 2202 static int binder_translate_fd(u32 fd, binder_size_t fd_offset, 2203 struct binder_transaction *t, 2204 struct binder_thread *thread, 2205 struct binder_transaction *in_reply_to) 2206 { 2207 struct binder_proc *proc = thread->proc; 2208 struct binder_proc *target_proc = t->to_proc; 2209 struct binder_txn_fd_fixup *fixup; 2210 struct file *file; 2211 int ret = 0; 2212 bool target_allows_fd; 2213 2214 if (in_reply_to) 2215 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS); 2216 else 2217 target_allows_fd = t->buffer->target_node->accept_fds; 2218 if (!target_allows_fd) { 2219 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n", 2220 proc->pid, thread->pid, 2221 in_reply_to ? "reply" : "transaction", 2222 fd); 2223 ret = -EPERM; 2224 goto err_fd_not_accepted; 2225 } 2226 2227 file = fget(fd); 2228 if (!file) { 2229 binder_user_error("%d:%d got transaction with invalid fd, %d\n", 2230 proc->pid, thread->pid, fd); 2231 ret = -EBADF; 2232 goto err_fget; 2233 } 2234 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file); 2235 if (ret < 0) { 2236 ret = -EPERM; 2237 goto err_security; 2238 } 2239 2240 /* 2241 * Add fixup record for this transaction. The allocation 2242 * of the fd in the target needs to be done from a 2243 * target thread. 2244 */ 2245 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL); 2246 if (!fixup) { 2247 ret = -ENOMEM; 2248 goto err_alloc; 2249 } 2250 fixup->file = file; 2251 fixup->offset = fd_offset; 2252 fixup->target_fd = -1; 2253 trace_binder_transaction_fd_send(t, fd, fixup->offset); 2254 list_add_tail(&fixup->fixup_entry, &t->fd_fixups); 2255 2256 return ret; 2257 2258 err_alloc: 2259 err_security: 2260 fput(file); 2261 err_fget: 2262 err_fd_not_accepted: 2263 return ret; 2264 } 2265 2266 /** 2267 * struct binder_ptr_fixup - data to be fixed-up in target buffer 2268 * @offset offset in target buffer to fixup 2269 * @skip_size bytes to skip in copy (fixup will be written later) 2270 * @fixup_data data to write at fixup offset 2271 * @node list node 2272 * 2273 * This is used for the pointer fixup list (pf) which is created and consumed 2274 * during binder_transaction() and is only accessed locally. No 2275 * locking is necessary. 2276 * 2277 * The list is ordered by @offset. 
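 *
 * A fixup with @skip_size == 0 writes @fixup_data at @offset during the
 * deferred copy; a fixup with @skip_size > 0 (used for BINDER_TYPE_FDA)
 * copies nothing and leaves a @skip_size-byte region to be filled in
 * later, in the target's context.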
2278 */ 2279 struct binder_ptr_fixup { 2280 binder_size_t offset; 2281 size_t skip_size; 2282 binder_uintptr_t fixup_data; 2283 struct list_head node; 2284 }; 2285 2286 /** 2287 * struct binder_sg_copy - scatter-gather data to be copied 2288 * @offset offset in target buffer 2289 * @sender_uaddr user address in source buffer 2290 * @length bytes to copy 2291 * @node list node 2292 * 2293 * This is used for the sg copy list (sgc) which is created and consumed 2294 * during binder_transaction() and is only accessed locally. No 2295 * locking is necessary. 2296 * 2297 * The list is ordered by @offset. 2298 */ 2299 struct binder_sg_copy { 2300 binder_size_t offset; 2301 const void __user *sender_uaddr; 2302 size_t length; 2303 struct list_head node; 2304 }; 2305 2306 /** 2307 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data 2308 * @alloc: binder_alloc associated with @buffer 2309 * @buffer: binder buffer in target process 2310 * @sgc_head: list_head of scatter-gather copy list 2311 * @pf_head: list_head of pointer fixup list 2312 * 2313 * Processes all elements of @sgc_head, applying fixups from @pf_head 2314 * and copying the scatter-gather data from the source process' user 2315 * buffer to the target's buffer. It is expected that the list creation 2316 * and processing all occurs during binder_transaction() so these lists 2317 * are only accessed in local context. 2318 * 2319 * Return: 0=success, else -errno 2320 */ 2321 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc, 2322 struct binder_buffer *buffer, 2323 struct list_head *sgc_head, 2324 struct list_head *pf_head) 2325 { 2326 int ret = 0; 2327 struct binder_sg_copy *sgc, *tmpsgc; 2328 struct binder_ptr_fixup *tmppf; 2329 struct binder_ptr_fixup *pf = 2330 list_first_entry_or_null(pf_head, struct binder_ptr_fixup, 2331 node); 2332 2333 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) { 2334 size_t bytes_copied = 0; 2335 2336 while (bytes_copied < sgc->length) { 2337 size_t copy_size; 2338 size_t bytes_left = sgc->length - bytes_copied; 2339 size_t offset = sgc->offset + bytes_copied; 2340 2341 /* 2342 * We copy up to the fixup (pointed to by pf) 2343 */ 2344 copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset) 2345 : bytes_left; 2346 if (!ret && copy_size) 2347 ret = binder_alloc_copy_user_to_buffer( 2348 alloc, buffer, 2349 offset, 2350 sgc->sender_uaddr + bytes_copied, 2351 copy_size); 2352 bytes_copied += copy_size; 2353 if (copy_size != bytes_left) { 2354 BUG_ON(!pf); 2355 /* we stopped at a fixup offset */ 2356 if (pf->skip_size) { 2357 /* 2358 * we are just skipping. This is for 2359 * BINDER_TYPE_FDA where the translated 2360 * fds will be fixed up when we get 2361 * to target context. 2362 */ 2363 bytes_copied += pf->skip_size; 2364 } else { 2365 /* apply the fixup indicated by pf */ 2366 if (!ret) 2367 ret = binder_alloc_copy_to_buffer( 2368 alloc, buffer, 2369 pf->offset, 2370 &pf->fixup_data, 2371 sizeof(pf->fixup_data)); 2372 bytes_copied += sizeof(pf->fixup_data); 2373 } 2374 list_del(&pf->node); 2375 kfree(pf); 2376 pf = list_first_entry_or_null(pf_head, 2377 struct binder_ptr_fixup, node); 2378 } 2379 } 2380 list_del(&sgc->node); 2381 kfree(sgc); 2382 } 2383 list_for_each_entry_safe(pf, tmppf, pf_head, node) { 2384 BUG_ON(pf->skip_size == 0); 2385 list_del(&pf->node); 2386 kfree(pf); 2387 } 2388 BUG_ON(!list_empty(sgc_head)); 2389 2390 return ret > 0 ? 
-EINVAL : ret; 2391 } 2392 2393 /** 2394 * binder_cleanup_deferred_txn_lists() - free specified lists 2395 * @sgc_head: list_head of scatter-gather copy list 2396 * @pf_head: list_head of pointer fixup list 2397 * 2398 * Called to clean up @sgc_head and @pf_head if there is an 2399 * error. 2400 */ 2401 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head, 2402 struct list_head *pf_head) 2403 { 2404 struct binder_sg_copy *sgc, *tmpsgc; 2405 struct binder_ptr_fixup *pf, *tmppf; 2406 2407 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) { 2408 list_del(&sgc->node); 2409 kfree(sgc); 2410 } 2411 list_for_each_entry_safe(pf, tmppf, pf_head, node) { 2412 list_del(&pf->node); 2413 kfree(pf); 2414 } 2415 } 2416 2417 /** 2418 * binder_defer_copy() - queue a scatter-gather buffer for copy 2419 * @sgc_head: list_head of scatter-gather copy list 2420 * @offset: binder buffer offset in target process 2421 * @sender_uaddr: user address in source process 2422 * @length: bytes to copy 2423 * 2424 * Specify a scatter-gather block to be copied. The actual copy must 2425 * be deferred until all the needed fixups are identified and queued. 2426 * Then the copy and fixups are done together so un-translated values 2427 * from the source are never visible in the target buffer. 2428 * 2429 * We are guaranteed that repeated calls to this function will have 2430 * monotonically increasing @offset values so the list will naturally 2431 * be ordered. 2432 * 2433 * Return: 0=success, else -errno 2434 */ 2435 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset, 2436 const void __user *sender_uaddr, size_t length) 2437 { 2438 struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL); 2439 2440 if (!bc) 2441 return -ENOMEM; 2442 2443 bc->offset = offset; 2444 bc->sender_uaddr = sender_uaddr; 2445 bc->length = length; 2446 INIT_LIST_HEAD(&bc->node); 2447 2448 /* 2449 * We are guaranteed that the deferred copies are in-order 2450 * so just add to the tail. 2451 */ 2452 list_add_tail(&bc->node, sgc_head); 2453 2454 return 0; 2455 } 2456 2457 /** 2458 * binder_add_fixup() - queue a fixup to be applied to sg copy 2459 * @pf_head: list_head of binder ptr fixup list 2460 * @offset: binder buffer offset in target process 2461 * @fixup: bytes to be copied for fixup 2462 * @skip_size: bytes to skip when copying (fixup will be applied later) 2463 * 2464 * Add the specified fixup to a list ordered by @offset. When copying 2465 * the scatter-gather buffers, the fixup will be copied instead of 2466 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup 2467 * will be applied later (in target process context), so we just skip 2468 * the bytes specified by @skip_size. If @skip_size is 0, we copy the 2469 * value in @fixup. 2470 * 2471 * This function is called *mostly* in @offset order, but there are 2472 * exceptions. Since out-of-order inserts are relatively uncommon, 2473 * we insert the new element by searching backward from the tail of 2474 * the list. 
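 *
 * A sketch of the intended call pattern (as used for a BINDER_TYPE_PTR
 * parent fixup, where the translated pointer is written directly and
 * nothing is skipped):
 *
 *	ret = binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);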
2475 * 2476 * Return: 0=success, else -errno 2477 */ 2478 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset, 2479 binder_uintptr_t fixup, size_t skip_size) 2480 { 2481 struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL); 2482 struct binder_ptr_fixup *tmppf; 2483 2484 if (!pf) 2485 return -ENOMEM; 2486 2487 pf->offset = offset; 2488 pf->fixup_data = fixup; 2489 pf->skip_size = skip_size; 2490 INIT_LIST_HEAD(&pf->node); 2491 2492 /* Fixups are *mostly* added in-order, but there are some 2493 * exceptions. Look backwards through list for insertion point. 2494 */ 2495 list_for_each_entry_reverse(tmppf, pf_head, node) { 2496 if (tmppf->offset < pf->offset) { 2497 list_add(&pf->node, &tmppf->node); 2498 return 0; 2499 } 2500 } 2501 /* 2502 * if we get here, then the new offset is the lowest so 2503 * insert at the head 2504 */ 2505 list_add(&pf->node, pf_head); 2506 return 0; 2507 } 2508 2509 static int binder_translate_fd_array(struct list_head *pf_head, 2510 struct binder_fd_array_object *fda, 2511 const void __user *sender_ubuffer, 2512 struct binder_buffer_object *parent, 2513 struct binder_buffer_object *sender_uparent, 2514 struct binder_transaction *t, 2515 struct binder_thread *thread, 2516 struct binder_transaction *in_reply_to) 2517 { 2518 binder_size_t fdi, fd_buf_size; 2519 binder_size_t fda_offset; 2520 const void __user *sender_ufda_base; 2521 struct binder_proc *proc = thread->proc; 2522 int ret; 2523 2524 if (fda->num_fds == 0) 2525 return 0; 2526 2527 fd_buf_size = sizeof(u32) * fda->num_fds; 2528 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 2529 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n", 2530 proc->pid, thread->pid, (u64)fda->num_fds); 2531 return -EINVAL; 2532 } 2533 if (fd_buf_size > parent->length || 2534 fda->parent_offset > parent->length - fd_buf_size) { 2535 /* No space for all file descriptors here. */ 2536 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n", 2537 proc->pid, thread->pid, (u64)fda->num_fds); 2538 return -EINVAL; 2539 } 2540 /* 2541 * the source data for binder_buffer_object is visible 2542 * to user-space and the @buffer element is the user 2543 * pointer to the buffer_object containing the fd_array. 2544 * Convert the address to an offset relative to 2545 * the base of the transaction buffer. 2546 */ 2547 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) + 2548 fda->parent_offset; 2549 sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer + 2550 fda->parent_offset; 2551 2552 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) || 2553 !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) { 2554 binder_user_error("%d:%d parent offset not aligned correctly.\n", 2555 proc->pid, thread->pid); 2556 return -EINVAL; 2557 } 2558 ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32)); 2559 if (ret) 2560 return ret; 2561 2562 for (fdi = 0; fdi < fda->num_fds; fdi++) { 2563 u32 fd; 2564 binder_size_t offset = fda_offset + fdi * sizeof(fd); 2565 binder_size_t sender_uoffset = fdi * sizeof(fd); 2566 2567 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd)); 2568 if (!ret) 2569 ret = binder_translate_fd(fd, offset, t, thread, 2570 in_reply_to); 2571 if (ret) 2572 return ret > 0 ? 
-EINVAL : ret; 2573 } 2574 return 0; 2575 } 2576 2577 static int binder_fixup_parent(struct list_head *pf_head, 2578 struct binder_transaction *t, 2579 struct binder_thread *thread, 2580 struct binder_buffer_object *bp, 2581 binder_size_t off_start_offset, 2582 binder_size_t num_valid, 2583 binder_size_t last_fixup_obj_off, 2584 binder_size_t last_fixup_min_off) 2585 { 2586 struct binder_buffer_object *parent; 2587 struct binder_buffer *b = t->buffer; 2588 struct binder_proc *proc = thread->proc; 2589 struct binder_proc *target_proc = t->to_proc; 2590 struct binder_object object; 2591 binder_size_t buffer_offset; 2592 binder_size_t parent_offset; 2593 2594 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT)) 2595 return 0; 2596 2597 parent = binder_validate_ptr(target_proc, b, &object, bp->parent, 2598 off_start_offset, &parent_offset, 2599 num_valid); 2600 if (!parent) { 2601 binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 2602 proc->pid, thread->pid); 2603 return -EINVAL; 2604 } 2605 2606 if (!binder_validate_fixup(target_proc, b, off_start_offset, 2607 parent_offset, bp->parent_offset, 2608 last_fixup_obj_off, 2609 last_fixup_min_off)) { 2610 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", 2611 proc->pid, thread->pid); 2612 return -EINVAL; 2613 } 2614 2615 if (parent->length < sizeof(binder_uintptr_t) || 2616 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) { 2617 /* No space for a pointer here! */ 2618 binder_user_error("%d:%d got transaction with invalid parent offset\n", 2619 proc->pid, thread->pid); 2620 return -EINVAL; 2621 } 2622 buffer_offset = bp->parent_offset + 2623 (uintptr_t)parent->buffer - (uintptr_t)b->user_data; 2624 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0); 2625 } 2626 2627 /** 2628 * binder_proc_transaction() - sends a transaction to a process and wakes it up 2629 * @t: transaction to send 2630 * @proc: process to send the transaction to 2631 * @thread: thread in @proc to send the transaction to (may be NULL) 2632 * 2633 * This function queues a transaction to the specified process. It will try 2634 * to find a thread in the target process to handle the transaction and 2635 * wake it up. If no thread is found, the work is queued to the proc 2636 * waitqueue. 2637 * 2638 * If the @thread parameter is not NULL, the transaction is always queued 2639 * to the waitlist of that specific thread. 2640 * 2641 * Return: 0 if the transaction was successfully queued 2642 * BR_DEAD_REPLY if the target process or thread is dead 2643 * BR_FROZEN_REPLY if the target process or thread is frozen 2644 */ 2645 static int binder_proc_transaction(struct binder_transaction *t, 2646 struct binder_proc *proc, 2647 struct binder_thread *thread) 2648 { 2649 struct binder_node *node = t->buffer->target_node; 2650 bool oneway = !!(t->flags & TF_ONE_WAY); 2651 bool pending_async = false; 2652 2653 BUG_ON(!node); 2654 binder_node_lock(node); 2655 if (oneway) { 2656 BUG_ON(thread); 2657 if (node->has_async_transaction) 2658 pending_async = true; 2659 else 2660 node->has_async_transaction = true; 2661 } 2662 2663 binder_inner_proc_lock(proc); 2664 if (proc->is_frozen) { 2665 proc->sync_recv |= !oneway; 2666 proc->async_recv |= oneway; 2667 } 2668 2669 if ((proc->is_frozen && !oneway) || proc->is_dead || 2670 (thread && thread->is_dead)) { 2671 binder_inner_proc_unlock(proc); 2672 binder_node_unlock(node); 2673 return proc->is_frozen ? 
BR_FROZEN_REPLY : BR_DEAD_REPLY;
2674 }
2675
2676 if (!thread && !pending_async)
2677 thread = binder_select_thread_ilocked(proc);
2678
2679 if (thread)
2680 binder_enqueue_thread_work_ilocked(thread, &t->work);
2681 else if (!pending_async)
2682 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2683 else
2684 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2685
2686 if (!pending_async)
2687 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2688
2689 proc->outstanding_txns++;
2690 binder_inner_proc_unlock(proc);
2691 binder_node_unlock(node);
2692
2693 return 0;
2694 }
2695
2696 /**
2697 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2698 * @node: struct binder_node for which to get refs
2699 * @procp: returns @node->proc if valid
2700 * @error: set to BR_DEAD_REPLY if @node->proc is NULL
2701 *
2702 * User-space normally keeps the node alive when creating a transaction
2703 * since it has a reference to the target. The local strong ref keeps it
2704 * alive if the sending process dies before the target process processes
2705 * the transaction. If the source process is malicious or has a reference
2706 * counting bug, relying on the local strong ref can fail.
2707 *
2708 * Since user-space can cause the local strong ref to go away, we also take
2709 * a tmpref on the node to ensure it survives while we are constructing
2710 * the transaction. We also need a tmpref on the proc while we are
2711 * constructing the transaction, so we take that here as well.
2712 *
2713 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2714 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2715 * target proc has died, @error is set to BR_DEAD_REPLY.
2716 */
2717 static struct binder_node *binder_get_node_refs_for_txn(
2718 struct binder_node *node,
2719 struct binder_proc **procp,
2720 uint32_t *error)
2721 {
2722 struct binder_node *target_node = NULL;
2723
2724 binder_node_inner_lock(node);
2725 if (node->proc) {
2726 target_node = node;
2727 binder_inc_node_nilocked(node, 1, 0, NULL);
2728 binder_inc_node_tmpref_ilocked(node);
2729 node->proc->tmp_ref++;
2730 *procp = node->proc;
2731 } else
2732 *error = BR_DEAD_REPLY;
2733 binder_node_inner_unlock(node);
2734
2735 return target_node;
2736 }
2737
2738 static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2739 uint32_t command, int32_t param)
2740 {
2741 struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2742
2743 if (!from) {
2744 /* annotation for sparse */
2745 __release(&from->proc->inner_lock);
2746 return;
2747 }
2748
2749 /* don't override existing errors */
2750 if (from->ee.command == BR_OK)
2751 binder_set_extended_error(&from->ee, id, command, param);
2752 binder_inner_proc_unlock(from->proc);
2753 binder_thread_dec_tmpref(from);
2754 }
2755
2756 static void binder_transaction(struct binder_proc *proc,
2757 struct binder_thread *thread,
2758 struct binder_transaction_data *tr, int reply,
2759 binder_size_t extra_buffers_size)
2760 {
2761 int ret;
2762 struct binder_transaction *t;
2763 struct binder_work *w;
2764 struct binder_work *tcomplete;
2765 binder_size_t buffer_offset = 0;
2766 binder_size_t off_start_offset, off_end_offset;
2767 binder_size_t off_min;
2768 binder_size_t sg_buf_offset, sg_buf_end_offset;
2769 binder_size_t user_offset = 0;
2770 struct binder_proc *target_proc = NULL;
2771 struct binder_thread *target_thread = NULL;
2772 struct binder_node *target_node = NULL;
2773 struct binder_transaction
*in_reply_to = NULL; 2774 struct binder_transaction_log_entry *e; 2775 uint32_t return_error = 0; 2776 uint32_t return_error_param = 0; 2777 uint32_t return_error_line = 0; 2778 binder_size_t last_fixup_obj_off = 0; 2779 binder_size_t last_fixup_min_off = 0; 2780 struct binder_context *context = proc->context; 2781 int t_debug_id = atomic_inc_return(&binder_last_id); 2782 char *secctx = NULL; 2783 u32 secctx_sz = 0; 2784 struct list_head sgc_head; 2785 struct list_head pf_head; 2786 const void __user *user_buffer = (const void __user *) 2787 (uintptr_t)tr->data.ptr.buffer; 2788 INIT_LIST_HEAD(&sgc_head); 2789 INIT_LIST_HEAD(&pf_head); 2790 2791 e = binder_transaction_log_add(&binder_transaction_log); 2792 e->debug_id = t_debug_id; 2793 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); 2794 e->from_proc = proc->pid; 2795 e->from_thread = thread->pid; 2796 e->target_handle = tr->target.handle; 2797 e->data_size = tr->data_size; 2798 e->offsets_size = tr->offsets_size; 2799 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME); 2800 2801 binder_inner_proc_lock(proc); 2802 binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0); 2803 binder_inner_proc_unlock(proc); 2804 2805 if (reply) { 2806 binder_inner_proc_lock(proc); 2807 in_reply_to = thread->transaction_stack; 2808 if (in_reply_to == NULL) { 2809 binder_inner_proc_unlock(proc); 2810 binder_user_error("%d:%d got reply transaction with no transaction stack\n", 2811 proc->pid, thread->pid); 2812 return_error = BR_FAILED_REPLY; 2813 return_error_param = -EPROTO; 2814 return_error_line = __LINE__; 2815 goto err_empty_call_stack; 2816 } 2817 if (in_reply_to->to_thread != thread) { 2818 spin_lock(&in_reply_to->lock); 2819 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n", 2820 proc->pid, thread->pid, in_reply_to->debug_id, 2821 in_reply_to->to_proc ? 2822 in_reply_to->to_proc->pid : 0, 2823 in_reply_to->to_thread ? 2824 in_reply_to->to_thread->pid : 0); 2825 spin_unlock(&in_reply_to->lock); 2826 binder_inner_proc_unlock(proc); 2827 return_error = BR_FAILED_REPLY; 2828 return_error_param = -EPROTO; 2829 return_error_line = __LINE__; 2830 in_reply_to = NULL; 2831 goto err_bad_call_stack; 2832 } 2833 thread->transaction_stack = in_reply_to->to_parent; 2834 binder_inner_proc_unlock(proc); 2835 binder_set_nice(in_reply_to->saved_priority); 2836 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to); 2837 if (target_thread == NULL) { 2838 /* annotation for sparse */ 2839 __release(&target_thread->proc->inner_lock); 2840 binder_txn_error("%d:%d reply target not found\n", 2841 thread->pid, proc->pid); 2842 return_error = BR_DEAD_REPLY; 2843 return_error_line = __LINE__; 2844 goto err_dead_binder; 2845 } 2846 if (target_thread->transaction_stack != in_reply_to) { 2847 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n", 2848 proc->pid, thread->pid, 2849 target_thread->transaction_stack ? 
2850 target_thread->transaction_stack->debug_id : 0, 2851 in_reply_to->debug_id); 2852 binder_inner_proc_unlock(target_thread->proc); 2853 return_error = BR_FAILED_REPLY; 2854 return_error_param = -EPROTO; 2855 return_error_line = __LINE__; 2856 in_reply_to = NULL; 2857 target_thread = NULL; 2858 goto err_dead_binder; 2859 } 2860 target_proc = target_thread->proc; 2861 target_proc->tmp_ref++; 2862 binder_inner_proc_unlock(target_thread->proc); 2863 } else { 2864 if (tr->target.handle) { 2865 struct binder_ref *ref; 2866 2867 /* 2868 * There must already be a strong ref 2869 * on this node. If so, do a strong 2870 * increment on the node to ensure it 2871 * stays alive until the transaction is 2872 * done. 2873 */ 2874 binder_proc_lock(proc); 2875 ref = binder_get_ref_olocked(proc, tr->target.handle, 2876 true); 2877 if (ref) { 2878 target_node = binder_get_node_refs_for_txn( 2879 ref->node, &target_proc, 2880 &return_error); 2881 } else { 2882 binder_user_error("%d:%d got transaction to invalid handle, %u\n", 2883 proc->pid, thread->pid, tr->target.handle); 2884 return_error = BR_FAILED_REPLY; 2885 } 2886 binder_proc_unlock(proc); 2887 } else { 2888 mutex_lock(&context->context_mgr_node_lock); 2889 target_node = context->binder_context_mgr_node; 2890 if (target_node) 2891 target_node = binder_get_node_refs_for_txn( 2892 target_node, &target_proc, 2893 &return_error); 2894 else 2895 return_error = BR_DEAD_REPLY; 2896 mutex_unlock(&context->context_mgr_node_lock); 2897 if (target_node && target_proc->pid == proc->pid) { 2898 binder_user_error("%d:%d got transaction to context manager from process owning it\n", 2899 proc->pid, thread->pid); 2900 return_error = BR_FAILED_REPLY; 2901 return_error_param = -EINVAL; 2902 return_error_line = __LINE__; 2903 goto err_invalid_target_handle; 2904 } 2905 } 2906 if (!target_node) { 2907 binder_txn_error("%d:%d cannot find target node\n", 2908 thread->pid, proc->pid); 2909 /* 2910 * return_error is set above 2911 */ 2912 return_error_param = -EINVAL; 2913 return_error_line = __LINE__; 2914 goto err_dead_binder; 2915 } 2916 e->to_node = target_node->debug_id; 2917 if (WARN_ON(proc == target_proc)) { 2918 binder_txn_error("%d:%d self transactions not allowed\n", 2919 thread->pid, proc->pid); 2920 return_error = BR_FAILED_REPLY; 2921 return_error_param = -EINVAL; 2922 return_error_line = __LINE__; 2923 goto err_invalid_target_handle; 2924 } 2925 if (security_binder_transaction(proc->cred, 2926 target_proc->cred) < 0) { 2927 binder_txn_error("%d:%d transaction credentials failed\n", 2928 thread->pid, proc->pid); 2929 return_error = BR_FAILED_REPLY; 2930 return_error_param = -EPERM; 2931 return_error_line = __LINE__; 2932 goto err_invalid_target_handle; 2933 } 2934 binder_inner_proc_lock(proc); 2935 2936 w = list_first_entry_or_null(&thread->todo, 2937 struct binder_work, entry); 2938 if (!(tr->flags & TF_ONE_WAY) && w && 2939 w->type == BINDER_WORK_TRANSACTION) { 2940 /* 2941 * Do not allow new outgoing transaction from a 2942 * thread that has a transaction at the head of 2943 * its todo list. Only need to check the head 2944 * because binder_select_thread_ilocked picks a 2945 * thread from proc->waiting_threads to enqueue 2946 * the transaction, and nothing is queued to the 2947 * todo list while the thread is on waiting_threads. 
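 * In other words, a thread must first consume the transaction already
 * queued to it before it is allowed to issue a new outgoing one.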
2948 */ 2949 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n", 2950 proc->pid, thread->pid); 2951 binder_inner_proc_unlock(proc); 2952 return_error = BR_FAILED_REPLY; 2953 return_error_param = -EPROTO; 2954 return_error_line = __LINE__; 2955 goto err_bad_todo_list; 2956 } 2957 2958 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { 2959 struct binder_transaction *tmp; 2960 2961 tmp = thread->transaction_stack; 2962 if (tmp->to_thread != thread) { 2963 spin_lock(&tmp->lock); 2964 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", 2965 proc->pid, thread->pid, tmp->debug_id, 2966 tmp->to_proc ? tmp->to_proc->pid : 0, 2967 tmp->to_thread ? 2968 tmp->to_thread->pid : 0); 2969 spin_unlock(&tmp->lock); 2970 binder_inner_proc_unlock(proc); 2971 return_error = BR_FAILED_REPLY; 2972 return_error_param = -EPROTO; 2973 return_error_line = __LINE__; 2974 goto err_bad_call_stack; 2975 } 2976 while (tmp) { 2977 struct binder_thread *from; 2978 2979 spin_lock(&tmp->lock); 2980 from = tmp->from; 2981 if (from && from->proc == target_proc) { 2982 atomic_inc(&from->tmp_ref); 2983 target_thread = from; 2984 spin_unlock(&tmp->lock); 2985 break; 2986 } 2987 spin_unlock(&tmp->lock); 2988 tmp = tmp->from_parent; 2989 } 2990 } 2991 binder_inner_proc_unlock(proc); 2992 } 2993 if (target_thread) 2994 e->to_thread = target_thread->pid; 2995 e->to_proc = target_proc->pid; 2996 2997 /* TODO: reuse incoming transaction for reply */ 2998 t = kzalloc(sizeof(*t), GFP_KERNEL); 2999 if (t == NULL) { 3000 binder_txn_error("%d:%d cannot allocate transaction\n", 3001 thread->pid, proc->pid); 3002 return_error = BR_FAILED_REPLY; 3003 return_error_param = -ENOMEM; 3004 return_error_line = __LINE__; 3005 goto err_alloc_t_failed; 3006 } 3007 INIT_LIST_HEAD(&t->fd_fixups); 3008 binder_stats_created(BINDER_STAT_TRANSACTION); 3009 spin_lock_init(&t->lock); 3010 3011 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); 3012 if (tcomplete == NULL) { 3013 binder_txn_error("%d:%d cannot allocate work for transaction\n", 3014 thread->pid, proc->pid); 3015 return_error = BR_FAILED_REPLY; 3016 return_error_param = -ENOMEM; 3017 return_error_line = __LINE__; 3018 goto err_alloc_tcomplete_failed; 3019 } 3020 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); 3021 3022 t->debug_id = t_debug_id; 3023 3024 if (reply) 3025 binder_debug(BINDER_DEBUG_TRANSACTION, 3026 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n", 3027 proc->pid, thread->pid, t->debug_id, 3028 target_proc->pid, target_thread->pid, 3029 (u64)tr->data.ptr.buffer, 3030 (u64)tr->data.ptr.offsets, 3031 (u64)tr->data_size, (u64)tr->offsets_size, 3032 (u64)extra_buffers_size); 3033 else 3034 binder_debug(BINDER_DEBUG_TRANSACTION, 3035 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n", 3036 proc->pid, thread->pid, t->debug_id, 3037 target_proc->pid, target_node->debug_id, 3038 (u64)tr->data.ptr.buffer, 3039 (u64)tr->data.ptr.offsets, 3040 (u64)tr->data_size, (u64)tr->offsets_size, 3041 (u64)extra_buffers_size); 3042 3043 if (!reply && !(tr->flags & TF_ONE_WAY)) 3044 t->from = thread; 3045 else 3046 t->from = NULL; 3047 t->sender_euid = task_euid(proc->tsk); 3048 t->to_proc = target_proc; 3049 t->to_thread = target_thread; 3050 t->code = tr->code; 3051 t->flags = tr->flags; 3052 t->priority = task_nice(current); 3053 3054 if (target_node && target_node->txn_security_ctx) { 3055 u32 secid; 3056 size_t 
added_size; 3057 3058 security_cred_getsecid(proc->cred, &secid); 3059 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz); 3060 if (ret) { 3061 binder_txn_error("%d:%d failed to get security context\n", 3062 thread->pid, proc->pid); 3063 return_error = BR_FAILED_REPLY; 3064 return_error_param = ret; 3065 return_error_line = __LINE__; 3066 goto err_get_secctx_failed; 3067 } 3068 added_size = ALIGN(secctx_sz, sizeof(u64)); 3069 extra_buffers_size += added_size; 3070 if (extra_buffers_size < added_size) { 3071 binder_txn_error("%d:%d integer overflow of extra_buffers_size\n", 3072 thread->pid, proc->pid); 3073 return_error = BR_FAILED_REPLY; 3074 return_error_param = -EINVAL; 3075 return_error_line = __LINE__; 3076 goto err_bad_extra_size; 3077 } 3078 } 3079 3080 trace_binder_transaction(reply, t, target_node); 3081 3082 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, 3083 tr->offsets_size, extra_buffers_size, 3084 !reply && (t->flags & TF_ONE_WAY), current->tgid); 3085 if (IS_ERR(t->buffer)) { 3086 char *s; 3087 3088 ret = PTR_ERR(t->buffer); 3089 s = (ret == -ESRCH) ? ": vma cleared, target dead or dying" 3090 : (ret == -ENOSPC) ? ": no space left" 3091 : (ret == -ENOMEM) ? ": memory allocation failed" 3092 : ""; 3093 binder_txn_error("cannot allocate buffer%s", s); 3094 3095 return_error_param = PTR_ERR(t->buffer); 3096 return_error = return_error_param == -ESRCH ? 3097 BR_DEAD_REPLY : BR_FAILED_REPLY; 3098 return_error_line = __LINE__; 3099 t->buffer = NULL; 3100 goto err_binder_alloc_buf_failed; 3101 } 3102 if (secctx) { 3103 int err; 3104 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) + 3105 ALIGN(tr->offsets_size, sizeof(void *)) + 3106 ALIGN(extra_buffers_size, sizeof(void *)) - 3107 ALIGN(secctx_sz, sizeof(u64)); 3108 3109 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset; 3110 err = binder_alloc_copy_to_buffer(&target_proc->alloc, 3111 t->buffer, buf_offset, 3112 secctx, secctx_sz); 3113 if (err) { 3114 t->security_ctx = 0; 3115 WARN_ON(1); 3116 } 3117 security_release_secctx(secctx, secctx_sz); 3118 secctx = NULL; 3119 } 3120 t->buffer->debug_id = t->debug_id; 3121 t->buffer->transaction = t; 3122 t->buffer->target_node = target_node; 3123 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF); 3124 trace_binder_transaction_alloc_buf(t->buffer); 3125 3126 if (binder_alloc_copy_user_to_buffer( 3127 &target_proc->alloc, 3128 t->buffer, 3129 ALIGN(tr->data_size, sizeof(void *)), 3130 (const void __user *) 3131 (uintptr_t)tr->data.ptr.offsets, 3132 tr->offsets_size)) { 3133 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 3134 proc->pid, thread->pid); 3135 return_error = BR_FAILED_REPLY; 3136 return_error_param = -EFAULT; 3137 return_error_line = __LINE__; 3138 goto err_copy_data_failed; 3139 } 3140 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) { 3141 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n", 3142 proc->pid, thread->pid, (u64)tr->offsets_size); 3143 return_error = BR_FAILED_REPLY; 3144 return_error_param = -EINVAL; 3145 return_error_line = __LINE__; 3146 goto err_bad_offset; 3147 } 3148 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) { 3149 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n", 3150 proc->pid, thread->pid, 3151 (u64)extra_buffers_size); 3152 return_error = BR_FAILED_REPLY; 3153 return_error_param = -EINVAL; 3154 return_error_line = __LINE__; 3155 goto err_bad_offset; 3156 } 3157 off_start_offset = ALIGN(tr->data_size, 
sizeof(void *)); 3158 buffer_offset = off_start_offset; 3159 off_end_offset = off_start_offset + tr->offsets_size; 3160 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *)); 3161 sg_buf_end_offset = sg_buf_offset + extra_buffers_size - 3162 ALIGN(secctx_sz, sizeof(u64)); 3163 off_min = 0; 3164 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; 3165 buffer_offset += sizeof(binder_size_t)) { 3166 struct binder_object_header *hdr; 3167 size_t object_size; 3168 struct binder_object object; 3169 binder_size_t object_offset; 3170 binder_size_t copy_size; 3171 3172 if (binder_alloc_copy_from_buffer(&target_proc->alloc, 3173 &object_offset, 3174 t->buffer, 3175 buffer_offset, 3176 sizeof(object_offset))) { 3177 binder_txn_error("%d:%d copy offset from buffer failed\n", 3178 thread->pid, proc->pid); 3179 return_error = BR_FAILED_REPLY; 3180 return_error_param = -EINVAL; 3181 return_error_line = __LINE__; 3182 goto err_bad_offset; 3183 } 3184 3185 /* 3186 * Copy the source user buffer up to the next object 3187 * that will be processed. 3188 */ 3189 copy_size = object_offset - user_offset; 3190 if (copy_size && (user_offset > object_offset || 3191 binder_alloc_copy_user_to_buffer( 3192 &target_proc->alloc, 3193 t->buffer, user_offset, 3194 user_buffer + user_offset, 3195 copy_size))) { 3196 binder_user_error("%d:%d got transaction with invalid data ptr\n", 3197 proc->pid, thread->pid); 3198 return_error = BR_FAILED_REPLY; 3199 return_error_param = -EFAULT; 3200 return_error_line = __LINE__; 3201 goto err_copy_data_failed; 3202 } 3203 object_size = binder_get_object(target_proc, user_buffer, 3204 t->buffer, object_offset, &object); 3205 if (object_size == 0 || object_offset < off_min) { 3206 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n", 3207 proc->pid, thread->pid, 3208 (u64)object_offset, 3209 (u64)off_min, 3210 (u64)t->buffer->data_size); 3211 return_error = BR_FAILED_REPLY; 3212 return_error_param = -EINVAL; 3213 return_error_line = __LINE__; 3214 goto err_bad_offset; 3215 } 3216 /* 3217 * Set offset to the next buffer fragment to be 3218 * copied 3219 */ 3220 user_offset = object_offset + object_size; 3221 3222 hdr = &object.hdr; 3223 off_min = object_offset + object_size; 3224 switch (hdr->type) { 3225 case BINDER_TYPE_BINDER: 3226 case BINDER_TYPE_WEAK_BINDER: { 3227 struct flat_binder_object *fp; 3228 3229 fp = to_flat_binder_object(hdr); 3230 ret = binder_translate_binder(fp, t, thread); 3231 3232 if (ret < 0 || 3233 binder_alloc_copy_to_buffer(&target_proc->alloc, 3234 t->buffer, 3235 object_offset, 3236 fp, sizeof(*fp))) { 3237 binder_txn_error("%d:%d translate binder failed\n", 3238 thread->pid, proc->pid); 3239 return_error = BR_FAILED_REPLY; 3240 return_error_param = ret; 3241 return_error_line = __LINE__; 3242 goto err_translate_failed; 3243 } 3244 } break; 3245 case BINDER_TYPE_HANDLE: 3246 case BINDER_TYPE_WEAK_HANDLE: { 3247 struct flat_binder_object *fp; 3248 3249 fp = to_flat_binder_object(hdr); 3250 ret = binder_translate_handle(fp, t, thread); 3251 if (ret < 0 || 3252 binder_alloc_copy_to_buffer(&target_proc->alloc, 3253 t->buffer, 3254 object_offset, 3255 fp, sizeof(*fp))) { 3256 binder_txn_error("%d:%d translate handle failed\n", 3257 thread->pid, proc->pid); 3258 return_error = BR_FAILED_REPLY; 3259 return_error_param = ret; 3260 return_error_line = __LINE__; 3261 goto err_translate_failed; 3262 } 3263 } break; 3264 3265 case BINDER_TYPE_FD: { 3266 struct binder_fd_object *fp = 
to_binder_fd_object(hdr); 3267 binder_size_t fd_offset = object_offset + 3268 (uintptr_t)&fp->fd - (uintptr_t)fp; 3269 int ret = binder_translate_fd(fp->fd, fd_offset, t, 3270 thread, in_reply_to); 3271 3272 fp->pad_binder = 0; 3273 if (ret < 0 || 3274 binder_alloc_copy_to_buffer(&target_proc->alloc, 3275 t->buffer, 3276 object_offset, 3277 fp, sizeof(*fp))) { 3278 binder_txn_error("%d:%d translate fd failed\n", 3279 thread->pid, proc->pid); 3280 return_error = BR_FAILED_REPLY; 3281 return_error_param = ret; 3282 return_error_line = __LINE__; 3283 goto err_translate_failed; 3284 } 3285 } break; 3286 case BINDER_TYPE_FDA: { 3287 struct binder_object ptr_object; 3288 binder_size_t parent_offset; 3289 struct binder_object user_object; 3290 size_t user_parent_size; 3291 struct binder_fd_array_object *fda = 3292 to_binder_fd_array_object(hdr); 3293 size_t num_valid = (buffer_offset - off_start_offset) / 3294 sizeof(binder_size_t); 3295 struct binder_buffer_object *parent = 3296 binder_validate_ptr(target_proc, t->buffer, 3297 &ptr_object, fda->parent, 3298 off_start_offset, 3299 &parent_offset, 3300 num_valid); 3301 if (!parent) { 3302 binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 3303 proc->pid, thread->pid); 3304 return_error = BR_FAILED_REPLY; 3305 return_error_param = -EINVAL; 3306 return_error_line = __LINE__; 3307 goto err_bad_parent; 3308 } 3309 if (!binder_validate_fixup(target_proc, t->buffer, 3310 off_start_offset, 3311 parent_offset, 3312 fda->parent_offset, 3313 last_fixup_obj_off, 3314 last_fixup_min_off)) { 3315 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", 3316 proc->pid, thread->pid); 3317 return_error = BR_FAILED_REPLY; 3318 return_error_param = -EINVAL; 3319 return_error_line = __LINE__; 3320 goto err_bad_parent; 3321 } 3322 /* 3323 * We need to read the user version of the parent 3324 * object to get the original user offset 3325 */ 3326 user_parent_size = 3327 binder_get_object(proc, user_buffer, t->buffer, 3328 parent_offset, &user_object); 3329 if (user_parent_size != sizeof(user_object.bbo)) { 3330 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n", 3331 proc->pid, thread->pid, 3332 user_parent_size, 3333 sizeof(user_object.bbo)); 3334 return_error = BR_FAILED_REPLY; 3335 return_error_param = -EINVAL; 3336 return_error_line = __LINE__; 3337 goto err_bad_parent; 3338 } 3339 ret = binder_translate_fd_array(&pf_head, fda, 3340 user_buffer, parent, 3341 &user_object.bbo, t, 3342 thread, in_reply_to); 3343 if (!ret) 3344 ret = binder_alloc_copy_to_buffer(&target_proc->alloc, 3345 t->buffer, 3346 object_offset, 3347 fda, sizeof(*fda)); 3348 if (ret) { 3349 binder_txn_error("%d:%d translate fd array failed\n", 3350 thread->pid, proc->pid); 3351 return_error = BR_FAILED_REPLY; 3352 return_error_param = ret > 0 ? 
-EINVAL : ret; 3353 return_error_line = __LINE__; 3354 goto err_translate_failed; 3355 } 3356 last_fixup_obj_off = parent_offset; 3357 last_fixup_min_off = 3358 fda->parent_offset + sizeof(u32) * fda->num_fds; 3359 } break; 3360 case BINDER_TYPE_PTR: { 3361 struct binder_buffer_object *bp = 3362 to_binder_buffer_object(hdr); 3363 size_t buf_left = sg_buf_end_offset - sg_buf_offset; 3364 size_t num_valid; 3365 3366 if (bp->length > buf_left) { 3367 binder_user_error("%d:%d got transaction with too large buffer\n", 3368 proc->pid, thread->pid); 3369 return_error = BR_FAILED_REPLY; 3370 return_error_param = -EINVAL; 3371 return_error_line = __LINE__; 3372 goto err_bad_offset; 3373 } 3374 ret = binder_defer_copy(&sgc_head, sg_buf_offset, 3375 (const void __user *)(uintptr_t)bp->buffer, 3376 bp->length); 3377 if (ret) { 3378 binder_txn_error("%d:%d deferred copy failed\n", 3379 thread->pid, proc->pid); 3380 return_error = BR_FAILED_REPLY; 3381 return_error_param = ret; 3382 return_error_line = __LINE__; 3383 goto err_translate_failed; 3384 } 3385 /* Fixup buffer pointer to target proc address space */ 3386 bp->buffer = (uintptr_t) 3387 t->buffer->user_data + sg_buf_offset; 3388 sg_buf_offset += ALIGN(bp->length, sizeof(u64)); 3389 3390 num_valid = (buffer_offset - off_start_offset) / 3391 sizeof(binder_size_t); 3392 ret = binder_fixup_parent(&pf_head, t, 3393 thread, bp, 3394 off_start_offset, 3395 num_valid, 3396 last_fixup_obj_off, 3397 last_fixup_min_off); 3398 if (ret < 0 || 3399 binder_alloc_copy_to_buffer(&target_proc->alloc, 3400 t->buffer, 3401 object_offset, 3402 bp, sizeof(*bp))) { 3403 binder_txn_error("%d:%d failed to fixup parent\n", 3404 thread->pid, proc->pid); 3405 return_error = BR_FAILED_REPLY; 3406 return_error_param = ret; 3407 return_error_line = __LINE__; 3408 goto err_translate_failed; 3409 } 3410 last_fixup_obj_off = object_offset; 3411 last_fixup_min_off = 0; 3412 } break; 3413 default: 3414 binder_user_error("%d:%d got transaction with invalid object type, %x\n", 3415 proc->pid, thread->pid, hdr->type); 3416 return_error = BR_FAILED_REPLY; 3417 return_error_param = -EINVAL; 3418 return_error_line = __LINE__; 3419 goto err_bad_object_type; 3420 } 3421 } 3422 /* Done processing objects, copy the rest of the buffer */ 3423 if (binder_alloc_copy_user_to_buffer( 3424 &target_proc->alloc, 3425 t->buffer, user_offset, 3426 user_buffer + user_offset, 3427 tr->data_size - user_offset)) { 3428 binder_user_error("%d:%d got transaction with invalid data ptr\n", 3429 proc->pid, thread->pid); 3430 return_error = BR_FAILED_REPLY; 3431 return_error_param = -EFAULT; 3432 return_error_line = __LINE__; 3433 goto err_copy_data_failed; 3434 } 3435 3436 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer, 3437 &sgc_head, &pf_head); 3438 if (ret) { 3439 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 3440 proc->pid, thread->pid); 3441 return_error = BR_FAILED_REPLY; 3442 return_error_param = ret; 3443 return_error_line = __LINE__; 3444 goto err_copy_data_failed; 3445 } 3446 if (t->buffer->oneway_spam_suspect) 3447 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT; 3448 else 3449 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; 3450 t->work.type = BINDER_WORK_TRANSACTION; 3451 3452 if (reply) { 3453 binder_enqueue_thread_work(thread, tcomplete); 3454 binder_inner_proc_lock(target_proc); 3455 if (target_thread->is_dead) { 3456 return_error = BR_DEAD_REPLY; 3457 binder_inner_proc_unlock(target_proc); 3458 goto err_dead_proc_or_thread; 3459 } 
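/*
 * Deliver the reply directly to the thread that issued the original
 * transaction: pop in_reply_to off its stack, queue t->work to it
 * and wake it up synchronously.
 */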
3460 BUG_ON(t->buffer->async_transaction != 0);
3461 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3462 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3463 target_proc->outstanding_txns++;
3464 binder_inner_proc_unlock(target_proc);
3465 wake_up_interruptible_sync(&target_thread->wait);
3466 binder_free_transaction(in_reply_to);
3467 } else if (!(t->flags & TF_ONE_WAY)) {
3468 BUG_ON(t->buffer->async_transaction != 0);
3469 binder_inner_proc_lock(proc);
3470 /*
3471 * Defer the TRANSACTION_COMPLETE, so we don't return to
3472 * userspace immediately; this allows the target process to
3473 * immediately start processing this transaction, reducing
3474 * latency. We will then return the TRANSACTION_COMPLETE when
3475 * the target replies (or there is an error).
3476 */
3477 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3478 t->need_reply = 1;
3479 t->from_parent = thread->transaction_stack;
3480 thread->transaction_stack = t;
3481 binder_inner_proc_unlock(proc);
3482 return_error = binder_proc_transaction(t,
3483 target_proc, target_thread);
3484 if (return_error) {
3485 binder_inner_proc_lock(proc);
3486 binder_pop_transaction_ilocked(thread, t);
3487 binder_inner_proc_unlock(proc);
3488 goto err_dead_proc_or_thread;
3489 }
3490 } else {
3491 BUG_ON(target_node == NULL);
3492 BUG_ON(t->buffer->async_transaction != 1);
3493 binder_enqueue_thread_work(thread, tcomplete);
3494 return_error = binder_proc_transaction(t, target_proc, NULL);
3495 if (return_error)
3496 goto err_dead_proc_or_thread;
3497 }
3498 if (target_thread)
3499 binder_thread_dec_tmpref(target_thread);
3500 binder_proc_dec_tmpref(target_proc);
3501 if (target_node)
3502 binder_dec_node_tmpref(target_node);
3503 /*
3504 * write barrier to synchronize with initialization
3505 * of log entry
3506 */
3507 smp_wmb();
3508 WRITE_ONCE(e->debug_id_done, t_debug_id);
3509 return;
3510
3511 err_dead_proc_or_thread:
3512 binder_txn_error("%d:%d dead process or thread\n",
3513 thread->pid, proc->pid);
3514 return_error_line = __LINE__;
3515 binder_dequeue_work(proc, tcomplete);
3516 err_translate_failed:
3517 err_bad_object_type:
3518 err_bad_offset:
3519 err_bad_parent:
3520 err_copy_data_failed:
3521 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3522 binder_free_txn_fixups(t);
3523 trace_binder_transaction_failed_buffer_release(t->buffer);
3524 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3525 buffer_offset, true);
3526 if (target_node)
3527 binder_dec_node_tmpref(target_node);
3528 target_node = NULL;
3529 t->buffer->transaction = NULL;
3530 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3531 err_binder_alloc_buf_failed:
3532 err_bad_extra_size:
3533 if (secctx)
3534 security_release_secctx(secctx, secctx_sz);
3535 err_get_secctx_failed:
3536 kfree(tcomplete);
3537 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3538 err_alloc_tcomplete_failed:
3539 if (trace_binder_txn_latency_free_enabled())
3540 binder_txn_latency_free(t);
3541 kfree(t);
3542 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3543 err_alloc_t_failed:
3544 err_bad_todo_list:
3545 err_bad_call_stack:
3546 err_empty_call_stack:
3547 err_dead_binder:
3548 err_invalid_target_handle:
3549 if (target_node) {
3550 binder_dec_node(target_node, 1, 0);
3551 binder_dec_node_tmpref(target_node);
3552 }
3553
3554 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3555 "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
3556 proc->pid, thread->pid, reply ? "reply" :
3557 (tr->flags & TF_ONE_WAY ? "async" : "call"),
3558 target_proc ? target_proc->pid : 0,
3559 target_thread ? target_thread->pid : 0,
3560 t_debug_id, return_error, return_error_param,
3561 (u64)tr->data_size, (u64)tr->offsets_size,
3562 return_error_line);
3563
3564 if (target_thread)
3565 binder_thread_dec_tmpref(target_thread);
3566 if (target_proc)
3567 binder_proc_dec_tmpref(target_proc);
3568
3569 {
3570 struct binder_transaction_log_entry *fe;
3571
3572 e->return_error = return_error;
3573 e->return_error_param = return_error_param;
3574 e->return_error_line = return_error_line;
3575 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3576 *fe = *e;
3577 /*
3578 * write barrier to synchronize with initialization
3579 * of log entry
3580 */
3581 smp_wmb();
3582 WRITE_ONCE(e->debug_id_done, t_debug_id);
3583 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3584 }
3585
3586 BUG_ON(thread->return_error.cmd != BR_OK);
3587 if (in_reply_to) {
3588 binder_set_txn_from_error(in_reply_to, t_debug_id,
3589 return_error, return_error_param);
3590 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3591 binder_enqueue_thread_work(thread, &thread->return_error.work);
3592 binder_send_failed_reply(in_reply_to, return_error);
3593 } else {
3594 binder_inner_proc_lock(proc);
3595 binder_set_extended_error(&thread->ee, t_debug_id,
3596 return_error, return_error_param);
3597 binder_inner_proc_unlock(proc);
3598 thread->return_error.cmd = return_error;
3599 binder_enqueue_thread_work(thread, &thread->return_error.work);
3600 }
3601 }
3602
3603 /**
3604 * binder_free_buf() - free the specified buffer
3605 * @proc: binder proc that owns the buffer
* @thread: binder thread performing the buffer release
3606 * @buffer: buffer to be freed
3607 * @is_failure: true if the transaction failed to send
3608 *
3609 * If the buffer is for an async transaction, enqueue the next async
3610 * transaction from the node.
3611 *
3612 * Clean up the buffer and free it.
/**
 * binder_free_buf() - free the specified buffer
 * @proc:	binder proc that owns the buffer
 * @thread:	binder thread performing the buffer release
 * @buffer:	buffer to be freed
 * @is_failure:	true if the transaction failed to send
 *
 * If the buffer is for an async transaction, enqueue the next async
 * transaction from the node.
 *
 * Cleanup buffer and free it.
 */
static void
binder_free_buf(struct binder_proc *proc,
		struct binder_thread *thread,
		struct binder_buffer *buffer, bool is_failure)
{
	binder_inner_proc_lock(proc);
	if (buffer->transaction) {
		buffer->transaction->buffer = NULL;
		buffer->transaction = NULL;
	}
	binder_inner_proc_unlock(proc);
	if (buffer->async_transaction && buffer->target_node) {
		struct binder_node *buf_node;
		struct binder_work *w;

		buf_node = buffer->target_node;
		binder_node_inner_lock(buf_node);
		BUG_ON(!buf_node->has_async_transaction);
		BUG_ON(buf_node->proc != proc);
		w = binder_dequeue_work_head_ilocked(
				&buf_node->async_todo);
		if (!w) {
			buf_node->has_async_transaction = false;
		} else {
			binder_enqueue_work_ilocked(
					w, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
		binder_node_inner_unlock(buf_node);
	}
	trace_binder_transaction_buffer_release(buffer);
	binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
	binder_alloc_free_buf(&proc->alloc, buffer);
}
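/*
 * Illustrative sketch (user-space view, not driver code): the write
 * side parsed by binder_thread_write() below is a packed stream of
 * command words, each followed by its fixed-size payload. Freeing a
 * received buffer, for example, is a single BC_FREE_BUFFER record:
 *
 *	struct {
 *		uint32_t cmd;		// BC_FREE_BUFFER
 *		binder_uintptr_t ptr;	// buffer address from BR_TRANSACTION
 *	} __packed rec = { BC_FREE_BUFFER, data_ptr };
 *	struct binder_write_read bwr = {
 *		.write_size   = sizeof(rec),
 *		.write_buffer = (binder_uintptr_t)&rec,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 * "binder_fd" and "data_ptr" are assumed to come from the caller's
 * earlier open()/read cycle.
 */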
static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error.cmd == BR_OK) {
		int ret;

		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
			ret = -1;
			if (increment && !target) {
				struct binder_node *ctx_mgr_node;

				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node) {
					if (ctx_mgr_node->proc == proc) {
						binder_user_error("%d:%d context manager tried to acquire desc 0\n",
								  proc->pid, thread->pid);
						mutex_unlock(&context->context_mgr_node_lock);
						return -EINVAL;
					}
					ret = binder_inc_ref_for_node(
							proc, ctx_mgr_node,
							strong, NULL, &rdata);
				}
				mutex_unlock(&context->context_mgr_node_lock);
			}
			if (ret)
				ret = binder_update_ref_for_handle(
						proc, target, increment, strong,
						&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
						  proc->pid, thread->pid,
						  target, rdata.desc);
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				break;
			case BC_RELEASE:
				debug_string = "Release";
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				break;
			}
			if (ret) {
				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
						  proc->pid, thread->pid, debug_string,
						  strong, target, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d\n",
				     proc->pid, thread->pid, debug_string,
				     rdata.debug_id, rdata.desc, rdata.strong,
				     rdata.weak);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;
			bool free_node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
						  proc->pid, thread->pid,
						  cmd == BC_INCREFS_DONE ?
						  "BC_INCREFS_DONE" :
						  "BC_ACQUIRE_DONE",
						  (u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
						  proc->pid, thread->pid,
						  cmd == BC_INCREFS_DONE ?
						  "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
						  (u64)node_ptr, node->debug_id,
						  (u64)cookie, (u64)node->cookie);
				binder_put_node(node);
				break;
			}
			binder_node_inner_lock(node);
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
							  proc->pid, thread->pid,
							  node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
							  proc->pid, thread->pid,
							  node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_weak_ref = 0;
			}
			free_node = binder_dec_node_nilocked(node,
					cmd == BC_ACQUIRE_DONE, 0);
			WARN_ON(free_node);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d tr %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs,
				     node->local_weak_refs, node->tmp_refs);
			binder_node_inner_unlock(node);
			binder_put_node(node);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_alloc_prepare_to_free(&proc->alloc,
							      data_ptr);
			if (IS_ERR_OR_NULL(buffer)) {
				if (PTR_ERR(buffer) == -EPERM) {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				} else {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				}
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");
			binder_free_buf(proc, thread, buffer, false);
			break;
		}

		case BC_TRANSACTION_SG:
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
					   cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			binder_inner_proc_lock(proc);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
						  proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
						  proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			binder_inner_proc_unlock(proc);
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
						  proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death = NULL;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					binder_enqueue_thread_work(
						thread,
						&thread->return_error.work);
					binder_debug(
						BINDER_DEBUG_FAILED_TRANSACTION,
						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						proc->pid, thread->pid);
					break;
				}
			}
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
						  proc->pid, thread->pid,
						  cmd == BC_REQUEST_DEATH_NOTIFICATION ?
						  "BC_REQUEST_DEATH_NOTIFICATION" :
						  "BC_CLEAR_DEATH_NOTIFICATION",
						  target);
				binder_proc_unlock(proc);
				kfree(death);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->data.debug_id,
				     ref->data.desc, ref->data.strong,
				     ref->data.weak, ref->node->debug_id);

			binder_node_lock(ref->node);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
							  proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					kfree(death);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;

					binder_inner_proc_lock(proc);
					binder_enqueue_work_ilocked(
						&ref->death->work, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
					binder_inner_proc_unlock(proc);
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
							  proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
							  proc->pid, thread->pid,
							  (u64)death->cookie,
							  (u64)cookie);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				ref->death = NULL;
				binder_inner_proc_lock(proc);
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_thread_work_ilocked(
								thread,
								&death->work);
					else {
						binder_enqueue_work_ilocked(
								&death->work,
								&proc->todo);
						binder_wakeup_proc_ilocked(
								proc);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
				binder_inner_proc_unlock(proc);
			}
			binder_node_unlock(ref->node);
			binder_proc_unlock(proc);
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			binder_inner_proc_lock(proc);
			list_for_each_entry(w, &proc->delivered_death,
					    entry) {
				struct binder_ref_death *tmp_death =
					container_of(w,
						     struct binder_ref_death,
						     work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
						  proc->pid, thread->pid, (u64)cookie);
				binder_inner_proc_unlock(proc);
				break;
			}
			binder_dequeue_work_ilocked(&death->work);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper &
				    (BINDER_LOOPER_STATE_REGISTERED |
				     BINDER_LOOPER_STATE_ENTERED))
					binder_enqueue_thread_work_ilocked(
							thread, &death->work);
				else {
					binder_enqueue_work_ilocked(
							&death->work,
							&proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
			}
			binder_inner_proc_unlock(proc);
		} break;

		default:
			pr_err("%d:%d unknown command %u\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}

static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}

static int binder_put_node_cmd(struct binder_proc *proc,
			       struct binder_thread *thread,
			       void __user **ptrp,
			       binder_uintptr_t node_ptr,
			       binder_uintptr_t node_cookie,
			       int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{
	void __user *ptr = *ptrp;

	if (put_user(cmd, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);

	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	binder_stat_br(proc, thread, cmd);
	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
		     proc->pid, thread->pid, cmd_name, node_debug_id,
		     (u64)node_ptr, (u64)node_cookie);

	*ptrp = ptr;
	return 0;
}
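/*
 * Illustrative sketch (user-space view, not driver code): each
 * BR_INCREFS/BR_ACQUIRE record emitted by binder_put_node_cmd() above
 * carries the node's (ptr, cookie) pair, and user space is expected to
 * acknowledge it with the matching *_DONE command echoing that pair:
 *
 *	// after reading: BR_ACQUIRE, ptr, cookie
 *	struct {
 *		uint32_t cmd;		// BC_ACQUIRE_DONE
 *		binder_uintptr_t ptr;
 *		binder_uintptr_t cookie;
 *	} __packed ack = { BC_ACQUIRE_DONE, ptr, cookie };
 *	// written back on the next BINDER_WRITE_READ
 *
 * A mismatched cookie is rejected in binder_thread_write() above.
 */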
static int binder_wait_for_work(struct binder_thread *thread,
				bool do_proc_work)
{
	DEFINE_WAIT(wait);
	struct binder_proc *proc = thread->proc;
	int ret = 0;

	freezer_do_not_count();
	binder_inner_proc_lock(proc);
	for (;;) {
		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
		if (binder_has_work_ilocked(thread, do_proc_work))
			break;
		if (do_proc_work)
			list_add(&thread->waiting_thread_node,
				 &proc->waiting_threads);
		binder_inner_proc_unlock(proc);
		schedule();
		binder_inner_proc_lock(proc);
		list_del_init(&thread->waiting_thread_node);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
	}
	finish_wait(&thread->wait, &wait);
	binder_inner_proc_unlock(proc);
	freezer_count();

	return ret;
}

/**
 * binder_apply_fd_fixups() - finish fd translation
 * @proc:	binder_proc associated with @t->buffer
 * @t:		binder transaction with list of fd fixups
 *
 * Now that we are in the context of the transaction target
 * process, we can allocate and install fds. Process the
 * list of fds to translate and fixup the buffer with the
 * new fds first and only then install the files.
 *
 * If we fail to allocate an fd, skip the install and release
 * any fds that have already been allocated.
 */
static int binder_apply_fd_fixups(struct binder_proc *proc,
				  struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;
	int ret = 0;

	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
		int fd = get_unused_fd_flags(O_CLOEXEC);

		if (fd < 0) {
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "failed fd fixup txn %d fd %d\n",
				     t->debug_id, fd);
			ret = -ENOMEM;
			goto err;
		}
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "fd fixup txn %d fd %d\n",
			     t->debug_id, fd);
		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
		fixup->target_fd = fd;
		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
						fixup->offset, &fd,
						sizeof(u32))) {
			ret = -EINVAL;
			goto err;
		}
	}
	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fd_install(fixup->target_fd, fixup->file);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}

	return ret;

err:
	binder_free_txn_fixups(t);
	return ret;
}
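/*
 * Illustrative sketch (not driver code): the read buffer filled in by
 * binder_thread_read() below is a sequence of BR_* records, each a
 * 32-bit command optionally followed by a payload. A typical fill for
 * an incoming call looks like:
 *
 *	BR_NOOP					// always first on a fresh read
 *	BR_TRANSACTION | BR_TRANSACTION_SEC_CTX
 *	struct binder_transaction_data[_secctx]	// payload
 *
 * User space walks the buffer with the same cmd-then-payload logic
 * that binder_thread_write() uses for the write direction.
 */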
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
					  proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
	}

	if (non_block) {
		if (!binder_has_work(thread, wait_for_proc_work))
			ret = -EAGAIN;
	} else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}

	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data_secctx tr;
		struct binder_transaction_data *trd = &tr.transaction_data;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;
		size_t trsize = sizeof(*trd);

		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			 wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);
		if (binder_worklist_empty_ilocked(&thread->todo))
			thread->process_todo = false;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			WARN_ON(e->cmd == BR_OK);
			binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			cmd = e->cmd;
			e->cmd = BR_OK;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE:
		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
			if (proc->oneway_spam_detection_enabled &&
			    w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
				cmd = BR_ONEWAY_SPAM_SUSPECT;
			else
				cmd = BR_TRANSACTION_COMPLETE;
			binder_inner_proc_unlock(proc);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			int strong, weak;
			binder_uintptr_t node_ptr = node->ptr;
			binder_uintptr_t node_cookie = node->cookie;
			int node_debug_id = node->debug_id;
			int has_weak_ref;
			int has_strong_ref;
			void __user *orig_ptr = ptr;

			BUG_ON(proc != node->proc);
			strong = node->internal_strong_refs ||
					node->local_strong_refs;
			weak = !hlist_empty(&node->refs) ||
					node->local_weak_refs ||
					node->tmp_refs || strong;
			has_strong_ref = node->has_strong_ref;
			has_weak_ref = node->has_weak_ref;

			if (weak && !has_weak_ref) {
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			}
			if (strong && !has_strong_ref) {
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			}
			if (!strong && has_strong_ref)
				node->has_strong_ref = 0;
			if (!weak && has_weak_ref)
				node->has_weak_ref = 0;
			if (!weak && !strong) {
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx deleted\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_inner_proc_unlock(proc);
				binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
				binder_node_unlock(node);
				binder_free_node(node);
			} else
				binder_inner_proc_unlock(proc);

			if (weak && !has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_INCREFS, "BR_INCREFS");
			if (!ret && strong && !has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_ACQUIRE, "BR_ACQUIRE");
			if (!ret && !strong && has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_RELEASE, "BR_RELEASE");
			if (!ret && !weak && has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_DECREFS, "BR_DECREFS");
			if (orig_ptr == ptr)
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		default:
			binder_inner_proc_unlock(proc);
			pr_err("%d:%d: bad work type %d\n",
			       proc->pid, thread->pid, w->type);
			break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;

			trd->target.ptr = target_node->ptr;
			trd->cookie = target_node->cookie;
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			trd->target.ptr = 0;
			trd->cookie = 0;
			cmd = BR_REPLY;
		}
		trd->code = t->code;
		trd->flags = t->flags;
		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			trd->sender_pid =
				task_tgid_nr_ns(sender,
						task_active_pid_ns(current));
		} else {
			trd->sender_pid = 0;
		}

		ret = binder_apply_fd_fixups(proc, t);
		if (ret) {
			struct binder_buffer *buffer = t->buffer;
			bool oneway = !!(t->flags & TF_ONE_WAY);
			int tid = t->debug_id;

			if (t_from)
				binder_thread_dec_tmpref(t_from);
			buffer->transaction = NULL;
			binder_cleanup_transaction(t, "fd fixups failed",
						   BR_FAILED_REPLY);
			binder_free_buf(proc, thread, buffer, true);
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
				     proc->pid, thread->pid,
				     oneway ? "async " :
					(cmd == BR_REPLY ? "reply " : ""),
				     tid, BR_FAILED_REPLY, ret, __LINE__);
			if (cmd == BR_REPLY) {
				cmd = BR_FAILED_REPLY;
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				binder_stat_br(proc, thread, cmd);
				break;
			}
			continue;
		}
		trd->data_size = t->buffer->data_size;
		trd->offsets_size = t->buffer->offsets_size;
		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
		trd->data.ptr.offsets = trd->data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));

		tr.secctx = t->security_ctx;
		if (t->security_ctx) {
			cmd = BR_TRANSACTION_SEC_CTX;
			trsize = sizeof(tr);
		}
		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, trsize)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += trsize;

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     (cmd == BR_TRANSACTION_SEC_CTX) ?
				"BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)trd->data.ptr.buffer,
			     (u64)trd->data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
	if (proc->requested_threads == 0 &&
	    list_empty(&thread->proc->waiting_threads) &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
			       BINDER_LOOPER_STATE_ENTERED))
	    /* the user-space code fails to spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}

static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;
	enum binder_work_type wtype;

	while (1) {
		binder_inner_proc_lock(proc);
		w = binder_dequeue_work_head_ilocked(list);
		wtype = w ? w->type : 0;
		binder_inner_proc_unlock(proc);
		if (!w)
			return;

		switch (wtype) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);

			binder_cleanup_transaction(t, "process died.",
						   BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				     "undelivered TRANSACTION_ERROR: %u\n",
				     e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				     "undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				     "undelivered death notification, %016llx\n",
				     (u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		case BINDER_WORK_NODE:
			break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       wtype);
			break;
		}
	}
}

static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;
	thread->ee.command = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}
static void binder_free_proc(struct binder_proc *proc)
{
	struct binder_device *device;

	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	if (proc->outstanding_txns)
		pr_warn("%s: Unexpected outstanding_txns %d\n",
			__func__, proc->outstanding_txns);
	device = container_of(proc->context, struct binder_device, context);
	if (refcount_dec_and_test(&device->ref)) {
		kfree(proc->context->name);
		kfree(device);
	}
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	put_cred(proc->cred);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}

static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	kfree(thread);
}

static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	} else {
		__acquire(&t->lock);
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			thread->proc->outstanding_txns--;
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
		else
			__acquire(&t->lock);
	}
	/* annotation for sparse, lock not acquired in last iteration above */
	__release(&t->lock);

	/*
	 * If this thread used poll, make sure we remove the waitqueue from any
	 * poll data structures holding it.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		wake_up_pollfree(&thread->wait);

	binder_inner_proc_unlock(thread->proc);

	/*
	 * This is needed to avoid races between wake_up_pollfree() above and
	 * someone else removing the last entry from the queue for other reasons
	 * (e.g. ep_remove_wait_queue() being called due to an epoll file
	 * descriptor being closed). Such other users hold an RCU read lock, so
	 * we can be sure they're done after we call synchronize_rcu().
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		synchronize_rcu();

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}

static __poll_t binder_poll(struct file *filp,
			    struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);
	if (!thread)
		return POLLERR;

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);

	binder_inner_proc_unlock(thread->proc);

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
		return EPOLLIN;

	return 0;
}

static int binder_ioctl_write_read(struct file *filp,
				   unsigned int cmd, unsigned long arg,
				   struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
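/*
 * Illustrative sketch (user-space view, not driver code): a looper
 * thread typically drives binder_ioctl_write_read() above with a
 * combined write/read, then resubmits whatever was not consumed:
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)out_buf,
 *		.write_size   = out_len,
 *		.read_buffer  = (binder_uintptr_t)in_buf,
 *		.read_size    = sizeof(in_buf),
 *	};
 *	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) >= 0) {
 *		// bwr.write_consumed / bwr.read_consumed report progress;
 *		// unconsumed write bytes are resubmitted on the next call.
 *	}
 *
 * out_buf/in_buf/binder_fd are assumed to be set up by the caller.
 */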
static int binder_ioctl_set_ctx_mgr(struct file *filp,
				    struct flat_binder_object *fbo)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->cred);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, fbo);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}

static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
		struct binder_node_info_for_ref *info)
{
	struct binder_node *node;
	struct binder_context *context = proc->context;
	__u32 handle = info->handle;

	if (info->strong_count || info->weak_count || info->reserved1 ||
	    info->reserved2 || info->reserved3) {
		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
				  proc->pid);
		return -EINVAL;
	}

	/* This ioctl may only be used by the context manager */
	mutex_lock(&context->context_mgr_node_lock);
	if (!context->binder_context_mgr_node ||
	    context->binder_context_mgr_node->proc != proc) {
		mutex_unlock(&context->context_mgr_node_lock);
		return -EPERM;
	}
	mutex_unlock(&context->context_mgr_node_lock);

	node = binder_get_node_from_ref(proc, handle, true, NULL);
	if (!node)
		return -EINVAL;

	info->strong_count = node->local_strong_refs +
		node->internal_strong_refs;
	info->weak_count = node->local_weak_refs;

	binder_put_node(node);

	return 0;
}

static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;

	memset(info, 0, sizeof(*info));

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}

static bool binder_txns_pending_ilocked(struct binder_proc *proc)
{
	struct rb_node *n;
	struct binder_thread *thread;

	if (proc->outstanding_txns > 0)
		return true;

	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->transaction_stack)
			return true;
	}
	return false;
}
static int binder_ioctl_freeze(struct binder_freeze_info *info,
			       struct binder_proc *target_proc)
{
	int ret = 0;

	if (!info->enable) {
		binder_inner_proc_lock(target_proc);
		target_proc->sync_recv = false;
		target_proc->async_recv = false;
		target_proc->is_frozen = false;
		binder_inner_proc_unlock(target_proc);
		return 0;
	}

	/*
	 * Freezing the target. Prevent new transactions by
	 * setting frozen state. If timeout specified, wait
	 * for transactions to drain.
	 */
	binder_inner_proc_lock(target_proc);
	target_proc->sync_recv = false;
	target_proc->async_recv = false;
	target_proc->is_frozen = true;
	binder_inner_proc_unlock(target_proc);

	if (info->timeout_ms > 0)
		ret = wait_event_interruptible_timeout(
			target_proc->freeze_wait,
			(!target_proc->outstanding_txns),
			msecs_to_jiffies(info->timeout_ms));

	/* Check pending transactions that wait for reply */
	if (ret >= 0) {
		binder_inner_proc_lock(target_proc);
		if (binder_txns_pending_ilocked(target_proc))
			ret = -EAGAIN;
		binder_inner_proc_unlock(target_proc);
	}

	if (ret < 0) {
		binder_inner_proc_lock(target_proc);
		target_proc->is_frozen = false;
		binder_inner_proc_unlock(target_proc);
	}

	return ret;
}

static int binder_ioctl_get_freezer_info(
				struct binder_frozen_status_info *info)
{
	struct binder_proc *target_proc;
	bool found = false;
	__u32 txns_pending;

	info->sync_recv = 0;
	info->async_recv = 0;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
		if (target_proc->pid == info->pid) {
			found = true;
			binder_inner_proc_lock(target_proc);
			txns_pending = binder_txns_pending_ilocked(target_proc);
			info->sync_recv |= target_proc->sync_recv |
					(txns_pending << 1);
			info->async_recv |= target_proc->async_recv;
			binder_inner_proc_unlock(target_proc);
		}
	}
	mutex_unlock(&binder_procs_lock);

	if (!found)
		return -EINVAL;

	return 0;
}

static int binder_ioctl_get_extended_error(struct binder_thread *thread,
					   void __user *ubuf)
{
	struct binder_extended_error ee;

	binder_inner_proc_lock(thread->proc);
	ee = thread->ee;
	binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
	binder_inner_proc_unlock(thread->proc);

	if (copy_to_user(ubuf, &ee, sizeof(ee)))
		return -EFAULT;

	return 0;
}
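/*
 * Illustrative sketch (user-space view, not driver code): the freeze
 * helpers above are driven through two ioctls. A freezer might do:
 *
 *	struct binder_freeze_info fi = {
 *		.pid = target_pid, .enable = 1, .timeout_ms = 100,
 *	};
 *	if (ioctl(binder_fd, BINDER_FREEZE, &fi) < 0 && errno == EAGAIN) {
 *		// transactions still pending: unfreeze or retry
 *	}
 *	struct binder_frozen_status_info si = { .pid = target_pid };
 *	ioctl(binder_fd, BINDER_GET_FROZEN_INFO, &si);
 *	// si.sync_recv bit 0: a sync txn was rejected while frozen,
 *	// bit 1: txns still pending; si.async_recv: async txn received
 *
 * target_pid and binder_fd are assumed to come from the caller.
 */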
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR_EXT: {
		struct flat_binder_object fbo;

		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
			ret = -EINVAL;
			goto err;
		}
		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
		if (ret)
			goto err;
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_INFO_FOR_REF: {
		struct binder_node_info_for_ref info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		break;
	}
	case BINDER_GET_NODE_DEBUG_INFO: {
		struct binder_node_debug_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_FREEZE: {
		struct binder_freeze_info info;
		struct binder_proc **target_procs = NULL, *target_proc;
		int target_procs_count = 0, i = 0;

		ret = 0;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		mutex_lock(&binder_procs_lock);
		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
			if (target_proc->pid == info.pid)
				target_procs_count++;
		}

		if (target_procs_count == 0) {
			mutex_unlock(&binder_procs_lock);
			ret = -EINVAL;
			goto err;
		}

		target_procs = kcalloc(target_procs_count,
				       sizeof(struct binder_proc *),
				       GFP_KERNEL);

		if (!target_procs) {
			mutex_unlock(&binder_procs_lock);
			ret = -ENOMEM;
			goto err;
		}

		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
			if (target_proc->pid != info.pid)
				continue;

			binder_inner_proc_lock(target_proc);
			target_proc->tmp_ref++;
			binder_inner_proc_unlock(target_proc);

			target_procs[i++] = target_proc;
		}
		mutex_unlock(&binder_procs_lock);

		for (i = 0; i < target_procs_count; i++) {
			if (ret >= 0)
				ret = binder_ioctl_freeze(&info,
							  target_procs[i]);

			binder_proc_dec_tmpref(target_procs[i]);
		}

		kfree(target_procs);

		if (ret < 0)
			goto err;
		break;
	}
	case BINDER_GET_FROZEN_INFO: {
		struct binder_frozen_status_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_freezer_info(&info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
		uint32_t enable;

		if (copy_from_user(&enable, ubuf, sizeof(enable))) {
			ret = -EFAULT;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->oneway_spam_detection_enabled = (bool)enable;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_GET_EXTENDED_ERROR:
		ret = binder_ioctl_get_extended_error(thread, ubuf);
		if (ret < 0)
			goto err;
		break;
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -EINTR)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}

static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
}

static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};

static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct binder_proc *proc = filp->private_data;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
		return -EPERM;
	}
	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
	vma->vm_flags &= ~VM_MAYWRITE;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	return binder_alloc_mmap_handler(&proc->alloc, vma);
}
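/*
 * Illustrative sketch (user-space view, not driver code): the
 * open()/mmap() pair handled by binder_open() below and binder_mmap()
 * above is the canonical setup sequence; the mapping must be read-only
 * (FORBIDDEN_MMAP_FLAGS rejects VM_WRITE):
 *
 *	int binder_fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	void *map = mmap(NULL, map_size, PROT_READ, MAP_PRIVATE,
 *			 binder_fd, 0);
 *	// transaction buffers handed out via BR_TRANSACTION point
 *	// into this mapping; map_size is chosen by the caller.
 */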
stashes devices in i_private */ 5492 if (is_binderfs_device(nodp)) { 5493 binder_dev = nodp->i_private; 5494 info = nodp->i_sb->s_fs_info; 5495 binder_binderfs_dir_entry_proc = info->proc_log_dir; 5496 } else { 5497 binder_dev = container_of(filp->private_data, 5498 struct binder_device, miscdev); 5499 } 5500 refcount_inc(&binder_dev->ref); 5501 proc->context = &binder_dev->context; 5502 binder_alloc_init(&proc->alloc); 5503 5504 binder_stats_created(BINDER_STAT_PROC); 5505 proc->pid = current->group_leader->pid; 5506 INIT_LIST_HEAD(&proc->delivered_death); 5507 INIT_LIST_HEAD(&proc->waiting_threads); 5508 filp->private_data = proc; 5509 5510 mutex_lock(&binder_procs_lock); 5511 hlist_for_each_entry(itr, &binder_procs, proc_node) { 5512 if (itr->pid == proc->pid) { 5513 existing_pid = true; 5514 break; 5515 } 5516 } 5517 hlist_add_head(&proc->proc_node, &binder_procs); 5518 mutex_unlock(&binder_procs_lock); 5519 5520 if (binder_debugfs_dir_entry_proc && !existing_pid) { 5521 char strbuf[11]; 5522 5523 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 5524 /* 5525 * proc debug entries are shared between contexts. 5526 * Only create for the first PID to avoid debugfs log spamming 5527 * The printing code will anyway print all contexts for a given 5528 * PID so this is not a problem. 5529 */ 5530 proc->debugfs_entry = debugfs_create_file(strbuf, 0444, 5531 binder_debugfs_dir_entry_proc, 5532 (void *)(unsigned long)proc->pid, 5533 &proc_fops); 5534 } 5535 5536 if (binder_binderfs_dir_entry_proc && !existing_pid) { 5537 char strbuf[11]; 5538 struct dentry *binderfs_entry; 5539 5540 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 5541 /* 5542 * Similar to debugfs, the process specific log file is shared 5543 * between contexts. Only create for the first PID. 5544 * This is ok since same as debugfs, the log file will contain 5545 * information on all contexts of a given PID. 
5546 */ 5547 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc, 5548 strbuf, &proc_fops, (void *)(unsigned long)proc->pid); 5549 if (!IS_ERR(binderfs_entry)) { 5550 proc->binderfs_entry = binderfs_entry; 5551 } else { 5552 int error; 5553 5554 error = PTR_ERR(binderfs_entry); 5555 pr_warn("Unable to create file %s in binderfs (error %d)\n", 5556 strbuf, error); 5557 } 5558 } 5559 5560 return 0; 5561 } 5562 5563 static int binder_flush(struct file *filp, fl_owner_t id) 5564 { 5565 struct binder_proc *proc = filp->private_data; 5566 5567 binder_defer_work(proc, BINDER_DEFERRED_FLUSH); 5568 5569 return 0; 5570 } 5571 5572 static void binder_deferred_flush(struct binder_proc *proc) 5573 { 5574 struct rb_node *n; 5575 int wake_count = 0; 5576 5577 binder_inner_proc_lock(proc); 5578 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { 5579 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); 5580 5581 thread->looper_need_return = true; 5582 if (thread->looper & BINDER_LOOPER_STATE_WAITING) { 5583 wake_up_interruptible(&thread->wait); 5584 wake_count++; 5585 } 5586 } 5587 binder_inner_proc_unlock(proc); 5588 5589 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5590 "binder_flush: %d woke %d threads\n", proc->pid, 5591 wake_count); 5592 } 5593 5594 static int binder_release(struct inode *nodp, struct file *filp) 5595 { 5596 struct binder_proc *proc = filp->private_data; 5597 5598 debugfs_remove(proc->debugfs_entry); 5599 5600 if (proc->binderfs_entry) { 5601 binderfs_remove_file(proc->binderfs_entry); 5602 proc->binderfs_entry = NULL; 5603 } 5604 5605 binder_defer_work(proc, BINDER_DEFERRED_RELEASE); 5606 5607 return 0; 5608 } 5609 5610 static int binder_node_release(struct binder_node *node, int refs) 5611 { 5612 struct binder_ref *ref; 5613 int death = 0; 5614 struct binder_proc *proc = node->proc; 5615 5616 binder_release_work(proc, &node->async_todo); 5617 5618 binder_node_lock(node); 5619 binder_inner_proc_lock(proc); 5620 binder_dequeue_work_ilocked(&node->work); 5621 /* 5622 * The caller must have taken a temporary ref on the node. 5623 */ 5624 BUG_ON(!node->tmp_refs); 5625 if (hlist_empty(&node->refs) && node->tmp_refs == 1) { 5626 binder_inner_proc_unlock(proc); 5627 binder_node_unlock(node); 5628 binder_free_node(node); 5629 5630 return refs; 5631 } 5632 5633 node->proc = NULL; 5634 node->local_strong_refs = 0; 5635 node->local_weak_refs = 0; 5636 binder_inner_proc_unlock(proc); 5637 5638 spin_lock(&binder_dead_nodes_lock); 5639 hlist_add_head(&node->dead_node, &binder_dead_nodes); 5640 spin_unlock(&binder_dead_nodes_lock); 5641 5642 hlist_for_each_entry(ref, &node->refs, node_entry) { 5643 refs++; 5644 /* 5645 * Need the node lock to synchronize 5646 * with new notification requests and the 5647 * inner lock to synchronize with queued 5648 * death notifications.
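 * (Each ref with a pending death notification gets one
 * BINDER_WORK_DEAD_BINDER item queued on its proc's todo list
 * below; user space later acknowledges it with
 * BC_DEAD_BINDER_DONE.)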
5649 */ 5650 binder_inner_proc_lock(ref->proc); 5651 if (!ref->death) { 5652 binder_inner_proc_unlock(ref->proc); 5653 continue; 5654 } 5655 5656 death++; 5657 5658 BUG_ON(!list_empty(&ref->death->work.entry)); 5659 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 5660 binder_enqueue_work_ilocked(&ref->death->work, 5661 &ref->proc->todo); 5662 binder_wakeup_proc_ilocked(ref->proc); 5663 binder_inner_proc_unlock(ref->proc); 5664 } 5665 5666 binder_debug(BINDER_DEBUG_DEAD_BINDER, 5667 "node %d now dead, refs %d, death %d\n", 5668 node->debug_id, refs, death); 5669 binder_node_unlock(node); 5670 binder_put_node(node); 5671 5672 return refs; 5673 } 5674 5675 static void binder_deferred_release(struct binder_proc *proc) 5676 { 5677 struct binder_context *context = proc->context; 5678 struct rb_node *n; 5679 int threads, nodes, incoming_refs, outgoing_refs, active_transactions; 5680 5681 mutex_lock(&binder_procs_lock); 5682 hlist_del(&proc->proc_node); 5683 mutex_unlock(&binder_procs_lock); 5684 5685 mutex_lock(&context->context_mgr_node_lock); 5686 if (context->binder_context_mgr_node && 5687 context->binder_context_mgr_node->proc == proc) { 5688 binder_debug(BINDER_DEBUG_DEAD_BINDER, 5689 "%s: %d context_mgr_node gone\n", 5690 __func__, proc->pid); 5691 context->binder_context_mgr_node = NULL; 5692 } 5693 mutex_unlock(&context->context_mgr_node_lock); 5694 binder_inner_proc_lock(proc); 5695 /* 5696 * Make sure proc stays alive after we 5697 * remove all the threads 5698 */ 5699 proc->tmp_ref++; 5700 5701 proc->is_dead = true; 5702 proc->is_frozen = false; 5703 proc->sync_recv = false; 5704 proc->async_recv = false; 5705 threads = 0; 5706 active_transactions = 0; 5707 while ((n = rb_first(&proc->threads))) { 5708 struct binder_thread *thread; 5709 5710 thread = rb_entry(n, struct binder_thread, rb_node); 5711 binder_inner_proc_unlock(proc); 5712 threads++; 5713 active_transactions += binder_thread_release(proc, thread); 5714 binder_inner_proc_lock(proc); 5715 } 5716 5717 nodes = 0; 5718 incoming_refs = 0; 5719 while ((n = rb_first(&proc->nodes))) { 5720 struct binder_node *node; 5721 5722 node = rb_entry(n, struct binder_node, rb_node); 5723 nodes++; 5724 /* 5725 * take a temporary ref on the node before 5726 * calling binder_node_release() which will either 5727 * kfree() the node or call binder_put_node() 5728 */ 5729 binder_inc_node_tmpref_ilocked(node); 5730 rb_erase(&node->rb_node, &proc->nodes); 5731 binder_inner_proc_unlock(proc); 5732 incoming_refs = binder_node_release(node, incoming_refs); 5733 binder_inner_proc_lock(proc); 5734 } 5735 binder_inner_proc_unlock(proc); 5736 5737 outgoing_refs = 0; 5738 binder_proc_lock(proc); 5739 while ((n = rb_first(&proc->refs_by_desc))) { 5740 struct binder_ref *ref; 5741 5742 ref = rb_entry(n, struct binder_ref, rb_node_desc); 5743 outgoing_refs++; 5744 binder_cleanup_ref_olocked(ref); 5745 binder_proc_unlock(proc); 5746 binder_free_ref(ref); 5747 binder_proc_lock(proc); 5748 } 5749 binder_proc_unlock(proc); 5750 5751 binder_release_work(proc, &proc->todo); 5752 binder_release_work(proc, &proc->delivered_death); 5753 5754 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5755 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n", 5756 __func__, proc->pid, threads, nodes, incoming_refs, 5757 outgoing_refs, active_transactions); 5758 5759 binder_proc_dec_tmpref(proc); 5760 } 5761 5762 static void binder_deferred_func(struct work_struct *work) 5763 { 5764 struct binder_proc *proc; 5765 5766 int defer; 5767 5768 do { 5769 
mutex_lock(&binder_deferred_lock); 5770 if (!hlist_empty(&binder_deferred_list)) { 5771 proc = hlist_entry(binder_deferred_list.first, 5772 struct binder_proc, deferred_work_node); 5773 hlist_del_init(&proc->deferred_work_node); 5774 defer = proc->deferred_work; 5775 proc->deferred_work = 0; 5776 } else { 5777 proc = NULL; 5778 defer = 0; 5779 } 5780 mutex_unlock(&binder_deferred_lock); 5781 5782 if (defer & BINDER_DEFERRED_FLUSH) 5783 binder_deferred_flush(proc); 5784 5785 if (defer & BINDER_DEFERRED_RELEASE) 5786 binder_deferred_release(proc); /* frees proc */ 5787 } while (proc); 5788 } 5789 static DECLARE_WORK(binder_deferred_work, binder_deferred_func); 5790 5791 static void 5792 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) 5793 { 5794 mutex_lock(&binder_deferred_lock); 5795 proc->deferred_work |= defer; 5796 if (hlist_unhashed(&proc->deferred_work_node)) { 5797 hlist_add_head(&proc->deferred_work_node, 5798 &binder_deferred_list); 5799 schedule_work(&binder_deferred_work); 5800 } 5801 mutex_unlock(&binder_deferred_lock); 5802 } 5803 5804 static void print_binder_transaction_ilocked(struct seq_file *m, 5805 struct binder_proc *proc, 5806 const char *prefix, 5807 struct binder_transaction *t) 5808 { 5809 struct binder_proc *to_proc; 5810 struct binder_buffer *buffer = t->buffer; 5811 5812 spin_lock(&t->lock); 5813 to_proc = t->to_proc; 5814 seq_printf(m, 5815 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d", 5816 prefix, t->debug_id, t, 5817 t->from ? t->from->proc->pid : 0, 5818 t->from ? t->from->pid : 0, 5819 to_proc ? to_proc->pid : 0, 5820 t->to_thread ? t->to_thread->pid : 0, 5821 t->code, t->flags, t->priority, t->need_reply); 5822 spin_unlock(&t->lock); 5823 5824 if (proc != to_proc) { 5825 /* 5826 * Can only safely deref buffer if we are holding the 5827 * correct proc inner lock for this node 5828 */ 5829 seq_puts(m, "\n"); 5830 return; 5831 } 5832 5833 if (buffer == NULL) { 5834 seq_puts(m, " buffer free\n"); 5835 return; 5836 } 5837 if (buffer->target_node) 5838 seq_printf(m, " node %d", buffer->target_node->debug_id); 5839 seq_printf(m, " size %zd:%zd data %pK\n", 5840 buffer->data_size, buffer->offsets_size, 5841 buffer->user_data); 5842 } 5843 5844 static void print_binder_work_ilocked(struct seq_file *m, 5845 struct binder_proc *proc, 5846 const char *prefix, 5847 const char *transaction_prefix, 5848 struct binder_work *w) 5849 { 5850 struct binder_node *node; 5851 struct binder_transaction *t; 5852 5853 switch (w->type) { 5854 case BINDER_WORK_TRANSACTION: 5855 t = container_of(w, struct binder_transaction, work); 5856 print_binder_transaction_ilocked( 5857 m, proc, transaction_prefix, t); 5858 break; 5859 case BINDER_WORK_RETURN_ERROR: { 5860 struct binder_error *e = container_of( 5861 w, struct binder_error, work); 5862 5863 seq_printf(m, "%stransaction error: %u\n", 5864 prefix, e->cmd); 5865 } break; 5866 case BINDER_WORK_TRANSACTION_COMPLETE: 5867 seq_printf(m, "%stransaction complete\n", prefix); 5868 break; 5869 case BINDER_WORK_NODE: 5870 node = container_of(w, struct binder_node, work); 5871 seq_printf(m, "%snode work %d: u%016llx c%016llx\n", 5872 prefix, node->debug_id, 5873 (u64)node->ptr, (u64)node->cookie); 5874 break; 5875 case BINDER_WORK_DEAD_BINDER: 5876 seq_printf(m, "%shas dead binder\n", prefix); 5877 break; 5878 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 5879 seq_printf(m, "%shas cleared dead binder\n", prefix); 5880 break; 5881 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: 5882 seq_printf(m, "%shas cleared 
death notification\n", prefix); 5883 break; 5884 default: 5885 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); 5886 break; 5887 } 5888 } 5889 5890 static void print_binder_thread_ilocked(struct seq_file *m, 5891 struct binder_thread *thread, 5892 int print_always) 5893 { 5894 struct binder_transaction *t; 5895 struct binder_work *w; 5896 size_t start_pos = m->count; 5897 size_t header_pos; 5898 5899 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n", 5900 thread->pid, thread->looper, 5901 thread->looper_need_return, 5902 atomic_read(&thread->tmp_ref)); 5903 header_pos = m->count; 5904 t = thread->transaction_stack; 5905 while (t) { 5906 if (t->from == thread) { 5907 print_binder_transaction_ilocked(m, thread->proc, 5908 " outgoing transaction", t); 5909 t = t->from_parent; 5910 } else if (t->to_thread == thread) { 5911 print_binder_transaction_ilocked(m, thread->proc, 5912 " incoming transaction", t); 5913 t = t->to_parent; 5914 } else { 5915 print_binder_transaction_ilocked(m, thread->proc, 5916 " bad transaction", t); 5917 t = NULL; 5918 } 5919 } 5920 list_for_each_entry(w, &thread->todo, entry) { 5921 print_binder_work_ilocked(m, thread->proc, " ", 5922 " pending transaction", w); 5923 } 5924 if (!print_always && m->count == header_pos) 5925 m->count = start_pos; 5926 } 5927 5928 static void print_binder_node_nilocked(struct seq_file *m, 5929 struct binder_node *node) 5930 { 5931 struct binder_ref *ref; 5932 struct binder_work *w; 5933 int count; 5934 5935 count = 0; 5936 hlist_for_each_entry(ref, &node->refs, node_entry) 5937 count++; 5938 5939 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d", 5940 node->debug_id, (u64)node->ptr, (u64)node->cookie, 5941 node->has_strong_ref, node->has_weak_ref, 5942 node->local_strong_refs, node->local_weak_refs, 5943 node->internal_strong_refs, count, node->tmp_refs); 5944 if (count) { 5945 seq_puts(m, " proc"); 5946 hlist_for_each_entry(ref, &node->refs, node_entry) 5947 seq_printf(m, " %d", ref->proc->pid); 5948 } 5949 seq_puts(m, "\n"); 5950 if (node->proc) { 5951 list_for_each_entry(w, &node->async_todo, entry) 5952 print_binder_work_ilocked(m, node->proc, " ", 5953 " pending async transaction", w); 5954 } 5955 } 5956 5957 static void print_binder_ref_olocked(struct seq_file *m, 5958 struct binder_ref *ref) 5959 { 5960 binder_node_lock(ref->node); 5961 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n", 5962 ref->data.debug_id, ref->data.desc, 5963 ref->node->proc ? 
"" : "dead ", 5964 ref->node->debug_id, ref->data.strong, 5965 ref->data.weak, ref->death); 5966 binder_node_unlock(ref->node); 5967 } 5968 5969 static void print_binder_proc(struct seq_file *m, 5970 struct binder_proc *proc, int print_all) 5971 { 5972 struct binder_work *w; 5973 struct rb_node *n; 5974 size_t start_pos = m->count; 5975 size_t header_pos; 5976 struct binder_node *last_node = NULL; 5977 5978 seq_printf(m, "proc %d\n", proc->pid); 5979 seq_printf(m, "context %s\n", proc->context->name); 5980 header_pos = m->count; 5981 5982 binder_inner_proc_lock(proc); 5983 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 5984 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread, 5985 rb_node), print_all); 5986 5987 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 5988 struct binder_node *node = rb_entry(n, struct binder_node, 5989 rb_node); 5990 if (!print_all && !node->has_async_transaction) 5991 continue; 5992 5993 /* 5994 * take a temporary reference on the node so it 5995 * survives and isn't removed from the tree 5996 * while we print it. 5997 */ 5998 binder_inc_node_tmpref_ilocked(node); 5999 /* Need to drop inner lock to take node lock */ 6000 binder_inner_proc_unlock(proc); 6001 if (last_node) 6002 binder_put_node(last_node); 6003 binder_node_inner_lock(node); 6004 print_binder_node_nilocked(m, node); 6005 binder_node_inner_unlock(node); 6006 last_node = node; 6007 binder_inner_proc_lock(proc); 6008 } 6009 binder_inner_proc_unlock(proc); 6010 if (last_node) 6011 binder_put_node(last_node); 6012 6013 if (print_all) { 6014 binder_proc_lock(proc); 6015 for (n = rb_first(&proc->refs_by_desc); 6016 n != NULL; 6017 n = rb_next(n)) 6018 print_binder_ref_olocked(m, rb_entry(n, 6019 struct binder_ref, 6020 rb_node_desc)); 6021 binder_proc_unlock(proc); 6022 } 6023 binder_alloc_print_allocated(m, &proc->alloc); 6024 binder_inner_proc_lock(proc); 6025 list_for_each_entry(w, &proc->todo, entry) 6026 print_binder_work_ilocked(m, proc, " ", 6027 " pending transaction", w); 6028 list_for_each_entry(w, &proc->delivered_death, entry) { 6029 seq_puts(m, " has delivered dead binder\n"); 6030 break; 6031 } 6032 binder_inner_proc_unlock(proc); 6033 if (!print_all && m->count == header_pos) 6034 m->count = start_pos; 6035 } 6036 6037 static const char * const binder_return_strings[] = { 6038 "BR_ERROR", 6039 "BR_OK", 6040 "BR_TRANSACTION", 6041 "BR_REPLY", 6042 "BR_ACQUIRE_RESULT", 6043 "BR_DEAD_REPLY", 6044 "BR_TRANSACTION_COMPLETE", 6045 "BR_INCREFS", 6046 "BR_ACQUIRE", 6047 "BR_RELEASE", 6048 "BR_DECREFS", 6049 "BR_ATTEMPT_ACQUIRE", 6050 "BR_NOOP", 6051 "BR_SPAWN_LOOPER", 6052 "BR_FINISHED", 6053 "BR_DEAD_BINDER", 6054 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 6055 "BR_FAILED_REPLY", 6056 "BR_FROZEN_REPLY", 6057 "BR_ONEWAY_SPAM_SUSPECT", 6058 }; 6059 6060 static const char * const binder_command_strings[] = { 6061 "BC_TRANSACTION", 6062 "BC_REPLY", 6063 "BC_ACQUIRE_RESULT", 6064 "BC_FREE_BUFFER", 6065 "BC_INCREFS", 6066 "BC_ACQUIRE", 6067 "BC_RELEASE", 6068 "BC_DECREFS", 6069 "BC_INCREFS_DONE", 6070 "BC_ACQUIRE_DONE", 6071 "BC_ATTEMPT_ACQUIRE", 6072 "BC_REGISTER_LOOPER", 6073 "BC_ENTER_LOOPER", 6074 "BC_EXIT_LOOPER", 6075 "BC_REQUEST_DEATH_NOTIFICATION", 6076 "BC_CLEAR_DEATH_NOTIFICATION", 6077 "BC_DEAD_BINDER_DONE", 6078 "BC_TRANSACTION_SG", 6079 "BC_REPLY_SG", 6080 }; 6081 6082 static const char * const binder_objstat_strings[] = { 6083 "proc", 6084 "thread", 6085 "node", 6086 "ref", 6087 "death", 6088 "transaction", 6089 "transaction_complete" 6090 }; 6091 6092 
static void print_binder_stats(struct seq_file *m, const char *prefix, 6093 struct binder_stats *stats) 6094 { 6095 int i; 6096 6097 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != 6098 ARRAY_SIZE(binder_command_strings)); 6099 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { 6100 int temp = atomic_read(&stats->bc[i]); 6101 6102 if (temp) 6103 seq_printf(m, "%s%s: %d\n", prefix, 6104 binder_command_strings[i], temp); 6105 } 6106 6107 BUILD_BUG_ON(ARRAY_SIZE(stats->br) != 6108 ARRAY_SIZE(binder_return_strings)); 6109 for (i = 0; i < ARRAY_SIZE(stats->br); i++) { 6110 int temp = atomic_read(&stats->br[i]); 6111 6112 if (temp) 6113 seq_printf(m, "%s%s: %d\n", prefix, 6114 binder_return_strings[i], temp); 6115 } 6116 6117 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 6118 ARRAY_SIZE(binder_objstat_strings)); 6119 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 6120 ARRAY_SIZE(stats->obj_deleted)); 6121 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { 6122 int created = atomic_read(&stats->obj_created[i]); 6123 int deleted = atomic_read(&stats->obj_deleted[i]); 6124 6125 if (created || deleted) 6126 seq_printf(m, "%s%s: active %d total %d\n", 6127 prefix, 6128 binder_objstat_strings[i], 6129 created - deleted, 6130 created); 6131 } 6132 } 6133 6134 static void print_binder_proc_stats(struct seq_file *m, 6135 struct binder_proc *proc) 6136 { 6137 struct binder_work *w; 6138 struct binder_thread *thread; 6139 struct rb_node *n; 6140 int count, strong, weak, ready_threads; 6141 size_t free_async_space = 6142 binder_alloc_get_free_async_space(&proc->alloc); 6143 6144 seq_printf(m, "proc %d\n", proc->pid); 6145 seq_printf(m, "context %s\n", proc->context->name); 6146 count = 0; 6147 ready_threads = 0; 6148 binder_inner_proc_lock(proc); 6149 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 6150 count++; 6151 6152 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node) 6153 ready_threads++; 6154 6155 seq_printf(m, " threads: %d\n", count); 6156 seq_printf(m, " requested threads: %d+%d/%d\n" 6157 " ready threads %d\n" 6158 " free async space %zd\n", proc->requested_threads, 6159 proc->requested_threads_started, proc->max_threads, 6160 ready_threads, 6161 free_async_space); 6162 count = 0; 6163 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) 6164 count++; 6165 binder_inner_proc_unlock(proc); 6166 seq_printf(m, " nodes: %d\n", count); 6167 count = 0; 6168 strong = 0; 6169 weak = 0; 6170 binder_proc_lock(proc); 6171 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { 6172 struct binder_ref *ref = rb_entry(n, struct binder_ref, 6173 rb_node_desc); 6174 count++; 6175 strong += ref->data.strong; 6176 weak += ref->data.weak; 6177 } 6178 binder_proc_unlock(proc); 6179 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); 6180 6181 count = binder_alloc_get_allocated_count(&proc->alloc); 6182 seq_printf(m, " buffers: %d\n", count); 6183 6184 binder_alloc_print_pages(m, &proc->alloc); 6185 6186 count = 0; 6187 binder_inner_proc_lock(proc); 6188 list_for_each_entry(w, &proc->todo, entry) { 6189 if (w->type == BINDER_WORK_TRANSACTION) 6190 count++; 6191 } 6192 binder_inner_proc_unlock(proc); 6193 seq_printf(m, " pending transactions: %d\n", count); 6194 6195 print_binder_stats(m, " ", &proc->stats); 6196 } 6197 6198 6199 int binder_state_show(struct seq_file *m, void *unused) 6200 { 6201 struct binder_proc *proc; 6202 struct binder_node *node; 6203 struct binder_node *last_node = NULL; 6204 6205 seq_puts(m, "binder state:\n"); 6206 6207 
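/*
 * Dead nodes are printed first. Each node's tmp_refs is bumped so
 * the node survives while binder_dead_nodes_lock is dropped for
 * printing; the previous node's reference is put only after the
 * lock is released again, since freeing a dead node retakes
 * binder_dead_nodes_lock to unhash it.
 */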
spin_lock(&binder_dead_nodes_lock); 6208 if (!hlist_empty(&binder_dead_nodes)) 6209 seq_puts(m, "dead nodes:\n"); 6210 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) { 6211 /* 6212 * take a temporary reference on the node so it 6213 * survives and isn't removed from the list 6214 * while we print it. 6215 */ 6216 node->tmp_refs++; 6217 spin_unlock(&binder_dead_nodes_lock); 6218 if (last_node) 6219 binder_put_node(last_node); 6220 binder_node_lock(node); 6221 print_binder_node_nilocked(m, node); 6222 binder_node_unlock(node); 6223 last_node = node; 6224 spin_lock(&binder_dead_nodes_lock); 6225 } 6226 spin_unlock(&binder_dead_nodes_lock); 6227 if (last_node) 6228 binder_put_node(last_node); 6229 6230 mutex_lock(&binder_procs_lock); 6231 hlist_for_each_entry(proc, &binder_procs, proc_node) 6232 print_binder_proc(m, proc, 1); 6233 mutex_unlock(&binder_procs_lock); 6234 6235 return 0; 6236 } 6237 6238 int binder_stats_show(struct seq_file *m, void *unused) 6239 { 6240 struct binder_proc *proc; 6241 6242 seq_puts(m, "binder stats:\n"); 6243 6244 print_binder_stats(m, "", &binder_stats); 6245 6246 mutex_lock(&binder_procs_lock); 6247 hlist_for_each_entry(proc, &binder_procs, proc_node) 6248 print_binder_proc_stats(m, proc); 6249 mutex_unlock(&binder_procs_lock); 6250 6251 return 0; 6252 } 6253 6254 int binder_transactions_show(struct seq_file *m, void *unused) 6255 { 6256 struct binder_proc *proc; 6257 6258 seq_puts(m, "binder transactions:\n"); 6259 mutex_lock(&binder_procs_lock); 6260 hlist_for_each_entry(proc, &binder_procs, proc_node) 6261 print_binder_proc(m, proc, 0); 6262 mutex_unlock(&binder_procs_lock); 6263 6264 return 0; 6265 } 6266 6267 static int proc_show(struct seq_file *m, void *unused) 6268 { 6269 struct binder_proc *itr; 6270 int pid = (unsigned long)m->private; 6271 6272 mutex_lock(&binder_procs_lock); 6273 hlist_for_each_entry(itr, &binder_procs, proc_node) { 6274 if (itr->pid == pid) { 6275 seq_puts(m, "binder proc state:\n"); 6276 print_binder_proc(m, itr, 1); 6277 } 6278 } 6279 mutex_unlock(&binder_procs_lock); 6280 6281 return 0; 6282 } 6283 6284 static void print_binder_transaction_log_entry(struct seq_file *m, 6285 struct binder_transaction_log_entry *e) 6286 { 6287 int debug_id = READ_ONCE(e->debug_id_done); 6288 /* 6289 * read barrier to guarantee debug_id_done read before 6290 * we print the log values 6291 */ 6292 smp_rmb(); 6293 seq_printf(m, 6294 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d", 6295 e->debug_id, (e->call_type == 2) ? "reply" : 6296 ((e->call_type == 1) ? "async" : "call "), e->from_proc, 6297 e->from_thread, e->to_proc, e->to_thread, e->context_name, 6298 e->to_node, e->target_handle, e->data_size, e->offsets_size, 6299 e->return_error, e->return_error_param, 6300 e->return_error_line); 6301 /* 6302 * read-barrier to guarantee read of debug_id_done after 6303 * done printing the fields of the entry 6304 */ 6305 smp_rmb(); 6306 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ? 6307 "\n" : " (incomplete)\n"); 6308 } 6309 6310 int binder_transaction_log_show(struct seq_file *m, void *unused) 6311 { 6312 struct binder_transaction_log *log = m->private; 6313 unsigned int log_cur = atomic_read(&log->cur); 6314 unsigned int count; 6315 unsigned int cur; 6316 int i; 6317 6318 count = log_cur + 1; 6319 cur = count < ARRAY_SIZE(log->entry) && !log->full ? 
6320 0 : count % ARRAY_SIZE(log->entry); 6321 if (count > ARRAY_SIZE(log->entry) || log->full) 6322 count = ARRAY_SIZE(log->entry); 6323 for (i = 0; i < count; i++) { 6324 unsigned int index = cur++ % ARRAY_SIZE(log->entry); 6325 6326 print_binder_transaction_log_entry(m, &log->entry[index]); 6327 } 6328 return 0; 6329 } 6330 6331 const struct file_operations binder_fops = { 6332 .owner = THIS_MODULE, 6333 .poll = binder_poll, 6334 .unlocked_ioctl = binder_ioctl, 6335 .compat_ioctl = compat_ptr_ioctl, 6336 .mmap = binder_mmap, 6337 .open = binder_open, 6338 .flush = binder_flush, 6339 .release = binder_release, 6340 }; 6341 6342 static int __init init_binder_device(const char *name) 6343 { 6344 int ret; 6345 struct binder_device *binder_device; 6346 6347 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL); 6348 if (!binder_device) 6349 return -ENOMEM; 6350 6351 binder_device->miscdev.fops = &binder_fops; 6352 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR; 6353 binder_device->miscdev.name = name; 6354 6355 refcount_set(&binder_device->ref, 1); 6356 binder_device->context.binder_context_mgr_uid = INVALID_UID; 6357 binder_device->context.name = name; 6358 mutex_init(&binder_device->context.context_mgr_node_lock); 6359 6360 ret = misc_register(&binder_device->miscdev); 6361 if (ret < 0) { 6362 kfree(binder_device); 6363 return ret; 6364 } 6365 6366 hlist_add_head(&binder_device->hlist, &binder_devices); 6367 6368 return ret; 6369 } 6370 6371 static int __init binder_init(void) 6372 { 6373 int ret; 6374 char *device_name, *device_tmp; 6375 struct binder_device *device; 6376 struct hlist_node *tmp; 6377 char *device_names = NULL; 6378 6379 ret = binder_alloc_shrinker_init(); 6380 if (ret) 6381 return ret; 6382 6383 atomic_set(&binder_transaction_log.cur, ~0U); 6384 atomic_set(&binder_transaction_log_failed.cur, ~0U); 6385 6386 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); 6387 if (binder_debugfs_dir_entry_root) 6388 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", 6389 binder_debugfs_dir_entry_root); 6390 6391 if (binder_debugfs_dir_entry_root) { 6392 debugfs_create_file("state", 6393 0444, 6394 binder_debugfs_dir_entry_root, 6395 NULL, 6396 &binder_state_fops); 6397 debugfs_create_file("stats", 6398 0444, 6399 binder_debugfs_dir_entry_root, 6400 NULL, 6401 &binder_stats_fops); 6402 debugfs_create_file("transactions", 6403 0444, 6404 binder_debugfs_dir_entry_root, 6405 NULL, 6406 &binder_transactions_fops); 6407 debugfs_create_file("transaction_log", 6408 0444, 6409 binder_debugfs_dir_entry_root, 6410 &binder_transaction_log, 6411 &binder_transaction_log_fops); 6412 debugfs_create_file("failed_transaction_log", 6413 0444, 6414 binder_debugfs_dir_entry_root, 6415 &binder_transaction_log_failed, 6416 &binder_transaction_log_fops); 6417 } 6418 6419 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) && 6420 strcmp(binder_devices_param, "") != 0) { 6421 /* 6422 * Copy the module_parameter string, because we don't want to 6423 * tokenize it in-place. 
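 * For example, the default CONFIG_ANDROID_BINDER_DEVICES string
 * "binder,hwbinder,vndbinder" is split on ',' below and yields
 * three misc devices.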
6424 */ 6425 device_names = kstrdup(binder_devices_param, GFP_KERNEL); 6426 if (!device_names) { 6427 ret = -ENOMEM; 6428 goto err_alloc_device_names_failed; 6429 } 6430 6431 device_tmp = device_names; 6432 while ((device_name = strsep(&device_tmp, ","))) { 6433 ret = init_binder_device(device_name); 6434 if (ret) 6435 goto err_init_binder_device_failed; 6436 } 6437 } 6438 6439 ret = init_binderfs(); 6440 if (ret) 6441 goto err_init_binder_device_failed; 6442 6443 return ret; 6444 6445 err_init_binder_device_failed: 6446 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) { 6447 misc_deregister(&device->miscdev); 6448 hlist_del(&device->hlist); 6449 kfree(device); 6450 } 6451 6452 kfree(device_names); 6453 6454 err_alloc_device_names_failed: 6455 debugfs_remove_recursive(binder_debugfs_dir_entry_root); 6456 6457 return ret; 6458 } 6459 6460 device_initcall(binder_init); 6461 6462 #define CREATE_TRACE_POINTS 6463 #include "binder_trace.h" 6464 6465 MODULE_LICENSE("GPL v2"); 6466