// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack.
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * is required in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>
#include <linux/ktime.h>

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
	BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & mask) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}
}

#define binder_txn_error(x...) \
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)

static __printf(1, 2) void binder_user_error(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}

	if (binder_stop_on_user_error)
		binder_stop_on_user_error = 2;
}

#define binder_set_extended_error(ee, _id, _command, _param) \
	do { \
		(ee)->id = _id; \
		(ee)->command = _command; \
		(ee)->param = _param; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
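
/*
 * Usage sketch (illustrative only, not part of the driver): the log is a
 * fixed-size ring indexed by an atomic counter, so with the 32-entry array
 * above a counter value of 33 maps to slot 33 % 32 = 1, and "full" latches
 * once the counter has wrapped at least once. A writer is expected to pair
 * with the barrier in binder_transaction_log_add() roughly as:
 *
 *	struct binder_transaction_log_entry *e;
 *
 *	e = binder_transaction_log_add(&binder_transaction_log);
 *	e->call_type = ...;		// fill in the entry fields
 *	smp_wmb();			// order the fields before debug_id_done
 *	WRITE_ONCE(e->debug_id_done, debug_id);
 *
 * so a reader that observes a non-zero debug_id_done (after smp_rmb()) can
 * trust the rest of the entry.
 */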
enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
	BINDER_LOOPER_STATE_POLL = 0x20,
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc: struct binder_proc being released
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc: struct binder_proc being released
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}
/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node: struct binder_node being released
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node: struct binder_node being released
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}
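
/*
 * Illustrative sketch (not part of the driver): together, the helpers above
 * implement the lock ordering described in the "Locking overview" comment at
 * the top of this file. A caller needing both a node's lock and its owning
 * proc's inner lock nests them as:
 *
 *	binder_node_lock(node);			// 2) node->lock
 *	binder_inner_proc_lock(node->proc);	// 3) proc->inner_lock
 *	...update node state and its todo lists...
 *	binder_inner_proc_unlock(node->proc);
 *	binder_node_unlock(node);
 *
 * which is exactly the pairing binder_node_inner_lock() and
 * binder_node_inner_unlock() encapsulate, including the NULL-proc (dead
 * node) case that only sparse needs to know about.
 */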
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc: binder_proc associated with list
 * @list: list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work: struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);

	/* (e)poll-based threads require an explicit wakeup signal when
	 * queuing their own work; they rely on these events to consume
	 * messages without blocking on I/O. Without the wakeup, such
	 * threads risk waiting indefinitely without handling the work.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
	    thread->pid == current->pid && !thread->process_todo)
		wake_up_interruptible_sync(&thread->wait);

	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}
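
/*
 * Usage sketch (illustrative only): the difference between the enqueue
 * variants above is whether the thread is forced to handle the work on its
 * next BINDER_WRITE_READ before it may sleep:
 *
 *	// work the thread must pick up promptly:
 *	binder_enqueue_thread_work(thread, &t->work);
 *
 *	// bookkeeping that can wait until the thread wakes for another
 *	// reason (process_todo is deliberately left untouched):
 *	binder_inner_proc_lock(thread->proc);
 *	binder_enqueue_deferred_thread_work_ilocked(thread, &node->work);
 *	binder_inner_proc_unlock(thread->proc);
 */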
/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc: binder_proc associated with list
 * @work: struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
	struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc: process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return: If there's a thread currently waiting for process work,
 * returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}
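
/*
 * Typical caller pattern (sketch, not from the original source): select a
 * waiting thread first, make the work visible under the inner lock, then
 * wake the selection:
 *
 *	binder_inner_proc_lock(proc);
 *	thread = binder_select_thread_ilocked(proc);
 *	binder_enqueue_work_ilocked(work, thread ? &thread->todo : &proc->todo);
 *	binder_wakeup_thread_ilocked(proc, thread, true);	// sync wakeup
 *	binder_inner_proc_unlock(proc);
 *
 * Because binder_select_thread_ilocked() dequeues the thread from
 * waiting_threads, skipping the wakeup would leave it sleeping indefinitely;
 * binder_wakeup_proc_ilocked() below bundles both steps for the simple case.
 */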
/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc: process to wake up a thread in
 * @thread: specific thread to wake-up (may be NULL)
 * @sync: whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}
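
/*
 * Usage sketch (illustrative only): node lookups return with an implicit
 * tmp_ref held, so each successful binder_get_node() must be balanced by
 * binder_put_node() (defined further below) once the caller is done:
 *
 *	struct binder_node *node = binder_get_node(proc, ptr);
 *
 *	if (node) {
 *		...node cannot be freed while the tmp_ref is held...
 *		binder_put_node(node);
 *	}
 */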
static struct binder_node *binder_init_node_ilocked(
	struct binder_proc *proc,
	struct binder_node *new_node,
	struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}
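
/*
 * Summary note (added for orientation, not from the original source): a
 * binder_node's lifetime is governed by four counts plus the node->refs
 * list, all manipulated by the helpers above:
 *
 *	internal_strong_refs	strong refs held on behalf of other procs
 *	local_strong_refs	strong refs from this proc (e.g. buffers)
 *	local_weak_refs		weak refs from this proc
 *	tmp_refs		short-lived in-kernel pins
 *
 * binder_dec_node_nilocked() only reports the node as freeable once every
 * one of these is zero and node->refs is empty.
 */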
/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node: node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node: node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}
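
/*
 * Usage sketch (illustrative only): translating a userspace handle into a
 * ref happens under the proc's outer lock, with need_strong_ref set when
 * the operation requires a live strong reference (e.g. a transaction
 * target) rather than a mere weak one:
 *
 *	binder_proc_lock(proc);
 *	ref = binder_get_ref_olocked(proc, desc, true);
 *	if (ref)
 *		...ref is stable while the outer lock is held...
 *	binder_proc_unlock(proc);
 */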
/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc: binder_proc that owns the ref
 * @node: binder_node of target
 * @new_ref: newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return: the ref for node. It is possible that another thread
 * allocated/initialized the ref first in which case the
 * returned ref would be different than the passed-in
 * new_ref. new_ref must be kfree'd by the caller in
 * this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
	struct binder_proc *proc,
	struct binder_node *node,
	struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
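
/*
 * Worked example (added for clarity) of the descriptor assignment above:
 * refs_by_desc is sorted by desc, so the loop advances the candidate past
 * each taken value and stops at the first gap. If a proc already holds
 * descriptors {1, 2, 4}, the candidate starts at 1, becomes 2, then 3, and
 * the loop breaks at 4, so the new ref gets desc 3. Descriptor 0 is
 * reserved for the context manager node.
 */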
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref: ref to be incremented
 * @strong: if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref: ref to be decremented
 * @strong: if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: %true if ref is cleaned up and ready to be freed.
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref: ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @increment: true=inc reference, false=dec reference
 * @strong: true=strong reference, false=weak reference
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @strong: true=strong reference, false=weak reference
 * @rdata: the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
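
/*
 * Context sketch (not from the original source): the BC_* reference
 * commands issued by userspace map directly onto the helpers above,
 * along the lines of:
 *
 *	BC_ACQUIRE  -> binder_update_ref_for_handle(proc, desc, true, true, &rdata)
 *	BC_RELEASE  -> binder_update_ref_for_handle(proc, desc, false, true, &rdata)
 *	BC_INCREFS  -> binder_update_ref_for_handle(proc, desc, true, false, &rdata)
 *	BC_DECREFS  -> binder_dec_ref_for_handle(proc, desc, false, &rdata)
 *
 * i.e. "increment" picks inc vs dec and "strong" picks which refcount is
 * adjusted.
 */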
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc: proc containing the ref
 * @node: target node
 * @strong: true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
				   struct binder_node *node,
				   bool strong,
				   struct list_head *target_list,
				   struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	if (ret && ref == new_ref) {
		/*
		 * Cleanup the failed reference here as the target
		 * could now be dead and have already released its
		 * references by now. Calling on the new reference
		 * with strong=0 and an outstanding tmp_ref will not
		 * decrement the node. The new_ref gets kfree'd below.
		 */
		binder_cleanup_ref_olocked(new_ref);
		ref = NULL;
	}

	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread: thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * An atomic protects the counter so it can be adjusted
	 * concurrently; the thread is only freed once it is dead
	 * and the count reaches zero under the inner lock.
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}
/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc: proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t: binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t: binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}

/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t: binder transaction for t->from
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		if (fixup->target_fd >= 0)
			put_unused_fd(fixup->target_fd);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}
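
/*
 * Usage sketch (illustrative only): the helpers above give the canonical
 * way to operate on a transaction's sender thread without racing against
 * its release:
 *
 *	struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
 *
 *	if (from) {
 *		...from is pinned and its proc's inner lock is held...
 *		binder_inner_proc_unlock(from->proc);
 *		binder_thread_dec_tmpref(from);
 *	}
 *
 * binder_send_failed_reply() below follows exactly this shape.
 */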
static void binder_txn_latency_free(struct binder_transaction *t)
{
	int from_proc, from_thread, to_proc, to_thread;

	spin_lock(&t->lock);
	from_proc = t->from ? t->from->proc->pid : 0;
	from_thread = t->from ? t->from->pid : 0;
	to_proc = t->to_proc ? t->to_proc->pid : 0;
	to_thread = t->to_thread ? t->to_thread->pid : 0;
	spin_unlock(&t->lock);

	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
}

static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		target_proc->outstanding_txns--;
		if (target_proc->outstanding_txns < 0)
			pr_warn("%s: Unexpected outstanding_txns %d\n",
				__func__, target_proc->outstanding_txns);
		if (!target_proc->outstanding_txns && target_proc->is_frozen)
			wake_up_interruptible_all(&target_proc->freeze_wait);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t: transaction that needs to be cleaned up
 * @reason: reason the transaction wasn't delivered
 * @error_code: error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "undelivered transaction %d, %s\n",
			     t->debug_id, reason);
		binder_free_transaction(t);
	}
}

/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc: binder_proc owning the buffer
 * @u: sender's user pointer to base of buffer
 * @buffer: binder_buffer that we're parsing.
 * @offset: offset in the @buffer at which to validate an object.
 * @object: struct binder_object to read into
 *
 * Copy the binder object at the given offset into @object. If @u is
 * provided then the copy is from the sender's buffer. If not, then
 * it is copied from the target's @buffer.
 *
 * Return: If there's a valid metadata object at @offset, the
 * size of that object. Otherwise, it returns zero. The object
 * is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				const void __user *u,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	if (u) {
		if (copy_from_user(object, u + offset, read_size))
			return 0;
	} else {
		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
						  offset, read_size))
			return 0;
	}

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}

/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @proc: binder_proc owning the buffer
 * @b: binder_buffer containing the object
 * @object: struct binder_object to read into
 * @index: index in offset array at which the binder_buffer_object is
 *         located
 * @start_offset: points to the start of the offset array
 * @object_offsetp: offset of @object read from @b
 * @num_valid: the number of valid offsets in the offset array
 *
 * Return: If @index is within the valid range of the offset array
 *         described by @start and @num_valid, and if there's a valid
 *         binder_buffer_object at the offset found in index @index
 *         of the offset array, that object is returned. Otherwise,
 *         %NULL is returned.
 *         Note that the offset found in index @index itself is not
 *         verified; this function assumes that @num_valid elements
 *         from @start were previously verified to have valid offsets.
 *         If @object_offsetp is non-NULL, then the offset within
 *         @b is written to it.
 */
static struct binder_buffer_object *binder_validate_ptr(
						struct binder_proc *proc,
						struct binder_buffer *b,
						struct binder_object *object,
						binder_size_t index,
						binder_size_t start_offset,
						binder_size_t *object_offsetp,
						binder_size_t num_valid)
{
	size_t object_size;
	binder_size_t object_offset;
	unsigned long buffer_offset;

	if (index >= num_valid)
		return NULL;

	buffer_offset = start_offset + sizeof(binder_size_t) * index;
	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
					  b, buffer_offset,
					  sizeof(object_offset)))
		return NULL;
	object_size = binder_get_object(proc, NULL, b, object_offset, object);
	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
		return NULL;
	if (object_offsetp)
		*object_offsetp = object_offset;

	return &object->bbo;
}

/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @proc: binder_proc owning the buffer
 * @b: transaction buffer
 * @objects_start_offset: offset to start of objects buffer
 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
 * @fixup_offset: start offset in @buffer to fix up
 * @last_obj_offset: offset to last binder_buffer_object that we fixed
 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
 *
 * Return: %true if a fixup in buffer @buffer at offset @offset is
 * allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.

/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @proc:		binder_proc owning the buffer
 * @b:			transaction buffer
 * @objects_start_offset: offset to start of objects buffer
 * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @b to fix up
 * @last_obj_offset:	offset to last binder_buffer_object that we fixed
 * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
 *
 * Return: %true if a fixup in buffer @b at offset @fixup_offset is
 * allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_proc *proc,
				  struct binder_buffer *b,
				  binder_size_t objects_start_offset,
				  binder_size_t buffer_obj_offset,
				  binder_size_t fixup_offset,
				  binder_size_t last_obj_offset,
				  binder_size_t last_min_offset)
{
	if (!last_obj_offset) {
		/* No object verified yet, so there is nothing to fix up in */
		return false;
	}

	while (last_obj_offset != buffer_obj_offset) {
		unsigned long buffer_offset;
		struct binder_object last_object;
		struct binder_buffer_object *last_bbo;
		size_t object_size = binder_get_object(proc, NULL, b,
						       last_obj_offset,
						       &last_object);
		if (object_size != sizeof(*last_bbo))
			return false;

		last_bbo = &last_object.bbo;
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
		buffer_offset = objects_start_offset +
			sizeof(binder_size_t) * last_bbo->parent;
		if (binder_alloc_copy_from_buffer(&proc->alloc,
						  &last_obj_offset,
						  b, buffer_offset,
						  sizeof(last_obj_offset)))
			return false;
	}
	return (fixup_offset >= last_min_offset);
}
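
/*
 * Worked walk-through (illustrative) of the check above on the
 * "allowed" tree from the comment, on a 64-bit kernel: when object E
 * (parent = A, offset = 32) is validated, last_obj_offset still points
 * at D. The loop walks D -> C and stops at A:
 *
 *	at D: last_min_offset = D.parent_offset + 8 = 0 + 8 = 8
 *	at C: last_min_offset = C.parent_offset + 8 = 16 + 8 = 24
 *
 * E's fixup_offset of 32 then satisfies fixup_offset >= 24, so the
 * fixup is accepted; anything below 24 would be rejected.
 */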

/**
 * struct binder_task_work_cb - for deferred close
 *
 * @twork:	callback_head for task work
 * @file:	file to fput() once the task work runs
 *
 * Structure to pass task work to be handled after
 * returning from binder_ioctl() via task_work_add().
 */
struct binder_task_work_cb {
	struct callback_head twork;
	struct file *file;
};

/**
 * binder_do_fd_close() - finish a deferred file close
 * @twork:	callback head for task work
 *
 * It is not safe to call ksys_close() during the binder_ioctl()
 * function if there is a chance that binder's own file descriptor
 * might be closed. This is to meet the requirements for using
 * fdget() (see comments for __fget_light()). Therefore use
 * task_work_add() to schedule the close operation once we have
 * returned from binder_ioctl(). This function is a callback
 * for that mechanism and drops the final reference (fput()) on the
 * file that was pinned for the deferred close.
 */
static void binder_do_fd_close(struct callback_head *twork)
{
	struct binder_task_work_cb *twcb = container_of(twork,
			struct binder_task_work_cb, twork);

	fput(twcb->file);
	kfree(twcb);
}

/**
 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
 * @fd:		file-descriptor to close
 *
 * See comments in binder_do_fd_close(). This function is used to schedule
 * a file-descriptor to be closed after returning from binder_ioctl().
 */
static void binder_deferred_fd_close(int fd)
{
	struct binder_task_work_cb *twcb;

	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
	if (!twcb)
		return;
	init_task_work(&twcb->twork, binder_do_fd_close);
	twcb->file = close_fd_get_file(fd);
	if (twcb->file) {
		// pin it until binder_do_fd_close(); see comments there
		get_file(twcb->file);
		filp_close(twcb->file, current->files);
		task_work_add(current, &twcb->twork, TWA_RESUME);
	} else {
		kfree(twcb);
	}
}
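
/*
 * Illustrative flow of the deferred close above (a sketch restating
 * the two functions, not new behaviour):
 *
 *	binder_deferred_fd_close(fd)
 *	  close_fd_get_file(fd)			// detach fd, get the file
 *	  get_file(file)			// pin past filp_close()
 *	  filp_close(file, current->files)	// flush/async release
 *	  task_work_add(current, ..., TWA_RESUME)
 *	  ... binder_ioctl() returns to userspace ...
 *	  binder_do_fd_close()			// task work callback
 *	    fput(file)				// drop the pinned reference
 */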

static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_thread *thread,
					      struct binder_buffer *buffer,
					      binder_size_t off_end_offset,
					      bool is_failure)
{
	int debug_id = buffer->debug_id;
	binder_size_t off_start_offset, buffer_offset;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size,
		     (unsigned long long)off_end_offset);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));

	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size = 0;
		struct binder_object object;
		binder_size_t object_offset;

		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
						   buffer, buffer_offset,
						   sizeof(object_offset)))
			object_size = binder_get_object(proc, NULL, buffer,
							object_offset, &object);
		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)object_offset, buffer->data_size);
			continue;
		}
		hdr = &object.hdr;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			/*
			 * No need to close the file here since user-space
			 * closes it for successfully delivered
			 * transactions. For transactions that weren't
			 * delivered, the new fd was never allocated so
			 * there is no need to close and the fput on the
			 * file is done when the transaction is torn
			 * down.
			 */
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			struct binder_object ptr_object;
			binder_size_t fda_offset;
			size_t fd_index;
			binder_size_t fd_buf_size;
			binder_size_t num_valid;

			if (is_failure) {
				/*
				 * The fd fixups have not been applied so no
				 * fds need to be closed.
				 */
				continue;
			}

			num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(proc, buffer, &ptr_object,
						     fda->parent,
						     off_start_offset,
						     NULL,
						     num_valid);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			/*
			 * the source data for binder_buffer_object is visible
			 * to user-space and the @buffer element is the user
			 * pointer to the buffer_object containing the fd_array.
			 * Convert the address to an offset relative to
			 * the base of the transaction buffer.
			 */
			fda_offset = (parent->buffer - (uintptr_t)buffer->user_data) +
				fda->parent_offset;
			for (fd_index = 0; fd_index < fda->num_fds;
			     fd_index++) {
				u32 fd;
				int err;
				binder_size_t offset = fda_offset +
					fd_index * sizeof(fd);

				err = binder_alloc_copy_from_buffer(
						&proc->alloc, &fd, buffer,
						offset, sizeof(fd));
				WARN_ON(err);
				if (!err) {
					binder_deferred_fd_close(fd);
					/*
					 * Need to make sure the thread goes
					 * back to userspace to complete the
					 * deferred close
					 */
					if (thread)
						thread->looper_need_return = true;
				}
			}
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}

/* Clean up all the objects in the buffer */
static inline void binder_release_entire_buffer(struct binder_proc *proc,
						struct binder_thread *thread,
						struct binder_buffer *buffer,
						bool is_failure)
{
	binder_size_t off_end_offset;

	off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
	off_end_offset += buffer->offsets_size;

	binder_transaction_buffer_release(proc, thread, buffer,
					  off_end_offset, is_failure);
}
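
/*
 * Worked example (illustrative, made-up sizes): for a buffer with
 * data_size = 100 and offsets_size = 16 on a 64-bit kernel,
 * ALIGN(100, sizeof(void *)) rounds the offset array start up to 104,
 * so binder_release_entire_buffer() passes off_end_offset = 104 + 16 =
 * 120 and the release loop above visits the two binder_size_t entries
 * at offsets 104 and 112.
 */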

static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}

static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		else
			__acquire(&node->proc->inner_lock);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		else
			__release(&node->proc->inner_lock);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}
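
/*
 * Illustrative summary (restating the two translators above, not new
 * behaviour) of how a flat_binder_object is rewritten in the target
 * buffer:
 *
 *	sender passes		target receives
 *	------------------	--------------------------------------
 *	BINDER_TYPE_BINDER	BINDER_TYPE_HANDLE; fp->handle set to a
 *				new/existing ref desc in the target
 *	BINDER_TYPE_HANDLE	BINDER_TYPE_BINDER if the node lives in
 *				the target (fp->binder = node->ptr),
 *				otherwise a handle in the target's ref
 *				table (fp->handle = dest_rdata.desc)
 *
 * The weak variants translate the same way, preserving weakness.
 */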

static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_txn_fd_fixup *fixup;
	struct file *file;
	int ret = 0;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	/*
	 * Add fixup record for this transaction. The allocation
	 * of the fd in the target needs to be done from a
	 * target thread.
	 */
	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
	if (!fixup) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	fixup->file = file;
	fixup->offset = fd_offset;
	fixup->target_fd = -1;
	trace_binder_transaction_fd_send(t, fd, fixup->offset);
	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);

	return ret;

err_alloc:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}

/**
 * struct binder_ptr_fixup - data to be fixed-up in target buffer
 * @offset:	offset in target buffer to fixup
 * @skip_size:	bytes to skip in copy (fixup will be written later)
 * @fixup_data:	data to write at fixup offset
 * @node:	list node
 *
 * This is used for the pointer fixup list (pf) which is created and consumed
 * during binder_transaction() and is only accessed locally. No
 * locking is necessary.
 *
 * The list is ordered by @offset.
 */
struct binder_ptr_fixup {
	binder_size_t offset;
	size_t skip_size;
	binder_uintptr_t fixup_data;
	struct list_head node;
};

/**
 * struct binder_sg_copy - scatter-gather data to be copied
 * @offset:		offset in target buffer
 * @sender_uaddr:	user address in source buffer
 * @length:		bytes to copy
 * @node:		list node
 *
 * This is used for the sg copy list (sgc) which is created and consumed
 * during binder_transaction() and is only accessed locally. No
 * locking is necessary.
 *
 * The list is ordered by @offset.
 */
struct binder_sg_copy {
	binder_size_t offset;
	const void __user *sender_uaddr;
	size_t length;
	struct list_head node;
};
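
/*
 * Illustrative sketch of the fd fixup lifecycle set up by
 * binder_translate_fd() above (struct binder_txn_fd_fixup comes from
 * binder_internal.h; the target-side step is performed elsewhere in
 * this file when the transaction is read):
 *
 *	sender context:
 *	  fget(fd)			// pin the file; no target fd yet
 *	  fixup->file = file;
 *	  fixup->offset = fd_offset;	// where the fd sits in t->buffer
 *	  fixup->target_fd = -1;
 *	  list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
 *
 *	target context:
 *	  a new fd is allocated in the target process and patched into
 *	  the buffer at fixup->offset before delivery to userspace.
 */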

/**
 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
 * @alloc:	binder_alloc associated with @buffer
 * @buffer:	binder buffer in target process
 * @sgc_head:	list_head of scatter-gather copy list
 * @pf_head:	list_head of pointer fixup list
 *
 * Processes all elements of @sgc_head, applying fixups from @pf_head
 * and copying the scatter-gather data from the source process' user
 * buffer to the target's buffer. It is expected that the list creation
 * and processing all occurs during binder_transaction() so these lists
 * are only accessed in local context.
 *
 * Return: 0=success, else -errno
 */
static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
					 struct binder_buffer *buffer,
					 struct list_head *sgc_head,
					 struct list_head *pf_head)
{
	int ret = 0;
	struct binder_sg_copy *sgc, *tmpsgc;
	struct binder_ptr_fixup *tmppf;
	struct binder_ptr_fixup *pf =
		list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
					 node);

	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
		size_t bytes_copied = 0;

		while (bytes_copied < sgc->length) {
			size_t copy_size;
			size_t bytes_left = sgc->length - bytes_copied;
			size_t offset = sgc->offset + bytes_copied;

			/*
			 * We copy up to the fixup (pointed to by pf)
			 */
			copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
				       : bytes_left;
			if (!ret && copy_size)
				ret = binder_alloc_copy_user_to_buffer(
						alloc, buffer,
						offset,
						sgc->sender_uaddr + bytes_copied,
						copy_size);
			bytes_copied += copy_size;
			if (copy_size != bytes_left) {
				BUG_ON(!pf);
				/* we stopped at a fixup offset */
				if (pf->skip_size) {
					/*
					 * we are just skipping. This is for
					 * BINDER_TYPE_FDA where the translated
					 * fds will be fixed up when we get
					 * to target context.
					 */
					bytes_copied += pf->skip_size;
				} else {
					/* apply the fixup indicated by pf */
					if (!ret)
						ret = binder_alloc_copy_to_buffer(
							alloc, buffer,
							pf->offset,
							&pf->fixup_data,
							sizeof(pf->fixup_data));
					bytes_copied += sizeof(pf->fixup_data);
				}
				list_del(&pf->node);
				kfree(pf);
				pf = list_first_entry_or_null(pf_head,
						struct binder_ptr_fixup, node);
			}
		}
		list_del(&sgc->node);
		kfree(sgc);
	}
	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
		BUG_ON(pf->skip_size == 0);
		list_del(&pf->node);
		kfree(pf);
	}
	BUG_ON(!list_empty(sgc_head));

	return ret > 0 ? -EINVAL : ret;
}

/**
 * binder_cleanup_deferred_txn_lists() - free specified lists
 * @sgc_head:	list_head of scatter-gather copy list
 * @pf_head:	list_head of pointer fixup list
 *
 * Called to clean up @sgc_head and @pf_head if there is an
 * error.
 */
static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
					      struct list_head *pf_head)
{
	struct binder_sg_copy *sgc, *tmpsgc;
	struct binder_ptr_fixup *pf, *tmppf;

	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
		list_del(&sgc->node);
		kfree(sgc);
	}
	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
		list_del(&pf->node);
		kfree(pf);
	}
}
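
/*
 * Worked example (illustrative, made-up offsets) of the copy loop in
 * binder_do_deferred_txn_copies(): one sgc entry covering target
 * offsets [0, 64) with a single pointer fixup at offset 16 on a
 * 64-bit kernel (skip_size == 0, so 8 bytes of fixup_data):
 *
 *	copy  [0, 16)  from sgc->sender_uaddr + 0
 *	write pf->fixup_data at [16, 24)	// translated pointer
 *	copy  [24, 64) from sgc->sender_uaddr + 24
 *
 * With skip_size == 8 instead (a BINDER_TYPE_FDA region), the middle
 * step would leave [16, 24) untouched for the fd fixups applied later
 * in target context.
 */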

/**
 * binder_defer_copy() - queue a scatter-gather buffer for copy
 * @sgc_head:		list_head of scatter-gather copy list
 * @offset:		binder buffer offset in target process
 * @sender_uaddr:	user address in source process
 * @length:		bytes to copy
 *
 * Specify a scatter-gather block to be copied. The actual copy must
 * be deferred until all the needed fixups are identified and queued.
 * Then the copy and fixups are done together so un-translated values
 * from the source are never visible in the target buffer.
 *
 * We are guaranteed that repeated calls to this function will have
 * monotonically increasing @offset values so the list will naturally
 * be ordered.
 *
 * Return: 0=success, else -errno
 */
static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
			     const void __user *sender_uaddr, size_t length)
{
	struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);

	if (!bc)
		return -ENOMEM;

	bc->offset = offset;
	bc->sender_uaddr = sender_uaddr;
	bc->length = length;
	INIT_LIST_HEAD(&bc->node);

	/*
	 * We are guaranteed that the deferred copies are in-order
	 * so just add to the tail.
	 */
	list_add_tail(&bc->node, sgc_head);

	return 0;
}

/**
 * binder_add_fixup() - queue a fixup to be applied to sg copy
 * @pf_head:	list_head of binder ptr fixup list
 * @offset:	binder buffer offset in target process
 * @fixup:	bytes to be copied for fixup
 * @skip_size:	bytes to skip when copying (fixup will be applied later)
 *
 * Add the specified fixup to a list ordered by @offset. When copying
 * the scatter-gather buffers, the fixup will be copied instead of
 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
 * will be applied later (in target process context), so we just skip
 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
 * value in @fixup.
 *
 * This function is called *mostly* in @offset order, but there are
 * exceptions. Since out-of-order inserts are relatively uncommon,
 * we insert the new element by searching backward from the tail of
 * the list.
 *
 * Return: 0=success, else -errno
 */
static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
			    binder_uintptr_t fixup, size_t skip_size)
{
	struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	struct binder_ptr_fixup *tmppf;

	if (!pf)
		return -ENOMEM;

	pf->offset = offset;
	pf->fixup_data = fixup;
	pf->skip_size = skip_size;
	INIT_LIST_HEAD(&pf->node);

	/* Fixups are *mostly* added in-order, but there are some
	 * exceptions. Look backwards through list for insertion point.
	 */
	list_for_each_entry_reverse(tmppf, pf_head, node) {
		if (tmppf->offset < pf->offset) {
			list_add(&pf->node, &tmppf->node);
			return 0;
		}
	}
	/*
	 * if we get here, then the new offset is the lowest so
	 * insert at the head
	 */
	list_add(&pf->node, pf_head);
	return 0;
}
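
/*
 * Illustrative trace (made-up offsets) of the backward search in
 * binder_add_fixup(): with the list already holding fixups at offsets
 * 8 and 40, adding offset 24 walks from the tail, skips 40, stops at
 * 8 (8 < 24) and links the new entry after it:
 *
 *	before:	8 -> 40
 *	after:	8 -> 24 -> 40
 *
 * Adding offset 4 would find no smaller entry and land at the head.
 */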

static int binder_translate_fd_array(struct list_head *pf_head,
				     struct binder_fd_array_object *fda,
				     const void __user *sender_ubuffer,
				     struct binder_buffer_object *parent,
				     struct binder_buffer_object *sender_uparent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size;
	binder_size_t fda_offset;
	const void __user *sender_ufda_base;
	struct binder_proc *proc = thread->proc;
	int ret;

	if (fda->num_fds == 0)
		return 0;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * the source data for binder_buffer_object is visible
	 * to user-space and the @buffer element is the user
	 * pointer to the buffer_object containing the fd_array.
	 * Convert the address to an offset relative to
	 * the base of the transaction buffer.
	 */
	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
		fda->parent_offset;
	sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
		fda->parent_offset;

	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
	    !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
	if (ret)
		return ret;

	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		u32 fd;
		binder_size_t offset = fda_offset + fdi * sizeof(fd);
		binder_size_t sender_uoffset = fdi * sizeof(fd);

		ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
		if (!ret)
			ret = binder_translate_fd(fd, offset, t, thread,
						  in_reply_to);
		if (ret)
			return ret > 0 ? -EINVAL : ret;
	}
	return 0;
}

static int binder_fixup_parent(struct list_head *pf_head,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t off_start_offset,
			       binder_size_t num_valid,
			       binder_size_t last_fixup_obj_off,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_object object;
	binder_size_t buffer_offset;
	binder_size_t parent_offset;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
				     off_start_offset, &parent_offset,
				     num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(target_proc, b, off_start_offset,
				   parent_offset, bp->parent_offset,
				   last_fixup_obj_off,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	buffer_offset = bp->parent_offset +
			(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
	return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
}
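
/*
 * Worked example (illustrative, made-up addresses): if the parent's
 * translated user address parent->buffer is 0x7f0000001040, the target
 * buffer base b->user_data is 0x7f0000001000 and bp->parent_offset is
 * 8, then binder_fixup_parent() queues a fixup at target offset
 * 8 + 0x40 = 0x48, where the translated child pointer (bp->buffer)
 * will be written during the deferred copy.
 */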

/**
 * binder_can_update_transaction() - Can a txn be superseded by an updated one?
 * @t1:	the pending async txn in the frozen process
 * @t2:	the new async txn to supersede the outdated pending one
 *
 * Return:	true if t2 can supersede t1
 *		false if t2 can not supersede t1
 */
static bool binder_can_update_transaction(struct binder_transaction *t1,
					  struct binder_transaction *t2)
{
	if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
	    (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
		return false;
	if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
	    t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
	    t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
	    t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
		return true;
	return false;
}

/**
 * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
 * @t:		 new async transaction
 * @target_list: list to find outdated transaction
 *
 * Return: the outdated transaction if found
 *	   NULL if no outdated transaction can be found
 *
 * Requires the proc->inner_lock to be held.
 */
static struct binder_transaction *
binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
					 struct list_head *target_list)
{
	struct binder_work *w;

	list_for_each_entry(w, target_list, entry) {
		struct binder_transaction *t_queued;

		if (w->type != BINDER_WORK_TRANSACTION)
			continue;
		t_queued = container_of(w, struct binder_transaction, work);
		if (binder_can_update_transaction(t_queued, t))
			return t_queued;
	}
	return NULL;
}
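
/*
 * Illustrative scenario (not new behaviour): a status daemon
 * repeatedly sends one-way transactions carrying
 * TF_ONE_WAY | TF_UPDATE_TXN with the same code to a node in a frozen
 * process. When a new update arrives, the helpers above find the
 * still-queued older update on node->async_todo (same tsk, code,
 * flags, buffer pid and target node) so it can be dropped, and the
 * frozen process later wakes to only the most recent state instead of
 * a backlog of stale updates.
 */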

/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t:		transaction to send
 * @proc:	process to send the transaction to
 * @thread:	thread in @proc to send the transaction to (may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the waitlist of that specific thread.
 *
 * Return:	0 if the transaction was successfully queued
 *		BR_DEAD_REPLY if the target process or thread is dead
 *		BR_FROZEN_REPLY if the target process or thread is frozen and
 *			the sync transaction was rejected
 *		BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
 *			and the async transaction was successfully queued
 */
static int binder_proc_transaction(struct binder_transaction *t,
				   struct binder_proc *proc,
				   struct binder_thread *thread)
{
	struct binder_node *node = t->buffer->target_node;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool pending_async = false;
	struct binder_transaction *t_outdated = NULL;
	bool frozen = false;

	BUG_ON(!node);
	binder_node_lock(node);
	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction)
			pending_async = true;
		else
			node->has_async_transaction = true;
	}

	binder_inner_proc_lock(proc);
	if (proc->is_frozen) {
		frozen = true;
		proc->sync_recv |= !oneway;
		proc->async_recv |= oneway;
	}

	if ((frozen && !oneway) || proc->is_dead ||
	    (thread && thread->is_dead)) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
	}

	if (!thread && !pending_async)
		thread = binder_select_thread_ilocked(proc);

	if (thread) {
		binder_enqueue_thread_work_ilocked(thread, &t->work);
	} else if (!pending_async) {
		binder_enqueue_work_ilocked(&t->work, &proc->todo);
	} else {
		if ((t->flags & TF_UPDATE_TXN) && frozen) {
			t_outdated = binder_find_outdated_transaction_ilocked(t,
									      &node->async_todo);
			if (t_outdated) {
				binder_debug(BINDER_DEBUG_TRANSACTION,
					     "txn %d supersedes %d\n",
					     t->debug_id, t_outdated->debug_id);
				list_del_init(&t_outdated->work.entry);
				proc->outstanding_txns--;
			}
		}
		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
	}

	if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

	proc->outstanding_txns++;
	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	/*
	 * To reduce potential contention, free the outdated transaction and
	 * buffer after releasing the locks.
	 */
	if (t_outdated) {
		struct binder_buffer *buffer = t_outdated->buffer;

		t_outdated->buffer = NULL;
		buffer->transaction = NULL;
		trace_binder_transaction_update_buffer_release(buffer);
		binder_release_entire_buffer(proc, NULL, buffer, false);
		binder_alloc_free_buf(&proc->alloc, buffer);
		kfree(t_outdated);
		binder_stats_deleted(BINDER_STAT_TRANSACTION);
	}

	if (oneway && frozen)
		return BR_TRANSACTION_PENDING_FROZEN;

	return 0;
}
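
/*
 * Illustrative summary (restating the common cases of the logic above,
 * not adding to it) of how binder_proc_transaction() resolves queueing:
 *
 *	target state		sync transaction	async transaction
 *	------------------	-----------------	-----------------
 *	alive, not frozen	queued, returns 0	queued, returns 0
 *	frozen			rejected,		queued on async_todo,
 *				BR_FROZEN_REPLY		BR_TRANSACTION_PENDING_FROZEN
 *	dead			rejected,		rejected,
 *				BR_DEAD_REPLY		BR_DEAD_REPLY
 */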

/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:	struct binder_node for which to get refs
 * @procp:	returns @node->proc if valid
 * @error:	if no @procp then returns BR_DEAD_REPLY
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
 * target proc has died, @error is set to BR_DEAD_REPLY.
 */
static struct binder_node *binder_get_node_refs_for_txn(
		struct binder_node *node,
		struct binder_proc **procp,
		uint32_t *error)
{
	struct binder_node *target_node = NULL;

	binder_node_inner_lock(node);
	if (node->proc) {
		target_node = node;
		binder_inc_node_nilocked(node, 1, 0, NULL);
		binder_inc_node_tmpref_ilocked(node);
		node->proc->tmp_ref++;
		*procp = node->proc;
	} else
		*error = BR_DEAD_REPLY;
	binder_node_inner_unlock(node);

	return target_node;
}

static void binder_set_txn_from_error(struct binder_transaction *t, int id,
				      uint32_t command, int32_t param)
{
	struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);

	if (!from) {
		/* annotation for sparse */
		__release(&from->proc->inner_lock);
		return;
	}

	/* don't override existing errors */
	if (from->ee.command == BR_OK)
		binder_set_extended_error(&from->ee, id, command, param);
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
}
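
/*
 * Overview sketch of binder_transaction() below (a reading aid that
 * summarizes the function, not a specification):
 *
 *	1. resolve the target: for a reply, the thread that sent
 *	   in_reply_to; otherwise handle -> ref -> node, or the
 *	   context manager node
 *	2. allocate struct binder_transaction plus the tcomplete work,
 *	   and a buffer in the target's binder_alloc space
 *	3. copy the offset array, then walk it object by object,
 *	   translating BINDER/HANDLE/FD/FDA/PTR objects and queueing
 *	   deferred sg copies and pointer fixups
 *	4. apply the deferred copies/fixups, then enqueue t->work on
 *	   the target and tcomplete on the sender, or unwind through
 *	   the err_* labels on failure
 */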

static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *w;
	struct binder_work *tcomplete;
	binder_size_t buffer_offset = 0;
	binder_size_t off_start_offset, off_end_offset;
	binder_size_t off_min;
	binder_size_t sg_buf_offset, sg_buf_end_offset;
	binder_size_t user_offset = 0;
	struct binder_proc *target_proc = NULL;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	binder_size_t last_fixup_obj_off = 0;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);
	ktime_t t_start_time = ktime_get();
	char *secctx = NULL;
	u32 secctx_sz = 0;
	struct list_head sgc_head;
	struct list_head pf_head;
	const void __user *user_buffer = (const void __user *)
				(uintptr_t)tr->data.ptr.buffer;
	INIT_LIST_HEAD(&sgc_head);
	INIT_LIST_HEAD(&pf_head);

	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);

	binder_inner_proc_lock(proc);
	binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
	binder_inner_proc_unlock(proc);

	if (reply) {
		binder_inner_proc_lock(proc);
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_inner_proc_unlock(proc);
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_empty_call_stack;
		}
		if (in_reply_to->to_thread != thread) {
			spin_lock(&in_reply_to->lock);
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			spin_unlock(&in_reply_to->lock);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		binder_inner_proc_unlock(proc);
		binder_set_nice(in_reply_to->saved_priority);
		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
		if (target_thread == NULL) {
			/* annotation for sparse */
			__release(&target_thread->proc->inner_lock);
			binder_txn_error("%d:%d reply target not found\n",
				thread->pid, proc->pid);
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
					  proc->pid, thread->pid,
					  target_thread->transaction_stack ?
					  target_thread->transaction_stack->debug_id : 0,
					  in_reply_to->debug_id);
			binder_inner_proc_unlock(target_thread->proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
		target_proc->tmp_ref++;
		binder_inner_proc_unlock(target_thread->proc);
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, tr->target.handle,
						     true);
			if (ref) {
				target_node = binder_get_node_refs_for_txn(
						ref->node, &target_proc,
						&return_error);
			} else {
				binder_user_error("%d:%d got transaction to invalid handle, %u\n",
						  proc->pid, thread->pid, tr->target.handle);
				return_error = BR_FAILED_REPLY;
			}
			binder_proc_unlock(proc);
		} else {
			mutex_lock(&context->context_mgr_node_lock);
			target_node = context->binder_context_mgr_node;
			if (target_node)
				target_node = binder_get_node_refs_for_txn(
						target_node, &target_proc,
						&return_error);
			else
				return_error = BR_DEAD_REPLY;
			mutex_unlock(&context->context_mgr_node_lock);
			if (target_node && target_proc->pid == proc->pid) {
				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_invalid_target_handle;
			}
		}
		if (!target_node) {
			binder_txn_error("%d:%d cannot find target node\n",
				thread->pid, proc->pid);
			/*
			 * return_error is set above
			 */
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		e->to_node = target_node->debug_id;
		if (WARN_ON(proc == target_proc)) {
			binder_txn_error("%d:%d self transactions not allowed\n",
				thread->pid, proc->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		if (security_binder_transaction(proc->cred,
						target_proc->cred) < 0) {
			binder_txn_error("%d:%d transaction credentials failed\n",
				thread->pid, proc->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPERM;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		binder_inner_proc_lock(proc);

		w = list_first_entry_or_null(&thread->todo,
					     struct binder_work, entry);
		if (!(tr->flags & TF_ONE_WAY) && w &&
		    w->type == BINDER_WORK_TRANSACTION) {
			/*
			 * Do not allow new outgoing transaction from a
			 * thread that has a transaction at the head of
			 * its todo list. Only need to check the head
			 * because binder_select_thread_ilocked picks a
			 * thread from proc->waiting_threads to enqueue
			 * the transaction, and nothing is queued to the
			 * todo list while the thread is on waiting_threads.
			 */
			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
					  proc->pid, thread->pid);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_bad_todo_list;
		}

		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				spin_lock(&tmp->lock);
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				spin_unlock(&tmp->lock);
				binder_inner_proc_unlock(proc);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EPROTO;
				return_error_line = __LINE__;
				goto err_bad_call_stack;
			}
			while (tmp) {
				struct binder_thread *from;

				spin_lock(&tmp->lock);
				from = tmp->from;
				if (from && from->proc == target_proc) {
					atomic_inc(&from->tmp_ref);
					target_thread = from;
					spin_unlock(&tmp->lock);
					break;
				}
				spin_unlock(&tmp->lock);
				tmp = tmp->from_parent;
			}
		}
		binder_inner_proc_unlock(proc);
	}
	if (target_thread)
		e->to_thread = target_thread->pid;
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		binder_txn_error("%d:%d cannot allocate transaction\n",
			thread->pid, proc->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	INIT_LIST_HEAD(&t->fd_fixups);
	binder_stats_created(BINDER_STAT_TRANSACTION);
	spin_lock_init(&t->lock);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		binder_txn_error("%d:%d cannot allocate work for transaction\n",
			thread->pid, proc->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;
	t->start_time = t_start_time;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->from_pid = proc->pid;
	t->from_tid = thread->pid;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	if (target_node && target_node->txn_security_ctx) {
		u32 secid;
		size_t added_size;

		security_cred_getsecid(proc->cred, &secid);
		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
		if (ret) {
			binder_txn_error("%d:%d failed to get security context\n",
				thread->pid, proc->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = ret;
			return_error_line = __LINE__;
			goto err_get_secctx_failed;
		}
		added_size = ALIGN(secctx_sz, sizeof(u64));
		extra_buffers_size += added_size;
		if (extra_buffers_size < added_size) {
			binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
				thread->pid, proc->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_extra_size;
		}
	}

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY), current->tgid);
	if (IS_ERR(t->buffer)) {
		char *s;

		ret = PTR_ERR(t->buffer);
		s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
			: (ret == -ENOSPC) ? ": no space left"
			: (ret == -ENOMEM) ? ": memory allocation failed"
			: "";
		binder_txn_error("cannot allocate buffer%s", s);

		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	if (secctx) {
		int err;
		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
				    ALIGN(tr->offsets_size, sizeof(void *)) +
				    ALIGN(extra_buffers_size, sizeof(void *)) -
				    ALIGN(secctx_sz, sizeof(u64));

		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
						  t->buffer, buf_offset,
						  secctx, secctx_sz);
		if (err) {
			t->security_ctx = 0;
			WARN_ON(1);
		}
		security_release_secctx(secctx, secctx_sz);
		secctx = NULL;
	}
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
	trace_binder_transaction_alloc_buf(t->buffer);

	if (binder_alloc_copy_user_to_buffer(
				&target_proc->alloc,
				t->buffer,
				ALIGN(tr->data_size, sizeof(void *)),
				(const void __user *)
					(uintptr_t)tr->data.ptr.offsets,
				tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				  proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				  proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
	buffer_offset = off_start_offset;
	off_end_offset = off_start_offset + tr->offsets_size;
	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
		ALIGN(secctx_sz, sizeof(u64));
	off_min = 0;
	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size;
		struct binder_object object;
		binder_size_t object_offset;
		binder_size_t copy_size;

		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
						  &object_offset,
						  t->buffer,
						  buffer_offset,
						  sizeof(object_offset))) {
			binder_txn_error("%d:%d copy offset from buffer failed\n",
				thread->pid, proc->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		/*
		 * Copy the source user buffer up to the next object
		 * that will be processed.
		 */
		copy_size = object_offset - user_offset;
		if (copy_size && (user_offset > object_offset ||
				binder_alloc_copy_user_to_buffer(
					&target_proc->alloc,
					t->buffer, user_offset,
					user_buffer + user_offset,
					copy_size))) {
			binder_user_error("%d:%d got transaction with invalid data ptr\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EFAULT;
			return_error_line = __LINE__;
			goto err_copy_data_failed;
		}
		object_size = binder_get_object(target_proc, user_buffer,
						t->buffer, object_offset, &object);
		if (object_size == 0 || object_offset < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid,
					  (u64)object_offset,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}
		/*
		 * Set offset to the next buffer fragment to be
		 * copied
		 */
		user_offset = object_offset + object_size;

		hdr = &object.hdr;
		off_min = object_offset + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);

			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				binder_txn_error("%d:%d translate binder failed\n",
					thread->pid, proc->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				binder_txn_error("%d:%d translate handle failed\n",
					thread->pid, proc->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			binder_size_t fd_offset = object_offset +
				(uintptr_t)&fp->fd - (uintptr_t)fp;
			int ret = binder_translate_fd(fp->fd, fd_offset, t,
						      thread, in_reply_to);

			fp->pad_binder = 0;
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				binder_txn_error("%d:%d translate fd failed\n",
					thread->pid, proc->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_object ptr_object;
			binder_size_t parent_offset;
			struct binder_object user_object;
			size_t user_parent_size;
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			size_t num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
			struct binder_buffer_object *parent =
				binder_validate_ptr(target_proc, t->buffer,
						    &ptr_object, fda->parent,
						    off_start_offset,
						    &parent_offset,
						    num_valid);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(target_proc, t->buffer,
						   off_start_offset,
						   parent_offset,
						   fda->parent_offset,
						   last_fixup_obj_off,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			/*
			 * We need to read the user version of the parent
			 * object to get the original user offset
			 */
			user_parent_size =
				binder_get_object(proc, user_buffer, t->buffer,
						  parent_offset, &user_object);
			if (user_parent_size != sizeof(user_object.bbo)) {
				binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
						  proc->pid, thread->pid,
						  user_parent_size,
						  sizeof(user_object.bbo));
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(&pf_head, fda,
							user_buffer, parent,
							&user_object.bbo, t,
							thread, in_reply_to);
			if (!ret)
				ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
								  t->buffer,
								  object_offset,
								  fda, sizeof(*fda));
			if (ret) {
				binder_txn_error("%d:%d translate fd array failed\n",
					thread->pid, proc->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = ret > 0 ? -EINVAL : ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj_off = parent_offset;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
			size_t num_valid;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			ret = binder_defer_copy(&sgc_head, sg_buf_offset,
				(const void __user *)(uintptr_t)bp->buffer,
				bp->length);
			if (ret) {
				binder_txn_error("%d:%d deferred copy failed\n",
					thread->pid, proc->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)
				t->buffer->user_data + sg_buf_offset;
			sg_buf_offset += ALIGN(bp->length, sizeof(u64));

			num_valid = (buffer_offset - off_start_offset) /
					sizeof(binder_size_t);
			ret = binder_fixup_parent(&pf_head, t,
						  thread, bp,
						  off_start_offset,
						  num_valid,
						  last_fixup_obj_off,
						  last_fixup_min_off);
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							bp, sizeof(*bp))) {
				binder_txn_error("%d:%d failed to fixup parent\n",
					thread->pid, proc->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj_off = object_offset;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
					  proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
	/* Done processing objects, copy the rest of the buffer */
	if (binder_alloc_copy_user_to_buffer(
				&target_proc->alloc,
				t->buffer, user_offset,
				user_buffer + user_offset,
				tr->data_size - user_offset)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				  proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}

	ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
					    &sgc_head, &pf_head);
	if (ret) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				  proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = ret;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (t->buffer->oneway_spam_suspect)
		tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
	else
		tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	t->work.type = BINDER_WORK_TRANSACTION;

	if (reply) {
		binder_enqueue_thread_work(thread, tcomplete);
		binder_inner_proc_lock(target_proc);
		if (target_thread->is_dead) {
			return_error = BR_DEAD_REPLY;
			binder_inner_proc_unlock(target_proc);
			goto err_dead_proc_or_thread;
		}
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);
		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
		target_proc->outstanding_txns++;
		binder_inner_proc_unlock(target_proc);
		wake_up_interruptible_sync(&target_thread->wait);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_inner_proc_lock(proc);
		/*
		 * Defer the TRANSACTION_COMPLETE, so we don't return to
		 * userspace immediately; this allows the target process to
		 * immediately start processing this transaction, reducing
		 * latency. We will then return the TRANSACTION_COMPLETE when
		 * the target replies (or there is an error).
		 */
		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
		binder_inner_proc_unlock(proc);
		return_error = binder_proc_transaction(t,
				target_proc, target_thread);
		if (return_error) {
			binder_inner_proc_lock(proc);
			binder_pop_transaction_ilocked(thread, t);
			binder_inner_proc_unlock(proc);
			goto err_dead_proc_or_thread;
		}
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		return_error = binder_proc_transaction(t, target_proc, NULL);
		/*
		 * Let the caller know when async transaction reaches a frozen
		 * process and is put in a pending queue, waiting for the target
		 * process to be unfrozen.
		 */
		if (return_error == BR_TRANSACTION_PENDING_FROZEN)
			tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
		binder_enqueue_thread_work(thread, tcomplete);
		if (return_error &&
		    return_error != BR_TRANSACTION_PENDING_FROZEN)
			goto err_dead_proc_or_thread;
	}
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	binder_proc_dec_tmpref(target_proc);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;

err_dead_proc_or_thread:
	binder_txn_error("%d:%d dead process or thread\n",
		thread->pid, proc->pid);
	return_error_line = __LINE__;
	binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
	binder_free_txn_fixups(t);
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, NULL, t->buffer,
					  buffer_offset, true);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	target_node = NULL;
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
err_bad_extra_size:
	if (secctx)
		security_release_secctx(secctx, secctx_sz);
err_get_secctx_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_todo_list:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
	if (target_node) {
		binder_dec_node(target_node, 1, 0);
		binder_dec_node_tmpref(target_node);
	}

	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
		     proc->pid, thread->pid, reply ? "reply" :
		     (tr->flags & TF_ONE_WAY ? "async" : "call"),
		     target_proc ? target_proc->pid : 0,
		     target_thread ? target_thread->pid : 0,
		     t_debug_id, return_error, return_error_param,
		     (u64)tr->data_size, (u64)tr->offsets_size,
		     return_error_line);

	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	if (target_proc)
		binder_proc_dec_tmpref(target_proc);

	{
		struct binder_transaction_log_entry *fe;

		e->return_error = return_error;
		e->return_error_param = return_error_param;
		e->return_error_line = return_error_line;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		binder_set_txn_from_error(in_reply_to, t_debug_id,
				return_error, return_error_param);
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		binder_inner_proc_lock(proc);
		binder_set_extended_error(&thread->ee, t_debug_id,
				return_error, return_error_param);
		binder_inner_proc_unlock(proc);
		thread->return_error.cmd = return_error;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
	}
}
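/*
 * Example (illustrative only, not part of the driver): when a transaction
 * fails, the sending thread receives the return_error command (e.g.
 * BR_FAILED_REPLY) on its next read, and can fetch the extended error
 * recorded in thread->ee above. A minimal userspace sketch, assuming "fd"
 * is an already-open binder descriptor (a hypothetical name):
 *
 *	struct binder_extended_error ee;
 *
 *	if (ioctl(fd, BINDER_GET_EXTENDED_ERROR, &ee) == 0)
 *		fprintf(stderr, "txn %u failed: cmd %u param %d\n",
 *			ee.id, ee.command, ee.param);
 */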
/**
 * binder_free_buf() - free the specified buffer
 * @proc:	binder proc that owns buffer
 * @thread:	binder thread performing the buffer release
 * @buffer:	buffer to be freed
 * @is_failure:	failed to send transaction
 *
 * If the buffer is for an async transaction, enqueue the next async
 * transaction from the node.
 *
 * Cleanup buffer and free it.
 */
static void
binder_free_buf(struct binder_proc *proc,
		struct binder_thread *thread,
		struct binder_buffer *buffer, bool is_failure)
{
	binder_inner_proc_lock(proc);
	if (buffer->transaction) {
		buffer->transaction->buffer = NULL;
		buffer->transaction = NULL;
	}
	binder_inner_proc_unlock(proc);
	if (buffer->async_transaction && buffer->target_node) {
		struct binder_node *buf_node;
		struct binder_work *w;

		buf_node = buffer->target_node;
		binder_node_inner_lock(buf_node);
		BUG_ON(!buf_node->has_async_transaction);
		BUG_ON(buf_node->proc != proc);
		w = binder_dequeue_work_head_ilocked(
				&buf_node->async_todo);
		if (!w) {
			buf_node->has_async_transaction = false;
		} else {
			binder_enqueue_work_ilocked(
					w, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
		binder_node_inner_unlock(buf_node);
	}
	trace_binder_transaction_buffer_release(buffer);
	binder_release_entire_buffer(proc, thread, buffer, is_failure);
	binder_alloc_free_buf(&proc->alloc, buffer);
}

static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error.cmd == BR_OK) {
		int ret;

		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
			ret = -1;
			if (increment && !target) {
				struct binder_node *ctx_mgr_node;

				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node) {
					if (ctx_mgr_node->proc == proc) {
						binder_user_error("%d:%d context manager tried to acquire desc 0\n",
								  proc->pid, thread->pid);
						mutex_unlock(&context->context_mgr_node_lock);
						return -EINVAL;
					}
					ret = binder_inc_ref_for_node(
							proc, ctx_mgr_node,
							strong, NULL, &rdata);
				}
				mutex_unlock(&context->context_mgr_node_lock);
			}
			if (ret)
				ret = binder_update_ref_for_handle(
						proc, target, increment, strong,
						&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
						  proc->pid, thread->pid,
						  target, rdata.desc);
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				break;
			case BC_RELEASE:
				debug_string = "Release";
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				break;
			}
			if (ret) {
				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
						  proc->pid, thread->pid, debug_string,
						  strong, target, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d\n",
				     proc->pid, thread->pid, debug_string,
				     rdata.debug_id, rdata.desc, rdata.strong,
				     rdata.weak);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;
			bool free_node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
						  proc->pid, thread->pid,
						  cmd == BC_INCREFS_DONE ?
						  "BC_INCREFS_DONE" :
						  "BC_ACQUIRE_DONE",
						  (u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
						  proc->pid, thread->pid,
						  cmd == BC_INCREFS_DONE ?
						  "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
						  (u64)node_ptr, node->debug_id,
						  (u64)cookie, (u64)node->cookie);
				binder_put_node(node);
				break;
			}
			binder_node_inner_lock(node);
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
							  proc->pid, thread->pid,
							  node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
							  proc->pid, thread->pid,
							  node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_weak_ref = 0;
			}
			free_node = binder_dec_node_nilocked(node,
					cmd == BC_ACQUIRE_DONE, 0);
			WARN_ON(free_node);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d tr %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs,
				     node->local_weak_refs, node->tmp_refs);
			binder_node_inner_unlock(node);
			binder_put_node(node);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_alloc_prepare_to_free(&proc->alloc,
							      data_ptr);
			if (IS_ERR_OR_NULL(buffer)) {
				if (PTR_ERR(buffer) == -EPERM) {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				} else {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				}
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");
			binder_free_buf(proc, thread, buffer, false);
			break;
		}

		case BC_TRANSACTION_SG:
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
					   cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}
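		/*
		 * Example (illustrative only, not part of the driver): a
		 * client submits the BC_TRANSACTION handled above by packing
		 * the command and a binder_transaction_data into the write
		 * buffer of a BINDER_WRITE_READ ioctl. A minimal userspace
		 * sketch, assuming "fd" is an open binder descriptor,
		 * "handle" names the target, and method code 1 is a
		 * hypothetical choice:
		 *
		 *	struct {
		 *		uint32_t cmd;
		 *		struct binder_transaction_data tr;
		 *	} __attribute__((packed)) w = {
		 *		.cmd = BC_TRANSACTION,
		 *		.tr = { .target.handle = handle, .code = 1 },
		 *	};
		 *	struct binder_write_read bwr = {
		 *		.write_size = sizeof(w),
		 *		.write_buffer = (binder_uintptr_t)&w,
		 *	};
		 *
		 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
		 */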
		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			binder_inner_proc_lock(proc);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
						  proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
						  proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			binder_inner_proc_unlock(proc);
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
						  proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death = NULL;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					binder_enqueue_thread_work(
						thread,
						&thread->return_error.work);
					binder_debug(
						BINDER_DEBUG_FAILED_TRANSACTION,
						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						proc->pid, thread->pid);
					break;
				}
			}
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
						  proc->pid, thread->pid,
						  cmd == BC_REQUEST_DEATH_NOTIFICATION ?
						  "BC_REQUEST_DEATH_NOTIFICATION" :
						  "BC_CLEAR_DEATH_NOTIFICATION",
						  target);
				binder_proc_unlock(proc);
				kfree(death);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->data.debug_id,
				     ref->data.desc, ref->data.strong,
				     ref->data.weak, ref->node->debug_id);

			binder_node_lock(ref->node);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
							  proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					kfree(death);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;

					binder_inner_proc_lock(proc);
					binder_enqueue_work_ilocked(
						&ref->death->work, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
					binder_inner_proc_unlock(proc);
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
							  proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
							  proc->pid, thread->pid,
							  (u64)death->cookie,
							  (u64)cookie);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				ref->death = NULL;
				binder_inner_proc_lock(proc);
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_thread_work_ilocked(
								thread,
								&death->work);
					else {
						binder_enqueue_work_ilocked(
								&death->work,
								&proc->todo);
						binder_wakeup_proc_ilocked(
								proc);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
				binder_inner_proc_unlock(proc);
			}
			binder_node_unlock(ref->node);
			binder_proc_unlock(proc);
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			binder_inner_proc_lock(proc);
			list_for_each_entry(w, &proc->delivered_death,
					    entry) {
				struct binder_ref_death *tmp_death =
					container_of(w,
						     struct binder_ref_death,
						     work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
						  proc->pid, thread->pid, (u64)cookie);
				binder_inner_proc_unlock(proc);
				break;
			}
			binder_dequeue_work_ilocked(&death->work);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper &
				    (BINDER_LOOPER_STATE_REGISTERED |
				     BINDER_LOOPER_STATE_ENTERED))
					binder_enqueue_thread_work_ilocked(
							thread, &death->work);
				else {
					binder_enqueue_work_ilocked(
							&death->work,
							&proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
			}
			binder_inner_proc_unlock(proc);
		} break;

		default:
			pr_err("%d:%d unknown command %u\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}
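/*
 * Note (illustrative summary, not part of the driver): the death-notification
 * commands parsed above form a three-step protocol. Sketch of the userspace
 * side, assuming "handle" refers to a remote node and "cookie" is an opaque
 * value chosen by the client:
 *
 *	1. write BC_REQUEST_DEATH_NOTIFICATION {handle, cookie}
 *	2. when the remote process dies, read BR_DEAD_BINDER {cookie}
 *	3. write BC_DEAD_BINDER_DONE {cookie} so the driver can drop the
 *	   entry it keeps on proc->delivered_death
 *
 * Clearing uses BC_CLEAR_DEATH_NOTIFICATION with the same pair and is
 * acknowledged with BR_CLEAR_DEATH_NOTIFICATION_DONE.
 */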
static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}

static int binder_put_node_cmd(struct binder_proc *proc,
			       struct binder_thread *thread,
			       void __user **ptrp,
			       binder_uintptr_t node_ptr,
			       binder_uintptr_t node_cookie,
			       int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{
	void __user *ptr = *ptrp;

	if (put_user(cmd, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);

	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	binder_stat_br(proc, thread, cmd);
	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
		     proc->pid, thread->pid, cmd_name, node_debug_id,
		     (u64)node_ptr, (u64)node_cookie);

	*ptrp = ptr;
	return 0;
}

static int binder_wait_for_work(struct binder_thread *thread,
				bool do_proc_work)
{
	DEFINE_WAIT(wait);
	struct binder_proc *proc = thread->proc;
	int ret = 0;

	binder_inner_proc_lock(proc);
	for (;;) {
		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
		if (binder_has_work_ilocked(thread, do_proc_work))
			break;
		if (do_proc_work)
			list_add(&thread->waiting_thread_node,
				 &proc->waiting_threads);
		binder_inner_proc_unlock(proc);
		schedule();
		binder_inner_proc_lock(proc);
		list_del_init(&thread->waiting_thread_node);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
	}
	finish_wait(&thread->wait, &wait);
	binder_inner_proc_unlock(proc);

	return ret;
}

/**
 * binder_apply_fd_fixups() - finish fd translation
 * @proc:	binder_proc associated with @t->buffer
 * @t:		binder transaction with list of fd fixups
 *
 * Now that we are in the context of the transaction target
 * process, we can allocate and install fds. Process the
 * list of fds to translate and fix up the buffer with the
 * new fds first and only then install the files.
 *
 * If we fail to allocate an fd, skip the install and release
 * any fds that have already been allocated.
 */
static int binder_apply_fd_fixups(struct binder_proc *proc,
				  struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;
	int ret = 0;

	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
		int fd = get_unused_fd_flags(O_CLOEXEC);

		if (fd < 0) {
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "failed fd fixup txn %d fd %d\n",
				     t->debug_id, fd);
			ret = -ENOMEM;
			goto err;
		}
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "fd fixup txn %d fd %d\n",
			     t->debug_id, fd);
		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
		fixup->target_fd = fd;
		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
						fixup->offset, &fd,
						sizeof(u32))) {
			ret = -EINVAL;
			goto err;
		}
	}
	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fd_install(fixup->target_fd, fixup->file);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}

	return ret;

err:
	binder_free_txn_fixups(t);
	return ret;
}
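/*
 * Example (illustrative only, not part of the driver): after the fixups
 * above, each BINDER_TYPE_FD object in the delivered buffer carries an fd
 * that is already valid in the receiving process. A sketch of the receiver,
 * assuming "obj" (a hypothetical name) points at an object whose offset was
 * taken from the transaction's offsets array:
 *
 *	struct binder_fd_object *fdo = (struct binder_fd_object *)obj;
 *
 *	if (fdo->hdr.type == BINDER_TYPE_FD)
 *		read(fdo->fd, buf, sizeof(buf));	// fd usable as-is
 */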
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
					  proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
	}

	if (non_block) {
		if (!binder_has_work(thread, wait_for_proc_work))
			ret = -EAGAIN;
	} else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}

	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data_secctx tr;
		struct binder_transaction_data *trd = &tr.transaction_data;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;
		size_t trsize = sizeof(*trd);

		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			 wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);
		if (binder_worklist_empty_ilocked(&thread->todo))
			thread->process_todo = false;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			WARN_ON(e->cmd == BR_OK);
			binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			cmd = e->cmd;
			e->cmd = BR_OK;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE:
		case BINDER_WORK_TRANSACTION_PENDING:
		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
			if (proc->oneway_spam_detection_enabled &&
			    w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
				cmd = BR_ONEWAY_SPAM_SUSPECT;
			else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
				cmd = BR_TRANSACTION_PENDING_FROZEN;
			else
				cmd = BR_TRANSACTION_COMPLETE;
			binder_inner_proc_unlock(proc);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
		} break;
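		/*
		 * Note (illustrative only): BINDER_WORK_NODE below turns node
		 * state into the four BR_ ref commands, and userspace must
		 * acknowledge the two that add references:
		 *
		 *	read BR_INCREFS {ptr, cookie} -> write BC_INCREFS_DONE {ptr, cookie}
		 *	read BR_ACQUIRE {ptr, cookie} -> write BC_ACQUIRE_DONE {ptr, cookie}
		 *
		 * BR_RELEASE and BR_DECREFS need no reply; the pending_*_ref
		 * flags set below are cleared by the BC_*_DONE handlers in
		 * binder_thread_write().
		 */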
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			int strong, weak;
			binder_uintptr_t node_ptr = node->ptr;
			binder_uintptr_t node_cookie = node->cookie;
			int node_debug_id = node->debug_id;
			int has_weak_ref;
			int has_strong_ref;
			void __user *orig_ptr = ptr;

			BUG_ON(proc != node->proc);
			strong = node->internal_strong_refs ||
					node->local_strong_refs;
			weak = !hlist_empty(&node->refs) ||
					node->local_weak_refs ||
					node->tmp_refs || strong;
			has_strong_ref = node->has_strong_ref;
			has_weak_ref = node->has_weak_ref;

			if (weak && !has_weak_ref) {
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			}
			if (strong && !has_strong_ref) {
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			}
			if (!strong && has_strong_ref)
				node->has_strong_ref = 0;
			if (!weak && has_weak_ref)
				node->has_weak_ref = 0;
			if (!weak && !strong) {
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx deleted\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_inner_proc_unlock(proc);
				binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
				binder_node_unlock(node);
				binder_free_node(node);
			} else
				binder_inner_proc_unlock(proc);

			if (weak && !has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_INCREFS, "BR_INCREFS");
			if (!ret && strong && !has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_ACQUIRE, "BR_ACQUIRE");
			if (!ret && !strong && has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_RELEASE, "BR_RELEASE");
			if (!ret && !weak && has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_DECREFS, "BR_DECREFS");
			if (orig_ptr == ptr)
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		default:
			binder_inner_proc_unlock(proc);
			pr_err("%d:%d: bad work type %d\n",
			       proc->pid, thread->pid, w->type);
			break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;

			trd->target.ptr = target_node->ptr;
			trd->cookie = target_node->cookie;
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			trd->target.ptr = 0;
			trd->cookie = 0;
			cmd = BR_REPLY;
		}
		trd->code = t->code;
		trd->flags = t->flags;
		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			trd->sender_pid =
				task_tgid_nr_ns(sender,
						task_active_pid_ns(current));
		} else {
			trd->sender_pid = 0;
		}

		ret = binder_apply_fd_fixups(proc, t);
		if (ret) {
			struct binder_buffer *buffer = t->buffer;
			bool oneway = !!(t->flags & TF_ONE_WAY);
			int tid = t->debug_id;

			if (t_from)
				binder_thread_dec_tmpref(t_from);
			buffer->transaction = NULL;
			binder_cleanup_transaction(t, "fd fixups failed",
						   BR_FAILED_REPLY);
			binder_free_buf(proc, thread, buffer, true);
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
				     proc->pid, thread->pid,
				     oneway ? "async " :
					(cmd == BR_REPLY ? "reply " : ""),
				     tid, BR_FAILED_REPLY, ret, __LINE__);
			if (cmd == BR_REPLY) {
				cmd = BR_FAILED_REPLY;
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				binder_stat_br(proc, thread, cmd);
				break;
			}
			continue;
		}
		trd->data_size = t->buffer->data_size;
		trd->offsets_size = t->buffer->offsets_size;
		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
		trd->data.ptr.offsets = trd->data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));

		tr.secctx = t->security_ctx;
		if (t->security_ctx) {
			cmd = BR_TRANSACTION_SEC_CTX;
			trsize = sizeof(tr);
		}
		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, trsize)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += trsize;

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     (cmd == BR_TRANSACTION_SEC_CTX) ?
				"BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)trd->data.ptr.buffer,
			     (u64)trd->data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
	if (proc->requested_threads == 0 &&
	    list_empty(&thread->proc->waiting_threads) &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED))
	    /* the user-space code fails to spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}
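/*
 * Example (illustrative only, not part of the driver): a looper thread
 * consumes the stream produced above by reading into a buffer and walking
 * the BR_ commands. A minimal userspace sketch, assuming "fd" is an open
 * binder descriptor and "buf" (hypothetical) is suitably sized:
 *
 *	struct binder_write_read bwr = {
 *		.read_size = sizeof(buf),
 *		.read_buffer = (binder_uintptr_t)buf,
 *	};
 *
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 *	for (char *p = buf; p < buf + bwr.read_consumed; ) {
 *		uint32_t cmd = *(uint32_t *)p;
 *		p += sizeof(uint32_t);
 *		switch (cmd) {
 *		case BR_NOOP:
 *			break;
 *		case BR_TRANSACTION:
 *			// handle the binder_transaction_data, then advance
 *			p += sizeof(struct binder_transaction_data);
 *			break;
 *		// ... remaining BR_ commands elided
 *		}
 *	}
 */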
static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;
	enum binder_work_type wtype;

	while (1) {
		binder_inner_proc_lock(proc);
		w = binder_dequeue_work_head_ilocked(list);
		wtype = w ? w->type : 0;
		binder_inner_proc_unlock(proc);
		if (!w)
			return;

		switch (wtype) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);

			binder_cleanup_transaction(t, "process died.",
						   BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_ERROR: %u\n",
				e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_PENDING:
		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		case BINDER_WORK_NODE:
			break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       wtype);
			break;
		}
	}

}

static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;
	thread->ee.command = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}

static void binder_free_proc(struct binder_proc *proc)
{
	struct binder_device *device;

	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	if (proc->outstanding_txns)
		pr_warn("%s: Unexpected outstanding_txns %d\n",
			__func__, proc->outstanding_txns);
	device = container_of(proc->context, struct binder_device, context);
	if (refcount_dec_and_test(&device->ref)) {
		kfree(proc->context->name);
		kfree(device);
	}
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	put_cred(proc->cred);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}

static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	kfree(thread);
}

static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	} else {
		__acquire(&t->lock);
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			thread->proc->outstanding_txns--;
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
		else
			__acquire(&t->lock);
	}
	/* annotation for sparse, lock not acquired in last iteration above */
	__release(&t->lock);

	/*
	 * If this thread used poll, make sure we remove the waitqueue from any
	 * poll data structures holding it.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		wake_up_pollfree(&thread->wait);

	binder_inner_proc_unlock(thread->proc);

	/*
	 * This is needed to avoid races between wake_up_pollfree() above and
	 * someone else removing the last entry from the queue for other reasons
	 * (e.g. ep_remove_wait_queue() being called due to an epoll file
	 * descriptor being closed). Such other users hold an RCU read lock, so
	 * we can be sure they're done after we call synchronize_rcu().
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		synchronize_rcu();

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}
static __poll_t binder_poll(struct file *filp,
			    struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);
	if (!thread)
		return EPOLLERR;

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);

	binder_inner_proc_unlock(thread->proc);

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
		return EPOLLIN;

	return 0;
}

static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
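/*
 * Note (illustrative only, not part of the driver): both halves can be
 * combined in one BINDER_WRITE_READ call; the write side is drained first,
 * then the read side blocks unless the fd was opened O_NONBLOCK. Sketch,
 * with "fd", "wbuf"/"write_len" and "rbuf" as hypothetical caller-built
 * names:
 *
 *	struct binder_write_read bwr = {
 *		.write_size = write_len,
 *		.write_buffer = (binder_uintptr_t)wbuf,
 *		.read_size = sizeof(rbuf),
 *		.read_buffer = (binder_uintptr_t)rbuf,
 *	};
 *
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 *	// bwr.write_consumed / bwr.read_consumed report progress
 */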
static int binder_ioctl_set_ctx_mgr(struct file *filp,
				    struct flat_binder_object *fbo)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->cred);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, fbo);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}

static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
		struct binder_node_info_for_ref *info)
{
	struct binder_node *node;
	struct binder_context *context = proc->context;
	__u32 handle = info->handle;

	if (info->strong_count || info->weak_count || info->reserved1 ||
	    info->reserved2 || info->reserved3) {
		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
				  proc->pid);
		return -EINVAL;
	}

	/* This ioctl may only be used by the context manager */
	mutex_lock(&context->context_mgr_node_lock);
	if (!context->binder_context_mgr_node ||
	    context->binder_context_mgr_node->proc != proc) {
		mutex_unlock(&context->context_mgr_node_lock);
		return -EPERM;
	}
	mutex_unlock(&context->context_mgr_node_lock);

	node = binder_get_node_from_ref(proc, handle, true, NULL);
	if (!node)
		return -EINVAL;

	info->strong_count = node->local_strong_refs +
		node->internal_strong_refs;
	info->weak_count = node->local_weak_refs;

	binder_put_node(node);

	return 0;
}

static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
		struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;

	memset(info, 0, sizeof(*info));

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}
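/*
 * Example (illustrative only, not part of the driver): the ioctl above is a
 * cursor interface; each call returns the first node with ptr greater than
 * the one passed in, and leaves the struct zeroed once the walk is done. A
 * sketch that enumerates every node of the calling process, assuming "fd"
 * is an open binder descriptor:
 *
 *	struct binder_node_debug_info info = { .ptr = 0 };
 *
 *	do {
 *		if (ioctl(fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
 *			break;
 *	} while (info.ptr);	// ptr == 0 once the walk is complete
 */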
static bool binder_txns_pending_ilocked(struct binder_proc *proc)
{
	struct rb_node *n;
	struct binder_thread *thread;

	if (proc->outstanding_txns > 0)
		return true;

	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->transaction_stack)
			return true;
	}
	return false;
}

static int binder_ioctl_freeze(struct binder_freeze_info *info,
			       struct binder_proc *target_proc)
{
	int ret = 0;

	if (!info->enable) {
		binder_inner_proc_lock(target_proc);
		target_proc->sync_recv = false;
		target_proc->async_recv = false;
		target_proc->is_frozen = false;
		binder_inner_proc_unlock(target_proc);
		return 0;
	}

	/*
	 * Freezing the target. Prevent new transactions by
	 * setting the frozen state. If a timeout is specified,
	 * wait for transactions to drain.
	 */
	binder_inner_proc_lock(target_proc);
	target_proc->sync_recv = false;
	target_proc->async_recv = false;
	target_proc->is_frozen = true;
	binder_inner_proc_unlock(target_proc);

	if (info->timeout_ms > 0)
		ret = wait_event_interruptible_timeout(
			target_proc->freeze_wait,
			(!target_proc->outstanding_txns),
			msecs_to_jiffies(info->timeout_ms));

	/* Check pending transactions that wait for reply */
	if (ret >= 0) {
		binder_inner_proc_lock(target_proc);
		if (binder_txns_pending_ilocked(target_proc))
			ret = -EAGAIN;
		binder_inner_proc_unlock(target_proc);
	}

	if (ret < 0) {
		binder_inner_proc_lock(target_proc);
		target_proc->is_frozen = false;
		binder_inner_proc_unlock(target_proc);
	}

	return ret;
}
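/*
 * Example (illustrative only, not part of the driver): a freezer (e.g. a
 * cgroup-based activity manager) freezes a cooperating process via
 * BINDER_FREEZE before freezing its cgroup, retrying on -EAGAIN while
 * transactions drain. Sketch, with "binder_fd" and "target_pid" as
 * hypothetical names:
 *
 *	struct binder_freeze_info info = {
 *		.pid = target_pid,
 *		.enable = 1,
 *		.timeout_ms = 100,	// wait for outstanding txns
 *	};
 *
 *	while (ioctl(binder_fd, BINDER_FREEZE, &info) < 0 &&
 *	       errno == EAGAIN)
 *		;	// target still has pending sync transactions
 */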
static int binder_ioctl_get_freezer_info(
				struct binder_frozen_status_info *info)
{
	struct binder_proc *target_proc;
	bool found = false;
	__u32 txns_pending;

	info->sync_recv = 0;
	info->async_recv = 0;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
		if (target_proc->pid == info->pid) {
			found = true;
			binder_inner_proc_lock(target_proc);
			txns_pending = binder_txns_pending_ilocked(target_proc);
			info->sync_recv |= target_proc->sync_recv |
					(txns_pending << 1);
			info->async_recv |= target_proc->async_recv;
			binder_inner_proc_unlock(target_proc);
		}
	}
	mutex_unlock(&binder_procs_lock);

	if (!found)
		return -EINVAL;

	return 0;
}

static int binder_ioctl_get_extended_error(struct binder_thread *thread,
					   void __user *ubuf)
{
	struct binder_extended_error ee;

	binder_inner_proc_lock(thread->proc);
	ee = thread->ee;
	binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
	binder_inner_proc_unlock(thread->proc);

	if (copy_to_user(ubuf, &ee, sizeof(ee)))
		return -EFAULT;

	return 0;
}

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR_EXT: {
		struct flat_binder_object fbo;

		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
			ret = -EINVAL;
			goto err;
		}
		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
		if (ret)
			goto err;
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_INFO_FOR_REF: {
		struct binder_node_info_for_ref info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		break;
	}
	case BINDER_GET_NODE_DEBUG_INFO: {
		struct binder_node_debug_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_FREEZE: {
		struct binder_freeze_info info;
		struct binder_proc **target_procs = NULL, *target_proc;
		int target_procs_count = 0, i = 0;

		ret = 0;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		mutex_lock(&binder_procs_lock);
		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
			if (target_proc->pid == info.pid)
				target_procs_count++;
		}

		if (target_procs_count == 0) {
			mutex_unlock(&binder_procs_lock);
			ret = -EINVAL;
			goto err;
		}

		target_procs = kcalloc(target_procs_count,
				       sizeof(struct binder_proc *),
				       GFP_KERNEL);

		if (!target_procs) {
			mutex_unlock(&binder_procs_lock);
			ret = -ENOMEM;
			goto err;
		}

		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
			if (target_proc->pid != info.pid)
				continue;

			binder_inner_proc_lock(target_proc);
			target_proc->tmp_ref++;
			binder_inner_proc_unlock(target_proc);

			target_procs[i++] = target_proc;
		}
		mutex_unlock(&binder_procs_lock);

		for (i = 0; i < target_procs_count; i++) {
			if (ret >= 0)
				ret = binder_ioctl_freeze(&info,
							  target_procs[i]);

			binder_proc_dec_tmpref(target_procs[i]);
		}

		kfree(target_procs);

		if (ret < 0)
			goto err;
		break;
	}
	case BINDER_GET_FROZEN_INFO: {
		struct binder_frozen_status_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_freezer_info(&info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
		uint32_t enable;

		if (copy_from_user(&enable, ubuf, sizeof(enable))) {
			ret = -EFAULT;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->oneway_spam_detection_enabled = (bool)enable;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_GET_EXTENDED_ERROR:
		ret = binder_ioctl_get_extended_error(thread, ubuf);
		if (ret < 0)
			goto err;
		break;
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -EINTR)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}

static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
}

static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};

static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct binder_proc *proc = filp->private_data;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
		       proc->pid, vma->vm_start, vma->vm_end,
		       "bad vm_flags", -EPERM);
		return -EPERM;
	}
	vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	return binder_alloc_mmap_handler(&proc->alloc, vma);
}

static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc, *itr;
	struct binder_device *binder_dev;
	struct binderfs_info *info;
	struct dentry *binder_binderfs_dir_entry_proc = NULL;
	bool existing_pid = false;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	proc->cred = get_cred(filp->f_cred);
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->freeze_wait);
	proc->default_priority = task_nice(current);
	/* binderfs stashes devices in i_private */
	if (is_binderfs_device(nodp)) {
		binder_dev = nodp->i_private;
		info = nodp->i_sb->s_fs_info;
		binder_binderfs_dir_entry_proc = info->proc_log_dir;
	} else {
		binder_dev = container_of(filp->private_data,
					  struct binder_device,
					  miscdev);
	}
	refcount_inc(&binder_dev->ref);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->waiting_threads);
	filp->private_data = proc;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == proc->pid) {
			existing_pid = true;
			break;
		}
	}
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc && !existing_pid) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts.
		 * Only create for the first PID to avoid debugfs log spamming.
		 * The printing code will anyway print all contexts for a given
		 * PID, so this is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&proc_fops);
	}

	if (binder_binderfs_dir_entry_proc && !existing_pid) {
		char strbuf[11];
		struct dentry *binderfs_entry;

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * Similar to debugfs, the process-specific log file is shared
		 * between contexts. Only create it for the first PID.
		 * This is ok since, same as debugfs, the log file will contain
		 * information on all contexts of a given PID.
		 */
		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
		if (!IS_ERR(binderfs_entry)) {
			proc->binderfs_entry = binderfs_entry;
		} else {
			int error;

			error = PTR_ERR(binderfs_entry);
			pr_warn("Unable to create file %s in binderfs (error %d)\n",
				strbuf, error);
		}
	}

	return 0;
}

static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread,
							rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}

static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);

	if (proc->binderfs_entry) {
		binderfs_remove_file(proc->binderfs_entry);
		proc->binderfs_entry = NULL;
	}

	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}

static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

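	/*
	 * Note: binder_node_lock() is taken before binder_inner_proc_lock()
	 * below; both are needed to dequeue the node's pending work and to
	 * walk its ref list safely.
	 */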
	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node.
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;

		BUG_ON(!list_empty(&ref->death->work.entry));
		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}

static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads.
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	proc->is_frozen = false;
	proc->sync_recv = false;
	proc->async_recv = false;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

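	/*
	 * Local nodes are gone; what remains are the references this
	 * process holds on other processes' nodes.
	 */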
	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}

static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					   struct binder_proc,
					   deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
			       &binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}

static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;
	ktime_t current_time = ktime_get();

	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
		   prefix, t->debug_id, t,
		   t->from_pid,
		   t->from_tid,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ?
		   t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply,
		   ktime_ms_delta(current_time, t->start_time));
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node.
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %pK\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->user_data);
}

static void print_binder_work_ilocked(struct seq_file *m,
				      struct binder_proc *proc,
				      const char *prefix,
				      const char *transaction_prefix,
				      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;
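
	/* "iw" in the line below is the number of refs counted here. */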
	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					"    pending async transaction", w);
	}
}

static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}

static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (!print_all && !node->has_async_transaction)
			continue;

		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
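		 * The temporary ref also keeps the node valid while
		 * proc->inner_lock is dropped just below.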
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							     struct binder_ref,
							     rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}

static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY",
	"BR_FROZEN_REPLY",
	"BR_ONEWAY_SPAM_SUSPECT",
	"BR_TRANSACTION_PENDING_FROZEN"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};

static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);
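
		/* Counters only grow, so "active" is created - deleted. */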
		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}

static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}

static int state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
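		 * Dead nodes have no owning proc, so tmp_refs is bumped
		 * directly while binder_dead_nodes_lock is held.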
6372 */ 6373 node->tmp_refs++; 6374 spin_unlock(&binder_dead_nodes_lock); 6375 if (last_node) 6376 binder_put_node(last_node); 6377 binder_node_lock(node); 6378 print_binder_node_nilocked(m, node); 6379 binder_node_unlock(node); 6380 last_node = node; 6381 spin_lock(&binder_dead_nodes_lock); 6382 } 6383 spin_unlock(&binder_dead_nodes_lock); 6384 if (last_node) 6385 binder_put_node(last_node); 6386 6387 mutex_lock(&binder_procs_lock); 6388 hlist_for_each_entry(proc, &binder_procs, proc_node) 6389 print_binder_proc(m, proc, 1); 6390 mutex_unlock(&binder_procs_lock); 6391 6392 return 0; 6393 } 6394 6395 static int stats_show(struct seq_file *m, void *unused) 6396 { 6397 struct binder_proc *proc; 6398 6399 seq_puts(m, "binder stats:\n"); 6400 6401 print_binder_stats(m, "", &binder_stats); 6402 6403 mutex_lock(&binder_procs_lock); 6404 hlist_for_each_entry(proc, &binder_procs, proc_node) 6405 print_binder_proc_stats(m, proc); 6406 mutex_unlock(&binder_procs_lock); 6407 6408 return 0; 6409 } 6410 6411 static int transactions_show(struct seq_file *m, void *unused) 6412 { 6413 struct binder_proc *proc; 6414 6415 seq_puts(m, "binder transactions:\n"); 6416 mutex_lock(&binder_procs_lock); 6417 hlist_for_each_entry(proc, &binder_procs, proc_node) 6418 print_binder_proc(m, proc, 0); 6419 mutex_unlock(&binder_procs_lock); 6420 6421 return 0; 6422 } 6423 6424 static int proc_show(struct seq_file *m, void *unused) 6425 { 6426 struct binder_proc *itr; 6427 int pid = (unsigned long)m->private; 6428 6429 mutex_lock(&binder_procs_lock); 6430 hlist_for_each_entry(itr, &binder_procs, proc_node) { 6431 if (itr->pid == pid) { 6432 seq_puts(m, "binder proc state:\n"); 6433 print_binder_proc(m, itr, 1); 6434 } 6435 } 6436 mutex_unlock(&binder_procs_lock); 6437 6438 return 0; 6439 } 6440 6441 static void print_binder_transaction_log_entry(struct seq_file *m, 6442 struct binder_transaction_log_entry *e) 6443 { 6444 int debug_id = READ_ONCE(e->debug_id_done); 6445 /* 6446 * read barrier to guarantee debug_id_done read before 6447 * we print the log values 6448 */ 6449 smp_rmb(); 6450 seq_printf(m, 6451 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d", 6452 e->debug_id, (e->call_type == 2) ? "reply" : 6453 ((e->call_type == 1) ? "async" : "call "), e->from_proc, 6454 e->from_thread, e->to_proc, e->to_thread, e->context_name, 6455 e->to_node, e->target_handle, e->data_size, e->offsets_size, 6456 e->return_error, e->return_error_param, 6457 e->return_error_line); 6458 /* 6459 * read-barrier to guarantee read of debug_id_done after 6460 * done printing the fields of the entry 6461 */ 6462 smp_rmb(); 6463 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ? 6464 "\n" : " (incomplete)\n"); 6465 } 6466 6467 static int transaction_log_show(struct seq_file *m, void *unused) 6468 { 6469 struct binder_transaction_log *log = m->private; 6470 unsigned int log_cur = atomic_read(&log->cur); 6471 unsigned int count; 6472 unsigned int cur; 6473 int i; 6474 6475 count = log_cur + 1; 6476 cur = count < ARRAY_SIZE(log->entry) && !log->full ? 
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

DEFINE_SHOW_ATTRIBUTE(state);
DEFINE_SHOW_ATTRIBUTE(stats);
DEFINE_SHOW_ATTRIBUTE(transactions);
DEFINE_SHOW_ATTRIBUTE(transaction_log);

const struct binder_debugfs_entry binder_debugfs_entries[] = {
	{
		.name = "state",
		.mode = 0444,
		.fops = &state_fops,
		.data = NULL,
	},
	{
		.name = "stats",
		.mode = 0444,
		.fops = &stats_fops,
		.data = NULL,
	},
	{
		.name = "transactions",
		.mode = 0444,
		.fops = &transactions_fops,
		.data = NULL,
	},
	{
		.name = "transaction_log",
		.mode = 0444,
		.fops = &transaction_log_fops,
		.data = &binder_transaction_log,
	},
	{
		.name = "failed_transaction_log",
		.mode = 0444,
		.fops = &transaction_log_fops,
		.data = &binder_transaction_log_failed,
	},
	{} /* terminator */
};

static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	refcount_set(&binder_device->ref, 1);
	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}

static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;
	char *device_names = NULL;
	const struct binder_debugfs_entry *db_entry;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);

	binder_for_each_debugfs_entry(db_entry)
		debugfs_create_file(db_entry->name,
				    db_entry->mode,
				    binder_debugfs_dir_entry_root,
				    db_entry->data,
				    db_entry->fops);

	binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						binder_debugfs_dir_entry_root);

	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
	    strcmp(binder_devices_param, "") != 0) {
		/*
		 * Copy the module_parameter string, because we don't want to
		 * tokenize it in-place.
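		 * (strsep() below writes NUL bytes into the copy.)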
		 */
		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
		if (!device_names) {
			ret = -ENOMEM;
			goto err_alloc_device_names_failed;
		}

		device_tmp = device_names;
		while ((device_name = strsep(&device_tmp, ","))) {
			ret = init_binder_device(device_name);
			if (ret)
				goto err_init_binder_device_failed;
		}
	}

	ret = init_binderfs();
	if (ret)
		goto err_init_binder_device_failed;

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
	binder_alloc_shrinker_exit();

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");
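
/*
 * Illustrative userspace sketch (documentation only, not part of the
 * driver): a client typically opens a binder device, verifies the
 * protocol version via BINDER_VERSION and maps the read-only transaction
 * buffer before talking to the driver. The device path and buffer size
 * below are example assumptions.
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	struct binder_version vers;
 *
 *	if (fd < 0 || ioctl(fd, BINDER_VERSION, &vers) < 0 ||
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		return -1;
 *	// Do not request PROT_WRITE: see FORBIDDEN_MMAP_FLAGS above.
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
 */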