/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack.
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * is required in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
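/*
 * Illustrative sketch (editor's example, not driver code): a caller that
 * needed all three locks for a node owned by @proc would take them in the
 * documented order and release them in reverse, using the helpers defined
 * later in this file:
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_node_lock(node);		// 2) node->lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */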
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);
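/*
 * Illustrative sketch (editor's example): because debug_mask is exposed as
 * a writable (0644) module parameter, logging can be tuned at runtime from
 * userspace, e.g. (assuming the module is named "binder"):
 *
 *	echo 0x200 > /sys/module/binder/parameters/debug_mask
 *
 * where 0x200 is BINDER_DEBUG_TRANSACTION (1U << 9) above.
 */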
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};
struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}

struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};
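/*
 * Illustrative sketch (editor's example): code that holds a pointer to one
 * of the embedded members recovers the enclosing structure with
 * container_of(), e.g. from the miscdevice that the misc core stores in
 * filp->private_data on open():
 *
 *	struct binder_device *device;
 *
 *	device = container_of(filp->private_data, struct binder_device,
 *			      miscdev);
 *
 * The to_*_object() macros above wrap the same pattern for the
 * flat_binder_object family of userspace objects.
 */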
/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};
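/*
 * Illustrative sketch (editor's example): consumers of a worklist dispatch
 * on @type and then use container_of() to reach the enclosing object:
 *
 *	struct binder_work *w = ...;	// dequeued from a todo list
 *
 *	switch (w->type) {
 *	case BINDER_WORK_RETURN_ERROR: {
 *		struct binder_error *e;
 *
 *		e = container_of(w, struct binder_error, work);
 *		// ... report e->cmd to userspace ...
 *		break;
 *	}
 *	// ... other BINDER_WORK_* cases ...
 *	}
 */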
/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};
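/*
 * Illustrative sketch (editor's example): because a binder_ref may be freed
 * as soon as proc->outer_lock is dropped, callers snapshot the counts while
 * locked and use the copy afterwards:
 *
 *	struct binder_ref_data rdata;
 *
 *	binder_proc_lock(proc);
 *	rdata = ref->data;
 *	binder_proc_unlock(proc);
 *	// safe to log/inspect rdata here even if ref has been freed
 */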
/**
 * struct binder_ref - struct to track references on nodes
 * @data:        binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:  list entry for node->refs list in target node
 *               (protected by @node->lock)
 * @proc:        binder_proc containing ref
 * @node:        binder_node of target node. When cleaning up a
 *               ref for deletion in binder_cleanup_ref, a non-NULL
 *               @node indicates the node must be freed
 * @death:       pointer to death notification (ref_death) if requested
 *               (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};
/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (protected by @files_lock)
 * @files_lock:           mutex to protect @files
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct mutex files_lock;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};
/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	/**
	 * @lock:  protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release spinlock for given binder_proc
 * @proc:         struct binder_proc whose lock is being released
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc whose lock is being released
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}
/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node whose lock is being released
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, it also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node whose locks are being released
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:         binder_proc associated with list
 * @list:         list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}
/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	unsigned long rlim_cur;
	unsigned long irqs;
	int ret;

	mutex_lock(&proc->files_lock);
	if (proc->files == NULL) {
		ret = -ESRCH;
		goto err;
	}
	if (!lock_task_sighand(proc->tsk, &irqs)) {
		ret = -EMFILE;
		goto err;
	}
	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
err:
	mutex_unlock(&proc->files_lock);
	return ret;
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	mutex_lock(&proc->files_lock);
	if (proc->files)
		__fd_install(proc->files, fd, file);
	mutex_unlock(&proc->files_lock);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	mutex_lock(&proc->files_lock);
	if (proc->files == NULL) {
		retval = -ESRCH;
		goto err;
	}
	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;
err:
	mutex_unlock(&proc->files_lock);
	return retval;
}

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}
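/*
 * Illustrative sketch (editor's example): binder_select_thread_ilocked()
 * removes the returned thread from proc->waiting_threads, so the caller is
 * expected to wake it, typically via the helper defined just below:
 *
 *	thread = binder_select_thread_ilocked(proc);
 *	binder_wakeup_thread_ilocked(proc, thread, true);
 */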
/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}
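/*
 * Illustrative sketch (editor's example): binder_get_node() returns the
 * node with an implicit tmp_ref held, so lookups pair with
 * binder_put_node() (defined later in this file):
 *
 *	struct binder_node *node;
 *
 *	node = binder_get_node(proc, ptr);
 *	if (node) {
 *		// ... use node; it cannot be freed here ...
 *		binder_put_node(node);
 *	}
 */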
static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}
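/*
 * Editor's note on the pattern above: binder_new_node() allocates outside
 * the spinlock (kzalloc with GFP_KERNEL may sleep), then resolves the race
 * with a concurrent insert inside the lock, where
 * binder_init_node_ilocked() returns the already-present node and the
 * caller frees its unused allocation. binder_inc_ref_for_node() below uses
 * the same alloc-outside/check-inside approach for refs.
 */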
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			binder_dequeue_work_ilocked(&node->work);
			/*
			 * Note: this function is the only place where we queue
			 * directly to a thread->todo without using the
			 * corresponding binder_enqueue_thread_work() helper
			 * functions; in this case it's ok to not set the
			 * process_todo flag, since we know this node work will
			 * always be followed by other work that starts queue
			 * processing: in case of synchronous transactions, a
			 * BR_REPLY or BR_ERROR; in case of oneway
			 * transactions, a BR_TRANSACTION_COMPLETE.
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}
/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}
/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
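/*
 * Editor's worked example of the desc assignment above: if the proc's
 * existing refs use descriptors {0, 1, 2, 5}, the first walk settles on 3
 * (it advances past 0, 1, and 2, then stops at 5, the first desc larger
 * than the candidate), and the second walk inserts the new ref at that
 * free slot. Descriptor 0 is reserved for refs to the context manager
 * node.
 */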
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}
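/*
 * Illustrative sketch (editor's example): users of binder_get_txn_from()
 * (defined below) drop the temporary reference with
 * binder_thread_dec_tmpref() once they are done with the thread:
 *
 *	struct binder_thread *from;
 *
 *	from = binder_get_txn_from(t);
 *	if (from) {
 *		// ... use from; it cannot be freed here ...
 *		binder_thread_dec_tmpref(from);
 *	}
 */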
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from)
		return NULL;
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
	return NULL;
}

static void binder_free_transaction(struct binder_transaction *t)
{
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
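				 * The first error is still queued on
				 * reply_error and has not been consumed,
				 * so overwriting it here would lose it.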
1999 */ 2000 pr_warn("Unexpected reply error: %u\n", 2001 target_thread->reply_error.cmd); 2002 } 2003 binder_inner_proc_unlock(target_thread->proc); 2004 binder_thread_dec_tmpref(target_thread); 2005 binder_free_transaction(t); 2006 return; 2007 } 2008 next = t->from_parent; 2009 2010 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 2011 "send failed reply for transaction %d, target dead\n", 2012 t->debug_id); 2013 2014 binder_free_transaction(t); 2015 if (next == NULL) { 2016 binder_debug(BINDER_DEBUG_DEAD_BINDER, 2017 "reply failed, no target thread at root\n"); 2018 return; 2019 } 2020 t = next; 2021 binder_debug(BINDER_DEBUG_DEAD_BINDER, 2022 "reply failed, no target thread -- retry %d\n", 2023 t->debug_id); 2024 } 2025 } 2026 2027 /** 2028 * binder_cleanup_transaction() - cleans up undelivered transaction 2029 * @t: transaction that needs to be cleaned up 2030 * @reason: reason the transaction wasn't delivered 2031 * @error_code: error to return to caller (if synchronous call) 2032 */ 2033 static void binder_cleanup_transaction(struct binder_transaction *t, 2034 const char *reason, 2035 uint32_t error_code) 2036 { 2037 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) { 2038 binder_send_failed_reply(t, error_code); 2039 } else { 2040 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 2041 "undelivered transaction %d, %s\n", 2042 t->debug_id, reason); 2043 binder_free_transaction(t); 2044 } 2045 } 2046 2047 /** 2048 * binder_validate_object() - checks for a valid metadata object in a buffer. 2049 * @buffer: binder_buffer that we're parsing. 2050 * @offset: offset in the buffer at which to validate an object. 2051 * 2052 * Return: If there's a valid metadata object at @offset in @buffer, the 2053 * size of that object. Otherwise, it returns zero. 2054 */ 2055 static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset) 2056 { 2057 /* Check if we can read a header first */ 2058 struct binder_object_header *hdr; 2059 size_t object_size = 0; 2060 2061 if (offset > buffer->data_size - sizeof(*hdr) || 2062 buffer->data_size < sizeof(*hdr) || 2063 !IS_ALIGNED(offset, sizeof(u32))) 2064 return 0; 2065 2066 /* Ok, now see if we can read a complete object. */ 2067 hdr = (struct binder_object_header *)(buffer->data + offset); 2068 switch (hdr->type) { 2069 case BINDER_TYPE_BINDER: 2070 case BINDER_TYPE_WEAK_BINDER: 2071 case BINDER_TYPE_HANDLE: 2072 case BINDER_TYPE_WEAK_HANDLE: 2073 object_size = sizeof(struct flat_binder_object); 2074 break; 2075 case BINDER_TYPE_FD: 2076 object_size = sizeof(struct binder_fd_object); 2077 break; 2078 case BINDER_TYPE_PTR: 2079 object_size = sizeof(struct binder_buffer_object); 2080 break; 2081 case BINDER_TYPE_FDA: 2082 object_size = sizeof(struct binder_fd_array_object); 2083 break; 2084 default: 2085 return 0; 2086 } 2087 if (offset <= buffer->data_size - object_size && 2088 buffer->data_size >= object_size) 2089 return object_size; 2090 else 2091 return 0; 2092 } 2093 2094 /** 2095 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer. 
 * @b:		binder_buffer containing the object
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start:	points to the start of the offset array
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
							binder_size_t index,
							binder_size_t *start,
							binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}

/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b:			transaction buffer
 * @objects_start:	start of objects buffer
 * @buffer:		binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @buffer to fix up
 * @last_obj:		last binder_buffer_object that we fixed up in
 * @last_min_offset:	minimum fixup offset in @last_obj
 *
 * Return:		%true if a fixup in buffer @buffer at offset
 *			@fixup_offset is allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *   D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_buffer *b,
				  binder_size_t *objects_start,
				  struct binder_buffer_object *buffer,
				  binder_size_t fixup_offset,
				  struct binder_buffer_object *last_obj,
				  binder_size_t last_min_offset)
{
	if (!last_obj) {
		/* No object to fix up in */
		return false;
	}

	while (last_obj != buffer) {
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was previously verified by the driver.
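		 * The loop climbs last_obj's parent chain; once @buffer
		 * is reached, the fixup is allowed only at or past the
		 * minimum offset computed for that level.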
2184 */ 2185 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0) 2186 return false; 2187 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t); 2188 last_obj = (struct binder_buffer_object *) 2189 (b->data + *(objects_start + last_obj->parent)); 2190 } 2191 return (fixup_offset >= last_min_offset); 2192 } 2193 2194 static void binder_transaction_buffer_release(struct binder_proc *proc, 2195 struct binder_buffer *buffer, 2196 binder_size_t *failed_at) 2197 { 2198 binder_size_t *offp, *off_start, *off_end; 2199 int debug_id = buffer->debug_id; 2200 2201 binder_debug(BINDER_DEBUG_TRANSACTION, 2202 "%d buffer release %d, size %zd-%zd, failed at %pK\n", 2203 proc->pid, buffer->debug_id, 2204 buffer->data_size, buffer->offsets_size, failed_at); 2205 2206 if (buffer->target_node) 2207 binder_dec_node(buffer->target_node, 1, 0); 2208 2209 off_start = (binder_size_t *)(buffer->data + 2210 ALIGN(buffer->data_size, sizeof(void *))); 2211 if (failed_at) 2212 off_end = failed_at; 2213 else 2214 off_end = (void *)off_start + buffer->offsets_size; 2215 for (offp = off_start; offp < off_end; offp++) { 2216 struct binder_object_header *hdr; 2217 size_t object_size = binder_validate_object(buffer, *offp); 2218 2219 if (object_size == 0) { 2220 pr_err("transaction release %d bad object at offset %lld, size %zd\n", 2221 debug_id, (u64)*offp, buffer->data_size); 2222 continue; 2223 } 2224 hdr = (struct binder_object_header *)(buffer->data + *offp); 2225 switch (hdr->type) { 2226 case BINDER_TYPE_BINDER: 2227 case BINDER_TYPE_WEAK_BINDER: { 2228 struct flat_binder_object *fp; 2229 struct binder_node *node; 2230 2231 fp = to_flat_binder_object(hdr); 2232 node = binder_get_node(proc, fp->binder); 2233 if (node == NULL) { 2234 pr_err("transaction release %d bad node %016llx\n", 2235 debug_id, (u64)fp->binder); 2236 break; 2237 } 2238 binder_debug(BINDER_DEBUG_TRANSACTION, 2239 " node %d u%016llx\n", 2240 node->debug_id, (u64)node->ptr); 2241 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER, 2242 0); 2243 binder_put_node(node); 2244 } break; 2245 case BINDER_TYPE_HANDLE: 2246 case BINDER_TYPE_WEAK_HANDLE: { 2247 struct flat_binder_object *fp; 2248 struct binder_ref_data rdata; 2249 int ret; 2250 2251 fp = to_flat_binder_object(hdr); 2252 ret = binder_dec_ref_for_handle(proc, fp->handle, 2253 hdr->type == BINDER_TYPE_HANDLE, &rdata); 2254 2255 if (ret) { 2256 pr_err("transaction release %d bad handle %d, ret = %d\n", 2257 debug_id, fp->handle, ret); 2258 break; 2259 } 2260 binder_debug(BINDER_DEBUG_TRANSACTION, 2261 " ref %d desc %d\n", 2262 rdata.debug_id, rdata.desc); 2263 } break; 2264 2265 case BINDER_TYPE_FD: { 2266 struct binder_fd_object *fp = to_binder_fd_object(hdr); 2267 2268 binder_debug(BINDER_DEBUG_TRANSACTION, 2269 " fd %d\n", fp->fd); 2270 if (failed_at) 2271 task_close_fd(proc, fp->fd); 2272 } break; 2273 case BINDER_TYPE_PTR: 2274 /* 2275 * Nothing to do here, this will get cleaned up when the 2276 * transaction buffer gets freed 2277 */ 2278 break; 2279 case BINDER_TYPE_FDA: { 2280 struct binder_fd_array_object *fda; 2281 struct binder_buffer_object *parent; 2282 uintptr_t parent_buffer; 2283 u32 *fd_array; 2284 size_t fd_index; 2285 binder_size_t fd_buf_size; 2286 2287 fda = to_binder_fd_array_object(hdr); 2288 parent = binder_validate_ptr(buffer, fda->parent, 2289 off_start, 2290 offp - off_start); 2291 if (!parent) { 2292 pr_err("transaction release %d bad parent offset\n", 2293 debug_id); 2294 continue; 2295 } 2296 /* 2297 * Since the parent was already fixed up, convert it 
2298 * back to kernel address space to access it 2299 */ 2300 parent_buffer = parent->buffer - 2301 binder_alloc_get_user_buffer_offset( 2302 &proc->alloc); 2303 2304 fd_buf_size = sizeof(u32) * fda->num_fds; 2305 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 2306 pr_err("transaction release %d invalid number of fds (%lld)\n", 2307 debug_id, (u64)fda->num_fds); 2308 continue; 2309 } 2310 if (fd_buf_size > parent->length || 2311 fda->parent_offset > parent->length - fd_buf_size) { 2312 /* No space for all file descriptors here. */ 2313 pr_err("transaction release %d not enough space for %lld fds in buffer\n", 2314 debug_id, (u64)fda->num_fds); 2315 continue; 2316 } 2317 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); 2318 for (fd_index = 0; fd_index < fda->num_fds; fd_index++) 2319 task_close_fd(proc, fd_array[fd_index]); 2320 } break; 2321 default: 2322 pr_err("transaction release %d bad object type %x\n", 2323 debug_id, hdr->type); 2324 break; 2325 } 2326 } 2327 } 2328 2329 static int binder_translate_binder(struct flat_binder_object *fp, 2330 struct binder_transaction *t, 2331 struct binder_thread *thread) 2332 { 2333 struct binder_node *node; 2334 struct binder_proc *proc = thread->proc; 2335 struct binder_proc *target_proc = t->to_proc; 2336 struct binder_ref_data rdata; 2337 int ret = 0; 2338 2339 node = binder_get_node(proc, fp->binder); 2340 if (!node) { 2341 node = binder_new_node(proc, fp); 2342 if (!node) 2343 return -ENOMEM; 2344 } 2345 if (fp->cookie != node->cookie) { 2346 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n", 2347 proc->pid, thread->pid, (u64)fp->binder, 2348 node->debug_id, (u64)fp->cookie, 2349 (u64)node->cookie); 2350 ret = -EINVAL; 2351 goto done; 2352 } 2353 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { 2354 ret = -EPERM; 2355 goto done; 2356 } 2357 2358 ret = binder_inc_ref_for_node(target_proc, node, 2359 fp->hdr.type == BINDER_TYPE_BINDER, 2360 &thread->todo, &rdata); 2361 if (ret) 2362 goto done; 2363 2364 if (fp->hdr.type == BINDER_TYPE_BINDER) 2365 fp->hdr.type = BINDER_TYPE_HANDLE; 2366 else 2367 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE; 2368 fp->binder = 0; 2369 fp->handle = rdata.desc; 2370 fp->cookie = 0; 2371 2372 trace_binder_transaction_node_to_ref(t, node, &rdata); 2373 binder_debug(BINDER_DEBUG_TRANSACTION, 2374 " node %d u%016llx -> ref %d desc %d\n", 2375 node->debug_id, (u64)node->ptr, 2376 rdata.debug_id, rdata.desc); 2377 done: 2378 binder_put_node(node); 2379 return ret; 2380 } 2381 2382 static int binder_translate_handle(struct flat_binder_object *fp, 2383 struct binder_transaction *t, 2384 struct binder_thread *thread) 2385 { 2386 struct binder_proc *proc = thread->proc; 2387 struct binder_proc *target_proc = t->to_proc; 2388 struct binder_node *node; 2389 struct binder_ref_data src_rdata; 2390 int ret = 0; 2391 2392 node = binder_get_node_from_ref(proc, fp->handle, 2393 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata); 2394 if (!node) { 2395 binder_user_error("%d:%d got transaction with invalid handle, %d\n", 2396 proc->pid, thread->pid, fp->handle); 2397 return -EINVAL; 2398 } 2399 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { 2400 ret = -EPERM; 2401 goto done; 2402 } 2403 2404 binder_node_lock(node); 2405 if (node->proc == target_proc) { 2406 if (fp->hdr.type == BINDER_TYPE_HANDLE) 2407 fp->hdr.type = BINDER_TYPE_BINDER; 2408 else 2409 fp->hdr.type = BINDER_TYPE_WEAK_BINDER; 2410 fp->binder = node->ptr; 2411 fp->cookie = node->cookie; 
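		/*
		 * The node lives in the receiving process, so translate
		 * the handle back into the node itself; take the extra
		 * strong or weak ref under the inner lock, as required
		 * by binder_inc_node_nilocked().
		 */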
2412 if (node->proc) 2413 binder_inner_proc_lock(node->proc); 2414 binder_inc_node_nilocked(node, 2415 fp->hdr.type == BINDER_TYPE_BINDER, 2416 0, NULL); 2417 if (node->proc) 2418 binder_inner_proc_unlock(node->proc); 2419 trace_binder_transaction_ref_to_node(t, node, &src_rdata); 2420 binder_debug(BINDER_DEBUG_TRANSACTION, 2421 " ref %d desc %d -> node %d u%016llx\n", 2422 src_rdata.debug_id, src_rdata.desc, node->debug_id, 2423 (u64)node->ptr); 2424 binder_node_unlock(node); 2425 } else { 2426 struct binder_ref_data dest_rdata; 2427 2428 binder_node_unlock(node); 2429 ret = binder_inc_ref_for_node(target_proc, node, 2430 fp->hdr.type == BINDER_TYPE_HANDLE, 2431 NULL, &dest_rdata); 2432 if (ret) 2433 goto done; 2434 2435 fp->binder = 0; 2436 fp->handle = dest_rdata.desc; 2437 fp->cookie = 0; 2438 trace_binder_transaction_ref_to_ref(t, node, &src_rdata, 2439 &dest_rdata); 2440 binder_debug(BINDER_DEBUG_TRANSACTION, 2441 " ref %d desc %d -> ref %d desc %d (node %d)\n", 2442 src_rdata.debug_id, src_rdata.desc, 2443 dest_rdata.debug_id, dest_rdata.desc, 2444 node->debug_id); 2445 } 2446 done: 2447 binder_put_node(node); 2448 return ret; 2449 } 2450 2451 static int binder_translate_fd(int fd, 2452 struct binder_transaction *t, 2453 struct binder_thread *thread, 2454 struct binder_transaction *in_reply_to) 2455 { 2456 struct binder_proc *proc = thread->proc; 2457 struct binder_proc *target_proc = t->to_proc; 2458 int target_fd; 2459 struct file *file; 2460 int ret; 2461 bool target_allows_fd; 2462 2463 if (in_reply_to) 2464 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS); 2465 else 2466 target_allows_fd = t->buffer->target_node->accept_fds; 2467 if (!target_allows_fd) { 2468 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n", 2469 proc->pid, thread->pid, 2470 in_reply_to ? 
"reply" : "transaction", 2471 fd); 2472 ret = -EPERM; 2473 goto err_fd_not_accepted; 2474 } 2475 2476 file = fget(fd); 2477 if (!file) { 2478 binder_user_error("%d:%d got transaction with invalid fd, %d\n", 2479 proc->pid, thread->pid, fd); 2480 ret = -EBADF; 2481 goto err_fget; 2482 } 2483 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file); 2484 if (ret < 0) { 2485 ret = -EPERM; 2486 goto err_security; 2487 } 2488 2489 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); 2490 if (target_fd < 0) { 2491 ret = -ENOMEM; 2492 goto err_get_unused_fd; 2493 } 2494 task_fd_install(target_proc, target_fd, file); 2495 trace_binder_transaction_fd(t, fd, target_fd); 2496 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n", 2497 fd, target_fd); 2498 2499 return target_fd; 2500 2501 err_get_unused_fd: 2502 err_security: 2503 fput(file); 2504 err_fget: 2505 err_fd_not_accepted: 2506 return ret; 2507 } 2508 2509 static int binder_translate_fd_array(struct binder_fd_array_object *fda, 2510 struct binder_buffer_object *parent, 2511 struct binder_transaction *t, 2512 struct binder_thread *thread, 2513 struct binder_transaction *in_reply_to) 2514 { 2515 binder_size_t fdi, fd_buf_size, num_installed_fds; 2516 int target_fd; 2517 uintptr_t parent_buffer; 2518 u32 *fd_array; 2519 struct binder_proc *proc = thread->proc; 2520 struct binder_proc *target_proc = t->to_proc; 2521 2522 fd_buf_size = sizeof(u32) * fda->num_fds; 2523 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 2524 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n", 2525 proc->pid, thread->pid, (u64)fda->num_fds); 2526 return -EINVAL; 2527 } 2528 if (fd_buf_size > parent->length || 2529 fda->parent_offset > parent->length - fd_buf_size) { 2530 /* No space for all file descriptors here. */ 2531 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n", 2532 proc->pid, thread->pid, (u64)fda->num_fds); 2533 return -EINVAL; 2534 } 2535 /* 2536 * Since the parent was already fixed up, convert it 2537 * back to the kernel address space to access it 2538 */ 2539 parent_buffer = parent->buffer - 2540 binder_alloc_get_user_buffer_offset(&target_proc->alloc); 2541 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); 2542 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) { 2543 binder_user_error("%d:%d parent offset not aligned correctly.\n", 2544 proc->pid, thread->pid); 2545 return -EINVAL; 2546 } 2547 for (fdi = 0; fdi < fda->num_fds; fdi++) { 2548 target_fd = binder_translate_fd(fd_array[fdi], t, thread, 2549 in_reply_to); 2550 if (target_fd < 0) 2551 goto err_translate_fd_failed; 2552 fd_array[fdi] = target_fd; 2553 } 2554 return 0; 2555 2556 err_translate_fd_failed: 2557 /* 2558 * Failed to allocate fd or security error, free fds 2559 * installed so far. 
2560 */ 2561 num_installed_fds = fdi; 2562 for (fdi = 0; fdi < num_installed_fds; fdi++) 2563 task_close_fd(target_proc, fd_array[fdi]); 2564 return target_fd; 2565 } 2566 2567 static int binder_fixup_parent(struct binder_transaction *t, 2568 struct binder_thread *thread, 2569 struct binder_buffer_object *bp, 2570 binder_size_t *off_start, 2571 binder_size_t num_valid, 2572 struct binder_buffer_object *last_fixup_obj, 2573 binder_size_t last_fixup_min_off) 2574 { 2575 struct binder_buffer_object *parent; 2576 u8 *parent_buffer; 2577 struct binder_buffer *b = t->buffer; 2578 struct binder_proc *proc = thread->proc; 2579 struct binder_proc *target_proc = t->to_proc; 2580 2581 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT)) 2582 return 0; 2583 2584 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid); 2585 if (!parent) { 2586 binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 2587 proc->pid, thread->pid); 2588 return -EINVAL; 2589 } 2590 2591 if (!binder_validate_fixup(b, off_start, 2592 parent, bp->parent_offset, 2593 last_fixup_obj, 2594 last_fixup_min_off)) { 2595 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", 2596 proc->pid, thread->pid); 2597 return -EINVAL; 2598 } 2599 2600 if (parent->length < sizeof(binder_uintptr_t) || 2601 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) { 2602 /* No space for a pointer here! */ 2603 binder_user_error("%d:%d got transaction with invalid parent offset\n", 2604 proc->pid, thread->pid); 2605 return -EINVAL; 2606 } 2607 parent_buffer = (u8 *)((uintptr_t)parent->buffer - 2608 binder_alloc_get_user_buffer_offset( 2609 &target_proc->alloc)); 2610 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer; 2611 2612 return 0; 2613 } 2614 2615 /** 2616 * binder_proc_transaction() - sends a transaction to a process and wakes it up 2617 * @t: transaction to send 2618 * @proc: process to send the transaction to 2619 * @thread: thread in @proc to send the transaction to (may be NULL) 2620 * 2621 * This function queues a transaction to the specified process. It will try 2622 * to find a thread in the target process to handle the transaction and 2623 * wake it up. If no thread is found, the work is queued to the proc 2624 * waitqueue. 2625 * 2626 * If the @thread parameter is not NULL, the transaction is always queued 2627 * to the waitlist of that specific thread. 
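 *
 * Oneway (TF_ONE_WAY) transactions to the same node are serialized:
 * if an async transaction is already in flight for the target node,
 * the work is parked on node->async_todo and no thread is woken until
 * the in-flight one completes (when its buffer is freed via
 * BC_FREE_BUFFER).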
 *
 * Return:	true if the transaction was successfully queued,
 *		false if the target process or thread is dead
 */
static bool binder_proc_transaction(struct binder_transaction *t,
				    struct binder_proc *proc,
				    struct binder_thread *thread)
{
	struct binder_node *node = t->buffer->target_node;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool pending_async = false;

	BUG_ON(!node);
	binder_node_lock(node);
	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction) {
			pending_async = true;
		} else {
			node->has_async_transaction = true;
		}
	}

	binder_inner_proc_lock(proc);

	if (proc->is_dead || (thread && thread->is_dead)) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		return false;
	}

	if (!thread && !pending_async)
		thread = binder_select_thread_ilocked(proc);

	if (thread)
		binder_enqueue_thread_work_ilocked(thread, &t->work);
	else if (!pending_async)
		binder_enqueue_work_ilocked(&t->work, &proc->todo);
	else
		binder_enqueue_work_ilocked(&t->work, &node->async_todo);

	if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	return true;
}

/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:	struct binder_node for which to get refs
 * @procp:	returns @node->proc if valid
 * @error:	set to BR_DEAD_REPLY if @node->proc is NULL
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
 * Also sets @procp if valid.
If the @node->proc is NULL indicating that the 2697 * target proc has died, @error is set to BR_DEAD_REPLY 2698 */ 2699 static struct binder_node *binder_get_node_refs_for_txn( 2700 struct binder_node *node, 2701 struct binder_proc **procp, 2702 uint32_t *error) 2703 { 2704 struct binder_node *target_node = NULL; 2705 2706 binder_node_inner_lock(node); 2707 if (node->proc) { 2708 target_node = node; 2709 binder_inc_node_nilocked(node, 1, 0, NULL); 2710 binder_inc_node_tmpref_ilocked(node); 2711 node->proc->tmp_ref++; 2712 *procp = node->proc; 2713 } else 2714 *error = BR_DEAD_REPLY; 2715 binder_node_inner_unlock(node); 2716 2717 return target_node; 2718 } 2719 2720 static void binder_transaction(struct binder_proc *proc, 2721 struct binder_thread *thread, 2722 struct binder_transaction_data *tr, int reply, 2723 binder_size_t extra_buffers_size) 2724 { 2725 int ret; 2726 struct binder_transaction *t; 2727 struct binder_work *tcomplete; 2728 binder_size_t *offp, *off_end, *off_start; 2729 binder_size_t off_min; 2730 u8 *sg_bufp, *sg_buf_end; 2731 struct binder_proc *target_proc = NULL; 2732 struct binder_thread *target_thread = NULL; 2733 struct binder_node *target_node = NULL; 2734 struct binder_transaction *in_reply_to = NULL; 2735 struct binder_transaction_log_entry *e; 2736 uint32_t return_error = 0; 2737 uint32_t return_error_param = 0; 2738 uint32_t return_error_line = 0; 2739 struct binder_buffer_object *last_fixup_obj = NULL; 2740 binder_size_t last_fixup_min_off = 0; 2741 struct binder_context *context = proc->context; 2742 int t_debug_id = atomic_inc_return(&binder_last_id); 2743 2744 e = binder_transaction_log_add(&binder_transaction_log); 2745 e->debug_id = t_debug_id; 2746 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); 2747 e->from_proc = proc->pid; 2748 e->from_thread = thread->pid; 2749 e->target_handle = tr->target.handle; 2750 e->data_size = tr->data_size; 2751 e->offsets_size = tr->offsets_size; 2752 e->context_name = proc->context->name; 2753 2754 if (reply) { 2755 binder_inner_proc_lock(proc); 2756 in_reply_to = thread->transaction_stack; 2757 if (in_reply_to == NULL) { 2758 binder_inner_proc_unlock(proc); 2759 binder_user_error("%d:%d got reply transaction with no transaction stack\n", 2760 proc->pid, thread->pid); 2761 return_error = BR_FAILED_REPLY; 2762 return_error_param = -EPROTO; 2763 return_error_line = __LINE__; 2764 goto err_empty_call_stack; 2765 } 2766 if (in_reply_to->to_thread != thread) { 2767 spin_lock(&in_reply_to->lock); 2768 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n", 2769 proc->pid, thread->pid, in_reply_to->debug_id, 2770 in_reply_to->to_proc ? 2771 in_reply_to->to_proc->pid : 0, 2772 in_reply_to->to_thread ? 
2773 in_reply_to->to_thread->pid : 0); 2774 spin_unlock(&in_reply_to->lock); 2775 binder_inner_proc_unlock(proc); 2776 return_error = BR_FAILED_REPLY; 2777 return_error_param = -EPROTO; 2778 return_error_line = __LINE__; 2779 in_reply_to = NULL; 2780 goto err_bad_call_stack; 2781 } 2782 thread->transaction_stack = in_reply_to->to_parent; 2783 binder_inner_proc_unlock(proc); 2784 binder_set_nice(in_reply_to->saved_priority); 2785 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to); 2786 if (target_thread == NULL) { 2787 return_error = BR_DEAD_REPLY; 2788 return_error_line = __LINE__; 2789 goto err_dead_binder; 2790 } 2791 if (target_thread->transaction_stack != in_reply_to) { 2792 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n", 2793 proc->pid, thread->pid, 2794 target_thread->transaction_stack ? 2795 target_thread->transaction_stack->debug_id : 0, 2796 in_reply_to->debug_id); 2797 binder_inner_proc_unlock(target_thread->proc); 2798 return_error = BR_FAILED_REPLY; 2799 return_error_param = -EPROTO; 2800 return_error_line = __LINE__; 2801 in_reply_to = NULL; 2802 target_thread = NULL; 2803 goto err_dead_binder; 2804 } 2805 target_proc = target_thread->proc; 2806 target_proc->tmp_ref++; 2807 binder_inner_proc_unlock(target_thread->proc); 2808 } else { 2809 if (tr->target.handle) { 2810 struct binder_ref *ref; 2811 2812 /* 2813 * There must already be a strong ref 2814 * on this node. If so, do a strong 2815 * increment on the node to ensure it 2816 * stays alive until the transaction is 2817 * done. 2818 */ 2819 binder_proc_lock(proc); 2820 ref = binder_get_ref_olocked(proc, tr->target.handle, 2821 true); 2822 if (ref) { 2823 target_node = binder_get_node_refs_for_txn( 2824 ref->node, &target_proc, 2825 &return_error); 2826 } else { 2827 binder_user_error("%d:%d got transaction to invalid handle\n", 2828 proc->pid, thread->pid); 2829 return_error = BR_FAILED_REPLY; 2830 } 2831 binder_proc_unlock(proc); 2832 } else { 2833 mutex_lock(&context->context_mgr_node_lock); 2834 target_node = context->binder_context_mgr_node; 2835 if (target_node) 2836 target_node = binder_get_node_refs_for_txn( 2837 target_node, &target_proc, 2838 &return_error); 2839 else 2840 return_error = BR_DEAD_REPLY; 2841 mutex_unlock(&context->context_mgr_node_lock); 2842 if (target_node && target_proc == proc) { 2843 binder_user_error("%d:%d got transaction to context manager from process owning it\n", 2844 proc->pid, thread->pid); 2845 return_error = BR_FAILED_REPLY; 2846 return_error_param = -EINVAL; 2847 return_error_line = __LINE__; 2848 goto err_invalid_target_handle; 2849 } 2850 } 2851 if (!target_node) { 2852 /* 2853 * return_error is set above 2854 */ 2855 return_error_param = -EINVAL; 2856 return_error_line = __LINE__; 2857 goto err_dead_binder; 2858 } 2859 e->to_node = target_node->debug_id; 2860 if (security_binder_transaction(proc->tsk, 2861 target_proc->tsk) < 0) { 2862 return_error = BR_FAILED_REPLY; 2863 return_error_param = -EPERM; 2864 return_error_line = __LINE__; 2865 goto err_invalid_target_handle; 2866 } 2867 binder_inner_proc_lock(proc); 2868 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { 2869 struct binder_transaction *tmp; 2870 2871 tmp = thread->transaction_stack; 2872 if (tmp->to_thread != thread) { 2873 spin_lock(&tmp->lock); 2874 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", 2875 proc->pid, thread->pid, tmp->debug_id, 2876 tmp->to_proc ? 
tmp->to_proc->pid : 0, 2877 tmp->to_thread ? 2878 tmp->to_thread->pid : 0); 2879 spin_unlock(&tmp->lock); 2880 binder_inner_proc_unlock(proc); 2881 return_error = BR_FAILED_REPLY; 2882 return_error_param = -EPROTO; 2883 return_error_line = __LINE__; 2884 goto err_bad_call_stack; 2885 } 2886 while (tmp) { 2887 struct binder_thread *from; 2888 2889 spin_lock(&tmp->lock); 2890 from = tmp->from; 2891 if (from && from->proc == target_proc) { 2892 atomic_inc(&from->tmp_ref); 2893 target_thread = from; 2894 spin_unlock(&tmp->lock); 2895 break; 2896 } 2897 spin_unlock(&tmp->lock); 2898 tmp = tmp->from_parent; 2899 } 2900 } 2901 binder_inner_proc_unlock(proc); 2902 } 2903 if (target_thread) 2904 e->to_thread = target_thread->pid; 2905 e->to_proc = target_proc->pid; 2906 2907 /* TODO: reuse incoming transaction for reply */ 2908 t = kzalloc(sizeof(*t), GFP_KERNEL); 2909 if (t == NULL) { 2910 return_error = BR_FAILED_REPLY; 2911 return_error_param = -ENOMEM; 2912 return_error_line = __LINE__; 2913 goto err_alloc_t_failed; 2914 } 2915 binder_stats_created(BINDER_STAT_TRANSACTION); 2916 spin_lock_init(&t->lock); 2917 2918 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); 2919 if (tcomplete == NULL) { 2920 return_error = BR_FAILED_REPLY; 2921 return_error_param = -ENOMEM; 2922 return_error_line = __LINE__; 2923 goto err_alloc_tcomplete_failed; 2924 } 2925 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); 2926 2927 t->debug_id = t_debug_id; 2928 2929 if (reply) 2930 binder_debug(BINDER_DEBUG_TRANSACTION, 2931 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n", 2932 proc->pid, thread->pid, t->debug_id, 2933 target_proc->pid, target_thread->pid, 2934 (u64)tr->data.ptr.buffer, 2935 (u64)tr->data.ptr.offsets, 2936 (u64)tr->data_size, (u64)tr->offsets_size, 2937 (u64)extra_buffers_size); 2938 else 2939 binder_debug(BINDER_DEBUG_TRANSACTION, 2940 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n", 2941 proc->pid, thread->pid, t->debug_id, 2942 target_proc->pid, target_node->debug_id, 2943 (u64)tr->data.ptr.buffer, 2944 (u64)tr->data.ptr.offsets, 2945 (u64)tr->data_size, (u64)tr->offsets_size, 2946 (u64)extra_buffers_size); 2947 2948 if (!reply && !(tr->flags & TF_ONE_WAY)) 2949 t->from = thread; 2950 else 2951 t->from = NULL; 2952 t->sender_euid = task_euid(proc->tsk); 2953 t->to_proc = target_proc; 2954 t->to_thread = target_thread; 2955 t->code = tr->code; 2956 t->flags = tr->flags; 2957 t->priority = task_nice(current); 2958 2959 trace_binder_transaction(reply, t, target_node); 2960 2961 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, 2962 tr->offsets_size, extra_buffers_size, 2963 !reply && (t->flags & TF_ONE_WAY)); 2964 if (IS_ERR(t->buffer)) { 2965 /* 2966 * -ESRCH indicates VMA cleared. The target is dying. 2967 */ 2968 return_error_param = PTR_ERR(t->buffer); 2969 return_error = return_error_param == -ESRCH ? 
2970 BR_DEAD_REPLY : BR_FAILED_REPLY; 2971 return_error_line = __LINE__; 2972 t->buffer = NULL; 2973 goto err_binder_alloc_buf_failed; 2974 } 2975 t->buffer->allow_user_free = 0; 2976 t->buffer->debug_id = t->debug_id; 2977 t->buffer->transaction = t; 2978 t->buffer->target_node = target_node; 2979 trace_binder_transaction_alloc_buf(t->buffer); 2980 off_start = (binder_size_t *)(t->buffer->data + 2981 ALIGN(tr->data_size, sizeof(void *))); 2982 offp = off_start; 2983 2984 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t) 2985 tr->data.ptr.buffer, tr->data_size)) { 2986 binder_user_error("%d:%d got transaction with invalid data ptr\n", 2987 proc->pid, thread->pid); 2988 return_error = BR_FAILED_REPLY; 2989 return_error_param = -EFAULT; 2990 return_error_line = __LINE__; 2991 goto err_copy_data_failed; 2992 } 2993 if (copy_from_user(offp, (const void __user *)(uintptr_t) 2994 tr->data.ptr.offsets, tr->offsets_size)) { 2995 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 2996 proc->pid, thread->pid); 2997 return_error = BR_FAILED_REPLY; 2998 return_error_param = -EFAULT; 2999 return_error_line = __LINE__; 3000 goto err_copy_data_failed; 3001 } 3002 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) { 3003 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n", 3004 proc->pid, thread->pid, (u64)tr->offsets_size); 3005 return_error = BR_FAILED_REPLY; 3006 return_error_param = -EINVAL; 3007 return_error_line = __LINE__; 3008 goto err_bad_offset; 3009 } 3010 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) { 3011 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n", 3012 proc->pid, thread->pid, 3013 (u64)extra_buffers_size); 3014 return_error = BR_FAILED_REPLY; 3015 return_error_param = -EINVAL; 3016 return_error_line = __LINE__; 3017 goto err_bad_offset; 3018 } 3019 off_end = (void *)off_start + tr->offsets_size; 3020 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *))); 3021 sg_buf_end = sg_bufp + extra_buffers_size; 3022 off_min = 0; 3023 for (; offp < off_end; offp++) { 3024 struct binder_object_header *hdr; 3025 size_t object_size = binder_validate_object(t->buffer, *offp); 3026 3027 if (object_size == 0 || *offp < off_min) { 3028 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n", 3029 proc->pid, thread->pid, (u64)*offp, 3030 (u64)off_min, 3031 (u64)t->buffer->data_size); 3032 return_error = BR_FAILED_REPLY; 3033 return_error_param = -EINVAL; 3034 return_error_line = __LINE__; 3035 goto err_bad_offset; 3036 } 3037 3038 hdr = (struct binder_object_header *)(t->buffer->data + *offp); 3039 off_min = *offp + object_size; 3040 switch (hdr->type) { 3041 case BINDER_TYPE_BINDER: 3042 case BINDER_TYPE_WEAK_BINDER: { 3043 struct flat_binder_object *fp; 3044 3045 fp = to_flat_binder_object(hdr); 3046 ret = binder_translate_binder(fp, t, thread); 3047 if (ret < 0) { 3048 return_error = BR_FAILED_REPLY; 3049 return_error_param = ret; 3050 return_error_line = __LINE__; 3051 goto err_translate_failed; 3052 } 3053 } break; 3054 case BINDER_TYPE_HANDLE: 3055 case BINDER_TYPE_WEAK_HANDLE: { 3056 struct flat_binder_object *fp; 3057 3058 fp = to_flat_binder_object(hdr); 3059 ret = binder_translate_handle(fp, t, thread); 3060 if (ret < 0) { 3061 return_error = BR_FAILED_REPLY; 3062 return_error_param = ret; 3063 return_error_line = __LINE__; 3064 goto err_translate_failed; 3065 } 3066 } break; 3067 3068 case BINDER_TYPE_FD: { 3069 struct binder_fd_object *fp = 
to_binder_fd_object(hdr); 3070 int target_fd = binder_translate_fd(fp->fd, t, thread, 3071 in_reply_to); 3072 3073 if (target_fd < 0) { 3074 return_error = BR_FAILED_REPLY; 3075 return_error_param = target_fd; 3076 return_error_line = __LINE__; 3077 goto err_translate_failed; 3078 } 3079 fp->pad_binder = 0; 3080 fp->fd = target_fd; 3081 } break; 3082 case BINDER_TYPE_FDA: { 3083 struct binder_fd_array_object *fda = 3084 to_binder_fd_array_object(hdr); 3085 struct binder_buffer_object *parent = 3086 binder_validate_ptr(t->buffer, fda->parent, 3087 off_start, 3088 offp - off_start); 3089 if (!parent) { 3090 binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 3091 proc->pid, thread->pid); 3092 return_error = BR_FAILED_REPLY; 3093 return_error_param = -EINVAL; 3094 return_error_line = __LINE__; 3095 goto err_bad_parent; 3096 } 3097 if (!binder_validate_fixup(t->buffer, off_start, 3098 parent, fda->parent_offset, 3099 last_fixup_obj, 3100 last_fixup_min_off)) { 3101 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", 3102 proc->pid, thread->pid); 3103 return_error = BR_FAILED_REPLY; 3104 return_error_param = -EINVAL; 3105 return_error_line = __LINE__; 3106 goto err_bad_parent; 3107 } 3108 ret = binder_translate_fd_array(fda, parent, t, thread, 3109 in_reply_to); 3110 if (ret < 0) { 3111 return_error = BR_FAILED_REPLY; 3112 return_error_param = ret; 3113 return_error_line = __LINE__; 3114 goto err_translate_failed; 3115 } 3116 last_fixup_obj = parent; 3117 last_fixup_min_off = 3118 fda->parent_offset + sizeof(u32) * fda->num_fds; 3119 } break; 3120 case BINDER_TYPE_PTR: { 3121 struct binder_buffer_object *bp = 3122 to_binder_buffer_object(hdr); 3123 size_t buf_left = sg_buf_end - sg_bufp; 3124 3125 if (bp->length > buf_left) { 3126 binder_user_error("%d:%d got transaction with too large buffer\n", 3127 proc->pid, thread->pid); 3128 return_error = BR_FAILED_REPLY; 3129 return_error_param = -EINVAL; 3130 return_error_line = __LINE__; 3131 goto err_bad_offset; 3132 } 3133 if (copy_from_user(sg_bufp, 3134 (const void __user *)(uintptr_t) 3135 bp->buffer, bp->length)) { 3136 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 3137 proc->pid, thread->pid); 3138 return_error_param = -EFAULT; 3139 return_error = BR_FAILED_REPLY; 3140 return_error_line = __LINE__; 3141 goto err_copy_data_failed; 3142 } 3143 /* Fixup buffer pointer to target proc address space */ 3144 bp->buffer = (uintptr_t)sg_bufp + 3145 binder_alloc_get_user_buffer_offset( 3146 &target_proc->alloc); 3147 sg_bufp += ALIGN(bp->length, sizeof(u64)); 3148 3149 ret = binder_fixup_parent(t, thread, bp, off_start, 3150 offp - off_start, 3151 last_fixup_obj, 3152 last_fixup_min_off); 3153 if (ret < 0) { 3154 return_error = BR_FAILED_REPLY; 3155 return_error_param = ret; 3156 return_error_line = __LINE__; 3157 goto err_translate_failed; 3158 } 3159 last_fixup_obj = bp; 3160 last_fixup_min_off = 0; 3161 } break; 3162 default: 3163 binder_user_error("%d:%d got transaction with invalid object type, %x\n", 3164 proc->pid, thread->pid, hdr->type); 3165 return_error = BR_FAILED_REPLY; 3166 return_error_param = -EINVAL; 3167 return_error_line = __LINE__; 3168 goto err_bad_object_type; 3169 } 3170 } 3171 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; 3172 t->work.type = BINDER_WORK_TRANSACTION; 3173 3174 if (reply) { 3175 binder_enqueue_thread_work(thread, tcomplete); 3176 binder_inner_proc_lock(target_proc); 3177 if (target_thread->is_dead) { 3178 
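			/*
			 * The thread that issued the original
			 * transaction died while this reply was being
			 * set up; undo the work and report
			 * BR_DEAD_REPLY through the error path.
			 */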
binder_inner_proc_unlock(target_proc); 3179 goto err_dead_proc_or_thread; 3180 } 3181 BUG_ON(t->buffer->async_transaction != 0); 3182 binder_pop_transaction_ilocked(target_thread, in_reply_to); 3183 binder_enqueue_thread_work_ilocked(target_thread, &t->work); 3184 binder_inner_proc_unlock(target_proc); 3185 wake_up_interruptible_sync(&target_thread->wait); 3186 binder_free_transaction(in_reply_to); 3187 } else if (!(t->flags & TF_ONE_WAY)) { 3188 BUG_ON(t->buffer->async_transaction != 0); 3189 binder_inner_proc_lock(proc); 3190 /* 3191 * Defer the TRANSACTION_COMPLETE, so we don't return to 3192 * userspace immediately; this allows the target process to 3193 * immediately start processing this transaction, reducing 3194 * latency. We will then return the TRANSACTION_COMPLETE when 3195 * the target replies (or there is an error). 3196 */ 3197 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete); 3198 t->need_reply = 1; 3199 t->from_parent = thread->transaction_stack; 3200 thread->transaction_stack = t; 3201 binder_inner_proc_unlock(proc); 3202 if (!binder_proc_transaction(t, target_proc, target_thread)) { 3203 binder_inner_proc_lock(proc); 3204 binder_pop_transaction_ilocked(thread, t); 3205 binder_inner_proc_unlock(proc); 3206 goto err_dead_proc_or_thread; 3207 } 3208 } else { 3209 BUG_ON(target_node == NULL); 3210 BUG_ON(t->buffer->async_transaction != 1); 3211 binder_enqueue_thread_work(thread, tcomplete); 3212 if (!binder_proc_transaction(t, target_proc, NULL)) 3213 goto err_dead_proc_or_thread; 3214 } 3215 if (target_thread) 3216 binder_thread_dec_tmpref(target_thread); 3217 binder_proc_dec_tmpref(target_proc); 3218 if (target_node) 3219 binder_dec_node_tmpref(target_node); 3220 /* 3221 * write barrier to synchronize with initialization 3222 * of log entry 3223 */ 3224 smp_wmb(); 3225 WRITE_ONCE(e->debug_id_done, t_debug_id); 3226 return; 3227 3228 err_dead_proc_or_thread: 3229 return_error = BR_DEAD_REPLY; 3230 return_error_line = __LINE__; 3231 binder_dequeue_work(proc, tcomplete); 3232 err_translate_failed: 3233 err_bad_object_type: 3234 err_bad_offset: 3235 err_bad_parent: 3236 err_copy_data_failed: 3237 trace_binder_transaction_failed_buffer_release(t->buffer); 3238 binder_transaction_buffer_release(target_proc, t->buffer, offp); 3239 if (target_node) 3240 binder_dec_node_tmpref(target_node); 3241 target_node = NULL; 3242 t->buffer->transaction = NULL; 3243 binder_alloc_free_buf(&target_proc->alloc, t->buffer); 3244 err_binder_alloc_buf_failed: 3245 kfree(tcomplete); 3246 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 3247 err_alloc_tcomplete_failed: 3248 kfree(t); 3249 binder_stats_deleted(BINDER_STAT_TRANSACTION); 3250 err_alloc_t_failed: 3251 err_bad_call_stack: 3252 err_empty_call_stack: 3253 err_dead_binder: 3254 err_invalid_target_handle: 3255 if (target_thread) 3256 binder_thread_dec_tmpref(target_thread); 3257 if (target_proc) 3258 binder_proc_dec_tmpref(target_proc); 3259 if (target_node) { 3260 binder_dec_node(target_node, 1, 0); 3261 binder_dec_node_tmpref(target_node); 3262 } 3263 3264 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 3265 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n", 3266 proc->pid, thread->pid, return_error, return_error_param, 3267 (u64)tr->data_size, (u64)tr->offsets_size, 3268 return_error_line); 3269 3270 { 3271 struct binder_transaction_log_entry *fe; 3272 3273 e->return_error = return_error; 3274 e->return_error_param = return_error_param; 3275 e->return_error_line = return_error_line; 3276 fe = 
binder_transaction_log_add(&binder_transaction_log_failed); 3277 *fe = *e; 3278 /* 3279 * write barrier to synchronize with initialization 3280 * of log entry 3281 */ 3282 smp_wmb(); 3283 WRITE_ONCE(e->debug_id_done, t_debug_id); 3284 WRITE_ONCE(fe->debug_id_done, t_debug_id); 3285 } 3286 3287 BUG_ON(thread->return_error.cmd != BR_OK); 3288 if (in_reply_to) { 3289 thread->return_error.cmd = BR_TRANSACTION_COMPLETE; 3290 binder_enqueue_thread_work(thread, &thread->return_error.work); 3291 binder_send_failed_reply(in_reply_to, return_error); 3292 } else { 3293 thread->return_error.cmd = return_error; 3294 binder_enqueue_thread_work(thread, &thread->return_error.work); 3295 } 3296 } 3297 3298 static int binder_thread_write(struct binder_proc *proc, 3299 struct binder_thread *thread, 3300 binder_uintptr_t binder_buffer, size_t size, 3301 binder_size_t *consumed) 3302 { 3303 uint32_t cmd; 3304 struct binder_context *context = proc->context; 3305 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 3306 void __user *ptr = buffer + *consumed; 3307 void __user *end = buffer + size; 3308 3309 while (ptr < end && thread->return_error.cmd == BR_OK) { 3310 int ret; 3311 3312 if (get_user(cmd, (uint32_t __user *)ptr)) 3313 return -EFAULT; 3314 ptr += sizeof(uint32_t); 3315 trace_binder_command(cmd); 3316 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { 3317 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]); 3318 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]); 3319 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]); 3320 } 3321 switch (cmd) { 3322 case BC_INCREFS: 3323 case BC_ACQUIRE: 3324 case BC_RELEASE: 3325 case BC_DECREFS: { 3326 uint32_t target; 3327 const char *debug_string; 3328 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE; 3329 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE; 3330 struct binder_ref_data rdata; 3331 3332 if (get_user(target, (uint32_t __user *)ptr)) 3333 return -EFAULT; 3334 3335 ptr += sizeof(uint32_t); 3336 ret = -1; 3337 if (increment && !target) { 3338 struct binder_node *ctx_mgr_node; 3339 mutex_lock(&context->context_mgr_node_lock); 3340 ctx_mgr_node = context->binder_context_mgr_node; 3341 if (ctx_mgr_node) 3342 ret = binder_inc_ref_for_node( 3343 proc, ctx_mgr_node, 3344 strong, NULL, &rdata); 3345 mutex_unlock(&context->context_mgr_node_lock); 3346 } 3347 if (ret) 3348 ret = binder_update_ref_for_handle( 3349 proc, target, increment, strong, 3350 &rdata); 3351 if (!ret && rdata.desc != target) { 3352 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n", 3353 proc->pid, thread->pid, 3354 target, rdata.desc); 3355 } 3356 switch (cmd) { 3357 case BC_INCREFS: 3358 debug_string = "IncRefs"; 3359 break; 3360 case BC_ACQUIRE: 3361 debug_string = "Acquire"; 3362 break; 3363 case BC_RELEASE: 3364 debug_string = "Release"; 3365 break; 3366 case BC_DECREFS: 3367 default: 3368 debug_string = "DecRefs"; 3369 break; 3370 } 3371 if (ret) { 3372 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n", 3373 proc->pid, thread->pid, debug_string, 3374 strong, target, ret); 3375 break; 3376 } 3377 binder_debug(BINDER_DEBUG_USER_REFS, 3378 "%d:%d %s ref %d desc %d s %d w %d\n", 3379 proc->pid, thread->pid, debug_string, 3380 rdata.debug_id, rdata.desc, rdata.strong, 3381 rdata.weak); 3382 break; 3383 } 3384 case BC_INCREFS_DONE: 3385 case BC_ACQUIRE_DONE: { 3386 binder_uintptr_t node_ptr; 3387 binder_uintptr_t cookie; 3388 struct binder_node *node; 3389 bool free_node; 3390 3391 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr)) 
3392 return -EFAULT; 3393 ptr += sizeof(binder_uintptr_t); 3394 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3395 return -EFAULT; 3396 ptr += sizeof(binder_uintptr_t); 3397 node = binder_get_node(proc, node_ptr); 3398 if (node == NULL) { 3399 binder_user_error("%d:%d %s u%016llx no match\n", 3400 proc->pid, thread->pid, 3401 cmd == BC_INCREFS_DONE ? 3402 "BC_INCREFS_DONE" : 3403 "BC_ACQUIRE_DONE", 3404 (u64)node_ptr); 3405 break; 3406 } 3407 if (cookie != node->cookie) { 3408 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n", 3409 proc->pid, thread->pid, 3410 cmd == BC_INCREFS_DONE ? 3411 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 3412 (u64)node_ptr, node->debug_id, 3413 (u64)cookie, (u64)node->cookie); 3414 binder_put_node(node); 3415 break; 3416 } 3417 binder_node_inner_lock(node); 3418 if (cmd == BC_ACQUIRE_DONE) { 3419 if (node->pending_strong_ref == 0) { 3420 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", 3421 proc->pid, thread->pid, 3422 node->debug_id); 3423 binder_node_inner_unlock(node); 3424 binder_put_node(node); 3425 break; 3426 } 3427 node->pending_strong_ref = 0; 3428 } else { 3429 if (node->pending_weak_ref == 0) { 3430 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", 3431 proc->pid, thread->pid, 3432 node->debug_id); 3433 binder_node_inner_unlock(node); 3434 binder_put_node(node); 3435 break; 3436 } 3437 node->pending_weak_ref = 0; 3438 } 3439 free_node = binder_dec_node_nilocked(node, 3440 cmd == BC_ACQUIRE_DONE, 0); 3441 WARN_ON(free_node); 3442 binder_debug(BINDER_DEBUG_USER_REFS, 3443 "%d:%d %s node %d ls %d lw %d tr %d\n", 3444 proc->pid, thread->pid, 3445 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 3446 node->debug_id, node->local_strong_refs, 3447 node->local_weak_refs, node->tmp_refs); 3448 binder_node_inner_unlock(node); 3449 binder_put_node(node); 3450 break; 3451 } 3452 case BC_ATTEMPT_ACQUIRE: 3453 pr_err("BC_ATTEMPT_ACQUIRE not supported\n"); 3454 return -EINVAL; 3455 case BC_ACQUIRE_RESULT: 3456 pr_err("BC_ACQUIRE_RESULT not supported\n"); 3457 return -EINVAL; 3458 3459 case BC_FREE_BUFFER: { 3460 binder_uintptr_t data_ptr; 3461 struct binder_buffer *buffer; 3462 3463 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr)) 3464 return -EFAULT; 3465 ptr += sizeof(binder_uintptr_t); 3466 3467 buffer = binder_alloc_prepare_to_free(&proc->alloc, 3468 data_ptr); 3469 if (buffer == NULL) { 3470 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n", 3471 proc->pid, thread->pid, (u64)data_ptr); 3472 break; 3473 } 3474 if (!buffer->allow_user_free) { 3475 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n", 3476 proc->pid, thread->pid, (u64)data_ptr); 3477 break; 3478 } 3479 binder_debug(BINDER_DEBUG_FREE_BUFFER, 3480 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n", 3481 proc->pid, thread->pid, (u64)data_ptr, 3482 buffer->debug_id, 3483 buffer->transaction ? 
"active" : "finished"); 3484 3485 if (buffer->transaction) { 3486 buffer->transaction->buffer = NULL; 3487 buffer->transaction = NULL; 3488 } 3489 if (buffer->async_transaction && buffer->target_node) { 3490 struct binder_node *buf_node; 3491 struct binder_work *w; 3492 3493 buf_node = buffer->target_node; 3494 binder_node_inner_lock(buf_node); 3495 BUG_ON(!buf_node->has_async_transaction); 3496 BUG_ON(buf_node->proc != proc); 3497 w = binder_dequeue_work_head_ilocked( 3498 &buf_node->async_todo); 3499 if (!w) { 3500 buf_node->has_async_transaction = false; 3501 } else { 3502 binder_enqueue_work_ilocked( 3503 w, &proc->todo); 3504 binder_wakeup_proc_ilocked(proc); 3505 } 3506 binder_node_inner_unlock(buf_node); 3507 } 3508 trace_binder_transaction_buffer_release(buffer); 3509 binder_transaction_buffer_release(proc, buffer, NULL); 3510 binder_alloc_free_buf(&proc->alloc, buffer); 3511 break; 3512 } 3513 3514 case BC_TRANSACTION_SG: 3515 case BC_REPLY_SG: { 3516 struct binder_transaction_data_sg tr; 3517 3518 if (copy_from_user(&tr, ptr, sizeof(tr))) 3519 return -EFAULT; 3520 ptr += sizeof(tr); 3521 binder_transaction(proc, thread, &tr.transaction_data, 3522 cmd == BC_REPLY_SG, tr.buffers_size); 3523 break; 3524 } 3525 case BC_TRANSACTION: 3526 case BC_REPLY: { 3527 struct binder_transaction_data tr; 3528 3529 if (copy_from_user(&tr, ptr, sizeof(tr))) 3530 return -EFAULT; 3531 ptr += sizeof(tr); 3532 binder_transaction(proc, thread, &tr, 3533 cmd == BC_REPLY, 0); 3534 break; 3535 } 3536 3537 case BC_REGISTER_LOOPER: 3538 binder_debug(BINDER_DEBUG_THREADS, 3539 "%d:%d BC_REGISTER_LOOPER\n", 3540 proc->pid, thread->pid); 3541 binder_inner_proc_lock(proc); 3542 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { 3543 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3544 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", 3545 proc->pid, thread->pid); 3546 } else if (proc->requested_threads == 0) { 3547 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3548 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n", 3549 proc->pid, thread->pid); 3550 } else { 3551 proc->requested_threads--; 3552 proc->requested_threads_started++; 3553 } 3554 thread->looper |= BINDER_LOOPER_STATE_REGISTERED; 3555 binder_inner_proc_unlock(proc); 3556 break; 3557 case BC_ENTER_LOOPER: 3558 binder_debug(BINDER_DEBUG_THREADS, 3559 "%d:%d BC_ENTER_LOOPER\n", 3560 proc->pid, thread->pid); 3561 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { 3562 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3563 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n", 3564 proc->pid, thread->pid); 3565 } 3566 thread->looper |= BINDER_LOOPER_STATE_ENTERED; 3567 break; 3568 case BC_EXIT_LOOPER: 3569 binder_debug(BINDER_DEBUG_THREADS, 3570 "%d:%d BC_EXIT_LOOPER\n", 3571 proc->pid, thread->pid); 3572 thread->looper |= BINDER_LOOPER_STATE_EXITED; 3573 break; 3574 3575 case BC_REQUEST_DEATH_NOTIFICATION: 3576 case BC_CLEAR_DEATH_NOTIFICATION: { 3577 uint32_t target; 3578 binder_uintptr_t cookie; 3579 struct binder_ref *ref; 3580 struct binder_ref_death *death = NULL; 3581 3582 if (get_user(target, (uint32_t __user *)ptr)) 3583 return -EFAULT; 3584 ptr += sizeof(uint32_t); 3585 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3586 return -EFAULT; 3587 ptr += sizeof(binder_uintptr_t); 3588 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 3589 /* 3590 * Allocate memory for death notification 3591 * before taking lock 3592 */ 3593 death = kzalloc(sizeof(*death), 
GFP_KERNEL); 3594 if (death == NULL) { 3595 WARN_ON(thread->return_error.cmd != 3596 BR_OK); 3597 thread->return_error.cmd = BR_ERROR; 3598 binder_enqueue_thread_work( 3599 thread, 3600 &thread->return_error.work); 3601 binder_debug( 3602 BINDER_DEBUG_FAILED_TRANSACTION, 3603 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", 3604 proc->pid, thread->pid); 3605 break; 3606 } 3607 } 3608 binder_proc_lock(proc); 3609 ref = binder_get_ref_olocked(proc, target, false); 3610 if (ref == NULL) { 3611 binder_user_error("%d:%d %s invalid ref %d\n", 3612 proc->pid, thread->pid, 3613 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 3614 "BC_REQUEST_DEATH_NOTIFICATION" : 3615 "BC_CLEAR_DEATH_NOTIFICATION", 3616 target); 3617 binder_proc_unlock(proc); 3618 kfree(death); 3619 break; 3620 } 3621 3622 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 3623 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n", 3624 proc->pid, thread->pid, 3625 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 3626 "BC_REQUEST_DEATH_NOTIFICATION" : 3627 "BC_CLEAR_DEATH_NOTIFICATION", 3628 (u64)cookie, ref->data.debug_id, 3629 ref->data.desc, ref->data.strong, 3630 ref->data.weak, ref->node->debug_id); 3631 3632 binder_node_lock(ref->node); 3633 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 3634 if (ref->death) { 3635 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", 3636 proc->pid, thread->pid); 3637 binder_node_unlock(ref->node); 3638 binder_proc_unlock(proc); 3639 kfree(death); 3640 break; 3641 } 3642 binder_stats_created(BINDER_STAT_DEATH); 3643 INIT_LIST_HEAD(&death->work.entry); 3644 death->cookie = cookie; 3645 ref->death = death; 3646 if (ref->node->proc == NULL) { 3647 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 3648 3649 binder_inner_proc_lock(proc); 3650 binder_enqueue_work_ilocked( 3651 &ref->death->work, &proc->todo); 3652 binder_wakeup_proc_ilocked(proc); 3653 binder_inner_proc_unlock(proc); 3654 } 3655 } else { 3656 if (ref->death == NULL) { 3657 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", 3658 proc->pid, thread->pid); 3659 binder_node_unlock(ref->node); 3660 binder_proc_unlock(proc); 3661 break; 3662 } 3663 death = ref->death; 3664 if (death->cookie != cookie) { 3665 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n", 3666 proc->pid, thread->pid, 3667 (u64)death->cookie, 3668 (u64)cookie); 3669 binder_node_unlock(ref->node); 3670 binder_proc_unlock(proc); 3671 break; 3672 } 3673 ref->death = NULL; 3674 binder_inner_proc_lock(proc); 3675 if (list_empty(&death->work.entry)) { 3676 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 3677 if (thread->looper & 3678 (BINDER_LOOPER_STATE_REGISTERED | 3679 BINDER_LOOPER_STATE_ENTERED)) 3680 binder_enqueue_thread_work_ilocked( 3681 thread, 3682 &death->work); 3683 else { 3684 binder_enqueue_work_ilocked( 3685 &death->work, 3686 &proc->todo); 3687 binder_wakeup_proc_ilocked( 3688 proc); 3689 } 3690 } else { 3691 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); 3692 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; 3693 } 3694 binder_inner_proc_unlock(proc); 3695 } 3696 binder_node_unlock(ref->node); 3697 binder_proc_unlock(proc); 3698 } break; 3699 case BC_DEAD_BINDER_DONE: { 3700 struct binder_work *w; 3701 binder_uintptr_t cookie; 3702 struct binder_ref_death *death = NULL; 3703 3704 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3705 return -EFAULT; 3706 3707 ptr += sizeof(cookie); 3708 binder_inner_proc_lock(proc); 3709 
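			/*
			 * Find the delivered death notification matching
			 * this cookie on proc->delivered_death so it can
			 * be dequeued and, if a clear was requested in
			 * the meantime, re-queued as a clear
			 * notification.
			 */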
list_for_each_entry(w, &proc->delivered_death, 3710 entry) { 3711 struct binder_ref_death *tmp_death = 3712 container_of(w, 3713 struct binder_ref_death, 3714 work); 3715 3716 if (tmp_death->cookie == cookie) { 3717 death = tmp_death; 3718 break; 3719 } 3720 } 3721 binder_debug(BINDER_DEBUG_DEAD_BINDER, 3722 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n", 3723 proc->pid, thread->pid, (u64)cookie, 3724 death); 3725 if (death == NULL) { 3726 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n", 3727 proc->pid, thread->pid, (u64)cookie); 3728 binder_inner_proc_unlock(proc); 3729 break; 3730 } 3731 binder_dequeue_work_ilocked(&death->work); 3732 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { 3733 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 3734 if (thread->looper & 3735 (BINDER_LOOPER_STATE_REGISTERED | 3736 BINDER_LOOPER_STATE_ENTERED)) 3737 binder_enqueue_thread_work_ilocked( 3738 thread, &death->work); 3739 else { 3740 binder_enqueue_work_ilocked( 3741 &death->work, 3742 &proc->todo); 3743 binder_wakeup_proc_ilocked(proc); 3744 } 3745 } 3746 binder_inner_proc_unlock(proc); 3747 } break; 3748 3749 default: 3750 pr_err("%d:%d unknown command %d\n", 3751 proc->pid, thread->pid, cmd); 3752 return -EINVAL; 3753 } 3754 *consumed = ptr - buffer; 3755 } 3756 return 0; 3757 } 3758 3759 static void binder_stat_br(struct binder_proc *proc, 3760 struct binder_thread *thread, uint32_t cmd) 3761 { 3762 trace_binder_return(cmd); 3763 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { 3764 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]); 3765 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]); 3766 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]); 3767 } 3768 } 3769 3770 static int binder_put_node_cmd(struct binder_proc *proc, 3771 struct binder_thread *thread, 3772 void __user **ptrp, 3773 binder_uintptr_t node_ptr, 3774 binder_uintptr_t node_cookie, 3775 int node_debug_id, 3776 uint32_t cmd, const char *cmd_name) 3777 { 3778 void __user *ptr = *ptrp; 3779 3780 if (put_user(cmd, (uint32_t __user *)ptr)) 3781 return -EFAULT; 3782 ptr += sizeof(uint32_t); 3783 3784 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr)) 3785 return -EFAULT; 3786 ptr += sizeof(binder_uintptr_t); 3787 3788 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr)) 3789 return -EFAULT; 3790 ptr += sizeof(binder_uintptr_t); 3791 3792 binder_stat_br(proc, thread, cmd); 3793 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n", 3794 proc->pid, thread->pid, cmd_name, node_debug_id, 3795 (u64)node_ptr, (u64)node_cookie); 3796 3797 *ptrp = ptr; 3798 return 0; 3799 } 3800 3801 static int binder_wait_for_work(struct binder_thread *thread, 3802 bool do_proc_work) 3803 { 3804 DEFINE_WAIT(wait); 3805 struct binder_proc *proc = thread->proc; 3806 int ret = 0; 3807 3808 freezer_do_not_count(); 3809 binder_inner_proc_lock(proc); 3810 for (;;) { 3811 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE); 3812 if (binder_has_work_ilocked(thread, do_proc_work)) 3813 break; 3814 if (do_proc_work) 3815 list_add(&thread->waiting_thread_node, 3816 &proc->waiting_threads); 3817 binder_inner_proc_unlock(proc); 3818 schedule(); 3819 binder_inner_proc_lock(proc); 3820 list_del_init(&thread->waiting_thread_node); 3821 if (signal_pending(current)) { 3822 ret = -ERESTARTSYS; 3823 break; 3824 } 3825 } 3826 finish_wait(&thread->wait, &wait); 3827 binder_inner_proc_unlock(proc); 3828 freezer_count(); 3829 3830 return ret; 3831 } 3832 3833 static int binder_thread_read(struct binder_proc *proc, 3834 struct 
binder_thread *thread, 3835 binder_uintptr_t binder_buffer, size_t size, 3836 binder_size_t *consumed, int non_block) 3837 { 3838 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 3839 void __user *ptr = buffer + *consumed; 3840 void __user *end = buffer + size; 3841 3842 int ret = 0; 3843 int wait_for_proc_work; 3844 3845 if (*consumed == 0) { 3846 if (put_user(BR_NOOP, (uint32_t __user *)ptr)) 3847 return -EFAULT; 3848 ptr += sizeof(uint32_t); 3849 } 3850 3851 retry: 3852 binder_inner_proc_lock(proc); 3853 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); 3854 binder_inner_proc_unlock(proc); 3855 3856 thread->looper |= BINDER_LOOPER_STATE_WAITING; 3857 3858 trace_binder_wait_for_work(wait_for_proc_work, 3859 !!thread->transaction_stack, 3860 !binder_worklist_empty(proc, &thread->todo)); 3861 if (wait_for_proc_work) { 3862 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 3863 BINDER_LOOPER_STATE_ENTERED))) { 3864 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n", 3865 proc->pid, thread->pid, thread->looper); 3866 wait_event_interruptible(binder_user_error_wait, 3867 binder_stop_on_user_error < 2); 3868 } 3869 binder_set_nice(proc->default_priority); 3870 } 3871 3872 if (non_block) { 3873 if (!binder_has_work(thread, wait_for_proc_work)) 3874 ret = -EAGAIN; 3875 } else { 3876 ret = binder_wait_for_work(thread, wait_for_proc_work); 3877 } 3878 3879 thread->looper &= ~BINDER_LOOPER_STATE_WAITING; 3880 3881 if (ret) 3882 return ret; 3883 3884 while (1) { 3885 uint32_t cmd; 3886 struct binder_transaction_data tr; 3887 struct binder_work *w = NULL; 3888 struct list_head *list = NULL; 3889 struct binder_transaction *t = NULL; 3890 struct binder_thread *t_from; 3891 3892 binder_inner_proc_lock(proc); 3893 if (!binder_worklist_empty_ilocked(&thread->todo)) 3894 list = &thread->todo; 3895 else if (!binder_worklist_empty_ilocked(&proc->todo) && 3896 wait_for_proc_work) 3897 list = &proc->todo; 3898 else { 3899 binder_inner_proc_unlock(proc); 3900 3901 /* no data added */ 3902 if (ptr - buffer == 4 && !thread->looper_need_return) 3903 goto retry; 3904 break; 3905 } 3906 3907 if (end - ptr < sizeof(tr) + 4) { 3908 binder_inner_proc_unlock(proc); 3909 break; 3910 } 3911 w = binder_dequeue_work_head_ilocked(list); 3912 if (binder_worklist_empty_ilocked(&thread->todo)) 3913 thread->process_todo = false; 3914 3915 switch (w->type) { 3916 case BINDER_WORK_TRANSACTION: { 3917 binder_inner_proc_unlock(proc); 3918 t = container_of(w, struct binder_transaction, work); 3919 } break; 3920 case BINDER_WORK_RETURN_ERROR: { 3921 struct binder_error *e = container_of( 3922 w, struct binder_error, work); 3923 3924 WARN_ON(e->cmd == BR_OK); 3925 binder_inner_proc_unlock(proc); 3926 if (put_user(e->cmd, (uint32_t __user *)ptr)) 3927 return -EFAULT; 3928 e->cmd = BR_OK; 3929 ptr += sizeof(uint32_t); 3930 3931 binder_stat_br(proc, thread, e->cmd); 3932 } break; 3933 case BINDER_WORK_TRANSACTION_COMPLETE: { 3934 binder_inner_proc_unlock(proc); 3935 cmd = BR_TRANSACTION_COMPLETE; 3936 if (put_user(cmd, (uint32_t __user *)ptr)) 3937 return -EFAULT; 3938 ptr += sizeof(uint32_t); 3939 3940 binder_stat_br(proc, thread, cmd); 3941 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, 3942 "%d:%d BR_TRANSACTION_COMPLETE\n", 3943 proc->pid, thread->pid); 3944 kfree(w); 3945 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 3946 } break; 3947 case BINDER_WORK_NODE: { 3948 struct binder_node *node = 
container_of(w, struct binder_node, work); 3949 int strong, weak; 3950 binder_uintptr_t node_ptr = node->ptr; 3951 binder_uintptr_t node_cookie = node->cookie; 3952 int node_debug_id = node->debug_id; 3953 int has_weak_ref; 3954 int has_strong_ref; 3955 void __user *orig_ptr = ptr; 3956 3957 BUG_ON(proc != node->proc); 3958 strong = node->internal_strong_refs || 3959 node->local_strong_refs; 3960 weak = !hlist_empty(&node->refs) || 3961 node->local_weak_refs || 3962 node->tmp_refs || strong; 3963 has_strong_ref = node->has_strong_ref; 3964 has_weak_ref = node->has_weak_ref; 3965 3966 if (weak && !has_weak_ref) { 3967 node->has_weak_ref = 1; 3968 node->pending_weak_ref = 1; 3969 node->local_weak_refs++; 3970 } 3971 if (strong && !has_strong_ref) { 3972 node->has_strong_ref = 1; 3973 node->pending_strong_ref = 1; 3974 node->local_strong_refs++; 3975 } 3976 if (!strong && has_strong_ref) 3977 node->has_strong_ref = 0; 3978 if (!weak && has_weak_ref) 3979 node->has_weak_ref = 0; 3980 if (!weak && !strong) { 3981 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 3982 "%d:%d node %d u%016llx c%016llx deleted\n", 3983 proc->pid, thread->pid, 3984 node_debug_id, 3985 (u64)node_ptr, 3986 (u64)node_cookie); 3987 rb_erase(&node->rb_node, &proc->nodes); 3988 binder_inner_proc_unlock(proc); 3989 binder_node_lock(node); 3990 /* 3991 * Acquire the node lock before freeing the 3992 * node to serialize with other threads that 3993 * may have been holding the node lock while 3994 * decrementing this node (avoids race where 3995 * this thread frees while the other thread 3996 * is unlocking the node after the final 3997 * decrement) 3998 */ 3999 binder_node_unlock(node); 4000 binder_free_node(node); 4001 } else 4002 binder_inner_proc_unlock(proc); 4003 4004 if (weak && !has_weak_ref) 4005 ret = binder_put_node_cmd( 4006 proc, thread, &ptr, node_ptr, 4007 node_cookie, node_debug_id, 4008 BR_INCREFS, "BR_INCREFS"); 4009 if (!ret && strong && !has_strong_ref) 4010 ret = binder_put_node_cmd( 4011 proc, thread, &ptr, node_ptr, 4012 node_cookie, node_debug_id, 4013 BR_ACQUIRE, "BR_ACQUIRE"); 4014 if (!ret && !strong && has_strong_ref) 4015 ret = binder_put_node_cmd( 4016 proc, thread, &ptr, node_ptr, 4017 node_cookie, node_debug_id, 4018 BR_RELEASE, "BR_RELEASE"); 4019 if (!ret && !weak && has_weak_ref) 4020 ret = binder_put_node_cmd( 4021 proc, thread, &ptr, node_ptr, 4022 node_cookie, node_debug_id, 4023 BR_DECREFS, "BR_DECREFS"); 4024 if (orig_ptr == ptr) 4025 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 4026 "%d:%d node %d u%016llx c%016llx state unchanged\n", 4027 proc->pid, thread->pid, 4028 node_debug_id, 4029 (u64)node_ptr, 4030 (u64)node_cookie); 4031 if (ret) 4032 return ret; 4033 } break; 4034 case BINDER_WORK_DEAD_BINDER: 4035 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 4036 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 4037 struct binder_ref_death *death; 4038 uint32_t cmd; 4039 binder_uintptr_t cookie; 4040 4041 death = container_of(w, struct binder_ref_death, work); 4042 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) 4043 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; 4044 else 4045 cmd = BR_DEAD_BINDER; 4046 cookie = death->cookie; 4047 4048 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 4049 "%d:%d %s %016llx\n", 4050 proc->pid, thread->pid, 4051 cmd == BR_DEAD_BINDER ? 
4052 "BR_DEAD_BINDER" : 4053 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 4054 (u64)cookie); 4055 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { 4056 binder_inner_proc_unlock(proc); 4057 kfree(death); 4058 binder_stats_deleted(BINDER_STAT_DEATH); 4059 } else { 4060 binder_enqueue_work_ilocked( 4061 w, &proc->delivered_death); 4062 binder_inner_proc_unlock(proc); 4063 } 4064 if (put_user(cmd, (uint32_t __user *)ptr)) 4065 return -EFAULT; 4066 ptr += sizeof(uint32_t); 4067 if (put_user(cookie, 4068 (binder_uintptr_t __user *)ptr)) 4069 return -EFAULT; 4070 ptr += sizeof(binder_uintptr_t); 4071 binder_stat_br(proc, thread, cmd); 4072 if (cmd == BR_DEAD_BINDER) 4073 goto done; /* DEAD_BINDER notifications can cause transactions */ 4074 } break; 4075 } 4076 4077 if (!t) 4078 continue; 4079 4080 BUG_ON(t->buffer == NULL); 4081 if (t->buffer->target_node) { 4082 struct binder_node *target_node = t->buffer->target_node; 4083 4084 tr.target.ptr = target_node->ptr; 4085 tr.cookie = target_node->cookie; 4086 t->saved_priority = task_nice(current); 4087 if (t->priority < target_node->min_priority && 4088 !(t->flags & TF_ONE_WAY)) 4089 binder_set_nice(t->priority); 4090 else if (!(t->flags & TF_ONE_WAY) || 4091 t->saved_priority > target_node->min_priority) 4092 binder_set_nice(target_node->min_priority); 4093 cmd = BR_TRANSACTION; 4094 } else { 4095 tr.target.ptr = 0; 4096 tr.cookie = 0; 4097 cmd = BR_REPLY; 4098 } 4099 tr.code = t->code; 4100 tr.flags = t->flags; 4101 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); 4102 4103 t_from = binder_get_txn_from(t); 4104 if (t_from) { 4105 struct task_struct *sender = t_from->proc->tsk; 4106 4107 tr.sender_pid = task_tgid_nr_ns(sender, 4108 task_active_pid_ns(current)); 4109 } else { 4110 tr.sender_pid = 0; 4111 } 4112 4113 tr.data_size = t->buffer->data_size; 4114 tr.offsets_size = t->buffer->offsets_size; 4115 tr.data.ptr.buffer = (binder_uintptr_t) 4116 ((uintptr_t)t->buffer->data + 4117 binder_alloc_get_user_buffer_offset(&proc->alloc)); 4118 tr.data.ptr.offsets = tr.data.ptr.buffer + 4119 ALIGN(t->buffer->data_size, 4120 sizeof(void *)); 4121 4122 if (put_user(cmd, (uint32_t __user *)ptr)) { 4123 if (t_from) 4124 binder_thread_dec_tmpref(t_from); 4125 4126 binder_cleanup_transaction(t, "put_user failed", 4127 BR_FAILED_REPLY); 4128 4129 return -EFAULT; 4130 } 4131 ptr += sizeof(uint32_t); 4132 if (copy_to_user(ptr, &tr, sizeof(tr))) { 4133 if (t_from) 4134 binder_thread_dec_tmpref(t_from); 4135 4136 binder_cleanup_transaction(t, "copy_to_user failed", 4137 BR_FAILED_REPLY); 4138 4139 return -EFAULT; 4140 } 4141 ptr += sizeof(tr); 4142 4143 trace_binder_transaction_received(t); 4144 binder_stat_br(proc, thread, cmd); 4145 binder_debug(BINDER_DEBUG_TRANSACTION, 4146 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", 4147 proc->pid, thread->pid, 4148 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : 4149 "BR_REPLY", 4150 t->debug_id, t_from ? t_from->proc->pid : 0, 4151 t_from ? 
t_from->pid : 0, cmd, 4152 t->buffer->data_size, t->buffer->offsets_size, 4153 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets); 4154 4155 if (t_from) 4156 binder_thread_dec_tmpref(t_from); 4157 t->buffer->allow_user_free = 1; 4158 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { 4159 binder_inner_proc_lock(thread->proc); 4160 t->to_parent = thread->transaction_stack; 4161 t->to_thread = thread; 4162 thread->transaction_stack = t; 4163 binder_inner_proc_unlock(thread->proc); 4164 } else { 4165 binder_free_transaction(t); 4166 } 4167 break; 4168 } 4169 4170 done: 4171 4172 *consumed = ptr - buffer; 4173 binder_inner_proc_lock(proc); 4174 if (proc->requested_threads == 0 && 4175 list_empty(&thread->proc->waiting_threads) && 4176 proc->requested_threads_started < proc->max_threads && 4177 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 4178 BINDER_LOOPER_STATE_ENTERED)) 4179 /* user-space fails to spawn a new thread if we leave this out */) { 4180 proc->requested_threads++; 4181 binder_inner_proc_unlock(proc); 4182 binder_debug(BINDER_DEBUG_THREADS, 4183 "%d:%d BR_SPAWN_LOOPER\n", 4184 proc->pid, thread->pid); 4185 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) 4186 return -EFAULT; 4187 binder_stat_br(proc, thread, BR_SPAWN_LOOPER); 4188 } else 4189 binder_inner_proc_unlock(proc); 4190 return 0; 4191 } 4192 4193 static void binder_release_work(struct binder_proc *proc, 4194 struct list_head *list) 4195 { 4196 struct binder_work *w; 4197 4198 while (1) { 4199 w = binder_dequeue_work_head(proc, list); 4200 if (!w) 4201 return; 4202 4203 switch (w->type) { 4204 case BINDER_WORK_TRANSACTION: { 4205 struct binder_transaction *t; 4206 4207 t = container_of(w, struct binder_transaction, work); 4208 4209 binder_cleanup_transaction(t, "process died.", 4210 BR_DEAD_REPLY); 4211 } break; 4212 case BINDER_WORK_RETURN_ERROR: { 4213 struct binder_error *e = container_of( 4214 w, struct binder_error, work); 4215 4216 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4217 "undelivered TRANSACTION_ERROR: %u\n", 4218 e->cmd); 4219 } break; 4220 case BINDER_WORK_TRANSACTION_COMPLETE: { 4221 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4222 "undelivered TRANSACTION_COMPLETE\n"); 4223 kfree(w); 4224 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 4225 } break; 4226 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 4227 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 4228 struct binder_ref_death *death; 4229 4230 death = container_of(w, struct binder_ref_death, work); 4231 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4232 "undelivered death notification, %016llx\n", 4233 (u64)death->cookie); 4234 kfree(death); 4235 binder_stats_deleted(BINDER_STAT_DEATH); 4236 } break; 4237 default: 4238 pr_err("unexpected work type, %d, not freed\n", 4239 w->type); 4240 break; 4241 } 4242 } 4243 4244 } 4245 4246 static struct binder_thread *binder_get_thread_ilocked( 4247 struct binder_proc *proc, struct binder_thread *new_thread) 4248 { 4249 struct binder_thread *thread = NULL; 4250 struct rb_node *parent = NULL; 4251 struct rb_node **p = &proc->threads.rb_node; 4252 4253 while (*p) { 4254 parent = *p; 4255 thread = rb_entry(parent, struct binder_thread, rb_node); 4256 4257 if (current->pid < thread->pid) 4258 p = &(*p)->rb_left; 4259 else if (current->pid > thread->pid) 4260 p = &(*p)->rb_right; 4261 else 4262 return thread; 4263 } 4264 if (!new_thread) 4265 return NULL; 4266 thread = new_thread; 4267 binder_stats_created(BINDER_STAT_THREAD); 4268 thread->proc = proc; 4269 thread->pid = current->pid;
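/*
 * Threads are keyed by current->pid (the task id), so each
 * userspace thread that enters the driver gets its own
 * struct binder_thread in proc->threads.
 */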
4270 atomic_set(&thread->tmp_ref, 0); 4271 init_waitqueue_head(&thread->wait); 4272 INIT_LIST_HEAD(&thread->todo); 4273 rb_link_node(&thread->rb_node, parent, p); 4274 rb_insert_color(&thread->rb_node, &proc->threads); 4275 thread->looper_need_return = true; 4276 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR; 4277 thread->return_error.cmd = BR_OK; 4278 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR; 4279 thread->reply_error.cmd = BR_OK; 4280 INIT_LIST_HEAD(&new_thread->waiting_thread_node); 4281 return thread; 4282 } 4283 4284 static struct binder_thread *binder_get_thread(struct binder_proc *proc) 4285 { 4286 struct binder_thread *thread; 4287 struct binder_thread *new_thread; 4288 4289 binder_inner_proc_lock(proc); 4290 thread = binder_get_thread_ilocked(proc, NULL); 4291 binder_inner_proc_unlock(proc); 4292 if (!thread) { 4293 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL); 4294 if (new_thread == NULL) 4295 return NULL; 4296 binder_inner_proc_lock(proc); 4297 thread = binder_get_thread_ilocked(proc, new_thread); 4298 binder_inner_proc_unlock(proc); 4299 if (thread != new_thread) 4300 kfree(new_thread); 4301 } 4302 return thread; 4303 } 4304 4305 static void binder_free_proc(struct binder_proc *proc) 4306 { 4307 BUG_ON(!list_empty(&proc->todo)); 4308 BUG_ON(!list_empty(&proc->delivered_death)); 4309 binder_alloc_deferred_release(&proc->alloc); 4310 put_task_struct(proc->tsk); 4311 binder_stats_deleted(BINDER_STAT_PROC); 4312 kfree(proc); 4313 } 4314 4315 static void binder_free_thread(struct binder_thread *thread) 4316 { 4317 BUG_ON(!list_empty(&thread->todo)); 4318 binder_stats_deleted(BINDER_STAT_THREAD); 4319 binder_proc_dec_tmpref(thread->proc); 4320 kfree(thread); 4321 } 4322 4323 static int binder_thread_release(struct binder_proc *proc, 4324 struct binder_thread *thread) 4325 { 4326 struct binder_transaction *t; 4327 struct binder_transaction *send_reply = NULL; 4328 int active_transactions = 0; 4329 struct binder_transaction *last_t = NULL; 4330 4331 binder_inner_proc_lock(thread->proc); 4332 /* 4333 * take a ref on the proc so it survives 4334 * after we remove this thread from proc->threads. 4335 * The corresponding dec is when we actually 4336 * free the thread in binder_free_thread() 4337 */ 4338 proc->tmp_ref++; 4339 /* 4340 * take a ref on this thread to ensure it 4341 * survives while we are releasing it 4342 */ 4343 atomic_inc(&thread->tmp_ref); 4344 rb_erase(&thread->rb_node, &proc->threads); 4345 t = thread->transaction_stack; 4346 if (t) { 4347 spin_lock(&t->lock); 4348 if (t->to_thread == thread) 4349 send_reply = t; 4350 } 4351 thread->is_dead = true; 4352 4353 while (t) { 4354 last_t = t; 4355 active_transactions++; 4356 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4357 "release %d:%d transaction %d %s, still active\n", 4358 proc->pid, thread->pid, 4359 t->debug_id, 4360 (t->to_thread == thread) ? "in" : "out"); 4361 4362 if (t->to_thread == thread) { 4363 t->to_proc = NULL; 4364 t->to_thread = NULL; 4365 if (t->buffer) { 4366 t->buffer->transaction = NULL; 4367 t->buffer = NULL; 4368 } 4369 t = t->to_parent; 4370 } else if (t->from == thread) { 4371 t->from = NULL; 4372 t = t->from_parent; 4373 } else 4374 BUG(); 4375 spin_unlock(&last_t->lock); 4376 if (t) 4377 spin_lock(&t->lock); 4378 } 4379 4380 /* 4381 * If this thread used poll, make sure we remove the waitqueue 4382 * from any epoll data structures holding it with POLLFREE. 4383 * waitqueue_active() is safe to use here because we're holding 4384 * the inner lock. 
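 * POLLFREE tells the epoll machinery that this waitqueue is
 * about to be freed, so it must unhook itself without touching
 * the queue again.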
4385 */ 4386 if ((thread->looper & BINDER_LOOPER_STATE_POLL) && 4387 waitqueue_active(&thread->wait)) { 4388 wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE); 4389 } 4390 4391 binder_inner_proc_unlock(thread->proc); 4392 4393 /* 4394 * This is needed to avoid races between wake_up_poll() above and 4395 * ep_remove_waitqueue() called for other reasons (e.g. the epoll file 4396 * descriptor being closed); ep_remove_waitqueue() holds an RCU read 4397 * lock, so we can be sure it's done after calling synchronize_rcu(). 4398 */ 4399 if (thread->looper & BINDER_LOOPER_STATE_POLL) 4400 synchronize_rcu(); 4401 4402 if (send_reply) 4403 binder_send_failed_reply(send_reply, BR_DEAD_REPLY); 4404 binder_release_work(proc, &thread->todo); 4405 binder_thread_dec_tmpref(thread); 4406 return active_transactions; 4407 } 4408 4409 static __poll_t binder_poll(struct file *filp, 4410 struct poll_table_struct *wait) 4411 { 4412 struct binder_proc *proc = filp->private_data; 4413 struct binder_thread *thread = NULL; 4414 bool wait_for_proc_work; 4415 4416 thread = binder_get_thread(proc); 4417 if (!thread) 4418 return EPOLLERR; 4419 4420 binder_inner_proc_lock(thread->proc); 4421 thread->looper |= BINDER_LOOPER_STATE_POLL; 4422 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); 4423 4424 binder_inner_proc_unlock(thread->proc); 4425 4426 poll_wait(filp, &thread->wait, wait); 4427 4428 if (binder_has_work(thread, wait_for_proc_work)) 4429 return EPOLLIN; 4430 4431 return 0; 4432 } 4433 4434 static int binder_ioctl_write_read(struct file *filp, 4435 unsigned int cmd, unsigned long arg, 4436 struct binder_thread *thread) 4437 { 4438 int ret = 0; 4439 struct binder_proc *proc = filp->private_data; 4440 unsigned int size = _IOC_SIZE(cmd); 4441 void __user *ubuf = (void __user *)arg; 4442 struct binder_write_read bwr; 4443 4444 if (size != sizeof(struct binder_write_read)) { 4445 ret = -EINVAL; 4446 goto out; 4447 } 4448 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { 4449 ret = -EFAULT; 4450 goto out; 4451 } 4452 binder_debug(BINDER_DEBUG_READ_WRITE, 4453 "%d:%d write %lld at %016llx, read %lld at %016llx\n", 4454 proc->pid, thread->pid, 4455 (u64)bwr.write_size, (u64)bwr.write_buffer, 4456 (u64)bwr.read_size, (u64)bwr.read_buffer); 4457 4458 if (bwr.write_size > 0) { 4459 ret = binder_thread_write(proc, thread, 4460 bwr.write_buffer, 4461 bwr.write_size, 4462 &bwr.write_consumed); 4463 trace_binder_write_done(ret); 4464 if (ret < 0) { 4465 bwr.read_consumed = 0; 4466 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 4467 ret = -EFAULT; 4468 goto out; 4469 } 4470 } 4471 if (bwr.read_size > 0) { 4472 ret = binder_thread_read(proc, thread, bwr.read_buffer, 4473 bwr.read_size, 4474 &bwr.read_consumed, 4475 filp->f_flags & O_NONBLOCK); 4476 trace_binder_read_done(ret); 4477 binder_inner_proc_lock(proc); 4478 if (!binder_worklist_empty_ilocked(&proc->todo)) 4479 binder_wakeup_proc_ilocked(proc); 4480 binder_inner_proc_unlock(proc); 4481 if (ret < 0) { 4482 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 4483 ret = -EFAULT; 4484 goto out; 4485 } 4486 } 4487 binder_debug(BINDER_DEBUG_READ_WRITE, 4488 "%d:%d wrote %lld of %lld, read return %lld of %lld\n", 4489 proc->pid, thread->pid, 4490 (u64)bwr.write_consumed, (u64)bwr.write_size, 4491 (u64)bwr.read_consumed, (u64)bwr.read_size); 4492 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { 4493 ret = -EFAULT; 4494 goto out; 4495 } 4496 out: 4497 return ret; 4498 } 4499 4500 static int binder_ioctl_set_ctx_mgr(struct file *filp) 4501 { 4502 int ret = 0; 4503 struct binder_proc
*proc = filp->private_data; 4504 struct binder_context *context = proc->context; 4505 struct binder_node *new_node; 4506 kuid_t curr_euid = current_euid(); 4507 4508 mutex_lock(&context->context_mgr_node_lock); 4509 if (context->binder_context_mgr_node) { 4510 pr_err("BINDER_SET_CONTEXT_MGR already set\n"); 4511 ret = -EBUSY; 4512 goto out; 4513 } 4514 ret = security_binder_set_context_mgr(proc->tsk); 4515 if (ret < 0) 4516 goto out; 4517 if (uid_valid(context->binder_context_mgr_uid)) { 4518 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) { 4519 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", 4520 from_kuid(&init_user_ns, curr_euid), 4521 from_kuid(&init_user_ns, 4522 context->binder_context_mgr_uid)); 4523 ret = -EPERM; 4524 goto out; 4525 } 4526 } else { 4527 context->binder_context_mgr_uid = curr_euid; 4528 } 4529 new_node = binder_new_node(proc, NULL); 4530 if (!new_node) { 4531 ret = -ENOMEM; 4532 goto out; 4533 } 4534 binder_node_lock(new_node); 4535 new_node->local_weak_refs++; 4536 new_node->local_strong_refs++; 4537 new_node->has_strong_ref = 1; 4538 new_node->has_weak_ref = 1; 4539 context->binder_context_mgr_node = new_node; 4540 binder_node_unlock(new_node); 4541 binder_put_node(new_node); 4542 out: 4543 mutex_unlock(&context->context_mgr_node_lock); 4544 return ret; 4545 } 4546 4547 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc, 4548 struct binder_node_debug_info *info) 4549 { 4550 struct rb_node *n; 4551 binder_uintptr_t ptr = info->ptr; 4552 4553 memset(info, 0, sizeof(*info)); 4554 4555 binder_inner_proc_lock(proc); 4556 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 4557 struct binder_node *node = rb_entry(n, struct binder_node, 4558 rb_node); 4559 if (node->ptr > ptr) { 4560 info->ptr = node->ptr; 4561 info->cookie = node->cookie; 4562 info->has_strong_ref = node->has_strong_ref; 4563 info->has_weak_ref = node->has_weak_ref; 4564 break; 4565 } 4566 } 4567 binder_inner_proc_unlock(proc); 4568 4569 return 0; 4570 } 4571 4572 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 4573 { 4574 int ret; 4575 struct binder_proc *proc = filp->private_data; 4576 struct binder_thread *thread; 4577 unsigned int size = _IOC_SIZE(cmd); 4578 void __user *ubuf = (void __user *)arg; 4579 4580 /*pr_info("binder_ioctl: %d:%d %x %lx\n", 4581 proc->pid, current->pid, cmd, arg);*/ 4582 4583 binder_selftest_alloc(&proc->alloc); 4584 4585 trace_binder_ioctl(cmd, arg); 4586 4587 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 4588 if (ret) 4589 goto err_unlocked; 4590 4591 thread = binder_get_thread(proc); 4592 if (thread == NULL) { 4593 ret = -ENOMEM; 4594 goto err; 4595 } 4596 4597 switch (cmd) { 4598 case BINDER_WRITE_READ: 4599 ret = binder_ioctl_write_read(filp, cmd, arg, thread); 4600 if (ret) 4601 goto err; 4602 break; 4603 case BINDER_SET_MAX_THREADS: { 4604 int max_threads; 4605 4606 if (copy_from_user(&max_threads, ubuf, 4607 sizeof(max_threads))) { 4608 ret = -EINVAL; 4609 goto err; 4610 } 4611 binder_inner_proc_lock(proc); 4612 proc->max_threads = max_threads; 4613 binder_inner_proc_unlock(proc); 4614 break; 4615 } 4616 case BINDER_SET_CONTEXT_MGR: 4617 ret = binder_ioctl_set_ctx_mgr(filp); 4618 if (ret) 4619 goto err; 4620 break; 4621 case BINDER_THREAD_EXIT: 4622 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", 4623 proc->pid, thread->pid); 4624 binder_thread_release(proc, thread); 4625 thread = NULL; 4626 break; 4627 case BINDER_VERSION: { 4628 struct 
binder_version __user *ver = ubuf; 4629 4630 if (size != sizeof(struct binder_version)) { 4631 ret = -EINVAL; 4632 goto err; 4633 } 4634 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, 4635 &ver->protocol_version)) { 4636 ret = -EINVAL; 4637 goto err; 4638 } 4639 break; 4640 } 4641 case BINDER_GET_NODE_DEBUG_INFO: { 4642 struct binder_node_debug_info info; 4643 4644 if (copy_from_user(&info, ubuf, sizeof(info))) { 4645 ret = -EFAULT; 4646 goto err; 4647 } 4648 4649 ret = binder_ioctl_get_node_debug_info(proc, &info); 4650 if (ret < 0) 4651 goto err; 4652 4653 if (copy_to_user(ubuf, &info, sizeof(info))) { 4654 ret = -EFAULT; 4655 goto err; 4656 } 4657 break; 4658 } 4659 default: 4660 ret = -EINVAL; 4661 goto err; 4662 } 4663 ret = 0; 4664 err: 4665 if (thread) 4666 thread->looper_need_return = false; 4667 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 4668 if (ret && ret != -ERESTARTSYS) 4669 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); 4670 err_unlocked: 4671 trace_binder_ioctl_done(ret); 4672 return ret; 4673 } 4674 4675 static void binder_vma_open(struct vm_area_struct *vma) 4676 { 4677 struct binder_proc *proc = vma->vm_private_data; 4678 4679 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 4680 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 4681 proc->pid, vma->vm_start, vma->vm_end, 4682 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 4683 (unsigned long)pgprot_val(vma->vm_page_prot)); 4684 } 4685 4686 static void binder_vma_close(struct vm_area_struct *vma) 4687 { 4688 struct binder_proc *proc = vma->vm_private_data; 4689 4690 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 4691 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 4692 proc->pid, vma->vm_start, vma->vm_end, 4693 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 4694 (unsigned long)pgprot_val(vma->vm_page_prot)); 4695 binder_alloc_vma_close(&proc->alloc); 4696 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); 4697 } 4698 4699 static int binder_vm_fault(struct vm_fault *vmf) 4700 { 4701 return VM_FAULT_SIGBUS; 4702 } 4703 4704 static const struct vm_operations_struct binder_vm_ops = { 4705 .open = binder_vma_open, 4706 .close = binder_vma_close, 4707 .fault = binder_vm_fault, 4708 }; 4709 4710 static int binder_mmap(struct file *filp, struct vm_area_struct *vma) 4711 { 4712 int ret; 4713 struct binder_proc *proc = filp->private_data; 4714 const char *failure_string; 4715 4716 if (proc->tsk != current->group_leader) 4717 return -EINVAL; 4718 4719 if ((vma->vm_end - vma->vm_start) > SZ_4M) 4720 vma->vm_end = vma->vm_start + SZ_4M; 4721 4722 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 4723 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", 4724 __func__, proc->pid, vma->vm_start, vma->vm_end, 4725 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 4726 (unsigned long)pgprot_val(vma->vm_page_prot)); 4727 4728 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { 4729 ret = -EPERM; 4730 failure_string = "bad vm_flags"; 4731 goto err_bad_arg; 4732 } 4733 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; 4734 vma->vm_ops = &binder_vm_ops; 4735 vma->vm_private_data = proc; 4736 4737 ret = binder_alloc_mmap_handler(&proc->alloc, vma); 4738 if (ret) 4739 return ret; 4740 mutex_lock(&proc->files_lock); 4741 proc->files = get_files_struct(current); 4742 mutex_unlock(&proc->files_lock); 4743 return 0; 4744 4745 err_bad_arg: 4746 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__, 4747 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); 4748 return ret; 
4749 } 4750 4751 static int binder_open(struct inode *nodp, struct file *filp) 4752 { 4753 struct binder_proc *proc; 4754 struct binder_device *binder_dev; 4755 4756 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__, 4757 current->group_leader->pid, current->pid); 4758 4759 proc = kzalloc(sizeof(*proc), GFP_KERNEL); 4760 if (proc == NULL) 4761 return -ENOMEM; 4762 spin_lock_init(&proc->inner_lock); 4763 spin_lock_init(&proc->outer_lock); 4764 get_task_struct(current->group_leader); 4765 proc->tsk = current->group_leader; 4766 mutex_init(&proc->files_lock); 4767 INIT_LIST_HEAD(&proc->todo); 4768 proc->default_priority = task_nice(current); 4769 binder_dev = container_of(filp->private_data, struct binder_device, 4770 miscdev); 4771 proc->context = &binder_dev->context; 4772 binder_alloc_init(&proc->alloc); 4773 4774 binder_stats_created(BINDER_STAT_PROC); 4775 proc->pid = current->group_leader->pid; 4776 INIT_LIST_HEAD(&proc->delivered_death); 4777 INIT_LIST_HEAD(&proc->waiting_threads); 4778 filp->private_data = proc; 4779 4780 mutex_lock(&binder_procs_lock); 4781 hlist_add_head(&proc->proc_node, &binder_procs); 4782 mutex_unlock(&binder_procs_lock); 4783 4784 if (binder_debugfs_dir_entry_proc) { 4785 char strbuf[11]; 4786 4787 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 4788 /* 4789 * proc debug entries are shared between contexts, so 4790 * this will fail if the process tries to open the driver 4791 * again with a different context. The printing code 4792 * prints all contexts for a given PID anyway, so this 4793 * is not a problem. 4794 */ 4795 proc->debugfs_entry = debugfs_create_file(strbuf, 0444, 4796 binder_debugfs_dir_entry_proc, 4797 (void *)(unsigned long)proc->pid, 4798 &binder_proc_fops); 4799 } 4800 4801 return 0; 4802 } 4803 4804 static int binder_flush(struct file *filp, fl_owner_t id) 4805 { 4806 struct binder_proc *proc = filp->private_data; 4807 4808 binder_defer_work(proc, BINDER_DEFERRED_FLUSH); 4809 4810 return 0; 4811 } 4812 4813 static void binder_deferred_flush(struct binder_proc *proc) 4814 { 4815 struct rb_node *n; 4816 int wake_count = 0; 4817 4818 binder_inner_proc_lock(proc); 4819 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { 4820 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); 4821 4822 thread->looper_need_return = true; 4823 if (thread->looper & BINDER_LOOPER_STATE_WAITING) { 4824 wake_up_interruptible(&thread->wait); 4825 wake_count++; 4826 } 4827 } 4828 binder_inner_proc_unlock(proc); 4829 4830 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 4831 "binder_flush: %d woke %d threads\n", proc->pid, 4832 wake_count); 4833 } 4834 4835 static int binder_release(struct inode *nodp, struct file *filp) 4836 { 4837 struct binder_proc *proc = filp->private_data; 4838 4839 debugfs_remove(proc->debugfs_entry); 4840 binder_defer_work(proc, BINDER_DEFERRED_RELEASE); 4841 4842 return 0; 4843 } 4844 4845 static int binder_node_release(struct binder_node *node, int refs) 4846 { 4847 struct binder_ref *ref; 4848 int death = 0; 4849 struct binder_proc *proc = node->proc; 4850 4851 binder_release_work(proc, &node->async_todo); 4852 4853 binder_node_lock(node); 4854 binder_inner_proc_lock(proc); 4855 binder_dequeue_work_ilocked(&node->work); 4856 /* 4857 * The caller must have taken a temporary ref on the node. 4858 */ 4859 BUG_ON(!node->tmp_refs); 4860 if (hlist_empty(&node->refs) && node->tmp_refs == 1) { 4861 binder_inner_proc_unlock(proc); 4862 binder_node_unlock(node); 4863 binder_free_node(node); 4864 4865 return
refs; 4866 } 4867 4868 node->proc = NULL; 4869 node->local_strong_refs = 0; 4870 node->local_weak_refs = 0; 4871 binder_inner_proc_unlock(proc); 4872 4873 spin_lock(&binder_dead_nodes_lock); 4874 hlist_add_head(&node->dead_node, &binder_dead_nodes); 4875 spin_unlock(&binder_dead_nodes_lock); 4876 4877 hlist_for_each_entry(ref, &node->refs, node_entry) { 4878 refs++; 4879 /* 4880 * Need the node lock to synchronize 4881 * with new notification requests and the 4882 * inner lock to synchronize with queued 4883 * death notifications. 4884 */ 4885 binder_inner_proc_lock(ref->proc); 4886 if (!ref->death) { 4887 binder_inner_proc_unlock(ref->proc); 4888 continue; 4889 } 4890 4891 death++; 4892 4893 BUG_ON(!list_empty(&ref->death->work.entry)); 4894 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 4895 binder_enqueue_work_ilocked(&ref->death->work, 4896 &ref->proc->todo); 4897 binder_wakeup_proc_ilocked(ref->proc); 4898 binder_inner_proc_unlock(ref->proc); 4899 } 4900 4901 binder_debug(BINDER_DEBUG_DEAD_BINDER, 4902 "node %d now dead, refs %d, death %d\n", 4903 node->debug_id, refs, death); 4904 binder_node_unlock(node); 4905 binder_put_node(node); 4906 4907 return refs; 4908 } 4909 4910 static void binder_deferred_release(struct binder_proc *proc) 4911 { 4912 struct binder_context *context = proc->context; 4913 struct rb_node *n; 4914 int threads, nodes, incoming_refs, outgoing_refs, active_transactions; 4915 4916 BUG_ON(proc->files); 4917 4918 mutex_lock(&binder_procs_lock); 4919 hlist_del(&proc->proc_node); 4920 mutex_unlock(&binder_procs_lock); 4921 4922 mutex_lock(&context->context_mgr_node_lock); 4923 if (context->binder_context_mgr_node && 4924 context->binder_context_mgr_node->proc == proc) { 4925 binder_debug(BINDER_DEBUG_DEAD_BINDER, 4926 "%s: %d context_mgr_node gone\n", 4927 __func__, proc->pid); 4928 context->binder_context_mgr_node = NULL; 4929 } 4930 mutex_unlock(&context->context_mgr_node_lock); 4931 binder_inner_proc_lock(proc); 4932 /* 4933 * Make sure proc stays alive after we 4934 * remove all the threads 4935 */ 4936 proc->tmp_ref++; 4937 4938 proc->is_dead = true; 4939 threads = 0; 4940 active_transactions = 0; 4941 while ((n = rb_first(&proc->threads))) { 4942 struct binder_thread *thread; 4943 4944 thread = rb_entry(n, struct binder_thread, rb_node); 4945 binder_inner_proc_unlock(proc); 4946 threads++; 4947 active_transactions += binder_thread_release(proc, thread); 4948 binder_inner_proc_lock(proc); 4949 } 4950 4951 nodes = 0; 4952 incoming_refs = 0; 4953 while ((n = rb_first(&proc->nodes))) { 4954 struct binder_node *node; 4955 4956 node = rb_entry(n, struct binder_node, rb_node); 4957 nodes++; 4958 /* 4959 * take a temporary ref on the node before 4960 * calling binder_node_release() which will either 4961 * kfree() the node or call binder_put_node() 4962 */ 4963 binder_inc_node_tmpref_ilocked(node); 4964 rb_erase(&node->rb_node, &proc->nodes); 4965 binder_inner_proc_unlock(proc); 4966 incoming_refs = binder_node_release(node, incoming_refs); 4967 binder_inner_proc_lock(proc); 4968 } 4969 binder_inner_proc_unlock(proc); 4970 4971 outgoing_refs = 0; 4972 binder_proc_lock(proc); 4973 while ((n = rb_first(&proc->refs_by_desc))) { 4974 struct binder_ref *ref; 4975 4976 ref = rb_entry(n, struct binder_ref, rb_node_desc); 4977 outgoing_refs++; 4978 binder_cleanup_ref_olocked(ref); 4979 binder_proc_unlock(proc); 4980 binder_free_ref(ref); 4981 binder_proc_lock(proc); 4982 } 4983 binder_proc_unlock(proc); 4984 4985 binder_release_work(proc, &proc->todo); 4986 
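/*
 * Death notifications already delivered to userspace but not
 * yet acknowledged with BC_DEAD_BINDER_DONE are still queued
 * on delivered_death; release them along with the proc.
 */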
binder_release_work(proc, &proc->delivered_death); 4987 4988 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 4989 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n", 4990 __func__, proc->pid, threads, nodes, incoming_refs, 4991 outgoing_refs, active_transactions); 4992 4993 binder_proc_dec_tmpref(proc); 4994 } 4995 4996 static void binder_deferred_func(struct work_struct *work) 4997 { 4998 struct binder_proc *proc; 4999 struct files_struct *files; 5000 5001 int defer; 5002 5003 do { 5004 mutex_lock(&binder_deferred_lock); 5005 if (!hlist_empty(&binder_deferred_list)) { 5006 proc = hlist_entry(binder_deferred_list.first, 5007 struct binder_proc, deferred_work_node); 5008 hlist_del_init(&proc->deferred_work_node); 5009 defer = proc->deferred_work; 5010 proc->deferred_work = 0; 5011 } else { 5012 proc = NULL; 5013 defer = 0; 5014 } 5015 mutex_unlock(&binder_deferred_lock); 5016 5017 files = NULL; 5018 if (defer & BINDER_DEFERRED_PUT_FILES) { 5019 mutex_lock(&proc->files_lock); 5020 files = proc->files; 5021 if (files) 5022 proc->files = NULL; 5023 mutex_unlock(&proc->files_lock); 5024 } 5025 5026 if (defer & BINDER_DEFERRED_FLUSH) 5027 binder_deferred_flush(proc); 5028 5029 if (defer & BINDER_DEFERRED_RELEASE) 5030 binder_deferred_release(proc); /* frees proc */ 5031 5032 if (files) 5033 put_files_struct(files); 5034 } while (proc); 5035 } 5036 static DECLARE_WORK(binder_deferred_work, binder_deferred_func); 5037 5038 static void 5039 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) 5040 { 5041 mutex_lock(&binder_deferred_lock); 5042 proc->deferred_work |= defer; 5043 if (hlist_unhashed(&proc->deferred_work_node)) { 5044 hlist_add_head(&proc->deferred_work_node, 5045 &binder_deferred_list); 5046 schedule_work(&binder_deferred_work); 5047 } 5048 mutex_unlock(&binder_deferred_lock); 5049 } 5050 5051 static void print_binder_transaction_ilocked(struct seq_file *m, 5052 struct binder_proc *proc, 5053 const char *prefix, 5054 struct binder_transaction *t) 5055 { 5056 struct binder_proc *to_proc; 5057 struct binder_buffer *buffer = t->buffer; 5058 5059 spin_lock(&t->lock); 5060 to_proc = t->to_proc; 5061 seq_printf(m, 5062 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d", 5063 prefix, t->debug_id, t, 5064 t->from ? t->from->proc->pid : 0, 5065 t->from ? t->from->pid : 0, 5066 to_proc ? to_proc->pid : 0, 5067 t->to_thread ? 
t->to_thread->pid : 0, 5068 t->code, t->flags, t->priority, t->need_reply); 5069 spin_unlock(&t->lock); 5070 5071 if (proc != to_proc) { 5072 /* 5073 * Can only safely deref buffer if we are holding the 5074 * correct proc inner lock for this node 5075 */ 5076 seq_puts(m, "\n"); 5077 return; 5078 } 5079 5080 if (buffer == NULL) { 5081 seq_puts(m, " buffer free\n"); 5082 return; 5083 } 5084 if (buffer->target_node) 5085 seq_printf(m, " node %d", buffer->target_node->debug_id); 5086 seq_printf(m, " size %zd:%zd data %pK\n", 5087 buffer->data_size, buffer->offsets_size, 5088 buffer->data); 5089 } 5090 5091 static void print_binder_work_ilocked(struct seq_file *m, 5092 struct binder_proc *proc, 5093 const char *prefix, 5094 const char *transaction_prefix, 5095 struct binder_work *w) 5096 { 5097 struct binder_node *node; 5098 struct binder_transaction *t; 5099 5100 switch (w->type) { 5101 case BINDER_WORK_TRANSACTION: 5102 t = container_of(w, struct binder_transaction, work); 5103 print_binder_transaction_ilocked( 5104 m, proc, transaction_prefix, t); 5105 break; 5106 case BINDER_WORK_RETURN_ERROR: { 5107 struct binder_error *e = container_of( 5108 w, struct binder_error, work); 5109 5110 seq_printf(m, "%stransaction error: %u\n", 5111 prefix, e->cmd); 5112 } break; 5113 case BINDER_WORK_TRANSACTION_COMPLETE: 5114 seq_printf(m, "%stransaction complete\n", prefix); 5115 break; 5116 case BINDER_WORK_NODE: 5117 node = container_of(w, struct binder_node, work); 5118 seq_printf(m, "%snode work %d: u%016llx c%016llx\n", 5119 prefix, node->debug_id, 5120 (u64)node->ptr, (u64)node->cookie); 5121 break; 5122 case BINDER_WORK_DEAD_BINDER: 5123 seq_printf(m, "%shas dead binder\n", prefix); 5124 break; 5125 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 5126 seq_printf(m, "%shas cleared dead binder\n", prefix); 5127 break; 5128 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: 5129 seq_printf(m, "%shas cleared death notification\n", prefix); 5130 break; 5131 default: 5132 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); 5133 break; 5134 } 5135 } 5136 5137 static void print_binder_thread_ilocked(struct seq_file *m, 5138 struct binder_thread *thread, 5139 int print_always) 5140 { 5141 struct binder_transaction *t; 5142 struct binder_work *w; 5143 size_t start_pos = m->count; 5144 size_t header_pos; 5145 5146 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n", 5147 thread->pid, thread->looper, 5148 thread->looper_need_return, 5149 atomic_read(&thread->tmp_ref)); 5150 header_pos = m->count; 5151 t = thread->transaction_stack; 5152 while (t) { 5153 if (t->from == thread) { 5154 print_binder_transaction_ilocked(m, thread->proc, 5155 " outgoing transaction", t); 5156 t = t->from_parent; 5157 } else if (t->to_thread == thread) { 5158 print_binder_transaction_ilocked(m, thread->proc, 5159 " incoming transaction", t); 5160 t = t->to_parent; 5161 } else { 5162 print_binder_transaction_ilocked(m, thread->proc, 5163 " bad transaction", t); 5164 t = NULL; 5165 } 5166 } 5167 list_for_each_entry(w, &thread->todo, entry) { 5168 print_binder_work_ilocked(m, thread->proc, " ", 5169 " pending transaction", w); 5170 } 5171 if (!print_always && m->count == header_pos) 5172 m->count = start_pos; 5173 } 5174 5175 static void print_binder_node_nilocked(struct seq_file *m, 5176 struct binder_node *node) 5177 { 5178 struct binder_ref *ref; 5179 struct binder_work *w; 5180 int count; 5181 5182 count = 0; 5183 hlist_for_each_entry(ref, &node->refs, node_entry) 5184 count++; 5185 5186 seq_printf(m, " node %d: u%016llx c%016llx hs 
%d hw %d ls %d lw %d is %d iw %d tr %d", 5187 node->debug_id, (u64)node->ptr, (u64)node->cookie, 5188 node->has_strong_ref, node->has_weak_ref, 5189 node->local_strong_refs, node->local_weak_refs, 5190 node->internal_strong_refs, count, node->tmp_refs); 5191 if (count) { 5192 seq_puts(m, " proc"); 5193 hlist_for_each_entry(ref, &node->refs, node_entry) 5194 seq_printf(m, " %d", ref->proc->pid); 5195 } 5196 seq_puts(m, "\n"); 5197 if (node->proc) { 5198 list_for_each_entry(w, &node->async_todo, entry) 5199 print_binder_work_ilocked(m, node->proc, " ", 5200 " pending async transaction", w); 5201 } 5202 } 5203 5204 static void print_binder_ref_olocked(struct seq_file *m, 5205 struct binder_ref *ref) 5206 { 5207 binder_node_lock(ref->node); 5208 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n", 5209 ref->data.debug_id, ref->data.desc, 5210 ref->node->proc ? "" : "dead ", 5211 ref->node->debug_id, ref->data.strong, 5212 ref->data.weak, ref->death); 5213 binder_node_unlock(ref->node); 5214 } 5215 5216 static void print_binder_proc(struct seq_file *m, 5217 struct binder_proc *proc, int print_all) 5218 { 5219 struct binder_work *w; 5220 struct rb_node *n; 5221 size_t start_pos = m->count; 5222 size_t header_pos; 5223 struct binder_node *last_node = NULL; 5224 5225 seq_printf(m, "proc %d\n", proc->pid); 5226 seq_printf(m, "context %s\n", proc->context->name); 5227 header_pos = m->count; 5228 5229 binder_inner_proc_lock(proc); 5230 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 5231 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread, 5232 rb_node), print_all); 5233 5234 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 5235 struct binder_node *node = rb_entry(n, struct binder_node, 5236 rb_node); 5237 /* 5238 * take a temporary reference on the node so it 5239 * survives and isn't removed from the tree 5240 * while we print it. 
5241 */ 5242 binder_inc_node_tmpref_ilocked(node); 5243 /* Need to drop inner lock to take node lock */ 5244 binder_inner_proc_unlock(proc); 5245 if (last_node) 5246 binder_put_node(last_node); 5247 binder_node_inner_lock(node); 5248 print_binder_node_nilocked(m, node); 5249 binder_node_inner_unlock(node); 5250 last_node = node; 5251 binder_inner_proc_lock(proc); 5252 } 5253 binder_inner_proc_unlock(proc); 5254 if (last_node) 5255 binder_put_node(last_node); 5256 5257 if (print_all) { 5258 binder_proc_lock(proc); 5259 for (n = rb_first(&proc->refs_by_desc); 5260 n != NULL; 5261 n = rb_next(n)) 5262 print_binder_ref_olocked(m, rb_entry(n, 5263 struct binder_ref, 5264 rb_node_desc)); 5265 binder_proc_unlock(proc); 5266 } 5267 binder_alloc_print_allocated(m, &proc->alloc); 5268 binder_inner_proc_lock(proc); 5269 list_for_each_entry(w, &proc->todo, entry) 5270 print_binder_work_ilocked(m, proc, " ", 5271 " pending transaction", w); 5272 list_for_each_entry(w, &proc->delivered_death, entry) { 5273 seq_puts(m, " has delivered dead binder\n"); 5274 break; 5275 } 5276 binder_inner_proc_unlock(proc); 5277 if (!print_all && m->count == header_pos) 5278 m->count = start_pos; 5279 } 5280 5281 static const char * const binder_return_strings[] = { 5282 "BR_ERROR", 5283 "BR_OK", 5284 "BR_TRANSACTION", 5285 "BR_REPLY", 5286 "BR_ACQUIRE_RESULT", 5287 "BR_DEAD_REPLY", 5288 "BR_TRANSACTION_COMPLETE", 5289 "BR_INCREFS", 5290 "BR_ACQUIRE", 5291 "BR_RELEASE", 5292 "BR_DECREFS", 5293 "BR_ATTEMPT_ACQUIRE", 5294 "BR_NOOP", 5295 "BR_SPAWN_LOOPER", 5296 "BR_FINISHED", 5297 "BR_DEAD_BINDER", 5298 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 5299 "BR_FAILED_REPLY" 5300 }; 5301 5302 static const char * const binder_command_strings[] = { 5303 "BC_TRANSACTION", 5304 "BC_REPLY", 5305 "BC_ACQUIRE_RESULT", 5306 "BC_FREE_BUFFER", 5307 "BC_INCREFS", 5308 "BC_ACQUIRE", 5309 "BC_RELEASE", 5310 "BC_DECREFS", 5311 "BC_INCREFS_DONE", 5312 "BC_ACQUIRE_DONE", 5313 "BC_ATTEMPT_ACQUIRE", 5314 "BC_REGISTER_LOOPER", 5315 "BC_ENTER_LOOPER", 5316 "BC_EXIT_LOOPER", 5317 "BC_REQUEST_DEATH_NOTIFICATION", 5318 "BC_CLEAR_DEATH_NOTIFICATION", 5319 "BC_DEAD_BINDER_DONE", 5320 "BC_TRANSACTION_SG", 5321 "BC_REPLY_SG", 5322 }; 5323 5324 static const char * const binder_objstat_strings[] = { 5325 "proc", 5326 "thread", 5327 "node", 5328 "ref", 5329 "death", 5330 "transaction", 5331 "transaction_complete" 5332 }; 5333 5334 static void print_binder_stats(struct seq_file *m, const char *prefix, 5335 struct binder_stats *stats) 5336 { 5337 int i; 5338 5339 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != 5340 ARRAY_SIZE(binder_command_strings)); 5341 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { 5342 int temp = atomic_read(&stats->bc[i]); 5343 5344 if (temp) 5345 seq_printf(m, "%s%s: %d\n", prefix, 5346 binder_command_strings[i], temp); 5347 } 5348 5349 BUILD_BUG_ON(ARRAY_SIZE(stats->br) != 5350 ARRAY_SIZE(binder_return_strings)); 5351 for (i = 0; i < ARRAY_SIZE(stats->br); i++) { 5352 int temp = atomic_read(&stats->br[i]); 5353 5354 if (temp) 5355 seq_printf(m, "%s%s: %d\n", prefix, 5356 binder_return_strings[i], temp); 5357 } 5358 5359 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 5360 ARRAY_SIZE(binder_objstat_strings)); 5361 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 5362 ARRAY_SIZE(stats->obj_deleted)); 5363 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { 5364 int created = atomic_read(&stats->obj_created[i]); 5365 int deleted = atomic_read(&stats->obj_deleted[i]); 5366 5367 if (created || deleted) 5368 seq_printf(m, "%s%s: active %d total %d\n", 5369 
prefix, 5370 binder_objstat_strings[i], 5371 created - deleted, 5372 created); 5373 } 5374 } 5375 5376 static void print_binder_proc_stats(struct seq_file *m, 5377 struct binder_proc *proc) 5378 { 5379 struct binder_work *w; 5380 struct binder_thread *thread; 5381 struct rb_node *n; 5382 int count, strong, weak, ready_threads; 5383 size_t free_async_space = 5384 binder_alloc_get_free_async_space(&proc->alloc); 5385 5386 seq_printf(m, "proc %d\n", proc->pid); 5387 seq_printf(m, "context %s\n", proc->context->name); 5388 count = 0; 5389 ready_threads = 0; 5390 binder_inner_proc_lock(proc); 5391 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 5392 count++; 5393 5394 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node) 5395 ready_threads++; 5396 5397 seq_printf(m, " threads: %d\n", count); 5398 seq_printf(m, " requested threads: %d+%d/%d\n" 5399 " ready threads %d\n" 5400 " free async space %zd\n", proc->requested_threads, 5401 proc->requested_threads_started, proc->max_threads, 5402 ready_threads, 5403 free_async_space); 5404 count = 0; 5405 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) 5406 count++; 5407 binder_inner_proc_unlock(proc); 5408 seq_printf(m, " nodes: %d\n", count); 5409 count = 0; 5410 strong = 0; 5411 weak = 0; 5412 binder_proc_lock(proc); 5413 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { 5414 struct binder_ref *ref = rb_entry(n, struct binder_ref, 5415 rb_node_desc); 5416 count++; 5417 strong += ref->data.strong; 5418 weak += ref->data.weak; 5419 } 5420 binder_proc_unlock(proc); 5421 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); 5422 5423 count = binder_alloc_get_allocated_count(&proc->alloc); 5424 seq_printf(m, " buffers: %d\n", count); 5425 5426 binder_alloc_print_pages(m, &proc->alloc); 5427 5428 count = 0; 5429 binder_inner_proc_lock(proc); 5430 list_for_each_entry(w, &proc->todo, entry) { 5431 if (w->type == BINDER_WORK_TRANSACTION) 5432 count++; 5433 } 5434 binder_inner_proc_unlock(proc); 5435 seq_printf(m, " pending transactions: %d\n", count); 5436 5437 print_binder_stats(m, " ", &proc->stats); 5438 } 5439 5440 5441 static int binder_state_show(struct seq_file *m, void *unused) 5442 { 5443 struct binder_proc *proc; 5444 struct binder_node *node; 5445 struct binder_node *last_node = NULL; 5446 5447 seq_puts(m, "binder state:\n"); 5448 5449 spin_lock(&binder_dead_nodes_lock); 5450 if (!hlist_empty(&binder_dead_nodes)) 5451 seq_puts(m, "dead nodes:\n"); 5452 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) { 5453 /* 5454 * take a temporary reference on the node so it 5455 * survives and isn't removed from the list 5456 * while we print it. 
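 * The node is dead here (node->proc is NULL), so tmp_refs is
 * protected by binder_dead_nodes_lock rather than by a proc
 * inner lock.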
5457 */ 5458 node->tmp_refs++; 5459 spin_unlock(&binder_dead_nodes_lock); 5460 if (last_node) 5461 binder_put_node(last_node); 5462 binder_node_lock(node); 5463 print_binder_node_nilocked(m, node); 5464 binder_node_unlock(node); 5465 last_node = node; 5466 spin_lock(&binder_dead_nodes_lock); 5467 } 5468 spin_unlock(&binder_dead_nodes_lock); 5469 if (last_node) 5470 binder_put_node(last_node); 5471 5472 mutex_lock(&binder_procs_lock); 5473 hlist_for_each_entry(proc, &binder_procs, proc_node) 5474 print_binder_proc(m, proc, 1); 5475 mutex_unlock(&binder_procs_lock); 5476 5477 return 0; 5478 } 5479 5480 static int binder_stats_show(struct seq_file *m, void *unused) 5481 { 5482 struct binder_proc *proc; 5483 5484 seq_puts(m, "binder stats:\n"); 5485 5486 print_binder_stats(m, "", &binder_stats); 5487 5488 mutex_lock(&binder_procs_lock); 5489 hlist_for_each_entry(proc, &binder_procs, proc_node) 5490 print_binder_proc_stats(m, proc); 5491 mutex_unlock(&binder_procs_lock); 5492 5493 return 0; 5494 } 5495 5496 static int binder_transactions_show(struct seq_file *m, void *unused) 5497 { 5498 struct binder_proc *proc; 5499 5500 seq_puts(m, "binder transactions:\n"); 5501 mutex_lock(&binder_procs_lock); 5502 hlist_for_each_entry(proc, &binder_procs, proc_node) 5503 print_binder_proc(m, proc, 0); 5504 mutex_unlock(&binder_procs_lock); 5505 5506 return 0; 5507 } 5508 5509 static int binder_proc_show(struct seq_file *m, void *unused) 5510 { 5511 struct binder_proc *itr; 5512 int pid = (unsigned long)m->private; 5513 5514 mutex_lock(&binder_procs_lock); 5515 hlist_for_each_entry(itr, &binder_procs, proc_node) { 5516 if (itr->pid == pid) { 5517 seq_puts(m, "binder proc state:\n"); 5518 print_binder_proc(m, itr, 1); 5519 } 5520 } 5521 mutex_unlock(&binder_procs_lock); 5522 5523 return 0; 5524 } 5525 5526 static void print_binder_transaction_log_entry(struct seq_file *m, 5527 struct binder_transaction_log_entry *e) 5528 { 5529 int debug_id = READ_ONCE(e->debug_id_done); 5530 /* 5531 * read barrier to guarantee debug_id_done read before 5532 * we print the log values 5533 */ 5534 smp_rmb(); 5535 seq_printf(m, 5536 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d", 5537 e->debug_id, (e->call_type == 2) ? "reply" : 5538 ((e->call_type == 1) ? "async" : "call "), e->from_proc, 5539 e->from_thread, e->to_proc, e->to_thread, e->context_name, 5540 e->to_node, e->target_handle, e->data_size, e->offsets_size, 5541 e->return_error, e->return_error_param, 5542 e->return_error_line); 5543 /* 5544 * read-barrier to guarantee read of debug_id_done after 5545 * done printing the fields of the entry 5546 */ 5547 smp_rmb(); 5548 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ? 5549 "\n" : " (incomplete)\n"); 5550 } 5551 5552 static int binder_transaction_log_show(struct seq_file *m, void *unused) 5553 { 5554 struct binder_transaction_log *log = m->private; 5555 unsigned int log_cur = atomic_read(&log->cur); 5556 unsigned int count; 5557 unsigned int cur; 5558 int i; 5559 5560 count = log_cur + 1; 5561 cur = count < ARRAY_SIZE(log->entry) && !log->full ? 
5562 0 : count % ARRAY_SIZE(log->entry); 5563 if (count > ARRAY_SIZE(log->entry) || log->full) 5564 count = ARRAY_SIZE(log->entry); 5565 for (i = 0; i < count; i++) { 5566 unsigned int index = cur++ % ARRAY_SIZE(log->entry); 5567 5568 print_binder_transaction_log_entry(m, &log->entry[index]); 5569 } 5570 return 0; 5571 } 5572 5573 static const struct file_operations binder_fops = { 5574 .owner = THIS_MODULE, 5575 .poll = binder_poll, 5576 .unlocked_ioctl = binder_ioctl, 5577 .compat_ioctl = binder_ioctl, 5578 .mmap = binder_mmap, 5579 .open = binder_open, 5580 .flush = binder_flush, 5581 .release = binder_release, 5582 }; 5583 5584 BINDER_DEBUG_ENTRY(state); 5585 BINDER_DEBUG_ENTRY(stats); 5586 BINDER_DEBUG_ENTRY(transactions); 5587 BINDER_DEBUG_ENTRY(transaction_log); 5588 5589 static int __init init_binder_device(const char *name) 5590 { 5591 int ret; 5592 struct binder_device *binder_device; 5593 5594 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL); 5595 if (!binder_device) 5596 return -ENOMEM; 5597 5598 binder_device->miscdev.fops = &binder_fops; 5599 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR; 5600 binder_device->miscdev.name = name; 5601 5602 binder_device->context.binder_context_mgr_uid = INVALID_UID; 5603 binder_device->context.name = name; 5604 mutex_init(&binder_device->context.context_mgr_node_lock); 5605 5606 ret = misc_register(&binder_device->miscdev); 5607 if (ret < 0) { 5608 kfree(binder_device); 5609 return ret; 5610 } 5611 5612 hlist_add_head(&binder_device->hlist, &binder_devices); 5613 5614 return ret; 5615 } 5616 5617 static int __init binder_init(void) 5618 { 5619 int ret; 5620 char *device_name, *device_names, *device_tmp; 5621 struct binder_device *device; 5622 struct hlist_node *tmp; 5623 5624 ret = binder_alloc_shrinker_init(); 5625 if (ret) 5626 return ret; 5627 5628 atomic_set(&binder_transaction_log.cur, ~0U); 5629 atomic_set(&binder_transaction_log_failed.cur, ~0U); 5630 5631 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); 5632 if (binder_debugfs_dir_entry_root) 5633 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", 5634 binder_debugfs_dir_entry_root); 5635 5636 if (binder_debugfs_dir_entry_root) { 5637 debugfs_create_file("state", 5638 0444, 5639 binder_debugfs_dir_entry_root, 5640 NULL, 5641 &binder_state_fops); 5642 debugfs_create_file("stats", 5643 0444, 5644 binder_debugfs_dir_entry_root, 5645 NULL, 5646 &binder_stats_fops); 5647 debugfs_create_file("transactions", 5648 0444, 5649 binder_debugfs_dir_entry_root, 5650 NULL, 5651 &binder_transactions_fops); 5652 debugfs_create_file("transaction_log", 5653 0444, 5654 binder_debugfs_dir_entry_root, 5655 &binder_transaction_log, 5656 &binder_transaction_log_fops); 5657 debugfs_create_file("failed_transaction_log", 5658 0444, 5659 binder_debugfs_dir_entry_root, 5660 &binder_transaction_log_failed, 5661 &binder_transaction_log_fops); 5662 } 5663 5664 /* 5665 * Copy the module_parameter string, because we don't want to 5666 * tokenize it in-place. 
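 * The parameter is a comma-separated list of device names to
 * register, e.g. "binder,hwbinder,vndbinder" on a typical
 * Android configuration; strsep() below consumes our copy.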
5667 */ 5668 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL); 5669 if (!device_names) { 5670 ret = -ENOMEM; 5671 goto err_alloc_device_names_failed; 5672 } 5673 strcpy(device_names, binder_devices_param); 5674 5675 device_tmp = device_names; 5676 while ((device_name = strsep(&device_tmp, ","))) { 5677 ret = init_binder_device(device_name); 5678 if (ret) 5679 goto err_init_binder_device_failed; 5680 } 5681 5682 return ret; 5683 5684 err_init_binder_device_failed: 5685 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) { 5686 misc_deregister(&device->miscdev); 5687 hlist_del(&device->hlist); 5688 kfree(device); 5689 } 5690 5691 kfree(device_names); 5692 5693 err_alloc_device_names_failed: 5694 debugfs_remove_recursive(binder_debugfs_dir_entry_root); 5695 5696 return ret; 5697 } 5698 5699 device_initcall(binder_init); 5700 5701 #define CREATE_TRACE_POINTS 5702 #include "binder_trace.h" 5703 5704 MODULE_LICENSE("GPL v2"); 5705
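/*
 * Example: a minimal user-space client of this interface, shown as an
 * illustrative sketch only (it is not part of the driver). It assumes
 * the default "binder" device name and an arbitrary 128 KiB map size.
 * The client checks the protocol version, maps the buffer space
 * read-only (binder_mmap() rejects writable mappings via
 * FORBIDDEN_MMAP_FLAGS) and issues BC_ENTER_LOOPER through
 * BINDER_WRITE_READ, after which it would normally block in
 * binder_thread_read() waiting for work:
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *	#include <linux/android/binder.h>
 *
 *	int main(void)
 *	{
 *		struct binder_version vers;
 *		struct binder_write_read bwr = {0};
 *		uint32_t cmd = BC_ENTER_LOOPER;
 *		int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *
 *		if (fd < 0 || ioctl(fd, BINDER_VERSION, &vers) < 0 ||
 *		    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *			return 1;
 *		if (mmap(NULL, 128 * 1024, PROT_READ, MAP_PRIVATE,
 *			 fd, 0) == MAP_FAILED)
 *			return 1;
 *		bwr.write_buffer = (binder_uintptr_t)(uintptr_t)&cmd;
 *		bwr.write_size = sizeof(cmd);
 *		if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
 *			return 1;
 *		return 0;
 *	}
 */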