/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * is required via the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
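 *
 * Illustrative sketch (not a real call site): a path that needs all
 * three locks for a node with a live proc acquires them in the
 * documented order and releases them in reverse:
 *
 *	binder_proc_lock(proc);         1) proc->outer_lock
 *	binder_node_lock(node);         2) node->lock
 *	binder_inner_proc_lock(proc);   3) proc->inner_lock
 *	... touch refs, node fields, todo lists ...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);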
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);
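
/*
 * Example (illustrative): debug_mask can be adjusted at runtime through
 * the module parameter exposed above, e.g. to trace transactions:
 *
 *	echo 0x600 > /sys/module/binder/parameters/debug_mask
 *
 * where 0x600 == BINDER_DEBUG_TRANSACTION | BINDER_DEBUG_TRANSACTION_COMPLETE.
 */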

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};
struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
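
/*
 * Illustrative sketch (not part of the driver): locating the most
 * recently claimed slot of the ring buffer above. Readers treat the log
 * as best-effort; an entry whose debug_id_done is still 0 is in the
 * middle of being filled in by binder_transaction_log_add():
 *
 *	unsigned int cur = atomic_read(&log->cur);
 *	struct binder_transaction_log_entry *e =
 *		&log->entry[cur % ARRAY_SIZE(log->entry)];
 */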

struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};

/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
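 *
 * Informal lifetime sketch (see binder_dec_node_nilocked()): the node
 * survives while any of @internal_strong_refs, @local_strong_refs,
 * @local_weak_refs or @tmp_refs is non-zero, or while @refs is
 * non-empty; once all of these drop to zero the node is unhooked
 * and freed.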
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:         binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:   list entry for node->refs list in target node
 *                (protected by @node->lock)
 * @proc:         binder_proc containing ref
 * @node:         binder_node of target node. When cleaning up a
 *                ref for deletion in binder_cleanup_ref, a non-NULL
 *                @node indicates the node must be freed
 * @death:        pointer to death notification (ref_death) if requested
 *                (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};
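
/*
 * These bits accumulate in proc->deferred_work (via binder_defer_work(),
 * declared further below) and are drained later from workqueue context.
 * Illustrative use, mirroring the flush path:
 *
 *	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
 */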

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (protected by @files_lock)
 * @files_lock:           mutex to protect @files
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct mutex files_lock;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
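 *
 * Note: @looper is built from the BINDER_LOOPER_STATE_* bits above; for
 * example, a looper thread that issued BC_ENTER_LOOPER and is blocked
 * waiting for work has (BINDER_LOOPER_STATE_ENTERED |
 * BINDER_LOOPER_STATE_WAITING) set.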
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	/**
	 * @lock: protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release spinlock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
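 *
 * For example, a BINDER_WORK_TRANSACTION_COMPLETE can be queued this way
 * at send time so the calling thread is not woken just to consume it and
 * instead picks it up together with later work.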
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	unsigned long rlim_cur;
	unsigned long irqs;
	int ret;

	mutex_lock(&proc->files_lock);
	if (proc->files == NULL) {
		ret = -ESRCH;
		goto err;
	}
	if (!lock_task_sighand(proc->tsk, &irqs)) {
		ret = -EMFILE;
		goto err;
	}
	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
err:
	mutex_unlock(&proc->files_lock);
	return ret;
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	mutex_lock(&proc->files_lock);
	if (proc->files)
		__fd_install(proc->files, fd, file);
	mutex_unlock(&proc->files_lock);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	mutex_lock(&proc->files_lock);
	if (proc->files == NULL) {
		retval = -ESRCH;
		goto err;
	}
	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;
err:
	mutex_unlock(&proc->files_lock);
	return retval;
}
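
/*
 * Illustrative sequence (mirroring how BINDER_TYPE_FD objects are
 * translated into a target process): reserve a descriptor bounded by the
 * target's RLIMIT_NOFILE, publish the file in the target's table, and
 * close it again on a later failure:
 *
 *	int fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
 *	if (fd >= 0)
 *		task_fd_install(target_proc, fd, file);
 *	...
 *	task_close_fd(target_proc, fd);   (error unwind)
 */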

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}
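
/*
 * Typical pairing (illustrative; binder_wakeup_proc_ilocked() below does
 * exactly this with sync=false):
 *
 *	thread = binder_select_thread_ilocked(proc);
 *	binder_wakeup_thread_ilocked(proc, thread, true);
 */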

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}
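
/*
 * Illustrative pairing (not a real call site): lookups return the node
 * with an implicit tmp_ref held, which the caller must drop when done:
 *
 *	struct binder_node *node = binder_get_node(proc, ptr);
 *	if (node) {
 *		... use node ...
 *		binder_put_node(node);
 *	}
 */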

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {

		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			binder_dequeue_work_ilocked(&node->work);
			/*
			 * Note: this function is the only place where we queue
			 * directly to a thread->todo without using the
			 * corresponding binder_enqueue_thread_work() helper
			 * functions; in this case it's ok to not set the
			 * process_todo flag, since we know this node work will
			 * always be followed by other work that starts queue
			 * processing: in case of synchronous transactions, a
			 * BR_REPLY or BR_ERROR; in case of oneway
			 * transactions, a BR_TRANSACTION_COMPLETE.
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
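
/*
 * Illustrative use (this mirrors the BC_ACQUIRE handling): take a strong
 * reference on handle desc and report the resulting counts via rdata:
 *
 *	struct binder_ref_data rdata;
 *	ret = binder_update_ref_for_handle(proc, desc, true, true, &rdata);
 */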

/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
				   struct binder_node *node,
				   bool strong,
				   struct list_head *target_list,
				   struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
1896 */ 1897 static void binder_proc_dec_tmpref(struct binder_proc *proc) 1898 { 1899 binder_inner_proc_lock(proc); 1900 proc->tmp_ref--; 1901 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) && 1902 !proc->tmp_ref) { 1903 binder_inner_proc_unlock(proc); 1904 binder_free_proc(proc); 1905 return; 1906 } 1907 binder_inner_proc_unlock(proc); 1908 } 1909 1910 /** 1911 * binder_get_txn_from() - safely extract the "from" thread in transaction 1912 * @t: binder transaction for t->from 1913 * 1914 * Atomically return the "from" thread and increment the tmp_ref 1915 * count for the thread to ensure it stays alive until 1916 * binder_thread_dec_tmpref() is called. 1917 * 1918 * Return: the value of t->from 1919 */ 1920 static struct binder_thread *binder_get_txn_from( 1921 struct binder_transaction *t) 1922 { 1923 struct binder_thread *from; 1924 1925 spin_lock(&t->lock); 1926 from = t->from; 1927 if (from) 1928 atomic_inc(&from->tmp_ref); 1929 spin_unlock(&t->lock); 1930 return from; 1931 } 1932 1933 /** 1934 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock 1935 * @t: binder transaction for t->from 1936 * 1937 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock 1938 * to guarantee that the thread cannot be released while operating on it. 1939 * The caller must call binder_inner_proc_unlock() to release the inner lock 1940 * as well as call binder_thread_dec_tmpref() to release the reference. 1941 * 1942 * Return: the value of t->from 1943 */ 1944 static struct binder_thread *binder_get_txn_from_and_acq_inner( 1945 struct binder_transaction *t) 1946 { 1947 struct binder_thread *from; 1948 1949 from = binder_get_txn_from(t); 1950 if (!from) 1951 return NULL; 1952 binder_inner_proc_lock(from->proc); 1953 if (t->from) { 1954 BUG_ON(from != t->from); 1955 return from; 1956 } 1957 binder_inner_proc_unlock(from->proc); 1958 binder_thread_dec_tmpref(from); 1959 return NULL; 1960 } 1961 1962 static void binder_free_transaction(struct binder_transaction *t) 1963 { 1964 if (t->buffer) 1965 t->buffer->transaction = NULL; 1966 kfree(t); 1967 binder_stats_deleted(BINDER_STAT_TRANSACTION); 1968 } 1969 1970 static void binder_send_failed_reply(struct binder_transaction *t, 1971 uint32_t error_code) 1972 { 1973 struct binder_thread *target_thread; 1974 struct binder_transaction *next; 1975 1976 BUG_ON(t->flags & TF_ONE_WAY); 1977 while (1) { 1978 target_thread = binder_get_txn_from_and_acq_inner(t); 1979 if (target_thread) { 1980 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 1981 "send failed reply for transaction %d to %d:%d\n", 1982 t->debug_id, 1983 target_thread->proc->pid, 1984 target_thread->pid); 1985 1986 binder_pop_transaction_ilocked(target_thread, t); 1987 if (target_thread->reply_error.cmd == BR_OK) { 1988 target_thread->reply_error.cmd = error_code; 1989 binder_enqueue_thread_work_ilocked( 1990 target_thread, 1991 &target_thread->reply_error.work); 1992 wake_up_interruptible(&target_thread->wait); 1993 } else { 1994 WARN(1, "Unexpected reply error: %u\n", 1995 target_thread->reply_error.cmd); 1996 } 1997 binder_inner_proc_unlock(target_thread->proc); 1998 binder_thread_dec_tmpref(target_thread); 1999 binder_free_transaction(t); 2000 return; 2001 } 2002 next = t->from_parent; 2003 2004 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 2005 "send failed reply for transaction %d, target dead\n", 2006 t->debug_id); 2007 2008 binder_free_transaction(t); 2009 if (next == NULL) { 2010 binder_debug(BINDER_DEBUG_DEAD_BINDER, 2011 "reply failed, no target thread at
root\n"); 2012 return; 2013 } 2014 t = next; 2015 binder_debug(BINDER_DEBUG_DEAD_BINDER, 2016 "reply failed, no target thread -- retry %d\n", 2017 t->debug_id); 2018 } 2019 } 2020 2021 /** 2022 * binder_cleanup_transaction() - cleans up undelivered transaction 2023 * @t: transaction that needs to be cleaned up 2024 * @reason: reason the transaction wasn't delivered 2025 * @error_code: error to return to caller (if synchronous call) 2026 */ 2027 static void binder_cleanup_transaction(struct binder_transaction *t, 2028 const char *reason, 2029 uint32_t error_code) 2030 { 2031 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) { 2032 binder_send_failed_reply(t, error_code); 2033 } else { 2034 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 2035 "undelivered transaction %d, %s\n", 2036 t->debug_id, reason); 2037 binder_free_transaction(t); 2038 } 2039 } 2040 2041 /** 2042 * binder_validate_object() - checks for a valid metadata object in a buffer. 2043 * @buffer: binder_buffer that we're parsing. 2044 * @offset: offset in the buffer at which to validate an object. 2045 * 2046 * Return: If there's a valid metadata object at @offset in @buffer, the 2047 * size of that object. Otherwise, it returns zero. 2048 */ 2049 static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset) 2050 { 2051 /* Check if we can read a header first */ 2052 struct binder_object_header *hdr; 2053 size_t object_size = 0; 2054 2055 if (offset > buffer->data_size - sizeof(*hdr) || 2056 buffer->data_size < sizeof(*hdr) || 2057 !IS_ALIGNED(offset, sizeof(u32))) 2058 return 0; 2059 2060 /* Ok, now see if we can read a complete object. */ 2061 hdr = (struct binder_object_header *)(buffer->data + offset); 2062 switch (hdr->type) { 2063 case BINDER_TYPE_BINDER: 2064 case BINDER_TYPE_WEAK_BINDER: 2065 case BINDER_TYPE_HANDLE: 2066 case BINDER_TYPE_WEAK_HANDLE: 2067 object_size = sizeof(struct flat_binder_object); 2068 break; 2069 case BINDER_TYPE_FD: 2070 object_size = sizeof(struct binder_fd_object); 2071 break; 2072 case BINDER_TYPE_PTR: 2073 object_size = sizeof(struct binder_buffer_object); 2074 break; 2075 case BINDER_TYPE_FDA: 2076 object_size = sizeof(struct binder_fd_array_object); 2077 break; 2078 default: 2079 return 0; 2080 } 2081 if (offset <= buffer->data_size - object_size && 2082 buffer->data_size >= object_size) 2083 return object_size; 2084 else 2085 return 0; 2086 } 2087 2088 /** 2089 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer. 2090 * @b: binder_buffer containing the object 2091 * @index: index in offset array at which the binder_buffer_object is 2092 * located 2093 * @start: points to the start of the offset array 2094 * @num_valid: the number of valid offsets in the offset array 2095 * 2096 * Return: If @index is within the valid range of the offset array 2097 * described by @start and @num_valid, and if there's a valid 2098 * binder_buffer_object at the offset found in index @index 2099 * of the offset array, that object is returned. Otherwise, 2100 * %NULL is returned. 2101 * Note that the offset found in index @index itself is not 2102 * verified; this function assumes that @num_valid elements 2103 * from @start were previously verified to have valid offsets. 
2104 */ 2105 static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b, 2106 binder_size_t index, 2107 binder_size_t *start, 2108 binder_size_t num_valid) 2109 { 2110 struct binder_buffer_object *buffer_obj; 2111 binder_size_t *offp; 2112 2113 if (index >= num_valid) 2114 return NULL; 2115 2116 offp = start + index; 2117 buffer_obj = (struct binder_buffer_object *)(b->data + *offp); 2118 if (buffer_obj->hdr.type != BINDER_TYPE_PTR) 2119 return NULL; 2120 2121 return buffer_obj; 2122 } 2123 2124 /** 2125 * binder_validate_fixup() - validates pointer/fd fixups happen in order. 2126 * @b: transaction buffer 2127 * @objects_start: start of objects buffer 2128 * @buffer: binder_buffer_object in which to fix up 2129 * @fixup_offset: start offset in @buffer to fix up 2130 * @last_obj: last binder_buffer_object that we fixed up in 2131 * @last_min_offset: minimum fixup offset in @last_obj 2132 * 2133 * Return: %true if a fixup in buffer @buffer at offset @fixup_offset is 2134 * allowed. 2135 * 2136 * For safety reasons, we only allow fixups inside a buffer to happen 2137 * at increasing offsets; additionally, we only allow fixup on the last 2138 * buffer object that was verified, or one of its parents. 2139 * 2140 * Example of what is allowed: 2141 * 2142 * A 2143 * B (parent = A, offset = 0) 2144 * C (parent = A, offset = 16) 2145 * D (parent = C, offset = 0) 2146 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset) 2147 * 2148 * Examples of what is not allowed: 2149 * 2150 * Decreasing offsets within the same parent: 2151 * A 2152 * C (parent = A, offset = 16) 2153 * B (parent = A, offset = 0) // decreasing offset within A 2154 * 2155 * Referring to a parent that wasn't the last object or any of its parents: 2156 * A 2157 * B (parent = A, offset = 0) 2158 * C (parent = A, offset = 0) 2159 * C (parent = A, offset = 16) 2160 * D (parent = B, offset = 0) // B is not A or any of A's parents 2161 */ 2162 static bool binder_validate_fixup(struct binder_buffer *b, 2163 binder_size_t *objects_start, 2164 struct binder_buffer_object *buffer, 2165 binder_size_t fixup_offset, 2166 struct binder_buffer_object *last_obj, 2167 binder_size_t last_min_offset) 2168 { 2169 if (!last_obj) { 2170 /* Nothing to fix up in */ 2171 return false; 2172 } 2173 2174 while (last_obj != buffer) { 2175 /* 2176 * Safe to retrieve the parent of last_obj, since it 2177 * was already previously verified by the driver.
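 * The walk below ends either at @buffer itself, in which case the
 * fixup is allowed iff it lands at or above the minimum offset, or
 * at an object without a parent, in which case @buffer was not on
 * the parent chain and the fixup is rejected.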
2178 */ 2179 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0) 2180 return false; 2181 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t); 2182 last_obj = (struct binder_buffer_object *) 2183 (b->data + *(objects_start + last_obj->parent)); 2184 } 2185 return (fixup_offset >= last_min_offset); 2186 } 2187 2188 static void binder_transaction_buffer_release(struct binder_proc *proc, 2189 struct binder_buffer *buffer, 2190 binder_size_t *failed_at) 2191 { 2192 binder_size_t *offp, *off_start, *off_end; 2193 int debug_id = buffer->debug_id; 2194 2195 binder_debug(BINDER_DEBUG_TRANSACTION, 2196 "%d buffer release %d, size %zd-%zd, failed at %p\n", 2197 proc->pid, buffer->debug_id, 2198 buffer->data_size, buffer->offsets_size, failed_at); 2199 2200 if (buffer->target_node) 2201 binder_dec_node(buffer->target_node, 1, 0); 2202 2203 off_start = (binder_size_t *)(buffer->data + 2204 ALIGN(buffer->data_size, sizeof(void *))); 2205 if (failed_at) 2206 off_end = failed_at; 2207 else 2208 off_end = (void *)off_start + buffer->offsets_size; 2209 for (offp = off_start; offp < off_end; offp++) { 2210 struct binder_object_header *hdr; 2211 size_t object_size = binder_validate_object(buffer, *offp); 2212 2213 if (object_size == 0) { 2214 pr_err("transaction release %d bad object at offset %lld, size %zd\n", 2215 debug_id, (u64)*offp, buffer->data_size); 2216 continue; 2217 } 2218 hdr = (struct binder_object_header *)(buffer->data + *offp); 2219 switch (hdr->type) { 2220 case BINDER_TYPE_BINDER: 2221 case BINDER_TYPE_WEAK_BINDER: { 2222 struct flat_binder_object *fp; 2223 struct binder_node *node; 2224 2225 fp = to_flat_binder_object(hdr); 2226 node = binder_get_node(proc, fp->binder); 2227 if (node == NULL) { 2228 pr_err("transaction release %d bad node %016llx\n", 2229 debug_id, (u64)fp->binder); 2230 break; 2231 } 2232 binder_debug(BINDER_DEBUG_TRANSACTION, 2233 " node %d u%016llx\n", 2234 node->debug_id, (u64)node->ptr); 2235 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER, 2236 0); 2237 binder_put_node(node); 2238 } break; 2239 case BINDER_TYPE_HANDLE: 2240 case BINDER_TYPE_WEAK_HANDLE: { 2241 struct flat_binder_object *fp; 2242 struct binder_ref_data rdata; 2243 int ret; 2244 2245 fp = to_flat_binder_object(hdr); 2246 ret = binder_dec_ref_for_handle(proc, fp->handle, 2247 hdr->type == BINDER_TYPE_HANDLE, &rdata); 2248 2249 if (ret) { 2250 pr_err("transaction release %d bad handle %d, ret = %d\n", 2251 debug_id, fp->handle, ret); 2252 break; 2253 } 2254 binder_debug(BINDER_DEBUG_TRANSACTION, 2255 " ref %d desc %d\n", 2256 rdata.debug_id, rdata.desc); 2257 } break; 2258 2259 case BINDER_TYPE_FD: { 2260 struct binder_fd_object *fp = to_binder_fd_object(hdr); 2261 2262 binder_debug(BINDER_DEBUG_TRANSACTION, 2263 " fd %d\n", fp->fd); 2264 if (failed_at) 2265 task_close_fd(proc, fp->fd); 2266 } break; 2267 case BINDER_TYPE_PTR: 2268 /* 2269 * Nothing to do here, this will get cleaned up when the 2270 * transaction buffer gets freed 2271 */ 2272 break; 2273 case BINDER_TYPE_FDA: { 2274 struct binder_fd_array_object *fda; 2275 struct binder_buffer_object *parent; 2276 uintptr_t parent_buffer; 2277 u32 *fd_array; 2278 size_t fd_index; 2279 binder_size_t fd_buf_size; 2280 2281 fda = to_binder_fd_array_object(hdr); 2282 parent = binder_validate_ptr(buffer, fda->parent, 2283 off_start, 2284 offp - off_start); 2285 if (!parent) { 2286 pr_err("transaction release %d bad parent offset\n", 2287 debug_id); 2288 continue; 2289 } 2290 /* 2291 * Since the parent was already fixed up, convert it 
2292 * back to kernel address space to access it 2293 */ 2294 parent_buffer = parent->buffer - 2295 binder_alloc_get_user_buffer_offset( 2296 &proc->alloc); 2297 2298 fd_buf_size = sizeof(u32) * fda->num_fds; 2299 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 2300 pr_err("transaction release %d invalid number of fds (%lld)\n", 2301 debug_id, (u64)fda->num_fds); 2302 continue; 2303 } 2304 if (fd_buf_size > parent->length || 2305 fda->parent_offset > parent->length - fd_buf_size) { 2306 /* No space for all file descriptors here. */ 2307 pr_err("transaction release %d not enough space for %lld fds in buffer\n", 2308 debug_id, (u64)fda->num_fds); 2309 continue; 2310 } 2311 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); 2312 for (fd_index = 0; fd_index < fda->num_fds; fd_index++) 2313 task_close_fd(proc, fd_array[fd_index]); 2314 } break; 2315 default: 2316 pr_err("transaction release %d bad object type %x\n", 2317 debug_id, hdr->type); 2318 break; 2319 } 2320 } 2321 } 2322 2323 static int binder_translate_binder(struct flat_binder_object *fp, 2324 struct binder_transaction *t, 2325 struct binder_thread *thread) 2326 { 2327 struct binder_node *node; 2328 struct binder_proc *proc = thread->proc; 2329 struct binder_proc *target_proc = t->to_proc; 2330 struct binder_ref_data rdata; 2331 int ret = 0; 2332 2333 node = binder_get_node(proc, fp->binder); 2334 if (!node) { 2335 node = binder_new_node(proc, fp); 2336 if (!node) 2337 return -ENOMEM; 2338 } 2339 if (fp->cookie != node->cookie) { 2340 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n", 2341 proc->pid, thread->pid, (u64)fp->binder, 2342 node->debug_id, (u64)fp->cookie, 2343 (u64)node->cookie); 2344 ret = -EINVAL; 2345 goto done; 2346 } 2347 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { 2348 ret = -EPERM; 2349 goto done; 2350 } 2351 2352 ret = binder_inc_ref_for_node(target_proc, node, 2353 fp->hdr.type == BINDER_TYPE_BINDER, 2354 &thread->todo, &rdata); 2355 if (ret) 2356 goto done; 2357 2358 if (fp->hdr.type == BINDER_TYPE_BINDER) 2359 fp->hdr.type = BINDER_TYPE_HANDLE; 2360 else 2361 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE; 2362 fp->binder = 0; 2363 fp->handle = rdata.desc; 2364 fp->cookie = 0; 2365 2366 trace_binder_transaction_node_to_ref(t, node, &rdata); 2367 binder_debug(BINDER_DEBUG_TRANSACTION, 2368 " node %d u%016llx -> ref %d desc %d\n", 2369 node->debug_id, (u64)node->ptr, 2370 rdata.debug_id, rdata.desc); 2371 done: 2372 binder_put_node(node); 2373 return ret; 2374 } 2375 2376 static int binder_translate_handle(struct flat_binder_object *fp, 2377 struct binder_transaction *t, 2378 struct binder_thread *thread) 2379 { 2380 struct binder_proc *proc = thread->proc; 2381 struct binder_proc *target_proc = t->to_proc; 2382 struct binder_node *node; 2383 struct binder_ref_data src_rdata; 2384 int ret = 0; 2385 2386 node = binder_get_node_from_ref(proc, fp->handle, 2387 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata); 2388 if (!node) { 2389 binder_user_error("%d:%d got transaction with invalid handle, %d\n", 2390 proc->pid, thread->pid, fp->handle); 2391 return -EINVAL; 2392 } 2393 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { 2394 ret = -EPERM; 2395 goto done; 2396 } 2397 2398 binder_node_lock(node); 2399 if (node->proc == target_proc) { 2400 if (fp->hdr.type == BINDER_TYPE_HANDLE) 2401 fp->hdr.type = BINDER_TYPE_BINDER; 2402 else 2403 fp->hdr.type = BINDER_TYPE_WEAK_BINDER; 2404 fp->binder = node->ptr; 2405 fp->cookie = node->cookie; 
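		/*
		 * A dead node has no owning proc, so only take the owner's
		 * inner lock when node->proc is still set; node->lock (held
		 * since above) keeps node->proc stable across this window.
		 */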
2406 if (node->proc) 2407 binder_inner_proc_lock(node->proc); 2408 binder_inc_node_nilocked(node, 2409 fp->hdr.type == BINDER_TYPE_BINDER, 2410 0, NULL); 2411 if (node->proc) 2412 binder_inner_proc_unlock(node->proc); 2413 trace_binder_transaction_ref_to_node(t, node, &src_rdata); 2414 binder_debug(BINDER_DEBUG_TRANSACTION, 2415 " ref %d desc %d -> node %d u%016llx\n", 2416 src_rdata.debug_id, src_rdata.desc, node->debug_id, 2417 (u64)node->ptr); 2418 binder_node_unlock(node); 2419 } else { 2420 struct binder_ref_data dest_rdata; 2421 2422 binder_node_unlock(node); 2423 ret = binder_inc_ref_for_node(target_proc, node, 2424 fp->hdr.type == BINDER_TYPE_HANDLE, 2425 NULL, &dest_rdata); 2426 if (ret) 2427 goto done; 2428 2429 fp->binder = 0; 2430 fp->handle = dest_rdata.desc; 2431 fp->cookie = 0; 2432 trace_binder_transaction_ref_to_ref(t, node, &src_rdata, 2433 &dest_rdata); 2434 binder_debug(BINDER_DEBUG_TRANSACTION, 2435 " ref %d desc %d -> ref %d desc %d (node %d)\n", 2436 src_rdata.debug_id, src_rdata.desc, 2437 dest_rdata.debug_id, dest_rdata.desc, 2438 node->debug_id); 2439 } 2440 done: 2441 binder_put_node(node); 2442 return ret; 2443 } 2444 2445 static int binder_translate_fd(int fd, 2446 struct binder_transaction *t, 2447 struct binder_thread *thread, 2448 struct binder_transaction *in_reply_to) 2449 { 2450 struct binder_proc *proc = thread->proc; 2451 struct binder_proc *target_proc = t->to_proc; 2452 int target_fd; 2453 struct file *file; 2454 int ret; 2455 bool target_allows_fd; 2456 2457 if (in_reply_to) 2458 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS); 2459 else 2460 target_allows_fd = t->buffer->target_node->accept_fds; 2461 if (!target_allows_fd) { 2462 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n", 2463 proc->pid, thread->pid, 2464 in_reply_to ? 
"reply" : "transaction", 2465 fd); 2466 ret = -EPERM; 2467 goto err_fd_not_accepted; 2468 } 2469 2470 file = fget(fd); 2471 if (!file) { 2472 binder_user_error("%d:%d got transaction with invalid fd, %d\n", 2473 proc->pid, thread->pid, fd); 2474 ret = -EBADF; 2475 goto err_fget; 2476 } 2477 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file); 2478 if (ret < 0) { 2479 ret = -EPERM; 2480 goto err_security; 2481 } 2482 2483 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); 2484 if (target_fd < 0) { 2485 ret = -ENOMEM; 2486 goto err_get_unused_fd; 2487 } 2488 task_fd_install(target_proc, target_fd, file); 2489 trace_binder_transaction_fd(t, fd, target_fd); 2490 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n", 2491 fd, target_fd); 2492 2493 return target_fd; 2494 2495 err_get_unused_fd: 2496 err_security: 2497 fput(file); 2498 err_fget: 2499 err_fd_not_accepted: 2500 return ret; 2501 } 2502 2503 static int binder_translate_fd_array(struct binder_fd_array_object *fda, 2504 struct binder_buffer_object *parent, 2505 struct binder_transaction *t, 2506 struct binder_thread *thread, 2507 struct binder_transaction *in_reply_to) 2508 { 2509 binder_size_t fdi, fd_buf_size, num_installed_fds; 2510 int target_fd; 2511 uintptr_t parent_buffer; 2512 u32 *fd_array; 2513 struct binder_proc *proc = thread->proc; 2514 struct binder_proc *target_proc = t->to_proc; 2515 2516 fd_buf_size = sizeof(u32) * fda->num_fds; 2517 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 2518 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n", 2519 proc->pid, thread->pid, (u64)fda->num_fds); 2520 return -EINVAL; 2521 } 2522 if (fd_buf_size > parent->length || 2523 fda->parent_offset > parent->length - fd_buf_size) { 2524 /* No space for all file descriptors here. */ 2525 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n", 2526 proc->pid, thread->pid, (u64)fda->num_fds); 2527 return -EINVAL; 2528 } 2529 /* 2530 * Since the parent was already fixed up, convert it 2531 * back to the kernel address space to access it 2532 */ 2533 parent_buffer = parent->buffer - 2534 binder_alloc_get_user_buffer_offset(&target_proc->alloc); 2535 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); 2536 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) { 2537 binder_user_error("%d:%d parent offset not aligned correctly.\n", 2538 proc->pid, thread->pid); 2539 return -EINVAL; 2540 } 2541 for (fdi = 0; fdi < fda->num_fds; fdi++) { 2542 target_fd = binder_translate_fd(fd_array[fdi], t, thread, 2543 in_reply_to); 2544 if (target_fd < 0) 2545 goto err_translate_fd_failed; 2546 fd_array[fdi] = target_fd; 2547 } 2548 return 0; 2549 2550 err_translate_fd_failed: 2551 /* 2552 * Failed to allocate fd or security error, free fds 2553 * installed so far. 
2554 */ 2555 num_installed_fds = fdi; 2556 for (fdi = 0; fdi < num_installed_fds; fdi++) 2557 task_close_fd(target_proc, fd_array[fdi]); 2558 return target_fd; 2559 } 2560 2561 static int binder_fixup_parent(struct binder_transaction *t, 2562 struct binder_thread *thread, 2563 struct binder_buffer_object *bp, 2564 binder_size_t *off_start, 2565 binder_size_t num_valid, 2566 struct binder_buffer_object *last_fixup_obj, 2567 binder_size_t last_fixup_min_off) 2568 { 2569 struct binder_buffer_object *parent; 2570 u8 *parent_buffer; 2571 struct binder_buffer *b = t->buffer; 2572 struct binder_proc *proc = thread->proc; 2573 struct binder_proc *target_proc = t->to_proc; 2574 2575 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT)) 2576 return 0; 2577 2578 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid); 2579 if (!parent) { 2580 binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 2581 proc->pid, thread->pid); 2582 return -EINVAL; 2583 } 2584 2585 if (!binder_validate_fixup(b, off_start, 2586 parent, bp->parent_offset, 2587 last_fixup_obj, 2588 last_fixup_min_off)) { 2589 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", 2590 proc->pid, thread->pid); 2591 return -EINVAL; 2592 } 2593 2594 if (parent->length < sizeof(binder_uintptr_t) || 2595 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) { 2596 /* No space for a pointer here! */ 2597 binder_user_error("%d:%d got transaction with invalid parent offset\n", 2598 proc->pid, thread->pid); 2599 return -EINVAL; 2600 } 2601 parent_buffer = (u8 *)((uintptr_t)parent->buffer - 2602 binder_alloc_get_user_buffer_offset( 2603 &target_proc->alloc)); 2604 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer; 2605 2606 return 0; 2607 } 2608 2609 /** 2610 * binder_proc_transaction() - sends a transaction to a process and wakes it up 2611 * @t: transaction to send 2612 * @proc: process to send the transaction to 2613 * @thread: thread in @proc to send the transaction to (may be NULL) 2614 * 2615 * This function queues a transaction to the specified process. It will try 2616 * to find a thread in the target process to handle the transaction and 2617 * wake it up. If no thread is found, the work is queued to the proc 2618 * waitqueue. 2619 * 2620 * If the @thread parameter is not NULL, the transaction is always queued 2621 * to the waitlist of that specific thread. 
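 *
 * In summary, the queueing decision below is:
 *
 *	a specific or selected thread	-> thread->todo
 *	no thread, not pending_async	-> proc->todo
 *	oneway with one already pending	-> node->async_todo
 *
 * Oneway (async) transactions are serialized per node: while one is
 * in flight, later ones wait on node->async_todo until the buffer of
 * the previous one is freed (see the BC_FREE_BUFFER handling).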
2622 * 2623 * Return: true if the transaction was successfully queued, 2624 * false if the target process or thread is dead 2625 */ 2626 static bool binder_proc_transaction(struct binder_transaction *t, 2627 struct binder_proc *proc, 2628 struct binder_thread *thread) 2629 { 2630 struct binder_node *node = t->buffer->target_node; 2631 bool oneway = !!(t->flags & TF_ONE_WAY); 2632 bool pending_async = false; 2633 2634 BUG_ON(!node); 2635 binder_node_lock(node); 2636 if (oneway) { 2637 BUG_ON(thread); 2638 if (node->has_async_transaction) { 2639 pending_async = true; 2640 } else { 2641 node->has_async_transaction = true; 2642 } 2643 } 2644 2645 binder_inner_proc_lock(proc); 2646 2647 if (proc->is_dead || (thread && thread->is_dead)) { 2648 binder_inner_proc_unlock(proc); 2649 binder_node_unlock(node); 2650 return false; 2651 } 2652 2653 if (!thread && !pending_async) 2654 thread = binder_select_thread_ilocked(proc); 2655 2656 if (thread) 2657 binder_enqueue_thread_work_ilocked(thread, &t->work); 2658 else if (!pending_async) 2659 binder_enqueue_work_ilocked(&t->work, &proc->todo); 2660 else 2661 binder_enqueue_work_ilocked(&t->work, &node->async_todo); 2662 2663 if (!pending_async) 2664 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */); 2665 2666 binder_inner_proc_unlock(proc); 2667 binder_node_unlock(node); 2668 2669 return true; 2670 } 2671 2672 /** 2673 * binder_get_node_refs_for_txn() - Get required refs on node for txn 2674 * @node: struct binder_node for which to get refs 2675 * @procp: returns @node->proc if valid 2676 * @error: set to BR_DEAD_REPLY if @node->proc is NULL 2677 * 2678 * User-space normally keeps the node alive when creating a transaction 2679 * since it has a reference to the target. The local strong ref keeps it 2680 * alive if the sending process dies before the target process processes 2681 * the transaction. If the source process is malicious or has a reference 2682 * counting bug, relying on the local strong ref can fail. 2683 * 2684 * Since user-space can cause the local strong ref to go away, we also take 2685 * a tmpref on the node to ensure it survives while we are constructing 2686 * the transaction. We also need a tmpref on the proc while we are 2687 * constructing the transaction, so we take that here as well. 2688 * 2689 * Return: The target_node with refs taken or NULL if @node->proc is NULL. 2690 * Also sets @procp if valid.
If the @node->proc is NULL indicating that the 2691 * target proc has died, @error is set to BR_DEAD_REPLY 2692 */ 2693 static struct binder_node *binder_get_node_refs_for_txn( 2694 struct binder_node *node, 2695 struct binder_proc **procp, 2696 uint32_t *error) 2697 { 2698 struct binder_node *target_node = NULL; 2699 2700 binder_node_inner_lock(node); 2701 if (node->proc) { 2702 target_node = node; 2703 binder_inc_node_nilocked(node, 1, 0, NULL); 2704 binder_inc_node_tmpref_ilocked(node); 2705 node->proc->tmp_ref++; 2706 *procp = node->proc; 2707 } else 2708 *error = BR_DEAD_REPLY; 2709 binder_node_inner_unlock(node); 2710 2711 return target_node; 2712 } 2713 2714 static void binder_transaction(struct binder_proc *proc, 2715 struct binder_thread *thread, 2716 struct binder_transaction_data *tr, int reply, 2717 binder_size_t extra_buffers_size) 2718 { 2719 int ret; 2720 struct binder_transaction *t; 2721 struct binder_work *tcomplete; 2722 binder_size_t *offp, *off_end, *off_start; 2723 binder_size_t off_min; 2724 u8 *sg_bufp, *sg_buf_end; 2725 struct binder_proc *target_proc = NULL; 2726 struct binder_thread *target_thread = NULL; 2727 struct binder_node *target_node = NULL; 2728 struct binder_transaction *in_reply_to = NULL; 2729 struct binder_transaction_log_entry *e; 2730 uint32_t return_error = 0; 2731 uint32_t return_error_param = 0; 2732 uint32_t return_error_line = 0; 2733 struct binder_buffer_object *last_fixup_obj = NULL; 2734 binder_size_t last_fixup_min_off = 0; 2735 struct binder_context *context = proc->context; 2736 int t_debug_id = atomic_inc_return(&binder_last_id); 2737 2738 e = binder_transaction_log_add(&binder_transaction_log); 2739 e->debug_id = t_debug_id; 2740 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); 2741 e->from_proc = proc->pid; 2742 e->from_thread = thread->pid; 2743 e->target_handle = tr->target.handle; 2744 e->data_size = tr->data_size; 2745 e->offsets_size = tr->offsets_size; 2746 e->context_name = proc->context->name; 2747 2748 if (reply) { 2749 binder_inner_proc_lock(proc); 2750 in_reply_to = thread->transaction_stack; 2751 if (in_reply_to == NULL) { 2752 binder_inner_proc_unlock(proc); 2753 binder_user_error("%d:%d got reply transaction with no transaction stack\n", 2754 proc->pid, thread->pid); 2755 return_error = BR_FAILED_REPLY; 2756 return_error_param = -EPROTO; 2757 return_error_line = __LINE__; 2758 goto err_empty_call_stack; 2759 } 2760 if (in_reply_to->to_thread != thread) { 2761 spin_lock(&in_reply_to->lock); 2762 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n", 2763 proc->pid, thread->pid, in_reply_to->debug_id, 2764 in_reply_to->to_proc ? 2765 in_reply_to->to_proc->pid : 0, 2766 in_reply_to->to_thread ? 
2767 in_reply_to->to_thread->pid : 0); 2768 spin_unlock(&in_reply_to->lock); 2769 binder_inner_proc_unlock(proc); 2770 return_error = BR_FAILED_REPLY; 2771 return_error_param = -EPROTO; 2772 return_error_line = __LINE__; 2773 in_reply_to = NULL; 2774 goto err_bad_call_stack; 2775 } 2776 thread->transaction_stack = in_reply_to->to_parent; 2777 binder_inner_proc_unlock(proc); 2778 binder_set_nice(in_reply_to->saved_priority); 2779 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to); 2780 if (target_thread == NULL) { 2781 return_error = BR_DEAD_REPLY; 2782 return_error_line = __LINE__; 2783 goto err_dead_binder; 2784 } 2785 if (target_thread->transaction_stack != in_reply_to) { 2786 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n", 2787 proc->pid, thread->pid, 2788 target_thread->transaction_stack ? 2789 target_thread->transaction_stack->debug_id : 0, 2790 in_reply_to->debug_id); 2791 binder_inner_proc_unlock(target_thread->proc); 2792 return_error = BR_FAILED_REPLY; 2793 return_error_param = -EPROTO; 2794 return_error_line = __LINE__; 2795 in_reply_to = NULL; 2796 target_thread = NULL; 2797 goto err_dead_binder; 2798 } 2799 target_proc = target_thread->proc; 2800 target_proc->tmp_ref++; 2801 binder_inner_proc_unlock(target_thread->proc); 2802 } else { 2803 if (tr->target.handle) { 2804 struct binder_ref *ref; 2805 2806 /* 2807 * There must already be a strong ref 2808 * on this node. If so, do a strong 2809 * increment on the node to ensure it 2810 * stays alive until the transaction is 2811 * done. 2812 */ 2813 binder_proc_lock(proc); 2814 ref = binder_get_ref_olocked(proc, tr->target.handle, 2815 true); 2816 if (ref) { 2817 target_node = binder_get_node_refs_for_txn( 2818 ref->node, &target_proc, 2819 &return_error); 2820 } else { 2821 binder_user_error("%d:%d got transaction to invalid handle\n", 2822 proc->pid, thread->pid); 2823 return_error = BR_FAILED_REPLY; 2824 } 2825 binder_proc_unlock(proc); 2826 } else { 2827 mutex_lock(&context->context_mgr_node_lock); 2828 target_node = context->binder_context_mgr_node; 2829 if (target_node) 2830 target_node = binder_get_node_refs_for_txn( 2831 target_node, &target_proc, 2832 &return_error); 2833 else 2834 return_error = BR_DEAD_REPLY; 2835 mutex_unlock(&context->context_mgr_node_lock); 2836 } 2837 if (!target_node) { 2838 /* 2839 * return_error is set above 2840 */ 2841 return_error_param = -EINVAL; 2842 return_error_line = __LINE__; 2843 goto err_dead_binder; 2844 } 2845 e->to_node = target_node->debug_id; 2846 if (security_binder_transaction(proc->tsk, 2847 target_proc->tsk) < 0) { 2848 return_error = BR_FAILED_REPLY; 2849 return_error_param = -EPERM; 2850 return_error_line = __LINE__; 2851 goto err_invalid_target_handle; 2852 } 2853 binder_inner_proc_lock(proc); 2854 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { 2855 struct binder_transaction *tmp; 2856 2857 tmp = thread->transaction_stack; 2858 if (tmp->to_thread != thread) { 2859 spin_lock(&tmp->lock); 2860 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", 2861 proc->pid, thread->pid, tmp->debug_id, 2862 tmp->to_proc ? tmp->to_proc->pid : 0, 2863 tmp->to_thread ? 
2864 tmp->to_thread->pid : 0); 2865 spin_unlock(&tmp->lock); 2866 binder_inner_proc_unlock(proc); 2867 return_error = BR_FAILED_REPLY; 2868 return_error_param = -EPROTO; 2869 return_error_line = __LINE__; 2870 goto err_bad_call_stack; 2871 } 2872 while (tmp) { 2873 struct binder_thread *from; 2874 2875 spin_lock(&tmp->lock); 2876 from = tmp->from; 2877 if (from && from->proc == target_proc) { 2878 atomic_inc(&from->tmp_ref); 2879 target_thread = from; 2880 spin_unlock(&tmp->lock); 2881 break; 2882 } 2883 spin_unlock(&tmp->lock); 2884 tmp = tmp->from_parent; 2885 } 2886 } 2887 binder_inner_proc_unlock(proc); 2888 } 2889 if (target_thread) 2890 e->to_thread = target_thread->pid; 2891 e->to_proc = target_proc->pid; 2892 2893 /* TODO: reuse incoming transaction for reply */ 2894 t = kzalloc(sizeof(*t), GFP_KERNEL); 2895 if (t == NULL) { 2896 return_error = BR_FAILED_REPLY; 2897 return_error_param = -ENOMEM; 2898 return_error_line = __LINE__; 2899 goto err_alloc_t_failed; 2900 } 2901 binder_stats_created(BINDER_STAT_TRANSACTION); 2902 spin_lock_init(&t->lock); 2903 2904 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); 2905 if (tcomplete == NULL) { 2906 return_error = BR_FAILED_REPLY; 2907 return_error_param = -ENOMEM; 2908 return_error_line = __LINE__; 2909 goto err_alloc_tcomplete_failed; 2910 } 2911 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); 2912 2913 t->debug_id = t_debug_id; 2914 2915 if (reply) 2916 binder_debug(BINDER_DEBUG_TRANSACTION, 2917 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n", 2918 proc->pid, thread->pid, t->debug_id, 2919 target_proc->pid, target_thread->pid, 2920 (u64)tr->data.ptr.buffer, 2921 (u64)tr->data.ptr.offsets, 2922 (u64)tr->data_size, (u64)tr->offsets_size, 2923 (u64)extra_buffers_size); 2924 else 2925 binder_debug(BINDER_DEBUG_TRANSACTION, 2926 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n", 2927 proc->pid, thread->pid, t->debug_id, 2928 target_proc->pid, target_node->debug_id, 2929 (u64)tr->data.ptr.buffer, 2930 (u64)tr->data.ptr.offsets, 2931 (u64)tr->data_size, (u64)tr->offsets_size, 2932 (u64)extra_buffers_size); 2933 2934 if (!reply && !(tr->flags & TF_ONE_WAY)) 2935 t->from = thread; 2936 else 2937 t->from = NULL; 2938 t->sender_euid = task_euid(proc->tsk); 2939 t->to_proc = target_proc; 2940 t->to_thread = target_thread; 2941 t->code = tr->code; 2942 t->flags = tr->flags; 2943 t->priority = task_nice(current); 2944 2945 trace_binder_transaction(reply, t, target_node); 2946 2947 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, 2948 tr->offsets_size, extra_buffers_size, 2949 !reply && (t->flags & TF_ONE_WAY)); 2950 if (IS_ERR(t->buffer)) { 2951 /* 2952 * -ESRCH indicates VMA cleared. The target is dying. 2953 */ 2954 return_error_param = PTR_ERR(t->buffer); 2955 return_error = return_error_param == -ESRCH ? 
2956 BR_DEAD_REPLY : BR_FAILED_REPLY; 2957 return_error_line = __LINE__; 2958 t->buffer = NULL; 2959 goto err_binder_alloc_buf_failed; 2960 } 2961 t->buffer->allow_user_free = 0; 2962 t->buffer->debug_id = t->debug_id; 2963 t->buffer->transaction = t; 2964 t->buffer->target_node = target_node; 2965 trace_binder_transaction_alloc_buf(t->buffer); 2966 off_start = (binder_size_t *)(t->buffer->data + 2967 ALIGN(tr->data_size, sizeof(void *))); 2968 offp = off_start; 2969 2970 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t) 2971 tr->data.ptr.buffer, tr->data_size)) { 2972 binder_user_error("%d:%d got transaction with invalid data ptr\n", 2973 proc->pid, thread->pid); 2974 return_error = BR_FAILED_REPLY; 2975 return_error_param = -EFAULT; 2976 return_error_line = __LINE__; 2977 goto err_copy_data_failed; 2978 } 2979 if (copy_from_user(offp, (const void __user *)(uintptr_t) 2980 tr->data.ptr.offsets, tr->offsets_size)) { 2981 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 2982 proc->pid, thread->pid); 2983 return_error = BR_FAILED_REPLY; 2984 return_error_param = -EFAULT; 2985 return_error_line = __LINE__; 2986 goto err_copy_data_failed; 2987 } 2988 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) { 2989 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n", 2990 proc->pid, thread->pid, (u64)tr->offsets_size); 2991 return_error = BR_FAILED_REPLY; 2992 return_error_param = -EINVAL; 2993 return_error_line = __LINE__; 2994 goto err_bad_offset; 2995 } 2996 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) { 2997 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n", 2998 proc->pid, thread->pid, 2999 (u64)extra_buffers_size); 3000 return_error = BR_FAILED_REPLY; 3001 return_error_param = -EINVAL; 3002 return_error_line = __LINE__; 3003 goto err_bad_offset; 3004 } 3005 off_end = (void *)off_start + tr->offsets_size; 3006 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *))); 3007 sg_buf_end = sg_bufp + extra_buffers_size; 3008 off_min = 0; 3009 for (; offp < off_end; offp++) { 3010 struct binder_object_header *hdr; 3011 size_t object_size = binder_validate_object(t->buffer, *offp); 3012 3013 if (object_size == 0 || *offp < off_min) { 3014 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n", 3015 proc->pid, thread->pid, (u64)*offp, 3016 (u64)off_min, 3017 (u64)t->buffer->data_size); 3018 return_error = BR_FAILED_REPLY; 3019 return_error_param = -EINVAL; 3020 return_error_line = __LINE__; 3021 goto err_bad_offset; 3022 } 3023 3024 hdr = (struct binder_object_header *)(t->buffer->data + *offp); 3025 off_min = *offp + object_size; 3026 switch (hdr->type) { 3027 case BINDER_TYPE_BINDER: 3028 case BINDER_TYPE_WEAK_BINDER: { 3029 struct flat_binder_object *fp; 3030 3031 fp = to_flat_binder_object(hdr); 3032 ret = binder_translate_binder(fp, t, thread); 3033 if (ret < 0) { 3034 return_error = BR_FAILED_REPLY; 3035 return_error_param = ret; 3036 return_error_line = __LINE__; 3037 goto err_translate_failed; 3038 } 3039 } break; 3040 case BINDER_TYPE_HANDLE: 3041 case BINDER_TYPE_WEAK_HANDLE: { 3042 struct flat_binder_object *fp; 3043 3044 fp = to_flat_binder_object(hdr); 3045 ret = binder_translate_handle(fp, t, thread); 3046 if (ret < 0) { 3047 return_error = BR_FAILED_REPLY; 3048 return_error_param = ret; 3049 return_error_line = __LINE__; 3050 goto err_translate_failed; 3051 } 3052 } break; 3053 3054 case BINDER_TYPE_FD: { 3055 struct binder_fd_object *fp = 
to_binder_fd_object(hdr); 3056 int target_fd = binder_translate_fd(fp->fd, t, thread, 3057 in_reply_to); 3058 3059 if (target_fd < 0) { 3060 return_error = BR_FAILED_REPLY; 3061 return_error_param = target_fd; 3062 return_error_line = __LINE__; 3063 goto err_translate_failed; 3064 } 3065 fp->pad_binder = 0; 3066 fp->fd = target_fd; 3067 } break; 3068 case BINDER_TYPE_FDA: { 3069 struct binder_fd_array_object *fda = 3070 to_binder_fd_array_object(hdr); 3071 struct binder_buffer_object *parent = 3072 binder_validate_ptr(t->buffer, fda->parent, 3073 off_start, 3074 offp - off_start); 3075 if (!parent) { 3076 binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 3077 proc->pid, thread->pid); 3078 return_error = BR_FAILED_REPLY; 3079 return_error_param = -EINVAL; 3080 return_error_line = __LINE__; 3081 goto err_bad_parent; 3082 } 3083 if (!binder_validate_fixup(t->buffer, off_start, 3084 parent, fda->parent_offset, 3085 last_fixup_obj, 3086 last_fixup_min_off)) { 3087 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", 3088 proc->pid, thread->pid); 3089 return_error = BR_FAILED_REPLY; 3090 return_error_param = -EINVAL; 3091 return_error_line = __LINE__; 3092 goto err_bad_parent; 3093 } 3094 ret = binder_translate_fd_array(fda, parent, t, thread, 3095 in_reply_to); 3096 if (ret < 0) { 3097 return_error = BR_FAILED_REPLY; 3098 return_error_param = ret; 3099 return_error_line = __LINE__; 3100 goto err_translate_failed; 3101 } 3102 last_fixup_obj = parent; 3103 last_fixup_min_off = 3104 fda->parent_offset + sizeof(u32) * fda->num_fds; 3105 } break; 3106 case BINDER_TYPE_PTR: { 3107 struct binder_buffer_object *bp = 3108 to_binder_buffer_object(hdr); 3109 size_t buf_left = sg_buf_end - sg_bufp; 3110 3111 if (bp->length > buf_left) { 3112 binder_user_error("%d:%d got transaction with too large buffer\n", 3113 proc->pid, thread->pid); 3114 return_error = BR_FAILED_REPLY; 3115 return_error_param = -EINVAL; 3116 return_error_line = __LINE__; 3117 goto err_bad_offset; 3118 } 3119 if (copy_from_user(sg_bufp, 3120 (const void __user *)(uintptr_t) 3121 bp->buffer, bp->length)) { 3122 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 3123 proc->pid, thread->pid); 3124 return_error_param = -EFAULT; 3125 return_error = BR_FAILED_REPLY; 3126 return_error_line = __LINE__; 3127 goto err_copy_data_failed; 3128 } 3129 /* Fixup buffer pointer to target proc address space */ 3130 bp->buffer = (uintptr_t)sg_bufp + 3131 binder_alloc_get_user_buffer_offset( 3132 &target_proc->alloc); 3133 sg_bufp += ALIGN(bp->length, sizeof(u64)); 3134 3135 ret = binder_fixup_parent(t, thread, bp, off_start, 3136 offp - off_start, 3137 last_fixup_obj, 3138 last_fixup_min_off); 3139 if (ret < 0) { 3140 return_error = BR_FAILED_REPLY; 3141 return_error_param = ret; 3142 return_error_line = __LINE__; 3143 goto err_translate_failed; 3144 } 3145 last_fixup_obj = bp; 3146 last_fixup_min_off = 0; 3147 } break; 3148 default: 3149 binder_user_error("%d:%d got transaction with invalid object type, %x\n", 3150 proc->pid, thread->pid, hdr->type); 3151 return_error = BR_FAILED_REPLY; 3152 return_error_param = -EINVAL; 3153 return_error_line = __LINE__; 3154 goto err_bad_object_type; 3155 } 3156 } 3157 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; 3158 t->work.type = BINDER_WORK_TRANSACTION; 3159 3160 if (reply) { 3161 binder_enqueue_thread_work(thread, tcomplete); 3162 binder_inner_proc_lock(target_proc); 3163 if (target_thread->is_dead) { 3164 
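			/*
			 * The reply target died between lookup and delivery:
			 * undo the tcomplete queued above and report
			 * BR_DEAD_REPLY to the sender.
			 */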
binder_inner_proc_unlock(target_proc); 3165 goto err_dead_proc_or_thread; 3166 } 3167 BUG_ON(t->buffer->async_transaction != 0); 3168 binder_pop_transaction_ilocked(target_thread, in_reply_to); 3169 binder_enqueue_thread_work_ilocked(target_thread, &t->work); 3170 binder_inner_proc_unlock(target_proc); 3171 wake_up_interruptible_sync(&target_thread->wait); 3172 binder_free_transaction(in_reply_to); 3173 } else if (!(t->flags & TF_ONE_WAY)) { 3174 BUG_ON(t->buffer->async_transaction != 0); 3175 binder_inner_proc_lock(proc); 3176 /* 3177 * Defer the TRANSACTION_COMPLETE, so we don't return to 3178 * userspace immediately; this allows the target process to 3179 * immediately start processing this transaction, reducing 3180 * latency. We will then return the TRANSACTION_COMPLETE when 3181 * the target replies (or there is an error). 3182 */ 3183 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete); 3184 t->need_reply = 1; 3185 t->from_parent = thread->transaction_stack; 3186 thread->transaction_stack = t; 3187 binder_inner_proc_unlock(proc); 3188 if (!binder_proc_transaction(t, target_proc, target_thread)) { 3189 binder_inner_proc_lock(proc); 3190 binder_pop_transaction_ilocked(thread, t); 3191 binder_inner_proc_unlock(proc); 3192 goto err_dead_proc_or_thread; 3193 } 3194 } else { 3195 BUG_ON(target_node == NULL); 3196 BUG_ON(t->buffer->async_transaction != 1); 3197 binder_enqueue_thread_work(thread, tcomplete); 3198 if (!binder_proc_transaction(t, target_proc, NULL)) 3199 goto err_dead_proc_or_thread; 3200 } 3201 if (target_thread) 3202 binder_thread_dec_tmpref(target_thread); 3203 binder_proc_dec_tmpref(target_proc); 3204 if (target_node) 3205 binder_dec_node_tmpref(target_node); 3206 /* 3207 * write barrier to synchronize with initialization 3208 * of log entry 3209 */ 3210 smp_wmb(); 3211 WRITE_ONCE(e->debug_id_done, t_debug_id); 3212 return; 3213 3214 err_dead_proc_or_thread: 3215 return_error = BR_DEAD_REPLY; 3216 return_error_line = __LINE__; 3217 binder_dequeue_work(proc, tcomplete); 3218 err_translate_failed: 3219 err_bad_object_type: 3220 err_bad_offset: 3221 err_bad_parent: 3222 err_copy_data_failed: 3223 trace_binder_transaction_failed_buffer_release(t->buffer); 3224 binder_transaction_buffer_release(target_proc, t->buffer, offp); 3225 if (target_node) 3226 binder_dec_node_tmpref(target_node); 3227 target_node = NULL; 3228 t->buffer->transaction = NULL; 3229 binder_alloc_free_buf(&target_proc->alloc, t->buffer); 3230 err_binder_alloc_buf_failed: 3231 kfree(tcomplete); 3232 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 3233 err_alloc_tcomplete_failed: 3234 kfree(t); 3235 binder_stats_deleted(BINDER_STAT_TRANSACTION); 3236 err_alloc_t_failed: 3237 err_bad_call_stack: 3238 err_empty_call_stack: 3239 err_dead_binder: 3240 err_invalid_target_handle: 3241 if (target_thread) 3242 binder_thread_dec_tmpref(target_thread); 3243 if (target_proc) 3244 binder_proc_dec_tmpref(target_proc); 3245 if (target_node) { 3246 binder_dec_node(target_node, 1, 0); 3247 binder_dec_node_tmpref(target_node); 3248 } 3249 3250 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 3251 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n", 3252 proc->pid, thread->pid, return_error, return_error_param, 3253 (u64)tr->data_size, (u64)tr->offsets_size, 3254 return_error_line); 3255 3256 { 3257 struct binder_transaction_log_entry *fe; 3258 3259 e->return_error = return_error; 3260 e->return_error_param = return_error_param; 3261 e->return_error_line = return_error_line; 3262 fe = 
binder_transaction_log_add(&binder_transaction_log_failed); 3263 *fe = *e; 3264 /* 3265 * write barrier to synchronize with initialization 3266 * of log entry 3267 */ 3268 smp_wmb(); 3269 WRITE_ONCE(e->debug_id_done, t_debug_id); 3270 WRITE_ONCE(fe->debug_id_done, t_debug_id); 3271 } 3272 3273 BUG_ON(thread->return_error.cmd != BR_OK); 3274 if (in_reply_to) { 3275 thread->return_error.cmd = BR_TRANSACTION_COMPLETE; 3276 binder_enqueue_thread_work(thread, &thread->return_error.work); 3277 binder_send_failed_reply(in_reply_to, return_error); 3278 } else { 3279 thread->return_error.cmd = return_error; 3280 binder_enqueue_thread_work(thread, &thread->return_error.work); 3281 } 3282 } 3283 3284 static int binder_thread_write(struct binder_proc *proc, 3285 struct binder_thread *thread, 3286 binder_uintptr_t binder_buffer, size_t size, 3287 binder_size_t *consumed) 3288 { 3289 uint32_t cmd; 3290 struct binder_context *context = proc->context; 3291 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 3292 void __user *ptr = buffer + *consumed; 3293 void __user *end = buffer + size; 3294 3295 while (ptr < end && thread->return_error.cmd == BR_OK) { 3296 int ret; 3297 3298 if (get_user(cmd, (uint32_t __user *)ptr)) 3299 return -EFAULT; 3300 ptr += sizeof(uint32_t); 3301 trace_binder_command(cmd); 3302 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { 3303 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]); 3304 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]); 3305 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]); 3306 } 3307 switch (cmd) { 3308 case BC_INCREFS: 3309 case BC_ACQUIRE: 3310 case BC_RELEASE: 3311 case BC_DECREFS: { 3312 uint32_t target; 3313 const char *debug_string; 3314 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE; 3315 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE; 3316 struct binder_ref_data rdata; 3317 3318 if (get_user(target, (uint32_t __user *)ptr)) 3319 return -EFAULT; 3320 3321 ptr += sizeof(uint32_t); 3322 ret = -1; 3323 if (increment && !target) { 3324 struct binder_node *ctx_mgr_node; 3325 mutex_lock(&context->context_mgr_node_lock); 3326 ctx_mgr_node = context->binder_context_mgr_node; 3327 if (ctx_mgr_node) 3328 ret = binder_inc_ref_for_node( 3329 proc, ctx_mgr_node, 3330 strong, NULL, &rdata); 3331 mutex_unlock(&context->context_mgr_node_lock); 3332 } 3333 if (ret) 3334 ret = binder_update_ref_for_handle( 3335 proc, target, increment, strong, 3336 &rdata); 3337 if (!ret && rdata.desc != target) { 3338 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n", 3339 proc->pid, thread->pid, 3340 target, rdata.desc); 3341 } 3342 switch (cmd) { 3343 case BC_INCREFS: 3344 debug_string = "IncRefs"; 3345 break; 3346 case BC_ACQUIRE: 3347 debug_string = "Acquire"; 3348 break; 3349 case BC_RELEASE: 3350 debug_string = "Release"; 3351 break; 3352 case BC_DECREFS: 3353 default: 3354 debug_string = "DecRefs"; 3355 break; 3356 } 3357 if (ret) { 3358 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n", 3359 proc->pid, thread->pid, debug_string, 3360 strong, target, ret); 3361 break; 3362 } 3363 binder_debug(BINDER_DEBUG_USER_REFS, 3364 "%d:%d %s ref %d desc %d s %d w %d\n", 3365 proc->pid, thread->pid, debug_string, 3366 rdata.debug_id, rdata.desc, rdata.strong, 3367 rdata.weak); 3368 break; 3369 } 3370 case BC_INCREFS_DONE: 3371 case BC_ACQUIRE_DONE: { 3372 binder_uintptr_t node_ptr; 3373 binder_uintptr_t cookie; 3374 struct binder_node *node; 3375 bool free_node; 3376 3377 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr)) 
3378 return -EFAULT; 3379 ptr += sizeof(binder_uintptr_t); 3380 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3381 return -EFAULT; 3382 ptr += sizeof(binder_uintptr_t); 3383 node = binder_get_node(proc, node_ptr); 3384 if (node == NULL) { 3385 binder_user_error("%d:%d %s u%016llx no match\n", 3386 proc->pid, thread->pid, 3387 cmd == BC_INCREFS_DONE ? 3388 "BC_INCREFS_DONE" : 3389 "BC_ACQUIRE_DONE", 3390 (u64)node_ptr); 3391 break; 3392 } 3393 if (cookie != node->cookie) { 3394 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n", 3395 proc->pid, thread->pid, 3396 cmd == BC_INCREFS_DONE ? 3397 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 3398 (u64)node_ptr, node->debug_id, 3399 (u64)cookie, (u64)node->cookie); 3400 binder_put_node(node); 3401 break; 3402 } 3403 binder_node_inner_lock(node); 3404 if (cmd == BC_ACQUIRE_DONE) { 3405 if (node->pending_strong_ref == 0) { 3406 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", 3407 proc->pid, thread->pid, 3408 node->debug_id); 3409 binder_node_inner_unlock(node); 3410 binder_put_node(node); 3411 break; 3412 } 3413 node->pending_strong_ref = 0; 3414 } else { 3415 if (node->pending_weak_ref == 0) { 3416 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", 3417 proc->pid, thread->pid, 3418 node->debug_id); 3419 binder_node_inner_unlock(node); 3420 binder_put_node(node); 3421 break; 3422 } 3423 node->pending_weak_ref = 0; 3424 } 3425 free_node = binder_dec_node_nilocked(node, 3426 cmd == BC_ACQUIRE_DONE, 0); 3427 WARN_ON(free_node); 3428 binder_debug(BINDER_DEBUG_USER_REFS, 3429 "%d:%d %s node %d ls %d lw %d tr %d\n", 3430 proc->pid, thread->pid, 3431 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 3432 node->debug_id, node->local_strong_refs, 3433 node->local_weak_refs, node->tmp_refs); 3434 binder_node_inner_unlock(node); 3435 binder_put_node(node); 3436 break; 3437 } 3438 case BC_ATTEMPT_ACQUIRE: 3439 pr_err("BC_ATTEMPT_ACQUIRE not supported\n"); 3440 return -EINVAL; 3441 case BC_ACQUIRE_RESULT: 3442 pr_err("BC_ACQUIRE_RESULT not supported\n"); 3443 return -EINVAL; 3444 3445 case BC_FREE_BUFFER: { 3446 binder_uintptr_t data_ptr; 3447 struct binder_buffer *buffer; 3448 3449 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr)) 3450 return -EFAULT; 3451 ptr += sizeof(binder_uintptr_t); 3452 3453 buffer = binder_alloc_prepare_to_free(&proc->alloc, 3454 data_ptr); 3455 if (buffer == NULL) { 3456 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n", 3457 proc->pid, thread->pid, (u64)data_ptr); 3458 break; 3459 } 3460 if (!buffer->allow_user_free) { 3461 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n", 3462 proc->pid, thread->pid, (u64)data_ptr); 3463 break; 3464 } 3465 binder_debug(BINDER_DEBUG_FREE_BUFFER, 3466 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n", 3467 proc->pid, thread->pid, (u64)data_ptr, 3468 buffer->debug_id, 3469 buffer->transaction ? 
"active" : "finished"); 3470 3471 if (buffer->transaction) { 3472 buffer->transaction->buffer = NULL; 3473 buffer->transaction = NULL; 3474 } 3475 if (buffer->async_transaction && buffer->target_node) { 3476 struct binder_node *buf_node; 3477 struct binder_work *w; 3478 3479 buf_node = buffer->target_node; 3480 binder_node_inner_lock(buf_node); 3481 BUG_ON(!buf_node->has_async_transaction); 3482 BUG_ON(buf_node->proc != proc); 3483 w = binder_dequeue_work_head_ilocked( 3484 &buf_node->async_todo); 3485 if (!w) { 3486 buf_node->has_async_transaction = false; 3487 } else { 3488 binder_enqueue_work_ilocked( 3489 w, &proc->todo); 3490 binder_wakeup_proc_ilocked(proc); 3491 } 3492 binder_node_inner_unlock(buf_node); 3493 } 3494 trace_binder_transaction_buffer_release(buffer); 3495 binder_transaction_buffer_release(proc, buffer, NULL); 3496 binder_alloc_free_buf(&proc->alloc, buffer); 3497 break; 3498 } 3499 3500 case BC_TRANSACTION_SG: 3501 case BC_REPLY_SG: { 3502 struct binder_transaction_data_sg tr; 3503 3504 if (copy_from_user(&tr, ptr, sizeof(tr))) 3505 return -EFAULT; 3506 ptr += sizeof(tr); 3507 binder_transaction(proc, thread, &tr.transaction_data, 3508 cmd == BC_REPLY_SG, tr.buffers_size); 3509 break; 3510 } 3511 case BC_TRANSACTION: 3512 case BC_REPLY: { 3513 struct binder_transaction_data tr; 3514 3515 if (copy_from_user(&tr, ptr, sizeof(tr))) 3516 return -EFAULT; 3517 ptr += sizeof(tr); 3518 binder_transaction(proc, thread, &tr, 3519 cmd == BC_REPLY, 0); 3520 break; 3521 } 3522 3523 case BC_REGISTER_LOOPER: 3524 binder_debug(BINDER_DEBUG_THREADS, 3525 "%d:%d BC_REGISTER_LOOPER\n", 3526 proc->pid, thread->pid); 3527 binder_inner_proc_lock(proc); 3528 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { 3529 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3530 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", 3531 proc->pid, thread->pid); 3532 } else if (proc->requested_threads == 0) { 3533 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3534 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n", 3535 proc->pid, thread->pid); 3536 } else { 3537 proc->requested_threads--; 3538 proc->requested_threads_started++; 3539 } 3540 thread->looper |= BINDER_LOOPER_STATE_REGISTERED; 3541 binder_inner_proc_unlock(proc); 3542 break; 3543 case BC_ENTER_LOOPER: 3544 binder_debug(BINDER_DEBUG_THREADS, 3545 "%d:%d BC_ENTER_LOOPER\n", 3546 proc->pid, thread->pid); 3547 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { 3548 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3549 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n", 3550 proc->pid, thread->pid); 3551 } 3552 thread->looper |= BINDER_LOOPER_STATE_ENTERED; 3553 break; 3554 case BC_EXIT_LOOPER: 3555 binder_debug(BINDER_DEBUG_THREADS, 3556 "%d:%d BC_EXIT_LOOPER\n", 3557 proc->pid, thread->pid); 3558 thread->looper |= BINDER_LOOPER_STATE_EXITED; 3559 break; 3560 3561 case BC_REQUEST_DEATH_NOTIFICATION: 3562 case BC_CLEAR_DEATH_NOTIFICATION: { 3563 uint32_t target; 3564 binder_uintptr_t cookie; 3565 struct binder_ref *ref; 3566 struct binder_ref_death *death = NULL; 3567 3568 if (get_user(target, (uint32_t __user *)ptr)) 3569 return -EFAULT; 3570 ptr += sizeof(uint32_t); 3571 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3572 return -EFAULT; 3573 ptr += sizeof(binder_uintptr_t); 3574 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 3575 /* 3576 * Allocate memory for death notification 3577 * before taking lock 3578 */ 3579 death = kzalloc(sizeof(*death), 
GFP_KERNEL); 3580 if (death == NULL) { 3581 WARN_ON(thread->return_error.cmd != 3582 BR_OK); 3583 thread->return_error.cmd = BR_ERROR; 3584 binder_enqueue_thread_work( 3585 thread, 3586 &thread->return_error.work); 3587 binder_debug( 3588 BINDER_DEBUG_FAILED_TRANSACTION, 3589 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", 3590 proc->pid, thread->pid); 3591 break; 3592 } 3593 } 3594 binder_proc_lock(proc); 3595 ref = binder_get_ref_olocked(proc, target, false); 3596 if (ref == NULL) { 3597 binder_user_error("%d:%d %s invalid ref %d\n", 3598 proc->pid, thread->pid, 3599 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 3600 "BC_REQUEST_DEATH_NOTIFICATION" : 3601 "BC_CLEAR_DEATH_NOTIFICATION", 3602 target); 3603 binder_proc_unlock(proc); 3604 kfree(death); 3605 break; 3606 } 3607 3608 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 3609 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n", 3610 proc->pid, thread->pid, 3611 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 3612 "BC_REQUEST_DEATH_NOTIFICATION" : 3613 "BC_CLEAR_DEATH_NOTIFICATION", 3614 (u64)cookie, ref->data.debug_id, 3615 ref->data.desc, ref->data.strong, 3616 ref->data.weak, ref->node->debug_id); 3617 3618 binder_node_lock(ref->node); 3619 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 3620 if (ref->death) { 3621 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", 3622 proc->pid, thread->pid); 3623 binder_node_unlock(ref->node); 3624 binder_proc_unlock(proc); 3625 kfree(death); 3626 break; 3627 } 3628 binder_stats_created(BINDER_STAT_DEATH); 3629 INIT_LIST_HEAD(&death->work.entry); 3630 death->cookie = cookie; 3631 ref->death = death; 3632 if (ref->node->proc == NULL) { 3633 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 3634 3635 binder_inner_proc_lock(proc); 3636 binder_enqueue_work_ilocked( 3637 &ref->death->work, &proc->todo); 3638 binder_wakeup_proc_ilocked(proc); 3639 binder_inner_proc_unlock(proc); 3640 } 3641 } else { 3642 if (ref->death == NULL) { 3643 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", 3644 proc->pid, thread->pid); 3645 binder_node_unlock(ref->node); 3646 binder_proc_unlock(proc); 3647 break; 3648 } 3649 death = ref->death; 3650 if (death->cookie != cookie) { 3651 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n", 3652 proc->pid, thread->pid, 3653 (u64)death->cookie, 3654 (u64)cookie); 3655 binder_node_unlock(ref->node); 3656 binder_proc_unlock(proc); 3657 break; 3658 } 3659 ref->death = NULL; 3660 binder_inner_proc_lock(proc); 3661 if (list_empty(&death->work.entry)) { 3662 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 3663 if (thread->looper & 3664 (BINDER_LOOPER_STATE_REGISTERED | 3665 BINDER_LOOPER_STATE_ENTERED)) 3666 binder_enqueue_thread_work_ilocked( 3667 thread, 3668 &death->work); 3669 else { 3670 binder_enqueue_work_ilocked( 3671 &death->work, 3672 &proc->todo); 3673 binder_wakeup_proc_ilocked( 3674 proc); 3675 } 3676 } else { 3677 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); 3678 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; 3679 } 3680 binder_inner_proc_unlock(proc); 3681 } 3682 binder_node_unlock(ref->node); 3683 binder_proc_unlock(proc); 3684 } break; 3685 case BC_DEAD_BINDER_DONE: { 3686 struct binder_work *w; 3687 binder_uintptr_t cookie; 3688 struct binder_ref_death *death = NULL; 3689 3690 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3691 return -EFAULT; 3692 3693 ptr += sizeof(cookie); 3694 binder_inner_proc_lock(proc); 3695 
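			/*
			 * BC_DEAD_BINDER_DONE: userspace acknowledges a
			 * dead-binder notification; find the matching cookie
			 * among the notifications parked on
			 * proc->delivered_death so the work item can be
			 * retired (or re-queued as a CLEAR notification).
			 */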
list_for_each_entry(w, &proc->delivered_death, 3696 entry) { 3697 struct binder_ref_death *tmp_death = 3698 container_of(w, 3699 struct binder_ref_death, 3700 work); 3701 3702 if (tmp_death->cookie == cookie) { 3703 death = tmp_death; 3704 break; 3705 } 3706 } 3707 binder_debug(BINDER_DEBUG_DEAD_BINDER, 3708 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n", 3709 proc->pid, thread->pid, (u64)cookie, 3710 death); 3711 if (death == NULL) { 3712 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n", 3713 proc->pid, thread->pid, (u64)cookie); 3714 binder_inner_proc_unlock(proc); 3715 break; 3716 } 3717 binder_dequeue_work_ilocked(&death->work); 3718 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { 3719 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 3720 if (thread->looper & 3721 (BINDER_LOOPER_STATE_REGISTERED | 3722 BINDER_LOOPER_STATE_ENTERED)) 3723 binder_enqueue_thread_work_ilocked( 3724 thread, &death->work); 3725 else { 3726 binder_enqueue_work_ilocked( 3727 &death->work, 3728 &proc->todo); 3729 binder_wakeup_proc_ilocked(proc); 3730 } 3731 } 3732 binder_inner_proc_unlock(proc); 3733 } break; 3734 3735 default: 3736 pr_err("%d:%d unknown command %d\n", 3737 proc->pid, thread->pid, cmd); 3738 return -EINVAL; 3739 } 3740 *consumed = ptr - buffer; 3741 } 3742 return 0; 3743 } 3744 3745 static void binder_stat_br(struct binder_proc *proc, 3746 struct binder_thread *thread, uint32_t cmd) 3747 { 3748 trace_binder_return(cmd); 3749 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { 3750 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]); 3751 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]); 3752 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]); 3753 } 3754 } 3755 3756 static int binder_put_node_cmd(struct binder_proc *proc, 3757 struct binder_thread *thread, 3758 void __user **ptrp, 3759 binder_uintptr_t node_ptr, 3760 binder_uintptr_t node_cookie, 3761 int node_debug_id, 3762 uint32_t cmd, const char *cmd_name) 3763 { 3764 void __user *ptr = *ptrp; 3765 3766 if (put_user(cmd, (uint32_t __user *)ptr)) 3767 return -EFAULT; 3768 ptr += sizeof(uint32_t); 3769 3770 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr)) 3771 return -EFAULT; 3772 ptr += sizeof(binder_uintptr_t); 3773 3774 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr)) 3775 return -EFAULT; 3776 ptr += sizeof(binder_uintptr_t); 3777 3778 binder_stat_br(proc, thread, cmd); 3779 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n", 3780 proc->pid, thread->pid, cmd_name, node_debug_id, 3781 (u64)node_ptr, (u64)node_cookie); 3782 3783 *ptrp = ptr; 3784 return 0; 3785 } 3786 3787 static int binder_wait_for_work(struct binder_thread *thread, 3788 bool do_proc_work) 3789 { 3790 DEFINE_WAIT(wait); 3791 struct binder_proc *proc = thread->proc; 3792 int ret = 0; 3793 3794 freezer_do_not_count(); 3795 binder_inner_proc_lock(proc); 3796 for (;;) { 3797 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE); 3798 if (binder_has_work_ilocked(thread, do_proc_work)) 3799 break; 3800 if (do_proc_work) 3801 list_add(&thread->waiting_thread_node, 3802 &proc->waiting_threads); 3803 binder_inner_proc_unlock(proc); 3804 schedule(); 3805 binder_inner_proc_lock(proc); 3806 list_del_init(&thread->waiting_thread_node); 3807 if (signal_pending(current)) { 3808 ret = -ERESTARTSYS; 3809 break; 3810 } 3811 } 3812 finish_wait(&thread->wait, &wait); 3813 binder_inner_proc_unlock(proc); 3814 freezer_count(); 3815 3816 return ret; 3817 } 3818 3819 static int binder_thread_read(struct binder_proc *proc, 3820 struct 
binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
	}

	if (non_block) {
		if (!binder_has_work(thread, wait_for_proc_work))
			ret = -EAGAIN;
	} else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}

	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;

		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			   wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		/* not enough room left for a cmd word plus transaction_data */
		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);
		if (binder_worklist_empty_ilocked(&thread->todo))
			thread->process_todo = false;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			WARN_ON(e->cmd == BR_OK);
			binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			cmd = e->cmd;
			e->cmd = BR_OK;
			ptr += sizeof(uint32_t);

			/* record the error actually delivered, not BR_OK */
			binder_stat_br(proc, thread, cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_inner_proc_unlock(proc);
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node =
container_of(w, struct binder_node, work); 3935 int strong, weak; 3936 binder_uintptr_t node_ptr = node->ptr; 3937 binder_uintptr_t node_cookie = node->cookie; 3938 int node_debug_id = node->debug_id; 3939 int has_weak_ref; 3940 int has_strong_ref; 3941 void __user *orig_ptr = ptr; 3942 3943 BUG_ON(proc != node->proc); 3944 strong = node->internal_strong_refs || 3945 node->local_strong_refs; 3946 weak = !hlist_empty(&node->refs) || 3947 node->local_weak_refs || 3948 node->tmp_refs || strong; 3949 has_strong_ref = node->has_strong_ref; 3950 has_weak_ref = node->has_weak_ref; 3951 3952 if (weak && !has_weak_ref) { 3953 node->has_weak_ref = 1; 3954 node->pending_weak_ref = 1; 3955 node->local_weak_refs++; 3956 } 3957 if (strong && !has_strong_ref) { 3958 node->has_strong_ref = 1; 3959 node->pending_strong_ref = 1; 3960 node->local_strong_refs++; 3961 } 3962 if (!strong && has_strong_ref) 3963 node->has_strong_ref = 0; 3964 if (!weak && has_weak_ref) 3965 node->has_weak_ref = 0; 3966 if (!weak && !strong) { 3967 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 3968 "%d:%d node %d u%016llx c%016llx deleted\n", 3969 proc->pid, thread->pid, 3970 node_debug_id, 3971 (u64)node_ptr, 3972 (u64)node_cookie); 3973 rb_erase(&node->rb_node, &proc->nodes); 3974 binder_inner_proc_unlock(proc); 3975 binder_node_lock(node); 3976 /* 3977 * Acquire the node lock before freeing the 3978 * node to serialize with other threads that 3979 * may have been holding the node lock while 3980 * decrementing this node (avoids race where 3981 * this thread frees while the other thread 3982 * is unlocking the node after the final 3983 * decrement) 3984 */ 3985 binder_node_unlock(node); 3986 binder_free_node(node); 3987 } else 3988 binder_inner_proc_unlock(proc); 3989 3990 if (weak && !has_weak_ref) 3991 ret = binder_put_node_cmd( 3992 proc, thread, &ptr, node_ptr, 3993 node_cookie, node_debug_id, 3994 BR_INCREFS, "BR_INCREFS"); 3995 if (!ret && strong && !has_strong_ref) 3996 ret = binder_put_node_cmd( 3997 proc, thread, &ptr, node_ptr, 3998 node_cookie, node_debug_id, 3999 BR_ACQUIRE, "BR_ACQUIRE"); 4000 if (!ret && !strong && has_strong_ref) 4001 ret = binder_put_node_cmd( 4002 proc, thread, &ptr, node_ptr, 4003 node_cookie, node_debug_id, 4004 BR_RELEASE, "BR_RELEASE"); 4005 if (!ret && !weak && has_weak_ref) 4006 ret = binder_put_node_cmd( 4007 proc, thread, &ptr, node_ptr, 4008 node_cookie, node_debug_id, 4009 BR_DECREFS, "BR_DECREFS"); 4010 if (orig_ptr == ptr) 4011 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 4012 "%d:%d node %d u%016llx c%016llx state unchanged\n", 4013 proc->pid, thread->pid, 4014 node_debug_id, 4015 (u64)node_ptr, 4016 (u64)node_cookie); 4017 if (ret) 4018 return ret; 4019 } break; 4020 case BINDER_WORK_DEAD_BINDER: 4021 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 4022 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 4023 struct binder_ref_death *death; 4024 uint32_t cmd; 4025 binder_uintptr_t cookie; 4026 4027 death = container_of(w, struct binder_ref_death, work); 4028 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) 4029 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; 4030 else 4031 cmd = BR_DEAD_BINDER; 4032 cookie = death->cookie; 4033 4034 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 4035 "%d:%d %s %016llx\n", 4036 proc->pid, thread->pid, 4037 cmd == BR_DEAD_BINDER ? 
4038 "BR_DEAD_BINDER" : 4039 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 4040 (u64)cookie); 4041 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { 4042 binder_inner_proc_unlock(proc); 4043 kfree(death); 4044 binder_stats_deleted(BINDER_STAT_DEATH); 4045 } else { 4046 binder_enqueue_work_ilocked( 4047 w, &proc->delivered_death); 4048 binder_inner_proc_unlock(proc); 4049 } 4050 if (put_user(cmd, (uint32_t __user *)ptr)) 4051 return -EFAULT; 4052 ptr += sizeof(uint32_t); 4053 if (put_user(cookie, 4054 (binder_uintptr_t __user *)ptr)) 4055 return -EFAULT; 4056 ptr += sizeof(binder_uintptr_t); 4057 binder_stat_br(proc, thread, cmd); 4058 if (cmd == BR_DEAD_BINDER) 4059 goto done; /* DEAD_BINDER notifications can cause transactions */ 4060 } break; 4061 } 4062 4063 if (!t) 4064 continue; 4065 4066 BUG_ON(t->buffer == NULL); 4067 if (t->buffer->target_node) { 4068 struct binder_node *target_node = t->buffer->target_node; 4069 4070 tr.target.ptr = target_node->ptr; 4071 tr.cookie = target_node->cookie; 4072 t->saved_priority = task_nice(current); 4073 if (t->priority < target_node->min_priority && 4074 !(t->flags & TF_ONE_WAY)) 4075 binder_set_nice(t->priority); 4076 else if (!(t->flags & TF_ONE_WAY) || 4077 t->saved_priority > target_node->min_priority) 4078 binder_set_nice(target_node->min_priority); 4079 cmd = BR_TRANSACTION; 4080 } else { 4081 tr.target.ptr = 0; 4082 tr.cookie = 0; 4083 cmd = BR_REPLY; 4084 } 4085 tr.code = t->code; 4086 tr.flags = t->flags; 4087 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); 4088 4089 t_from = binder_get_txn_from(t); 4090 if (t_from) { 4091 struct task_struct *sender = t_from->proc->tsk; 4092 4093 tr.sender_pid = task_tgid_nr_ns(sender, 4094 task_active_pid_ns(current)); 4095 } else { 4096 tr.sender_pid = 0; 4097 } 4098 4099 tr.data_size = t->buffer->data_size; 4100 tr.offsets_size = t->buffer->offsets_size; 4101 tr.data.ptr.buffer = (binder_uintptr_t) 4102 ((uintptr_t)t->buffer->data + 4103 binder_alloc_get_user_buffer_offset(&proc->alloc)); 4104 tr.data.ptr.offsets = tr.data.ptr.buffer + 4105 ALIGN(t->buffer->data_size, 4106 sizeof(void *)); 4107 4108 if (put_user(cmd, (uint32_t __user *)ptr)) { 4109 if (t_from) 4110 binder_thread_dec_tmpref(t_from); 4111 4112 binder_cleanup_transaction(t, "put_user failed", 4113 BR_FAILED_REPLY); 4114 4115 return -EFAULT; 4116 } 4117 ptr += sizeof(uint32_t); 4118 if (copy_to_user(ptr, &tr, sizeof(tr))) { 4119 if (t_from) 4120 binder_thread_dec_tmpref(t_from); 4121 4122 binder_cleanup_transaction(t, "copy_to_user failed", 4123 BR_FAILED_REPLY); 4124 4125 return -EFAULT; 4126 } 4127 ptr += sizeof(tr); 4128 4129 trace_binder_transaction_received(t); 4130 binder_stat_br(proc, thread, cmd); 4131 binder_debug(BINDER_DEBUG_TRANSACTION, 4132 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", 4133 proc->pid, thread->pid, 4134 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : 4135 "BR_REPLY", 4136 t->debug_id, t_from ? t_from->proc->pid : 0, 4137 t_from ? 
t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
	if (proc->requested_threads == 0 &&
	    list_empty(&thread->proc->waiting_threads) &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
			       BINDER_LOOPER_STATE_ENTERED))
	    /* the user-space code fails to spawn a new
	     * thread if we leave this out
	     */) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}

static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;

	while (1) {
		w = binder_dequeue_work_head(proc, list);
		if (!w)
			return;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);

			binder_cleanup_transaction(t, "process died.",
						   BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_ERROR: %u\n",
				e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}
}

static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
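	/*
	 * tmp_ref keeps the thread alive while some other context is
	 * still using it (see binder_thread_release() and
	 * binder_thread_dec_tmpref()); a freshly created thread starts
	 * with no temporary references.
	 */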
4256 atomic_set(&thread->tmp_ref, 0); 4257 init_waitqueue_head(&thread->wait); 4258 INIT_LIST_HEAD(&thread->todo); 4259 rb_link_node(&thread->rb_node, parent, p); 4260 rb_insert_color(&thread->rb_node, &proc->threads); 4261 thread->looper_need_return = true; 4262 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR; 4263 thread->return_error.cmd = BR_OK; 4264 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR; 4265 thread->reply_error.cmd = BR_OK; 4266 INIT_LIST_HEAD(&new_thread->waiting_thread_node); 4267 return thread; 4268 } 4269 4270 static struct binder_thread *binder_get_thread(struct binder_proc *proc) 4271 { 4272 struct binder_thread *thread; 4273 struct binder_thread *new_thread; 4274 4275 binder_inner_proc_lock(proc); 4276 thread = binder_get_thread_ilocked(proc, NULL); 4277 binder_inner_proc_unlock(proc); 4278 if (!thread) { 4279 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL); 4280 if (new_thread == NULL) 4281 return NULL; 4282 binder_inner_proc_lock(proc); 4283 thread = binder_get_thread_ilocked(proc, new_thread); 4284 binder_inner_proc_unlock(proc); 4285 if (thread != new_thread) 4286 kfree(new_thread); 4287 } 4288 return thread; 4289 } 4290 4291 static void binder_free_proc(struct binder_proc *proc) 4292 { 4293 BUG_ON(!list_empty(&proc->todo)); 4294 BUG_ON(!list_empty(&proc->delivered_death)); 4295 binder_alloc_deferred_release(&proc->alloc); 4296 put_task_struct(proc->tsk); 4297 binder_stats_deleted(BINDER_STAT_PROC); 4298 kfree(proc); 4299 } 4300 4301 static void binder_free_thread(struct binder_thread *thread) 4302 { 4303 BUG_ON(!list_empty(&thread->todo)); 4304 binder_stats_deleted(BINDER_STAT_THREAD); 4305 binder_proc_dec_tmpref(thread->proc); 4306 kfree(thread); 4307 } 4308 4309 static int binder_thread_release(struct binder_proc *proc, 4310 struct binder_thread *thread) 4311 { 4312 struct binder_transaction *t; 4313 struct binder_transaction *send_reply = NULL; 4314 int active_transactions = 0; 4315 struct binder_transaction *last_t = NULL; 4316 4317 binder_inner_proc_lock(thread->proc); 4318 /* 4319 * take a ref on the proc so it survives 4320 * after we remove this thread from proc->threads. 4321 * The corresponding dec is when we actually 4322 * free the thread in binder_free_thread() 4323 */ 4324 proc->tmp_ref++; 4325 /* 4326 * take a ref on this thread to ensure it 4327 * survives while we are releasing it 4328 */ 4329 atomic_inc(&thread->tmp_ref); 4330 rb_erase(&thread->rb_node, &proc->threads); 4331 t = thread->transaction_stack; 4332 if (t) { 4333 spin_lock(&t->lock); 4334 if (t->to_thread == thread) 4335 send_reply = t; 4336 } 4337 thread->is_dead = true; 4338 4339 while (t) { 4340 last_t = t; 4341 active_transactions++; 4342 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4343 "release %d:%d transaction %d %s, still active\n", 4344 proc->pid, thread->pid, 4345 t->debug_id, 4346 (t->to_thread == thread) ? "in" : "out"); 4347 4348 if (t->to_thread == thread) { 4349 t->to_proc = NULL; 4350 t->to_thread = NULL; 4351 if (t->buffer) { 4352 t->buffer->transaction = NULL; 4353 t->buffer = NULL; 4354 } 4355 t = t->to_parent; 4356 } else if (t->from == thread) { 4357 t->from = NULL; 4358 t = t->from_parent; 4359 } else 4360 BUG(); 4361 spin_unlock(&last_t->lock); 4362 if (t) 4363 spin_lock(&t->lock); 4364 } 4365 4366 /* 4367 * If this thread used poll, make sure we remove the waitqueue 4368 * from any epoll data structures holding it with POLLFREE. 4369 * waitqueue_active() is safe to use here because we're holding 4370 * the inner lock. 
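	 * POLLFREE tells epoll to unhook its entries from this
	 * waitqueue now, since the waitqueue is freed together with
	 * the thread.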
4371 */ 4372 if ((thread->looper & BINDER_LOOPER_STATE_POLL) && 4373 waitqueue_active(&thread->wait)) { 4374 wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE); 4375 } 4376 4377 binder_inner_proc_unlock(thread->proc); 4378 4379 if (send_reply) 4380 binder_send_failed_reply(send_reply, BR_DEAD_REPLY); 4381 binder_release_work(proc, &thread->todo); 4382 binder_thread_dec_tmpref(thread); 4383 return active_transactions; 4384 } 4385 4386 static __poll_t binder_poll(struct file *filp, 4387 struct poll_table_struct *wait) 4388 { 4389 struct binder_proc *proc = filp->private_data; 4390 struct binder_thread *thread = NULL; 4391 bool wait_for_proc_work; 4392 4393 thread = binder_get_thread(proc); 4394 4395 binder_inner_proc_lock(thread->proc); 4396 thread->looper |= BINDER_LOOPER_STATE_POLL; 4397 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); 4398 4399 binder_inner_proc_unlock(thread->proc); 4400 4401 poll_wait(filp, &thread->wait, wait); 4402 4403 if (binder_has_work(thread, wait_for_proc_work)) 4404 return EPOLLIN; 4405 4406 return 0; 4407 } 4408 4409 static int binder_ioctl_write_read(struct file *filp, 4410 unsigned int cmd, unsigned long arg, 4411 struct binder_thread *thread) 4412 { 4413 int ret = 0; 4414 struct binder_proc *proc = filp->private_data; 4415 unsigned int size = _IOC_SIZE(cmd); 4416 void __user *ubuf = (void __user *)arg; 4417 struct binder_write_read bwr; 4418 4419 if (size != sizeof(struct binder_write_read)) { 4420 ret = -EINVAL; 4421 goto out; 4422 } 4423 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { 4424 ret = -EFAULT; 4425 goto out; 4426 } 4427 binder_debug(BINDER_DEBUG_READ_WRITE, 4428 "%d:%d write %lld at %016llx, read %lld at %016llx\n", 4429 proc->pid, thread->pid, 4430 (u64)bwr.write_size, (u64)bwr.write_buffer, 4431 (u64)bwr.read_size, (u64)bwr.read_buffer); 4432 4433 if (bwr.write_size > 0) { 4434 ret = binder_thread_write(proc, thread, 4435 bwr.write_buffer, 4436 bwr.write_size, 4437 &bwr.write_consumed); 4438 trace_binder_write_done(ret); 4439 if (ret < 0) { 4440 bwr.read_consumed = 0; 4441 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 4442 ret = -EFAULT; 4443 goto out; 4444 } 4445 } 4446 if (bwr.read_size > 0) { 4447 ret = binder_thread_read(proc, thread, bwr.read_buffer, 4448 bwr.read_size, 4449 &bwr.read_consumed, 4450 filp->f_flags & O_NONBLOCK); 4451 trace_binder_read_done(ret); 4452 binder_inner_proc_lock(proc); 4453 if (!binder_worklist_empty_ilocked(&proc->todo)) 4454 binder_wakeup_proc_ilocked(proc); 4455 binder_inner_proc_unlock(proc); 4456 if (ret < 0) { 4457 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 4458 ret = -EFAULT; 4459 goto out; 4460 } 4461 } 4462 binder_debug(BINDER_DEBUG_READ_WRITE, 4463 "%d:%d wrote %lld of %lld, read return %lld of %lld\n", 4464 proc->pid, thread->pid, 4465 (u64)bwr.write_consumed, (u64)bwr.write_size, 4466 (u64)bwr.read_consumed, (u64)bwr.read_size); 4467 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { 4468 ret = -EFAULT; 4469 goto out; 4470 } 4471 out: 4472 return ret; 4473 } 4474 4475 static int binder_ioctl_set_ctx_mgr(struct file *filp) 4476 { 4477 int ret = 0; 4478 struct binder_proc *proc = filp->private_data; 4479 struct binder_context *context = proc->context; 4480 struct binder_node *new_node; 4481 kuid_t curr_euid = current_euid(); 4482 4483 mutex_lock(&context->context_mgr_node_lock); 4484 if (context->binder_context_mgr_node) { 4485 pr_err("BINDER_SET_CONTEXT_MGR already set\n"); 4486 ret = -EBUSY; 4487 goto out; 4488 } 4489 ret = security_binder_set_context_mgr(proc->tsk); 4490 if (ret < 0) 4491 
goto out; 4492 if (uid_valid(context->binder_context_mgr_uid)) { 4493 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) { 4494 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", 4495 from_kuid(&init_user_ns, curr_euid), 4496 from_kuid(&init_user_ns, 4497 context->binder_context_mgr_uid)); 4498 ret = -EPERM; 4499 goto out; 4500 } 4501 } else { 4502 context->binder_context_mgr_uid = curr_euid; 4503 } 4504 new_node = binder_new_node(proc, NULL); 4505 if (!new_node) { 4506 ret = -ENOMEM; 4507 goto out; 4508 } 4509 binder_node_lock(new_node); 4510 new_node->local_weak_refs++; 4511 new_node->local_strong_refs++; 4512 new_node->has_strong_ref = 1; 4513 new_node->has_weak_ref = 1; 4514 context->binder_context_mgr_node = new_node; 4515 binder_node_unlock(new_node); 4516 binder_put_node(new_node); 4517 out: 4518 mutex_unlock(&context->context_mgr_node_lock); 4519 return ret; 4520 } 4521 4522 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc, 4523 struct binder_node_debug_info *info) 4524 { 4525 struct rb_node *n; 4526 binder_uintptr_t ptr = info->ptr; 4527 4528 memset(info, 0, sizeof(*info)); 4529 4530 binder_inner_proc_lock(proc); 4531 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 4532 struct binder_node *node = rb_entry(n, struct binder_node, 4533 rb_node); 4534 if (node->ptr > ptr) { 4535 info->ptr = node->ptr; 4536 info->cookie = node->cookie; 4537 info->has_strong_ref = node->has_strong_ref; 4538 info->has_weak_ref = node->has_weak_ref; 4539 break; 4540 } 4541 } 4542 binder_inner_proc_unlock(proc); 4543 4544 return 0; 4545 } 4546 4547 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 4548 { 4549 int ret; 4550 struct binder_proc *proc = filp->private_data; 4551 struct binder_thread *thread; 4552 unsigned int size = _IOC_SIZE(cmd); 4553 void __user *ubuf = (void __user *)arg; 4554 4555 /*pr_info("binder_ioctl: %d:%d %x %lx\n", 4556 proc->pid, current->pid, cmd, arg);*/ 4557 4558 binder_selftest_alloc(&proc->alloc); 4559 4560 trace_binder_ioctl(cmd, arg); 4561 4562 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 4563 if (ret) 4564 goto err_unlocked; 4565 4566 thread = binder_get_thread(proc); 4567 if (thread == NULL) { 4568 ret = -ENOMEM; 4569 goto err; 4570 } 4571 4572 switch (cmd) { 4573 case BINDER_WRITE_READ: 4574 ret = binder_ioctl_write_read(filp, cmd, arg, thread); 4575 if (ret) 4576 goto err; 4577 break; 4578 case BINDER_SET_MAX_THREADS: { 4579 int max_threads; 4580 4581 if (copy_from_user(&max_threads, ubuf, 4582 sizeof(max_threads))) { 4583 ret = -EINVAL; 4584 goto err; 4585 } 4586 binder_inner_proc_lock(proc); 4587 proc->max_threads = max_threads; 4588 binder_inner_proc_unlock(proc); 4589 break; 4590 } 4591 case BINDER_SET_CONTEXT_MGR: 4592 ret = binder_ioctl_set_ctx_mgr(filp); 4593 if (ret) 4594 goto err; 4595 break; 4596 case BINDER_THREAD_EXIT: 4597 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", 4598 proc->pid, thread->pid); 4599 binder_thread_release(proc, thread); 4600 thread = NULL; 4601 break; 4602 case BINDER_VERSION: { 4603 struct binder_version __user *ver = ubuf; 4604 4605 if (size != sizeof(struct binder_version)) { 4606 ret = -EINVAL; 4607 goto err; 4608 } 4609 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, 4610 &ver->protocol_version)) { 4611 ret = -EINVAL; 4612 goto err; 4613 } 4614 break; 4615 } 4616 case BINDER_GET_NODE_DEBUG_INFO: { 4617 struct binder_node_debug_info info; 4618 4619 if (copy_from_user(&info, ubuf, sizeof(info))) { 4620 ret = -EFAULT; 4621 
goto err; 4622 } 4623 4624 ret = binder_ioctl_get_node_debug_info(proc, &info); 4625 if (ret < 0) 4626 goto err; 4627 4628 if (copy_to_user(ubuf, &info, sizeof(info))) { 4629 ret = -EFAULT; 4630 goto err; 4631 } 4632 break; 4633 } 4634 default: 4635 ret = -EINVAL; 4636 goto err; 4637 } 4638 ret = 0; 4639 err: 4640 if (thread) 4641 thread->looper_need_return = false; 4642 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 4643 if (ret && ret != -ERESTARTSYS) 4644 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); 4645 err_unlocked: 4646 trace_binder_ioctl_done(ret); 4647 return ret; 4648 } 4649 4650 static void binder_vma_open(struct vm_area_struct *vma) 4651 { 4652 struct binder_proc *proc = vma->vm_private_data; 4653 4654 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 4655 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 4656 proc->pid, vma->vm_start, vma->vm_end, 4657 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 4658 (unsigned long)pgprot_val(vma->vm_page_prot)); 4659 } 4660 4661 static void binder_vma_close(struct vm_area_struct *vma) 4662 { 4663 struct binder_proc *proc = vma->vm_private_data; 4664 4665 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 4666 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 4667 proc->pid, vma->vm_start, vma->vm_end, 4668 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 4669 (unsigned long)pgprot_val(vma->vm_page_prot)); 4670 binder_alloc_vma_close(&proc->alloc); 4671 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); 4672 } 4673 4674 static int binder_vm_fault(struct vm_fault *vmf) 4675 { 4676 return VM_FAULT_SIGBUS; 4677 } 4678 4679 static const struct vm_operations_struct binder_vm_ops = { 4680 .open = binder_vma_open, 4681 .close = binder_vma_close, 4682 .fault = binder_vm_fault, 4683 }; 4684 4685 static int binder_mmap(struct file *filp, struct vm_area_struct *vma) 4686 { 4687 int ret; 4688 struct binder_proc *proc = filp->private_data; 4689 const char *failure_string; 4690 4691 if (proc->tsk != current->group_leader) 4692 return -EINVAL; 4693 4694 if ((vma->vm_end - vma->vm_start) > SZ_4M) 4695 vma->vm_end = vma->vm_start + SZ_4M; 4696 4697 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 4698 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", 4699 __func__, proc->pid, vma->vm_start, vma->vm_end, 4700 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 4701 (unsigned long)pgprot_val(vma->vm_page_prot)); 4702 4703 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { 4704 ret = -EPERM; 4705 failure_string = "bad vm_flags"; 4706 goto err_bad_arg; 4707 } 4708 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; 4709 vma->vm_ops = &binder_vm_ops; 4710 vma->vm_private_data = proc; 4711 4712 ret = binder_alloc_mmap_handler(&proc->alloc, vma); 4713 if (ret) 4714 return ret; 4715 mutex_lock(&proc->files_lock); 4716 proc->files = get_files_struct(current); 4717 mutex_unlock(&proc->files_lock); 4718 return 0; 4719 4720 err_bad_arg: 4721 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__, 4722 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); 4723 return ret; 4724 } 4725 4726 static int binder_open(struct inode *nodp, struct file *filp) 4727 { 4728 struct binder_proc *proc; 4729 struct binder_device *binder_dev; 4730 4731 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__, 4732 current->group_leader->pid, current->pid); 4733 4734 proc = kzalloc(sizeof(*proc), GFP_KERNEL); 4735 if (proc == NULL) 4736 return -ENOMEM; 4737 spin_lock_init(&proc->inner_lock); 4738 
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	mutex_init(&proc->files_lock);
	INIT_LIST_HEAD(&proc->todo);
	proc->default_priority = task_nice(current);
	binder_dev = container_of(filp->private_data, struct binder_device,
				  miscdev);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->waiting_threads);
	filp->private_data = proc;

	mutex_lock(&binder_procs_lock);
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * anyway print all contexts that a given PID has, so this
		 * is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&binder_proc_fops);
	}

	return 0;
}

static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}

static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}

static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node.
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
4856 * with new notification requests and the 4857 * inner lock to synchronize with queued 4858 * death notifications. 4859 */ 4860 binder_inner_proc_lock(ref->proc); 4861 if (!ref->death) { 4862 binder_inner_proc_unlock(ref->proc); 4863 continue; 4864 } 4865 4866 death++; 4867 4868 BUG_ON(!list_empty(&ref->death->work.entry)); 4869 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 4870 binder_enqueue_work_ilocked(&ref->death->work, 4871 &ref->proc->todo); 4872 binder_wakeup_proc_ilocked(ref->proc); 4873 binder_inner_proc_unlock(ref->proc); 4874 } 4875 4876 binder_debug(BINDER_DEBUG_DEAD_BINDER, 4877 "node %d now dead, refs %d, death %d\n", 4878 node->debug_id, refs, death); 4879 binder_node_unlock(node); 4880 binder_put_node(node); 4881 4882 return refs; 4883 } 4884 4885 static void binder_deferred_release(struct binder_proc *proc) 4886 { 4887 struct binder_context *context = proc->context; 4888 struct rb_node *n; 4889 int threads, nodes, incoming_refs, outgoing_refs, active_transactions; 4890 4891 BUG_ON(proc->files); 4892 4893 mutex_lock(&binder_procs_lock); 4894 hlist_del(&proc->proc_node); 4895 mutex_unlock(&binder_procs_lock); 4896 4897 mutex_lock(&context->context_mgr_node_lock); 4898 if (context->binder_context_mgr_node && 4899 context->binder_context_mgr_node->proc == proc) { 4900 binder_debug(BINDER_DEBUG_DEAD_BINDER, 4901 "%s: %d context_mgr_node gone\n", 4902 __func__, proc->pid); 4903 context->binder_context_mgr_node = NULL; 4904 } 4905 mutex_unlock(&context->context_mgr_node_lock); 4906 binder_inner_proc_lock(proc); 4907 /* 4908 * Make sure proc stays alive after we 4909 * remove all the threads 4910 */ 4911 proc->tmp_ref++; 4912 4913 proc->is_dead = true; 4914 threads = 0; 4915 active_transactions = 0; 4916 while ((n = rb_first(&proc->threads))) { 4917 struct binder_thread *thread; 4918 4919 thread = rb_entry(n, struct binder_thread, rb_node); 4920 binder_inner_proc_unlock(proc); 4921 threads++; 4922 active_transactions += binder_thread_release(proc, thread); 4923 binder_inner_proc_lock(proc); 4924 } 4925 4926 nodes = 0; 4927 incoming_refs = 0; 4928 while ((n = rb_first(&proc->nodes))) { 4929 struct binder_node *node; 4930 4931 node = rb_entry(n, struct binder_node, rb_node); 4932 nodes++; 4933 /* 4934 * take a temporary ref on the node before 4935 * calling binder_node_release() which will either 4936 * kfree() the node or call binder_put_node() 4937 */ 4938 binder_inc_node_tmpref_ilocked(node); 4939 rb_erase(&node->rb_node, &proc->nodes); 4940 binder_inner_proc_unlock(proc); 4941 incoming_refs = binder_node_release(node, incoming_refs); 4942 binder_inner_proc_lock(proc); 4943 } 4944 binder_inner_proc_unlock(proc); 4945 4946 outgoing_refs = 0; 4947 binder_proc_lock(proc); 4948 while ((n = rb_first(&proc->refs_by_desc))) { 4949 struct binder_ref *ref; 4950 4951 ref = rb_entry(n, struct binder_ref, rb_node_desc); 4952 outgoing_refs++; 4953 binder_cleanup_ref_olocked(ref); 4954 binder_proc_unlock(proc); 4955 binder_free_ref(ref); 4956 binder_proc_lock(proc); 4957 } 4958 binder_proc_unlock(proc); 4959 4960 binder_release_work(proc, &proc->todo); 4961 binder_release_work(proc, &proc->delivered_death); 4962 4963 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 4964 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n", 4965 __func__, proc->pid, threads, nodes, incoming_refs, 4966 outgoing_refs, active_transactions); 4967 4968 binder_proc_dec_tmpref(proc); 4969 } 4970 4971 static void binder_deferred_func(struct work_struct *work) 4972 { 4973 struct binder_proc *proc; 4974 
struct files_struct *files; 4975 4976 int defer; 4977 4978 do { 4979 mutex_lock(&binder_deferred_lock); 4980 if (!hlist_empty(&binder_deferred_list)) { 4981 proc = hlist_entry(binder_deferred_list.first, 4982 struct binder_proc, deferred_work_node); 4983 hlist_del_init(&proc->deferred_work_node); 4984 defer = proc->deferred_work; 4985 proc->deferred_work = 0; 4986 } else { 4987 proc = NULL; 4988 defer = 0; 4989 } 4990 mutex_unlock(&binder_deferred_lock); 4991 4992 files = NULL; 4993 if (defer & BINDER_DEFERRED_PUT_FILES) { 4994 mutex_lock(&proc->files_lock); 4995 files = proc->files; 4996 if (files) 4997 proc->files = NULL; 4998 mutex_unlock(&proc->files_lock); 4999 } 5000 5001 if (defer & BINDER_DEFERRED_FLUSH) 5002 binder_deferred_flush(proc); 5003 5004 if (defer & BINDER_DEFERRED_RELEASE) 5005 binder_deferred_release(proc); /* frees proc */ 5006 5007 if (files) 5008 put_files_struct(files); 5009 } while (proc); 5010 } 5011 static DECLARE_WORK(binder_deferred_work, binder_deferred_func); 5012 5013 static void 5014 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) 5015 { 5016 mutex_lock(&binder_deferred_lock); 5017 proc->deferred_work |= defer; 5018 if (hlist_unhashed(&proc->deferred_work_node)) { 5019 hlist_add_head(&proc->deferred_work_node, 5020 &binder_deferred_list); 5021 schedule_work(&binder_deferred_work); 5022 } 5023 mutex_unlock(&binder_deferred_lock); 5024 } 5025 5026 static void print_binder_transaction_ilocked(struct seq_file *m, 5027 struct binder_proc *proc, 5028 const char *prefix, 5029 struct binder_transaction *t) 5030 { 5031 struct binder_proc *to_proc; 5032 struct binder_buffer *buffer = t->buffer; 5033 5034 spin_lock(&t->lock); 5035 to_proc = t->to_proc; 5036 seq_printf(m, 5037 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", 5038 prefix, t->debug_id, t, 5039 t->from ? t->from->proc->pid : 0, 5040 t->from ? t->from->pid : 0, 5041 to_proc ? to_proc->pid : 0, 5042 t->to_thread ? 
t->to_thread->pid : 0, 5043 t->code, t->flags, t->priority, t->need_reply); 5044 spin_unlock(&t->lock); 5045 5046 if (proc != to_proc) { 5047 /* 5048 * Can only safely deref buffer if we are holding the 5049 * correct proc inner lock for this node 5050 */ 5051 seq_puts(m, "\n"); 5052 return; 5053 } 5054 5055 if (buffer == NULL) { 5056 seq_puts(m, " buffer free\n"); 5057 return; 5058 } 5059 if (buffer->target_node) 5060 seq_printf(m, " node %d", buffer->target_node->debug_id); 5061 seq_printf(m, " size %zd:%zd data %p\n", 5062 buffer->data_size, buffer->offsets_size, 5063 buffer->data); 5064 } 5065 5066 static void print_binder_work_ilocked(struct seq_file *m, 5067 struct binder_proc *proc, 5068 const char *prefix, 5069 const char *transaction_prefix, 5070 struct binder_work *w) 5071 { 5072 struct binder_node *node; 5073 struct binder_transaction *t; 5074 5075 switch (w->type) { 5076 case BINDER_WORK_TRANSACTION: 5077 t = container_of(w, struct binder_transaction, work); 5078 print_binder_transaction_ilocked( 5079 m, proc, transaction_prefix, t); 5080 break; 5081 case BINDER_WORK_RETURN_ERROR: { 5082 struct binder_error *e = container_of( 5083 w, struct binder_error, work); 5084 5085 seq_printf(m, "%stransaction error: %u\n", 5086 prefix, e->cmd); 5087 } break; 5088 case BINDER_WORK_TRANSACTION_COMPLETE: 5089 seq_printf(m, "%stransaction complete\n", prefix); 5090 break; 5091 case BINDER_WORK_NODE: 5092 node = container_of(w, struct binder_node, work); 5093 seq_printf(m, "%snode work %d: u%016llx c%016llx\n", 5094 prefix, node->debug_id, 5095 (u64)node->ptr, (u64)node->cookie); 5096 break; 5097 case BINDER_WORK_DEAD_BINDER: 5098 seq_printf(m, "%shas dead binder\n", prefix); 5099 break; 5100 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 5101 seq_printf(m, "%shas cleared dead binder\n", prefix); 5102 break; 5103 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: 5104 seq_printf(m, "%shas cleared death notification\n", prefix); 5105 break; 5106 default: 5107 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); 5108 break; 5109 } 5110 } 5111 5112 static void print_binder_thread_ilocked(struct seq_file *m, 5113 struct binder_thread *thread, 5114 int print_always) 5115 { 5116 struct binder_transaction *t; 5117 struct binder_work *w; 5118 size_t start_pos = m->count; 5119 size_t header_pos; 5120 5121 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n", 5122 thread->pid, thread->looper, 5123 thread->looper_need_return, 5124 atomic_read(&thread->tmp_ref)); 5125 header_pos = m->count; 5126 t = thread->transaction_stack; 5127 while (t) { 5128 if (t->from == thread) { 5129 print_binder_transaction_ilocked(m, thread->proc, 5130 " outgoing transaction", t); 5131 t = t->from_parent; 5132 } else if (t->to_thread == thread) { 5133 print_binder_transaction_ilocked(m, thread->proc, 5134 " incoming transaction", t); 5135 t = t->to_parent; 5136 } else { 5137 print_binder_transaction_ilocked(m, thread->proc, 5138 " bad transaction", t); 5139 t = NULL; 5140 } 5141 } 5142 list_for_each_entry(w, &thread->todo, entry) { 5143 print_binder_work_ilocked(m, thread->proc, " ", 5144 " pending transaction", w); 5145 } 5146 if (!print_always && m->count == header_pos) 5147 m->count = start_pos; 5148 } 5149 5150 static void print_binder_node_nilocked(struct seq_file *m, 5151 struct binder_node *node) 5152 { 5153 struct binder_ref *ref; 5154 struct binder_work *w; 5155 int count; 5156 5157 count = 0; 5158 hlist_for_each_entry(ref, &node->refs, node_entry) 5159 count++; 5160 5161 seq_printf(m, " node %d: u%016llx c%016llx hs 
%d hw %d ls %d lw %d is %d iw %d tr %d", 5162 node->debug_id, (u64)node->ptr, (u64)node->cookie, 5163 node->has_strong_ref, node->has_weak_ref, 5164 node->local_strong_refs, node->local_weak_refs, 5165 node->internal_strong_refs, count, node->tmp_refs); 5166 if (count) { 5167 seq_puts(m, " proc"); 5168 hlist_for_each_entry(ref, &node->refs, node_entry) 5169 seq_printf(m, " %d", ref->proc->pid); 5170 } 5171 seq_puts(m, "\n"); 5172 if (node->proc) { 5173 list_for_each_entry(w, &node->async_todo, entry) 5174 print_binder_work_ilocked(m, node->proc, " ", 5175 " pending async transaction", w); 5176 } 5177 } 5178 5179 static void print_binder_ref_olocked(struct seq_file *m, 5180 struct binder_ref *ref) 5181 { 5182 binder_node_lock(ref->node); 5183 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n", 5184 ref->data.debug_id, ref->data.desc, 5185 ref->node->proc ? "" : "dead ", 5186 ref->node->debug_id, ref->data.strong, 5187 ref->data.weak, ref->death); 5188 binder_node_unlock(ref->node); 5189 } 5190 5191 static void print_binder_proc(struct seq_file *m, 5192 struct binder_proc *proc, int print_all) 5193 { 5194 struct binder_work *w; 5195 struct rb_node *n; 5196 size_t start_pos = m->count; 5197 size_t header_pos; 5198 struct binder_node *last_node = NULL; 5199 5200 seq_printf(m, "proc %d\n", proc->pid); 5201 seq_printf(m, "context %s\n", proc->context->name); 5202 header_pos = m->count; 5203 5204 binder_inner_proc_lock(proc); 5205 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 5206 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread, 5207 rb_node), print_all); 5208 5209 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 5210 struct binder_node *node = rb_entry(n, struct binder_node, 5211 rb_node); 5212 /* 5213 * take a temporary reference on the node so it 5214 * survives and isn't removed from the tree 5215 * while we print it. 
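		 * The reference is dropped via last_node on the next
		 * iteration (or after the loop), outside the inner lock,
		 * since binder_put_node() may free the node.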
5216 */ 5217 binder_inc_node_tmpref_ilocked(node); 5218 /* Need to drop inner lock to take node lock */ 5219 binder_inner_proc_unlock(proc); 5220 if (last_node) 5221 binder_put_node(last_node); 5222 binder_node_inner_lock(node); 5223 print_binder_node_nilocked(m, node); 5224 binder_node_inner_unlock(node); 5225 last_node = node; 5226 binder_inner_proc_lock(proc); 5227 } 5228 binder_inner_proc_unlock(proc); 5229 if (last_node) 5230 binder_put_node(last_node); 5231 5232 if (print_all) { 5233 binder_proc_lock(proc); 5234 for (n = rb_first(&proc->refs_by_desc); 5235 n != NULL; 5236 n = rb_next(n)) 5237 print_binder_ref_olocked(m, rb_entry(n, 5238 struct binder_ref, 5239 rb_node_desc)); 5240 binder_proc_unlock(proc); 5241 } 5242 binder_alloc_print_allocated(m, &proc->alloc); 5243 binder_inner_proc_lock(proc); 5244 list_for_each_entry(w, &proc->todo, entry) 5245 print_binder_work_ilocked(m, proc, " ", 5246 " pending transaction", w); 5247 list_for_each_entry(w, &proc->delivered_death, entry) { 5248 seq_puts(m, " has delivered dead binder\n"); 5249 break; 5250 } 5251 binder_inner_proc_unlock(proc); 5252 if (!print_all && m->count == header_pos) 5253 m->count = start_pos; 5254 } 5255 5256 static const char * const binder_return_strings[] = { 5257 "BR_ERROR", 5258 "BR_OK", 5259 "BR_TRANSACTION", 5260 "BR_REPLY", 5261 "BR_ACQUIRE_RESULT", 5262 "BR_DEAD_REPLY", 5263 "BR_TRANSACTION_COMPLETE", 5264 "BR_INCREFS", 5265 "BR_ACQUIRE", 5266 "BR_RELEASE", 5267 "BR_DECREFS", 5268 "BR_ATTEMPT_ACQUIRE", 5269 "BR_NOOP", 5270 "BR_SPAWN_LOOPER", 5271 "BR_FINISHED", 5272 "BR_DEAD_BINDER", 5273 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 5274 "BR_FAILED_REPLY" 5275 }; 5276 5277 static const char * const binder_command_strings[] = { 5278 "BC_TRANSACTION", 5279 "BC_REPLY", 5280 "BC_ACQUIRE_RESULT", 5281 "BC_FREE_BUFFER", 5282 "BC_INCREFS", 5283 "BC_ACQUIRE", 5284 "BC_RELEASE", 5285 "BC_DECREFS", 5286 "BC_INCREFS_DONE", 5287 "BC_ACQUIRE_DONE", 5288 "BC_ATTEMPT_ACQUIRE", 5289 "BC_REGISTER_LOOPER", 5290 "BC_ENTER_LOOPER", 5291 "BC_EXIT_LOOPER", 5292 "BC_REQUEST_DEATH_NOTIFICATION", 5293 "BC_CLEAR_DEATH_NOTIFICATION", 5294 "BC_DEAD_BINDER_DONE", 5295 "BC_TRANSACTION_SG", 5296 "BC_REPLY_SG", 5297 }; 5298 5299 static const char * const binder_objstat_strings[] = { 5300 "proc", 5301 "thread", 5302 "node", 5303 "ref", 5304 "death", 5305 "transaction", 5306 "transaction_complete" 5307 }; 5308 5309 static void print_binder_stats(struct seq_file *m, const char *prefix, 5310 struct binder_stats *stats) 5311 { 5312 int i; 5313 5314 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != 5315 ARRAY_SIZE(binder_command_strings)); 5316 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { 5317 int temp = atomic_read(&stats->bc[i]); 5318 5319 if (temp) 5320 seq_printf(m, "%s%s: %d\n", prefix, 5321 binder_command_strings[i], temp); 5322 } 5323 5324 BUILD_BUG_ON(ARRAY_SIZE(stats->br) != 5325 ARRAY_SIZE(binder_return_strings)); 5326 for (i = 0; i < ARRAY_SIZE(stats->br); i++) { 5327 int temp = atomic_read(&stats->br[i]); 5328 5329 if (temp) 5330 seq_printf(m, "%s%s: %d\n", prefix, 5331 binder_return_strings[i], temp); 5332 } 5333 5334 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 5335 ARRAY_SIZE(binder_objstat_strings)); 5336 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 5337 ARRAY_SIZE(stats->obj_deleted)); 5338 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { 5339 int created = atomic_read(&stats->obj_created[i]); 5340 int deleted = atomic_read(&stats->obj_deleted[i]); 5341 5342 if (created || deleted) 5343 seq_printf(m, "%s%s: active %d total %d\n", 5344 
prefix, 5345 binder_objstat_strings[i], 5346 created - deleted, 5347 created); 5348 } 5349 } 5350 5351 static void print_binder_proc_stats(struct seq_file *m, 5352 struct binder_proc *proc) 5353 { 5354 struct binder_work *w; 5355 struct binder_thread *thread; 5356 struct rb_node *n; 5357 int count, strong, weak, ready_threads; 5358 size_t free_async_space = 5359 binder_alloc_get_free_async_space(&proc->alloc); 5360 5361 seq_printf(m, "proc %d\n", proc->pid); 5362 seq_printf(m, "context %s\n", proc->context->name); 5363 count = 0; 5364 ready_threads = 0; 5365 binder_inner_proc_lock(proc); 5366 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 5367 count++; 5368 5369 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node) 5370 ready_threads++; 5371 5372 seq_printf(m, " threads: %d\n", count); 5373 seq_printf(m, " requested threads: %d+%d/%d\n" 5374 " ready threads %d\n" 5375 " free async space %zd\n", proc->requested_threads, 5376 proc->requested_threads_started, proc->max_threads, 5377 ready_threads, 5378 free_async_space); 5379 count = 0; 5380 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) 5381 count++; 5382 binder_inner_proc_unlock(proc); 5383 seq_printf(m, " nodes: %d\n", count); 5384 count = 0; 5385 strong = 0; 5386 weak = 0; 5387 binder_proc_lock(proc); 5388 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { 5389 struct binder_ref *ref = rb_entry(n, struct binder_ref, 5390 rb_node_desc); 5391 count++; 5392 strong += ref->data.strong; 5393 weak += ref->data.weak; 5394 } 5395 binder_proc_unlock(proc); 5396 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); 5397 5398 count = binder_alloc_get_allocated_count(&proc->alloc); 5399 seq_printf(m, " buffers: %d\n", count); 5400 5401 binder_alloc_print_pages(m, &proc->alloc); 5402 5403 count = 0; 5404 binder_inner_proc_lock(proc); 5405 list_for_each_entry(w, &proc->todo, entry) { 5406 if (w->type == BINDER_WORK_TRANSACTION) 5407 count++; 5408 } 5409 binder_inner_proc_unlock(proc); 5410 seq_printf(m, " pending transactions: %d\n", count); 5411 5412 print_binder_stats(m, " ", &proc->stats); 5413 } 5414 5415 5416 static int binder_state_show(struct seq_file *m, void *unused) 5417 { 5418 struct binder_proc *proc; 5419 struct binder_node *node; 5420 struct binder_node *last_node = NULL; 5421 5422 seq_puts(m, "binder state:\n"); 5423 5424 spin_lock(&binder_dead_nodes_lock); 5425 if (!hlist_empty(&binder_dead_nodes)) 5426 seq_puts(m, "dead nodes:\n"); 5427 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) { 5428 /* 5429 * take a temporary reference on the node so it 5430 * survives and isn't removed from the list 5431 * while we print it. 
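	 * Dead nodes have no proc, so node->lock alone is enough
	 * for print_binder_node_nilocked() below.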
5432 */ 5433 node->tmp_refs++; 5434 spin_unlock(&binder_dead_nodes_lock); 5435 if (last_node) 5436 binder_put_node(last_node); 5437 binder_node_lock(node); 5438 print_binder_node_nilocked(m, node); 5439 binder_node_unlock(node); 5440 last_node = node; 5441 spin_lock(&binder_dead_nodes_lock); 5442 } 5443 spin_unlock(&binder_dead_nodes_lock); 5444 if (last_node) 5445 binder_put_node(last_node); 5446 5447 mutex_lock(&binder_procs_lock); 5448 hlist_for_each_entry(proc, &binder_procs, proc_node) 5449 print_binder_proc(m, proc, 1); 5450 mutex_unlock(&binder_procs_lock); 5451 5452 return 0; 5453 } 5454 5455 static int binder_stats_show(struct seq_file *m, void *unused) 5456 { 5457 struct binder_proc *proc; 5458 5459 seq_puts(m, "binder stats:\n"); 5460 5461 print_binder_stats(m, "", &binder_stats); 5462 5463 mutex_lock(&binder_procs_lock); 5464 hlist_for_each_entry(proc, &binder_procs, proc_node) 5465 print_binder_proc_stats(m, proc); 5466 mutex_unlock(&binder_procs_lock); 5467 5468 return 0; 5469 } 5470 5471 static int binder_transactions_show(struct seq_file *m, void *unused) 5472 { 5473 struct binder_proc *proc; 5474 5475 seq_puts(m, "binder transactions:\n"); 5476 mutex_lock(&binder_procs_lock); 5477 hlist_for_each_entry(proc, &binder_procs, proc_node) 5478 print_binder_proc(m, proc, 0); 5479 mutex_unlock(&binder_procs_lock); 5480 5481 return 0; 5482 } 5483 5484 static int binder_proc_show(struct seq_file *m, void *unused) 5485 { 5486 struct binder_proc *itr; 5487 int pid = (unsigned long)m->private; 5488 5489 mutex_lock(&binder_procs_lock); 5490 hlist_for_each_entry(itr, &binder_procs, proc_node) { 5491 if (itr->pid == pid) { 5492 seq_puts(m, "binder proc state:\n"); 5493 print_binder_proc(m, itr, 1); 5494 } 5495 } 5496 mutex_unlock(&binder_procs_lock); 5497 5498 return 0; 5499 } 5500 5501 static void print_binder_transaction_log_entry(struct seq_file *m, 5502 struct binder_transaction_log_entry *e) 5503 { 5504 int debug_id = READ_ONCE(e->debug_id_done); 5505 /* 5506 * read barrier to guarantee debug_id_done read before 5507 * we print the log values 5508 */ 5509 smp_rmb(); 5510 seq_printf(m, 5511 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d", 5512 e->debug_id, (e->call_type == 2) ? "reply" : 5513 ((e->call_type == 1) ? "async" : "call "), e->from_proc, 5514 e->from_thread, e->to_proc, e->to_thread, e->context_name, 5515 e->to_node, e->target_handle, e->data_size, e->offsets_size, 5516 e->return_error, e->return_error_param, 5517 e->return_error_line); 5518 /* 5519 * read-barrier to guarantee read of debug_id_done after 5520 * done printing the fields of the entry 5521 */ 5522 smp_rmb(); 5523 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ? 5524 "\n" : " (incomplete)\n"); 5525 } 5526 5527 static int binder_transaction_log_show(struct seq_file *m, void *unused) 5528 { 5529 struct binder_transaction_log *log = m->private; 5530 unsigned int log_cur = atomic_read(&log->cur); 5531 unsigned int count; 5532 unsigned int cur; 5533 int i; 5534 5535 count = log_cur + 1; 5536 cur = count < ARRAY_SIZE(log->entry) && !log->full ? 
5537 0 : count % ARRAY_SIZE(log->entry); 5538 if (count > ARRAY_SIZE(log->entry) || log->full) 5539 count = ARRAY_SIZE(log->entry); 5540 for (i = 0; i < count; i++) { 5541 unsigned int index = cur++ % ARRAY_SIZE(log->entry); 5542 5543 print_binder_transaction_log_entry(m, &log->entry[index]); 5544 } 5545 return 0; 5546 } 5547 5548 static const struct file_operations binder_fops = { 5549 .owner = THIS_MODULE, 5550 .poll = binder_poll, 5551 .unlocked_ioctl = binder_ioctl, 5552 .compat_ioctl = binder_ioctl, 5553 .mmap = binder_mmap, 5554 .open = binder_open, 5555 .flush = binder_flush, 5556 .release = binder_release, 5557 }; 5558 5559 BINDER_DEBUG_ENTRY(state); 5560 BINDER_DEBUG_ENTRY(stats); 5561 BINDER_DEBUG_ENTRY(transactions); 5562 BINDER_DEBUG_ENTRY(transaction_log); 5563 5564 static int __init init_binder_device(const char *name) 5565 { 5566 int ret; 5567 struct binder_device *binder_device; 5568 5569 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL); 5570 if (!binder_device) 5571 return -ENOMEM; 5572 5573 binder_device->miscdev.fops = &binder_fops; 5574 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR; 5575 binder_device->miscdev.name = name; 5576 5577 binder_device->context.binder_context_mgr_uid = INVALID_UID; 5578 binder_device->context.name = name; 5579 mutex_init(&binder_device->context.context_mgr_node_lock); 5580 5581 ret = misc_register(&binder_device->miscdev); 5582 if (ret < 0) { 5583 kfree(binder_device); 5584 return ret; 5585 } 5586 5587 hlist_add_head(&binder_device->hlist, &binder_devices); 5588 5589 return ret; 5590 } 5591 5592 static int __init binder_init(void) 5593 { 5594 int ret; 5595 char *device_name, *device_names, *device_tmp; 5596 struct binder_device *device; 5597 struct hlist_node *tmp; 5598 5599 ret = binder_alloc_shrinker_init(); 5600 if (ret) 5601 return ret; 5602 5603 atomic_set(&binder_transaction_log.cur, ~0U); 5604 atomic_set(&binder_transaction_log_failed.cur, ~0U); 5605 5606 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); 5607 if (binder_debugfs_dir_entry_root) 5608 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", 5609 binder_debugfs_dir_entry_root); 5610 5611 if (binder_debugfs_dir_entry_root) { 5612 debugfs_create_file("state", 5613 0444, 5614 binder_debugfs_dir_entry_root, 5615 NULL, 5616 &binder_state_fops); 5617 debugfs_create_file("stats", 5618 0444, 5619 binder_debugfs_dir_entry_root, 5620 NULL, 5621 &binder_stats_fops); 5622 debugfs_create_file("transactions", 5623 0444, 5624 binder_debugfs_dir_entry_root, 5625 NULL, 5626 &binder_transactions_fops); 5627 debugfs_create_file("transaction_log", 5628 0444, 5629 binder_debugfs_dir_entry_root, 5630 &binder_transaction_log, 5631 &binder_transaction_log_fops); 5632 debugfs_create_file("failed_transaction_log", 5633 0444, 5634 binder_debugfs_dir_entry_root, 5635 &binder_transaction_log_failed, 5636 &binder_transaction_log_fops); 5637 } 5638 5639 /* 5640 * Copy the module_parameter string, because we don't want to 5641 * tokenize it in-place. 
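	 * strsep() below writes NUL bytes into the buffer it scans.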
5642 */ 5643 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL); 5644 if (!device_names) { 5645 ret = -ENOMEM; 5646 goto err_alloc_device_names_failed; 5647 } 5648 strcpy(device_names, binder_devices_param); 5649 5650 device_tmp = device_names; 5651 while ((device_name = strsep(&device_tmp, ","))) { 5652 ret = init_binder_device(device_name); 5653 if (ret) 5654 goto err_init_binder_device_failed; 5655 } 5656 5657 return ret; 5658 5659 err_init_binder_device_failed: 5660 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) { 5661 misc_deregister(&device->miscdev); 5662 hlist_del(&device->hlist); 5663 kfree(device); 5664 } 5665 5666 kfree(device_names); 5667 5668 err_alloc_device_names_failed: 5669 debugfs_remove_recursive(binder_debugfs_dir_entry_root); 5670 5671 return ret; 5672 } 5673 5674 device_initcall(binder_init); 5675 5676 #define CREATE_TRACE_POINTS 5677 #include "binder_trace.h" 5678 5679 MODULE_LICENSE("GPL v2"); 5680