1 /* binder.c 2 * 3 * Android IPC Subsystem 4 * 5 * Copyright (C) 2007-2008 Google, Inc. 6 * 7 * This software is licensed under the terms of the GNU General Public 8 * License version 2, as published by the Free Software Foundation, and 9 * may be copied, distributed, and modified under those terms. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 */ 17 18 /* 19 * Locking overview 20 * 21 * There are 3 main spinlocks which must be acquired in the 22 * order shown: 23 * 24 * 1) proc->outer_lock : protects binder_ref 25 * binder_proc_lock() and binder_proc_unlock() are 26 * used to acq/rel. 27 * 2) node->lock : protects most fields of binder_node. 28 * binder_node_lock() and binder_node_unlock() are 29 * used to acq/rel 30 * 3) proc->inner_lock : protects the thread and node lists 31 * (proc->threads, proc->waiting_threads, proc->nodes) 32 * and all todo lists associated with the binder_proc 33 * (proc->todo, thread->todo, proc->delivered_death and 34 * node->async_todo), as well as thread->transaction_stack. 35 * binder_inner_proc_lock() and binder_inner_proc_unlock() 36 * are used to acq/rel 37 * 38 * Any lock under procA must never be nested under any lock at the same 39 * level or below on procB. 40 * 41 * Functions that require a lock held on entry indicate which lock 42 * is required in the suffix of the function name: 43 * 44 * foo_olocked() : requires proc->outer_lock 45 * foo_nlocked() : requires node->lock 46 * foo_ilocked() : requires proc->inner_lock 47 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock 48 * foo_nilocked(): requires node->lock and proc->inner_lock 49 * ...
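 *
 * As an illustrative sketch (not an actual call path in this driver), a
 * function that needed all three locks for the same proc and node would
 * take and release them as follows:
 *
 *   binder_proc_lock(proc);          // 1) proc->outer_lock
 *   binder_node_lock(node);          // 2) node->lock
 *   binder_inner_proc_lock(proc);    // 3) proc->inner_lock
 *   ...
 *   binder_inner_proc_unlock(proc);
 *   binder_node_unlock(node);
 *   binder_proc_unlock(proc);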
50 */ 51 52 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 53 54 #include <asm/cacheflush.h> 55 #include <linux/fdtable.h> 56 #include <linux/file.h> 57 #include <linux/freezer.h> 58 #include <linux/fs.h> 59 #include <linux/list.h> 60 #include <linux/miscdevice.h> 61 #include <linux/module.h> 62 #include <linux/mutex.h> 63 #include <linux/nsproxy.h> 64 #include <linux/poll.h> 65 #include <linux/debugfs.h> 66 #include <linux/rbtree.h> 67 #include <linux/sched/signal.h> 68 #include <linux/sched/mm.h> 69 #include <linux/seq_file.h> 70 #include <linux/uaccess.h> 71 #include <linux/pid_namespace.h> 72 #include <linux/security.h> 73 #include <linux/spinlock.h> 74 75 #ifdef CONFIG_ANDROID_BINDER_IPC_32BIT 76 #define BINDER_IPC_32BIT 1 77 #endif 78 79 #include <uapi/linux/android/binder.h> 80 #include "binder_alloc.h" 81 #include "binder_trace.h" 82 83 static HLIST_HEAD(binder_deferred_list); 84 static DEFINE_MUTEX(binder_deferred_lock); 85 86 static HLIST_HEAD(binder_devices); 87 static HLIST_HEAD(binder_procs); 88 static DEFINE_MUTEX(binder_procs_lock); 89 90 static HLIST_HEAD(binder_dead_nodes); 91 static DEFINE_SPINLOCK(binder_dead_nodes_lock); 92 93 static struct dentry *binder_debugfs_dir_entry_root; 94 static struct dentry *binder_debugfs_dir_entry_proc; 95 static atomic_t binder_last_id; 96 97 #define BINDER_DEBUG_ENTRY(name) \ 98 static int binder_##name##_open(struct inode *inode, struct file *file) \ 99 { \ 100 return single_open(file, binder_##name##_show, inode->i_private); \ 101 } \ 102 \ 103 static const struct file_operations binder_##name##_fops = { \ 104 .owner = THIS_MODULE, \ 105 .open = binder_##name##_open, \ 106 .read = seq_read, \ 107 .llseek = seq_lseek, \ 108 .release = single_release, \ 109 } 110 111 static int binder_proc_show(struct seq_file *m, void *unused); 112 BINDER_DEBUG_ENTRY(proc); 113 114 /* This is only defined in include/asm-arm/sizes.h */ 115 #ifndef SZ_1K 116 #define SZ_1K 0x400 117 #endif 118 119 #ifndef SZ_4M 120 #define SZ_4M 0x400000 121 #endif 122 123 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE) 124 125 enum { 126 BINDER_DEBUG_USER_ERROR = 1U << 0, 127 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1, 128 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2, 129 BINDER_DEBUG_OPEN_CLOSE = 1U << 3, 130 BINDER_DEBUG_DEAD_BINDER = 1U << 4, 131 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5, 132 BINDER_DEBUG_READ_WRITE = 1U << 6, 133 BINDER_DEBUG_USER_REFS = 1U << 7, 134 BINDER_DEBUG_THREADS = 1U << 8, 135 BINDER_DEBUG_TRANSACTION = 1U << 9, 136 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10, 137 BINDER_DEBUG_FREE_BUFFER = 1U << 11, 138 BINDER_DEBUG_INTERNAL_REFS = 1U << 12, 139 BINDER_DEBUG_PRIORITY_CAP = 1U << 13, 140 BINDER_DEBUG_SPINLOCKS = 1U << 14, 141 }; 142 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | 143 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; 144 module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO); 145 146 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES; 147 module_param_named(devices, binder_devices_param, charp, 0444); 148 149 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); 150 static int binder_stop_on_user_error; 151 152 static int binder_set_stop_on_user_error(const char *val, 153 const struct kernel_param *kp) 154 { 155 int ret; 156 157 ret = param_set_int(val, kp); 158 if (binder_stop_on_user_error < 2) 159 wake_up(&binder_user_error_wait); 160 return ret; 161 } 162 module_param_call(stop_on_user_error, binder_set_stop_on_user_error, 163 param_get_int, &binder_stop_on_user_error, 
S_IWUSR | S_IRUGO); 164 165 #define binder_debug(mask, x...) \ 166 do { \ 167 if (binder_debug_mask & mask) \ 168 pr_info(x); \ 169 } while (0) 170 171 #define binder_user_error(x...) \ 172 do { \ 173 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \ 174 pr_info(x); \ 175 if (binder_stop_on_user_error) \ 176 binder_stop_on_user_error = 2; \ 177 } while (0) 178 179 #define to_flat_binder_object(hdr) \ 180 container_of(hdr, struct flat_binder_object, hdr) 181 182 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr) 183 184 #define to_binder_buffer_object(hdr) \ 185 container_of(hdr, struct binder_buffer_object, hdr) 186 187 #define to_binder_fd_array_object(hdr) \ 188 container_of(hdr, struct binder_fd_array_object, hdr) 189 190 enum binder_stat_types { 191 BINDER_STAT_PROC, 192 BINDER_STAT_THREAD, 193 BINDER_STAT_NODE, 194 BINDER_STAT_REF, 195 BINDER_STAT_DEATH, 196 BINDER_STAT_TRANSACTION, 197 BINDER_STAT_TRANSACTION_COMPLETE, 198 BINDER_STAT_COUNT 199 }; 200 201 struct binder_stats { 202 atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1]; 203 atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1]; 204 atomic_t obj_created[BINDER_STAT_COUNT]; 205 atomic_t obj_deleted[BINDER_STAT_COUNT]; 206 }; 207 208 static struct binder_stats binder_stats; 209 210 static inline void binder_stats_deleted(enum binder_stat_types type) 211 { 212 atomic_inc(&binder_stats.obj_deleted[type]); 213 } 214 215 static inline void binder_stats_created(enum binder_stat_types type) 216 { 217 atomic_inc(&binder_stats.obj_created[type]); 218 } 219 220 struct binder_transaction_log_entry { 221 int debug_id; 222 int debug_id_done; 223 int call_type; 224 int from_proc; 225 int from_thread; 226 int target_handle; 227 int to_proc; 228 int to_thread; 229 int to_node; 230 int data_size; 231 int offsets_size; 232 int return_error_line; 233 uint32_t return_error; 234 uint32_t return_error_param; 235 const char *context_name; 236 }; 237 struct binder_transaction_log { 238 atomic_t cur; 239 bool full; 240 struct binder_transaction_log_entry entry[32]; 241 }; 242 static struct binder_transaction_log binder_transaction_log; 243 static struct binder_transaction_log binder_transaction_log_failed; 244 245 static struct binder_transaction_log_entry *binder_transaction_log_add( 246 struct binder_transaction_log *log) 247 { 248 struct binder_transaction_log_entry *e; 249 unsigned int cur = atomic_inc_return(&log->cur); 250 251 if (cur >= ARRAY_SIZE(log->entry)) 252 log->full = 1; 253 e = &log->entry[cur % ARRAY_SIZE(log->entry)]; 254 WRITE_ONCE(e->debug_id_done, 0); 255 /* 256 * write-barrier to synchronize access to e->debug_id_done. 257 * We make sure the initialized 0 value is seen before 258 * memset() other fields are zeroed by memset. 259 */ 260 smp_wmb(); 261 memset(e, 0, sizeof(*e)); 262 return e; 263 } 264 265 struct binder_context { 266 struct binder_node *binder_context_mgr_node; 267 struct mutex context_mgr_node_lock; 268 269 kuid_t binder_context_mgr_uid; 270 const char *name; 271 }; 272 273 struct binder_device { 274 struct hlist_node hlist; 275 struct miscdevice miscdev; 276 struct binder_context context; 277 }; 278 279 /** 280 * struct binder_work - work enqueued on a worklist 281 * @entry: node enqueued on list 282 * @type: type of work to be performed 283 * 284 * There are separate work lists for proc, thread, and node (async). 
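 *
 * A binder_work is always embedded in a containing object (for example a
 * binder_transaction, binder_error, or binder_node). Consumers pop an
 * entry off a worklist and recover the container based on @type, roughly
 * (illustrative sketch):
 *
 *   struct binder_work *w = binder_dequeue_work_head_ilocked(list);
 *
 *   if (w && w->type == BINDER_WORK_TRANSACTION)
 *           t = container_of(w, struct binder_transaction, work);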
285 */ 286 struct binder_work { 287 struct list_head entry; 288 289 enum { 290 BINDER_WORK_TRANSACTION = 1, 291 BINDER_WORK_TRANSACTION_COMPLETE, 292 BINDER_WORK_RETURN_ERROR, 293 BINDER_WORK_NODE, 294 BINDER_WORK_DEAD_BINDER, 295 BINDER_WORK_DEAD_BINDER_AND_CLEAR, 296 BINDER_WORK_CLEAR_DEATH_NOTIFICATION, 297 } type; 298 }; 299 300 struct binder_error { 301 struct binder_work work; 302 uint32_t cmd; 303 }; 304 305 /** 306 * struct binder_node - binder node bookkeeping 307 * @debug_id: unique ID for debugging 308 * (invariant after initialized) 309 * @lock: lock for node fields 310 * @work: worklist element for node work 311 * (protected by @proc->inner_lock) 312 * @rb_node: element for proc->nodes tree 313 * (protected by @proc->inner_lock) 314 * @dead_node: element for binder_dead_nodes list 315 * (protected by binder_dead_nodes_lock) 316 * @proc: binder_proc that owns this node 317 * (invariant after initialized) 318 * @refs: list of references on this node 319 * (protected by @lock) 320 * @internal_strong_refs: used to take strong references when 321 * initiating a transaction 322 * (protected by @proc->inner_lock if @proc 323 * and by @lock) 324 * @local_weak_refs: weak user refs from local process 325 * (protected by @proc->inner_lock if @proc 326 * and by @lock) 327 * @local_strong_refs: strong user refs from local process 328 * (protected by @proc->inner_lock if @proc 329 * and by @lock) 330 * @tmp_refs: temporary kernel refs 331 * (protected by @proc->inner_lock while @proc 332 * is valid, and by binder_dead_nodes_lock 333 * if @proc is NULL. During inc/dec and node release 334 * it is also protected by @lock to provide safety 335 * as the node dies and @proc becomes NULL) 336 * @ptr: userspace pointer for node 337 * (invariant, no lock needed) 338 * @cookie: userspace cookie for node 339 * (invariant, no lock needed) 340 * @has_strong_ref: userspace notified of strong ref 341 * (protected by @proc->inner_lock if @proc 342 * and by @lock) 343 * @pending_strong_ref: userspace has acked notification of strong ref 344 * (protected by @proc->inner_lock if @proc 345 * and by @lock) 346 * @has_weak_ref: userspace notified of weak ref 347 * (protected by @proc->inner_lock if @proc 348 * and by @lock) 349 * @pending_weak_ref: userspace has acked notification of weak ref 350 * (protected by @proc->inner_lock if @proc 351 * and by @lock) 352 * @has_async_transaction: async transaction to node in progress 353 * (protected by @lock) 354 * @accept_fds: file descriptor operations supported for node 355 * (invariant after initialized) 356 * @min_priority: minimum scheduling priority 357 * (invariant after initialized) 358 * @async_todo: list of async work items 359 * (protected by @proc->inner_lock) 360 * 361 * Bookkeeping structure for binder nodes. 
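 *
 * Note that @rb_node and @dead_node are members of a union: while @proc is
 * valid the node is indexed by @ptr in proc->nodes via @rb_node; if the
 * owning process dies while references remain, the node is moved to the
 * global binder_dead_nodes list via @dead_node and @proc is set to NULL.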
362 */ 363 struct binder_node { 364 int debug_id; 365 spinlock_t lock; 366 struct binder_work work; 367 union { 368 struct rb_node rb_node; 369 struct hlist_node dead_node; 370 }; 371 struct binder_proc *proc; 372 struct hlist_head refs; 373 int internal_strong_refs; 374 int local_weak_refs; 375 int local_strong_refs; 376 int tmp_refs; 377 binder_uintptr_t ptr; 378 binder_uintptr_t cookie; 379 struct { 380 /* 381 * bitfield elements protected by 382 * proc inner_lock 383 */ 384 u8 has_strong_ref:1; 385 u8 pending_strong_ref:1; 386 u8 has_weak_ref:1; 387 u8 pending_weak_ref:1; 388 }; 389 struct { 390 /* 391 * invariant after initialization 392 */ 393 u8 accept_fds:1; 394 u8 min_priority; 395 }; 396 bool has_async_transaction; 397 struct list_head async_todo; 398 }; 399 400 struct binder_ref_death { 401 /** 402 * @work: worklist element for death notifications 403 * (protected by inner_lock of the proc that 404 * this ref belongs to) 405 */ 406 struct binder_work work; 407 binder_uintptr_t cookie; 408 }; 409 410 /** 411 * struct binder_ref_data - binder_ref counts and id 412 * @debug_id: unique ID for the ref 413 * @desc: unique userspace handle for ref 414 * @strong: strong ref count (debugging only if not locked) 415 * @weak: weak ref count (debugging only if not locked) 416 * 417 * Structure to hold ref count and ref id information. Since 418 * the actual ref can only be accessed with a lock, this structure 419 * is used to return information about the ref to callers of 420 * ref inc/dec functions. 421 */ 422 struct binder_ref_data { 423 int debug_id; 424 uint32_t desc; 425 int strong; 426 int weak; 427 }; 428 429 /** 430 * struct binder_ref - struct to track references on nodes 431 * @data: binder_ref_data containing id, handle, and current refcounts 432 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree 433 * @rb_node_node: node for lookup by @node in proc's rb_tree 434 * @node_entry: list entry for node->refs list in target node 435 * (protected by @node->lock) 436 * @proc: binder_proc containing ref 437 * @node: binder_node of target node. When cleaning up a 438 * ref for deletion in binder_cleanup_ref, a non-NULL 439 * @node indicates the node must be freed 440 * @death: pointer to death notification (ref_death) if requested 441 * (protected by @node->lock) 442 * 443 * Structure to track references from procA to target node (on procB). This 444 * structure is unsafe to access without holding @proc->outer_lock. 
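 *
 * A minimal sketch of the intended access pattern (lookup and field reads
 * both under the owning proc's outer lock):
 *
 *   binder_proc_lock(proc);
 *   ref = binder_get_ref_olocked(proc, desc, true);
 *   if (ref)
 *           strong = ref->data.strong;
 *   binder_proc_unlock(proc);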
445 */ 446 struct binder_ref { 447 /* Lookups needed: */ 448 /* node + proc => ref (transaction) */ 449 /* desc + proc => ref (transaction, inc/dec ref) */ 450 /* node => refs + procs (proc exit) */ 451 struct binder_ref_data data; 452 struct rb_node rb_node_desc; 453 struct rb_node rb_node_node; 454 struct hlist_node node_entry; 455 struct binder_proc *proc; 456 struct binder_node *node; 457 struct binder_ref_death *death; 458 }; 459 460 enum binder_deferred_state { 461 BINDER_DEFERRED_PUT_FILES = 0x01, 462 BINDER_DEFERRED_FLUSH = 0x02, 463 BINDER_DEFERRED_RELEASE = 0x04, 464 }; 465 466 /** 467 * struct binder_proc - binder process bookkeeping 468 * @proc_node: element for binder_procs list 469 * @threads: rbtree of binder_threads in this proc 470 * (protected by @inner_lock) 471 * @nodes: rbtree of binder nodes associated with 472 * this proc ordered by node->ptr 473 * (protected by @inner_lock) 474 * @refs_by_desc: rbtree of refs ordered by ref->desc 475 * (protected by @outer_lock) 476 * @refs_by_node: rbtree of refs ordered by ref->node 477 * (protected by @outer_lock) 478 * @waiting_threads: threads currently waiting for proc work 479 * (protected by @inner_lock) 480 * @pid PID of group_leader of process 481 * (invariant after initialized) 482 * @tsk task_struct for group_leader of process 483 * (invariant after initialized) 484 * @files files_struct for process 485 * (protected by @files_lock) 486 * @files_lock mutex to protect @files 487 * @deferred_work_node: element for binder_deferred_list 488 * (protected by binder_deferred_lock) 489 * @deferred_work: bitmap of deferred work to perform 490 * (protected by binder_deferred_lock) 491 * @is_dead: process is dead and awaiting free 492 * when outstanding transactions are cleaned up 493 * (protected by @inner_lock) 494 * @todo: list of work for this process 495 * (protected by @inner_lock) 496 * @wait: wait queue head to wait for proc work 497 * (invariant after initialized) 498 * @stats: per-process binder statistics 499 * (atomics, no lock needed) 500 * @delivered_death: list of delivered death notification 501 * (protected by @inner_lock) 502 * @max_threads: cap on number of binder threads 503 * (protected by @inner_lock) 504 * @requested_threads: number of binder threads requested but not 505 * yet started. In current implementation, can 506 * only be 0 or 1. 
507 * (protected by @inner_lock) 508 * @requested_threads_started: number of binder threads started 509 * (protected by @inner_lock) 510 * @tmp_ref: temporary reference to indicate proc is in use 511 * (protected by @inner_lock) 512 * @default_priority: default scheduler priority 513 * (invariant after initialized) 514 * @debugfs_entry: debugfs node 515 * @alloc: binder allocator bookkeeping 516 * @context: binder_context for this proc 517 * (invariant after initialized) 518 * @inner_lock: can nest under outer_lock and/or node lock 519 * @outer_lock: no nesting under inner or node lock 520 * Lock order: 1) outer, 2) node, 3) inner 521 * 522 * Bookkeeping structure for binder processes 523 */ 524 struct binder_proc { 525 struct hlist_node proc_node; 526 struct rb_root threads; 527 struct rb_root nodes; 528 struct rb_root refs_by_desc; 529 struct rb_root refs_by_node; 530 struct list_head waiting_threads; 531 int pid; 532 struct task_struct *tsk; 533 struct files_struct *files; 534 struct mutex files_lock; 535 struct hlist_node deferred_work_node; 536 int deferred_work; 537 bool is_dead; 538 539 struct list_head todo; 540 wait_queue_head_t wait; 541 struct binder_stats stats; 542 struct list_head delivered_death; 543 int max_threads; 544 int requested_threads; 545 int requested_threads_started; 546 int tmp_ref; 547 long default_priority; 548 struct dentry *debugfs_entry; 549 struct binder_alloc alloc; 550 struct binder_context *context; 551 spinlock_t inner_lock; 552 spinlock_t outer_lock; 553 }; 554 555 enum { 556 BINDER_LOOPER_STATE_REGISTERED = 0x01, 557 BINDER_LOOPER_STATE_ENTERED = 0x02, 558 BINDER_LOOPER_STATE_EXITED = 0x04, 559 BINDER_LOOPER_STATE_INVALID = 0x08, 560 BINDER_LOOPER_STATE_WAITING = 0x10, 561 BINDER_LOOPER_STATE_POLL = 0x20, 562 }; 563 564 /** 565 * struct binder_thread - binder thread bookkeeping 566 * @proc: binder process for this thread 567 * (invariant after initialization) 568 * @rb_node: element for proc->threads rbtree 569 * (protected by @proc->inner_lock) 570 * @waiting_thread_node: element for @proc->waiting_threads list 571 * (protected by @proc->inner_lock) 572 * @pid: PID for this thread 573 * (invariant after initialization) 574 * @looper: bitmap of looping state 575 * (only accessed by this thread) 576 * @looper_need_return: looping thread needs to exit driver 577 * (no lock needed) 578 * @transaction_stack: stack of in-progress transactions for this thread 579 * (protected by @proc->inner_lock) 580 * @todo: list of work to do for this thread 581 * (protected by @proc->inner_lock) 582 * @process_todo: whether work in @todo should be processed 583 * (protected by @proc->inner_lock) 584 * @return_error: transaction errors reported by this thread 585 * (only accessed by this thread) 586 * @reply_error: transaction errors reported by target thread 587 * (protected by @proc->inner_lock) 588 * @wait: wait queue for thread work 589 * @stats: per-thread statistics 590 * (atomics, no lock needed) 591 * @tmp_ref: temporary reference to indicate thread is in use 592 * (atomic since @proc->inner_lock cannot 593 * always be acquired) 594 * @is_dead: thread is dead and awaiting free 595 * when outstanding transactions are cleaned up 596 * (protected by @proc->inner_lock) 597 * 598 * Bookkeeping structure for binder threads.
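 *
 * @looper is a bitmask of BINDER_LOOPER_STATE_* values; for example, a
 * registered looper thread that is blocked waiting for work would have
 * (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_WAITING) set.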
599 */ 600 struct binder_thread { 601 struct binder_proc *proc; 602 struct rb_node rb_node; 603 struct list_head waiting_thread_node; 604 int pid; 605 int looper; /* only modified by this thread */ 606 bool looper_need_return; /* can be written by other thread */ 607 struct binder_transaction *transaction_stack; 608 struct list_head todo; 609 bool process_todo; 610 struct binder_error return_error; 611 struct binder_error reply_error; 612 wait_queue_head_t wait; 613 struct binder_stats stats; 614 atomic_t tmp_ref; 615 bool is_dead; 616 }; 617 618 struct binder_transaction { 619 int debug_id; 620 struct binder_work work; 621 struct binder_thread *from; 622 struct binder_transaction *from_parent; 623 struct binder_proc *to_proc; 624 struct binder_thread *to_thread; 625 struct binder_transaction *to_parent; 626 unsigned need_reply:1; 627 /* unsigned is_dead:1; */ /* not used at the moment */ 628 629 struct binder_buffer *buffer; 630 unsigned int code; 631 unsigned int flags; 632 long priority; 633 long saved_priority; 634 kuid_t sender_euid; 635 /** 636 * @lock: protects @from, @to_proc, and @to_thread 637 * 638 * @from, @to_proc, and @to_thread can be set to NULL 639 * during thread teardown 640 */ 641 spinlock_t lock; 642 }; 643 644 /** 645 * binder_proc_lock() - Acquire outer lock for given binder_proc 646 * @proc: struct binder_proc to acquire 647 * 648 * Acquires proc->outer_lock. Used to protect binder_ref 649 * structures associated with the given proc. 650 */ 651 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__) 652 static void 653 _binder_proc_lock(struct binder_proc *proc, int line) 654 { 655 binder_debug(BINDER_DEBUG_SPINLOCKS, 656 "%s: line=%d\n", __func__, line); 657 spin_lock(&proc->outer_lock); 658 } 659 660 /** 661 * binder_proc_unlock() - Release spinlock for given binder_proc 662 * @proc: struct binder_proc to acquire 663 * 664 * Release lock acquired via binder_proc_lock() 665 */ 666 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__) 667 static void 668 _binder_proc_unlock(struct binder_proc *proc, int line) 669 { 670 binder_debug(BINDER_DEBUG_SPINLOCKS, 671 "%s: line=%d\n", __func__, line); 672 spin_unlock(&proc->outer_lock); 673 } 674 675 /** 676 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc 677 * @proc: struct binder_proc to acquire 678 * 679 * Acquires proc->inner_lock. Used to protect todo lists 680 */ 681 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__) 682 static void 683 _binder_inner_proc_lock(struct binder_proc *proc, int line) 684 { 685 binder_debug(BINDER_DEBUG_SPINLOCKS, 686 "%s: line=%d\n", __func__, line); 687 spin_lock(&proc->inner_lock); 688 } 689 690 /** 691 * binder_inner_proc_unlock() - Release inner lock for given binder_proc 692 * @proc: struct binder_proc to acquire 693 * 694 * Release lock acquired via binder_inner_proc_lock() 695 */ 696 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__) 697 static void 698 _binder_inner_proc_unlock(struct binder_proc *proc, int line) 699 { 700 binder_debug(BINDER_DEBUG_SPINLOCKS, 701 "%s: line=%d\n", __func__, line); 702 spin_unlock(&proc->inner_lock); 703 } 704 705 /** 706 * binder_node_lock() - Acquire spinlock for given binder_node 707 * @node: struct binder_node to acquire 708 * 709 * Acquires node->lock. 
Used to protect binder_node fields 710 */ 711 #define binder_node_lock(node) _binder_node_lock(node, __LINE__) 712 static void 713 _binder_node_lock(struct binder_node *node, int line) 714 { 715 binder_debug(BINDER_DEBUG_SPINLOCKS, 716 "%s: line=%d\n", __func__, line); 717 spin_lock(&node->lock); 718 } 719 720 /** 721 * binder_node_unlock() - Release spinlock for given binder_node 722 * @node: struct binder_node to acquire 723 * 724 * Release lock acquired via binder_node_lock() 725 */ 726 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__) 727 static void 728 _binder_node_unlock(struct binder_node *node, int line) 729 { 730 binder_debug(BINDER_DEBUG_SPINLOCKS, 731 "%s: line=%d\n", __func__, line); 732 spin_unlock(&node->lock); 733 } 734 735 /** 736 * binder_node_inner_lock() - Acquire node and inner locks 737 * @node: struct binder_node to acquire 738 * 739 * Acquires node->lock. If node->proc is non-NULL, also acquires 740 * proc->inner_lock. Used to protect binder_node fields 741 */ 742 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__) 743 static void 744 _binder_node_inner_lock(struct binder_node *node, int line) 745 { 746 binder_debug(BINDER_DEBUG_SPINLOCKS, 747 "%s: line=%d\n", __func__, line); 748 spin_lock(&node->lock); 749 if (node->proc) 750 binder_inner_proc_lock(node->proc); 751 } 752 753 /** 754 * binder_node_inner_unlock() - Release node and inner locks 755 * @node: struct binder_node to acquire 756 * 757 * Release locks acquired via binder_node_inner_lock() 758 */ 759 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__) 760 static void 761 _binder_node_inner_unlock(struct binder_node *node, int line) 762 { 763 struct binder_proc *proc = node->proc; 764 765 binder_debug(BINDER_DEBUG_SPINLOCKS, 766 "%s: line=%d\n", __func__, line); 767 if (proc) 768 binder_inner_proc_unlock(proc); 769 spin_unlock(&node->lock); 770 } 771 772 static bool binder_worklist_empty_ilocked(struct list_head *list) 773 { 774 return list_empty(list); 775 } 776 777 /** 778 * binder_worklist_empty() - Check if no items on the work list 779 * @proc: binder_proc associated with list 780 * @list: list to check 781 * 782 * Return: true if there are no items on list, else false 783 */ 784 static bool binder_worklist_empty(struct binder_proc *proc, 785 struct list_head *list) 786 { 787 bool ret; 788 789 binder_inner_proc_lock(proc); 790 ret = binder_worklist_empty_ilocked(list); 791 binder_inner_proc_unlock(proc); 792 return ret; 793 } 794 795 /** 796 * binder_enqueue_work_ilocked() - Add an item to the work list 797 * @work: struct binder_work to add to list 798 * @target_list: list to add work to 799 * 800 * Adds the work to the specified list. Asserts that work 801 * is not already on a list. 802 * 803 * Requires the proc->inner_lock to be held. 804 */ 805 static void 806 binder_enqueue_work_ilocked(struct binder_work *work, 807 struct list_head *target_list) 808 { 809 BUG_ON(target_list == NULL); 810 BUG_ON(work->entry.next && !list_empty(&work->entry)); 811 list_add_tail(&work->entry, target_list); 812 } 813 814 /** 815 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work 816 * @thread: thread to queue work to 817 * @work: struct binder_work to add to list 818 * 819 * Adds the work to the todo list of the thread. Doesn't set the process_todo 820 * flag, which means that (if it wasn't already set) the thread will go to 821 * sleep without handling this work when it calls read. 822 * 823 * Requires the proc->inner_lock to be held.
824 */ 825 static void 826 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread, 827 struct binder_work *work) 828 { 829 binder_enqueue_work_ilocked(work, &thread->todo); 830 } 831 832 /** 833 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list 834 * @thread: thread to queue work to 835 * @work: struct binder_work to add to list 836 * 837 * Adds the work to the todo list of the thread, and enables processing 838 * of the todo queue. 839 * 840 * Requires the proc->inner_lock to be held. 841 */ 842 static void 843 binder_enqueue_thread_work_ilocked(struct binder_thread *thread, 844 struct binder_work *work) 845 { 846 binder_enqueue_work_ilocked(work, &thread->todo); 847 thread->process_todo = true; 848 } 849 850 /** 851 * binder_enqueue_thread_work() - Add an item to the thread work list 852 * @thread: thread to queue work to 853 * @work: struct binder_work to add to list 854 * 855 * Adds the work to the todo list of the thread, and enables processing 856 * of the todo queue. 857 */ 858 static void 859 binder_enqueue_thread_work(struct binder_thread *thread, 860 struct binder_work *work) 861 { 862 binder_inner_proc_lock(thread->proc); 863 binder_enqueue_thread_work_ilocked(thread, work); 864 binder_inner_proc_unlock(thread->proc); 865 } 866 867 static void 868 binder_dequeue_work_ilocked(struct binder_work *work) 869 { 870 list_del_init(&work->entry); 871 } 872 873 /** 874 * binder_dequeue_work() - Removes an item from the work list 875 * @proc: binder_proc associated with list 876 * @work: struct binder_work to remove from list 877 * 878 * Removes the specified work item from whatever list it is on. 879 * Can safely be called if work is not on any list. 880 */ 881 static void 882 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work) 883 { 884 binder_inner_proc_lock(proc); 885 binder_dequeue_work_ilocked(work); 886 binder_inner_proc_unlock(proc); 887 } 888 889 static struct binder_work *binder_dequeue_work_head_ilocked( 890 struct list_head *list) 891 { 892 struct binder_work *w; 893 894 w = list_first_entry_or_null(list, struct binder_work, entry); 895 if (w) 896 list_del_init(&w->entry); 897 return w; 898 } 899 900 /** 901 * binder_dequeue_work_head() - Dequeues the item at head of list 902 * @proc: binder_proc associated with list 903 * @list: list to dequeue head 904 * 905 * Removes the head of the list if there are items on the list 906 * 907 * Return: pointer dequeued binder_work, NULL if list was empty 908 */ 909 static struct binder_work *binder_dequeue_work_head( 910 struct binder_proc *proc, 911 struct list_head *list) 912 { 913 struct binder_work *w; 914 915 binder_inner_proc_lock(proc); 916 w = binder_dequeue_work_head_ilocked(list); 917 binder_inner_proc_unlock(proc); 918 return w; 919 } 920 921 static void 922 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); 923 static void binder_free_thread(struct binder_thread *thread); 924 static void binder_free_proc(struct binder_proc *proc); 925 static void binder_inc_node_tmpref_ilocked(struct binder_node *node); 926 927 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) 928 { 929 unsigned long rlim_cur; 930 unsigned long irqs; 931 int ret; 932 933 mutex_lock(&proc->files_lock); 934 if (proc->files == NULL) { 935 ret = -ESRCH; 936 goto err; 937 } 938 if (!lock_task_sighand(proc->tsk, &irqs)) { 939 ret = -EMFILE; 940 goto err; 941 } 942 rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE); 943 unlock_task_sighand(proc->tsk, 
&irqs); 944 945 ret = __alloc_fd(proc->files, 0, rlim_cur, flags); 946 err: 947 mutex_unlock(&proc->files_lock); 948 return ret; 949 } 950 951 /* 952 * copied from fd_install 953 */ 954 static void task_fd_install( 955 struct binder_proc *proc, unsigned int fd, struct file *file) 956 { 957 mutex_lock(&proc->files_lock); 958 if (proc->files) 959 __fd_install(proc->files, fd, file); 960 mutex_unlock(&proc->files_lock); 961 } 962 963 /* 964 * copied from sys_close 965 */ 966 static long task_close_fd(struct binder_proc *proc, unsigned int fd) 967 { 968 int retval; 969 970 mutex_lock(&proc->files_lock); 971 if (proc->files == NULL) { 972 retval = -ESRCH; 973 goto err; 974 } 975 retval = __close_fd(proc->files, fd); 976 /* can't restart close syscall because file table entry was cleared */ 977 if (unlikely(retval == -ERESTARTSYS || 978 retval == -ERESTARTNOINTR || 979 retval == -ERESTARTNOHAND || 980 retval == -ERESTART_RESTARTBLOCK)) 981 retval = -EINTR; 982 err: 983 mutex_unlock(&proc->files_lock); 984 return retval; 985 } 986 987 static bool binder_has_work_ilocked(struct binder_thread *thread, 988 bool do_proc_work) 989 { 990 return thread->process_todo || 991 thread->looper_need_return || 992 (do_proc_work && 993 !binder_worklist_empty_ilocked(&thread->proc->todo)); 994 } 995 996 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work) 997 { 998 bool has_work; 999 1000 binder_inner_proc_lock(thread->proc); 1001 has_work = binder_has_work_ilocked(thread, do_proc_work); 1002 binder_inner_proc_unlock(thread->proc); 1003 1004 return has_work; 1005 } 1006 1007 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread) 1008 { 1009 return !thread->transaction_stack && 1010 binder_worklist_empty_ilocked(&thread->todo) && 1011 (thread->looper & (BINDER_LOOPER_STATE_ENTERED | 1012 BINDER_LOOPER_STATE_REGISTERED)); 1013 } 1014 1015 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc, 1016 bool sync) 1017 { 1018 struct rb_node *n; 1019 struct binder_thread *thread; 1020 1021 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { 1022 thread = rb_entry(n, struct binder_thread, rb_node); 1023 if (thread->looper & BINDER_LOOPER_STATE_POLL && 1024 binder_available_for_proc_work_ilocked(thread)) { 1025 if (sync) 1026 wake_up_interruptible_sync(&thread->wait); 1027 else 1028 wake_up_interruptible(&thread->wait); 1029 } 1030 } 1031 } 1032 1033 /** 1034 * binder_select_thread_ilocked() - selects a thread for doing proc work. 1035 * @proc: process to select a thread from 1036 * 1037 * Note that calling this function moves the thread off the waiting_threads 1038 * list, so it can only be woken up by the caller of this function, or a 1039 * signal. Therefore, callers *should* always wake up the thread this function 1040 * returns. 1041 * 1042 * Return: If there's a thread currently waiting for process work, 1043 * returns that thread. Otherwise returns NULL. 1044 */ 1045 static struct binder_thread * 1046 binder_select_thread_ilocked(struct binder_proc *proc) 1047 { 1048 struct binder_thread *thread; 1049 1050 assert_spin_locked(&proc->inner_lock); 1051 thread = list_first_entry_or_null(&proc->waiting_threads, 1052 struct binder_thread, 1053 waiting_thread_node); 1054 1055 if (thread) 1056 list_del_init(&thread->waiting_thread_node); 1057 1058 return thread; 1059 } 1060 1061 /** 1062 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work. 
1063 * @proc: process to wake up a thread in 1064 * @thread: specific thread to wake-up (may be NULL) 1065 * @sync: whether to do a synchronous wake-up 1066 * 1067 * This function wakes up a thread in the @proc process. 1068 * The caller may provide a specific thread to wake-up in 1069 * the @thread parameter. If @thread is NULL, this function 1070 * will wake up threads that have called poll(). 1071 * 1072 * Note that for this function to work as expected, callers 1073 * should first call binder_select_thread() to find a thread 1074 * to handle the work (if they don't have a thread already), 1075 * and pass the result into the @thread parameter. 1076 */ 1077 static void binder_wakeup_thread_ilocked(struct binder_proc *proc, 1078 struct binder_thread *thread, 1079 bool sync) 1080 { 1081 assert_spin_locked(&proc->inner_lock); 1082 1083 if (thread) { 1084 if (sync) 1085 wake_up_interruptible_sync(&thread->wait); 1086 else 1087 wake_up_interruptible(&thread->wait); 1088 return; 1089 } 1090 1091 /* Didn't find a thread waiting for proc work; this can happen 1092 * in two scenarios: 1093 * 1. All threads are busy handling transactions 1094 * In that case, one of those threads should call back into 1095 * the kernel driver soon and pick up this work. 1096 * 2. Threads are using the (e)poll interface, in which case 1097 * they may be blocked on the waitqueue without having been 1098 * added to waiting_threads. For this case, we just iterate 1099 * over all threads not handling transaction work, and 1100 * wake them all up. We wake all because we don't know whether 1101 * a thread that called into (e)poll is handling non-binder 1102 * work currently. 1103 */ 1104 binder_wakeup_poll_threads_ilocked(proc, sync); 1105 } 1106 1107 static void binder_wakeup_proc_ilocked(struct binder_proc *proc) 1108 { 1109 struct binder_thread *thread = binder_select_thread_ilocked(proc); 1110 1111 binder_wakeup_thread_ilocked(proc, thread, /* sync = */false); 1112 } 1113 1114 static void binder_set_nice(long nice) 1115 { 1116 long min_nice; 1117 1118 if (can_nice(current, nice)) { 1119 set_user_nice(current, nice); 1120 return; 1121 } 1122 min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE)); 1123 binder_debug(BINDER_DEBUG_PRIORITY_CAP, 1124 "%d: nice value %ld not allowed use %ld instead\n", 1125 current->pid, nice, min_nice); 1126 set_user_nice(current, min_nice); 1127 if (min_nice <= MAX_NICE) 1128 return; 1129 binder_user_error("%d RLIMIT_NICE not set\n", current->pid); 1130 } 1131 1132 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc, 1133 binder_uintptr_t ptr) 1134 { 1135 struct rb_node *n = proc->nodes.rb_node; 1136 struct binder_node *node; 1137 1138 assert_spin_locked(&proc->inner_lock); 1139 1140 while (n) { 1141 node = rb_entry(n, struct binder_node, rb_node); 1142 1143 if (ptr < node->ptr) 1144 n = n->rb_left; 1145 else if (ptr > node->ptr) 1146 n = n->rb_right; 1147 else { 1148 /* 1149 * take an implicit weak reference 1150 * to ensure node stays alive until 1151 * call to binder_put_node() 1152 */ 1153 binder_inc_node_tmpref_ilocked(node); 1154 return node; 1155 } 1156 } 1157 return NULL; 1158 } 1159 1160 static struct binder_node *binder_get_node(struct binder_proc *proc, 1161 binder_uintptr_t ptr) 1162 { 1163 struct binder_node *node; 1164 1165 binder_inner_proc_lock(proc); 1166 node = binder_get_node_ilocked(proc, ptr); 1167 binder_inner_proc_unlock(proc); 1168 return node; 1169 } 1170 1171 static struct binder_node *binder_init_node_ilocked( 1172 struct binder_proc *proc, 1173 
struct binder_node *new_node, 1174 struct flat_binder_object *fp) 1175 { 1176 struct rb_node **p = &proc->nodes.rb_node; 1177 struct rb_node *parent = NULL; 1178 struct binder_node *node; 1179 binder_uintptr_t ptr = fp ? fp->binder : 0; 1180 binder_uintptr_t cookie = fp ? fp->cookie : 0; 1181 __u32 flags = fp ? fp->flags : 0; 1182 1183 assert_spin_locked(&proc->inner_lock); 1184 1185 while (*p) { 1186 1187 parent = *p; 1188 node = rb_entry(parent, struct binder_node, rb_node); 1189 1190 if (ptr < node->ptr) 1191 p = &(*p)->rb_left; 1192 else if (ptr > node->ptr) 1193 p = &(*p)->rb_right; 1194 else { 1195 /* 1196 * A matching node is already in 1197 * the rb tree. Abandon the init 1198 * and return it. 1199 */ 1200 binder_inc_node_tmpref_ilocked(node); 1201 return node; 1202 } 1203 } 1204 node = new_node; 1205 binder_stats_created(BINDER_STAT_NODE); 1206 node->tmp_refs++; 1207 rb_link_node(&node->rb_node, parent, p); 1208 rb_insert_color(&node->rb_node, &proc->nodes); 1209 node->debug_id = atomic_inc_return(&binder_last_id); 1210 node->proc = proc; 1211 node->ptr = ptr; 1212 node->cookie = cookie; 1213 node->work.type = BINDER_WORK_NODE; 1214 node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK; 1215 node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); 1216 spin_lock_init(&node->lock); 1217 INIT_LIST_HEAD(&node->work.entry); 1218 INIT_LIST_HEAD(&node->async_todo); 1219 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1220 "%d:%d node %d u%016llx c%016llx created\n", 1221 proc->pid, current->pid, node->debug_id, 1222 (u64)node->ptr, (u64)node->cookie); 1223 1224 return node; 1225 } 1226 1227 static struct binder_node *binder_new_node(struct binder_proc *proc, 1228 struct flat_binder_object *fp) 1229 { 1230 struct binder_node *node; 1231 struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL); 1232 1233 if (!new_node) 1234 return NULL; 1235 binder_inner_proc_lock(proc); 1236 node = binder_init_node_ilocked(proc, new_node, fp); 1237 binder_inner_proc_unlock(proc); 1238 if (node != new_node) 1239 /* 1240 * The node was already added by another thread 1241 */ 1242 kfree(new_node); 1243 1244 return node; 1245 } 1246 1247 static void binder_free_node(struct binder_node *node) 1248 { 1249 kfree(node); 1250 binder_stats_deleted(BINDER_STAT_NODE); 1251 } 1252 1253 static int binder_inc_node_nilocked(struct binder_node *node, int strong, 1254 int internal, 1255 struct list_head *target_list) 1256 { 1257 struct binder_proc *proc = node->proc; 1258 1259 assert_spin_locked(&node->lock); 1260 if (proc) 1261 assert_spin_locked(&proc->inner_lock); 1262 if (strong) { 1263 if (internal) { 1264 if (target_list == NULL && 1265 node->internal_strong_refs == 0 && 1266 !(node->proc && 1267 node == node->proc->context->binder_context_mgr_node && 1268 node->has_strong_ref)) { 1269 pr_err("invalid inc strong node for %d\n", 1270 node->debug_id); 1271 return -EINVAL; 1272 } 1273 node->internal_strong_refs++; 1274 } else 1275 node->local_strong_refs++; 1276 if (!node->has_strong_ref && target_list) { 1277 binder_dequeue_work_ilocked(&node->work); 1278 /* 1279 * Note: this function is the only place where we queue 1280 * directly to a thread->todo without using the 1281 * corresponding binder_enqueue_thread_work() helper 1282 * functions; in this case it's ok to not set the 1283 * process_todo flag, since we know this node work will 1284 * always be followed by other work that starts queue 1285 * processing: in case of synchronous transactions, a 1286 * BR_REPLY or BR_ERROR; in case of oneway 1287 * 
transactions, a BR_TRANSACTION_COMPLETE. 1288 */ 1289 binder_enqueue_work_ilocked(&node->work, target_list); 1290 } 1291 } else { 1292 if (!internal) 1293 node->local_weak_refs++; 1294 if (!node->has_weak_ref && list_empty(&node->work.entry)) { 1295 if (target_list == NULL) { 1296 pr_err("invalid inc weak node for %d\n", 1297 node->debug_id); 1298 return -EINVAL; 1299 } 1300 /* 1301 * See comment above 1302 */ 1303 binder_enqueue_work_ilocked(&node->work, target_list); 1304 } 1305 } 1306 return 0; 1307 } 1308 1309 static int binder_inc_node(struct binder_node *node, int strong, int internal, 1310 struct list_head *target_list) 1311 { 1312 int ret; 1313 1314 binder_node_inner_lock(node); 1315 ret = binder_inc_node_nilocked(node, strong, internal, target_list); 1316 binder_node_inner_unlock(node); 1317 1318 return ret; 1319 } 1320 1321 static bool binder_dec_node_nilocked(struct binder_node *node, 1322 int strong, int internal) 1323 { 1324 struct binder_proc *proc = node->proc; 1325 1326 assert_spin_locked(&node->lock); 1327 if (proc) 1328 assert_spin_locked(&proc->inner_lock); 1329 if (strong) { 1330 if (internal) 1331 node->internal_strong_refs--; 1332 else 1333 node->local_strong_refs--; 1334 if (node->local_strong_refs || node->internal_strong_refs) 1335 return false; 1336 } else { 1337 if (!internal) 1338 node->local_weak_refs--; 1339 if (node->local_weak_refs || node->tmp_refs || 1340 !hlist_empty(&node->refs)) 1341 return false; 1342 } 1343 1344 if (proc && (node->has_strong_ref || node->has_weak_ref)) { 1345 if (list_empty(&node->work.entry)) { 1346 binder_enqueue_work_ilocked(&node->work, &proc->todo); 1347 binder_wakeup_proc_ilocked(proc); 1348 } 1349 } else { 1350 if (hlist_empty(&node->refs) && !node->local_strong_refs && 1351 !node->local_weak_refs && !node->tmp_refs) { 1352 if (proc) { 1353 binder_dequeue_work_ilocked(&node->work); 1354 rb_erase(&node->rb_node, &proc->nodes); 1355 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1356 "refless node %d deleted\n", 1357 node->debug_id); 1358 } else { 1359 BUG_ON(!list_empty(&node->work.entry)); 1360 spin_lock(&binder_dead_nodes_lock); 1361 /* 1362 * tmp_refs could have changed so 1363 * check it again 1364 */ 1365 if (node->tmp_refs) { 1366 spin_unlock(&binder_dead_nodes_lock); 1367 return false; 1368 } 1369 hlist_del(&node->dead_node); 1370 spin_unlock(&binder_dead_nodes_lock); 1371 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1372 "dead node %d deleted\n", 1373 node->debug_id); 1374 } 1375 return true; 1376 } 1377 } 1378 return false; 1379 } 1380 1381 static void binder_dec_node(struct binder_node *node, int strong, int internal) 1382 { 1383 bool free_node; 1384 1385 binder_node_inner_lock(node); 1386 free_node = binder_dec_node_nilocked(node, strong, internal); 1387 binder_node_inner_unlock(node); 1388 if (free_node) 1389 binder_free_node(node); 1390 } 1391 1392 static void binder_inc_node_tmpref_ilocked(struct binder_node *node) 1393 { 1394 /* 1395 * No call to binder_inc_node() is needed since we 1396 * don't need to inform userspace of any changes to 1397 * tmp_refs 1398 */ 1399 node->tmp_refs++; 1400 } 1401 1402 /** 1403 * binder_inc_node_tmpref() - take a temporary reference on node 1404 * @node: node to reference 1405 * 1406 * Take reference on node to prevent the node from being freed 1407 * while referenced only by a local variable. The inner lock is 1408 * needed to serialize with the node work on the queue (which 1409 * isn't needed after the node is dead). 
If the node is dead 1410 * (node->proc is NULL), use binder_dead_nodes_lock to protect 1411 * node->tmp_refs against dead-node-only cases where the node 1412 * lock cannot be acquired (eg traversing the dead node list to 1413 * print nodes) 1414 */ 1415 static void binder_inc_node_tmpref(struct binder_node *node) 1416 { 1417 binder_node_lock(node); 1418 if (node->proc) 1419 binder_inner_proc_lock(node->proc); 1420 else 1421 spin_lock(&binder_dead_nodes_lock); 1422 binder_inc_node_tmpref_ilocked(node); 1423 if (node->proc) 1424 binder_inner_proc_unlock(node->proc); 1425 else 1426 spin_unlock(&binder_dead_nodes_lock); 1427 binder_node_unlock(node); 1428 } 1429 1430 /** 1431 * binder_dec_node_tmpref() - remove a temporary reference on node 1432 * @node: node to reference 1433 * 1434 * Release temporary reference on node taken via binder_inc_node_tmpref() 1435 */ 1436 static void binder_dec_node_tmpref(struct binder_node *node) 1437 { 1438 bool free_node; 1439 1440 binder_node_inner_lock(node); 1441 if (!node->proc) 1442 spin_lock(&binder_dead_nodes_lock); 1443 node->tmp_refs--; 1444 BUG_ON(node->tmp_refs < 0); 1445 if (!node->proc) 1446 spin_unlock(&binder_dead_nodes_lock); 1447 /* 1448 * Call binder_dec_node() to check if all refcounts are 0 1449 * and cleanup is needed. Calling with strong=0 and internal=1 1450 * causes no actual reference to be released in binder_dec_node(). 1451 * If that changes, a change is needed here too. 1452 */ 1453 free_node = binder_dec_node_nilocked(node, 0, 1); 1454 binder_node_inner_unlock(node); 1455 if (free_node) 1456 binder_free_node(node); 1457 } 1458 1459 static void binder_put_node(struct binder_node *node) 1460 { 1461 binder_dec_node_tmpref(node); 1462 } 1463 1464 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc, 1465 u32 desc, bool need_strong_ref) 1466 { 1467 struct rb_node *n = proc->refs_by_desc.rb_node; 1468 struct binder_ref *ref; 1469 1470 while (n) { 1471 ref = rb_entry(n, struct binder_ref, rb_node_desc); 1472 1473 if (desc < ref->data.desc) { 1474 n = n->rb_left; 1475 } else if (desc > ref->data.desc) { 1476 n = n->rb_right; 1477 } else if (need_strong_ref && !ref->data.strong) { 1478 binder_user_error("tried to use weak ref as strong ref\n"); 1479 return NULL; 1480 } else { 1481 return ref; 1482 } 1483 } 1484 return NULL; 1485 } 1486 1487 /** 1488 * binder_get_ref_for_node_olocked() - get the ref associated with given node 1489 * @proc: binder_proc that owns the ref 1490 * @node: binder_node of target 1491 * @new_ref: newly allocated binder_ref to be initialized or %NULL 1492 * 1493 * Look up the ref for the given node and return it if it exists 1494 * 1495 * If it doesn't exist and the caller provides a newly allocated 1496 * ref, initialize the fields of the newly allocated ref and insert 1497 * into the given proc rb_trees and node refs list. 1498 * 1499 * Return: the ref for node. It is possible that another thread 1500 * allocated/initialized the ref first in which case the 1501 * returned ref would be different than the passed-in 1502 * new_ref. new_ref must be kfree'd by the caller in 1503 * this case. 
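 *
 * A sketch of the expected caller-side pattern (binder_inc_ref_for_node()
 * below follows this shape): look up without an allocation first, and only
 * allocate and retry when no ref exists yet:
 *
 *   ref = binder_get_ref_for_node_olocked(proc, node, NULL);
 *   if (!ref) {
 *           binder_proc_unlock(proc);
 *           new_ref = kzalloc(sizeof(*new_ref), GFP_KERNEL);
 *           binder_proc_lock(proc);
 *           ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
 *   }
 *   ...
 *   if (new_ref && ref != new_ref)
 *           kfree(new_ref);        (another thread created the ref first)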
1504 */ 1505 static struct binder_ref *binder_get_ref_for_node_olocked( 1506 struct binder_proc *proc, 1507 struct binder_node *node, 1508 struct binder_ref *new_ref) 1509 { 1510 struct binder_context *context = proc->context; 1511 struct rb_node **p = &proc->refs_by_node.rb_node; 1512 struct rb_node *parent = NULL; 1513 struct binder_ref *ref; 1514 struct rb_node *n; 1515 1516 while (*p) { 1517 parent = *p; 1518 ref = rb_entry(parent, struct binder_ref, rb_node_node); 1519 1520 if (node < ref->node) 1521 p = &(*p)->rb_left; 1522 else if (node > ref->node) 1523 p = &(*p)->rb_right; 1524 else 1525 return ref; 1526 } 1527 if (!new_ref) 1528 return NULL; 1529 1530 binder_stats_created(BINDER_STAT_REF); 1531 new_ref->data.debug_id = atomic_inc_return(&binder_last_id); 1532 new_ref->proc = proc; 1533 new_ref->node = node; 1534 rb_link_node(&new_ref->rb_node_node, parent, p); 1535 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node); 1536 1537 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1; 1538 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { 1539 ref = rb_entry(n, struct binder_ref, rb_node_desc); 1540 if (ref->data.desc > new_ref->data.desc) 1541 break; 1542 new_ref->data.desc = ref->data.desc + 1; 1543 } 1544 1545 p = &proc->refs_by_desc.rb_node; 1546 while (*p) { 1547 parent = *p; 1548 ref = rb_entry(parent, struct binder_ref, rb_node_desc); 1549 1550 if (new_ref->data.desc < ref->data.desc) 1551 p = &(*p)->rb_left; 1552 else if (new_ref->data.desc > ref->data.desc) 1553 p = &(*p)->rb_right; 1554 else 1555 BUG(); 1556 } 1557 rb_link_node(&new_ref->rb_node_desc, parent, p); 1558 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc); 1559 1560 binder_node_lock(node); 1561 hlist_add_head(&new_ref->node_entry, &node->refs); 1562 1563 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1564 "%d new ref %d desc %d for node %d\n", 1565 proc->pid, new_ref->data.debug_id, new_ref->data.desc, 1566 node->debug_id); 1567 binder_node_unlock(node); 1568 return new_ref; 1569 } 1570 1571 static void binder_cleanup_ref_olocked(struct binder_ref *ref) 1572 { 1573 bool delete_node = false; 1574 1575 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1576 "%d delete ref %d desc %d for node %d\n", 1577 ref->proc->pid, ref->data.debug_id, ref->data.desc, 1578 ref->node->debug_id); 1579 1580 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc); 1581 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node); 1582 1583 binder_node_inner_lock(ref->node); 1584 if (ref->data.strong) 1585 binder_dec_node_nilocked(ref->node, 1, 1); 1586 1587 hlist_del(&ref->node_entry); 1588 delete_node = binder_dec_node_nilocked(ref->node, 0, 1); 1589 binder_node_inner_unlock(ref->node); 1590 /* 1591 * Clear ref->node unless we want the caller to free the node 1592 */ 1593 if (!delete_node) { 1594 /* 1595 * The caller uses ref->node to determine 1596 * whether the node needs to be freed. Clear 1597 * it since the node is still alive. 
1598 */ 1599 ref->node = NULL; 1600 } 1601 1602 if (ref->death) { 1603 binder_debug(BINDER_DEBUG_DEAD_BINDER, 1604 "%d delete ref %d desc %d has death notification\n", 1605 ref->proc->pid, ref->data.debug_id, 1606 ref->data.desc); 1607 binder_dequeue_work(ref->proc, &ref->death->work); 1608 binder_stats_deleted(BINDER_STAT_DEATH); 1609 } 1610 binder_stats_deleted(BINDER_STAT_REF); 1611 } 1612 1613 /** 1614 * binder_inc_ref_olocked() - increment the ref for given handle 1615 * @ref: ref to be incremented 1616 * @strong: if true, strong increment, else weak 1617 * @target_list: list to queue node work on 1618 * 1619 * Increment the ref. @ref->proc->outer_lock must be held on entry 1620 * 1621 * Return: 0, if successful, else errno 1622 */ 1623 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong, 1624 struct list_head *target_list) 1625 { 1626 int ret; 1627 1628 if (strong) { 1629 if (ref->data.strong == 0) { 1630 ret = binder_inc_node(ref->node, 1, 1, target_list); 1631 if (ret) 1632 return ret; 1633 } 1634 ref->data.strong++; 1635 } else { 1636 if (ref->data.weak == 0) { 1637 ret = binder_inc_node(ref->node, 0, 1, target_list); 1638 if (ret) 1639 return ret; 1640 } 1641 ref->data.weak++; 1642 } 1643 return 0; 1644 } 1645 1646 /** 1647 * binder_dec_ref() - dec the ref for given handle 1648 * @ref: ref to be decremented 1649 * @strong: if true, strong decrement, else weak 1650 * 1651 * Decrement the ref. 1652 * 1653 * Return: true if ref is cleaned up and ready to be freed 1654 */ 1655 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong) 1656 { 1657 if (strong) { 1658 if (ref->data.strong == 0) { 1659 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n", 1660 ref->proc->pid, ref->data.debug_id, 1661 ref->data.desc, ref->data.strong, 1662 ref->data.weak); 1663 return false; 1664 } 1665 ref->data.strong--; 1666 if (ref->data.strong == 0) 1667 binder_dec_node(ref->node, strong, 1); 1668 } else { 1669 if (ref->data.weak == 0) { 1670 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n", 1671 ref->proc->pid, ref->data.debug_id, 1672 ref->data.desc, ref->data.strong, 1673 ref->data.weak); 1674 return false; 1675 } 1676 ref->data.weak--; 1677 } 1678 if (ref->data.strong == 0 && ref->data.weak == 0) { 1679 binder_cleanup_ref_olocked(ref); 1680 return true; 1681 } 1682 return false; 1683 } 1684 1685 /** 1686 * binder_get_node_from_ref() - get the node from the given proc/desc 1687 * @proc: proc containing the ref 1688 * @desc: the handle associated with the ref 1689 * @need_strong_ref: if true, only return node if ref is strong 1690 * @rdata: the id/refcount data for the ref 1691 * 1692 * Given a proc and ref handle, return the associated binder_node 1693 * 1694 * Return: a binder_node or NULL if not found or not strong when strong required 1695 */ 1696 static struct binder_node *binder_get_node_from_ref( 1697 struct binder_proc *proc, 1698 u32 desc, bool need_strong_ref, 1699 struct binder_ref_data *rdata) 1700 { 1701 struct binder_node *node; 1702 struct binder_ref *ref; 1703 1704 binder_proc_lock(proc); 1705 ref = binder_get_ref_olocked(proc, desc, need_strong_ref); 1706 if (!ref) 1707 goto err_no_ref; 1708 node = ref->node; 1709 /* 1710 * Take an implicit reference on the node to ensure 1711 * it stays alive until the call to binder_put_node() 1712 */ 1713 binder_inc_node_tmpref(node); 1714 if (rdata) 1715 *rdata = ref->data; 1716 binder_proc_unlock(proc); 1717 1718 return node; 1719 1720 err_no_ref: 1721 binder_proc_unlock(proc); 
1722 return NULL; 1723 } 1724 1725 /** 1726 * binder_free_ref() - free the binder_ref 1727 * @ref: ref to free 1728 * 1729 * Free the binder_ref. Free the binder_node indicated by ref->node 1730 * (if non-NULL) and the binder_ref_death indicated by ref->death. 1731 */ 1732 static void binder_free_ref(struct binder_ref *ref) 1733 { 1734 if (ref->node) 1735 binder_free_node(ref->node); 1736 kfree(ref->death); 1737 kfree(ref); 1738 } 1739 1740 /** 1741 * binder_update_ref_for_handle() - inc/dec the ref for given handle 1742 * @proc: proc containing the ref 1743 * @desc: the handle associated with the ref 1744 * @increment: true=inc reference, false=dec reference 1745 * @strong: true=strong reference, false=weak reference 1746 * @rdata: the id/refcount data for the ref 1747 * 1748 * Given a proc and ref handle, increment or decrement the ref 1749 * according to "increment" arg. 1750 * 1751 * Return: 0 if successful, else errno 1752 */ 1753 static int binder_update_ref_for_handle(struct binder_proc *proc, 1754 uint32_t desc, bool increment, bool strong, 1755 struct binder_ref_data *rdata) 1756 { 1757 int ret = 0; 1758 struct binder_ref *ref; 1759 bool delete_ref = false; 1760 1761 binder_proc_lock(proc); 1762 ref = binder_get_ref_olocked(proc, desc, strong); 1763 if (!ref) { 1764 ret = -EINVAL; 1765 goto err_no_ref; 1766 } 1767 if (increment) 1768 ret = binder_inc_ref_olocked(ref, strong, NULL); 1769 else 1770 delete_ref = binder_dec_ref_olocked(ref, strong); 1771 1772 if (rdata) 1773 *rdata = ref->data; 1774 binder_proc_unlock(proc); 1775 1776 if (delete_ref) 1777 binder_free_ref(ref); 1778 return ret; 1779 1780 err_no_ref: 1781 binder_proc_unlock(proc); 1782 return ret; 1783 } 1784 1785 /** 1786 * binder_dec_ref_for_handle() - dec the ref for given handle 1787 * @proc: proc containing the ref 1788 * @desc: the handle associated with the ref 1789 * @strong: true=strong reference, false=weak reference 1790 * @rdata: the id/refcount data for the ref 1791 * 1792 * Just calls binder_update_ref_for_handle() to decrement the ref. 1793 * 1794 * Return: 0 if successful, else errno 1795 */ 1796 static int binder_dec_ref_for_handle(struct binder_proc *proc, 1797 uint32_t desc, bool strong, struct binder_ref_data *rdata) 1798 { 1799 return binder_update_ref_for_handle(proc, desc, false, strong, rdata); 1800 } 1801 1802 1803 /** 1804 * binder_inc_ref_for_node() - increment the ref for given proc/node 1805 * @proc: proc containing the ref 1806 * @node: target node 1807 * @strong: true=strong reference, false=weak reference 1808 * @target_list: worklist to use if node is incremented 1809 * @rdata: the id/refcount data for the ref 1810 * 1811 * Given a proc and node, increment the ref. 
Create the ref if it 1812 * doesn't already exist 1813 * 1814 * Return: 0 if successful, else errno 1815 */ 1816 static int binder_inc_ref_for_node(struct binder_proc *proc, 1817 struct binder_node *node, 1818 bool strong, 1819 struct list_head *target_list, 1820 struct binder_ref_data *rdata) 1821 { 1822 struct binder_ref *ref; 1823 struct binder_ref *new_ref = NULL; 1824 int ret = 0; 1825 1826 binder_proc_lock(proc); 1827 ref = binder_get_ref_for_node_olocked(proc, node, NULL); 1828 if (!ref) { 1829 binder_proc_unlock(proc); 1830 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL); 1831 if (!new_ref) 1832 return -ENOMEM; 1833 binder_proc_lock(proc); 1834 ref = binder_get_ref_for_node_olocked(proc, node, new_ref); 1835 } 1836 ret = binder_inc_ref_olocked(ref, strong, target_list); 1837 *rdata = ref->data; 1838 binder_proc_unlock(proc); 1839 if (new_ref && ref != new_ref) 1840 /* 1841 * Another thread created the ref first so 1842 * free the one we allocated 1843 */ 1844 kfree(new_ref); 1845 return ret; 1846 } 1847 1848 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread, 1849 struct binder_transaction *t) 1850 { 1851 BUG_ON(!target_thread); 1852 assert_spin_locked(&target_thread->proc->inner_lock); 1853 BUG_ON(target_thread->transaction_stack != t); 1854 BUG_ON(target_thread->transaction_stack->from != target_thread); 1855 target_thread->transaction_stack = 1856 target_thread->transaction_stack->from_parent; 1857 t->from = NULL; 1858 } 1859 1860 /** 1861 * binder_thread_dec_tmpref() - decrement thread->tmp_ref 1862 * @thread: thread to decrement 1863 * 1864 * A thread needs to be kept alive while being used to create or 1865 * handle a transaction. binder_get_txn_from() is used to safely 1866 * extract t->from from a binder_transaction and keep the thread 1867 * indicated by t->from from being freed. When done with that 1868 * binder_thread, this function is called to decrement the 1869 * tmp_ref and free if appropriate (thread has been released 1870 * and no transaction being processed by the driver) 1871 */ 1872 static void binder_thread_dec_tmpref(struct binder_thread *thread) 1873 { 1874 /* 1875 * atomic is used to protect the counter value while 1876 * it cannot reach zero or thread->is_dead is false 1877 */ 1878 binder_inner_proc_lock(thread->proc); 1879 atomic_dec(&thread->tmp_ref); 1880 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) { 1881 binder_inner_proc_unlock(thread->proc); 1882 binder_free_thread(thread); 1883 return; 1884 } 1885 binder_inner_proc_unlock(thread->proc); 1886 } 1887 1888 /** 1889 * binder_proc_dec_tmpref() - decrement proc->tmp_ref 1890 * @proc: proc to decrement 1891 * 1892 * A binder_proc needs to be kept alive while being used to create or 1893 * handle a transaction. proc->tmp_ref is incremented when 1894 * creating a new transaction or the binder_proc is currently in-use 1895 * by threads that are being released. When done with the binder_proc, 1896 * this function is called to decrement the counter and free the 1897 * proc if appropriate (proc has been released, all threads have 1898 * been released and not currenly in-use to process a transaction). 
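 *
 * Unlike thread->tmp_ref, which is an atomic_t, proc->tmp_ref is a plain
 * int protected by proc->inner_lock, so the decrement and the is_dead /
 * threads checks below are all performed under that lock.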
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from)
		return NULL;
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
	return NULL;
}

static void binder_free_transaction(struct binder_transaction *t)
{
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				WARN(1, "Unexpected reply error: %u\n",
				     target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at
root\n"); 2015 return; 2016 } 2017 t = next; 2018 binder_debug(BINDER_DEBUG_DEAD_BINDER, 2019 "reply failed, no target thread -- retry %d\n", 2020 t->debug_id); 2021 } 2022 } 2023 2024 /** 2025 * binder_cleanup_transaction() - cleans up undelivered transaction 2026 * @t: transaction that needs to be cleaned up 2027 * @reason: reason the transaction wasn't delivered 2028 * @error_code: error to return to caller (if synchronous call) 2029 */ 2030 static void binder_cleanup_transaction(struct binder_transaction *t, 2031 const char *reason, 2032 uint32_t error_code) 2033 { 2034 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) { 2035 binder_send_failed_reply(t, error_code); 2036 } else { 2037 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 2038 "undelivered transaction %d, %s\n", 2039 t->debug_id, reason); 2040 binder_free_transaction(t); 2041 } 2042 } 2043 2044 /** 2045 * binder_validate_object() - checks for a valid metadata object in a buffer. 2046 * @buffer: binder_buffer that we're parsing. 2047 * @offset: offset in the buffer at which to validate an object. 2048 * 2049 * Return: If there's a valid metadata object at @offset in @buffer, the 2050 * size of that object. Otherwise, it returns zero. 2051 */ 2052 static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset) 2053 { 2054 /* Check if we can read a header first */ 2055 struct binder_object_header *hdr; 2056 size_t object_size = 0; 2057 2058 if (offset > buffer->data_size - sizeof(*hdr) || 2059 buffer->data_size < sizeof(*hdr) || 2060 !IS_ALIGNED(offset, sizeof(u32))) 2061 return 0; 2062 2063 /* Ok, now see if we can read a complete object. */ 2064 hdr = (struct binder_object_header *)(buffer->data + offset); 2065 switch (hdr->type) { 2066 case BINDER_TYPE_BINDER: 2067 case BINDER_TYPE_WEAK_BINDER: 2068 case BINDER_TYPE_HANDLE: 2069 case BINDER_TYPE_WEAK_HANDLE: 2070 object_size = sizeof(struct flat_binder_object); 2071 break; 2072 case BINDER_TYPE_FD: 2073 object_size = sizeof(struct binder_fd_object); 2074 break; 2075 case BINDER_TYPE_PTR: 2076 object_size = sizeof(struct binder_buffer_object); 2077 break; 2078 case BINDER_TYPE_FDA: 2079 object_size = sizeof(struct binder_fd_array_object); 2080 break; 2081 default: 2082 return 0; 2083 } 2084 if (offset <= buffer->data_size - object_size && 2085 buffer->data_size >= object_size) 2086 return object_size; 2087 else 2088 return 0; 2089 } 2090 2091 /** 2092 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer. 2093 * @b: binder_buffer containing the object 2094 * @index: index in offset array at which the binder_buffer_object is 2095 * located 2096 * @start: points to the start of the offset array 2097 * @num_valid: the number of valid offsets in the offset array 2098 * 2099 * Return: If @index is within the valid range of the offset array 2100 * described by @start and @num_valid, and if there's a valid 2101 * binder_buffer_object at the offset found in index @index 2102 * of the offset array, that object is returned. Otherwise, 2103 * %NULL is returned. 2104 * Note that the offset found in index @index itself is not 2105 * verified; this function assumes that @num_valid elements 2106 * from @start were previously verified to have valid offsets. 
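 *
 * Callers in this file pass the start of the offsets array and the
 * number of offsets validated so far, for example (illustrative,
 * mirroring the BINDER_TYPE_FDA handling later in binder_transaction()):
 *
 *	parent = binder_validate_ptr(t->buffer, fda->parent,
 *				     off_start, offp - off_start);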
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
						binder_size_t index,
						binder_size_t *start,
						binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}

/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b:			transaction buffer
 * @objects_start:	start of objects buffer
 * @buffer:		binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @buffer to fix up
 * @last_obj:		last binder_buffer_object that we fixed up in
 * @last_min_offset:	minimum fixup offset in @last_obj
 *
 * Return: %true if a fixup in buffer @buffer at offset @fixup_offset is
 * allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_buffer *b,
				  binder_size_t *objects_start,
				  struct binder_buffer_object *buffer,
				  binder_size_t fixup_offset,
				  struct binder_buffer_object *last_obj,
				  binder_size_t last_min_offset)
{
	if (!last_obj) {
		/* Nothing to fix up against */
		return false;
	}

	while (last_obj != buffer) {
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
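		 *
		 * Walk up last_obj's parent chain until we reach @buffer;
		 * at each level the minimum allowed fixup offset becomes
		 * the offset just past the parent pointer fixed up there.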
2181 */ 2182 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0) 2183 return false; 2184 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t); 2185 last_obj = (struct binder_buffer_object *) 2186 (b->data + *(objects_start + last_obj->parent)); 2187 } 2188 return (fixup_offset >= last_min_offset); 2189 } 2190 2191 static void binder_transaction_buffer_release(struct binder_proc *proc, 2192 struct binder_buffer *buffer, 2193 binder_size_t *failed_at) 2194 { 2195 binder_size_t *offp, *off_start, *off_end; 2196 int debug_id = buffer->debug_id; 2197 2198 binder_debug(BINDER_DEBUG_TRANSACTION, 2199 "%d buffer release %d, size %zd-%zd, failed at %p\n", 2200 proc->pid, buffer->debug_id, 2201 buffer->data_size, buffer->offsets_size, failed_at); 2202 2203 if (buffer->target_node) 2204 binder_dec_node(buffer->target_node, 1, 0); 2205 2206 off_start = (binder_size_t *)(buffer->data + 2207 ALIGN(buffer->data_size, sizeof(void *))); 2208 if (failed_at) 2209 off_end = failed_at; 2210 else 2211 off_end = (void *)off_start + buffer->offsets_size; 2212 for (offp = off_start; offp < off_end; offp++) { 2213 struct binder_object_header *hdr; 2214 size_t object_size = binder_validate_object(buffer, *offp); 2215 2216 if (object_size == 0) { 2217 pr_err("transaction release %d bad object at offset %lld, size %zd\n", 2218 debug_id, (u64)*offp, buffer->data_size); 2219 continue; 2220 } 2221 hdr = (struct binder_object_header *)(buffer->data + *offp); 2222 switch (hdr->type) { 2223 case BINDER_TYPE_BINDER: 2224 case BINDER_TYPE_WEAK_BINDER: { 2225 struct flat_binder_object *fp; 2226 struct binder_node *node; 2227 2228 fp = to_flat_binder_object(hdr); 2229 node = binder_get_node(proc, fp->binder); 2230 if (node == NULL) { 2231 pr_err("transaction release %d bad node %016llx\n", 2232 debug_id, (u64)fp->binder); 2233 break; 2234 } 2235 binder_debug(BINDER_DEBUG_TRANSACTION, 2236 " node %d u%016llx\n", 2237 node->debug_id, (u64)node->ptr); 2238 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER, 2239 0); 2240 binder_put_node(node); 2241 } break; 2242 case BINDER_TYPE_HANDLE: 2243 case BINDER_TYPE_WEAK_HANDLE: { 2244 struct flat_binder_object *fp; 2245 struct binder_ref_data rdata; 2246 int ret; 2247 2248 fp = to_flat_binder_object(hdr); 2249 ret = binder_dec_ref_for_handle(proc, fp->handle, 2250 hdr->type == BINDER_TYPE_HANDLE, &rdata); 2251 2252 if (ret) { 2253 pr_err("transaction release %d bad handle %d, ret = %d\n", 2254 debug_id, fp->handle, ret); 2255 break; 2256 } 2257 binder_debug(BINDER_DEBUG_TRANSACTION, 2258 " ref %d desc %d\n", 2259 rdata.debug_id, rdata.desc); 2260 } break; 2261 2262 case BINDER_TYPE_FD: { 2263 struct binder_fd_object *fp = to_binder_fd_object(hdr); 2264 2265 binder_debug(BINDER_DEBUG_TRANSACTION, 2266 " fd %d\n", fp->fd); 2267 if (failed_at) 2268 task_close_fd(proc, fp->fd); 2269 } break; 2270 case BINDER_TYPE_PTR: 2271 /* 2272 * Nothing to do here, this will get cleaned up when the 2273 * transaction buffer gets freed 2274 */ 2275 break; 2276 case BINDER_TYPE_FDA: { 2277 struct binder_fd_array_object *fda; 2278 struct binder_buffer_object *parent; 2279 uintptr_t parent_buffer; 2280 u32 *fd_array; 2281 size_t fd_index; 2282 binder_size_t fd_buf_size; 2283 2284 fda = to_binder_fd_array_object(hdr); 2285 parent = binder_validate_ptr(buffer, fda->parent, 2286 off_start, 2287 offp - off_start); 2288 if (!parent) { 2289 pr_err("transaction release %d bad parent offset\n", 2290 debug_id); 2291 continue; 2292 } 2293 /* 2294 * Since the parent was already fixed up, convert it 
2295 * back to kernel address space to access it 2296 */ 2297 parent_buffer = parent->buffer - 2298 binder_alloc_get_user_buffer_offset( 2299 &proc->alloc); 2300 2301 fd_buf_size = sizeof(u32) * fda->num_fds; 2302 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 2303 pr_err("transaction release %d invalid number of fds (%lld)\n", 2304 debug_id, (u64)fda->num_fds); 2305 continue; 2306 } 2307 if (fd_buf_size > parent->length || 2308 fda->parent_offset > parent->length - fd_buf_size) { 2309 /* No space for all file descriptors here. */ 2310 pr_err("transaction release %d not enough space for %lld fds in buffer\n", 2311 debug_id, (u64)fda->num_fds); 2312 continue; 2313 } 2314 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); 2315 for (fd_index = 0; fd_index < fda->num_fds; fd_index++) 2316 task_close_fd(proc, fd_array[fd_index]); 2317 } break; 2318 default: 2319 pr_err("transaction release %d bad object type %x\n", 2320 debug_id, hdr->type); 2321 break; 2322 } 2323 } 2324 } 2325 2326 static int binder_translate_binder(struct flat_binder_object *fp, 2327 struct binder_transaction *t, 2328 struct binder_thread *thread) 2329 { 2330 struct binder_node *node; 2331 struct binder_proc *proc = thread->proc; 2332 struct binder_proc *target_proc = t->to_proc; 2333 struct binder_ref_data rdata; 2334 int ret = 0; 2335 2336 node = binder_get_node(proc, fp->binder); 2337 if (!node) { 2338 node = binder_new_node(proc, fp); 2339 if (!node) 2340 return -ENOMEM; 2341 } 2342 if (fp->cookie != node->cookie) { 2343 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n", 2344 proc->pid, thread->pid, (u64)fp->binder, 2345 node->debug_id, (u64)fp->cookie, 2346 (u64)node->cookie); 2347 ret = -EINVAL; 2348 goto done; 2349 } 2350 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { 2351 ret = -EPERM; 2352 goto done; 2353 } 2354 2355 ret = binder_inc_ref_for_node(target_proc, node, 2356 fp->hdr.type == BINDER_TYPE_BINDER, 2357 &thread->todo, &rdata); 2358 if (ret) 2359 goto done; 2360 2361 if (fp->hdr.type == BINDER_TYPE_BINDER) 2362 fp->hdr.type = BINDER_TYPE_HANDLE; 2363 else 2364 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE; 2365 fp->binder = 0; 2366 fp->handle = rdata.desc; 2367 fp->cookie = 0; 2368 2369 trace_binder_transaction_node_to_ref(t, node, &rdata); 2370 binder_debug(BINDER_DEBUG_TRANSACTION, 2371 " node %d u%016llx -> ref %d desc %d\n", 2372 node->debug_id, (u64)node->ptr, 2373 rdata.debug_id, rdata.desc); 2374 done: 2375 binder_put_node(node); 2376 return ret; 2377 } 2378 2379 static int binder_translate_handle(struct flat_binder_object *fp, 2380 struct binder_transaction *t, 2381 struct binder_thread *thread) 2382 { 2383 struct binder_proc *proc = thread->proc; 2384 struct binder_proc *target_proc = t->to_proc; 2385 struct binder_node *node; 2386 struct binder_ref_data src_rdata; 2387 int ret = 0; 2388 2389 node = binder_get_node_from_ref(proc, fp->handle, 2390 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata); 2391 if (!node) { 2392 binder_user_error("%d:%d got transaction with invalid handle, %d\n", 2393 proc->pid, thread->pid, fp->handle); 2394 return -EINVAL; 2395 } 2396 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { 2397 ret = -EPERM; 2398 goto done; 2399 } 2400 2401 binder_node_lock(node); 2402 if (node->proc == target_proc) { 2403 if (fp->hdr.type == BINDER_TYPE_HANDLE) 2404 fp->hdr.type = BINDER_TYPE_BINDER; 2405 else 2406 fp->hdr.type = BINDER_TYPE_WEAK_BINDER; 2407 fp->binder = node->ptr; 2408 fp->cookie = node->cookie; 
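		/*
		 * The target process owns this node, so turn the handle
		 * back into a local binder object and take a node reference
		 * (strong for BINDER_TYPE_BINDER) while the node lock and
		 * the owning proc's inner lock are held.
		 */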
2409 if (node->proc) 2410 binder_inner_proc_lock(node->proc); 2411 binder_inc_node_nilocked(node, 2412 fp->hdr.type == BINDER_TYPE_BINDER, 2413 0, NULL); 2414 if (node->proc) 2415 binder_inner_proc_unlock(node->proc); 2416 trace_binder_transaction_ref_to_node(t, node, &src_rdata); 2417 binder_debug(BINDER_DEBUG_TRANSACTION, 2418 " ref %d desc %d -> node %d u%016llx\n", 2419 src_rdata.debug_id, src_rdata.desc, node->debug_id, 2420 (u64)node->ptr); 2421 binder_node_unlock(node); 2422 } else { 2423 struct binder_ref_data dest_rdata; 2424 2425 binder_node_unlock(node); 2426 ret = binder_inc_ref_for_node(target_proc, node, 2427 fp->hdr.type == BINDER_TYPE_HANDLE, 2428 NULL, &dest_rdata); 2429 if (ret) 2430 goto done; 2431 2432 fp->binder = 0; 2433 fp->handle = dest_rdata.desc; 2434 fp->cookie = 0; 2435 trace_binder_transaction_ref_to_ref(t, node, &src_rdata, 2436 &dest_rdata); 2437 binder_debug(BINDER_DEBUG_TRANSACTION, 2438 " ref %d desc %d -> ref %d desc %d (node %d)\n", 2439 src_rdata.debug_id, src_rdata.desc, 2440 dest_rdata.debug_id, dest_rdata.desc, 2441 node->debug_id); 2442 } 2443 done: 2444 binder_put_node(node); 2445 return ret; 2446 } 2447 2448 static int binder_translate_fd(int fd, 2449 struct binder_transaction *t, 2450 struct binder_thread *thread, 2451 struct binder_transaction *in_reply_to) 2452 { 2453 struct binder_proc *proc = thread->proc; 2454 struct binder_proc *target_proc = t->to_proc; 2455 int target_fd; 2456 struct file *file; 2457 int ret; 2458 bool target_allows_fd; 2459 2460 if (in_reply_to) 2461 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS); 2462 else 2463 target_allows_fd = t->buffer->target_node->accept_fds; 2464 if (!target_allows_fd) { 2465 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n", 2466 proc->pid, thread->pid, 2467 in_reply_to ? 
"reply" : "transaction", 2468 fd); 2469 ret = -EPERM; 2470 goto err_fd_not_accepted; 2471 } 2472 2473 file = fget(fd); 2474 if (!file) { 2475 binder_user_error("%d:%d got transaction with invalid fd, %d\n", 2476 proc->pid, thread->pid, fd); 2477 ret = -EBADF; 2478 goto err_fget; 2479 } 2480 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file); 2481 if (ret < 0) { 2482 ret = -EPERM; 2483 goto err_security; 2484 } 2485 2486 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); 2487 if (target_fd < 0) { 2488 ret = -ENOMEM; 2489 goto err_get_unused_fd; 2490 } 2491 task_fd_install(target_proc, target_fd, file); 2492 trace_binder_transaction_fd(t, fd, target_fd); 2493 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n", 2494 fd, target_fd); 2495 2496 return target_fd; 2497 2498 err_get_unused_fd: 2499 err_security: 2500 fput(file); 2501 err_fget: 2502 err_fd_not_accepted: 2503 return ret; 2504 } 2505 2506 static int binder_translate_fd_array(struct binder_fd_array_object *fda, 2507 struct binder_buffer_object *parent, 2508 struct binder_transaction *t, 2509 struct binder_thread *thread, 2510 struct binder_transaction *in_reply_to) 2511 { 2512 binder_size_t fdi, fd_buf_size, num_installed_fds; 2513 int target_fd; 2514 uintptr_t parent_buffer; 2515 u32 *fd_array; 2516 struct binder_proc *proc = thread->proc; 2517 struct binder_proc *target_proc = t->to_proc; 2518 2519 fd_buf_size = sizeof(u32) * fda->num_fds; 2520 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 2521 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n", 2522 proc->pid, thread->pid, (u64)fda->num_fds); 2523 return -EINVAL; 2524 } 2525 if (fd_buf_size > parent->length || 2526 fda->parent_offset > parent->length - fd_buf_size) { 2527 /* No space for all file descriptors here. */ 2528 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n", 2529 proc->pid, thread->pid, (u64)fda->num_fds); 2530 return -EINVAL; 2531 } 2532 /* 2533 * Since the parent was already fixed up, convert it 2534 * back to the kernel address space to access it 2535 */ 2536 parent_buffer = parent->buffer - 2537 binder_alloc_get_user_buffer_offset(&target_proc->alloc); 2538 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); 2539 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) { 2540 binder_user_error("%d:%d parent offset not aligned correctly.\n", 2541 proc->pid, thread->pid); 2542 return -EINVAL; 2543 } 2544 for (fdi = 0; fdi < fda->num_fds; fdi++) { 2545 target_fd = binder_translate_fd(fd_array[fdi], t, thread, 2546 in_reply_to); 2547 if (target_fd < 0) 2548 goto err_translate_fd_failed; 2549 fd_array[fdi] = target_fd; 2550 } 2551 return 0; 2552 2553 err_translate_fd_failed: 2554 /* 2555 * Failed to allocate fd or security error, free fds 2556 * installed so far. 
2557 */ 2558 num_installed_fds = fdi; 2559 for (fdi = 0; fdi < num_installed_fds; fdi++) 2560 task_close_fd(target_proc, fd_array[fdi]); 2561 return target_fd; 2562 } 2563 2564 static int binder_fixup_parent(struct binder_transaction *t, 2565 struct binder_thread *thread, 2566 struct binder_buffer_object *bp, 2567 binder_size_t *off_start, 2568 binder_size_t num_valid, 2569 struct binder_buffer_object *last_fixup_obj, 2570 binder_size_t last_fixup_min_off) 2571 { 2572 struct binder_buffer_object *parent; 2573 u8 *parent_buffer; 2574 struct binder_buffer *b = t->buffer; 2575 struct binder_proc *proc = thread->proc; 2576 struct binder_proc *target_proc = t->to_proc; 2577 2578 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT)) 2579 return 0; 2580 2581 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid); 2582 if (!parent) { 2583 binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 2584 proc->pid, thread->pid); 2585 return -EINVAL; 2586 } 2587 2588 if (!binder_validate_fixup(b, off_start, 2589 parent, bp->parent_offset, 2590 last_fixup_obj, 2591 last_fixup_min_off)) { 2592 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", 2593 proc->pid, thread->pid); 2594 return -EINVAL; 2595 } 2596 2597 if (parent->length < sizeof(binder_uintptr_t) || 2598 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) { 2599 /* No space for a pointer here! */ 2600 binder_user_error("%d:%d got transaction with invalid parent offset\n", 2601 proc->pid, thread->pid); 2602 return -EINVAL; 2603 } 2604 parent_buffer = (u8 *)((uintptr_t)parent->buffer - 2605 binder_alloc_get_user_buffer_offset( 2606 &target_proc->alloc)); 2607 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer; 2608 2609 return 0; 2610 } 2611 2612 /** 2613 * binder_proc_transaction() - sends a transaction to a process and wakes it up 2614 * @t: transaction to send 2615 * @proc: process to send the transaction to 2616 * @thread: thread in @proc to send the transaction to (may be NULL) 2617 * 2618 * This function queues a transaction to the specified process. It will try 2619 * to find a thread in the target process to handle the transaction and 2620 * wake it up. If no thread is found, the work is queued to the proc 2621 * waitqueue. 2622 * 2623 * If the @thread parameter is not NULL, the transaction is always queued 2624 * to the waitlist of that specific thread. 
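 *
 * For oneway (TF_ONE_WAY) transactions the target node serializes the
 * work: if the node already has an async transaction outstanding, the
 * new work is parked on node->async_todo and no thread is woken; it is
 * moved to the proc's todo list only when the previous async buffer is
 * freed with BC_FREE_BUFFER.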
 *
 * Return: true if the transaction was successfully queued,
 *	   false if the target process or thread is dead
 */
static bool binder_proc_transaction(struct binder_transaction *t,
				    struct binder_proc *proc,
				    struct binder_thread *thread)
{
	struct binder_node *node = t->buffer->target_node;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool pending_async = false;

	BUG_ON(!node);
	binder_node_lock(node);
	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction) {
			pending_async = true;
		} else {
			node->has_async_transaction = 1;
		}
	}

	binder_inner_proc_lock(proc);

	if (proc->is_dead || (thread && thread->is_dead)) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		return false;
	}

	if (!thread && !pending_async)
		thread = binder_select_thread_ilocked(proc);

	if (thread)
		binder_enqueue_thread_work_ilocked(thread, &t->work);
	else if (!pending_async)
		binder_enqueue_work_ilocked(&t->work, &proc->todo);
	else
		binder_enqueue_work_ilocked(&t->work, &node->async_todo);

	if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	return true;
}

/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:	struct binder_node for which to get refs
 * @proc:	returns @node->proc if valid
 * @error:	if no @proc then returns BR_DEAD_REPLY
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken or NULL if @node->proc is NULL.
 *	   Also sets @proc if valid.
If the @node->proc is NULL indicating that the 2694 * target proc has died, @error is set to BR_DEAD_REPLY 2695 */ 2696 static struct binder_node *binder_get_node_refs_for_txn( 2697 struct binder_node *node, 2698 struct binder_proc **procp, 2699 uint32_t *error) 2700 { 2701 struct binder_node *target_node = NULL; 2702 2703 binder_node_inner_lock(node); 2704 if (node->proc) { 2705 target_node = node; 2706 binder_inc_node_nilocked(node, 1, 0, NULL); 2707 binder_inc_node_tmpref_ilocked(node); 2708 node->proc->tmp_ref++; 2709 *procp = node->proc; 2710 } else 2711 *error = BR_DEAD_REPLY; 2712 binder_node_inner_unlock(node); 2713 2714 return target_node; 2715 } 2716 2717 static void binder_transaction(struct binder_proc *proc, 2718 struct binder_thread *thread, 2719 struct binder_transaction_data *tr, int reply, 2720 binder_size_t extra_buffers_size) 2721 { 2722 int ret; 2723 struct binder_transaction *t; 2724 struct binder_work *tcomplete; 2725 binder_size_t *offp, *off_end, *off_start; 2726 binder_size_t off_min; 2727 u8 *sg_bufp, *sg_buf_end; 2728 struct binder_proc *target_proc = NULL; 2729 struct binder_thread *target_thread = NULL; 2730 struct binder_node *target_node = NULL; 2731 struct binder_transaction *in_reply_to = NULL; 2732 struct binder_transaction_log_entry *e; 2733 uint32_t return_error = 0; 2734 uint32_t return_error_param = 0; 2735 uint32_t return_error_line = 0; 2736 struct binder_buffer_object *last_fixup_obj = NULL; 2737 binder_size_t last_fixup_min_off = 0; 2738 struct binder_context *context = proc->context; 2739 int t_debug_id = atomic_inc_return(&binder_last_id); 2740 2741 e = binder_transaction_log_add(&binder_transaction_log); 2742 e->debug_id = t_debug_id; 2743 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); 2744 e->from_proc = proc->pid; 2745 e->from_thread = thread->pid; 2746 e->target_handle = tr->target.handle; 2747 e->data_size = tr->data_size; 2748 e->offsets_size = tr->offsets_size; 2749 e->context_name = proc->context->name; 2750 2751 if (reply) { 2752 binder_inner_proc_lock(proc); 2753 in_reply_to = thread->transaction_stack; 2754 if (in_reply_to == NULL) { 2755 binder_inner_proc_unlock(proc); 2756 binder_user_error("%d:%d got reply transaction with no transaction stack\n", 2757 proc->pid, thread->pid); 2758 return_error = BR_FAILED_REPLY; 2759 return_error_param = -EPROTO; 2760 return_error_line = __LINE__; 2761 goto err_empty_call_stack; 2762 } 2763 if (in_reply_to->to_thread != thread) { 2764 spin_lock(&in_reply_to->lock); 2765 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n", 2766 proc->pid, thread->pid, in_reply_to->debug_id, 2767 in_reply_to->to_proc ? 2768 in_reply_to->to_proc->pid : 0, 2769 in_reply_to->to_thread ? 
2770 in_reply_to->to_thread->pid : 0); 2771 spin_unlock(&in_reply_to->lock); 2772 binder_inner_proc_unlock(proc); 2773 return_error = BR_FAILED_REPLY; 2774 return_error_param = -EPROTO; 2775 return_error_line = __LINE__; 2776 in_reply_to = NULL; 2777 goto err_bad_call_stack; 2778 } 2779 thread->transaction_stack = in_reply_to->to_parent; 2780 binder_inner_proc_unlock(proc); 2781 binder_set_nice(in_reply_to->saved_priority); 2782 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to); 2783 if (target_thread == NULL) { 2784 return_error = BR_DEAD_REPLY; 2785 return_error_line = __LINE__; 2786 goto err_dead_binder; 2787 } 2788 if (target_thread->transaction_stack != in_reply_to) { 2789 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n", 2790 proc->pid, thread->pid, 2791 target_thread->transaction_stack ? 2792 target_thread->transaction_stack->debug_id : 0, 2793 in_reply_to->debug_id); 2794 binder_inner_proc_unlock(target_thread->proc); 2795 return_error = BR_FAILED_REPLY; 2796 return_error_param = -EPROTO; 2797 return_error_line = __LINE__; 2798 in_reply_to = NULL; 2799 target_thread = NULL; 2800 goto err_dead_binder; 2801 } 2802 target_proc = target_thread->proc; 2803 target_proc->tmp_ref++; 2804 binder_inner_proc_unlock(target_thread->proc); 2805 } else { 2806 if (tr->target.handle) { 2807 struct binder_ref *ref; 2808 2809 /* 2810 * There must already be a strong ref 2811 * on this node. If so, do a strong 2812 * increment on the node to ensure it 2813 * stays alive until the transaction is 2814 * done. 2815 */ 2816 binder_proc_lock(proc); 2817 ref = binder_get_ref_olocked(proc, tr->target.handle, 2818 true); 2819 if (ref) { 2820 target_node = binder_get_node_refs_for_txn( 2821 ref->node, &target_proc, 2822 &return_error); 2823 } else { 2824 binder_user_error("%d:%d got transaction to invalid handle\n", 2825 proc->pid, thread->pid); 2826 return_error = BR_FAILED_REPLY; 2827 } 2828 binder_proc_unlock(proc); 2829 } else { 2830 mutex_lock(&context->context_mgr_node_lock); 2831 target_node = context->binder_context_mgr_node; 2832 if (target_node) 2833 target_node = binder_get_node_refs_for_txn( 2834 target_node, &target_proc, 2835 &return_error); 2836 else 2837 return_error = BR_DEAD_REPLY; 2838 mutex_unlock(&context->context_mgr_node_lock); 2839 } 2840 if (!target_node) { 2841 /* 2842 * return_error is set above 2843 */ 2844 return_error_param = -EINVAL; 2845 return_error_line = __LINE__; 2846 goto err_dead_binder; 2847 } 2848 e->to_node = target_node->debug_id; 2849 if (security_binder_transaction(proc->tsk, 2850 target_proc->tsk) < 0) { 2851 return_error = BR_FAILED_REPLY; 2852 return_error_param = -EPERM; 2853 return_error_line = __LINE__; 2854 goto err_invalid_target_handle; 2855 } 2856 binder_inner_proc_lock(proc); 2857 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { 2858 struct binder_transaction *tmp; 2859 2860 tmp = thread->transaction_stack; 2861 if (tmp->to_thread != thread) { 2862 spin_lock(&tmp->lock); 2863 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", 2864 proc->pid, thread->pid, tmp->debug_id, 2865 tmp->to_proc ? tmp->to_proc->pid : 0, 2866 tmp->to_thread ? 
2867 tmp->to_thread->pid : 0); 2868 spin_unlock(&tmp->lock); 2869 binder_inner_proc_unlock(proc); 2870 return_error = BR_FAILED_REPLY; 2871 return_error_param = -EPROTO; 2872 return_error_line = __LINE__; 2873 goto err_bad_call_stack; 2874 } 2875 while (tmp) { 2876 struct binder_thread *from; 2877 2878 spin_lock(&tmp->lock); 2879 from = tmp->from; 2880 if (from && from->proc == target_proc) { 2881 atomic_inc(&from->tmp_ref); 2882 target_thread = from; 2883 spin_unlock(&tmp->lock); 2884 break; 2885 } 2886 spin_unlock(&tmp->lock); 2887 tmp = tmp->from_parent; 2888 } 2889 } 2890 binder_inner_proc_unlock(proc); 2891 } 2892 if (target_thread) 2893 e->to_thread = target_thread->pid; 2894 e->to_proc = target_proc->pid; 2895 2896 /* TODO: reuse incoming transaction for reply */ 2897 t = kzalloc(sizeof(*t), GFP_KERNEL); 2898 if (t == NULL) { 2899 return_error = BR_FAILED_REPLY; 2900 return_error_param = -ENOMEM; 2901 return_error_line = __LINE__; 2902 goto err_alloc_t_failed; 2903 } 2904 binder_stats_created(BINDER_STAT_TRANSACTION); 2905 spin_lock_init(&t->lock); 2906 2907 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); 2908 if (tcomplete == NULL) { 2909 return_error = BR_FAILED_REPLY; 2910 return_error_param = -ENOMEM; 2911 return_error_line = __LINE__; 2912 goto err_alloc_tcomplete_failed; 2913 } 2914 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); 2915 2916 t->debug_id = t_debug_id; 2917 2918 if (reply) 2919 binder_debug(BINDER_DEBUG_TRANSACTION, 2920 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n", 2921 proc->pid, thread->pid, t->debug_id, 2922 target_proc->pid, target_thread->pid, 2923 (u64)tr->data.ptr.buffer, 2924 (u64)tr->data.ptr.offsets, 2925 (u64)tr->data_size, (u64)tr->offsets_size, 2926 (u64)extra_buffers_size); 2927 else 2928 binder_debug(BINDER_DEBUG_TRANSACTION, 2929 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n", 2930 proc->pid, thread->pid, t->debug_id, 2931 target_proc->pid, target_node->debug_id, 2932 (u64)tr->data.ptr.buffer, 2933 (u64)tr->data.ptr.offsets, 2934 (u64)tr->data_size, (u64)tr->offsets_size, 2935 (u64)extra_buffers_size); 2936 2937 if (!reply && !(tr->flags & TF_ONE_WAY)) 2938 t->from = thread; 2939 else 2940 t->from = NULL; 2941 t->sender_euid = task_euid(proc->tsk); 2942 t->to_proc = target_proc; 2943 t->to_thread = target_thread; 2944 t->code = tr->code; 2945 t->flags = tr->flags; 2946 t->priority = task_nice(current); 2947 2948 trace_binder_transaction(reply, t, target_node); 2949 2950 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, 2951 tr->offsets_size, extra_buffers_size, 2952 !reply && (t->flags & TF_ONE_WAY)); 2953 if (IS_ERR(t->buffer)) { 2954 /* 2955 * -ESRCH indicates VMA cleared. The target is dying. 2956 */ 2957 return_error_param = PTR_ERR(t->buffer); 2958 return_error = return_error_param == -ESRCH ? 
2959 BR_DEAD_REPLY : BR_FAILED_REPLY; 2960 return_error_line = __LINE__; 2961 t->buffer = NULL; 2962 goto err_binder_alloc_buf_failed; 2963 } 2964 t->buffer->allow_user_free = 0; 2965 t->buffer->debug_id = t->debug_id; 2966 t->buffer->transaction = t; 2967 t->buffer->target_node = target_node; 2968 trace_binder_transaction_alloc_buf(t->buffer); 2969 off_start = (binder_size_t *)(t->buffer->data + 2970 ALIGN(tr->data_size, sizeof(void *))); 2971 offp = off_start; 2972 2973 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t) 2974 tr->data.ptr.buffer, tr->data_size)) { 2975 binder_user_error("%d:%d got transaction with invalid data ptr\n", 2976 proc->pid, thread->pid); 2977 return_error = BR_FAILED_REPLY; 2978 return_error_param = -EFAULT; 2979 return_error_line = __LINE__; 2980 goto err_copy_data_failed; 2981 } 2982 if (copy_from_user(offp, (const void __user *)(uintptr_t) 2983 tr->data.ptr.offsets, tr->offsets_size)) { 2984 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 2985 proc->pid, thread->pid); 2986 return_error = BR_FAILED_REPLY; 2987 return_error_param = -EFAULT; 2988 return_error_line = __LINE__; 2989 goto err_copy_data_failed; 2990 } 2991 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) { 2992 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n", 2993 proc->pid, thread->pid, (u64)tr->offsets_size); 2994 return_error = BR_FAILED_REPLY; 2995 return_error_param = -EINVAL; 2996 return_error_line = __LINE__; 2997 goto err_bad_offset; 2998 } 2999 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) { 3000 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n", 3001 proc->pid, thread->pid, 3002 (u64)extra_buffers_size); 3003 return_error = BR_FAILED_REPLY; 3004 return_error_param = -EINVAL; 3005 return_error_line = __LINE__; 3006 goto err_bad_offset; 3007 } 3008 off_end = (void *)off_start + tr->offsets_size; 3009 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *))); 3010 sg_buf_end = sg_bufp + extra_buffers_size; 3011 off_min = 0; 3012 for (; offp < off_end; offp++) { 3013 struct binder_object_header *hdr; 3014 size_t object_size = binder_validate_object(t->buffer, *offp); 3015 3016 if (object_size == 0 || *offp < off_min) { 3017 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n", 3018 proc->pid, thread->pid, (u64)*offp, 3019 (u64)off_min, 3020 (u64)t->buffer->data_size); 3021 return_error = BR_FAILED_REPLY; 3022 return_error_param = -EINVAL; 3023 return_error_line = __LINE__; 3024 goto err_bad_offset; 3025 } 3026 3027 hdr = (struct binder_object_header *)(t->buffer->data + *offp); 3028 off_min = *offp + object_size; 3029 switch (hdr->type) { 3030 case BINDER_TYPE_BINDER: 3031 case BINDER_TYPE_WEAK_BINDER: { 3032 struct flat_binder_object *fp; 3033 3034 fp = to_flat_binder_object(hdr); 3035 ret = binder_translate_binder(fp, t, thread); 3036 if (ret < 0) { 3037 return_error = BR_FAILED_REPLY; 3038 return_error_param = ret; 3039 return_error_line = __LINE__; 3040 goto err_translate_failed; 3041 } 3042 } break; 3043 case BINDER_TYPE_HANDLE: 3044 case BINDER_TYPE_WEAK_HANDLE: { 3045 struct flat_binder_object *fp; 3046 3047 fp = to_flat_binder_object(hdr); 3048 ret = binder_translate_handle(fp, t, thread); 3049 if (ret < 0) { 3050 return_error = BR_FAILED_REPLY; 3051 return_error_param = ret; 3052 return_error_line = __LINE__; 3053 goto err_translate_failed; 3054 } 3055 } break; 3056 3057 case BINDER_TYPE_FD: { 3058 struct binder_fd_object *fp = 
to_binder_fd_object(hdr); 3059 int target_fd = binder_translate_fd(fp->fd, t, thread, 3060 in_reply_to); 3061 3062 if (target_fd < 0) { 3063 return_error = BR_FAILED_REPLY; 3064 return_error_param = target_fd; 3065 return_error_line = __LINE__; 3066 goto err_translate_failed; 3067 } 3068 fp->pad_binder = 0; 3069 fp->fd = target_fd; 3070 } break; 3071 case BINDER_TYPE_FDA: { 3072 struct binder_fd_array_object *fda = 3073 to_binder_fd_array_object(hdr); 3074 struct binder_buffer_object *parent = 3075 binder_validate_ptr(t->buffer, fda->parent, 3076 off_start, 3077 offp - off_start); 3078 if (!parent) { 3079 binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 3080 proc->pid, thread->pid); 3081 return_error = BR_FAILED_REPLY; 3082 return_error_param = -EINVAL; 3083 return_error_line = __LINE__; 3084 goto err_bad_parent; 3085 } 3086 if (!binder_validate_fixup(t->buffer, off_start, 3087 parent, fda->parent_offset, 3088 last_fixup_obj, 3089 last_fixup_min_off)) { 3090 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", 3091 proc->pid, thread->pid); 3092 return_error = BR_FAILED_REPLY; 3093 return_error_param = -EINVAL; 3094 return_error_line = __LINE__; 3095 goto err_bad_parent; 3096 } 3097 ret = binder_translate_fd_array(fda, parent, t, thread, 3098 in_reply_to); 3099 if (ret < 0) { 3100 return_error = BR_FAILED_REPLY; 3101 return_error_param = ret; 3102 return_error_line = __LINE__; 3103 goto err_translate_failed; 3104 } 3105 last_fixup_obj = parent; 3106 last_fixup_min_off = 3107 fda->parent_offset + sizeof(u32) * fda->num_fds; 3108 } break; 3109 case BINDER_TYPE_PTR: { 3110 struct binder_buffer_object *bp = 3111 to_binder_buffer_object(hdr); 3112 size_t buf_left = sg_buf_end - sg_bufp; 3113 3114 if (bp->length > buf_left) { 3115 binder_user_error("%d:%d got transaction with too large buffer\n", 3116 proc->pid, thread->pid); 3117 return_error = BR_FAILED_REPLY; 3118 return_error_param = -EINVAL; 3119 return_error_line = __LINE__; 3120 goto err_bad_offset; 3121 } 3122 if (copy_from_user(sg_bufp, 3123 (const void __user *)(uintptr_t) 3124 bp->buffer, bp->length)) { 3125 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 3126 proc->pid, thread->pid); 3127 return_error_param = -EFAULT; 3128 return_error = BR_FAILED_REPLY; 3129 return_error_line = __LINE__; 3130 goto err_copy_data_failed; 3131 } 3132 /* Fixup buffer pointer to target proc address space */ 3133 bp->buffer = (uintptr_t)sg_bufp + 3134 binder_alloc_get_user_buffer_offset( 3135 &target_proc->alloc); 3136 sg_bufp += ALIGN(bp->length, sizeof(u64)); 3137 3138 ret = binder_fixup_parent(t, thread, bp, off_start, 3139 offp - off_start, 3140 last_fixup_obj, 3141 last_fixup_min_off); 3142 if (ret < 0) { 3143 return_error = BR_FAILED_REPLY; 3144 return_error_param = ret; 3145 return_error_line = __LINE__; 3146 goto err_translate_failed; 3147 } 3148 last_fixup_obj = bp; 3149 last_fixup_min_off = 0; 3150 } break; 3151 default: 3152 binder_user_error("%d:%d got transaction with invalid object type, %x\n", 3153 proc->pid, thread->pid, hdr->type); 3154 return_error = BR_FAILED_REPLY; 3155 return_error_param = -EINVAL; 3156 return_error_line = __LINE__; 3157 goto err_bad_object_type; 3158 } 3159 } 3160 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; 3161 t->work.type = BINDER_WORK_TRANSACTION; 3162 3163 if (reply) { 3164 binder_enqueue_thread_work(thread, tcomplete); 3165 binder_inner_proc_lock(target_proc); 3166 if (target_thread->is_dead) { 3167 
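				/*
				 * The thread that is owed this reply has
				 * exited; drop the lock and take the
				 * dead-proc/thread error path.
				 */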
binder_inner_proc_unlock(target_proc); 3168 goto err_dead_proc_or_thread; 3169 } 3170 BUG_ON(t->buffer->async_transaction != 0); 3171 binder_pop_transaction_ilocked(target_thread, in_reply_to); 3172 binder_enqueue_thread_work_ilocked(target_thread, &t->work); 3173 binder_inner_proc_unlock(target_proc); 3174 wake_up_interruptible_sync(&target_thread->wait); 3175 binder_free_transaction(in_reply_to); 3176 } else if (!(t->flags & TF_ONE_WAY)) { 3177 BUG_ON(t->buffer->async_transaction != 0); 3178 binder_inner_proc_lock(proc); 3179 /* 3180 * Defer the TRANSACTION_COMPLETE, so we don't return to 3181 * userspace immediately; this allows the target process to 3182 * immediately start processing this transaction, reducing 3183 * latency. We will then return the TRANSACTION_COMPLETE when 3184 * the target replies (or there is an error). 3185 */ 3186 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete); 3187 t->need_reply = 1; 3188 t->from_parent = thread->transaction_stack; 3189 thread->transaction_stack = t; 3190 binder_inner_proc_unlock(proc); 3191 if (!binder_proc_transaction(t, target_proc, target_thread)) { 3192 binder_inner_proc_lock(proc); 3193 binder_pop_transaction_ilocked(thread, t); 3194 binder_inner_proc_unlock(proc); 3195 goto err_dead_proc_or_thread; 3196 } 3197 } else { 3198 BUG_ON(target_node == NULL); 3199 BUG_ON(t->buffer->async_transaction != 1); 3200 binder_enqueue_thread_work(thread, tcomplete); 3201 if (!binder_proc_transaction(t, target_proc, NULL)) 3202 goto err_dead_proc_or_thread; 3203 } 3204 if (target_thread) 3205 binder_thread_dec_tmpref(target_thread); 3206 binder_proc_dec_tmpref(target_proc); 3207 if (target_node) 3208 binder_dec_node_tmpref(target_node); 3209 /* 3210 * write barrier to synchronize with initialization 3211 * of log entry 3212 */ 3213 smp_wmb(); 3214 WRITE_ONCE(e->debug_id_done, t_debug_id); 3215 return; 3216 3217 err_dead_proc_or_thread: 3218 return_error = BR_DEAD_REPLY; 3219 return_error_line = __LINE__; 3220 binder_dequeue_work(proc, tcomplete); 3221 err_translate_failed: 3222 err_bad_object_type: 3223 err_bad_offset: 3224 err_bad_parent: 3225 err_copy_data_failed: 3226 trace_binder_transaction_failed_buffer_release(t->buffer); 3227 binder_transaction_buffer_release(target_proc, t->buffer, offp); 3228 if (target_node) 3229 binder_dec_node_tmpref(target_node); 3230 target_node = NULL; 3231 t->buffer->transaction = NULL; 3232 binder_alloc_free_buf(&target_proc->alloc, t->buffer); 3233 err_binder_alloc_buf_failed: 3234 kfree(tcomplete); 3235 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 3236 err_alloc_tcomplete_failed: 3237 kfree(t); 3238 binder_stats_deleted(BINDER_STAT_TRANSACTION); 3239 err_alloc_t_failed: 3240 err_bad_call_stack: 3241 err_empty_call_stack: 3242 err_dead_binder: 3243 err_invalid_target_handle: 3244 if (target_thread) 3245 binder_thread_dec_tmpref(target_thread); 3246 if (target_proc) 3247 binder_proc_dec_tmpref(target_proc); 3248 if (target_node) { 3249 binder_dec_node(target_node, 1, 0); 3250 binder_dec_node_tmpref(target_node); 3251 } 3252 3253 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 3254 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n", 3255 proc->pid, thread->pid, return_error, return_error_param, 3256 (u64)tr->data_size, (u64)tr->offsets_size, 3257 return_error_line); 3258 3259 { 3260 struct binder_transaction_log_entry *fe; 3261 3262 e->return_error = return_error; 3263 e->return_error_param = return_error_param; 3264 e->return_error_line = return_error_line; 3265 fe = 
binder_transaction_log_add(&binder_transaction_log_failed); 3266 *fe = *e; 3267 /* 3268 * write barrier to synchronize with initialization 3269 * of log entry 3270 */ 3271 smp_wmb(); 3272 WRITE_ONCE(e->debug_id_done, t_debug_id); 3273 WRITE_ONCE(fe->debug_id_done, t_debug_id); 3274 } 3275 3276 BUG_ON(thread->return_error.cmd != BR_OK); 3277 if (in_reply_to) { 3278 thread->return_error.cmd = BR_TRANSACTION_COMPLETE; 3279 binder_enqueue_thread_work(thread, &thread->return_error.work); 3280 binder_send_failed_reply(in_reply_to, return_error); 3281 } else { 3282 thread->return_error.cmd = return_error; 3283 binder_enqueue_thread_work(thread, &thread->return_error.work); 3284 } 3285 } 3286 3287 static int binder_thread_write(struct binder_proc *proc, 3288 struct binder_thread *thread, 3289 binder_uintptr_t binder_buffer, size_t size, 3290 binder_size_t *consumed) 3291 { 3292 uint32_t cmd; 3293 struct binder_context *context = proc->context; 3294 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 3295 void __user *ptr = buffer + *consumed; 3296 void __user *end = buffer + size; 3297 3298 while (ptr < end && thread->return_error.cmd == BR_OK) { 3299 int ret; 3300 3301 if (get_user(cmd, (uint32_t __user *)ptr)) 3302 return -EFAULT; 3303 ptr += sizeof(uint32_t); 3304 trace_binder_command(cmd); 3305 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { 3306 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]); 3307 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]); 3308 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]); 3309 } 3310 switch (cmd) { 3311 case BC_INCREFS: 3312 case BC_ACQUIRE: 3313 case BC_RELEASE: 3314 case BC_DECREFS: { 3315 uint32_t target; 3316 const char *debug_string; 3317 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE; 3318 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE; 3319 struct binder_ref_data rdata; 3320 3321 if (get_user(target, (uint32_t __user *)ptr)) 3322 return -EFAULT; 3323 3324 ptr += sizeof(uint32_t); 3325 ret = -1; 3326 if (increment && !target) { 3327 struct binder_node *ctx_mgr_node; 3328 mutex_lock(&context->context_mgr_node_lock); 3329 ctx_mgr_node = context->binder_context_mgr_node; 3330 if (ctx_mgr_node) 3331 ret = binder_inc_ref_for_node( 3332 proc, ctx_mgr_node, 3333 strong, NULL, &rdata); 3334 mutex_unlock(&context->context_mgr_node_lock); 3335 } 3336 if (ret) 3337 ret = binder_update_ref_for_handle( 3338 proc, target, increment, strong, 3339 &rdata); 3340 if (!ret && rdata.desc != target) { 3341 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n", 3342 proc->pid, thread->pid, 3343 target, rdata.desc); 3344 } 3345 switch (cmd) { 3346 case BC_INCREFS: 3347 debug_string = "IncRefs"; 3348 break; 3349 case BC_ACQUIRE: 3350 debug_string = "Acquire"; 3351 break; 3352 case BC_RELEASE: 3353 debug_string = "Release"; 3354 break; 3355 case BC_DECREFS: 3356 default: 3357 debug_string = "DecRefs"; 3358 break; 3359 } 3360 if (ret) { 3361 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n", 3362 proc->pid, thread->pid, debug_string, 3363 strong, target, ret); 3364 break; 3365 } 3366 binder_debug(BINDER_DEBUG_USER_REFS, 3367 "%d:%d %s ref %d desc %d s %d w %d\n", 3368 proc->pid, thread->pid, debug_string, 3369 rdata.debug_id, rdata.desc, rdata.strong, 3370 rdata.weak); 3371 break; 3372 } 3373 case BC_INCREFS_DONE: 3374 case BC_ACQUIRE_DONE: { 3375 binder_uintptr_t node_ptr; 3376 binder_uintptr_t cookie; 3377 struct binder_node *node; 3378 bool free_node; 3379 3380 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr)) 
3381 return -EFAULT; 3382 ptr += sizeof(binder_uintptr_t); 3383 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3384 return -EFAULT; 3385 ptr += sizeof(binder_uintptr_t); 3386 node = binder_get_node(proc, node_ptr); 3387 if (node == NULL) { 3388 binder_user_error("%d:%d %s u%016llx no match\n", 3389 proc->pid, thread->pid, 3390 cmd == BC_INCREFS_DONE ? 3391 "BC_INCREFS_DONE" : 3392 "BC_ACQUIRE_DONE", 3393 (u64)node_ptr); 3394 break; 3395 } 3396 if (cookie != node->cookie) { 3397 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n", 3398 proc->pid, thread->pid, 3399 cmd == BC_INCREFS_DONE ? 3400 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 3401 (u64)node_ptr, node->debug_id, 3402 (u64)cookie, (u64)node->cookie); 3403 binder_put_node(node); 3404 break; 3405 } 3406 binder_node_inner_lock(node); 3407 if (cmd == BC_ACQUIRE_DONE) { 3408 if (node->pending_strong_ref == 0) { 3409 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", 3410 proc->pid, thread->pid, 3411 node->debug_id); 3412 binder_node_inner_unlock(node); 3413 binder_put_node(node); 3414 break; 3415 } 3416 node->pending_strong_ref = 0; 3417 } else { 3418 if (node->pending_weak_ref == 0) { 3419 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", 3420 proc->pid, thread->pid, 3421 node->debug_id); 3422 binder_node_inner_unlock(node); 3423 binder_put_node(node); 3424 break; 3425 } 3426 node->pending_weak_ref = 0; 3427 } 3428 free_node = binder_dec_node_nilocked(node, 3429 cmd == BC_ACQUIRE_DONE, 0); 3430 WARN_ON(free_node); 3431 binder_debug(BINDER_DEBUG_USER_REFS, 3432 "%d:%d %s node %d ls %d lw %d tr %d\n", 3433 proc->pid, thread->pid, 3434 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 3435 node->debug_id, node->local_strong_refs, 3436 node->local_weak_refs, node->tmp_refs); 3437 binder_node_inner_unlock(node); 3438 binder_put_node(node); 3439 break; 3440 } 3441 case BC_ATTEMPT_ACQUIRE: 3442 pr_err("BC_ATTEMPT_ACQUIRE not supported\n"); 3443 return -EINVAL; 3444 case BC_ACQUIRE_RESULT: 3445 pr_err("BC_ACQUIRE_RESULT not supported\n"); 3446 return -EINVAL; 3447 3448 case BC_FREE_BUFFER: { 3449 binder_uintptr_t data_ptr; 3450 struct binder_buffer *buffer; 3451 3452 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr)) 3453 return -EFAULT; 3454 ptr += sizeof(binder_uintptr_t); 3455 3456 buffer = binder_alloc_prepare_to_free(&proc->alloc, 3457 data_ptr); 3458 if (buffer == NULL) { 3459 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n", 3460 proc->pid, thread->pid, (u64)data_ptr); 3461 break; 3462 } 3463 if (!buffer->allow_user_free) { 3464 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n", 3465 proc->pid, thread->pid, (u64)data_ptr); 3466 break; 3467 } 3468 binder_debug(BINDER_DEBUG_FREE_BUFFER, 3469 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n", 3470 proc->pid, thread->pid, (u64)data_ptr, 3471 buffer->debug_id, 3472 buffer->transaction ? 
"active" : "finished"); 3473 3474 if (buffer->transaction) { 3475 buffer->transaction->buffer = NULL; 3476 buffer->transaction = NULL; 3477 } 3478 if (buffer->async_transaction && buffer->target_node) { 3479 struct binder_node *buf_node; 3480 struct binder_work *w; 3481 3482 buf_node = buffer->target_node; 3483 binder_node_inner_lock(buf_node); 3484 BUG_ON(!buf_node->has_async_transaction); 3485 BUG_ON(buf_node->proc != proc); 3486 w = binder_dequeue_work_head_ilocked( 3487 &buf_node->async_todo); 3488 if (!w) { 3489 buf_node->has_async_transaction = 0; 3490 } else { 3491 binder_enqueue_work_ilocked( 3492 w, &proc->todo); 3493 binder_wakeup_proc_ilocked(proc); 3494 } 3495 binder_node_inner_unlock(buf_node); 3496 } 3497 trace_binder_transaction_buffer_release(buffer); 3498 binder_transaction_buffer_release(proc, buffer, NULL); 3499 binder_alloc_free_buf(&proc->alloc, buffer); 3500 break; 3501 } 3502 3503 case BC_TRANSACTION_SG: 3504 case BC_REPLY_SG: { 3505 struct binder_transaction_data_sg tr; 3506 3507 if (copy_from_user(&tr, ptr, sizeof(tr))) 3508 return -EFAULT; 3509 ptr += sizeof(tr); 3510 binder_transaction(proc, thread, &tr.transaction_data, 3511 cmd == BC_REPLY_SG, tr.buffers_size); 3512 break; 3513 } 3514 case BC_TRANSACTION: 3515 case BC_REPLY: { 3516 struct binder_transaction_data tr; 3517 3518 if (copy_from_user(&tr, ptr, sizeof(tr))) 3519 return -EFAULT; 3520 ptr += sizeof(tr); 3521 binder_transaction(proc, thread, &tr, 3522 cmd == BC_REPLY, 0); 3523 break; 3524 } 3525 3526 case BC_REGISTER_LOOPER: 3527 binder_debug(BINDER_DEBUG_THREADS, 3528 "%d:%d BC_REGISTER_LOOPER\n", 3529 proc->pid, thread->pid); 3530 binder_inner_proc_lock(proc); 3531 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { 3532 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3533 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", 3534 proc->pid, thread->pid); 3535 } else if (proc->requested_threads == 0) { 3536 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3537 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n", 3538 proc->pid, thread->pid); 3539 } else { 3540 proc->requested_threads--; 3541 proc->requested_threads_started++; 3542 } 3543 thread->looper |= BINDER_LOOPER_STATE_REGISTERED; 3544 binder_inner_proc_unlock(proc); 3545 break; 3546 case BC_ENTER_LOOPER: 3547 binder_debug(BINDER_DEBUG_THREADS, 3548 "%d:%d BC_ENTER_LOOPER\n", 3549 proc->pid, thread->pid); 3550 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { 3551 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3552 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n", 3553 proc->pid, thread->pid); 3554 } 3555 thread->looper |= BINDER_LOOPER_STATE_ENTERED; 3556 break; 3557 case BC_EXIT_LOOPER: 3558 binder_debug(BINDER_DEBUG_THREADS, 3559 "%d:%d BC_EXIT_LOOPER\n", 3560 proc->pid, thread->pid); 3561 thread->looper |= BINDER_LOOPER_STATE_EXITED; 3562 break; 3563 3564 case BC_REQUEST_DEATH_NOTIFICATION: 3565 case BC_CLEAR_DEATH_NOTIFICATION: { 3566 uint32_t target; 3567 binder_uintptr_t cookie; 3568 struct binder_ref *ref; 3569 struct binder_ref_death *death = NULL; 3570 3571 if (get_user(target, (uint32_t __user *)ptr)) 3572 return -EFAULT; 3573 ptr += sizeof(uint32_t); 3574 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3575 return -EFAULT; 3576 ptr += sizeof(binder_uintptr_t); 3577 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 3578 /* 3579 * Allocate memory for death notification 3580 * before taking lock 3581 */ 3582 death = kzalloc(sizeof(*death), 
GFP_KERNEL); 3583 if (death == NULL) { 3584 WARN_ON(thread->return_error.cmd != 3585 BR_OK); 3586 thread->return_error.cmd = BR_ERROR; 3587 binder_enqueue_thread_work( 3588 thread, 3589 &thread->return_error.work); 3590 binder_debug( 3591 BINDER_DEBUG_FAILED_TRANSACTION, 3592 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", 3593 proc->pid, thread->pid); 3594 break; 3595 } 3596 } 3597 binder_proc_lock(proc); 3598 ref = binder_get_ref_olocked(proc, target, false); 3599 if (ref == NULL) { 3600 binder_user_error("%d:%d %s invalid ref %d\n", 3601 proc->pid, thread->pid, 3602 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 3603 "BC_REQUEST_DEATH_NOTIFICATION" : 3604 "BC_CLEAR_DEATH_NOTIFICATION", 3605 target); 3606 binder_proc_unlock(proc); 3607 kfree(death); 3608 break; 3609 } 3610 3611 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 3612 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n", 3613 proc->pid, thread->pid, 3614 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 3615 "BC_REQUEST_DEATH_NOTIFICATION" : 3616 "BC_CLEAR_DEATH_NOTIFICATION", 3617 (u64)cookie, ref->data.debug_id, 3618 ref->data.desc, ref->data.strong, 3619 ref->data.weak, ref->node->debug_id); 3620 3621 binder_node_lock(ref->node); 3622 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 3623 if (ref->death) { 3624 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", 3625 proc->pid, thread->pid); 3626 binder_node_unlock(ref->node); 3627 binder_proc_unlock(proc); 3628 kfree(death); 3629 break; 3630 } 3631 binder_stats_created(BINDER_STAT_DEATH); 3632 INIT_LIST_HEAD(&death->work.entry); 3633 death->cookie = cookie; 3634 ref->death = death; 3635 if (ref->node->proc == NULL) { 3636 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 3637 3638 binder_inner_proc_lock(proc); 3639 binder_enqueue_work_ilocked( 3640 &ref->death->work, &proc->todo); 3641 binder_wakeup_proc_ilocked(proc); 3642 binder_inner_proc_unlock(proc); 3643 } 3644 } else { 3645 if (ref->death == NULL) { 3646 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", 3647 proc->pid, thread->pid); 3648 binder_node_unlock(ref->node); 3649 binder_proc_unlock(proc); 3650 break; 3651 } 3652 death = ref->death; 3653 if (death->cookie != cookie) { 3654 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n", 3655 proc->pid, thread->pid, 3656 (u64)death->cookie, 3657 (u64)cookie); 3658 binder_node_unlock(ref->node); 3659 binder_proc_unlock(proc); 3660 break; 3661 } 3662 ref->death = NULL; 3663 binder_inner_proc_lock(proc); 3664 if (list_empty(&death->work.entry)) { 3665 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 3666 if (thread->looper & 3667 (BINDER_LOOPER_STATE_REGISTERED | 3668 BINDER_LOOPER_STATE_ENTERED)) 3669 binder_enqueue_thread_work_ilocked( 3670 thread, 3671 &death->work); 3672 else { 3673 binder_enqueue_work_ilocked( 3674 &death->work, 3675 &proc->todo); 3676 binder_wakeup_proc_ilocked( 3677 proc); 3678 } 3679 } else { 3680 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); 3681 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; 3682 } 3683 binder_inner_proc_unlock(proc); 3684 } 3685 binder_node_unlock(ref->node); 3686 binder_proc_unlock(proc); 3687 } break; 3688 case BC_DEAD_BINDER_DONE: { 3689 struct binder_work *w; 3690 binder_uintptr_t cookie; 3691 struct binder_ref_death *death = NULL; 3692 3693 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3694 return -EFAULT; 3695 3696 ptr += sizeof(cookie); 3697 binder_inner_proc_lock(proc); 3698 
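			/*
			 * Match the cookie against the death notifications
			 * already delivered to user-space and awaiting a
			 * BC_DEAD_BINDER_DONE ack (proc->delivered_death).
			 */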
list_for_each_entry(w, &proc->delivered_death, 3699 entry) { 3700 struct binder_ref_death *tmp_death = 3701 container_of(w, 3702 struct binder_ref_death, 3703 work); 3704 3705 if (tmp_death->cookie == cookie) { 3706 death = tmp_death; 3707 break; 3708 } 3709 } 3710 binder_debug(BINDER_DEBUG_DEAD_BINDER, 3711 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n", 3712 proc->pid, thread->pid, (u64)cookie, 3713 death); 3714 if (death == NULL) { 3715 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n", 3716 proc->pid, thread->pid, (u64)cookie); 3717 binder_inner_proc_unlock(proc); 3718 break; 3719 } 3720 binder_dequeue_work_ilocked(&death->work); 3721 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { 3722 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 3723 if (thread->looper & 3724 (BINDER_LOOPER_STATE_REGISTERED | 3725 BINDER_LOOPER_STATE_ENTERED)) 3726 binder_enqueue_thread_work_ilocked( 3727 thread, &death->work); 3728 else { 3729 binder_enqueue_work_ilocked( 3730 &death->work, 3731 &proc->todo); 3732 binder_wakeup_proc_ilocked(proc); 3733 } 3734 } 3735 binder_inner_proc_unlock(proc); 3736 } break; 3737 3738 default: 3739 pr_err("%d:%d unknown command %d\n", 3740 proc->pid, thread->pid, cmd); 3741 return -EINVAL; 3742 } 3743 *consumed = ptr - buffer; 3744 } 3745 return 0; 3746 } 3747 3748 static void binder_stat_br(struct binder_proc *proc, 3749 struct binder_thread *thread, uint32_t cmd) 3750 { 3751 trace_binder_return(cmd); 3752 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { 3753 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]); 3754 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]); 3755 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]); 3756 } 3757 } 3758 3759 static int binder_put_node_cmd(struct binder_proc *proc, 3760 struct binder_thread *thread, 3761 void __user **ptrp, 3762 binder_uintptr_t node_ptr, 3763 binder_uintptr_t node_cookie, 3764 int node_debug_id, 3765 uint32_t cmd, const char *cmd_name) 3766 { 3767 void __user *ptr = *ptrp; 3768 3769 if (put_user(cmd, (uint32_t __user *)ptr)) 3770 return -EFAULT; 3771 ptr += sizeof(uint32_t); 3772 3773 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr)) 3774 return -EFAULT; 3775 ptr += sizeof(binder_uintptr_t); 3776 3777 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr)) 3778 return -EFAULT; 3779 ptr += sizeof(binder_uintptr_t); 3780 3781 binder_stat_br(proc, thread, cmd); 3782 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n", 3783 proc->pid, thread->pid, cmd_name, node_debug_id, 3784 (u64)node_ptr, (u64)node_cookie); 3785 3786 *ptrp = ptr; 3787 return 0; 3788 } 3789 3790 static int binder_wait_for_work(struct binder_thread *thread, 3791 bool do_proc_work) 3792 { 3793 DEFINE_WAIT(wait); 3794 struct binder_proc *proc = thread->proc; 3795 int ret = 0; 3796 3797 freezer_do_not_count(); 3798 binder_inner_proc_lock(proc); 3799 for (;;) { 3800 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE); 3801 if (binder_has_work_ilocked(thread, do_proc_work)) 3802 break; 3803 if (do_proc_work) 3804 list_add(&thread->waiting_thread_node, 3805 &proc->waiting_threads); 3806 binder_inner_proc_unlock(proc); 3807 schedule(); 3808 binder_inner_proc_lock(proc); 3809 list_del_init(&thread->waiting_thread_node); 3810 if (signal_pending(current)) { 3811 ret = -ERESTARTSYS; 3812 break; 3813 } 3814 } 3815 finish_wait(&thread->wait, &wait); 3816 binder_inner_proc_unlock(proc); 3817 freezer_count(); 3818 3819 return ret; 3820 } 3821 3822 static int binder_thread_read(struct binder_proc *proc, 3823 struct 
binder_thread *thread, 3824 binder_uintptr_t binder_buffer, size_t size, 3825 binder_size_t *consumed, int non_block) 3826 { 3827 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 3828 void __user *ptr = buffer + *consumed; 3829 void __user *end = buffer + size; 3830 3831 int ret = 0; 3832 int wait_for_proc_work; 3833 3834 if (*consumed == 0) { 3835 if (put_user(BR_NOOP, (uint32_t __user *)ptr)) 3836 return -EFAULT; 3837 ptr += sizeof(uint32_t); 3838 } 3839 3840 retry: 3841 binder_inner_proc_lock(proc); 3842 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); 3843 binder_inner_proc_unlock(proc); 3844 3845 thread->looper |= BINDER_LOOPER_STATE_WAITING; 3846 3847 trace_binder_wait_for_work(wait_for_proc_work, 3848 !!thread->transaction_stack, 3849 !binder_worklist_empty(proc, &thread->todo)); 3850 if (wait_for_proc_work) { 3851 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 3852 BINDER_LOOPER_STATE_ENTERED))) { 3853 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n", 3854 proc->pid, thread->pid, thread->looper); 3855 wait_event_interruptible(binder_user_error_wait, 3856 binder_stop_on_user_error < 2); 3857 } 3858 binder_set_nice(proc->default_priority); 3859 } 3860 3861 if (non_block) { 3862 if (!binder_has_work(thread, wait_for_proc_work)) 3863 ret = -EAGAIN; 3864 } else { 3865 ret = binder_wait_for_work(thread, wait_for_proc_work); 3866 } 3867 3868 thread->looper &= ~BINDER_LOOPER_STATE_WAITING; 3869 3870 if (ret) 3871 return ret; 3872 3873 while (1) { 3874 uint32_t cmd; 3875 struct binder_transaction_data tr; 3876 struct binder_work *w = NULL; 3877 struct list_head *list = NULL; 3878 struct binder_transaction *t = NULL; 3879 struct binder_thread *t_from; 3880 3881 binder_inner_proc_lock(proc); 3882 if (!binder_worklist_empty_ilocked(&thread->todo)) 3883 list = &thread->todo; 3884 else if (!binder_worklist_empty_ilocked(&proc->todo) && 3885 wait_for_proc_work) 3886 list = &proc->todo; 3887 else { 3888 binder_inner_proc_unlock(proc); 3889 3890 /* no data added */ 3891 if (ptr - buffer == 4 && !thread->looper_need_return) 3892 goto retry; 3893 break; 3894 } 3895 3896 if (end - ptr < sizeof(tr) + 4) { 3897 binder_inner_proc_unlock(proc); 3898 break; 3899 } 3900 w = binder_dequeue_work_head_ilocked(list); 3901 if (binder_worklist_empty_ilocked(&thread->todo)) 3902 thread->process_todo = false; 3903 3904 switch (w->type) { 3905 case BINDER_WORK_TRANSACTION: { 3906 binder_inner_proc_unlock(proc); 3907 t = container_of(w, struct binder_transaction, work); 3908 } break; 3909 case BINDER_WORK_RETURN_ERROR: { 3910 struct binder_error *e = container_of( 3911 w, struct binder_error, work); 3912 3913 WARN_ON(e->cmd == BR_OK); 3914 binder_inner_proc_unlock(proc); 3915 if (put_user(e->cmd, (uint32_t __user *)ptr)) 3916 return -EFAULT; 3917 e->cmd = BR_OK; 3918 ptr += sizeof(uint32_t); 3919 3920 binder_stat_br(proc, thread, e->cmd); 3921 } break; 3922 case BINDER_WORK_TRANSACTION_COMPLETE: { 3923 binder_inner_proc_unlock(proc); 3924 cmd = BR_TRANSACTION_COMPLETE; 3925 if (put_user(cmd, (uint32_t __user *)ptr)) 3926 return -EFAULT; 3927 ptr += sizeof(uint32_t); 3928 3929 binder_stat_br(proc, thread, cmd); 3930 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, 3931 "%d:%d BR_TRANSACTION_COMPLETE\n", 3932 proc->pid, thread->pid); 3933 kfree(w); 3934 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 3935 } break; 3936 case BINDER_WORK_NODE: { 3937 struct binder_node *node = 
container_of(w, struct binder_node, work); 3938 int strong, weak; 3939 binder_uintptr_t node_ptr = node->ptr; 3940 binder_uintptr_t node_cookie = node->cookie; 3941 int node_debug_id = node->debug_id; 3942 int has_weak_ref; 3943 int has_strong_ref; 3944 void __user *orig_ptr = ptr; 3945 3946 BUG_ON(proc != node->proc); 3947 strong = node->internal_strong_refs || 3948 node->local_strong_refs; 3949 weak = !hlist_empty(&node->refs) || 3950 node->local_weak_refs || 3951 node->tmp_refs || strong; 3952 has_strong_ref = node->has_strong_ref; 3953 has_weak_ref = node->has_weak_ref; 3954 3955 if (weak && !has_weak_ref) { 3956 node->has_weak_ref = 1; 3957 node->pending_weak_ref = 1; 3958 node->local_weak_refs++; 3959 } 3960 if (strong && !has_strong_ref) { 3961 node->has_strong_ref = 1; 3962 node->pending_strong_ref = 1; 3963 node->local_strong_refs++; 3964 } 3965 if (!strong && has_strong_ref) 3966 node->has_strong_ref = 0; 3967 if (!weak && has_weak_ref) 3968 node->has_weak_ref = 0; 3969 if (!weak && !strong) { 3970 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 3971 "%d:%d node %d u%016llx c%016llx deleted\n", 3972 proc->pid, thread->pid, 3973 node_debug_id, 3974 (u64)node_ptr, 3975 (u64)node_cookie); 3976 rb_erase(&node->rb_node, &proc->nodes); 3977 binder_inner_proc_unlock(proc); 3978 binder_node_lock(node); 3979 /* 3980 * Acquire the node lock before freeing the 3981 * node to serialize with other threads that 3982 * may have been holding the node lock while 3983 * decrementing this node (avoids race where 3984 * this thread frees while the other thread 3985 * is unlocking the node after the final 3986 * decrement) 3987 */ 3988 binder_node_unlock(node); 3989 binder_free_node(node); 3990 } else 3991 binder_inner_proc_unlock(proc); 3992 3993 if (weak && !has_weak_ref) 3994 ret = binder_put_node_cmd( 3995 proc, thread, &ptr, node_ptr, 3996 node_cookie, node_debug_id, 3997 BR_INCREFS, "BR_INCREFS"); 3998 if (!ret && strong && !has_strong_ref) 3999 ret = binder_put_node_cmd( 4000 proc, thread, &ptr, node_ptr, 4001 node_cookie, node_debug_id, 4002 BR_ACQUIRE, "BR_ACQUIRE"); 4003 if (!ret && !strong && has_strong_ref) 4004 ret = binder_put_node_cmd( 4005 proc, thread, &ptr, node_ptr, 4006 node_cookie, node_debug_id, 4007 BR_RELEASE, "BR_RELEASE"); 4008 if (!ret && !weak && has_weak_ref) 4009 ret = binder_put_node_cmd( 4010 proc, thread, &ptr, node_ptr, 4011 node_cookie, node_debug_id, 4012 BR_DECREFS, "BR_DECREFS"); 4013 if (orig_ptr == ptr) 4014 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 4015 "%d:%d node %d u%016llx c%016llx state unchanged\n", 4016 proc->pid, thread->pid, 4017 node_debug_id, 4018 (u64)node_ptr, 4019 (u64)node_cookie); 4020 if (ret) 4021 return ret; 4022 } break; 4023 case BINDER_WORK_DEAD_BINDER: 4024 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 4025 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 4026 struct binder_ref_death *death; 4027 uint32_t cmd; 4028 binder_uintptr_t cookie; 4029 4030 death = container_of(w, struct binder_ref_death, work); 4031 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) 4032 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; 4033 else 4034 cmd = BR_DEAD_BINDER; 4035 cookie = death->cookie; 4036 4037 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 4038 "%d:%d %s %016llx\n", 4039 proc->pid, thread->pid, 4040 cmd == BR_DEAD_BINDER ? 
4041 "BR_DEAD_BINDER" : 4042 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 4043 (u64)cookie); 4044 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { 4045 binder_inner_proc_unlock(proc); 4046 kfree(death); 4047 binder_stats_deleted(BINDER_STAT_DEATH); 4048 } else { 4049 binder_enqueue_work_ilocked( 4050 w, &proc->delivered_death); 4051 binder_inner_proc_unlock(proc); 4052 } 4053 if (put_user(cmd, (uint32_t __user *)ptr)) 4054 return -EFAULT; 4055 ptr += sizeof(uint32_t); 4056 if (put_user(cookie, 4057 (binder_uintptr_t __user *)ptr)) 4058 return -EFAULT; 4059 ptr += sizeof(binder_uintptr_t); 4060 binder_stat_br(proc, thread, cmd); 4061 if (cmd == BR_DEAD_BINDER) 4062 goto done; /* DEAD_BINDER notifications can cause transactions */ 4063 } break; 4064 } 4065 4066 if (!t) 4067 continue; 4068 4069 BUG_ON(t->buffer == NULL); 4070 if (t->buffer->target_node) { 4071 struct binder_node *target_node = t->buffer->target_node; 4072 4073 tr.target.ptr = target_node->ptr; 4074 tr.cookie = target_node->cookie; 4075 t->saved_priority = task_nice(current); 4076 if (t->priority < target_node->min_priority && 4077 !(t->flags & TF_ONE_WAY)) 4078 binder_set_nice(t->priority); 4079 else if (!(t->flags & TF_ONE_WAY) || 4080 t->saved_priority > target_node->min_priority) 4081 binder_set_nice(target_node->min_priority); 4082 cmd = BR_TRANSACTION; 4083 } else { 4084 tr.target.ptr = 0; 4085 tr.cookie = 0; 4086 cmd = BR_REPLY; 4087 } 4088 tr.code = t->code; 4089 tr.flags = t->flags; 4090 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); 4091 4092 t_from = binder_get_txn_from(t); 4093 if (t_from) { 4094 struct task_struct *sender = t_from->proc->tsk; 4095 4096 tr.sender_pid = task_tgid_nr_ns(sender, 4097 task_active_pid_ns(current)); 4098 } else { 4099 tr.sender_pid = 0; 4100 } 4101 4102 tr.data_size = t->buffer->data_size; 4103 tr.offsets_size = t->buffer->offsets_size; 4104 tr.data.ptr.buffer = (binder_uintptr_t) 4105 ((uintptr_t)t->buffer->data + 4106 binder_alloc_get_user_buffer_offset(&proc->alloc)); 4107 tr.data.ptr.offsets = tr.data.ptr.buffer + 4108 ALIGN(t->buffer->data_size, 4109 sizeof(void *)); 4110 4111 if (put_user(cmd, (uint32_t __user *)ptr)) { 4112 if (t_from) 4113 binder_thread_dec_tmpref(t_from); 4114 4115 binder_cleanup_transaction(t, "put_user failed", 4116 BR_FAILED_REPLY); 4117 4118 return -EFAULT; 4119 } 4120 ptr += sizeof(uint32_t); 4121 if (copy_to_user(ptr, &tr, sizeof(tr))) { 4122 if (t_from) 4123 binder_thread_dec_tmpref(t_from); 4124 4125 binder_cleanup_transaction(t, "copy_to_user failed", 4126 BR_FAILED_REPLY); 4127 4128 return -EFAULT; 4129 } 4130 ptr += sizeof(tr); 4131 4132 trace_binder_transaction_received(t); 4133 binder_stat_br(proc, thread, cmd); 4134 binder_debug(BINDER_DEBUG_TRANSACTION, 4135 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", 4136 proc->pid, thread->pid, 4137 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : 4138 "BR_REPLY", 4139 t->debug_id, t_from ? t_from->proc->pid : 0, 4140 t_from ? 
t_from->pid : 0, cmd, 4141 t->buffer->data_size, t->buffer->offsets_size, 4142 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets); 4143 4144 if (t_from) 4145 binder_thread_dec_tmpref(t_from); 4146 t->buffer->allow_user_free = 1; 4147 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { 4148 binder_inner_proc_lock(thread->proc); 4149 t->to_parent = thread->transaction_stack; 4150 t->to_thread = thread; 4151 thread->transaction_stack = t; 4152 binder_inner_proc_unlock(thread->proc); 4153 } else { 4154 binder_free_transaction(t); 4155 } 4156 break; 4157 } 4158 4159 done: 4160 4161 *consumed = ptr - buffer; 4162 binder_inner_proc_lock(proc); 4163 if (proc->requested_threads == 0 && 4164 list_empty(&thread->proc->waiting_threads) && 4165 proc->requested_threads_started < proc->max_threads && 4166 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 4167 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ 4168 /*spawn a new thread if we leave this out */) { 4169 proc->requested_threads++; 4170 binder_inner_proc_unlock(proc); 4171 binder_debug(BINDER_DEBUG_THREADS, 4172 "%d:%d BR_SPAWN_LOOPER\n", 4173 proc->pid, thread->pid); 4174 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) 4175 return -EFAULT; 4176 binder_stat_br(proc, thread, BR_SPAWN_LOOPER); 4177 } else 4178 binder_inner_proc_unlock(proc); 4179 return 0; 4180 } 4181 4182 static void binder_release_work(struct binder_proc *proc, 4183 struct list_head *list) 4184 { 4185 struct binder_work *w; 4186 4187 while (1) { 4188 w = binder_dequeue_work_head(proc, list); 4189 if (!w) 4190 return; 4191 4192 switch (w->type) { 4193 case BINDER_WORK_TRANSACTION: { 4194 struct binder_transaction *t; 4195 4196 t = container_of(w, struct binder_transaction, work); 4197 4198 binder_cleanup_transaction(t, "process died.", 4199 BR_DEAD_REPLY); 4200 } break; 4201 case BINDER_WORK_RETURN_ERROR: { 4202 struct binder_error *e = container_of( 4203 w, struct binder_error, work); 4204 4205 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4206 "undelivered TRANSACTION_ERROR: %u\n", 4207 e->cmd); 4208 } break; 4209 case BINDER_WORK_TRANSACTION_COMPLETE: { 4210 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4211 "undelivered TRANSACTION_COMPLETE\n"); 4212 kfree(w); 4213 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 4214 } break; 4215 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 4216 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 4217 struct binder_ref_death *death; 4218 4219 death = container_of(w, struct binder_ref_death, work); 4220 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4221 "undelivered death notification, %016llx\n", 4222 (u64)death->cookie); 4223 kfree(death); 4224 binder_stats_deleted(BINDER_STAT_DEATH); 4225 } break; 4226 default: 4227 pr_err("unexpected work type, %d, not freed\n", 4228 w->type); 4229 break; 4230 } 4231 } 4232 4233 } 4234 4235 static struct binder_thread *binder_get_thread_ilocked( 4236 struct binder_proc *proc, struct binder_thread *new_thread) 4237 { 4238 struct binder_thread *thread = NULL; 4239 struct rb_node *parent = NULL; 4240 struct rb_node **p = &proc->threads.rb_node; 4241 4242 while (*p) { 4243 parent = *p; 4244 thread = rb_entry(parent, struct binder_thread, rb_node); 4245 4246 if (current->pid < thread->pid) 4247 p = &(*p)->rb_left; 4248 else if (current->pid > thread->pid) 4249 p = &(*p)->rb_right; 4250 else 4251 return thread; 4252 } 4253 if (!new_thread) 4254 return NULL; 4255 thread = new_thread; 4256 binder_stats_created(BINDER_STAT_THREAD); 4257 thread->proc = proc; 4258 thread->pid = current->pid; 
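/*
 * No thread with current->pid exists yet: finish initializing the
 * caller-allocated binder_thread (refcount, wait queue, todo list,
 * default return/reply errors) and link it into the proc->threads
 * rbtree, which is keyed by pid. proc->inner_lock is held by the
 * caller for the entire lookup and insertion.
 */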
4259 atomic_set(&thread->tmp_ref, 0); 4260 init_waitqueue_head(&thread->wait); 4261 INIT_LIST_HEAD(&thread->todo); 4262 rb_link_node(&thread->rb_node, parent, p); 4263 rb_insert_color(&thread->rb_node, &proc->threads); 4264 thread->looper_need_return = true; 4265 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR; 4266 thread->return_error.cmd = BR_OK; 4267 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR; 4268 thread->reply_error.cmd = BR_OK; 4269 INIT_LIST_HEAD(&new_thread->waiting_thread_node); 4270 return thread; 4271 } 4272 4273 static struct binder_thread *binder_get_thread(struct binder_proc *proc) 4274 { 4275 struct binder_thread *thread; 4276 struct binder_thread *new_thread; 4277 4278 binder_inner_proc_lock(proc); 4279 thread = binder_get_thread_ilocked(proc, NULL); 4280 binder_inner_proc_unlock(proc); 4281 if (!thread) { 4282 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL); 4283 if (new_thread == NULL) 4284 return NULL; 4285 binder_inner_proc_lock(proc); 4286 thread = binder_get_thread_ilocked(proc, new_thread); 4287 binder_inner_proc_unlock(proc); 4288 if (thread != new_thread) 4289 kfree(new_thread); 4290 } 4291 return thread; 4292 } 4293 4294 static void binder_free_proc(struct binder_proc *proc) 4295 { 4296 BUG_ON(!list_empty(&proc->todo)); 4297 BUG_ON(!list_empty(&proc->delivered_death)); 4298 binder_alloc_deferred_release(&proc->alloc); 4299 put_task_struct(proc->tsk); 4300 binder_stats_deleted(BINDER_STAT_PROC); 4301 kfree(proc); 4302 } 4303 4304 static void binder_free_thread(struct binder_thread *thread) 4305 { 4306 BUG_ON(!list_empty(&thread->todo)); 4307 binder_stats_deleted(BINDER_STAT_THREAD); 4308 binder_proc_dec_tmpref(thread->proc); 4309 kfree(thread); 4310 } 4311 4312 static int binder_thread_release(struct binder_proc *proc, 4313 struct binder_thread *thread) 4314 { 4315 struct binder_transaction *t; 4316 struct binder_transaction *send_reply = NULL; 4317 int active_transactions = 0; 4318 struct binder_transaction *last_t = NULL; 4319 4320 binder_inner_proc_lock(thread->proc); 4321 /* 4322 * take a ref on the proc so it survives 4323 * after we remove this thread from proc->threads. 4324 * The corresponding dec is when we actually 4325 * free the thread in binder_free_thread() 4326 */ 4327 proc->tmp_ref++; 4328 /* 4329 * take a ref on this thread to ensure it 4330 * survives while we are releasing it 4331 */ 4332 atomic_inc(&thread->tmp_ref); 4333 rb_erase(&thread->rb_node, &proc->threads); 4334 t = thread->transaction_stack; 4335 if (t) { 4336 spin_lock(&t->lock); 4337 if (t->to_thread == thread) 4338 send_reply = t; 4339 } 4340 thread->is_dead = true; 4341 4342 while (t) { 4343 last_t = t; 4344 active_transactions++; 4345 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4346 "release %d:%d transaction %d %s, still active\n", 4347 proc->pid, thread->pid, 4348 t->debug_id, 4349 (t->to_thread == thread) ? 
"in" : "out"); 4350 4351 if (t->to_thread == thread) { 4352 t->to_proc = NULL; 4353 t->to_thread = NULL; 4354 if (t->buffer) { 4355 t->buffer->transaction = NULL; 4356 t->buffer = NULL; 4357 } 4358 t = t->to_parent; 4359 } else if (t->from == thread) { 4360 t->from = NULL; 4361 t = t->from_parent; 4362 } else 4363 BUG(); 4364 spin_unlock(&last_t->lock); 4365 if (t) 4366 spin_lock(&t->lock); 4367 } 4368 binder_inner_proc_unlock(thread->proc); 4369 4370 if (send_reply) 4371 binder_send_failed_reply(send_reply, BR_DEAD_REPLY); 4372 binder_release_work(proc, &thread->todo); 4373 binder_thread_dec_tmpref(thread); 4374 return active_transactions; 4375 } 4376 4377 static unsigned int binder_poll(struct file *filp, 4378 struct poll_table_struct *wait) 4379 { 4380 struct binder_proc *proc = filp->private_data; 4381 struct binder_thread *thread = NULL; 4382 bool wait_for_proc_work; 4383 4384 thread = binder_get_thread(proc); 4385 4386 binder_inner_proc_lock(thread->proc); 4387 thread->looper |= BINDER_LOOPER_STATE_POLL; 4388 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); 4389 4390 binder_inner_proc_unlock(thread->proc); 4391 4392 poll_wait(filp, &thread->wait, wait); 4393 4394 if (binder_has_work(thread, wait_for_proc_work)) 4395 return POLLIN; 4396 4397 return 0; 4398 } 4399 4400 static int binder_ioctl_write_read(struct file *filp, 4401 unsigned int cmd, unsigned long arg, 4402 struct binder_thread *thread) 4403 { 4404 int ret = 0; 4405 struct binder_proc *proc = filp->private_data; 4406 unsigned int size = _IOC_SIZE(cmd); 4407 void __user *ubuf = (void __user *)arg; 4408 struct binder_write_read bwr; 4409 4410 if (size != sizeof(struct binder_write_read)) { 4411 ret = -EINVAL; 4412 goto out; 4413 } 4414 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { 4415 ret = -EFAULT; 4416 goto out; 4417 } 4418 binder_debug(BINDER_DEBUG_READ_WRITE, 4419 "%d:%d write %lld at %016llx, read %lld at %016llx\n", 4420 proc->pid, thread->pid, 4421 (u64)bwr.write_size, (u64)bwr.write_buffer, 4422 (u64)bwr.read_size, (u64)bwr.read_buffer); 4423 4424 if (bwr.write_size > 0) { 4425 ret = binder_thread_write(proc, thread, 4426 bwr.write_buffer, 4427 bwr.write_size, 4428 &bwr.write_consumed); 4429 trace_binder_write_done(ret); 4430 if (ret < 0) { 4431 bwr.read_consumed = 0; 4432 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 4433 ret = -EFAULT; 4434 goto out; 4435 } 4436 } 4437 if (bwr.read_size > 0) { 4438 ret = binder_thread_read(proc, thread, bwr.read_buffer, 4439 bwr.read_size, 4440 &bwr.read_consumed, 4441 filp->f_flags & O_NONBLOCK); 4442 trace_binder_read_done(ret); 4443 binder_inner_proc_lock(proc); 4444 if (!binder_worklist_empty_ilocked(&proc->todo)) 4445 binder_wakeup_proc_ilocked(proc); 4446 binder_inner_proc_unlock(proc); 4447 if (ret < 0) { 4448 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 4449 ret = -EFAULT; 4450 goto out; 4451 } 4452 } 4453 binder_debug(BINDER_DEBUG_READ_WRITE, 4454 "%d:%d wrote %lld of %lld, read return %lld of %lld\n", 4455 proc->pid, thread->pid, 4456 (u64)bwr.write_consumed, (u64)bwr.write_size, 4457 (u64)bwr.read_consumed, (u64)bwr.read_size); 4458 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { 4459 ret = -EFAULT; 4460 goto out; 4461 } 4462 out: 4463 return ret; 4464 } 4465 4466 static int binder_ioctl_set_ctx_mgr(struct file *filp) 4467 { 4468 int ret = 0; 4469 struct binder_proc *proc = filp->private_data; 4470 struct binder_context *context = proc->context; 4471 struct binder_node *new_node; 4472 kuid_t curr_euid = current_euid(); 4473 4474 
mutex_lock(&context->context_mgr_node_lock); 4475 if (context->binder_context_mgr_node) { 4476 pr_err("BINDER_SET_CONTEXT_MGR already set\n"); 4477 ret = -EBUSY; 4478 goto out; 4479 } 4480 ret = security_binder_set_context_mgr(proc->tsk); 4481 if (ret < 0) 4482 goto out; 4483 if (uid_valid(context->binder_context_mgr_uid)) { 4484 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) { 4485 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", 4486 from_kuid(&init_user_ns, curr_euid), 4487 from_kuid(&init_user_ns, 4488 context->binder_context_mgr_uid)); 4489 ret = -EPERM; 4490 goto out; 4491 } 4492 } else { 4493 context->binder_context_mgr_uid = curr_euid; 4494 } 4495 new_node = binder_new_node(proc, NULL); 4496 if (!new_node) { 4497 ret = -ENOMEM; 4498 goto out; 4499 } 4500 binder_node_lock(new_node); 4501 new_node->local_weak_refs++; 4502 new_node->local_strong_refs++; 4503 new_node->has_strong_ref = 1; 4504 new_node->has_weak_ref = 1; 4505 context->binder_context_mgr_node = new_node; 4506 binder_node_unlock(new_node); 4507 binder_put_node(new_node); 4508 out: 4509 mutex_unlock(&context->context_mgr_node_lock); 4510 return ret; 4511 } 4512 4513 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc, 4514 struct binder_node_debug_info *info) 4515 { 4516 struct rb_node *n; 4517 binder_uintptr_t ptr = info->ptr; 4518 4519 memset(info, 0, sizeof(*info)); 4520 4521 binder_inner_proc_lock(proc); 4522 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 4523 struct binder_node *node = rb_entry(n, struct binder_node, 4524 rb_node); 4525 if (node->ptr > ptr) { 4526 info->ptr = node->ptr; 4527 info->cookie = node->cookie; 4528 info->has_strong_ref = node->has_strong_ref; 4529 info->has_weak_ref = node->has_weak_ref; 4530 break; 4531 } 4532 } 4533 binder_inner_proc_unlock(proc); 4534 4535 return 0; 4536 } 4537 4538 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 4539 { 4540 int ret; 4541 struct binder_proc *proc = filp->private_data; 4542 struct binder_thread *thread; 4543 unsigned int size = _IOC_SIZE(cmd); 4544 void __user *ubuf = (void __user *)arg; 4545 4546 /*pr_info("binder_ioctl: %d:%d %x %lx\n", 4547 proc->pid, current->pid, cmd, arg);*/ 4548 4549 binder_selftest_alloc(&proc->alloc); 4550 4551 trace_binder_ioctl(cmd, arg); 4552 4553 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 4554 if (ret) 4555 goto err_unlocked; 4556 4557 thread = binder_get_thread(proc); 4558 if (thread == NULL) { 4559 ret = -ENOMEM; 4560 goto err; 4561 } 4562 4563 switch (cmd) { 4564 case BINDER_WRITE_READ: 4565 ret = binder_ioctl_write_read(filp, cmd, arg, thread); 4566 if (ret) 4567 goto err; 4568 break; 4569 case BINDER_SET_MAX_THREADS: { 4570 int max_threads; 4571 4572 if (copy_from_user(&max_threads, ubuf, 4573 sizeof(max_threads))) { 4574 ret = -EINVAL; 4575 goto err; 4576 } 4577 binder_inner_proc_lock(proc); 4578 proc->max_threads = max_threads; 4579 binder_inner_proc_unlock(proc); 4580 break; 4581 } 4582 case BINDER_SET_CONTEXT_MGR: 4583 ret = binder_ioctl_set_ctx_mgr(filp); 4584 if (ret) 4585 goto err; 4586 break; 4587 case BINDER_THREAD_EXIT: 4588 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", 4589 proc->pid, thread->pid); 4590 binder_thread_release(proc, thread); 4591 thread = NULL; 4592 break; 4593 case BINDER_VERSION: { 4594 struct binder_version __user *ver = ubuf; 4595 4596 if (size != sizeof(struct binder_version)) { 4597 ret = -EINVAL; 4598 goto err; 4599 } 4600 if 
(put_user(BINDER_CURRENT_PROTOCOL_VERSION, 4601 &ver->protocol_version)) { 4602 ret = -EINVAL; 4603 goto err; 4604 } 4605 break; 4606 } 4607 case BINDER_GET_NODE_DEBUG_INFO: { 4608 struct binder_node_debug_info info; 4609 4610 if (copy_from_user(&info, ubuf, sizeof(info))) { 4611 ret = -EFAULT; 4612 goto err; 4613 } 4614 4615 ret = binder_ioctl_get_node_debug_info(proc, &info); 4616 if (ret < 0) 4617 goto err; 4618 4619 if (copy_to_user(ubuf, &info, sizeof(info))) { 4620 ret = -EFAULT; 4621 goto err; 4622 } 4623 break; 4624 } 4625 default: 4626 ret = -EINVAL; 4627 goto err; 4628 } 4629 ret = 0; 4630 err: 4631 if (thread) 4632 thread->looper_need_return = false; 4633 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 4634 if (ret && ret != -ERESTARTSYS) 4635 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); 4636 err_unlocked: 4637 trace_binder_ioctl_done(ret); 4638 return ret; 4639 } 4640 4641 static void binder_vma_open(struct vm_area_struct *vma) 4642 { 4643 struct binder_proc *proc = vma->vm_private_data; 4644 4645 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 4646 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 4647 proc->pid, vma->vm_start, vma->vm_end, 4648 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 4649 (unsigned long)pgprot_val(vma->vm_page_prot)); 4650 } 4651 4652 static void binder_vma_close(struct vm_area_struct *vma) 4653 { 4654 struct binder_proc *proc = vma->vm_private_data; 4655 4656 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 4657 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 4658 proc->pid, vma->vm_start, vma->vm_end, 4659 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 4660 (unsigned long)pgprot_val(vma->vm_page_prot)); 4661 binder_alloc_vma_close(&proc->alloc); 4662 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); 4663 } 4664 4665 static int binder_vm_fault(struct vm_fault *vmf) 4666 { 4667 return VM_FAULT_SIGBUS; 4668 } 4669 4670 static const struct vm_operations_struct binder_vm_ops = { 4671 .open = binder_vma_open, 4672 .close = binder_vma_close, 4673 .fault = binder_vm_fault, 4674 }; 4675 4676 static int binder_mmap(struct file *filp, struct vm_area_struct *vma) 4677 { 4678 int ret; 4679 struct binder_proc *proc = filp->private_data; 4680 const char *failure_string; 4681 4682 if (proc->tsk != current->group_leader) 4683 return -EINVAL; 4684 4685 if ((vma->vm_end - vma->vm_start) > SZ_4M) 4686 vma->vm_end = vma->vm_start + SZ_4M; 4687 4688 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 4689 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", 4690 __func__, proc->pid, vma->vm_start, vma->vm_end, 4691 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 4692 (unsigned long)pgprot_val(vma->vm_page_prot)); 4693 4694 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { 4695 ret = -EPERM; 4696 failure_string = "bad vm_flags"; 4697 goto err_bad_arg; 4698 } 4699 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; 4700 vma->vm_ops = &binder_vm_ops; 4701 vma->vm_private_data = proc; 4702 4703 ret = binder_alloc_mmap_handler(&proc->alloc, vma); 4704 if (ret) 4705 return ret; 4706 mutex_lock(&proc->files_lock); 4707 proc->files = get_files_struct(current); 4708 mutex_unlock(&proc->files_lock); 4709 return 0; 4710 4711 err_bad_arg: 4712 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n", 4713 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); 4714 return ret; 4715 } 4716 4717 static int binder_open(struct inode *nodp, struct file *filp) 4718 { 4719 struct binder_proc *proc; 4720 struct binder_device 
*binder_dev; 4721 4722 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n", 4723 current->group_leader->pid, current->pid); 4724 4725 proc = kzalloc(sizeof(*proc), GFP_KERNEL); 4726 if (proc == NULL) 4727 return -ENOMEM; 4728 spin_lock_init(&proc->inner_lock); 4729 spin_lock_init(&proc->outer_lock); 4730 get_task_struct(current->group_leader); 4731 proc->tsk = current->group_leader; 4732 mutex_init(&proc->files_lock); 4733 INIT_LIST_HEAD(&proc->todo); 4734 proc->default_priority = task_nice(current); 4735 binder_dev = container_of(filp->private_data, struct binder_device, 4736 miscdev); 4737 proc->context = &binder_dev->context; 4738 binder_alloc_init(&proc->alloc); 4739 4740 binder_stats_created(BINDER_STAT_PROC); 4741 proc->pid = current->group_leader->pid; 4742 INIT_LIST_HEAD(&proc->delivered_death); 4743 INIT_LIST_HEAD(&proc->waiting_threads); 4744 filp->private_data = proc; 4745 4746 mutex_lock(&binder_procs_lock); 4747 hlist_add_head(&proc->proc_node, &binder_procs); 4748 mutex_unlock(&binder_procs_lock); 4749 4750 if (binder_debugfs_dir_entry_proc) { 4751 char strbuf[11]; 4752 4753 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 4754 /* 4755 * proc debug entries are shared between contexts, so 4756 * this will fail if the process tries to open the driver 4757 * again with a different context. The printing code will 4758 * anyway print all contexts that a given PID has, so this 4759 * is not a problem. 4760 */ 4761 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO, 4762 binder_debugfs_dir_entry_proc, 4763 (void *)(unsigned long)proc->pid, 4764 &binder_proc_fops); 4765 } 4766 4767 return 0; 4768 } 4769 4770 static int binder_flush(struct file *filp, fl_owner_t id) 4771 { 4772 struct binder_proc *proc = filp->private_data; 4773 4774 binder_defer_work(proc, BINDER_DEFERRED_FLUSH); 4775 4776 return 0; 4777 } 4778 4779 static void binder_deferred_flush(struct binder_proc *proc) 4780 { 4781 struct rb_node *n; 4782 int wake_count = 0; 4783 4784 binder_inner_proc_lock(proc); 4785 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { 4786 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); 4787 4788 thread->looper_need_return = true; 4789 if (thread->looper & BINDER_LOOPER_STATE_WAITING) { 4790 wake_up_interruptible(&thread->wait); 4791 wake_count++; 4792 } 4793 } 4794 binder_inner_proc_unlock(proc); 4795 4796 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 4797 "binder_flush: %d woke %d threads\n", proc->pid, 4798 wake_count); 4799 } 4800 4801 static int binder_release(struct inode *nodp, struct file *filp) 4802 { 4803 struct binder_proc *proc = filp->private_data; 4804 4805 debugfs_remove(proc->debugfs_entry); 4806 binder_defer_work(proc, BINDER_DEFERRED_RELEASE); 4807 4808 return 0; 4809 } 4810 4811 static int binder_node_release(struct binder_node *node, int refs) 4812 { 4813 struct binder_ref *ref; 4814 int death = 0; 4815 struct binder_proc *proc = node->proc; 4816 4817 binder_release_work(proc, &node->async_todo); 4818 4819 binder_node_lock(node); 4820 binder_inner_proc_lock(proc); 4821 binder_dequeue_work_ilocked(&node->work); 4822 /* 4823 * The caller must have taken a temporary ref on the node. 4824 */ 4825 BUG_ON(!node->tmp_refs); 4826 if (hlist_empty(&node->refs) && node->tmp_refs == 1) { 4827 binder_inner_proc_unlock(proc); 4828 binder_node_unlock(node); 4829 binder_free_node(node); 4830 4831 return refs; 4832 } 4833 4834 node->proc = NULL; 4835 node->local_strong_refs = 0; 4836 node->local_weak_refs = 0; 4837
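/*
 * The node still has references, so it cannot be freed here. Detach
 * it from the dying proc, move it onto the global binder_dead_nodes
 * list, and queue BINDER_WORK_DEAD_BINDER for every ref that
 * registered a death notification.
 */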
binder_inner_proc_unlock(proc); 4838 4839 spin_lock(&binder_dead_nodes_lock); 4840 hlist_add_head(&node->dead_node, &binder_dead_nodes); 4841 spin_unlock(&binder_dead_nodes_lock); 4842 4843 hlist_for_each_entry(ref, &node->refs, node_entry) { 4844 refs++; 4845 /* 4846 * Need the node lock to synchronize 4847 * with new notification requests and the 4848 * inner lock to synchronize with queued 4849 * death notifications. 4850 */ 4851 binder_inner_proc_lock(ref->proc); 4852 if (!ref->death) { 4853 binder_inner_proc_unlock(ref->proc); 4854 continue; 4855 } 4856 4857 death++; 4858 4859 BUG_ON(!list_empty(&ref->death->work.entry)); 4860 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 4861 binder_enqueue_work_ilocked(&ref->death->work, 4862 &ref->proc->todo); 4863 binder_wakeup_proc_ilocked(ref->proc); 4864 binder_inner_proc_unlock(ref->proc); 4865 } 4866 4867 binder_debug(BINDER_DEBUG_DEAD_BINDER, 4868 "node %d now dead, refs %d, death %d\n", 4869 node->debug_id, refs, death); 4870 binder_node_unlock(node); 4871 binder_put_node(node); 4872 4873 return refs; 4874 } 4875 4876 static void binder_deferred_release(struct binder_proc *proc) 4877 { 4878 struct binder_context *context = proc->context; 4879 struct rb_node *n; 4880 int threads, nodes, incoming_refs, outgoing_refs, active_transactions; 4881 4882 BUG_ON(proc->files); 4883 4884 mutex_lock(&binder_procs_lock); 4885 hlist_del(&proc->proc_node); 4886 mutex_unlock(&binder_procs_lock); 4887 4888 mutex_lock(&context->context_mgr_node_lock); 4889 if (context->binder_context_mgr_node && 4890 context->binder_context_mgr_node->proc == proc) { 4891 binder_debug(BINDER_DEBUG_DEAD_BINDER, 4892 "%s: %d context_mgr_node gone\n", 4893 __func__, proc->pid); 4894 context->binder_context_mgr_node = NULL; 4895 } 4896 mutex_unlock(&context->context_mgr_node_lock); 4897 binder_inner_proc_lock(proc); 4898 /* 4899 * Make sure proc stays alive after we 4900 * remove all the threads 4901 */ 4902 proc->tmp_ref++; 4903 4904 proc->is_dead = true; 4905 threads = 0; 4906 active_transactions = 0; 4907 while ((n = rb_first(&proc->threads))) { 4908 struct binder_thread *thread; 4909 4910 thread = rb_entry(n, struct binder_thread, rb_node); 4911 binder_inner_proc_unlock(proc); 4912 threads++; 4913 active_transactions += binder_thread_release(proc, thread); 4914 binder_inner_proc_lock(proc); 4915 } 4916 4917 nodes = 0; 4918 incoming_refs = 0; 4919 while ((n = rb_first(&proc->nodes))) { 4920 struct binder_node *node; 4921 4922 node = rb_entry(n, struct binder_node, rb_node); 4923 nodes++; 4924 /* 4925 * take a temporary ref on the node before 4926 * calling binder_node_release() which will either 4927 * kfree() the node or call binder_put_node() 4928 */ 4929 binder_inc_node_tmpref_ilocked(node); 4930 rb_erase(&node->rb_node, &proc->nodes); 4931 binder_inner_proc_unlock(proc); 4932 incoming_refs = binder_node_release(node, incoming_refs); 4933 binder_inner_proc_lock(proc); 4934 } 4935 binder_inner_proc_unlock(proc); 4936 4937 outgoing_refs = 0; 4938 binder_proc_lock(proc); 4939 while ((n = rb_first(&proc->refs_by_desc))) { 4940 struct binder_ref *ref; 4941 4942 ref = rb_entry(n, struct binder_ref, rb_node_desc); 4943 outgoing_refs++; 4944 binder_cleanup_ref_olocked(ref); 4945 binder_proc_unlock(proc); 4946 binder_free_ref(ref); 4947 binder_proc_lock(proc); 4948 } 4949 binder_proc_unlock(proc); 4950 4951 binder_release_work(proc, &proc->todo); 4952 binder_release_work(proc, &proc->delivered_death); 4953 4954 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 4955 "%s: %d threads %d, nodes 
%d (ref %d), refs %d, active transactions %d\n", 4956 __func__, proc->pid, threads, nodes, incoming_refs, 4957 outgoing_refs, active_transactions); 4958 4959 binder_proc_dec_tmpref(proc); 4960 } 4961 4962 static void binder_deferred_func(struct work_struct *work) 4963 { 4964 struct binder_proc *proc; 4965 struct files_struct *files; 4966 4967 int defer; 4968 4969 do { 4970 mutex_lock(&binder_deferred_lock); 4971 if (!hlist_empty(&binder_deferred_list)) { 4972 proc = hlist_entry(binder_deferred_list.first, 4973 struct binder_proc, deferred_work_node); 4974 hlist_del_init(&proc->deferred_work_node); 4975 defer = proc->deferred_work; 4976 proc->deferred_work = 0; 4977 } else { 4978 proc = NULL; 4979 defer = 0; 4980 } 4981 mutex_unlock(&binder_deferred_lock); 4982 4983 files = NULL; 4984 if (defer & BINDER_DEFERRED_PUT_FILES) { 4985 mutex_lock(&proc->files_lock); 4986 files = proc->files; 4987 if (files) 4988 proc->files = NULL; 4989 mutex_unlock(&proc->files_lock); 4990 } 4991 4992 if (defer & BINDER_DEFERRED_FLUSH) 4993 binder_deferred_flush(proc); 4994 4995 if (defer & BINDER_DEFERRED_RELEASE) 4996 binder_deferred_release(proc); /* frees proc */ 4997 4998 if (files) 4999 put_files_struct(files); 5000 } while (proc); 5001 } 5002 static DECLARE_WORK(binder_deferred_work, binder_deferred_func); 5003 5004 static void 5005 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) 5006 { 5007 mutex_lock(&binder_deferred_lock); 5008 proc->deferred_work |= defer; 5009 if (hlist_unhashed(&proc->deferred_work_node)) { 5010 hlist_add_head(&proc->deferred_work_node, 5011 &binder_deferred_list); 5012 schedule_work(&binder_deferred_work); 5013 } 5014 mutex_unlock(&binder_deferred_lock); 5015 } 5016 5017 static void print_binder_transaction_ilocked(struct seq_file *m, 5018 struct binder_proc *proc, 5019 const char *prefix, 5020 struct binder_transaction *t) 5021 { 5022 struct binder_proc *to_proc; 5023 struct binder_buffer *buffer = t->buffer; 5024 5025 spin_lock(&t->lock); 5026 to_proc = t->to_proc; 5027 seq_printf(m, 5028 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", 5029 prefix, t->debug_id, t, 5030 t->from ? t->from->proc->pid : 0, 5031 t->from ? t->from->pid : 0, 5032 to_proc ? to_proc->pid : 0, 5033 t->to_thread ? 
t->to_thread->pid : 0, 5034 t->code, t->flags, t->priority, t->need_reply); 5035 spin_unlock(&t->lock); 5036 5037 if (proc != to_proc) { 5038 /* 5039 * Can only safely deref buffer if we are holding the 5040 * correct proc inner lock for this node 5041 */ 5042 seq_puts(m, "\n"); 5043 return; 5044 } 5045 5046 if (buffer == NULL) { 5047 seq_puts(m, " buffer free\n"); 5048 return; 5049 } 5050 if (buffer->target_node) 5051 seq_printf(m, " node %d", buffer->target_node->debug_id); 5052 seq_printf(m, " size %zd:%zd data %p\n", 5053 buffer->data_size, buffer->offsets_size, 5054 buffer->data); 5055 } 5056 5057 static void print_binder_work_ilocked(struct seq_file *m, 5058 struct binder_proc *proc, 5059 const char *prefix, 5060 const char *transaction_prefix, 5061 struct binder_work *w) 5062 { 5063 struct binder_node *node; 5064 struct binder_transaction *t; 5065 5066 switch (w->type) { 5067 case BINDER_WORK_TRANSACTION: 5068 t = container_of(w, struct binder_transaction, work); 5069 print_binder_transaction_ilocked( 5070 m, proc, transaction_prefix, t); 5071 break; 5072 case BINDER_WORK_RETURN_ERROR: { 5073 struct binder_error *e = container_of( 5074 w, struct binder_error, work); 5075 5076 seq_printf(m, "%stransaction error: %u\n", 5077 prefix, e->cmd); 5078 } break; 5079 case BINDER_WORK_TRANSACTION_COMPLETE: 5080 seq_printf(m, "%stransaction complete\n", prefix); 5081 break; 5082 case BINDER_WORK_NODE: 5083 node = container_of(w, struct binder_node, work); 5084 seq_printf(m, "%snode work %d: u%016llx c%016llx\n", 5085 prefix, node->debug_id, 5086 (u64)node->ptr, (u64)node->cookie); 5087 break; 5088 case BINDER_WORK_DEAD_BINDER: 5089 seq_printf(m, "%shas dead binder\n", prefix); 5090 break; 5091 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 5092 seq_printf(m, "%shas cleared dead binder\n", prefix); 5093 break; 5094 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: 5095 seq_printf(m, "%shas cleared death notification\n", prefix); 5096 break; 5097 default: 5098 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); 5099 break; 5100 } 5101 } 5102 5103 static void print_binder_thread_ilocked(struct seq_file *m, 5104 struct binder_thread *thread, 5105 int print_always) 5106 { 5107 struct binder_transaction *t; 5108 struct binder_work *w; 5109 size_t start_pos = m->count; 5110 size_t header_pos; 5111 5112 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n", 5113 thread->pid, thread->looper, 5114 thread->looper_need_return, 5115 atomic_read(&thread->tmp_ref)); 5116 header_pos = m->count; 5117 t = thread->transaction_stack; 5118 while (t) { 5119 if (t->from == thread) { 5120 print_binder_transaction_ilocked(m, thread->proc, 5121 " outgoing transaction", t); 5122 t = t->from_parent; 5123 } else if (t->to_thread == thread) { 5124 print_binder_transaction_ilocked(m, thread->proc, 5125 " incoming transaction", t); 5126 t = t->to_parent; 5127 } else { 5128 print_binder_transaction_ilocked(m, thread->proc, 5129 " bad transaction", t); 5130 t = NULL; 5131 } 5132 } 5133 list_for_each_entry(w, &thread->todo, entry) { 5134 print_binder_work_ilocked(m, thread->proc, " ", 5135 " pending transaction", w); 5136 } 5137 if (!print_always && m->count == header_pos) 5138 m->count = start_pos; 5139 } 5140 5141 static void print_binder_node_nilocked(struct seq_file *m, 5142 struct binder_node *node) 5143 { 5144 struct binder_ref *ref; 5145 struct binder_work *w; 5146 int count; 5147 5148 count = 0; 5149 hlist_for_each_entry(ref, &node->refs, node_entry) 5150 count++; 5151 5152 seq_printf(m, " node %d: u%016llx c%016llx hs 
%d hw %d ls %d lw %d is %d iw %d tr %d", 5153 node->debug_id, (u64)node->ptr, (u64)node->cookie, 5154 node->has_strong_ref, node->has_weak_ref, 5155 node->local_strong_refs, node->local_weak_refs, 5156 node->internal_strong_refs, count, node->tmp_refs); 5157 if (count) { 5158 seq_puts(m, " proc"); 5159 hlist_for_each_entry(ref, &node->refs, node_entry) 5160 seq_printf(m, " %d", ref->proc->pid); 5161 } 5162 seq_puts(m, "\n"); 5163 if (node->proc) { 5164 list_for_each_entry(w, &node->async_todo, entry) 5165 print_binder_work_ilocked(m, node->proc, " ", 5166 " pending async transaction", w); 5167 } 5168 } 5169 5170 static void print_binder_ref_olocked(struct seq_file *m, 5171 struct binder_ref *ref) 5172 { 5173 binder_node_lock(ref->node); 5174 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n", 5175 ref->data.debug_id, ref->data.desc, 5176 ref->node->proc ? "" : "dead ", 5177 ref->node->debug_id, ref->data.strong, 5178 ref->data.weak, ref->death); 5179 binder_node_unlock(ref->node); 5180 } 5181 5182 static void print_binder_proc(struct seq_file *m, 5183 struct binder_proc *proc, int print_all) 5184 { 5185 struct binder_work *w; 5186 struct rb_node *n; 5187 size_t start_pos = m->count; 5188 size_t header_pos; 5189 struct binder_node *last_node = NULL; 5190 5191 seq_printf(m, "proc %d\n", proc->pid); 5192 seq_printf(m, "context %s\n", proc->context->name); 5193 header_pos = m->count; 5194 5195 binder_inner_proc_lock(proc); 5196 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 5197 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread, 5198 rb_node), print_all); 5199 5200 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 5201 struct binder_node *node = rb_entry(n, struct binder_node, 5202 rb_node); 5203 /* 5204 * take a temporary reference on the node so it 5205 * survives and isn't removed from the tree 5206 * while we print it. 
5207 */ 5208 binder_inc_node_tmpref_ilocked(node); 5209 /* Need to drop inner lock to take node lock */ 5210 binder_inner_proc_unlock(proc); 5211 if (last_node) 5212 binder_put_node(last_node); 5213 binder_node_inner_lock(node); 5214 print_binder_node_nilocked(m, node); 5215 binder_node_inner_unlock(node); 5216 last_node = node; 5217 binder_inner_proc_lock(proc); 5218 } 5219 binder_inner_proc_unlock(proc); 5220 if (last_node) 5221 binder_put_node(last_node); 5222 5223 if (print_all) { 5224 binder_proc_lock(proc); 5225 for (n = rb_first(&proc->refs_by_desc); 5226 n != NULL; 5227 n = rb_next(n)) 5228 print_binder_ref_olocked(m, rb_entry(n, 5229 struct binder_ref, 5230 rb_node_desc)); 5231 binder_proc_unlock(proc); 5232 } 5233 binder_alloc_print_allocated(m, &proc->alloc); 5234 binder_inner_proc_lock(proc); 5235 list_for_each_entry(w, &proc->todo, entry) 5236 print_binder_work_ilocked(m, proc, " ", 5237 " pending transaction", w); 5238 list_for_each_entry(w, &proc->delivered_death, entry) { 5239 seq_puts(m, " has delivered dead binder\n"); 5240 break; 5241 } 5242 binder_inner_proc_unlock(proc); 5243 if (!print_all && m->count == header_pos) 5244 m->count = start_pos; 5245 } 5246 5247 static const char * const binder_return_strings[] = { 5248 "BR_ERROR", 5249 "BR_OK", 5250 "BR_TRANSACTION", 5251 "BR_REPLY", 5252 "BR_ACQUIRE_RESULT", 5253 "BR_DEAD_REPLY", 5254 "BR_TRANSACTION_COMPLETE", 5255 "BR_INCREFS", 5256 "BR_ACQUIRE", 5257 "BR_RELEASE", 5258 "BR_DECREFS", 5259 "BR_ATTEMPT_ACQUIRE", 5260 "BR_NOOP", 5261 "BR_SPAWN_LOOPER", 5262 "BR_FINISHED", 5263 "BR_DEAD_BINDER", 5264 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 5265 "BR_FAILED_REPLY" 5266 }; 5267 5268 static const char * const binder_command_strings[] = { 5269 "BC_TRANSACTION", 5270 "BC_REPLY", 5271 "BC_ACQUIRE_RESULT", 5272 "BC_FREE_BUFFER", 5273 "BC_INCREFS", 5274 "BC_ACQUIRE", 5275 "BC_RELEASE", 5276 "BC_DECREFS", 5277 "BC_INCREFS_DONE", 5278 "BC_ACQUIRE_DONE", 5279 "BC_ATTEMPT_ACQUIRE", 5280 "BC_REGISTER_LOOPER", 5281 "BC_ENTER_LOOPER", 5282 "BC_EXIT_LOOPER", 5283 "BC_REQUEST_DEATH_NOTIFICATION", 5284 "BC_CLEAR_DEATH_NOTIFICATION", 5285 "BC_DEAD_BINDER_DONE", 5286 "BC_TRANSACTION_SG", 5287 "BC_REPLY_SG", 5288 }; 5289 5290 static const char * const binder_objstat_strings[] = { 5291 "proc", 5292 "thread", 5293 "node", 5294 "ref", 5295 "death", 5296 "transaction", 5297 "transaction_complete" 5298 }; 5299 5300 static void print_binder_stats(struct seq_file *m, const char *prefix, 5301 struct binder_stats *stats) 5302 { 5303 int i; 5304 5305 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != 5306 ARRAY_SIZE(binder_command_strings)); 5307 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { 5308 int temp = atomic_read(&stats->bc[i]); 5309 5310 if (temp) 5311 seq_printf(m, "%s%s: %d\n", prefix, 5312 binder_command_strings[i], temp); 5313 } 5314 5315 BUILD_BUG_ON(ARRAY_SIZE(stats->br) != 5316 ARRAY_SIZE(binder_return_strings)); 5317 for (i = 0; i < ARRAY_SIZE(stats->br); i++) { 5318 int temp = atomic_read(&stats->br[i]); 5319 5320 if (temp) 5321 seq_printf(m, "%s%s: %d\n", prefix, 5322 binder_return_strings[i], temp); 5323 } 5324 5325 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 5326 ARRAY_SIZE(binder_objstat_strings)); 5327 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 5328 ARRAY_SIZE(stats->obj_deleted)); 5329 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { 5330 int created = atomic_read(&stats->obj_created[i]); 5331 int deleted = atomic_read(&stats->obj_deleted[i]); 5332 5333 if (created || deleted) 5334 seq_printf(m, "%s%s: active %d total %d\n", 5335 
prefix, 5336 binder_objstat_strings[i], 5337 created - deleted, 5338 created); 5339 } 5340 } 5341 5342 static void print_binder_proc_stats(struct seq_file *m, 5343 struct binder_proc *proc) 5344 { 5345 struct binder_work *w; 5346 struct binder_thread *thread; 5347 struct rb_node *n; 5348 int count, strong, weak, ready_threads; 5349 size_t free_async_space = 5350 binder_alloc_get_free_async_space(&proc->alloc); 5351 5352 seq_printf(m, "proc %d\n", proc->pid); 5353 seq_printf(m, "context %s\n", proc->context->name); 5354 count = 0; 5355 ready_threads = 0; 5356 binder_inner_proc_lock(proc); 5357 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 5358 count++; 5359 5360 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node) 5361 ready_threads++; 5362 5363 seq_printf(m, " threads: %d\n", count); 5364 seq_printf(m, " requested threads: %d+%d/%d\n" 5365 " ready threads %d\n" 5366 " free async space %zd\n", proc->requested_threads, 5367 proc->requested_threads_started, proc->max_threads, 5368 ready_threads, 5369 free_async_space); 5370 count = 0; 5371 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) 5372 count++; 5373 binder_inner_proc_unlock(proc); 5374 seq_printf(m, " nodes: %d\n", count); 5375 count = 0; 5376 strong = 0; 5377 weak = 0; 5378 binder_proc_lock(proc); 5379 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { 5380 struct binder_ref *ref = rb_entry(n, struct binder_ref, 5381 rb_node_desc); 5382 count++; 5383 strong += ref->data.strong; 5384 weak += ref->data.weak; 5385 } 5386 binder_proc_unlock(proc); 5387 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); 5388 5389 count = binder_alloc_get_allocated_count(&proc->alloc); 5390 seq_printf(m, " buffers: %d\n", count); 5391 5392 binder_alloc_print_pages(m, &proc->alloc); 5393 5394 count = 0; 5395 binder_inner_proc_lock(proc); 5396 list_for_each_entry(w, &proc->todo, entry) { 5397 if (w->type == BINDER_WORK_TRANSACTION) 5398 count++; 5399 } 5400 binder_inner_proc_unlock(proc); 5401 seq_printf(m, " pending transactions: %d\n", count); 5402 5403 print_binder_stats(m, " ", &proc->stats); 5404 } 5405 5406 5407 static int binder_state_show(struct seq_file *m, void *unused) 5408 { 5409 struct binder_proc *proc; 5410 struct binder_node *node; 5411 struct binder_node *last_node = NULL; 5412 5413 seq_puts(m, "binder state:\n"); 5414 5415 spin_lock(&binder_dead_nodes_lock); 5416 if (!hlist_empty(&binder_dead_nodes)) 5417 seq_puts(m, "dead nodes:\n"); 5418 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) { 5419 /* 5420 * take a temporary reference on the node so it 5421 * survives and isn't removed from the list 5422 * while we print it. 
5423 */ 5424 node->tmp_refs++; 5425 spin_unlock(&binder_dead_nodes_lock); 5426 if (last_node) 5427 binder_put_node(last_node); 5428 binder_node_lock(node); 5429 print_binder_node_nilocked(m, node); 5430 binder_node_unlock(node); 5431 last_node = node; 5432 spin_lock(&binder_dead_nodes_lock); 5433 } 5434 spin_unlock(&binder_dead_nodes_lock); 5435 if (last_node) 5436 binder_put_node(last_node); 5437 5438 mutex_lock(&binder_procs_lock); 5439 hlist_for_each_entry(proc, &binder_procs, proc_node) 5440 print_binder_proc(m, proc, 1); 5441 mutex_unlock(&binder_procs_lock); 5442 5443 return 0; 5444 } 5445 5446 static int binder_stats_show(struct seq_file *m, void *unused) 5447 { 5448 struct binder_proc *proc; 5449 5450 seq_puts(m, "binder stats:\n"); 5451 5452 print_binder_stats(m, "", &binder_stats); 5453 5454 mutex_lock(&binder_procs_lock); 5455 hlist_for_each_entry(proc, &binder_procs, proc_node) 5456 print_binder_proc_stats(m, proc); 5457 mutex_unlock(&binder_procs_lock); 5458 5459 return 0; 5460 } 5461 5462 static int binder_transactions_show(struct seq_file *m, void *unused) 5463 { 5464 struct binder_proc *proc; 5465 5466 seq_puts(m, "binder transactions:\n"); 5467 mutex_lock(&binder_procs_lock); 5468 hlist_for_each_entry(proc, &binder_procs, proc_node) 5469 print_binder_proc(m, proc, 0); 5470 mutex_unlock(&binder_procs_lock); 5471 5472 return 0; 5473 } 5474 5475 static int binder_proc_show(struct seq_file *m, void *unused) 5476 { 5477 struct binder_proc *itr; 5478 int pid = (unsigned long)m->private; 5479 5480 mutex_lock(&binder_procs_lock); 5481 hlist_for_each_entry(itr, &binder_procs, proc_node) { 5482 if (itr->pid == pid) { 5483 seq_puts(m, "binder proc state:\n"); 5484 print_binder_proc(m, itr, 1); 5485 } 5486 } 5487 mutex_unlock(&binder_procs_lock); 5488 5489 return 0; 5490 } 5491 5492 static void print_binder_transaction_log_entry(struct seq_file *m, 5493 struct binder_transaction_log_entry *e) 5494 { 5495 int debug_id = READ_ONCE(e->debug_id_done); 5496 /* 5497 * read barrier to guarantee debug_id_done read before 5498 * we print the log values 5499 */ 5500 smp_rmb(); 5501 seq_printf(m, 5502 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d", 5503 e->debug_id, (e->call_type == 2) ? "reply" : 5504 ((e->call_type == 1) ? "async" : "call "), e->from_proc, 5505 e->from_thread, e->to_proc, e->to_thread, e->context_name, 5506 e->to_node, e->target_handle, e->data_size, e->offsets_size, 5507 e->return_error, e->return_error_param, 5508 e->return_error_line); 5509 /* 5510 * read-barrier to guarantee read of debug_id_done after 5511 * done printing the fields of the entry 5512 */ 5513 smp_rmb(); 5514 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ? 5515 "\n" : " (incomplete)\n"); 5516 } 5517 5518 static int binder_transaction_log_show(struct seq_file *m, void *unused) 5519 { 5520 struct binder_transaction_log *log = m->private; 5521 unsigned int log_cur = atomic_read(&log->cur); 5522 unsigned int count; 5523 unsigned int cur; 5524 int i; 5525 5526 count = log_cur + 1; 5527 cur = count < ARRAY_SIZE(log->entry) && !log->full ? 
5528 0 : count % ARRAY_SIZE(log->entry); 5529 if (count > ARRAY_SIZE(log->entry) || log->full) 5530 count = ARRAY_SIZE(log->entry); 5531 for (i = 0; i < count; i++) { 5532 unsigned int index = cur++ % ARRAY_SIZE(log->entry); 5533 5534 print_binder_transaction_log_entry(m, &log->entry[index]); 5535 } 5536 return 0; 5537 } 5538 5539 static const struct file_operations binder_fops = { 5540 .owner = THIS_MODULE, 5541 .poll = binder_poll, 5542 .unlocked_ioctl = binder_ioctl, 5543 .compat_ioctl = binder_ioctl, 5544 .mmap = binder_mmap, 5545 .open = binder_open, 5546 .flush = binder_flush, 5547 .release = binder_release, 5548 }; 5549 5550 BINDER_DEBUG_ENTRY(state); 5551 BINDER_DEBUG_ENTRY(stats); 5552 BINDER_DEBUG_ENTRY(transactions); 5553 BINDER_DEBUG_ENTRY(transaction_log); 5554 5555 static int __init init_binder_device(const char *name) 5556 { 5557 int ret; 5558 struct binder_device *binder_device; 5559 5560 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL); 5561 if (!binder_device) 5562 return -ENOMEM; 5563 5564 binder_device->miscdev.fops = &binder_fops; 5565 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR; 5566 binder_device->miscdev.name = name; 5567 5568 binder_device->context.binder_context_mgr_uid = INVALID_UID; 5569 binder_device->context.name = name; 5570 mutex_init(&binder_device->context.context_mgr_node_lock); 5571 5572 ret = misc_register(&binder_device->miscdev); 5573 if (ret < 0) { 5574 kfree(binder_device); 5575 return ret; 5576 } 5577 5578 hlist_add_head(&binder_device->hlist, &binder_devices); 5579 5580 return ret; 5581 } 5582 5583 static int __init binder_init(void) 5584 { 5585 int ret; 5586 char *device_name, *device_names, *device_tmp; 5587 struct binder_device *device; 5588 struct hlist_node *tmp; 5589 5590 ret = binder_alloc_shrinker_init(); 5591 if (ret) 5592 return ret; 5593 5594 atomic_set(&binder_transaction_log.cur, ~0U); 5595 atomic_set(&binder_transaction_log_failed.cur, ~0U); 5596 5597 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); 5598 if (binder_debugfs_dir_entry_root) 5599 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", 5600 binder_debugfs_dir_entry_root); 5601 5602 if (binder_debugfs_dir_entry_root) { 5603 debugfs_create_file("state", 5604 S_IRUGO, 5605 binder_debugfs_dir_entry_root, 5606 NULL, 5607 &binder_state_fops); 5608 debugfs_create_file("stats", 5609 S_IRUGO, 5610 binder_debugfs_dir_entry_root, 5611 NULL, 5612 &binder_stats_fops); 5613 debugfs_create_file("transactions", 5614 S_IRUGO, 5615 binder_debugfs_dir_entry_root, 5616 NULL, 5617 &binder_transactions_fops); 5618 debugfs_create_file("transaction_log", 5619 S_IRUGO, 5620 binder_debugfs_dir_entry_root, 5621 &binder_transaction_log, 5622 &binder_transaction_log_fops); 5623 debugfs_create_file("failed_transaction_log", 5624 S_IRUGO, 5625 binder_debugfs_dir_entry_root, 5626 &binder_transaction_log_failed, 5627 &binder_transaction_log_fops); 5628 } 5629 5630 /* 5631 * Copy the module_parameter string, because we don't want to 5632 * tokenize it in-place. 
5633 */ 5634 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL); 5635 if (!device_names) { 5636 ret = -ENOMEM; 5637 goto err_alloc_device_names_failed; 5638 } 5639 strcpy(device_names, binder_devices_param); 5640 5641 device_tmp = device_names; 5642 while ((device_name = strsep(&device_tmp, ","))) { 5643 ret = init_binder_device(device_name); 5644 if (ret) 5645 goto err_init_binder_device_failed; 5646 } 5647 5648 return ret; 5649 5650 err_init_binder_device_failed: 5651 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) { 5652 misc_deregister(&device->miscdev); 5653 hlist_del(&device->hlist); 5654 kfree(device); 5655 } 5656 5657 kfree(device_names); 5658 5659 err_alloc_device_names_failed: 5660 debugfs_remove_recursive(binder_debugfs_dir_entry_root); 5661 5662 return ret; 5663 } 5664 5665 device_initcall(binder_init); 5666 5667 #define CREATE_TRACE_POINTS 5668 #include "binder_trace.h" 5669 5670 MODULE_LICENSE("GPL v2"); 5671
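/*
 * Illustrative userspace sketch of the ioctl protocol implemented
 * above (not part of the driver). It assumes the default device name
 * "binder" from CONFIG_ANDROID_BINDER_DEVICES; the commands and
 * structures come from uapi/linux/android/binder.h as used in this
 * file:
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	struct binder_version vers;
 *	ioctl(fd, BINDER_VERSION, &vers);    // binder_ioctl(): BINDER_VERSION
 *	mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);  // binder_mmap()
 *	uint32_t cmd = BC_ENTER_LOOPER;
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)&cmd,
 *		.write_size = sizeof(cmd),
 *	};
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);  // binder_thread_write()
 *
 * A later BINDER_WRITE_READ with read_size/read_buffer set drains
 * BR_* work (starting with BR_NOOP) via binder_thread_read().
 */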