// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
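
/*
 * Illustrative sketch (not part of the driver): how the lock ordering
 * described above is intended to be followed when all three locks are
 * needed for one node. The function name is hypothetical; the helpers
 * (binder_proc_lock() etc.) are the real ones defined later in this file.
 *
 *	static void example_touch_node(struct binder_proc *proc,
 *				       struct binder_node *node)
 *	{
 *		binder_proc_lock(proc);		// 1) proc->outer_lock
 *		binder_node_lock(node);		// 2) node->lock
 *		binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *
 *		// ... access refs, node fields and todo lists here ...
 *
 *		binder_inner_proc_unlock(proc);	// release in reverse order
 *		binder_node_unlock(node);
 *		binder_proc_unlock(proc);
 *	}
 */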
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>

#include <uapi/linux/android/binder.h>
#include <uapi/linux/android/binderfs.h>

#include <asm/cacheflush.h>

#include "binder_alloc.h"
#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
	BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info_ratelimited(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log binder_transaction_log;
struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by the memset() below.
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}

/**
 * struct binder_work - work enqueued on a worklist
 * @entry: node enqueued on list
 * @type: type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum binder_work_type {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id: unique ID for debugging
 *            (invariant after initialized)
 * @lock: lock for node fields
 * @work: worklist element for node work
 *        (protected by @proc->inner_lock)
 * @rb_node: element for proc->nodes tree
 *           (protected by @proc->inner_lock)
 * @dead_node: element for binder_dead_nodes list
 *             (protected by binder_dead_nodes_lock)
 * @proc: binder_proc that owns this node
 *        (invariant after initialized)
 * @refs: list of references on this node
 *        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs: weak user refs from local process
 *                   (protected by @proc->inner_lock if @proc
 *                   and by @lock)
 * @local_strong_refs: strong user refs from local process
 *                     (protected by @proc->inner_lock if @proc
 *                     and by @lock)
 * @tmp_refs: temporary kernel refs
 *            (protected by @proc->inner_lock while @proc
 *            is valid, and by binder_dead_nodes_lock
 *            if @proc is NULL. During inc/dec and node release
 *            it is also protected by @lock to provide safety
 *            as the node dies and @proc becomes NULL)
 * @ptr: userspace pointer for node
 *       (invariant, no lock needed)
 * @cookie: userspace cookie for node
 *          (invariant, no lock needed)
 * @has_strong_ref: userspace notified of strong ref
 *                  (protected by @proc->inner_lock if @proc
 *                  and by @lock)
 * @pending_strong_ref: userspace has acked notification of strong ref
 *                      (protected by @proc->inner_lock if @proc
 *                      and by @lock)
 * @has_weak_ref: userspace notified of weak ref
 *                (protected by @proc->inner_lock if @proc
 *                and by @lock)
 * @pending_weak_ref: userspace has acked notification of weak ref
 *                    (protected by @proc->inner_lock if @proc
 *                    and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                         (protected by @lock)
 * @accept_fds: file descriptor operations supported for node
 *              (invariant after initialized)
 * @min_priority: minimum scheduling priority
 *                (invariant after initialized)
 * @txn_security_ctx: require sender's security context
 *                    (invariant after initialized)
 * @async_todo: list of async work items
 *              (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 txn_security_ctx:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id: unique ID for the ref
 * @desc: unique userspace handle for ref
 * @strong: strong ref count (debugging only if not locked)
 * @weak: weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data: binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry: list entry for node->refs list in target node
 *              (protected by @node->lock)
 * @proc: binder_proc containing ref
 * @node: binder_node of target node. When cleaning up a
 *        ref for deletion in binder_cleanup_ref, a non-NULL
 *        @node indicates the node must be freed
 * @death: pointer to death notification (ref_death) if requested
 *         (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node: element for binder_procs list
 * @threads: rbtree of binder_threads in this proc
 *           (protected by @inner_lock)
 * @nodes: rbtree of binder nodes associated with
 *         this proc ordered by node->ptr
 *         (protected by @inner_lock)
 * @refs_by_desc: rbtree of refs ordered by ref->desc
 *                (protected by @outer_lock)
 * @refs_by_node: rbtree of refs ordered by ref->node
 *                (protected by @outer_lock)
 * @waiting_threads: threads currently waiting for proc work
 *                   (protected by @inner_lock)
 * @pid: PID of group_leader of process
 *       (invariant after initialized)
 * @tsk: task_struct for group_leader of process
 *       (invariant after initialized)
 * @deferred_work_node: element for binder_deferred_list
 *                      (protected by binder_deferred_lock)
 * @deferred_work: bitmap of deferred work to perform
 *                 (protected by binder_deferred_lock)
 * @is_dead: process is dead and awaiting free
 *           when outstanding transactions are cleaned up
 *           (protected by @inner_lock)
 * @todo: list of work for this process
 *        (protected by @inner_lock)
 * @stats: per-process binder statistics
 *         (atomics, no lock needed)
 * @delivered_death: list of delivered death notifications
 *                   (protected by @inner_lock)
 * @max_threads: cap on number of binder threads
 *               (protected by @inner_lock)
 * @requested_threads: number of binder threads requested but not
 *                     yet started. In current implementation, can
 *                     only be 0 or 1.
 *                     (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                             (protected by @inner_lock)
 * @tmp_ref: temporary reference to indicate proc is in use
 *           (protected by @inner_lock)
 * @default_priority: default scheduler priority
 *                    (invariant after initialized)
 * @debugfs_entry: debugfs node
 * @alloc: binder allocator bookkeeping
 * @context: binder_context for this proc
 *           (invariant after initialized)
 * @inner_lock: can nest under outer_lock and/or node lock
 * @outer_lock: no nesting under inner or node lock
 *              Lock order: 1) outer, 2) node, 3) inner
 * @binderfs_entry: process-specific binderfs log file
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
	struct dentry *binderfs_entry;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
	BINDER_LOOPER_STATE_POLL = 0x20,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc: binder process for this thread
 *        (invariant after initialization)
 * @rb_node: element for proc->threads rbtree
 *           (protected by @proc->inner_lock)
 * @waiting_thread_node: element for @proc->waiting_threads list
 *                       (protected by @proc->inner_lock)
 * @pid: PID for this thread
 *       (invariant after initialization)
 * @looper: bitmap of looping state
 *          (only accessed by this thread)
 * @looper_need_return: looping thread needs to exit driver
 *                      (no lock needed)
 * @transaction_stack: stack of in-progress transactions for this thread
 *                     (protected by @proc->inner_lock)
 * @todo: list of work to do for this thread
 *        (protected by @proc->inner_lock)
 * @process_todo: whether work in @todo should be processed
 *                (protected by @proc->inner_lock)
 * @return_error: transaction errors reported by this thread
 *                (only accessed by this thread)
 * @reply_error: transaction errors reported by target thread
 *               (protected by @proc->inner_lock)
 * @wait: wait queue for thread work
 * @stats: per-thread statistics
 *         (atomics, no lock needed)
 * @tmp_ref: temporary reference to indicate thread is in use
 *           (atomic since @proc->inner_lock cannot
 *           always be acquired)
 * @is_dead: thread is dead and awaiting free
 *           when outstanding transactions are cleaned up
 *           (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};

/**
 * struct binder_txn_fd_fixup - transaction fd fixup list element
 * @fixup_entry: list entry
 * @file: struct file to be associated with new fd
 * @offset: offset in buffer data to this fixup
 *
 * List element for fd fixups in a transaction. Since file
 * descriptors need to be allocated in the context of the
 * target process, we pass each fd to be processed in this
 * struct.
 */
struct binder_txn_fd_fixup {
	struct list_head fixup_entry;
	struct file *file;
	size_t offset;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	struct list_head fd_fixups;
	binder_uintptr_t security_ctx;
	/**
	 * @lock: protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

/**
 * struct binder_object - union of flat binder object types
 * @hdr: generic object header
 * @fbo: binder object (nodes and refs)
 * @fdo: file descriptor object
 * @bbo: binder buffer pointer
 * @fdao: file descriptor array
 *
 * Used for type-independent object copies
 */
struct binder_object {
	union {
		struct binder_object_header hdr;
		struct flat_binder_object fbo;
		struct binder_fd_object fdo;
		struct binder_buffer_object bbo;
		struct binder_fd_array_object fdao;
	};
};
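
/*
 * Illustrative sketch (not part of the driver): struct binder_object is
 * sized for the largest flat object, so a caller can copy an object in
 * type-independently and then dispatch on the common header. The
 * function below is hypothetical; the BINDER_TYPE_* constants come from
 * the binder UAPI header.
 *
 *	static size_t example_object_size(struct binder_object *object)
 *	{
 *		switch (object->hdr.type) {
 *		case BINDER_TYPE_BINDER:
 *		case BINDER_TYPE_WEAK_BINDER:
 *		case BINDER_TYPE_HANDLE:
 *		case BINDER_TYPE_WEAK_HANDLE:
 *			return sizeof(object->fbo);
 *		case BINDER_TYPE_FD:
 *			return sizeof(object->fdo);
 *		case BINDER_TYPE_PTR:
 *			return sizeof(object->bbo);
 *		case BINDER_TYPE_FDA:
 *			return sizeof(object->fdao);
 *		default:
 *			return 0;	// unknown/invalid header type
 *		}
 *	}
 */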
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node: struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node: struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc: binder_proc associated with list
 * @list: list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work: struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc: binder_proc associated with list
 * @work: struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}
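
/*
 * Illustrative sketch (not part of the driver): the typical pattern for
 * handing work to a process is to enqueue it and wake a waiting thread
 * while holding proc->inner_lock. The function name below is
 * hypothetical; the helpers are the ones defined in this file.
 *
 *	static void example_post_proc_work(struct binder_proc *proc,
 *					   struct binder_work *work)
 *	{
 *		struct binder_thread *thread;
 *
 *		binder_inner_proc_lock(proc);
 *		thread = binder_select_thread_ilocked(proc);
 *		binder_enqueue_work_ilocked(work, &proc->todo);
 *		// the selected thread must be woken; see the note on
 *		// binder_select_thread_ilocked() below
 *		binder_wakeup_thread_ilocked(proc, thread, false);
 *		binder_inner_proc_unlock(proc);
 *	}
 */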
/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc: process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return: If there's a thread currently waiting for process work,
 *         returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc: process to wake up a thread in
 * @thread: specific thread to wake-up (may be NULL)
 * @sync: whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread_ilocked() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}
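
/*
 * Illustrative sketch (not part of the driver): binder_get_node() takes
 * an implicit tmp_refs reference on a successful lookup, so every hit
 * must be paired with binder_put_node() once the caller is done with
 * the node. The function name is hypothetical.
 *
 *	static bool example_node_exists(struct binder_proc *proc,
 *					binder_uintptr_t ptr)
 *	{
 *		struct binder_node *node = binder_get_node(proc, ptr);
 *
 *		if (!node)
 *			return false;
 *		// ... use node ...
 *		binder_put_node(node);	// drop the temporary reference
 *		return true;
 *	}
 */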
static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {

		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}
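
/*
 * Illustrative sketch (not part of the driver): binder_inc_node() and
 * binder_dec_node() must be called with matching @strong/@internal
 * arguments, and the final decrement may free the node. The values used
 * below (strong=1, internal=1) are the ones used when a new strong ref
 * to the node is created. The function name is hypothetical.
 *
 *	static int example_hold_strong(struct binder_node *node,
 *				       struct list_head *target_list)
 *	{
 *		int ret = binder_inc_node(node, 1, 1, target_list);
 *
 *		if (ret)
 *			return ret;
 *		// ... node is pinned by an internal strong ref ...
 *		binder_dec_node(node, 1, 1);	// may free the node
 *		return 0;
 *	}
 */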
static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node: node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node: node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc: binder_proc that owns the ref
 * @node: binder_node of target
 * @new_ref: newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return: the ref for node. It is possible that another thread
 *         allocated/initialized the ref first in which case the
 *         returned ref would be different than the passed-in
 *         new_ref. new_ref must be kfree'd by the caller in
 *         this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref: ref to be incremented
 * @strong: if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref: ref to be decremented
 * @strong: if true, strong decrement, else weak
 *
 * Decrement the ref. @ref->proc->outer_lock must be held on entry.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref: ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @increment: true=inc reference, false=dec reference
 * @strong: true=strong reference, false=weak reference
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @strong: true=strong reference, false=weak reference
 * @rdata: the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}

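/*
 * Illustrative sketch (not part of the driver): a userspace refcount
 * command on a handle maps onto binder_update_ref_for_handle(). A
 * strong increment and the matching decrement on handle @desc could
 * look like the following (hypothetical helper; error handling
 * trimmed).
 *
 *	static int example_acquire_release(struct binder_proc *proc,
 *					   uint32_t desc)
 *	{
 *		struct binder_ref_data rdata;
 *		int ret;
 *
 *		// increment = true, strong = true
 *		ret = binder_update_ref_for_handle(proc, desc, true, true,
 *						   &rdata);
 *		if (ret)
 *			return ret;
 *		// ... rdata holds a snapshot of the ref's counts ...
 *		return binder_dec_ref_for_handle(proc, desc, true, &rdata);
 *	}
 */
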
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc: proc containing the ref
 * @node: target node
 * @strong: true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
				   struct binder_node *node,
				   bool strong,
				   struct list_head *target_list,
				   struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread: thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

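/*
 * Illustrative sketch (not part of the driver): the tmp_ref taken by
 * binder_get_txn_from() (defined below) keeps the "from" thread alive
 * and must be dropped with binder_thread_dec_tmpref() when the caller
 * is done with it. The function name is hypothetical.
 *
 *	static void example_use_sender(struct binder_transaction *t)
 *	{
 *		struct binder_thread *from = binder_get_txn_from(t);
 *
 *		if (!from)
 *			return;		// sender already gone
 *		// ... from cannot be freed while we use it ...
 *		binder_thread_dec_tmpref(from);
 *	}
 */
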
1804 */ 1805 static void binder_proc_dec_tmpref(struct binder_proc *proc) 1806 { 1807 binder_inner_proc_lock(proc); 1808 proc->tmp_ref--; 1809 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) && 1810 !proc->tmp_ref) { 1811 binder_inner_proc_unlock(proc); 1812 binder_free_proc(proc); 1813 return; 1814 } 1815 binder_inner_proc_unlock(proc); 1816 } 1817 1818 /** 1819 * binder_get_txn_from() - safely extract the "from" thread in transaction 1820 * @t: binder transaction for t->from 1821 * 1822 * Atomically return the "from" thread and increment the tmp_ref 1823 * count for the thread to ensure it stays alive until 1824 * binder_thread_dec_tmpref() is called. 1825 * 1826 * Return: the value of t->from 1827 */ 1828 static struct binder_thread *binder_get_txn_from( 1829 struct binder_transaction *t) 1830 { 1831 struct binder_thread *from; 1832 1833 spin_lock(&t->lock); 1834 from = t->from; 1835 if (from) 1836 atomic_inc(&from->tmp_ref); 1837 spin_unlock(&t->lock); 1838 return from; 1839 } 1840 1841 /** 1842 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock 1843 * @t: binder transaction for t->from 1844 * 1845 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock 1846 * to guarantee that the thread cannot be released while operating on it. 1847 * The caller must call binder_inner_proc_unlock() to release the inner lock 1848 * as well as call binder_dec_thread_txn() to release the reference. 1849 * 1850 * Return: the value of t->from 1851 */ 1852 static struct binder_thread *binder_get_txn_from_and_acq_inner( 1853 struct binder_transaction *t) 1854 __acquires(&t->from->proc->inner_lock) 1855 { 1856 struct binder_thread *from; 1857 1858 from = binder_get_txn_from(t); 1859 if (!from) { 1860 __acquire(&from->proc->inner_lock); 1861 return NULL; 1862 } 1863 binder_inner_proc_lock(from->proc); 1864 if (t->from) { 1865 BUG_ON(from != t->from); 1866 return from; 1867 } 1868 binder_inner_proc_unlock(from->proc); 1869 __acquire(&from->proc->inner_lock); 1870 binder_thread_dec_tmpref(from); 1871 return NULL; 1872 } 1873 1874 /** 1875 * binder_free_txn_fixups() - free unprocessed fd fixups 1876 * @t: binder transaction for t->from 1877 * 1878 * If the transaction is being torn down prior to being 1879 * processed by the target process, free all of the 1880 * fd fixups and fput the file structs. It is safe to 1881 * call this function after the fixups have been 1882 * processed -- in that case, the list will be empty. 1883 */ 1884 static void binder_free_txn_fixups(struct binder_transaction *t) 1885 { 1886 struct binder_txn_fd_fixup *fixup, *tmp; 1887 1888 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { 1889 fput(fixup->file); 1890 list_del(&fixup->fixup_entry); 1891 kfree(fixup); 1892 } 1893 } 1894 1895 static void binder_free_transaction(struct binder_transaction *t) 1896 { 1897 struct binder_proc *target_proc = t->to_proc; 1898 1899 if (target_proc) { 1900 binder_inner_proc_lock(target_proc); 1901 if (t->buffer) 1902 t->buffer->transaction = NULL; 1903 binder_inner_proc_unlock(target_proc); 1904 } 1905 /* 1906 * If the transaction has no target_proc, then 1907 * t->buffer->transaction has already been cleared. 
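 * (The locked clearing above mirrors binder_free_buf(), which resets
 * buffer->transaction under the same proc inner_lock, so the two
 * paths cannot race on the pointer.)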
1908 */ 1909 binder_free_txn_fixups(t); 1910 kfree(t); 1911 binder_stats_deleted(BINDER_STAT_TRANSACTION); 1912 } 1913 1914 static void binder_send_failed_reply(struct binder_transaction *t, 1915 uint32_t error_code) 1916 { 1917 struct binder_thread *target_thread; 1918 struct binder_transaction *next; 1919 1920 BUG_ON(t->flags & TF_ONE_WAY); 1921 while (1) { 1922 target_thread = binder_get_txn_from_and_acq_inner(t); 1923 if (target_thread) { 1924 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 1925 "send failed reply for transaction %d to %d:%d\n", 1926 t->debug_id, 1927 target_thread->proc->pid, 1928 target_thread->pid); 1929 1930 binder_pop_transaction_ilocked(target_thread, t); 1931 if (target_thread->reply_error.cmd == BR_OK) { 1932 target_thread->reply_error.cmd = error_code; 1933 binder_enqueue_thread_work_ilocked( 1934 target_thread, 1935 &target_thread->reply_error.work); 1936 wake_up_interruptible(&target_thread->wait); 1937 } else { 1938 /* 1939 * Cannot get here for normal operation, but 1940 * we can if multiple synchronous transactions 1941 * are sent without blocking for responses. 1942 * Just ignore the 2nd error in this case. 1943 */ 1944 pr_warn("Unexpected reply error: %u\n", 1945 target_thread->reply_error.cmd); 1946 } 1947 binder_inner_proc_unlock(target_thread->proc); 1948 binder_thread_dec_tmpref(target_thread); 1949 binder_free_transaction(t); 1950 return; 1951 } 1952 __release(&target_thread->proc->inner_lock); 1953 next = t->from_parent; 1954 1955 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 1956 "send failed reply for transaction %d, target dead\n", 1957 t->debug_id); 1958 1959 binder_free_transaction(t); 1960 if (next == NULL) { 1961 binder_debug(BINDER_DEBUG_DEAD_BINDER, 1962 "reply failed, no target thread at root\n"); 1963 return; 1964 } 1965 t = next; 1966 binder_debug(BINDER_DEBUG_DEAD_BINDER, 1967 "reply failed, no target thread -- retry %d\n", 1968 t->debug_id); 1969 } 1970 } 1971 1972 /** 1973 * binder_cleanup_transaction() - cleans up undelivered transaction 1974 * @t: transaction that needs to be cleaned up 1975 * @reason: reason the transaction wasn't delivered 1976 * @error_code: error to return to caller (if synchronous call) 1977 */ 1978 static void binder_cleanup_transaction(struct binder_transaction *t, 1979 const char *reason, 1980 uint32_t error_code) 1981 { 1982 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) { 1983 binder_send_failed_reply(t, error_code); 1984 } else { 1985 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 1986 "undelivered transaction %d, %s\n", 1987 t->debug_id, reason); 1988 binder_free_transaction(t); 1989 } 1990 } 1991 1992 /** 1993 * binder_get_object() - gets object and checks for valid metadata 1994 * @proc: binder_proc owning the buffer 1995 * @buffer: binder_buffer that we're parsing. 1996 * @offset: offset in the @buffer at which to validate an object. 1997 * @object: struct binder_object to read into 1998 * 1999 * Return: If there's a valid metadata object at @offset in @buffer, the 2000 * size of that object. Otherwise, it returns zero. The object 2001 * is read into the struct binder_object pointed to by @object. 
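 *
 * Typical caller pattern in this file (a sketch of the existing usage,
 * with error handling abbreviated):
 *
 *	object_size = binder_get_object(proc, buffer, object_offset, &object);
 *	if (object_size == 0)
 *		goto reject;		(no valid object header at that offset)
 *	hdr = &object.hdr;
 *	switch (hdr->type) { ... }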
2002 */ 2003 static size_t binder_get_object(struct binder_proc *proc, 2004 struct binder_buffer *buffer, 2005 unsigned long offset, 2006 struct binder_object *object) 2007 { 2008 size_t read_size; 2009 struct binder_object_header *hdr; 2010 size_t object_size = 0; 2011 2012 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset); 2013 if (offset > buffer->data_size || read_size < sizeof(*hdr) || 2014 binder_alloc_copy_from_buffer(&proc->alloc, object, buffer, 2015 offset, read_size)) 2016 return 0; 2017 2018 /* Ok, now see if we read a complete object. */ 2019 hdr = &object->hdr; 2020 switch (hdr->type) { 2021 case BINDER_TYPE_BINDER: 2022 case BINDER_TYPE_WEAK_BINDER: 2023 case BINDER_TYPE_HANDLE: 2024 case BINDER_TYPE_WEAK_HANDLE: 2025 object_size = sizeof(struct flat_binder_object); 2026 break; 2027 case BINDER_TYPE_FD: 2028 object_size = sizeof(struct binder_fd_object); 2029 break; 2030 case BINDER_TYPE_PTR: 2031 object_size = sizeof(struct binder_buffer_object); 2032 break; 2033 case BINDER_TYPE_FDA: 2034 object_size = sizeof(struct binder_fd_array_object); 2035 break; 2036 default: 2037 return 0; 2038 } 2039 if (offset <= buffer->data_size - object_size && 2040 buffer->data_size >= object_size) 2041 return object_size; 2042 else 2043 return 0; 2044 } 2045 2046 /** 2047 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer. 2048 * @proc: binder_proc owning the buffer 2049 * @b: binder_buffer containing the object 2050 * @object: struct binder_object to read into 2051 * @index: index in offset array at which the binder_buffer_object is 2052 * located 2053 * @start_offset: points to the start of the offset array 2054 * @object_offsetp: offset of @object read from @b 2055 * @num_valid: the number of valid offsets in the offset array 2056 * 2057 * Return: If @index is within the valid range of the offset array 2058 * described by @start and @num_valid, and if there's a valid 2059 * binder_buffer_object at the offset found in index @index 2060 * of the offset array, that object is returned. Otherwise, 2061 * %NULL is returned. 2062 * Note that the offset found in index @index itself is not 2063 * verified; this function assumes that @num_valid elements 2064 * from @start were previously verified to have valid offsets. 2065 * If @object_offsetp is non-NULL, then the offset within 2066 * @b is written to it. 2067 */ 2068 static struct binder_buffer_object *binder_validate_ptr( 2069 struct binder_proc *proc, 2070 struct binder_buffer *b, 2071 struct binder_object *object, 2072 binder_size_t index, 2073 binder_size_t start_offset, 2074 binder_size_t *object_offsetp, 2075 binder_size_t num_valid) 2076 { 2077 size_t object_size; 2078 binder_size_t object_offset; 2079 unsigned long buffer_offset; 2080 2081 if (index >= num_valid) 2082 return NULL; 2083 2084 buffer_offset = start_offset + sizeof(binder_size_t) * index; 2085 if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, 2086 b, buffer_offset, 2087 sizeof(object_offset))) 2088 return NULL; 2089 object_size = binder_get_object(proc, b, object_offset, object); 2090 if (!object_size || object->hdr.type != BINDER_TYPE_PTR) 2091 return NULL; 2092 if (object_offsetp) 2093 *object_offsetp = object_offset; 2094 2095 return &object->bbo; 2096 } 2097 2098 /** 2099 * binder_validate_fixup() - validates pointer/fd fixups happen in order. 
2100 * @proc: binder_proc owning the buffer 2101 * @b: transaction buffer 2102 * @objects_start_offset: offset to start of objects buffer 2103 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up 2104 * @fixup_offset: start offset in @buffer to fix up 2105 * @last_obj_offset: offset to last binder_buffer_object that we fixed 2106 * @last_min_offset: minimum fixup offset in object at @last_obj_offset 2107 * 2108 * Return: %true if a fixup in buffer @buffer at offset @offset is 2109 * allowed. 2110 * 2111 * For safety reasons, we only allow fixups inside a buffer to happen 2112 * at increasing offsets; additionally, we only allow fixup on the last 2113 * buffer object that was verified, or one of its parents. 2114 * 2115 * Example of what is allowed: 2116 * 2117 * A 2118 * B (parent = A, offset = 0) 2119 * C (parent = A, offset = 16) 2120 * D (parent = C, offset = 0) 2121 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset) 2122 * 2123 * Examples of what is not allowed: 2124 * 2125 * Decreasing offsets within the same parent: 2126 * A 2127 * C (parent = A, offset = 16) 2128 * B (parent = A, offset = 0) // decreasing offset within A 2129 * 2130 * Referring to a parent that wasn't the last object or any of its parents: 2131 * A 2132 * B (parent = A, offset = 0) 2133 * C (parent = A, offset = 0) 2134 * C (parent = A, offset = 16) 2135 * D (parent = B, offset = 0) // B is not A or any of A's parents 2136 */ 2137 static bool binder_validate_fixup(struct binder_proc *proc, 2138 struct binder_buffer *b, 2139 binder_size_t objects_start_offset, 2140 binder_size_t buffer_obj_offset, 2141 binder_size_t fixup_offset, 2142 binder_size_t last_obj_offset, 2143 binder_size_t last_min_offset) 2144 { 2145 if (!last_obj_offset) { 2146 /* Nothing to fix up in */ 2147 return false; 2148 } 2149 2150 while (last_obj_offset != buffer_obj_offset) { 2151 unsigned long buffer_offset; 2152 struct binder_object last_object; 2153 struct binder_buffer_object *last_bbo; 2154 size_t object_size = binder_get_object(proc, b, last_obj_offset, 2155 &last_object); 2156 if (object_size != sizeof(*last_bbo)) 2157 return false; 2158 2159 last_bbo = &last_object.bbo; 2160 /* 2161 * Safe to retrieve the parent of last_obj, since it 2162 * was already previously verified by the driver. 2163 */ 2164 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0) 2165 return false; 2166 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t); 2167 buffer_offset = objects_start_offset + 2168 sizeof(binder_size_t) * last_bbo->parent; 2169 if (binder_alloc_copy_from_buffer(&proc->alloc, 2170 &last_obj_offset, 2171 b, buffer_offset, 2172 sizeof(last_obj_offset))) 2173 return false; 2174 } 2175 return (fixup_offset >= last_min_offset); 2176 } 2177 2178 /** 2179 * struct binder_task_work_cb - for deferred close 2180 * 2181 * @twork: callback_head for task work 2182 * @fd: fd to close 2183 * 2184 * Structure to pass task work to be handled after 2185 * returning from binder_ioctl() via task_work_add(). 2186 */ 2187 struct binder_task_work_cb { 2188 struct callback_head twork; 2189 struct file *file; 2190 }; 2191 2192 /** 2193 * binder_do_fd_close() - close list of file descriptors 2194 * @twork: callback head for task work 2195 * 2196 * It is not safe to call ksys_close() during the binder_ioctl() 2197 * function if there is a chance that binder's own file descriptor 2198 * might be closed. This is to meet the requirements for using 2199 * fdget() (see comments for __fget_light()). 
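 * (fdget()/__fget_light() may return a file without taking a reference
 * when the fd table is not shared, on the assumption that no descriptor
 * is closed out from under the running syscall; closing binder's own fd
 * from inside the ioctl would violate that assumption.)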
Therefore use 2200 * task_work_add() to schedule the close operation once we have 2201 * returned from binder_ioctl(). This function is a callback 2202 * for that mechanism and does the actual ksys_close() on the 2203 * given file descriptor. 2204 */ 2205 static void binder_do_fd_close(struct callback_head *twork) 2206 { 2207 struct binder_task_work_cb *twcb = container_of(twork, 2208 struct binder_task_work_cb, twork); 2209 2210 fput(twcb->file); 2211 kfree(twcb); 2212 } 2213 2214 /** 2215 * binder_deferred_fd_close() - schedule a close for the given file-descriptor 2216 * @fd: file-descriptor to close 2217 * 2218 * See comments in binder_do_fd_close(). This function is used to schedule 2219 * a file-descriptor to be closed after returning from binder_ioctl(). 2220 */ 2221 static void binder_deferred_fd_close(int fd) 2222 { 2223 struct binder_task_work_cb *twcb; 2224 2225 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL); 2226 if (!twcb) 2227 return; 2228 init_task_work(&twcb->twork, binder_do_fd_close); 2229 __close_fd_get_file(fd, &twcb->file); 2230 if (twcb->file) { 2231 filp_close(twcb->file, current->files); 2232 task_work_add(current, &twcb->twork, TWA_RESUME); 2233 } else { 2234 kfree(twcb); 2235 } 2236 } 2237 2238 static void binder_transaction_buffer_release(struct binder_proc *proc, 2239 struct binder_buffer *buffer, 2240 binder_size_t failed_at, 2241 bool is_failure) 2242 { 2243 int debug_id = buffer->debug_id; 2244 binder_size_t off_start_offset, buffer_offset, off_end_offset; 2245 2246 binder_debug(BINDER_DEBUG_TRANSACTION, 2247 "%d buffer release %d, size %zd-%zd, failed at %llx\n", 2248 proc->pid, buffer->debug_id, 2249 buffer->data_size, buffer->offsets_size, 2250 (unsigned long long)failed_at); 2251 2252 if (buffer->target_node) 2253 binder_dec_node(buffer->target_node, 1, 0); 2254 2255 off_start_offset = ALIGN(buffer->data_size, sizeof(void *)); 2256 off_end_offset = is_failure ? 
failed_at : 2257 off_start_offset + buffer->offsets_size; 2258 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; 2259 buffer_offset += sizeof(binder_size_t)) { 2260 struct binder_object_header *hdr; 2261 size_t object_size = 0; 2262 struct binder_object object; 2263 binder_size_t object_offset; 2264 2265 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, 2266 buffer, buffer_offset, 2267 sizeof(object_offset))) 2268 object_size = binder_get_object(proc, buffer, 2269 object_offset, &object); 2270 if (object_size == 0) { 2271 pr_err("transaction release %d bad object at offset %lld, size %zd\n", 2272 debug_id, (u64)object_offset, buffer->data_size); 2273 continue; 2274 } 2275 hdr = &object.hdr; 2276 switch (hdr->type) { 2277 case BINDER_TYPE_BINDER: 2278 case BINDER_TYPE_WEAK_BINDER: { 2279 struct flat_binder_object *fp; 2280 struct binder_node *node; 2281 2282 fp = to_flat_binder_object(hdr); 2283 node = binder_get_node(proc, fp->binder); 2284 if (node == NULL) { 2285 pr_err("transaction release %d bad node %016llx\n", 2286 debug_id, (u64)fp->binder); 2287 break; 2288 } 2289 binder_debug(BINDER_DEBUG_TRANSACTION, 2290 " node %d u%016llx\n", 2291 node->debug_id, (u64)node->ptr); 2292 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER, 2293 0); 2294 binder_put_node(node); 2295 } break; 2296 case BINDER_TYPE_HANDLE: 2297 case BINDER_TYPE_WEAK_HANDLE: { 2298 struct flat_binder_object *fp; 2299 struct binder_ref_data rdata; 2300 int ret; 2301 2302 fp = to_flat_binder_object(hdr); 2303 ret = binder_dec_ref_for_handle(proc, fp->handle, 2304 hdr->type == BINDER_TYPE_HANDLE, &rdata); 2305 2306 if (ret) { 2307 pr_err("transaction release %d bad handle %d, ret = %d\n", 2308 debug_id, fp->handle, ret); 2309 break; 2310 } 2311 binder_debug(BINDER_DEBUG_TRANSACTION, 2312 " ref %d desc %d\n", 2313 rdata.debug_id, rdata.desc); 2314 } break; 2315 2316 case BINDER_TYPE_FD: { 2317 /* 2318 * No need to close the file here since user-space 2319 * closes it for successfully delivered 2320 * transactions. For transactions that weren't 2321 * delivered, the new fd was never allocated so 2322 * there is no need to close and the fput on the 2323 * file is done when the transaction is torn 2324 * down. 2325 */ 2326 } break; 2327 case BINDER_TYPE_PTR: 2328 /* 2329 * Nothing to do here, this will get cleaned up when the 2330 * transaction buffer gets freed 2331 */ 2332 break; 2333 case BINDER_TYPE_FDA: { 2334 struct binder_fd_array_object *fda; 2335 struct binder_buffer_object *parent; 2336 struct binder_object ptr_object; 2337 binder_size_t fda_offset; 2338 size_t fd_index; 2339 binder_size_t fd_buf_size; 2340 binder_size_t num_valid; 2341 2342 if (proc->tsk != current->group_leader) { 2343 /* 2344 * Nothing to do if running in sender context 2345 * The fd fixups have not been applied so no 2346 * fds need to be closed. 
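 * (proc->tsk != current->group_leader means this release is running
 * in the sender, e.g. from binder_transaction()'s error path, before
 * any fds were installed in the target.)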
2347 */ 2348 continue; 2349 } 2350 2351 num_valid = (buffer_offset - off_start_offset) / 2352 sizeof(binder_size_t); 2353 fda = to_binder_fd_array_object(hdr); 2354 parent = binder_validate_ptr(proc, buffer, &ptr_object, 2355 fda->parent, 2356 off_start_offset, 2357 NULL, 2358 num_valid); 2359 if (!parent) { 2360 pr_err("transaction release %d bad parent offset\n", 2361 debug_id); 2362 continue; 2363 } 2364 fd_buf_size = sizeof(u32) * fda->num_fds; 2365 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 2366 pr_err("transaction release %d invalid number of fds (%lld)\n", 2367 debug_id, (u64)fda->num_fds); 2368 continue; 2369 } 2370 if (fd_buf_size > parent->length || 2371 fda->parent_offset > parent->length - fd_buf_size) { 2372 /* No space for all file descriptors here. */ 2373 pr_err("transaction release %d not enough space for %lld fds in buffer\n", 2374 debug_id, (u64)fda->num_fds); 2375 continue; 2376 } 2377 /* 2378 * the source data for binder_buffer_object is visible 2379 * to user-space and the @buffer element is the user 2380 * pointer to the buffer_object containing the fd_array. 2381 * Convert the address to an offset relative to 2382 * the base of the transaction buffer. 2383 */ 2384 fda_offset = 2385 (parent->buffer - (uintptr_t)buffer->user_data) + 2386 fda->parent_offset; 2387 for (fd_index = 0; fd_index < fda->num_fds; 2388 fd_index++) { 2389 u32 fd; 2390 int err; 2391 binder_size_t offset = fda_offset + 2392 fd_index * sizeof(fd); 2393 2394 err = binder_alloc_copy_from_buffer( 2395 &proc->alloc, &fd, buffer, 2396 offset, sizeof(fd)); 2397 WARN_ON(err); 2398 if (!err) 2399 binder_deferred_fd_close(fd); 2400 } 2401 } break; 2402 default: 2403 pr_err("transaction release %d bad object type %x\n", 2404 debug_id, hdr->type); 2405 break; 2406 } 2407 } 2408 } 2409 2410 static int binder_translate_binder(struct flat_binder_object *fp, 2411 struct binder_transaction *t, 2412 struct binder_thread *thread) 2413 { 2414 struct binder_node *node; 2415 struct binder_proc *proc = thread->proc; 2416 struct binder_proc *target_proc = t->to_proc; 2417 struct binder_ref_data rdata; 2418 int ret = 0; 2419 2420 node = binder_get_node(proc, fp->binder); 2421 if (!node) { 2422 node = binder_new_node(proc, fp); 2423 if (!node) 2424 return -ENOMEM; 2425 } 2426 if (fp->cookie != node->cookie) { 2427 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n", 2428 proc->pid, thread->pid, (u64)fp->binder, 2429 node->debug_id, (u64)fp->cookie, 2430 (u64)node->cookie); 2431 ret = -EINVAL; 2432 goto done; 2433 } 2434 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { 2435 ret = -EPERM; 2436 goto done; 2437 } 2438 2439 ret = binder_inc_ref_for_node(target_proc, node, 2440 fp->hdr.type == BINDER_TYPE_BINDER, 2441 &thread->todo, &rdata); 2442 if (ret) 2443 goto done; 2444 2445 if (fp->hdr.type == BINDER_TYPE_BINDER) 2446 fp->hdr.type = BINDER_TYPE_HANDLE; 2447 else 2448 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE; 2449 fp->binder = 0; 2450 fp->handle = rdata.desc; 2451 fp->cookie = 0; 2452 2453 trace_binder_transaction_node_to_ref(t, node, &rdata); 2454 binder_debug(BINDER_DEBUG_TRANSACTION, 2455 " node %d u%016llx -> ref %d desc %d\n", 2456 node->debug_id, (u64)node->ptr, 2457 rdata.debug_id, rdata.desc); 2458 done: 2459 binder_put_node(node); 2460 return ret; 2461 } 2462 2463 static int binder_translate_handle(struct flat_binder_object *fp, 2464 struct binder_transaction *t, 2465 struct binder_thread *thread) 2466 { 2467 struct binder_proc *proc = thread->proc; 2468 
struct binder_proc *target_proc = t->to_proc; 2469 struct binder_node *node; 2470 struct binder_ref_data src_rdata; 2471 int ret = 0; 2472 2473 node = binder_get_node_from_ref(proc, fp->handle, 2474 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata); 2475 if (!node) { 2476 binder_user_error("%d:%d got transaction with invalid handle, %d\n", 2477 proc->pid, thread->pid, fp->handle); 2478 return -EINVAL; 2479 } 2480 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { 2481 ret = -EPERM; 2482 goto done; 2483 } 2484 2485 binder_node_lock(node); 2486 if (node->proc == target_proc) { 2487 if (fp->hdr.type == BINDER_TYPE_HANDLE) 2488 fp->hdr.type = BINDER_TYPE_BINDER; 2489 else 2490 fp->hdr.type = BINDER_TYPE_WEAK_BINDER; 2491 fp->binder = node->ptr; 2492 fp->cookie = node->cookie; 2493 if (node->proc) 2494 binder_inner_proc_lock(node->proc); 2495 else 2496 __acquire(&node->proc->inner_lock); 2497 binder_inc_node_nilocked(node, 2498 fp->hdr.type == BINDER_TYPE_BINDER, 2499 0, NULL); 2500 if (node->proc) 2501 binder_inner_proc_unlock(node->proc); 2502 else 2503 __release(&node->proc->inner_lock); 2504 trace_binder_transaction_ref_to_node(t, node, &src_rdata); 2505 binder_debug(BINDER_DEBUG_TRANSACTION, 2506 " ref %d desc %d -> node %d u%016llx\n", 2507 src_rdata.debug_id, src_rdata.desc, node->debug_id, 2508 (u64)node->ptr); 2509 binder_node_unlock(node); 2510 } else { 2511 struct binder_ref_data dest_rdata; 2512 2513 binder_node_unlock(node); 2514 ret = binder_inc_ref_for_node(target_proc, node, 2515 fp->hdr.type == BINDER_TYPE_HANDLE, 2516 NULL, &dest_rdata); 2517 if (ret) 2518 goto done; 2519 2520 fp->binder = 0; 2521 fp->handle = dest_rdata.desc; 2522 fp->cookie = 0; 2523 trace_binder_transaction_ref_to_ref(t, node, &src_rdata, 2524 &dest_rdata); 2525 binder_debug(BINDER_DEBUG_TRANSACTION, 2526 " ref %d desc %d -> ref %d desc %d (node %d)\n", 2527 src_rdata.debug_id, src_rdata.desc, 2528 dest_rdata.debug_id, dest_rdata.desc, 2529 node->debug_id); 2530 } 2531 done: 2532 binder_put_node(node); 2533 return ret; 2534 } 2535 2536 static int binder_translate_fd(u32 fd, binder_size_t fd_offset, 2537 struct binder_transaction *t, 2538 struct binder_thread *thread, 2539 struct binder_transaction *in_reply_to) 2540 { 2541 struct binder_proc *proc = thread->proc; 2542 struct binder_proc *target_proc = t->to_proc; 2543 struct binder_txn_fd_fixup *fixup; 2544 struct file *file; 2545 int ret = 0; 2546 bool target_allows_fd; 2547 2548 if (in_reply_to) 2549 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS); 2550 else 2551 target_allows_fd = t->buffer->target_node->accept_fds; 2552 if (!target_allows_fd) { 2553 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n", 2554 proc->pid, thread->pid, 2555 in_reply_to ? "reply" : "transaction", 2556 fd); 2557 ret = -EPERM; 2558 goto err_fd_not_accepted; 2559 } 2560 2561 file = fget(fd); 2562 if (!file) { 2563 binder_user_error("%d:%d got transaction with invalid fd, %d\n", 2564 proc->pid, thread->pid, fd); 2565 ret = -EBADF; 2566 goto err_fget; 2567 } 2568 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file); 2569 if (ret < 0) { 2570 ret = -EPERM; 2571 goto err_security; 2572 } 2573 2574 /* 2575 * Add fixup record for this transaction. The allocation 2576 * of the fd in the target needs to be done from a 2577 * target thread. 
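 * A rough sketch of the deferred step that later consumes these
 * records in the target (the real consumer is outside this excerpt,
 * so treat the shape below as illustrative only):
 *
 *	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
 *		int fd = get_unused_fd_flags(O_CLOEXEC);
 *
 *		fd_install(fd, fixup->file);
 *		(then write fd into t->buffer at fixup->offset)
 *	}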
2578 */ 2579 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL); 2580 if (!fixup) { 2581 ret = -ENOMEM; 2582 goto err_alloc; 2583 } 2584 fixup->file = file; 2585 fixup->offset = fd_offset; 2586 trace_binder_transaction_fd_send(t, fd, fixup->offset); 2587 list_add_tail(&fixup->fixup_entry, &t->fd_fixups); 2588 2589 return ret; 2590 2591 err_alloc: 2592 err_security: 2593 fput(file); 2594 err_fget: 2595 err_fd_not_accepted: 2596 return ret; 2597 } 2598 2599 static int binder_translate_fd_array(struct binder_fd_array_object *fda, 2600 struct binder_buffer_object *parent, 2601 struct binder_transaction *t, 2602 struct binder_thread *thread, 2603 struct binder_transaction *in_reply_to) 2604 { 2605 binder_size_t fdi, fd_buf_size; 2606 binder_size_t fda_offset; 2607 struct binder_proc *proc = thread->proc; 2608 struct binder_proc *target_proc = t->to_proc; 2609 2610 fd_buf_size = sizeof(u32) * fda->num_fds; 2611 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 2612 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n", 2613 proc->pid, thread->pid, (u64)fda->num_fds); 2614 return -EINVAL; 2615 } 2616 if (fd_buf_size > parent->length || 2617 fda->parent_offset > parent->length - fd_buf_size) { 2618 /* No space for all file descriptors here. */ 2619 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n", 2620 proc->pid, thread->pid, (u64)fda->num_fds); 2621 return -EINVAL; 2622 } 2623 /* 2624 * the source data for binder_buffer_object is visible 2625 * to user-space and the @buffer element is the user 2626 * pointer to the buffer_object containing the fd_array. 2627 * Convert the address to an offset relative to 2628 * the base of the transaction buffer. 2629 */ 2630 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) + 2631 fda->parent_offset; 2632 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) { 2633 binder_user_error("%d:%d parent offset not aligned correctly.\n", 2634 proc->pid, thread->pid); 2635 return -EINVAL; 2636 } 2637 for (fdi = 0; fdi < fda->num_fds; fdi++) { 2638 u32 fd; 2639 int ret; 2640 binder_size_t offset = fda_offset + fdi * sizeof(fd); 2641 2642 ret = binder_alloc_copy_from_buffer(&target_proc->alloc, 2643 &fd, t->buffer, 2644 offset, sizeof(fd)); 2645 if (!ret) 2646 ret = binder_translate_fd(fd, offset, t, thread, 2647 in_reply_to); 2648 if (ret < 0) 2649 return ret; 2650 } 2651 return 0; 2652 } 2653 2654 static int binder_fixup_parent(struct binder_transaction *t, 2655 struct binder_thread *thread, 2656 struct binder_buffer_object *bp, 2657 binder_size_t off_start_offset, 2658 binder_size_t num_valid, 2659 binder_size_t last_fixup_obj_off, 2660 binder_size_t last_fixup_min_off) 2661 { 2662 struct binder_buffer_object *parent; 2663 struct binder_buffer *b = t->buffer; 2664 struct binder_proc *proc = thread->proc; 2665 struct binder_proc *target_proc = t->to_proc; 2666 struct binder_object object; 2667 binder_size_t buffer_offset; 2668 binder_size_t parent_offset; 2669 2670 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT)) 2671 return 0; 2672 2673 parent = binder_validate_ptr(target_proc, b, &object, bp->parent, 2674 off_start_offset, &parent_offset, 2675 num_valid); 2676 if (!parent) { 2677 binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 2678 proc->pid, thread->pid); 2679 return -EINVAL; 2680 } 2681 2682 if (!binder_validate_fixup(target_proc, b, off_start_offset, 2683 parent_offset, bp->parent_offset, 2684 last_fixup_obj_off, 2685 last_fixup_min_off)) { 2686 binder_user_error("%d:%d got 
transaction with out-of-order buffer fixup\n", 2687 proc->pid, thread->pid); 2688 return -EINVAL; 2689 } 2690 2691 if (parent->length < sizeof(binder_uintptr_t) || 2692 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) { 2693 /* No space for a pointer here! */ 2694 binder_user_error("%d:%d got transaction with invalid parent offset\n", 2695 proc->pid, thread->pid); 2696 return -EINVAL; 2697 } 2698 buffer_offset = bp->parent_offset + 2699 (uintptr_t)parent->buffer - (uintptr_t)b->user_data; 2700 if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset, 2701 &bp->buffer, sizeof(bp->buffer))) { 2702 binder_user_error("%d:%d got transaction with invalid parent offset\n", 2703 proc->pid, thread->pid); 2704 return -EINVAL; 2705 } 2706 2707 return 0; 2708 } 2709 2710 /** 2711 * binder_proc_transaction() - sends a transaction to a process and wakes it up 2712 * @t: transaction to send 2713 * @proc: process to send the transaction to 2714 * @thread: thread in @proc to send the transaction to (may be NULL) 2715 * 2716 * This function queues a transaction to the specified process. It will try 2717 * to find a thread in the target process to handle the transaction and 2718 * wake it up. If no thread is found, the work is queued to the proc 2719 * waitqueue. 2720 * 2721 * If the @thread parameter is not NULL, the transaction is always queued 2722 * to the waitlist of that specific thread. 2723 * 2724 * Return: true if the transaction was successfully queued 2725 * false if the target process or thread is dead 2726 */ 2727 static bool binder_proc_transaction(struct binder_transaction *t, 2728 struct binder_proc *proc, 2729 struct binder_thread *thread) 2730 { 2731 struct binder_node *node = t->buffer->target_node; 2732 bool oneway = !!(t->flags & TF_ONE_WAY); 2733 bool pending_async = false; 2734 2735 BUG_ON(!node); 2736 binder_node_lock(node); 2737 if (oneway) { 2738 BUG_ON(thread); 2739 if (node->has_async_transaction) 2740 pending_async = true; 2741 else 2742 node->has_async_transaction = true; 2743 } 2744 2745 binder_inner_proc_lock(proc); 2746 2747 if (proc->is_dead || (thread && thread->is_dead)) { 2748 binder_inner_proc_unlock(proc); 2749 binder_node_unlock(node); 2750 return false; 2751 } 2752 2753 if (!thread && !pending_async) 2754 thread = binder_select_thread_ilocked(proc); 2755 2756 if (thread) 2757 binder_enqueue_thread_work_ilocked(thread, &t->work); 2758 else if (!pending_async) 2759 binder_enqueue_work_ilocked(&t->work, &proc->todo); 2760 else 2761 binder_enqueue_work_ilocked(&t->work, &node->async_todo); 2762 2763 if (!pending_async) 2764 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */); 2765 2766 binder_inner_proc_unlock(proc); 2767 binder_node_unlock(node); 2768 2769 return true; 2770 } 2771 2772 /** 2773 * binder_get_node_refs_for_txn() - Get required refs on node for txn 2774 * @node: struct binder_node for which to get refs 2775 * @proc: returns @node->proc if valid 2776 * @error: if no @proc then returns BR_DEAD_REPLY 2777 * 2778 * User-space normally keeps the node alive when creating a transaction 2779 * since it has a reference to the target. The local strong ref keeps it 2780 * alive if the sending process dies before the target process processes 2781 * the transaction. If the source process is malicious or has a reference 2782 * counting bug, relying on the local strong ref can fail. 
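 * (For example, another thread of the sender could issue BC_RELEASE on
 * the handle while this transaction is still being set up.)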
2783 * 2784 * Since user-space can cause the local strong ref to go away, we also take 2785 * a tmpref on the node to ensure it survives while we are constructing 2786 * the transaction. We also need a tmpref on the proc while we are 2787 * constructing the transaction, so we take that here as well. 2788 * 2789 * Return: The target_node with refs taken or NULL if @node->proc is NULL. 2790 * Also sets @proc if valid. If the @node->proc is NULL indicating that the 2791 * target proc has died, @error is set to BR_DEAD_REPLY 2792 */ 2793 static struct binder_node *binder_get_node_refs_for_txn( 2794 struct binder_node *node, 2795 struct binder_proc **procp, 2796 uint32_t *error) 2797 { 2798 struct binder_node *target_node = NULL; 2799 2800 binder_node_inner_lock(node); 2801 if (node->proc) { 2802 target_node = node; 2803 binder_inc_node_nilocked(node, 1, 0, NULL); 2804 binder_inc_node_tmpref_ilocked(node); 2805 node->proc->tmp_ref++; 2806 *procp = node->proc; 2807 } else 2808 *error = BR_DEAD_REPLY; 2809 binder_node_inner_unlock(node); 2810 2811 return target_node; 2812 } 2813 2814 static void binder_transaction(struct binder_proc *proc, 2815 struct binder_thread *thread, 2816 struct binder_transaction_data *tr, int reply, 2817 binder_size_t extra_buffers_size) 2818 { 2819 int ret; 2820 struct binder_transaction *t; 2821 struct binder_work *w; 2822 struct binder_work *tcomplete; 2823 binder_size_t buffer_offset = 0; 2824 binder_size_t off_start_offset, off_end_offset; 2825 binder_size_t off_min; 2826 binder_size_t sg_buf_offset, sg_buf_end_offset; 2827 struct binder_proc *target_proc = NULL; 2828 struct binder_thread *target_thread = NULL; 2829 struct binder_node *target_node = NULL; 2830 struct binder_transaction *in_reply_to = NULL; 2831 struct binder_transaction_log_entry *e; 2832 uint32_t return_error = 0; 2833 uint32_t return_error_param = 0; 2834 uint32_t return_error_line = 0; 2835 binder_size_t last_fixup_obj_off = 0; 2836 binder_size_t last_fixup_min_off = 0; 2837 struct binder_context *context = proc->context; 2838 int t_debug_id = atomic_inc_return(&binder_last_id); 2839 char *secctx = NULL; 2840 u32 secctx_sz = 0; 2841 2842 e = binder_transaction_log_add(&binder_transaction_log); 2843 e->debug_id = t_debug_id; 2844 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); 2845 e->from_proc = proc->pid; 2846 e->from_thread = thread->pid; 2847 e->target_handle = tr->target.handle; 2848 e->data_size = tr->data_size; 2849 e->offsets_size = tr->offsets_size; 2850 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME); 2851 2852 if (reply) { 2853 binder_inner_proc_lock(proc); 2854 in_reply_to = thread->transaction_stack; 2855 if (in_reply_to == NULL) { 2856 binder_inner_proc_unlock(proc); 2857 binder_user_error("%d:%d got reply transaction with no transaction stack\n", 2858 proc->pid, thread->pid); 2859 return_error = BR_FAILED_REPLY; 2860 return_error_param = -EPROTO; 2861 return_error_line = __LINE__; 2862 goto err_empty_call_stack; 2863 } 2864 if (in_reply_to->to_thread != thread) { 2865 spin_lock(&in_reply_to->lock); 2866 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n", 2867 proc->pid, thread->pid, in_reply_to->debug_id, 2868 in_reply_to->to_proc ? 2869 in_reply_to->to_proc->pid : 0, 2870 in_reply_to->to_thread ? 
2871 in_reply_to->to_thread->pid : 0); 2872 spin_unlock(&in_reply_to->lock); 2873 binder_inner_proc_unlock(proc); 2874 return_error = BR_FAILED_REPLY; 2875 return_error_param = -EPROTO; 2876 return_error_line = __LINE__; 2877 in_reply_to = NULL; 2878 goto err_bad_call_stack; 2879 } 2880 thread->transaction_stack = in_reply_to->to_parent; 2881 binder_inner_proc_unlock(proc); 2882 binder_set_nice(in_reply_to->saved_priority); 2883 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to); 2884 if (target_thread == NULL) { 2885 /* annotation for sparse */ 2886 __release(&target_thread->proc->inner_lock); 2887 return_error = BR_DEAD_REPLY; 2888 return_error_line = __LINE__; 2889 goto err_dead_binder; 2890 } 2891 if (target_thread->transaction_stack != in_reply_to) { 2892 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n", 2893 proc->pid, thread->pid, 2894 target_thread->transaction_stack ? 2895 target_thread->transaction_stack->debug_id : 0, 2896 in_reply_to->debug_id); 2897 binder_inner_proc_unlock(target_thread->proc); 2898 return_error = BR_FAILED_REPLY; 2899 return_error_param = -EPROTO; 2900 return_error_line = __LINE__; 2901 in_reply_to = NULL; 2902 target_thread = NULL; 2903 goto err_dead_binder; 2904 } 2905 target_proc = target_thread->proc; 2906 target_proc->tmp_ref++; 2907 binder_inner_proc_unlock(target_thread->proc); 2908 } else { 2909 if (tr->target.handle) { 2910 struct binder_ref *ref; 2911 2912 /* 2913 * There must already be a strong ref 2914 * on this node. If so, do a strong 2915 * increment on the node to ensure it 2916 * stays alive until the transaction is 2917 * done. 2918 */ 2919 binder_proc_lock(proc); 2920 ref = binder_get_ref_olocked(proc, tr->target.handle, 2921 true); 2922 if (ref) { 2923 target_node = binder_get_node_refs_for_txn( 2924 ref->node, &target_proc, 2925 &return_error); 2926 } else { 2927 binder_user_error("%d:%d got transaction to invalid handle\n", 2928 proc->pid, thread->pid); 2929 return_error = BR_FAILED_REPLY; 2930 } 2931 binder_proc_unlock(proc); 2932 } else { 2933 mutex_lock(&context->context_mgr_node_lock); 2934 target_node = context->binder_context_mgr_node; 2935 if (target_node) 2936 target_node = binder_get_node_refs_for_txn( 2937 target_node, &target_proc, 2938 &return_error); 2939 else 2940 return_error = BR_DEAD_REPLY; 2941 mutex_unlock(&context->context_mgr_node_lock); 2942 if (target_node && target_proc->pid == proc->pid) { 2943 binder_user_error("%d:%d got transaction to context manager from process owning it\n", 2944 proc->pid, thread->pid); 2945 return_error = BR_FAILED_REPLY; 2946 return_error_param = -EINVAL; 2947 return_error_line = __LINE__; 2948 goto err_invalid_target_handle; 2949 } 2950 } 2951 if (!target_node) { 2952 /* 2953 * return_error is set above 2954 */ 2955 return_error_param = -EINVAL; 2956 return_error_line = __LINE__; 2957 goto err_dead_binder; 2958 } 2959 e->to_node = target_node->debug_id; 2960 if (WARN_ON(proc == target_proc)) { 2961 return_error = BR_FAILED_REPLY; 2962 return_error_param = -EINVAL; 2963 return_error_line = __LINE__; 2964 goto err_invalid_target_handle; 2965 } 2966 if (security_binder_transaction(proc->tsk, 2967 target_proc->tsk) < 0) { 2968 return_error = BR_FAILED_REPLY; 2969 return_error_param = -EPERM; 2970 return_error_line = __LINE__; 2971 goto err_invalid_target_handle; 2972 } 2973 binder_inner_proc_lock(proc); 2974 2975 w = list_first_entry_or_null(&thread->todo, 2976 struct binder_work, entry); 2977 if (!(tr->flags & TF_ONE_WAY) && w 
&& 2978 w->type == BINDER_WORK_TRANSACTION) { 2979 /* 2980 * Do not allow new outgoing transaction from a 2981 * thread that has a transaction at the head of 2982 * its todo list. Only need to check the head 2983 * because binder_select_thread_ilocked picks a 2984 * thread from proc->waiting_threads to enqueue 2985 * the transaction, and nothing is queued to the 2986 * todo list while the thread is on waiting_threads. 2987 */ 2988 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n", 2989 proc->pid, thread->pid); 2990 binder_inner_proc_unlock(proc); 2991 return_error = BR_FAILED_REPLY; 2992 return_error_param = -EPROTO; 2993 return_error_line = __LINE__; 2994 goto err_bad_todo_list; 2995 } 2996 2997 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { 2998 struct binder_transaction *tmp; 2999 3000 tmp = thread->transaction_stack; 3001 if (tmp->to_thread != thread) { 3002 spin_lock(&tmp->lock); 3003 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", 3004 proc->pid, thread->pid, tmp->debug_id, 3005 tmp->to_proc ? tmp->to_proc->pid : 0, 3006 tmp->to_thread ? 3007 tmp->to_thread->pid : 0); 3008 spin_unlock(&tmp->lock); 3009 binder_inner_proc_unlock(proc); 3010 return_error = BR_FAILED_REPLY; 3011 return_error_param = -EPROTO; 3012 return_error_line = __LINE__; 3013 goto err_bad_call_stack; 3014 } 3015 while (tmp) { 3016 struct binder_thread *from; 3017 3018 spin_lock(&tmp->lock); 3019 from = tmp->from; 3020 if (from && from->proc == target_proc) { 3021 atomic_inc(&from->tmp_ref); 3022 target_thread = from; 3023 spin_unlock(&tmp->lock); 3024 break; 3025 } 3026 spin_unlock(&tmp->lock); 3027 tmp = tmp->from_parent; 3028 } 3029 } 3030 binder_inner_proc_unlock(proc); 3031 } 3032 if (target_thread) 3033 e->to_thread = target_thread->pid; 3034 e->to_proc = target_proc->pid; 3035 3036 /* TODO: reuse incoming transaction for reply */ 3037 t = kzalloc(sizeof(*t), GFP_KERNEL); 3038 if (t == NULL) { 3039 return_error = BR_FAILED_REPLY; 3040 return_error_param = -ENOMEM; 3041 return_error_line = __LINE__; 3042 goto err_alloc_t_failed; 3043 } 3044 INIT_LIST_HEAD(&t->fd_fixups); 3045 binder_stats_created(BINDER_STAT_TRANSACTION); 3046 spin_lock_init(&t->lock); 3047 3048 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); 3049 if (tcomplete == NULL) { 3050 return_error = BR_FAILED_REPLY; 3051 return_error_param = -ENOMEM; 3052 return_error_line = __LINE__; 3053 goto err_alloc_tcomplete_failed; 3054 } 3055 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); 3056 3057 t->debug_id = t_debug_id; 3058 3059 if (reply) 3060 binder_debug(BINDER_DEBUG_TRANSACTION, 3061 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n", 3062 proc->pid, thread->pid, t->debug_id, 3063 target_proc->pid, target_thread->pid, 3064 (u64)tr->data.ptr.buffer, 3065 (u64)tr->data.ptr.offsets, 3066 (u64)tr->data_size, (u64)tr->offsets_size, 3067 (u64)extra_buffers_size); 3068 else 3069 binder_debug(BINDER_DEBUG_TRANSACTION, 3070 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n", 3071 proc->pid, thread->pid, t->debug_id, 3072 target_proc->pid, target_node->debug_id, 3073 (u64)tr->data.ptr.buffer, 3074 (u64)tr->data.ptr.offsets, 3075 (u64)tr->data_size, (u64)tr->offsets_size, 3076 (u64)extra_buffers_size); 3077 3078 if (!reply && !(tr->flags & TF_ONE_WAY)) 3079 t->from = thread; 3080 else 3081 t->from = NULL; 3082 t->sender_euid = task_euid(proc->tsk); 3083 t->to_proc = 
target_proc; 3084 t->to_thread = target_thread; 3085 t->code = tr->code; 3086 t->flags = tr->flags; 3087 t->priority = task_nice(current); 3088 3089 if (target_node && target_node->txn_security_ctx) { 3090 u32 secid; 3091 size_t added_size; 3092 3093 security_task_getsecid(proc->tsk, &secid); 3094 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz); 3095 if (ret) { 3096 return_error = BR_FAILED_REPLY; 3097 return_error_param = ret; 3098 return_error_line = __LINE__; 3099 goto err_get_secctx_failed; 3100 } 3101 added_size = ALIGN(secctx_sz, sizeof(u64)); 3102 extra_buffers_size += added_size; 3103 if (extra_buffers_size < added_size) { 3104 /* integer overflow of extra_buffers_size */ 3105 return_error = BR_FAILED_REPLY; 3106 return_error_param = EINVAL; 3107 return_error_line = __LINE__; 3108 goto err_bad_extra_size; 3109 } 3110 } 3111 3112 trace_binder_transaction(reply, t, target_node); 3113 3114 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, 3115 tr->offsets_size, extra_buffers_size, 3116 !reply && (t->flags & TF_ONE_WAY), current->tgid); 3117 if (IS_ERR(t->buffer)) { 3118 /* 3119 * -ESRCH indicates VMA cleared. The target is dying. 3120 */ 3121 return_error_param = PTR_ERR(t->buffer); 3122 return_error = return_error_param == -ESRCH ? 3123 BR_DEAD_REPLY : BR_FAILED_REPLY; 3124 return_error_line = __LINE__; 3125 t->buffer = NULL; 3126 goto err_binder_alloc_buf_failed; 3127 } 3128 if (secctx) { 3129 int err; 3130 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) + 3131 ALIGN(tr->offsets_size, sizeof(void *)) + 3132 ALIGN(extra_buffers_size, sizeof(void *)) - 3133 ALIGN(secctx_sz, sizeof(u64)); 3134 3135 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset; 3136 err = binder_alloc_copy_to_buffer(&target_proc->alloc, 3137 t->buffer, buf_offset, 3138 secctx, secctx_sz); 3139 if (err) { 3140 t->security_ctx = 0; 3141 WARN_ON(1); 3142 } 3143 security_release_secctx(secctx, secctx_sz); 3144 secctx = NULL; 3145 } 3146 t->buffer->debug_id = t->debug_id; 3147 t->buffer->transaction = t; 3148 t->buffer->target_node = target_node; 3149 trace_binder_transaction_alloc_buf(t->buffer); 3150 3151 if (binder_alloc_copy_user_to_buffer( 3152 &target_proc->alloc, 3153 t->buffer, 0, 3154 (const void __user *) 3155 (uintptr_t)tr->data.ptr.buffer, 3156 tr->data_size)) { 3157 binder_user_error("%d:%d got transaction with invalid data ptr\n", 3158 proc->pid, thread->pid); 3159 return_error = BR_FAILED_REPLY; 3160 return_error_param = -EFAULT; 3161 return_error_line = __LINE__; 3162 goto err_copy_data_failed; 3163 } 3164 if (binder_alloc_copy_user_to_buffer( 3165 &target_proc->alloc, 3166 t->buffer, 3167 ALIGN(tr->data_size, sizeof(void *)), 3168 (const void __user *) 3169 (uintptr_t)tr->data.ptr.offsets, 3170 tr->offsets_size)) { 3171 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 3172 proc->pid, thread->pid); 3173 return_error = BR_FAILED_REPLY; 3174 return_error_param = -EFAULT; 3175 return_error_line = __LINE__; 3176 goto err_copy_data_failed; 3177 } 3178 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) { 3179 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n", 3180 proc->pid, thread->pid, (u64)tr->offsets_size); 3181 return_error = BR_FAILED_REPLY; 3182 return_error_param = -EINVAL; 3183 return_error_line = __LINE__; 3184 goto err_bad_offset; 3185 } 3186 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) { 3187 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n", 3188 proc->pid, 
thread->pid, 3189 (u64)extra_buffers_size); 3190 return_error = BR_FAILED_REPLY; 3191 return_error_param = -EINVAL; 3192 return_error_line = __LINE__; 3193 goto err_bad_offset; 3194 } 3195 off_start_offset = ALIGN(tr->data_size, sizeof(void *)); 3196 buffer_offset = off_start_offset; 3197 off_end_offset = off_start_offset + tr->offsets_size; 3198 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *)); 3199 sg_buf_end_offset = sg_buf_offset + extra_buffers_size - 3200 ALIGN(secctx_sz, sizeof(u64)); 3201 off_min = 0; 3202 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; 3203 buffer_offset += sizeof(binder_size_t)) { 3204 struct binder_object_header *hdr; 3205 size_t object_size; 3206 struct binder_object object; 3207 binder_size_t object_offset; 3208 3209 if (binder_alloc_copy_from_buffer(&target_proc->alloc, 3210 &object_offset, 3211 t->buffer, 3212 buffer_offset, 3213 sizeof(object_offset))) { 3214 return_error = BR_FAILED_REPLY; 3215 return_error_param = -EINVAL; 3216 return_error_line = __LINE__; 3217 goto err_bad_offset; 3218 } 3219 object_size = binder_get_object(target_proc, t->buffer, 3220 object_offset, &object); 3221 if (object_size == 0 || object_offset < off_min) { 3222 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n", 3223 proc->pid, thread->pid, 3224 (u64)object_offset, 3225 (u64)off_min, 3226 (u64)t->buffer->data_size); 3227 return_error = BR_FAILED_REPLY; 3228 return_error_param = -EINVAL; 3229 return_error_line = __LINE__; 3230 goto err_bad_offset; 3231 } 3232 3233 hdr = &object.hdr; 3234 off_min = object_offset + object_size; 3235 switch (hdr->type) { 3236 case BINDER_TYPE_BINDER: 3237 case BINDER_TYPE_WEAK_BINDER: { 3238 struct flat_binder_object *fp; 3239 3240 fp = to_flat_binder_object(hdr); 3241 ret = binder_translate_binder(fp, t, thread); 3242 3243 if (ret < 0 || 3244 binder_alloc_copy_to_buffer(&target_proc->alloc, 3245 t->buffer, 3246 object_offset, 3247 fp, sizeof(*fp))) { 3248 return_error = BR_FAILED_REPLY; 3249 return_error_param = ret; 3250 return_error_line = __LINE__; 3251 goto err_translate_failed; 3252 } 3253 } break; 3254 case BINDER_TYPE_HANDLE: 3255 case BINDER_TYPE_WEAK_HANDLE: { 3256 struct flat_binder_object *fp; 3257 3258 fp = to_flat_binder_object(hdr); 3259 ret = binder_translate_handle(fp, t, thread); 3260 if (ret < 0 || 3261 binder_alloc_copy_to_buffer(&target_proc->alloc, 3262 t->buffer, 3263 object_offset, 3264 fp, sizeof(*fp))) { 3265 return_error = BR_FAILED_REPLY; 3266 return_error_param = ret; 3267 return_error_line = __LINE__; 3268 goto err_translate_failed; 3269 } 3270 } break; 3271 3272 case BINDER_TYPE_FD: { 3273 struct binder_fd_object *fp = to_binder_fd_object(hdr); 3274 binder_size_t fd_offset = object_offset + 3275 (uintptr_t)&fp->fd - (uintptr_t)fp; 3276 int ret = binder_translate_fd(fp->fd, fd_offset, t, 3277 thread, in_reply_to); 3278 3279 fp->pad_binder = 0; 3280 if (ret < 0 || 3281 binder_alloc_copy_to_buffer(&target_proc->alloc, 3282 t->buffer, 3283 object_offset, 3284 fp, sizeof(*fp))) { 3285 return_error = BR_FAILED_REPLY; 3286 return_error_param = ret; 3287 return_error_line = __LINE__; 3288 goto err_translate_failed; 3289 } 3290 } break; 3291 case BINDER_TYPE_FDA: { 3292 struct binder_object ptr_object; 3293 binder_size_t parent_offset; 3294 struct binder_fd_array_object *fda = 3295 to_binder_fd_array_object(hdr); 3296 size_t num_valid = (buffer_offset - off_start_offset) / 3297 sizeof(binder_size_t); 3298 struct binder_buffer_object *parent = 3299 
binder_validate_ptr(target_proc, t->buffer, 3300 &ptr_object, fda->parent, 3301 off_start_offset, 3302 &parent_offset, 3303 num_valid); 3304 if (!parent) { 3305 binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 3306 proc->pid, thread->pid); 3307 return_error = BR_FAILED_REPLY; 3308 return_error_param = -EINVAL; 3309 return_error_line = __LINE__; 3310 goto err_bad_parent; 3311 } 3312 if (!binder_validate_fixup(target_proc, t->buffer, 3313 off_start_offset, 3314 parent_offset, 3315 fda->parent_offset, 3316 last_fixup_obj_off, 3317 last_fixup_min_off)) { 3318 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", 3319 proc->pid, thread->pid); 3320 return_error = BR_FAILED_REPLY; 3321 return_error_param = -EINVAL; 3322 return_error_line = __LINE__; 3323 goto err_bad_parent; 3324 } 3325 ret = binder_translate_fd_array(fda, parent, t, thread, 3326 in_reply_to); 3327 if (ret < 0) { 3328 return_error = BR_FAILED_REPLY; 3329 return_error_param = ret; 3330 return_error_line = __LINE__; 3331 goto err_translate_failed; 3332 } 3333 last_fixup_obj_off = parent_offset; 3334 last_fixup_min_off = 3335 fda->parent_offset + sizeof(u32) * fda->num_fds; 3336 } break; 3337 case BINDER_TYPE_PTR: { 3338 struct binder_buffer_object *bp = 3339 to_binder_buffer_object(hdr); 3340 size_t buf_left = sg_buf_end_offset - sg_buf_offset; 3341 size_t num_valid; 3342 3343 if (bp->length > buf_left) { 3344 binder_user_error("%d:%d got transaction with too large buffer\n", 3345 proc->pid, thread->pid); 3346 return_error = BR_FAILED_REPLY; 3347 return_error_param = -EINVAL; 3348 return_error_line = __LINE__; 3349 goto err_bad_offset; 3350 } 3351 if (binder_alloc_copy_user_to_buffer( 3352 &target_proc->alloc, 3353 t->buffer, 3354 sg_buf_offset, 3355 (const void __user *) 3356 (uintptr_t)bp->buffer, 3357 bp->length)) { 3358 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 3359 proc->pid, thread->pid); 3360 return_error_param = -EFAULT; 3361 return_error = BR_FAILED_REPLY; 3362 return_error_line = __LINE__; 3363 goto err_copy_data_failed; 3364 } 3365 /* Fixup buffer pointer to target proc address space */ 3366 bp->buffer = (uintptr_t) 3367 t->buffer->user_data + sg_buf_offset; 3368 sg_buf_offset += ALIGN(bp->length, sizeof(u64)); 3369 3370 num_valid = (buffer_offset - off_start_offset) / 3371 sizeof(binder_size_t); 3372 ret = binder_fixup_parent(t, thread, bp, 3373 off_start_offset, 3374 num_valid, 3375 last_fixup_obj_off, 3376 last_fixup_min_off); 3377 if (ret < 0 || 3378 binder_alloc_copy_to_buffer(&target_proc->alloc, 3379 t->buffer, 3380 object_offset, 3381 bp, sizeof(*bp))) { 3382 return_error = BR_FAILED_REPLY; 3383 return_error_param = ret; 3384 return_error_line = __LINE__; 3385 goto err_translate_failed; 3386 } 3387 last_fixup_obj_off = object_offset; 3388 last_fixup_min_off = 0; 3389 } break; 3390 default: 3391 binder_user_error("%d:%d got transaction with invalid object type, %x\n", 3392 proc->pid, thread->pid, hdr->type); 3393 return_error = BR_FAILED_REPLY; 3394 return_error_param = -EINVAL; 3395 return_error_line = __LINE__; 3396 goto err_bad_object_type; 3397 } 3398 } 3399 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; 3400 t->work.type = BINDER_WORK_TRANSACTION; 3401 3402 if (reply) { 3403 binder_enqueue_thread_work(thread, tcomplete); 3404 binder_inner_proc_lock(target_proc); 3405 if (target_thread->is_dead) { 3406 binder_inner_proc_unlock(target_proc); 3407 goto err_dead_proc_or_thread; 3408 } 3409 BUG_ON(t->buffer->async_transaction != 
0); 3410 binder_pop_transaction_ilocked(target_thread, in_reply_to); 3411 binder_enqueue_thread_work_ilocked(target_thread, &t->work); 3412 binder_inner_proc_unlock(target_proc); 3413 wake_up_interruptible_sync(&target_thread->wait); 3414 binder_free_transaction(in_reply_to); 3415 } else if (!(t->flags & TF_ONE_WAY)) { 3416 BUG_ON(t->buffer->async_transaction != 0); 3417 binder_inner_proc_lock(proc); 3418 /* 3419 * Defer the TRANSACTION_COMPLETE, so we don't return to 3420 * userspace immediately; this allows the target process to 3421 * immediately start processing this transaction, reducing 3422 * latency. We will then return the TRANSACTION_COMPLETE when 3423 * the target replies (or there is an error). 3424 */ 3425 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete); 3426 t->need_reply = 1; 3427 t->from_parent = thread->transaction_stack; 3428 thread->transaction_stack = t; 3429 binder_inner_proc_unlock(proc); 3430 if (!binder_proc_transaction(t, target_proc, target_thread)) { 3431 binder_inner_proc_lock(proc); 3432 binder_pop_transaction_ilocked(thread, t); 3433 binder_inner_proc_unlock(proc); 3434 goto err_dead_proc_or_thread; 3435 } 3436 } else { 3437 BUG_ON(target_node == NULL); 3438 BUG_ON(t->buffer->async_transaction != 1); 3439 binder_enqueue_thread_work(thread, tcomplete); 3440 if (!binder_proc_transaction(t, target_proc, NULL)) 3441 goto err_dead_proc_or_thread; 3442 } 3443 if (target_thread) 3444 binder_thread_dec_tmpref(target_thread); 3445 binder_proc_dec_tmpref(target_proc); 3446 if (target_node) 3447 binder_dec_node_tmpref(target_node); 3448 /* 3449 * write barrier to synchronize with initialization 3450 * of log entry 3451 */ 3452 smp_wmb(); 3453 WRITE_ONCE(e->debug_id_done, t_debug_id); 3454 return; 3455 3456 err_dead_proc_or_thread: 3457 return_error = BR_DEAD_REPLY; 3458 return_error_line = __LINE__; 3459 binder_dequeue_work(proc, tcomplete); 3460 err_translate_failed: 3461 err_bad_object_type: 3462 err_bad_offset: 3463 err_bad_parent: 3464 err_copy_data_failed: 3465 binder_free_txn_fixups(t); 3466 trace_binder_transaction_failed_buffer_release(t->buffer); 3467 binder_transaction_buffer_release(target_proc, t->buffer, 3468 buffer_offset, true); 3469 if (target_node) 3470 binder_dec_node_tmpref(target_node); 3471 target_node = NULL; 3472 t->buffer->transaction = NULL; 3473 binder_alloc_free_buf(&target_proc->alloc, t->buffer); 3474 err_binder_alloc_buf_failed: 3475 err_bad_extra_size: 3476 if (secctx) 3477 security_release_secctx(secctx, secctx_sz); 3478 err_get_secctx_failed: 3479 kfree(tcomplete); 3480 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 3481 err_alloc_tcomplete_failed: 3482 kfree(t); 3483 binder_stats_deleted(BINDER_STAT_TRANSACTION); 3484 err_alloc_t_failed: 3485 err_bad_todo_list: 3486 err_bad_call_stack: 3487 err_empty_call_stack: 3488 err_dead_binder: 3489 err_invalid_target_handle: 3490 if (target_thread) 3491 binder_thread_dec_tmpref(target_thread); 3492 if (target_proc) 3493 binder_proc_dec_tmpref(target_proc); 3494 if (target_node) { 3495 binder_dec_node(target_node, 1, 0); 3496 binder_dec_node_tmpref(target_node); 3497 } 3498 3499 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 3500 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n", 3501 proc->pid, thread->pid, return_error, return_error_param, 3502 (u64)tr->data_size, (u64)tr->offsets_size, 3503 return_error_line); 3504 3505 { 3506 struct binder_transaction_log_entry *fe; 3507 3508 e->return_error = return_error; 3509 e->return_error_param = return_error_param; 3510 
e->return_error_line = return_error_line; 3511 fe = binder_transaction_log_add(&binder_transaction_log_failed); 3512 *fe = *e; 3513 /* 3514 * write barrier to synchronize with initialization 3515 * of log entry 3516 */ 3517 smp_wmb(); 3518 WRITE_ONCE(e->debug_id_done, t_debug_id); 3519 WRITE_ONCE(fe->debug_id_done, t_debug_id); 3520 } 3521 3522 BUG_ON(thread->return_error.cmd != BR_OK); 3523 if (in_reply_to) { 3524 thread->return_error.cmd = BR_TRANSACTION_COMPLETE; 3525 binder_enqueue_thread_work(thread, &thread->return_error.work); 3526 binder_send_failed_reply(in_reply_to, return_error); 3527 } else { 3528 thread->return_error.cmd = return_error; 3529 binder_enqueue_thread_work(thread, &thread->return_error.work); 3530 } 3531 } 3532 3533 /** 3534 * binder_free_buf() - free the specified buffer 3535 * @proc: binder proc that owns buffer 3536 * @buffer: buffer to be freed 3537 * 3538 * If buffer for an async transaction, enqueue the next async 3539 * transaction from the node. 3540 * 3541 * Cleanup buffer and free it. 3542 */ 3543 static void 3544 binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer) 3545 { 3546 binder_inner_proc_lock(proc); 3547 if (buffer->transaction) { 3548 buffer->transaction->buffer = NULL; 3549 buffer->transaction = NULL; 3550 } 3551 binder_inner_proc_unlock(proc); 3552 if (buffer->async_transaction && buffer->target_node) { 3553 struct binder_node *buf_node; 3554 struct binder_work *w; 3555 3556 buf_node = buffer->target_node; 3557 binder_node_inner_lock(buf_node); 3558 BUG_ON(!buf_node->has_async_transaction); 3559 BUG_ON(buf_node->proc != proc); 3560 w = binder_dequeue_work_head_ilocked( 3561 &buf_node->async_todo); 3562 if (!w) { 3563 buf_node->has_async_transaction = false; 3564 } else { 3565 binder_enqueue_work_ilocked( 3566 w, &proc->todo); 3567 binder_wakeup_proc_ilocked(proc); 3568 } 3569 binder_node_inner_unlock(buf_node); 3570 } 3571 trace_binder_transaction_buffer_release(buffer); 3572 binder_transaction_buffer_release(proc, buffer, 0, false); 3573 binder_alloc_free_buf(&proc->alloc, buffer); 3574 } 3575 3576 static int binder_thread_write(struct binder_proc *proc, 3577 struct binder_thread *thread, 3578 binder_uintptr_t binder_buffer, size_t size, 3579 binder_size_t *consumed) 3580 { 3581 uint32_t cmd; 3582 struct binder_context *context = proc->context; 3583 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 3584 void __user *ptr = buffer + *consumed; 3585 void __user *end = buffer + size; 3586 3587 while (ptr < end && thread->return_error.cmd == BR_OK) { 3588 int ret; 3589 3590 if (get_user(cmd, (uint32_t __user *)ptr)) 3591 return -EFAULT; 3592 ptr += sizeof(uint32_t); 3593 trace_binder_command(cmd); 3594 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { 3595 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]); 3596 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]); 3597 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]); 3598 } 3599 switch (cmd) { 3600 case BC_INCREFS: 3601 case BC_ACQUIRE: 3602 case BC_RELEASE: 3603 case BC_DECREFS: { 3604 uint32_t target; 3605 const char *debug_string; 3606 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE; 3607 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE; 3608 struct binder_ref_data rdata; 3609 3610 if (get_user(target, (uint32_t __user *)ptr)) 3611 return -EFAULT; 3612 3613 ptr += sizeof(uint32_t); 3614 ret = -1; 3615 if (increment && !target) { 3616 struct binder_node *ctx_mgr_node; 3617 mutex_lock(&context->context_mgr_node_lock); 3618 ctx_mgr_node = context->binder_context_mgr_node; 3619 
if (ctx_mgr_node) { 3620 if (ctx_mgr_node->proc == proc) { 3621 binder_user_error("%d:%d context manager tried to acquire desc 0\n", 3622 proc->pid, thread->pid); 3623 mutex_unlock(&context->context_mgr_node_lock); 3624 return -EINVAL; 3625 } 3626 ret = binder_inc_ref_for_node( 3627 proc, ctx_mgr_node, 3628 strong, NULL, &rdata); 3629 } 3630 mutex_unlock(&context->context_mgr_node_lock); 3631 } 3632 if (ret) 3633 ret = binder_update_ref_for_handle( 3634 proc, target, increment, strong, 3635 &rdata); 3636 if (!ret && rdata.desc != target) { 3637 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n", 3638 proc->pid, thread->pid, 3639 target, rdata.desc); 3640 } 3641 switch (cmd) { 3642 case BC_INCREFS: 3643 debug_string = "IncRefs"; 3644 break; 3645 case BC_ACQUIRE: 3646 debug_string = "Acquire"; 3647 break; 3648 case BC_RELEASE: 3649 debug_string = "Release"; 3650 break; 3651 case BC_DECREFS: 3652 default: 3653 debug_string = "DecRefs"; 3654 break; 3655 } 3656 if (ret) { 3657 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n", 3658 proc->pid, thread->pid, debug_string, 3659 strong, target, ret); 3660 break; 3661 } 3662 binder_debug(BINDER_DEBUG_USER_REFS, 3663 "%d:%d %s ref %d desc %d s %d w %d\n", 3664 proc->pid, thread->pid, debug_string, 3665 rdata.debug_id, rdata.desc, rdata.strong, 3666 rdata.weak); 3667 break; 3668 } 3669 case BC_INCREFS_DONE: 3670 case BC_ACQUIRE_DONE: { 3671 binder_uintptr_t node_ptr; 3672 binder_uintptr_t cookie; 3673 struct binder_node *node; 3674 bool free_node; 3675 3676 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr)) 3677 return -EFAULT; 3678 ptr += sizeof(binder_uintptr_t); 3679 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3680 return -EFAULT; 3681 ptr += sizeof(binder_uintptr_t); 3682 node = binder_get_node(proc, node_ptr); 3683 if (node == NULL) { 3684 binder_user_error("%d:%d %s u%016llx no match\n", 3685 proc->pid, thread->pid, 3686 cmd == BC_INCREFS_DONE ? 3687 "BC_INCREFS_DONE" : 3688 "BC_ACQUIRE_DONE", 3689 (u64)node_ptr); 3690 break; 3691 } 3692 if (cookie != node->cookie) { 3693 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n", 3694 proc->pid, thread->pid, 3695 cmd == BC_INCREFS_DONE ? 3696 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 3697 (u64)node_ptr, node->debug_id, 3698 (u64)cookie, (u64)node->cookie); 3699 binder_put_node(node); 3700 break; 3701 } 3702 binder_node_inner_lock(node); 3703 if (cmd == BC_ACQUIRE_DONE) { 3704 if (node->pending_strong_ref == 0) { 3705 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", 3706 proc->pid, thread->pid, 3707 node->debug_id); 3708 binder_node_inner_unlock(node); 3709 binder_put_node(node); 3710 break; 3711 } 3712 node->pending_strong_ref = 0; 3713 } else { 3714 if (node->pending_weak_ref == 0) { 3715 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", 3716 proc->pid, thread->pid, 3717 node->debug_id); 3718 binder_node_inner_unlock(node); 3719 binder_put_node(node); 3720 break; 3721 } 3722 node->pending_weak_ref = 0; 3723 } 3724 free_node = binder_dec_node_nilocked(node, 3725 cmd == BC_ACQUIRE_DONE, 0); 3726 WARN_ON(free_node); 3727 binder_debug(BINDER_DEBUG_USER_REFS, 3728 "%d:%d %s node %d ls %d lw %d tr %d\n", 3729 proc->pid, thread->pid, 3730 cmd == BC_INCREFS_DONE ? 
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 3731 node->debug_id, node->local_strong_refs, 3732 node->local_weak_refs, node->tmp_refs); 3733 binder_node_inner_unlock(node); 3734 binder_put_node(node); 3735 break; 3736 } 3737 case BC_ATTEMPT_ACQUIRE: 3738 pr_err("BC_ATTEMPT_ACQUIRE not supported\n"); 3739 return -EINVAL; 3740 case BC_ACQUIRE_RESULT: 3741 pr_err("BC_ACQUIRE_RESULT not supported\n"); 3742 return -EINVAL; 3743 3744 case BC_FREE_BUFFER: { 3745 binder_uintptr_t data_ptr; 3746 struct binder_buffer *buffer; 3747 3748 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr)) 3749 return -EFAULT; 3750 ptr += sizeof(binder_uintptr_t); 3751 3752 buffer = binder_alloc_prepare_to_free(&proc->alloc, 3753 data_ptr); 3754 if (IS_ERR_OR_NULL(buffer)) { 3755 if (PTR_ERR(buffer) == -EPERM) { 3756 binder_user_error( 3757 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n", 3758 proc->pid, thread->pid, 3759 (u64)data_ptr); 3760 } else { 3761 binder_user_error( 3762 "%d:%d BC_FREE_BUFFER u%016llx no match\n", 3763 proc->pid, thread->pid, 3764 (u64)data_ptr); 3765 } 3766 break; 3767 } 3768 binder_debug(BINDER_DEBUG_FREE_BUFFER, 3769 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n", 3770 proc->pid, thread->pid, (u64)data_ptr, 3771 buffer->debug_id, 3772 buffer->transaction ? "active" : "finished"); 3773 binder_free_buf(proc, buffer); 3774 break; 3775 } 3776 3777 case BC_TRANSACTION_SG: 3778 case BC_REPLY_SG: { 3779 struct binder_transaction_data_sg tr; 3780 3781 if (copy_from_user(&tr, ptr, sizeof(tr))) 3782 return -EFAULT; 3783 ptr += sizeof(tr); 3784 binder_transaction(proc, thread, &tr.transaction_data, 3785 cmd == BC_REPLY_SG, tr.buffers_size); 3786 break; 3787 } 3788 case BC_TRANSACTION: 3789 case BC_REPLY: { 3790 struct binder_transaction_data tr; 3791 3792 if (copy_from_user(&tr, ptr, sizeof(tr))) 3793 return -EFAULT; 3794 ptr += sizeof(tr); 3795 binder_transaction(proc, thread, &tr, 3796 cmd == BC_REPLY, 0); 3797 break; 3798 } 3799 3800 case BC_REGISTER_LOOPER: 3801 binder_debug(BINDER_DEBUG_THREADS, 3802 "%d:%d BC_REGISTER_LOOPER\n", 3803 proc->pid, thread->pid); 3804 binder_inner_proc_lock(proc); 3805 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { 3806 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3807 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", 3808 proc->pid, thread->pid); 3809 } else if (proc->requested_threads == 0) { 3810 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3811 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n", 3812 proc->pid, thread->pid); 3813 } else { 3814 proc->requested_threads--; 3815 proc->requested_threads_started++; 3816 } 3817 thread->looper |= BINDER_LOOPER_STATE_REGISTERED; 3818 binder_inner_proc_unlock(proc); 3819 break; 3820 case BC_ENTER_LOOPER: 3821 binder_debug(BINDER_DEBUG_THREADS, 3822 "%d:%d BC_ENTER_LOOPER\n", 3823 proc->pid, thread->pid); 3824 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { 3825 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3826 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n", 3827 proc->pid, thread->pid); 3828 } 3829 thread->looper |= BINDER_LOOPER_STATE_ENTERED; 3830 break; 3831 case BC_EXIT_LOOPER: 3832 binder_debug(BINDER_DEBUG_THREADS, 3833 "%d:%d BC_EXIT_LOOPER\n", 3834 proc->pid, thread->pid); 3835 thread->looper |= BINDER_LOOPER_STATE_EXITED; 3836 break; 3837 3838 case BC_REQUEST_DEATH_NOTIFICATION: 3839 case BC_CLEAR_DEATH_NOTIFICATION: { 3840 uint32_t target; 3841 
binder_uintptr_t cookie; 3842 struct binder_ref *ref; 3843 struct binder_ref_death *death = NULL; 3844 3845 if (get_user(target, (uint32_t __user *)ptr)) 3846 return -EFAULT; 3847 ptr += sizeof(uint32_t); 3848 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3849 return -EFAULT; 3850 ptr += sizeof(binder_uintptr_t); 3851 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 3852 /* 3853 * Allocate memory for death notification 3854 * before taking lock 3855 */ 3856 death = kzalloc(sizeof(*death), GFP_KERNEL); 3857 if (death == NULL) { 3858 WARN_ON(thread->return_error.cmd != 3859 BR_OK); 3860 thread->return_error.cmd = BR_ERROR; 3861 binder_enqueue_thread_work( 3862 thread, 3863 &thread->return_error.work); 3864 binder_debug( 3865 BINDER_DEBUG_FAILED_TRANSACTION, 3866 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", 3867 proc->pid, thread->pid); 3868 break; 3869 } 3870 } 3871 binder_proc_lock(proc); 3872 ref = binder_get_ref_olocked(proc, target, false); 3873 if (ref == NULL) { 3874 binder_user_error("%d:%d %s invalid ref %d\n", 3875 proc->pid, thread->pid, 3876 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 3877 "BC_REQUEST_DEATH_NOTIFICATION" : 3878 "BC_CLEAR_DEATH_NOTIFICATION", 3879 target); 3880 binder_proc_unlock(proc); 3881 kfree(death); 3882 break; 3883 } 3884 3885 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 3886 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n", 3887 proc->pid, thread->pid, 3888 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 3889 "BC_REQUEST_DEATH_NOTIFICATION" : 3890 "BC_CLEAR_DEATH_NOTIFICATION", 3891 (u64)cookie, ref->data.debug_id, 3892 ref->data.desc, ref->data.strong, 3893 ref->data.weak, ref->node->debug_id); 3894 3895 binder_node_lock(ref->node); 3896 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 3897 if (ref->death) { 3898 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", 3899 proc->pid, thread->pid); 3900 binder_node_unlock(ref->node); 3901 binder_proc_unlock(proc); 3902 kfree(death); 3903 break; 3904 } 3905 binder_stats_created(BINDER_STAT_DEATH); 3906 INIT_LIST_HEAD(&death->work.entry); 3907 death->cookie = cookie; 3908 ref->death = death; 3909 if (ref->node->proc == NULL) { 3910 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 3911 3912 binder_inner_proc_lock(proc); 3913 binder_enqueue_work_ilocked( 3914 &ref->death->work, &proc->todo); 3915 binder_wakeup_proc_ilocked(proc); 3916 binder_inner_proc_unlock(proc); 3917 } 3918 } else { 3919 if (ref->death == NULL) { 3920 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", 3921 proc->pid, thread->pid); 3922 binder_node_unlock(ref->node); 3923 binder_proc_unlock(proc); 3924 break; 3925 } 3926 death = ref->death; 3927 if (death->cookie != cookie) { 3928 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n", 3929 proc->pid, thread->pid, 3930 (u64)death->cookie, 3931 (u64)cookie); 3932 binder_node_unlock(ref->node); 3933 binder_proc_unlock(proc); 3934 break; 3935 } 3936 ref->death = NULL; 3937 binder_inner_proc_lock(proc); 3938 if (list_empty(&death->work.entry)) { 3939 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 3940 if (thread->looper & 3941 (BINDER_LOOPER_STATE_REGISTERED | 3942 BINDER_LOOPER_STATE_ENTERED)) 3943 binder_enqueue_thread_work_ilocked( 3944 thread, 3945 &death->work); 3946 else { 3947 binder_enqueue_work_ilocked( 3948 &death->work, 3949 &proc->todo); 3950 binder_wakeup_proc_ilocked( 3951 proc); 3952 } 3953 } else { 3954 BUG_ON(death->work.type != 
BINDER_WORK_DEAD_BINDER); 3955 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; 3956 } 3957 binder_inner_proc_unlock(proc); 3958 } 3959 binder_node_unlock(ref->node); 3960 binder_proc_unlock(proc); 3961 } break; 3962 case BC_DEAD_BINDER_DONE: { 3963 struct binder_work *w; 3964 binder_uintptr_t cookie; 3965 struct binder_ref_death *death = NULL; 3966 3967 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3968 return -EFAULT; 3969 3970 ptr += sizeof(cookie); 3971 binder_inner_proc_lock(proc); 3972 list_for_each_entry(w, &proc->delivered_death, 3973 entry) { 3974 struct binder_ref_death *tmp_death = 3975 container_of(w, 3976 struct binder_ref_death, 3977 work); 3978 3979 if (tmp_death->cookie == cookie) { 3980 death = tmp_death; 3981 break; 3982 } 3983 } 3984 binder_debug(BINDER_DEBUG_DEAD_BINDER, 3985 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n", 3986 proc->pid, thread->pid, (u64)cookie, 3987 death); 3988 if (death == NULL) { 3989 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n", 3990 proc->pid, thread->pid, (u64)cookie); 3991 binder_inner_proc_unlock(proc); 3992 break; 3993 } 3994 binder_dequeue_work_ilocked(&death->work); 3995 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { 3996 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 3997 if (thread->looper & 3998 (BINDER_LOOPER_STATE_REGISTERED | 3999 BINDER_LOOPER_STATE_ENTERED)) 4000 binder_enqueue_thread_work_ilocked( 4001 thread, &death->work); 4002 else { 4003 binder_enqueue_work_ilocked( 4004 &death->work, 4005 &proc->todo); 4006 binder_wakeup_proc_ilocked(proc); 4007 } 4008 } 4009 binder_inner_proc_unlock(proc); 4010 } break; 4011 4012 default: 4013 pr_err("%d:%d unknown command %d\n", 4014 proc->pid, thread->pid, cmd); 4015 return -EINVAL; 4016 } 4017 *consumed = ptr - buffer; 4018 } 4019 return 0; 4020 } 4021 4022 static void binder_stat_br(struct binder_proc *proc, 4023 struct binder_thread *thread, uint32_t cmd) 4024 { 4025 trace_binder_return(cmd); 4026 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { 4027 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]); 4028 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]); 4029 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]); 4030 } 4031 } 4032 4033 static int binder_put_node_cmd(struct binder_proc *proc, 4034 struct binder_thread *thread, 4035 void __user **ptrp, 4036 binder_uintptr_t node_ptr, 4037 binder_uintptr_t node_cookie, 4038 int node_debug_id, 4039 uint32_t cmd, const char *cmd_name) 4040 { 4041 void __user *ptr = *ptrp; 4042 4043 if (put_user(cmd, (uint32_t __user *)ptr)) 4044 return -EFAULT; 4045 ptr += sizeof(uint32_t); 4046 4047 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr)) 4048 return -EFAULT; 4049 ptr += sizeof(binder_uintptr_t); 4050 4051 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr)) 4052 return -EFAULT; 4053 ptr += sizeof(binder_uintptr_t); 4054 4055 binder_stat_br(proc, thread, cmd); 4056 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n", 4057 proc->pid, thread->pid, cmd_name, node_debug_id, 4058 (u64)node_ptr, (u64)node_cookie); 4059 4060 *ptrp = ptr; 4061 return 0; 4062 } 4063 4064 static int binder_wait_for_work(struct binder_thread *thread, 4065 bool do_proc_work) 4066 { 4067 DEFINE_WAIT(wait); 4068 struct binder_proc *proc = thread->proc; 4069 int ret = 0; 4070 4071 freezer_do_not_count(); 4072 binder_inner_proc_lock(proc); 4073 for (;;) { 4074 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE); 4075 if (binder_has_work_ilocked(thread, do_proc_work)) 4076 break; 4077 if (do_proc_work) 4078 
list_add(&thread->waiting_thread_node,
				 &proc->waiting_threads);
		binder_inner_proc_unlock(proc);
		schedule();
		binder_inner_proc_lock(proc);
		list_del_init(&thread->waiting_thread_node);
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&thread->wait, &wait);
	binder_inner_proc_unlock(proc);
	freezer_count();

	return ret;
}

/**
 * binder_apply_fd_fixups() - finish fd translation
 * @proc:	binder_proc associated with @t->buffer
 * @t:	binder transaction with list of fd fixups
 *
 * Now that we are in the context of the transaction target
 * process, we can allocate and install fds. Process the
 * list of fds to translate and fix up the buffer with the
 * new fds.
 *
 * If we fail to allocate an fd, then free the resources by
 * fput'ing files that have not been processed and ksys_close'ing
 * any fds that have already been allocated.
 */
static int binder_apply_fd_fixups(struct binder_proc *proc,
				  struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;
	int ret = 0;

	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
		int fd = get_unused_fd_flags(O_CLOEXEC);

		if (fd < 0) {
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "failed fd fixup txn %d fd %d\n",
				     t->debug_id, fd);
			ret = -ENOMEM;
			break;
		}
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "fd fixup txn %d fd %d\n",
			     t->debug_id, fd);
		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
		fd_install(fd, fixup->file);
		fixup->file = NULL;
		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
						fixup->offset, &fd,
						sizeof(u32))) {
			ret = -EINVAL;
			break;
		}
	}
	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		if (fixup->file) {
			fput(fixup->file);
		} else if (ret) {
			u32 fd;
			int err;

			err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
							    t->buffer,
							    fixup->offset,
							    sizeof(fd));
			WARN_ON(err);
			if (!err)
				binder_deferred_fd_close(fd);
		}
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}

	return ret;
}

static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
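			/*
			 * binder_user_error() above sets binder_stop_on_user_error
			 * to 2 when the stop_on_user_error module parameter is
			 * enabled; wait here until an administrator clears it.
			 */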
wait_event_interruptible(binder_user_error_wait, 4195 binder_stop_on_user_error < 2); 4196 } 4197 binder_set_nice(proc->default_priority); 4198 } 4199 4200 if (non_block) { 4201 if (!binder_has_work(thread, wait_for_proc_work)) 4202 ret = -EAGAIN; 4203 } else { 4204 ret = binder_wait_for_work(thread, wait_for_proc_work); 4205 } 4206 4207 thread->looper &= ~BINDER_LOOPER_STATE_WAITING; 4208 4209 if (ret) 4210 return ret; 4211 4212 while (1) { 4213 uint32_t cmd; 4214 struct binder_transaction_data_secctx tr; 4215 struct binder_transaction_data *trd = &tr.transaction_data; 4216 struct binder_work *w = NULL; 4217 struct list_head *list = NULL; 4218 struct binder_transaction *t = NULL; 4219 struct binder_thread *t_from; 4220 size_t trsize = sizeof(*trd); 4221 4222 binder_inner_proc_lock(proc); 4223 if (!binder_worklist_empty_ilocked(&thread->todo)) 4224 list = &thread->todo; 4225 else if (!binder_worklist_empty_ilocked(&proc->todo) && 4226 wait_for_proc_work) 4227 list = &proc->todo; 4228 else { 4229 binder_inner_proc_unlock(proc); 4230 4231 /* no data added */ 4232 if (ptr - buffer == 4 && !thread->looper_need_return) 4233 goto retry; 4234 break; 4235 } 4236 4237 if (end - ptr < sizeof(tr) + 4) { 4238 binder_inner_proc_unlock(proc); 4239 break; 4240 } 4241 w = binder_dequeue_work_head_ilocked(list); 4242 if (binder_worklist_empty_ilocked(&thread->todo)) 4243 thread->process_todo = false; 4244 4245 switch (w->type) { 4246 case BINDER_WORK_TRANSACTION: { 4247 binder_inner_proc_unlock(proc); 4248 t = container_of(w, struct binder_transaction, work); 4249 } break; 4250 case BINDER_WORK_RETURN_ERROR: { 4251 struct binder_error *e = container_of( 4252 w, struct binder_error, work); 4253 4254 WARN_ON(e->cmd == BR_OK); 4255 binder_inner_proc_unlock(proc); 4256 if (put_user(e->cmd, (uint32_t __user *)ptr)) 4257 return -EFAULT; 4258 cmd = e->cmd; 4259 e->cmd = BR_OK; 4260 ptr += sizeof(uint32_t); 4261 4262 binder_stat_br(proc, thread, cmd); 4263 } break; 4264 case BINDER_WORK_TRANSACTION_COMPLETE: { 4265 binder_inner_proc_unlock(proc); 4266 cmd = BR_TRANSACTION_COMPLETE; 4267 kfree(w); 4268 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 4269 if (put_user(cmd, (uint32_t __user *)ptr)) 4270 return -EFAULT; 4271 ptr += sizeof(uint32_t); 4272 4273 binder_stat_br(proc, thread, cmd); 4274 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, 4275 "%d:%d BR_TRANSACTION_COMPLETE\n", 4276 proc->pid, thread->pid); 4277 } break; 4278 case BINDER_WORK_NODE: { 4279 struct binder_node *node = container_of(w, struct binder_node, work); 4280 int strong, weak; 4281 binder_uintptr_t node_ptr = node->ptr; 4282 binder_uintptr_t node_cookie = node->cookie; 4283 int node_debug_id = node->debug_id; 4284 int has_weak_ref; 4285 int has_strong_ref; 4286 void __user *orig_ptr = ptr; 4287 4288 BUG_ON(proc != node->proc); 4289 strong = node->internal_strong_refs || 4290 node->local_strong_refs; 4291 weak = !hlist_empty(&node->refs) || 4292 node->local_weak_refs || 4293 node->tmp_refs || strong; 4294 has_strong_ref = node->has_strong_ref; 4295 has_weak_ref = node->has_weak_ref; 4296 4297 if (weak && !has_weak_ref) { 4298 node->has_weak_ref = 1; 4299 node->pending_weak_ref = 1; 4300 node->local_weak_refs++; 4301 } 4302 if (strong && !has_strong_ref) { 4303 node->has_strong_ref = 1; 4304 node->pending_strong_ref = 1; 4305 node->local_strong_refs++; 4306 } 4307 if (!strong && has_strong_ref) 4308 node->has_strong_ref = 0; 4309 if (!weak && has_weak_ref) 4310 node->has_weak_ref = 0; 4311 if (!weak && !strong) { 4312 
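				/*
				 * No strong or weak references remain: remove
				 * the node from proc->nodes and free it below.
				 */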
binder_debug(BINDER_DEBUG_INTERNAL_REFS, 4313 "%d:%d node %d u%016llx c%016llx deleted\n", 4314 proc->pid, thread->pid, 4315 node_debug_id, 4316 (u64)node_ptr, 4317 (u64)node_cookie); 4318 rb_erase(&node->rb_node, &proc->nodes); 4319 binder_inner_proc_unlock(proc); 4320 binder_node_lock(node); 4321 /* 4322 * Acquire the node lock before freeing the 4323 * node to serialize with other threads that 4324 * may have been holding the node lock while 4325 * decrementing this node (avoids race where 4326 * this thread frees while the other thread 4327 * is unlocking the node after the final 4328 * decrement) 4329 */ 4330 binder_node_unlock(node); 4331 binder_free_node(node); 4332 } else 4333 binder_inner_proc_unlock(proc); 4334 4335 if (weak && !has_weak_ref) 4336 ret = binder_put_node_cmd( 4337 proc, thread, &ptr, node_ptr, 4338 node_cookie, node_debug_id, 4339 BR_INCREFS, "BR_INCREFS"); 4340 if (!ret && strong && !has_strong_ref) 4341 ret = binder_put_node_cmd( 4342 proc, thread, &ptr, node_ptr, 4343 node_cookie, node_debug_id, 4344 BR_ACQUIRE, "BR_ACQUIRE"); 4345 if (!ret && !strong && has_strong_ref) 4346 ret = binder_put_node_cmd( 4347 proc, thread, &ptr, node_ptr, 4348 node_cookie, node_debug_id, 4349 BR_RELEASE, "BR_RELEASE"); 4350 if (!ret && !weak && has_weak_ref) 4351 ret = binder_put_node_cmd( 4352 proc, thread, &ptr, node_ptr, 4353 node_cookie, node_debug_id, 4354 BR_DECREFS, "BR_DECREFS"); 4355 if (orig_ptr == ptr) 4356 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 4357 "%d:%d node %d u%016llx c%016llx state unchanged\n", 4358 proc->pid, thread->pid, 4359 node_debug_id, 4360 (u64)node_ptr, 4361 (u64)node_cookie); 4362 if (ret) 4363 return ret; 4364 } break; 4365 case BINDER_WORK_DEAD_BINDER: 4366 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 4367 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 4368 struct binder_ref_death *death; 4369 uint32_t cmd; 4370 binder_uintptr_t cookie; 4371 4372 death = container_of(w, struct binder_ref_death, work); 4373 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) 4374 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; 4375 else 4376 cmd = BR_DEAD_BINDER; 4377 cookie = death->cookie; 4378 4379 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 4380 "%d:%d %s %016llx\n", 4381 proc->pid, thread->pid, 4382 cmd == BR_DEAD_BINDER ? 
4383 "BR_DEAD_BINDER" : 4384 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 4385 (u64)cookie); 4386 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { 4387 binder_inner_proc_unlock(proc); 4388 kfree(death); 4389 binder_stats_deleted(BINDER_STAT_DEATH); 4390 } else { 4391 binder_enqueue_work_ilocked( 4392 w, &proc->delivered_death); 4393 binder_inner_proc_unlock(proc); 4394 } 4395 if (put_user(cmd, (uint32_t __user *)ptr)) 4396 return -EFAULT; 4397 ptr += sizeof(uint32_t); 4398 if (put_user(cookie, 4399 (binder_uintptr_t __user *)ptr)) 4400 return -EFAULT; 4401 ptr += sizeof(binder_uintptr_t); 4402 binder_stat_br(proc, thread, cmd); 4403 if (cmd == BR_DEAD_BINDER) 4404 goto done; /* DEAD_BINDER notifications can cause transactions */ 4405 } break; 4406 default: 4407 binder_inner_proc_unlock(proc); 4408 pr_err("%d:%d: bad work type %d\n", 4409 proc->pid, thread->pid, w->type); 4410 break; 4411 } 4412 4413 if (!t) 4414 continue; 4415 4416 BUG_ON(t->buffer == NULL); 4417 if (t->buffer->target_node) { 4418 struct binder_node *target_node = t->buffer->target_node; 4419 4420 trd->target.ptr = target_node->ptr; 4421 trd->cookie = target_node->cookie; 4422 t->saved_priority = task_nice(current); 4423 if (t->priority < target_node->min_priority && 4424 !(t->flags & TF_ONE_WAY)) 4425 binder_set_nice(t->priority); 4426 else if (!(t->flags & TF_ONE_WAY) || 4427 t->saved_priority > target_node->min_priority) 4428 binder_set_nice(target_node->min_priority); 4429 cmd = BR_TRANSACTION; 4430 } else { 4431 trd->target.ptr = 0; 4432 trd->cookie = 0; 4433 cmd = BR_REPLY; 4434 } 4435 trd->code = t->code; 4436 trd->flags = t->flags; 4437 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid); 4438 4439 t_from = binder_get_txn_from(t); 4440 if (t_from) { 4441 struct task_struct *sender = t_from->proc->tsk; 4442 4443 trd->sender_pid = 4444 task_tgid_nr_ns(sender, 4445 task_active_pid_ns(current)); 4446 } else { 4447 trd->sender_pid = 0; 4448 } 4449 4450 ret = binder_apply_fd_fixups(proc, t); 4451 if (ret) { 4452 struct binder_buffer *buffer = t->buffer; 4453 bool oneway = !!(t->flags & TF_ONE_WAY); 4454 int tid = t->debug_id; 4455 4456 if (t_from) 4457 binder_thread_dec_tmpref(t_from); 4458 buffer->transaction = NULL; 4459 binder_cleanup_transaction(t, "fd fixups failed", 4460 BR_FAILED_REPLY); 4461 binder_free_buf(proc, buffer); 4462 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 4463 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n", 4464 proc->pid, thread->pid, 4465 oneway ? "async " : 4466 (cmd == BR_REPLY ? 
"reply " : ""), 4467 tid, BR_FAILED_REPLY, ret, __LINE__); 4468 if (cmd == BR_REPLY) { 4469 cmd = BR_FAILED_REPLY; 4470 if (put_user(cmd, (uint32_t __user *)ptr)) 4471 return -EFAULT; 4472 ptr += sizeof(uint32_t); 4473 binder_stat_br(proc, thread, cmd); 4474 break; 4475 } 4476 continue; 4477 } 4478 trd->data_size = t->buffer->data_size; 4479 trd->offsets_size = t->buffer->offsets_size; 4480 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data; 4481 trd->data.ptr.offsets = trd->data.ptr.buffer + 4482 ALIGN(t->buffer->data_size, 4483 sizeof(void *)); 4484 4485 tr.secctx = t->security_ctx; 4486 if (t->security_ctx) { 4487 cmd = BR_TRANSACTION_SEC_CTX; 4488 trsize = sizeof(tr); 4489 } 4490 if (put_user(cmd, (uint32_t __user *)ptr)) { 4491 if (t_from) 4492 binder_thread_dec_tmpref(t_from); 4493 4494 binder_cleanup_transaction(t, "put_user failed", 4495 BR_FAILED_REPLY); 4496 4497 return -EFAULT; 4498 } 4499 ptr += sizeof(uint32_t); 4500 if (copy_to_user(ptr, &tr, trsize)) { 4501 if (t_from) 4502 binder_thread_dec_tmpref(t_from); 4503 4504 binder_cleanup_transaction(t, "copy_to_user failed", 4505 BR_FAILED_REPLY); 4506 4507 return -EFAULT; 4508 } 4509 ptr += trsize; 4510 4511 trace_binder_transaction_received(t); 4512 binder_stat_br(proc, thread, cmd); 4513 binder_debug(BINDER_DEBUG_TRANSACTION, 4514 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", 4515 proc->pid, thread->pid, 4516 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : 4517 (cmd == BR_TRANSACTION_SEC_CTX) ? 4518 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY", 4519 t->debug_id, t_from ? t_from->proc->pid : 0, 4520 t_from ? t_from->pid : 0, cmd, 4521 t->buffer->data_size, t->buffer->offsets_size, 4522 (u64)trd->data.ptr.buffer, 4523 (u64)trd->data.ptr.offsets); 4524 4525 if (t_from) 4526 binder_thread_dec_tmpref(t_from); 4527 t->buffer->allow_user_free = 1; 4528 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) { 4529 binder_inner_proc_lock(thread->proc); 4530 t->to_parent = thread->transaction_stack; 4531 t->to_thread = thread; 4532 thread->transaction_stack = t; 4533 binder_inner_proc_unlock(thread->proc); 4534 } else { 4535 binder_free_transaction(t); 4536 } 4537 break; 4538 } 4539 4540 done: 4541 4542 *consumed = ptr - buffer; 4543 binder_inner_proc_lock(proc); 4544 if (proc->requested_threads == 0 && 4545 list_empty(&thread->proc->waiting_threads) && 4546 proc->requested_threads_started < proc->max_threads && 4547 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 4548 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ 4549 /*spawn a new thread if we leave this out */) { 4550 proc->requested_threads++; 4551 binder_inner_proc_unlock(proc); 4552 binder_debug(BINDER_DEBUG_THREADS, 4553 "%d:%d BR_SPAWN_LOOPER\n", 4554 proc->pid, thread->pid); 4555 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) 4556 return -EFAULT; 4557 binder_stat_br(proc, thread, BR_SPAWN_LOOPER); 4558 } else 4559 binder_inner_proc_unlock(proc); 4560 return 0; 4561 } 4562 4563 static void binder_release_work(struct binder_proc *proc, 4564 struct list_head *list) 4565 { 4566 struct binder_work *w; 4567 enum binder_work_type wtype; 4568 4569 while (1) { 4570 binder_inner_proc_lock(proc); 4571 w = binder_dequeue_work_head_ilocked(list); 4572 wtype = w ? 
w->type : 0; 4573 binder_inner_proc_unlock(proc); 4574 if (!w) 4575 return; 4576 4577 switch (wtype) { 4578 case BINDER_WORK_TRANSACTION: { 4579 struct binder_transaction *t; 4580 4581 t = container_of(w, struct binder_transaction, work); 4582 4583 binder_cleanup_transaction(t, "process died.", 4584 BR_DEAD_REPLY); 4585 } break; 4586 case BINDER_WORK_RETURN_ERROR: { 4587 struct binder_error *e = container_of( 4588 w, struct binder_error, work); 4589 4590 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4591 "undelivered TRANSACTION_ERROR: %u\n", 4592 e->cmd); 4593 } break; 4594 case BINDER_WORK_TRANSACTION_COMPLETE: { 4595 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4596 "undelivered TRANSACTION_COMPLETE\n"); 4597 kfree(w); 4598 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 4599 } break; 4600 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 4601 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 4602 struct binder_ref_death *death; 4603 4604 death = container_of(w, struct binder_ref_death, work); 4605 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4606 "undelivered death notification, %016llx\n", 4607 (u64)death->cookie); 4608 kfree(death); 4609 binder_stats_deleted(BINDER_STAT_DEATH); 4610 } break; 4611 case BINDER_WORK_NODE: 4612 break; 4613 default: 4614 pr_err("unexpected work type, %d, not freed\n", 4615 wtype); 4616 break; 4617 } 4618 } 4619 4620 } 4621 4622 static struct binder_thread *binder_get_thread_ilocked( 4623 struct binder_proc *proc, struct binder_thread *new_thread) 4624 { 4625 struct binder_thread *thread = NULL; 4626 struct rb_node *parent = NULL; 4627 struct rb_node **p = &proc->threads.rb_node; 4628 4629 while (*p) { 4630 parent = *p; 4631 thread = rb_entry(parent, struct binder_thread, rb_node); 4632 4633 if (current->pid < thread->pid) 4634 p = &(*p)->rb_left; 4635 else if (current->pid > thread->pid) 4636 p = &(*p)->rb_right; 4637 else 4638 return thread; 4639 } 4640 if (!new_thread) 4641 return NULL; 4642 thread = new_thread; 4643 binder_stats_created(BINDER_STAT_THREAD); 4644 thread->proc = proc; 4645 thread->pid = current->pid; 4646 atomic_set(&thread->tmp_ref, 0); 4647 init_waitqueue_head(&thread->wait); 4648 INIT_LIST_HEAD(&thread->todo); 4649 rb_link_node(&thread->rb_node, parent, p); 4650 rb_insert_color(&thread->rb_node, &proc->threads); 4651 thread->looper_need_return = true; 4652 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR; 4653 thread->return_error.cmd = BR_OK; 4654 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR; 4655 thread->reply_error.cmd = BR_OK; 4656 INIT_LIST_HEAD(&new_thread->waiting_thread_node); 4657 return thread; 4658 } 4659 4660 static struct binder_thread *binder_get_thread(struct binder_proc *proc) 4661 { 4662 struct binder_thread *thread; 4663 struct binder_thread *new_thread; 4664 4665 binder_inner_proc_lock(proc); 4666 thread = binder_get_thread_ilocked(proc, NULL); 4667 binder_inner_proc_unlock(proc); 4668 if (!thread) { 4669 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL); 4670 if (new_thread == NULL) 4671 return NULL; 4672 binder_inner_proc_lock(proc); 4673 thread = binder_get_thread_ilocked(proc, new_thread); 4674 binder_inner_proc_unlock(proc); 4675 if (thread != new_thread) 4676 kfree(new_thread); 4677 } 4678 return thread; 4679 } 4680 4681 static void binder_free_proc(struct binder_proc *proc) 4682 { 4683 struct binder_device *device; 4684 4685 BUG_ON(!list_empty(&proc->todo)); 4686 BUG_ON(!list_empty(&proc->delivered_death)); 4687 device = container_of(proc->context, struct binder_device, context); 4688 if 
(refcount_dec_and_test(&device->ref)) {
		kfree(proc->context->name);
		kfree(device);
	}
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}

static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	kfree(thread);
}

static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	} else {
		__acquire(&t->lock);
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
		else
			__acquire(&t->lock);
	}
	/* annotation for sparse, lock not acquired in last iteration above */
	__release(&t->lock);

	/*
	 * If this thread used poll, make sure we remove the waitqueue
	 * from any epoll data structures holding it with POLLFREE.
	 * waitqueue_active() is safe to use here because we're holding
	 * the inner lock.
	 */
	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
	    waitqueue_active(&thread->wait)) {
		wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
	}

	binder_inner_proc_unlock(thread->proc);

	/*
	 * This is needed to avoid races between wake_up_poll() above and
	 * ep_remove_waitqueue() called for other reasons (e.g. the epoll file
	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
	 * lock, so we can be sure it's done after calling synchronize_rcu().
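	 * We only pay this cost for threads that actually used poll(), hence
	 * the BINDER_LOOPER_STATE_POLL check below.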
4787 */ 4788 if (thread->looper & BINDER_LOOPER_STATE_POLL) 4789 synchronize_rcu(); 4790 4791 if (send_reply) 4792 binder_send_failed_reply(send_reply, BR_DEAD_REPLY); 4793 binder_release_work(proc, &thread->todo); 4794 binder_thread_dec_tmpref(thread); 4795 return active_transactions; 4796 } 4797 4798 static __poll_t binder_poll(struct file *filp, 4799 struct poll_table_struct *wait) 4800 { 4801 struct binder_proc *proc = filp->private_data; 4802 struct binder_thread *thread = NULL; 4803 bool wait_for_proc_work; 4804 4805 thread = binder_get_thread(proc); 4806 if (!thread) 4807 return POLLERR; 4808 4809 binder_inner_proc_lock(thread->proc); 4810 thread->looper |= BINDER_LOOPER_STATE_POLL; 4811 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); 4812 4813 binder_inner_proc_unlock(thread->proc); 4814 4815 poll_wait(filp, &thread->wait, wait); 4816 4817 if (binder_has_work(thread, wait_for_proc_work)) 4818 return EPOLLIN; 4819 4820 return 0; 4821 } 4822 4823 static int binder_ioctl_write_read(struct file *filp, 4824 unsigned int cmd, unsigned long arg, 4825 struct binder_thread *thread) 4826 { 4827 int ret = 0; 4828 struct binder_proc *proc = filp->private_data; 4829 unsigned int size = _IOC_SIZE(cmd); 4830 void __user *ubuf = (void __user *)arg; 4831 struct binder_write_read bwr; 4832 4833 if (size != sizeof(struct binder_write_read)) { 4834 ret = -EINVAL; 4835 goto out; 4836 } 4837 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { 4838 ret = -EFAULT; 4839 goto out; 4840 } 4841 binder_debug(BINDER_DEBUG_READ_WRITE, 4842 "%d:%d write %lld at %016llx, read %lld at %016llx\n", 4843 proc->pid, thread->pid, 4844 (u64)bwr.write_size, (u64)bwr.write_buffer, 4845 (u64)bwr.read_size, (u64)bwr.read_buffer); 4846 4847 if (bwr.write_size > 0) { 4848 ret = binder_thread_write(proc, thread, 4849 bwr.write_buffer, 4850 bwr.write_size, 4851 &bwr.write_consumed); 4852 trace_binder_write_done(ret); 4853 if (ret < 0) { 4854 bwr.read_consumed = 0; 4855 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 4856 ret = -EFAULT; 4857 goto out; 4858 } 4859 } 4860 if (bwr.read_size > 0) { 4861 ret = binder_thread_read(proc, thread, bwr.read_buffer, 4862 bwr.read_size, 4863 &bwr.read_consumed, 4864 filp->f_flags & O_NONBLOCK); 4865 trace_binder_read_done(ret); 4866 binder_inner_proc_lock(proc); 4867 if (!binder_worklist_empty_ilocked(&proc->todo)) 4868 binder_wakeup_proc_ilocked(proc); 4869 binder_inner_proc_unlock(proc); 4870 if (ret < 0) { 4871 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 4872 ret = -EFAULT; 4873 goto out; 4874 } 4875 } 4876 binder_debug(BINDER_DEBUG_READ_WRITE, 4877 "%d:%d wrote %lld of %lld, read return %lld of %lld\n", 4878 proc->pid, thread->pid, 4879 (u64)bwr.write_consumed, (u64)bwr.write_size, 4880 (u64)bwr.read_consumed, (u64)bwr.read_size); 4881 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { 4882 ret = -EFAULT; 4883 goto out; 4884 } 4885 out: 4886 return ret; 4887 } 4888 4889 static int binder_ioctl_set_ctx_mgr(struct file *filp, 4890 struct flat_binder_object *fbo) 4891 { 4892 int ret = 0; 4893 struct binder_proc *proc = filp->private_data; 4894 struct binder_context *context = proc->context; 4895 struct binder_node *new_node; 4896 kuid_t curr_euid = current_euid(); 4897 4898 mutex_lock(&context->context_mgr_node_lock); 4899 if (context->binder_context_mgr_node) { 4900 pr_err("BINDER_SET_CONTEXT_MGR already set\n"); 4901 ret = -EBUSY; 4902 goto out; 4903 } 4904 ret = security_binder_set_context_mgr(proc->tsk); 4905 if (ret < 0) 4906 goto out; 4907 if 
(uid_valid(context->binder_context_mgr_uid)) { 4908 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) { 4909 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", 4910 from_kuid(&init_user_ns, curr_euid), 4911 from_kuid(&init_user_ns, 4912 context->binder_context_mgr_uid)); 4913 ret = -EPERM; 4914 goto out; 4915 } 4916 } else { 4917 context->binder_context_mgr_uid = curr_euid; 4918 } 4919 new_node = binder_new_node(proc, fbo); 4920 if (!new_node) { 4921 ret = -ENOMEM; 4922 goto out; 4923 } 4924 binder_node_lock(new_node); 4925 new_node->local_weak_refs++; 4926 new_node->local_strong_refs++; 4927 new_node->has_strong_ref = 1; 4928 new_node->has_weak_ref = 1; 4929 context->binder_context_mgr_node = new_node; 4930 binder_node_unlock(new_node); 4931 binder_put_node(new_node); 4932 out: 4933 mutex_unlock(&context->context_mgr_node_lock); 4934 return ret; 4935 } 4936 4937 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc, 4938 struct binder_node_info_for_ref *info) 4939 { 4940 struct binder_node *node; 4941 struct binder_context *context = proc->context; 4942 __u32 handle = info->handle; 4943 4944 if (info->strong_count || info->weak_count || info->reserved1 || 4945 info->reserved2 || info->reserved3) { 4946 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.", 4947 proc->pid); 4948 return -EINVAL; 4949 } 4950 4951 /* This ioctl may only be used by the context manager */ 4952 mutex_lock(&context->context_mgr_node_lock); 4953 if (!context->binder_context_mgr_node || 4954 context->binder_context_mgr_node->proc != proc) { 4955 mutex_unlock(&context->context_mgr_node_lock); 4956 return -EPERM; 4957 } 4958 mutex_unlock(&context->context_mgr_node_lock); 4959 4960 node = binder_get_node_from_ref(proc, handle, true, NULL); 4961 if (!node) 4962 return -EINVAL; 4963 4964 info->strong_count = node->local_strong_refs + 4965 node->internal_strong_refs; 4966 info->weak_count = node->local_weak_refs; 4967 4968 binder_put_node(node); 4969 4970 return 0; 4971 } 4972 4973 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc, 4974 struct binder_node_debug_info *info) 4975 { 4976 struct rb_node *n; 4977 binder_uintptr_t ptr = info->ptr; 4978 4979 memset(info, 0, sizeof(*info)); 4980 4981 binder_inner_proc_lock(proc); 4982 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 4983 struct binder_node *node = rb_entry(n, struct binder_node, 4984 rb_node); 4985 if (node->ptr > ptr) { 4986 info->ptr = node->ptr; 4987 info->cookie = node->cookie; 4988 info->has_strong_ref = node->has_strong_ref; 4989 info->has_weak_ref = node->has_weak_ref; 4990 break; 4991 } 4992 } 4993 binder_inner_proc_unlock(proc); 4994 4995 return 0; 4996 } 4997 4998 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 4999 { 5000 int ret; 5001 struct binder_proc *proc = filp->private_data; 5002 struct binder_thread *thread; 5003 unsigned int size = _IOC_SIZE(cmd); 5004 void __user *ubuf = (void __user *)arg; 5005 5006 /*pr_info("binder_ioctl: %d:%d %x %lx\n", 5007 proc->pid, current->pid, cmd, arg);*/ 5008 5009 binder_selftest_alloc(&proc->alloc); 5010 5011 trace_binder_ioctl(cmd, arg); 5012 5013 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 5014 if (ret) 5015 goto err_unlocked; 5016 5017 thread = binder_get_thread(proc); 5018 if (thread == NULL) { 5019 ret = -ENOMEM; 5020 goto err; 5021 } 5022 5023 switch (cmd) { 5024 case BINDER_WRITE_READ: 5025 ret = binder_ioctl_write_read(filp, cmd, arg, thread); 
5026 if (ret) 5027 goto err; 5028 break; 5029 case BINDER_SET_MAX_THREADS: { 5030 int max_threads; 5031 5032 if (copy_from_user(&max_threads, ubuf, 5033 sizeof(max_threads))) { 5034 ret = -EINVAL; 5035 goto err; 5036 } 5037 binder_inner_proc_lock(proc); 5038 proc->max_threads = max_threads; 5039 binder_inner_proc_unlock(proc); 5040 break; 5041 } 5042 case BINDER_SET_CONTEXT_MGR_EXT: { 5043 struct flat_binder_object fbo; 5044 5045 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) { 5046 ret = -EINVAL; 5047 goto err; 5048 } 5049 ret = binder_ioctl_set_ctx_mgr(filp, &fbo); 5050 if (ret) 5051 goto err; 5052 break; 5053 } 5054 case BINDER_SET_CONTEXT_MGR: 5055 ret = binder_ioctl_set_ctx_mgr(filp, NULL); 5056 if (ret) 5057 goto err; 5058 break; 5059 case BINDER_THREAD_EXIT: 5060 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", 5061 proc->pid, thread->pid); 5062 binder_thread_release(proc, thread); 5063 thread = NULL; 5064 break; 5065 case BINDER_VERSION: { 5066 struct binder_version __user *ver = ubuf; 5067 5068 if (size != sizeof(struct binder_version)) { 5069 ret = -EINVAL; 5070 goto err; 5071 } 5072 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, 5073 &ver->protocol_version)) { 5074 ret = -EINVAL; 5075 goto err; 5076 } 5077 break; 5078 } 5079 case BINDER_GET_NODE_INFO_FOR_REF: { 5080 struct binder_node_info_for_ref info; 5081 5082 if (copy_from_user(&info, ubuf, sizeof(info))) { 5083 ret = -EFAULT; 5084 goto err; 5085 } 5086 5087 ret = binder_ioctl_get_node_info_for_ref(proc, &info); 5088 if (ret < 0) 5089 goto err; 5090 5091 if (copy_to_user(ubuf, &info, sizeof(info))) { 5092 ret = -EFAULT; 5093 goto err; 5094 } 5095 5096 break; 5097 } 5098 case BINDER_GET_NODE_DEBUG_INFO: { 5099 struct binder_node_debug_info info; 5100 5101 if (copy_from_user(&info, ubuf, sizeof(info))) { 5102 ret = -EFAULT; 5103 goto err; 5104 } 5105 5106 ret = binder_ioctl_get_node_debug_info(proc, &info); 5107 if (ret < 0) 5108 goto err; 5109 5110 if (copy_to_user(ubuf, &info, sizeof(info))) { 5111 ret = -EFAULT; 5112 goto err; 5113 } 5114 break; 5115 } 5116 default: 5117 ret = -EINVAL; 5118 goto err; 5119 } 5120 ret = 0; 5121 err: 5122 if (thread) 5123 thread->looper_need_return = false; 5124 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 5125 if (ret && ret != -ERESTARTSYS) 5126 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); 5127 err_unlocked: 5128 trace_binder_ioctl_done(ret); 5129 return ret; 5130 } 5131 5132 static void binder_vma_open(struct vm_area_struct *vma) 5133 { 5134 struct binder_proc *proc = vma->vm_private_data; 5135 5136 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5137 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 5138 proc->pid, vma->vm_start, vma->vm_end, 5139 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 5140 (unsigned long)pgprot_val(vma->vm_page_prot)); 5141 } 5142 5143 static void binder_vma_close(struct vm_area_struct *vma) 5144 { 5145 struct binder_proc *proc = vma->vm_private_data; 5146 5147 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5148 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 5149 proc->pid, vma->vm_start, vma->vm_end, 5150 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 5151 (unsigned long)pgprot_val(vma->vm_page_prot)); 5152 binder_alloc_vma_close(&proc->alloc); 5153 } 5154 5155 static vm_fault_t binder_vm_fault(struct vm_fault *vmf) 5156 { 5157 return VM_FAULT_SIGBUS; 5158 } 5159 5160 static const struct vm_operations_struct binder_vm_ops = { 5161 .open = binder_vma_open, 5162 .close = 
binder_vma_close, 5163 .fault = binder_vm_fault, 5164 }; 5165 5166 static int binder_mmap(struct file *filp, struct vm_area_struct *vma) 5167 { 5168 struct binder_proc *proc = filp->private_data; 5169 5170 if (proc->tsk != current->group_leader) 5171 return -EINVAL; 5172 5173 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5174 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", 5175 __func__, proc->pid, vma->vm_start, vma->vm_end, 5176 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 5177 (unsigned long)pgprot_val(vma->vm_page_prot)); 5178 5179 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { 5180 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__, 5181 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM); 5182 return -EPERM; 5183 } 5184 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP; 5185 vma->vm_flags &= ~VM_MAYWRITE; 5186 5187 vma->vm_ops = &binder_vm_ops; 5188 vma->vm_private_data = proc; 5189 5190 return binder_alloc_mmap_handler(&proc->alloc, vma); 5191 } 5192 5193 static int binder_open(struct inode *nodp, struct file *filp) 5194 { 5195 struct binder_proc *proc, *itr; 5196 struct binder_device *binder_dev; 5197 struct binderfs_info *info; 5198 struct dentry *binder_binderfs_dir_entry_proc = NULL; 5199 bool existing_pid = false; 5200 5201 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__, 5202 current->group_leader->pid, current->pid); 5203 5204 proc = kzalloc(sizeof(*proc), GFP_KERNEL); 5205 if (proc == NULL) 5206 return -ENOMEM; 5207 spin_lock_init(&proc->inner_lock); 5208 spin_lock_init(&proc->outer_lock); 5209 get_task_struct(current->group_leader); 5210 proc->tsk = current->group_leader; 5211 INIT_LIST_HEAD(&proc->todo); 5212 proc->default_priority = task_nice(current); 5213 /* binderfs stashes devices in i_private */ 5214 if (is_binderfs_device(nodp)) { 5215 binder_dev = nodp->i_private; 5216 info = nodp->i_sb->s_fs_info; 5217 binder_binderfs_dir_entry_proc = info->proc_log_dir; 5218 } else { 5219 binder_dev = container_of(filp->private_data, 5220 struct binder_device, miscdev); 5221 } 5222 refcount_inc(&binder_dev->ref); 5223 proc->context = &binder_dev->context; 5224 binder_alloc_init(&proc->alloc); 5225 5226 binder_stats_created(BINDER_STAT_PROC); 5227 proc->pid = current->group_leader->pid; 5228 INIT_LIST_HEAD(&proc->delivered_death); 5229 INIT_LIST_HEAD(&proc->waiting_threads); 5230 filp->private_data = proc; 5231 5232 mutex_lock(&binder_procs_lock); 5233 hlist_for_each_entry(itr, &binder_procs, proc_node) { 5234 if (itr->pid == proc->pid) { 5235 existing_pid = true; 5236 break; 5237 } 5238 } 5239 hlist_add_head(&proc->proc_node, &binder_procs); 5240 mutex_unlock(&binder_procs_lock); 5241 5242 if (binder_debugfs_dir_entry_proc && !existing_pid) { 5243 char strbuf[11]; 5244 5245 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 5246 /* 5247 * proc debug entries are shared between contexts. 5248 * Only create for the first PID to avoid debugfs log spamming 5249 * The printing code will anyway print all contexts for a given 5250 * PID so this is not a problem. 5251 */ 5252 proc->debugfs_entry = debugfs_create_file(strbuf, 0444, 5253 binder_debugfs_dir_entry_proc, 5254 (void *)(unsigned long)proc->pid, 5255 &proc_fops); 5256 } 5257 5258 if (binder_binderfs_dir_entry_proc && !existing_pid) { 5259 char strbuf[11]; 5260 struct dentry *binderfs_entry; 5261 5262 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 5263 /* 5264 * Similar to debugfs, the process specific log file is shared 5265 * between contexts. Only create for the first PID. 
5266 * This is ok since same as debugfs, the log file will contain 5267 * information on all contexts of a given PID. 5268 */ 5269 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc, 5270 strbuf, &proc_fops, (void *)(unsigned long)proc->pid); 5271 if (!IS_ERR(binderfs_entry)) { 5272 proc->binderfs_entry = binderfs_entry; 5273 } else { 5274 int error; 5275 5276 error = PTR_ERR(binderfs_entry); 5277 pr_warn("Unable to create file %s in binderfs (error %d)\n", 5278 strbuf, error); 5279 } 5280 } 5281 5282 return 0; 5283 } 5284 5285 static int binder_flush(struct file *filp, fl_owner_t id) 5286 { 5287 struct binder_proc *proc = filp->private_data; 5288 5289 binder_defer_work(proc, BINDER_DEFERRED_FLUSH); 5290 5291 return 0; 5292 } 5293 5294 static void binder_deferred_flush(struct binder_proc *proc) 5295 { 5296 struct rb_node *n; 5297 int wake_count = 0; 5298 5299 binder_inner_proc_lock(proc); 5300 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { 5301 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); 5302 5303 thread->looper_need_return = true; 5304 if (thread->looper & BINDER_LOOPER_STATE_WAITING) { 5305 wake_up_interruptible(&thread->wait); 5306 wake_count++; 5307 } 5308 } 5309 binder_inner_proc_unlock(proc); 5310 5311 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5312 "binder_flush: %d woke %d threads\n", proc->pid, 5313 wake_count); 5314 } 5315 5316 static int binder_release(struct inode *nodp, struct file *filp) 5317 { 5318 struct binder_proc *proc = filp->private_data; 5319 5320 debugfs_remove(proc->debugfs_entry); 5321 5322 if (proc->binderfs_entry) { 5323 binderfs_remove_file(proc->binderfs_entry); 5324 proc->binderfs_entry = NULL; 5325 } 5326 5327 binder_defer_work(proc, BINDER_DEFERRED_RELEASE); 5328 5329 return 0; 5330 } 5331 5332 static int binder_node_release(struct binder_node *node, int refs) 5333 { 5334 struct binder_ref *ref; 5335 int death = 0; 5336 struct binder_proc *proc = node->proc; 5337 5338 binder_release_work(proc, &node->async_todo); 5339 5340 binder_node_lock(node); 5341 binder_inner_proc_lock(proc); 5342 binder_dequeue_work_ilocked(&node->work); 5343 /* 5344 * The caller must have taken a temporary ref on the node, 5345 */ 5346 BUG_ON(!node->tmp_refs); 5347 if (hlist_empty(&node->refs) && node->tmp_refs == 1) { 5348 binder_inner_proc_unlock(proc); 5349 binder_node_unlock(node); 5350 binder_free_node(node); 5351 5352 return refs; 5353 } 5354 5355 node->proc = NULL; 5356 node->local_strong_refs = 0; 5357 node->local_weak_refs = 0; 5358 binder_inner_proc_unlock(proc); 5359 5360 spin_lock(&binder_dead_nodes_lock); 5361 hlist_add_head(&node->dead_node, &binder_dead_nodes); 5362 spin_unlock(&binder_dead_nodes_lock); 5363 5364 hlist_for_each_entry(ref, &node->refs, node_entry) { 5365 refs++; 5366 /* 5367 * Need the node lock to synchronize 5368 * with new notification requests and the 5369 * inner lock to synchronize with queued 5370 * death notifications. 
5371 */ 5372 binder_inner_proc_lock(ref->proc); 5373 if (!ref->death) { 5374 binder_inner_proc_unlock(ref->proc); 5375 continue; 5376 } 5377 5378 death++; 5379 5380 BUG_ON(!list_empty(&ref->death->work.entry)); 5381 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 5382 binder_enqueue_work_ilocked(&ref->death->work, 5383 &ref->proc->todo); 5384 binder_wakeup_proc_ilocked(ref->proc); 5385 binder_inner_proc_unlock(ref->proc); 5386 } 5387 5388 binder_debug(BINDER_DEBUG_DEAD_BINDER, 5389 "node %d now dead, refs %d, death %d\n", 5390 node->debug_id, refs, death); 5391 binder_node_unlock(node); 5392 binder_put_node(node); 5393 5394 return refs; 5395 } 5396 5397 static void binder_deferred_release(struct binder_proc *proc) 5398 { 5399 struct binder_context *context = proc->context; 5400 struct rb_node *n; 5401 int threads, nodes, incoming_refs, outgoing_refs, active_transactions; 5402 5403 mutex_lock(&binder_procs_lock); 5404 hlist_del(&proc->proc_node); 5405 mutex_unlock(&binder_procs_lock); 5406 5407 mutex_lock(&context->context_mgr_node_lock); 5408 if (context->binder_context_mgr_node && 5409 context->binder_context_mgr_node->proc == proc) { 5410 binder_debug(BINDER_DEBUG_DEAD_BINDER, 5411 "%s: %d context_mgr_node gone\n", 5412 __func__, proc->pid); 5413 context->binder_context_mgr_node = NULL; 5414 } 5415 mutex_unlock(&context->context_mgr_node_lock); 5416 binder_inner_proc_lock(proc); 5417 /* 5418 * Make sure proc stays alive after we 5419 * remove all the threads 5420 */ 5421 proc->tmp_ref++; 5422 5423 proc->is_dead = true; 5424 threads = 0; 5425 active_transactions = 0; 5426 while ((n = rb_first(&proc->threads))) { 5427 struct binder_thread *thread; 5428 5429 thread = rb_entry(n, struct binder_thread, rb_node); 5430 binder_inner_proc_unlock(proc); 5431 threads++; 5432 active_transactions += binder_thread_release(proc, thread); 5433 binder_inner_proc_lock(proc); 5434 } 5435 5436 nodes = 0; 5437 incoming_refs = 0; 5438 while ((n = rb_first(&proc->nodes))) { 5439 struct binder_node *node; 5440 5441 node = rb_entry(n, struct binder_node, rb_node); 5442 nodes++; 5443 /* 5444 * take a temporary ref on the node before 5445 * calling binder_node_release() which will either 5446 * kfree() the node or call binder_put_node() 5447 */ 5448 binder_inc_node_tmpref_ilocked(node); 5449 rb_erase(&node->rb_node, &proc->nodes); 5450 binder_inner_proc_unlock(proc); 5451 incoming_refs = binder_node_release(node, incoming_refs); 5452 binder_inner_proc_lock(proc); 5453 } 5454 binder_inner_proc_unlock(proc); 5455 5456 outgoing_refs = 0; 5457 binder_proc_lock(proc); 5458 while ((n = rb_first(&proc->refs_by_desc))) { 5459 struct binder_ref *ref; 5460 5461 ref = rb_entry(n, struct binder_ref, rb_node_desc); 5462 outgoing_refs++; 5463 binder_cleanup_ref_olocked(ref); 5464 binder_proc_unlock(proc); 5465 binder_free_ref(ref); 5466 binder_proc_lock(proc); 5467 } 5468 binder_proc_unlock(proc); 5469 5470 binder_release_work(proc, &proc->todo); 5471 binder_release_work(proc, &proc->delivered_death); 5472 5473 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5474 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n", 5475 __func__, proc->pid, threads, nodes, incoming_refs, 5476 outgoing_refs, active_transactions); 5477 5478 binder_proc_dec_tmpref(proc); 5479 } 5480 5481 static void binder_deferred_func(struct work_struct *work) 5482 { 5483 struct binder_proc *proc; 5484 5485 int defer; 5486 5487 do { 5488 mutex_lock(&binder_deferred_lock); 5489 if (!hlist_empty(&binder_deferred_list)) { 5490 proc = 
hlist_entry(binder_deferred_list.first, 5491 struct binder_proc, deferred_work_node); 5492 hlist_del_init(&proc->deferred_work_node); 5493 defer = proc->deferred_work; 5494 proc->deferred_work = 0; 5495 } else { 5496 proc = NULL; 5497 defer = 0; 5498 } 5499 mutex_unlock(&binder_deferred_lock); 5500 5501 if (defer & BINDER_DEFERRED_FLUSH) 5502 binder_deferred_flush(proc); 5503 5504 if (defer & BINDER_DEFERRED_RELEASE) 5505 binder_deferred_release(proc); /* frees proc */ 5506 } while (proc); 5507 } 5508 static DECLARE_WORK(binder_deferred_work, binder_deferred_func); 5509 5510 static void 5511 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) 5512 { 5513 mutex_lock(&binder_deferred_lock); 5514 proc->deferred_work |= defer; 5515 if (hlist_unhashed(&proc->deferred_work_node)) { 5516 hlist_add_head(&proc->deferred_work_node, 5517 &binder_deferred_list); 5518 schedule_work(&binder_deferred_work); 5519 } 5520 mutex_unlock(&binder_deferred_lock); 5521 } 5522 5523 static void print_binder_transaction_ilocked(struct seq_file *m, 5524 struct binder_proc *proc, 5525 const char *prefix, 5526 struct binder_transaction *t) 5527 { 5528 struct binder_proc *to_proc; 5529 struct binder_buffer *buffer = t->buffer; 5530 5531 spin_lock(&t->lock); 5532 to_proc = t->to_proc; 5533 seq_printf(m, 5534 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d", 5535 prefix, t->debug_id, t, 5536 t->from ? t->from->proc->pid : 0, 5537 t->from ? t->from->pid : 0, 5538 to_proc ? to_proc->pid : 0, 5539 t->to_thread ? t->to_thread->pid : 0, 5540 t->code, t->flags, t->priority, t->need_reply); 5541 spin_unlock(&t->lock); 5542 5543 if (proc != to_proc) { 5544 /* 5545 * Can only safely deref buffer if we are holding the 5546 * correct proc inner lock for this node 5547 */ 5548 seq_puts(m, "\n"); 5549 return; 5550 } 5551 5552 if (buffer == NULL) { 5553 seq_puts(m, " buffer free\n"); 5554 return; 5555 } 5556 if (buffer->target_node) 5557 seq_printf(m, " node %d", buffer->target_node->debug_id); 5558 seq_printf(m, " size %zd:%zd data %pK\n", 5559 buffer->data_size, buffer->offsets_size, 5560 buffer->user_data); 5561 } 5562 5563 static void print_binder_work_ilocked(struct seq_file *m, 5564 struct binder_proc *proc, 5565 const char *prefix, 5566 const char *transaction_prefix, 5567 struct binder_work *w) 5568 { 5569 struct binder_node *node; 5570 struct binder_transaction *t; 5571 5572 switch (w->type) { 5573 case BINDER_WORK_TRANSACTION: 5574 t = container_of(w, struct binder_transaction, work); 5575 print_binder_transaction_ilocked( 5576 m, proc, transaction_prefix, t); 5577 break; 5578 case BINDER_WORK_RETURN_ERROR: { 5579 struct binder_error *e = container_of( 5580 w, struct binder_error, work); 5581 5582 seq_printf(m, "%stransaction error: %u\n", 5583 prefix, e->cmd); 5584 } break; 5585 case BINDER_WORK_TRANSACTION_COMPLETE: 5586 seq_printf(m, "%stransaction complete\n", prefix); 5587 break; 5588 case BINDER_WORK_NODE: 5589 node = container_of(w, struct binder_node, work); 5590 seq_printf(m, "%snode work %d: u%016llx c%016llx\n", 5591 prefix, node->debug_id, 5592 (u64)node->ptr, (u64)node->cookie); 5593 break; 5594 case BINDER_WORK_DEAD_BINDER: 5595 seq_printf(m, "%shas dead binder\n", prefix); 5596 break; 5597 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 5598 seq_printf(m, "%shas cleared dead binder\n", prefix); 5599 break; 5600 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: 5601 seq_printf(m, "%shas cleared death notification\n", prefix); 5602 break; 5603 default: 5604 seq_printf(m, "%sunknown work: 
static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					"    pending async transaction", w);
	}
}

static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}
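
/*
 * Dump one binder_proc: its threads, nodes, refs, allocated buffers and
 * pending work. proc->inner_lock must be dropped before taking node->lock,
 * so each node is pinned with a temporary reference (tracked in last_node)
 * while it is printed.
 */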
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (!print_all && !node->has_async_transaction)
			continue;

		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							     struct binder_ref,
							     rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}

static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
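
/*
 * The BUILD_BUG_ON()s below keep the string tables above in sync with the
 * counter arrays in struct binder_stats: adding a BC_/BR_ command or an
 * object type without a matching string (or vice versa) breaks the build.
 */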
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}

static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}
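
/*
 * Top-level seq_file show function backing the debugfs "state" file: it
 * prints the global dead-node list and then every binder_proc. Each dead
 * node is pinned via tmp_refs while binder_dead_nodes_lock is dropped to
 * take node->lock.
 */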
int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}
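
/*
 * The transaction log is a fixed-size ring indexed by log->cur. Until the
 * ring has wrapped (log->full), entries are printed starting from slot 0;
 * afterwards printing starts at the oldest slot, (cur + 1) modulo the array
 * size, and covers the whole ring.
 */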
int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	refcount_set(&binder_device->ref, 1);
	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}
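
/*
 * Module init: create the debugfs entries, then register one misc device
 * per name in the comma-separated "devices" module parameter. When
 * binderfs is enabled the static misc devices are skipped and only
 * init_binderfs() is used.
 */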
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;
	char *device_names = NULL;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
	    strcmp(binder_devices_param, "") != 0) {
		/*
		 * Copy the module_parameter string, because we don't want to
		 * tokenize it in-place.
		 */
		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
		if (!device_names) {
			ret = -ENOMEM;
			goto err_alloc_device_names_failed;
		}

		device_tmp = device_names;
		while ((device_name = strsep(&device_tmp, ","))) {
			ret = init_binder_device(device_name);
			if (ret)
				goto err_init_binder_device_failed;
		}
	}

	ret = init_binderfs();
	if (ret)
		goto err_init_binder_device_failed;

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");