// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>

#include <uapi/linux/android/binder.h>

#include <asm/cacheflush.h>

#include "binder_alloc.h"
#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info_ratelimited(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log binder_transaction_log;
struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}

/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
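 *
 * Work items are added and removed under proc->inner_lock via the
 * binder_enqueue_*_ilocked() and binder_dequeue_work_ilocked()
 * helpers defined below.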
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @txn_security_ctx:     require sender's security context
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
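 *
 * An illustrative sketch of the @tmp_refs locking rule (the real
 * implementation is binder_inc_node_tmpref() below):
 *
 *	binder_node_lock(node);
 *	if (node->proc)
 *		binder_inner_proc_lock(node->proc);
 *	else
 *		spin_lock(&binder_dead_nodes_lock);
 *	node->tmp_refs++;
 *	...
 *	binder_node_unlock(node);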
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 txn_security_ctx:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:         binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:   list entry for node->refs list in target node
 *                (protected by @node->lock)
 * @proc:         binder_proc containing ref
 * @node:         binder_node of target node. When cleaning up a
 *                ref for deletion in binder_cleanup_ref, a non-NULL
 *                @node indicates the node must be freed
 * @death:        pointer to death notification (ref_death) if requested
 *                (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 * @binderfs_entry:       process-specific binderfs log file
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
	struct dentry *binderfs_entry;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
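 *
 * Illustrative keep-alive sketch for @tmp_ref; binder_get_txn_from()
 * below takes a temporary reference on t->from, and
 * binder_thread_dec_tmpref() releases it:
 *
 *	from = binder_get_txn_from(t);
 *	if (from) {
 *		... use from ...
 *		binder_thread_dec_tmpref(from);
 *	}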
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};

/**
 * struct binder_txn_fd_fixup - transaction fd fixup list element
 * @fixup_entry:          list entry
 * @file:                 struct file to be associated with new fd
 * @offset:               offset in buffer data to this fixup
 *
 * List element for fd fixups in a transaction. Since file
 * descriptors need to be allocated in the context of the
 * target process, we pass each fd to be processed in this
 * struct.
 */
struct binder_txn_fd_fixup {
	struct list_head fixup_entry;
	struct file *file;
	size_t offset;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	struct list_head fd_fixups;
	binder_uintptr_t security_ctx;
	/**
	 * @lock:  protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

/**
 * struct binder_object - union of flat binder object types
 * @hdr:   generic object header
 * @fbo:   binder object (nodes and refs)
 * @fdo:   file descriptor object
 * @bbo:   binder buffer pointer
 * @fdao:  file descriptor array
 *
 * Used for type-independent object copies
 */
struct binder_object {
	union {
		struct binder_object_header hdr;
		struct flat_binder_object fbo;
		struct binder_fd_object fdo;
		struct binder_buffer_object bbo;
		struct binder_fd_array_object fdao;
	};
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
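 *
 * Typical usage (illustrative; see binder_get_node_from_ref() below
 * for a real caller):
 *
 *	binder_proc_lock(proc);
 *	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
 *	...
 *	binder_proc_unlock(proc);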
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * proc->inner_lock.
 * Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
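 *
 * Callers that do not already hold the lock can use the
 * binder_enqueue_thread_work() wrapper below, which amounts to:
 *
 *	binder_inner_proc_lock(thread->proc);
 *	binder_enqueue_thread_work_ilocked(thread, work);
 *	binder_inner_proc_unlock(thread->proc);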
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread_ilocked() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {

		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
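			 * The caller (see binder_new_node()) detects
			 * this case by comparing the returned node
			 * with new_node and frees the unused
			 * allocation.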
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						    struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
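 *
 * A minimal allocate-then-retry sketch (illustrative; the real
 * caller is binder_inc_ref_for_node() below):
 *
 *	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
 *	if (!ref) {
 *		binder_proc_unlock(proc);
 *		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
 *		binder_proc_lock(proc);
 *		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
 *	}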
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}

/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref.
 * Create the ref if it doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
				   struct binder_node *node,
				   bool strong,
				   struct list_head *target_list,
				   struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
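 *
 * A minimal usage sketch (illustrative only; @tmp_ref is protected
 * by @inner_lock):
 *
 *	binder_inner_proc_lock(proc);
 *	proc->tmp_ref++;
 *	binder_inner_proc_unlock(proc);
 *	... use proc ...
 *	binder_proc_dec_tmpref(proc);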
1831 */ 1832 static void binder_proc_dec_tmpref(struct binder_proc *proc) 1833 { 1834 binder_inner_proc_lock(proc); 1835 proc->tmp_ref--; 1836 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) && 1837 !proc->tmp_ref) { 1838 binder_inner_proc_unlock(proc); 1839 binder_free_proc(proc); 1840 return; 1841 } 1842 binder_inner_proc_unlock(proc); 1843 } 1844 1845 /** 1846 * binder_get_txn_from() - safely extract the "from" thread in transaction 1847 * @t: binder transaction for t->from 1848 * 1849 * Atomically return the "from" thread and increment the tmp_ref 1850 * count for the thread to ensure it stays alive until 1851 * binder_thread_dec_tmpref() is called. 1852 * 1853 * Return: the value of t->from 1854 */ 1855 static struct binder_thread *binder_get_txn_from( 1856 struct binder_transaction *t) 1857 { 1858 struct binder_thread *from; 1859 1860 spin_lock(&t->lock); 1861 from = t->from; 1862 if (from) 1863 atomic_inc(&from->tmp_ref); 1864 spin_unlock(&t->lock); 1865 return from; 1866 } 1867 1868 /** 1869 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock 1870 * @t: binder transaction for t->from 1871 * 1872 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock 1873 * to guarantee that the thread cannot be released while operating on it. 1874 * The caller must call binder_inner_proc_unlock() to release the inner lock 1875 * as well as call binder_thread_dec_tmpref() to release the reference. 1876 * 1877 * Return: the value of t->from 1878 */ 1879 static struct binder_thread *binder_get_txn_from_and_acq_inner( 1880 struct binder_transaction *t) 1881 __acquires(&t->from->proc->inner_lock) 1882 { 1883 struct binder_thread *from; 1884 1885 from = binder_get_txn_from(t); 1886 if (!from) { 1887 __acquire(&from->proc->inner_lock); 1888 return NULL; 1889 } 1890 binder_inner_proc_lock(from->proc); 1891 if (t->from) { 1892 BUG_ON(from != t->from); 1893 return from; 1894 } 1895 binder_inner_proc_unlock(from->proc); 1896 __acquire(&from->proc->inner_lock); 1897 binder_thread_dec_tmpref(from); 1898 return NULL; 1899 } 1900 1901 /** 1902 * binder_free_txn_fixups() - free unprocessed fd fixups 1903 * @t: binder transaction whose fd fixups are to be freed 1904 * 1905 * If the transaction is being torn down prior to being 1906 * processed by the target process, free all of the 1907 * fd fixups and fput the file structs. It is safe to 1908 * call this function after the fixups have been 1909 * processed -- in that case, the list will be empty. 1910 */ 1911 static void binder_free_txn_fixups(struct binder_transaction *t) 1912 { 1913 struct binder_txn_fd_fixup *fixup, *tmp; 1914 1915 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { 1916 fput(fixup->file); 1917 list_del(&fixup->fixup_entry); 1918 kfree(fixup); 1919 } 1920 } 1921 1922 static void binder_free_transaction(struct binder_transaction *t) 1923 { 1924 struct binder_proc *target_proc = t->to_proc; 1925 1926 if (target_proc) { 1927 binder_inner_proc_lock(target_proc); 1928 if (t->buffer) 1929 t->buffer->transaction = NULL; 1930 binder_inner_proc_unlock(target_proc); 1931 } 1932 /* 1933 * If the transaction has no target_proc, then 1934 * t->buffer->transaction has already been cleared.
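* (The other site that clears it is binder_free_buf(), which resets buffer->transaction under the inner lock when user-space frees the buffer, so the two paths agree on locking.)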
1935 */ 1936 binder_free_txn_fixups(t); 1937 kfree(t); 1938 binder_stats_deleted(BINDER_STAT_TRANSACTION); 1939 } 1940 1941 static void binder_send_failed_reply(struct binder_transaction *t, 1942 uint32_t error_code) 1943 { 1944 struct binder_thread *target_thread; 1945 struct binder_transaction *next; 1946 1947 BUG_ON(t->flags & TF_ONE_WAY); 1948 while (1) { 1949 target_thread = binder_get_txn_from_and_acq_inner(t); 1950 if (target_thread) { 1951 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 1952 "send failed reply for transaction %d to %d:%d\n", 1953 t->debug_id, 1954 target_thread->proc->pid, 1955 target_thread->pid); 1956 1957 binder_pop_transaction_ilocked(target_thread, t); 1958 if (target_thread->reply_error.cmd == BR_OK) { 1959 target_thread->reply_error.cmd = error_code; 1960 binder_enqueue_thread_work_ilocked( 1961 target_thread, 1962 &target_thread->reply_error.work); 1963 wake_up_interruptible(&target_thread->wait); 1964 } else { 1965 /* 1966 * Cannot get here for normal operation, but 1967 * we can if multiple synchronous transactions 1968 * are sent without blocking for responses. 1969 * Just ignore the 2nd error in this case. 1970 */ 1971 pr_warn("Unexpected reply error: %u\n", 1972 target_thread->reply_error.cmd); 1973 } 1974 binder_inner_proc_unlock(target_thread->proc); 1975 binder_thread_dec_tmpref(target_thread); 1976 binder_free_transaction(t); 1977 return; 1978 } else { 1979 __release(&target_thread->proc->inner_lock); 1980 } 1981 next = t->from_parent; 1982 1983 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 1984 "send failed reply for transaction %d, target dead\n", 1985 t->debug_id); 1986 1987 binder_free_transaction(t); 1988 if (next == NULL) { 1989 binder_debug(BINDER_DEBUG_DEAD_BINDER, 1990 "reply failed, no target thread at root\n"); 1991 return; 1992 } 1993 t = next; 1994 binder_debug(BINDER_DEBUG_DEAD_BINDER, 1995 "reply failed, no target thread -- retry %d\n", 1996 t->debug_id); 1997 } 1998 } 1999 2000 /** 2001 * binder_cleanup_transaction() - cleans up undelivered transaction 2002 * @t: transaction that needs to be cleaned up 2003 * @reason: reason the transaction wasn't delivered 2004 * @error_code: error to return to caller (if synchronous call) 2005 */ 2006 static void binder_cleanup_transaction(struct binder_transaction *t, 2007 const char *reason, 2008 uint32_t error_code) 2009 { 2010 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) { 2011 binder_send_failed_reply(t, error_code); 2012 } else { 2013 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 2014 "undelivered transaction %d, %s\n", 2015 t->debug_id, reason); 2016 binder_free_transaction(t); 2017 } 2018 } 2019 2020 /** 2021 * binder_get_object() - gets object and checks for valid metadata 2022 * @proc: binder_proc owning the buffer 2023 * @buffer: binder_buffer that we're parsing. 2024 * @offset: offset in the @buffer at which to validate an object. 2025 * @object: struct binder_object to read into 2026 * 2027 * Return: If there's a valid metadata object at @offset in @buffer, the 2028 * size of that object. Otherwise, it returns zero. The object 2029 * is read into the struct binder_object pointed to by @object. 
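* Typical use, mirroring the offset-walking loops later in this file (a sketch, not an additional API): struct binder_object object; size_t object_size = binder_get_object(proc, buffer, object_offset, &object); if (object_size == 0) ... reject the malformed transaction ...; switch (object.hdr.type) { ... }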
2030 */ 2031 static size_t binder_get_object(struct binder_proc *proc, 2032 struct binder_buffer *buffer, 2033 unsigned long offset, 2034 struct binder_object *object) 2035 { 2036 size_t read_size; 2037 struct binder_object_header *hdr; 2038 size_t object_size = 0; 2039 2040 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset); 2041 if (offset > buffer->data_size || read_size < sizeof(*hdr) || 2042 binder_alloc_copy_from_buffer(&proc->alloc, object, buffer, 2043 offset, read_size)) 2044 return 0; 2045 2046 /* Ok, now see if we read a complete object. */ 2047 hdr = &object->hdr; 2048 switch (hdr->type) { 2049 case BINDER_TYPE_BINDER: 2050 case BINDER_TYPE_WEAK_BINDER: 2051 case BINDER_TYPE_HANDLE: 2052 case BINDER_TYPE_WEAK_HANDLE: 2053 object_size = sizeof(struct flat_binder_object); 2054 break; 2055 case BINDER_TYPE_FD: 2056 object_size = sizeof(struct binder_fd_object); 2057 break; 2058 case BINDER_TYPE_PTR: 2059 object_size = sizeof(struct binder_buffer_object); 2060 break; 2061 case BINDER_TYPE_FDA: 2062 object_size = sizeof(struct binder_fd_array_object); 2063 break; 2064 default: 2065 return 0; 2066 } 2067 if (offset <= buffer->data_size - object_size && 2068 buffer->data_size >= object_size) 2069 return object_size; 2070 else 2071 return 0; 2072 } 2073 2074 /** 2075 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer. 2076 * @proc: binder_proc owning the buffer 2077 * @b: binder_buffer containing the object 2078 * @object: struct binder_object to read into 2079 * @index: index in offset array at which the binder_buffer_object is 2080 * located 2081 * @start_offset: points to the start of the offset array 2082 * @object_offsetp: offset of @object read from @b 2083 * @num_valid: the number of valid offsets in the offset array 2084 * 2085 * Return: If @index is within the valid range of the offset array 2086 * described by @start_offset and @num_valid, and if there's a valid 2087 * binder_buffer_object at the offset found in index @index 2088 * of the offset array, that object is returned. Otherwise, 2089 * %NULL is returned. 2090 * Note that the offset found in index @index itself is not 2091 * verified; this function assumes that @num_valid elements 2092 * from @start_offset were previously verified to have valid offsets. 2093 * If @object_offsetp is non-NULL, then the offset within 2094 * @b is written to it. 2095 */ 2096 static struct binder_buffer_object *binder_validate_ptr( 2097 struct binder_proc *proc, 2098 struct binder_buffer *b, 2099 struct binder_object *object, 2100 binder_size_t index, 2101 binder_size_t start_offset, 2102 binder_size_t *object_offsetp, 2103 binder_size_t num_valid) 2104 { 2105 size_t object_size; 2106 binder_size_t object_offset; 2107 unsigned long buffer_offset; 2108 2109 if (index >= num_valid) 2110 return NULL; 2111 2112 buffer_offset = start_offset + sizeof(binder_size_t) * index; 2113 if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, 2114 b, buffer_offset, 2115 sizeof(object_offset))) 2116 return NULL; 2117 object_size = binder_get_object(proc, b, object_offset, object); 2118 if (!object_size || object->hdr.type != BINDER_TYPE_PTR) 2119 return NULL; 2120 if (object_offsetp) 2121 *object_offsetp = object_offset; 2122 2123 return &object->bbo; 2124 } 2125 2126 /** 2127 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2128 * @proc: binder_proc owning the buffer 2129 * @b: transaction buffer 2130 * @objects_start_offset: offset to start of objects buffer 2131 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up 2132 * @fixup_offset: start offset in @b to fix up 2133 * @last_obj_offset: offset to last binder_buffer_object that we fixed 2134 * @last_min_offset: minimum fixup offset in object at @last_obj_offset 2135 * 2136 * Return: %true if a fixup in buffer @b at offset @fixup_offset is 2137 * allowed. 2138 * 2139 * For safety reasons, we only allow fixups inside a buffer to happen 2140 * at increasing offsets; additionally, we only allow fixup on the last 2141 * buffer object that was verified, or one of its parents. 2142 * 2143 * Example of what is allowed: 2144 * 2145 * A 2146 * B (parent = A, offset = 0) 2147 * C (parent = A, offset = 16) 2148 * D (parent = C, offset = 0) 2149 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset) 2150 * 2151 * Examples of what is not allowed: 2152 * 2153 * Decreasing offsets within the same parent: 2154 * A 2155 * C (parent = A, offset = 16) 2156 * B (parent = A, offset = 0) // decreasing offset within A 2157 * 2158 * Referring to a parent that wasn't the last object or any of its parents: 2159 * A 2160 * B (parent = A, offset = 0) 2161 * C (parent = A, offset = 0) 2162 * C (parent = A, offset = 16) 2163 * D (parent = B, offset = 0) // B is not A or any of A's parents 2164 */ 2165 static bool binder_validate_fixup(struct binder_proc *proc, 2166 struct binder_buffer *b, 2167 binder_size_t objects_start_offset, 2168 binder_size_t buffer_obj_offset, 2169 binder_size_t fixup_offset, 2170 binder_size_t last_obj_offset, 2171 binder_size_t last_min_offset) 2172 { 2173 if (!last_obj_offset) { 2174 /* No buffer object has been verified yet; nothing may be fixed up */ 2175 return false; 2176 } 2177 2178 while (last_obj_offset != buffer_obj_offset) { 2179 unsigned long buffer_offset; 2180 struct binder_object last_object; 2181 struct binder_buffer_object *last_bbo; 2182 size_t object_size = binder_get_object(proc, b, last_obj_offset, 2183 &last_object); 2184 if (object_size != sizeof(*last_bbo)) 2185 return false; 2186 2187 last_bbo = &last_object.bbo; 2188 /* 2189 * Safe to retrieve the parent of last_obj, since it 2190 * was already previously verified by the driver. 2191 */ 2192 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0) 2193 return false; 2194 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t); 2195 buffer_offset = objects_start_offset + 2196 sizeof(binder_size_t) * last_bbo->parent; 2197 if (binder_alloc_copy_from_buffer(&proc->alloc, 2198 &last_obj_offset, 2199 b, buffer_offset, 2200 sizeof(last_obj_offset))) 2201 return false; 2202 } 2203 return (fixup_offset >= last_min_offset); 2204 } 2205 2206 /** 2207 * struct binder_task_work_cb - for deferred close 2208 * 2209 * @twork: callback_head for task work 2210 * @file: file to fput() when the task work runs 2211 * 2212 * Structure to pass task work to be handled after 2213 * returning from binder_ioctl() via task_work_add(). 2214 */ 2215 struct binder_task_work_cb { 2216 struct callback_head twork; 2217 struct file *file; 2218 }; 2219 2220 /** 2221 * binder_do_fd_close() - complete a deferred file-descriptor close 2222 * @twork: callback head for task work 2223 * 2224 * It is not safe to call ksys_close() during the binder_ioctl() 2225 * function if there is a chance that binder's own file descriptor 2226 * might be closed. This is to meet the requirements for using 2227 * fdget() (see comments for __fget_light()).
Therefore use 2228 * task_work_add() to schedule the close operation once we have 2229 * returned from binder_ioctl(). This function is a callback 2230 * for that mechanism and does the final fput() on the file whose 2231 * descriptor was already detached in binder_deferred_fd_close(). 2232 */ 2233 static void binder_do_fd_close(struct callback_head *twork) 2234 { 2235 struct binder_task_work_cb *twcb = container_of(twork, 2236 struct binder_task_work_cb, twork); 2237 2238 fput(twcb->file); 2239 kfree(twcb); 2240 } 2241 2242 /** 2243 * binder_deferred_fd_close() - schedule a close for the given file-descriptor 2244 * @fd: file-descriptor to close 2245 * 2246 * See comments in binder_do_fd_close(). This function is used to schedule 2247 * a file-descriptor to be closed after returning from binder_ioctl(). 2248 */ 2249 static void binder_deferred_fd_close(int fd) 2250 { 2251 struct binder_task_work_cb *twcb; 2252 2253 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL); 2254 if (!twcb) 2255 return; 2256 init_task_work(&twcb->twork, binder_do_fd_close); 2257 __close_fd_get_file(fd, &twcb->file); 2258 if (twcb->file) 2259 task_work_add(current, &twcb->twork, true); 2260 else 2261 kfree(twcb); 2262 } 2263 2264 static void binder_transaction_buffer_release(struct binder_proc *proc, 2265 struct binder_buffer *buffer, 2266 binder_size_t failed_at, 2267 bool is_failure) 2268 { 2269 int debug_id = buffer->debug_id; 2270 binder_size_t off_start_offset, buffer_offset, off_end_offset; 2271 2272 binder_debug(BINDER_DEBUG_TRANSACTION, 2273 "%d buffer release %d, size %zd-%zd, failed at %llx\n", 2274 proc->pid, buffer->debug_id, 2275 buffer->data_size, buffer->offsets_size, 2276 (unsigned long long)failed_at); 2277 2278 if (buffer->target_node) 2279 binder_dec_node(buffer->target_node, 1, 0); 2280 2281 off_start_offset = ALIGN(buffer->data_size, sizeof(void *)); 2282 off_end_offset = is_failure ?
failed_at : 2283 off_start_offset + buffer->offsets_size; 2284 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; 2285 buffer_offset += sizeof(binder_size_t)) { 2286 struct binder_object_header *hdr; 2287 size_t object_size = 0; 2288 struct binder_object object; 2289 binder_size_t object_offset; 2290 2291 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, 2292 buffer, buffer_offset, 2293 sizeof(object_offset))) 2294 object_size = binder_get_object(proc, buffer, 2295 object_offset, &object); 2296 if (object_size == 0) { 2297 pr_err("transaction release %d bad object at offset %lld, size %zd\n", 2298 debug_id, (u64)object_offset, buffer->data_size); 2299 continue; 2300 } 2301 hdr = &object.hdr; 2302 switch (hdr->type) { 2303 case BINDER_TYPE_BINDER: 2304 case BINDER_TYPE_WEAK_BINDER: { 2305 struct flat_binder_object *fp; 2306 struct binder_node *node; 2307 2308 fp = to_flat_binder_object(hdr); 2309 node = binder_get_node(proc, fp->binder); 2310 if (node == NULL) { 2311 pr_err("transaction release %d bad node %016llx\n", 2312 debug_id, (u64)fp->binder); 2313 break; 2314 } 2315 binder_debug(BINDER_DEBUG_TRANSACTION, 2316 " node %d u%016llx\n", 2317 node->debug_id, (u64)node->ptr); 2318 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER, 2319 0); 2320 binder_put_node(node); 2321 } break; 2322 case BINDER_TYPE_HANDLE: 2323 case BINDER_TYPE_WEAK_HANDLE: { 2324 struct flat_binder_object *fp; 2325 struct binder_ref_data rdata; 2326 int ret; 2327 2328 fp = to_flat_binder_object(hdr); 2329 ret = binder_dec_ref_for_handle(proc, fp->handle, 2330 hdr->type == BINDER_TYPE_HANDLE, &rdata); 2331 2332 if (ret) { 2333 pr_err("transaction release %d bad handle %d, ret = %d\n", 2334 debug_id, fp->handle, ret); 2335 break; 2336 } 2337 binder_debug(BINDER_DEBUG_TRANSACTION, 2338 " ref %d desc %d\n", 2339 rdata.debug_id, rdata.desc); 2340 } break; 2341 2342 case BINDER_TYPE_FD: { 2343 /* 2344 * No need to close the file here since user-space 2345 * closes it for successfully delivered 2346 * transactions. For transactions that weren't 2347 * delivered, the new fd was never allocated, so 2348 * there is nothing to close; the fput on the 2349 * file is done when the transaction is torn 2350 * down. 2351 */ 2352 WARN_ON(failed_at && 2353 proc->tsk == current->group_leader); 2354 } break; 2355 case BINDER_TYPE_PTR: 2356 /* 2357 * Nothing to do here, this will get cleaned up when the 2358 * transaction buffer gets freed 2359 */ 2360 break; 2361 case BINDER_TYPE_FDA: { 2362 struct binder_fd_array_object *fda; 2363 struct binder_buffer_object *parent; 2364 struct binder_object ptr_object; 2365 binder_size_t fda_offset; 2366 size_t fd_index; 2367 binder_size_t fd_buf_size; 2368 binder_size_t num_valid; 2369 2370 if (proc->tsk != current->group_leader) { 2371 /* 2372 * Nothing to do if running in sender context. 2373 * The fd fixups have not been applied, so no 2374 * fds need to be closed.
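* (This mirrors the fd-fixup design: fds are installed only by a thread in the target process, so only the target context can have anything to close here.)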
2375 */ 2376 continue; 2377 } 2378 2379 num_valid = (buffer_offset - off_start_offset) / 2380 sizeof(binder_size_t); 2381 fda = to_binder_fd_array_object(hdr); 2382 parent = binder_validate_ptr(proc, buffer, &ptr_object, 2383 fda->parent, 2384 off_start_offset, 2385 NULL, 2386 num_valid); 2387 if (!parent) { 2388 pr_err("transaction release %d bad parent offset\n", 2389 debug_id); 2390 continue; 2391 } 2392 fd_buf_size = sizeof(u32) * fda->num_fds; 2393 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 2394 pr_err("transaction release %d invalid number of fds (%lld)\n", 2395 debug_id, (u64)fda->num_fds); 2396 continue; 2397 } 2398 if (fd_buf_size > parent->length || 2399 fda->parent_offset > parent->length - fd_buf_size) { 2400 /* No space for all file descriptors here. */ 2401 pr_err("transaction release %d not enough space for %lld fds in buffer\n", 2402 debug_id, (u64)fda->num_fds); 2403 continue; 2404 } 2405 /* 2406 * the source data for binder_buffer_object is visible 2407 * to user-space and the @buffer element is the user 2408 * pointer to the buffer_object containing the fd_array. 2409 * Convert the address to an offset relative to 2410 * the base of the transaction buffer. 2411 */ 2412 fda_offset = 2413 (parent->buffer - (uintptr_t)buffer->user_data) + 2414 fda->parent_offset; 2415 for (fd_index = 0; fd_index < fda->num_fds; 2416 fd_index++) { 2417 u32 fd; 2418 int err; 2419 binder_size_t offset = fda_offset + 2420 fd_index * sizeof(fd); 2421 2422 err = binder_alloc_copy_from_buffer( 2423 &proc->alloc, &fd, buffer, 2424 offset, sizeof(fd)); 2425 WARN_ON(err); 2426 if (!err) 2427 binder_deferred_fd_close(fd); 2428 } 2429 } break; 2430 default: 2431 pr_err("transaction release %d bad object type %x\n", 2432 debug_id, hdr->type); 2433 break; 2434 } 2435 } 2436 } 2437 2438 static int binder_translate_binder(struct flat_binder_object *fp, 2439 struct binder_transaction *t, 2440 struct binder_thread *thread) 2441 { 2442 struct binder_node *node; 2443 struct binder_proc *proc = thread->proc; 2444 struct binder_proc *target_proc = t->to_proc; 2445 struct binder_ref_data rdata; 2446 int ret = 0; 2447 2448 node = binder_get_node(proc, fp->binder); 2449 if (!node) { 2450 node = binder_new_node(proc, fp); 2451 if (!node) 2452 return -ENOMEM; 2453 } 2454 if (fp->cookie != node->cookie) { 2455 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n", 2456 proc->pid, thread->pid, (u64)fp->binder, 2457 node->debug_id, (u64)fp->cookie, 2458 (u64)node->cookie); 2459 ret = -EINVAL; 2460 goto done; 2461 } 2462 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { 2463 ret = -EPERM; 2464 goto done; 2465 } 2466 2467 ret = binder_inc_ref_for_node(target_proc, node, 2468 fp->hdr.type == BINDER_TYPE_BINDER, 2469 &thread->todo, &rdata); 2470 if (ret) 2471 goto done; 2472 2473 if (fp->hdr.type == BINDER_TYPE_BINDER) 2474 fp->hdr.type = BINDER_TYPE_HANDLE; 2475 else 2476 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE; 2477 fp->binder = 0; 2478 fp->handle = rdata.desc; 2479 fp->cookie = 0; 2480 2481 trace_binder_transaction_node_to_ref(t, node, &rdata); 2482 binder_debug(BINDER_DEBUG_TRANSACTION, 2483 " node %d u%016llx -> ref %d desc %d\n", 2484 node->debug_id, (u64)node->ptr, 2485 rdata.debug_id, rdata.desc); 2486 done: 2487 binder_put_node(node); 2488 return ret; 2489 } 2490 2491 static int binder_translate_handle(struct flat_binder_object *fp, 2492 struct binder_transaction *t, 2493 struct binder_thread *thread) 2494 { 2495 struct binder_proc *proc = thread->proc; 2496 
struct binder_proc *target_proc = t->to_proc; 2497 struct binder_node *node; 2498 struct binder_ref_data src_rdata; 2499 int ret = 0; 2500 2501 node = binder_get_node_from_ref(proc, fp->handle, 2502 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata); 2503 if (!node) { 2504 binder_user_error("%d:%d got transaction with invalid handle, %d\n", 2505 proc->pid, thread->pid, fp->handle); 2506 return -EINVAL; 2507 } 2508 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { 2509 ret = -EPERM; 2510 goto done; 2511 } 2512 2513 binder_node_lock(node); 2514 if (node->proc == target_proc) { 2515 if (fp->hdr.type == BINDER_TYPE_HANDLE) 2516 fp->hdr.type = BINDER_TYPE_BINDER; 2517 else 2518 fp->hdr.type = BINDER_TYPE_WEAK_BINDER; 2519 fp->binder = node->ptr; 2520 fp->cookie = node->cookie; 2521 if (node->proc) 2522 binder_inner_proc_lock(node->proc); 2523 else 2524 __acquire(&node->proc->inner_lock); 2525 binder_inc_node_nilocked(node, 2526 fp->hdr.type == BINDER_TYPE_BINDER, 2527 0, NULL); 2528 if (node->proc) 2529 binder_inner_proc_unlock(node->proc); 2530 else 2531 __release(&node->proc->inner_lock); 2532 trace_binder_transaction_ref_to_node(t, node, &src_rdata); 2533 binder_debug(BINDER_DEBUG_TRANSACTION, 2534 " ref %d desc %d -> node %d u%016llx\n", 2535 src_rdata.debug_id, src_rdata.desc, node->debug_id, 2536 (u64)node->ptr); 2537 binder_node_unlock(node); 2538 } else { 2539 struct binder_ref_data dest_rdata; 2540 2541 binder_node_unlock(node); 2542 ret = binder_inc_ref_for_node(target_proc, node, 2543 fp->hdr.type == BINDER_TYPE_HANDLE, 2544 NULL, &dest_rdata); 2545 if (ret) 2546 goto done; 2547 2548 fp->binder = 0; 2549 fp->handle = dest_rdata.desc; 2550 fp->cookie = 0; 2551 trace_binder_transaction_ref_to_ref(t, node, &src_rdata, 2552 &dest_rdata); 2553 binder_debug(BINDER_DEBUG_TRANSACTION, 2554 " ref %d desc %d -> ref %d desc %d (node %d)\n", 2555 src_rdata.debug_id, src_rdata.desc, 2556 dest_rdata.debug_id, dest_rdata.desc, 2557 node->debug_id); 2558 } 2559 done: 2560 binder_put_node(node); 2561 return ret; 2562 } 2563 2564 static int binder_translate_fd(u32 fd, binder_size_t fd_offset, 2565 struct binder_transaction *t, 2566 struct binder_thread *thread, 2567 struct binder_transaction *in_reply_to) 2568 { 2569 struct binder_proc *proc = thread->proc; 2570 struct binder_proc *target_proc = t->to_proc; 2571 struct binder_txn_fd_fixup *fixup; 2572 struct file *file; 2573 int ret = 0; 2574 bool target_allows_fd; 2575 2576 if (in_reply_to) 2577 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS); 2578 else 2579 target_allows_fd = t->buffer->target_node->accept_fds; 2580 if (!target_allows_fd) { 2581 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n", 2582 proc->pid, thread->pid, 2583 in_reply_to ? "reply" : "transaction", 2584 fd); 2585 ret = -EPERM; 2586 goto err_fd_not_accepted; 2587 } 2588 2589 file = fget(fd); 2590 if (!file) { 2591 binder_user_error("%d:%d got transaction with invalid fd, %d\n", 2592 proc->pid, thread->pid, fd); 2593 ret = -EBADF; 2594 goto err_fget; 2595 } 2596 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file); 2597 if (ret < 0) { 2598 ret = -EPERM; 2599 goto err_security; 2600 } 2601 2602 /* 2603 * Add fixup record for this transaction. The allocation 2604 * of the fd in the target needs to be done from a 2605 * target thread. 
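* Sketch of the lifecycle, using names from this file: the record queued on t->fd_fixups below is either consumed when a target thread receives the transaction and installs the fd, or, if the transaction is torn down first, released by binder_free_txn_fixups(), which fputs the file.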
2606 */ 2607 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL); 2608 if (!fixup) { 2609 ret = -ENOMEM; 2610 goto err_alloc; 2611 } 2612 fixup->file = file; 2613 fixup->offset = fd_offset; 2614 trace_binder_transaction_fd_send(t, fd, fixup->offset); 2615 list_add_tail(&fixup->fixup_entry, &t->fd_fixups); 2616 2617 return ret; 2618 2619 err_alloc: 2620 err_security: 2621 fput(file); 2622 err_fget: 2623 err_fd_not_accepted: 2624 return ret; 2625 } 2626 2627 static int binder_translate_fd_array(struct binder_fd_array_object *fda, 2628 struct binder_buffer_object *parent, 2629 struct binder_transaction *t, 2630 struct binder_thread *thread, 2631 struct binder_transaction *in_reply_to) 2632 { 2633 binder_size_t fdi, fd_buf_size; 2634 binder_size_t fda_offset; 2635 struct binder_proc *proc = thread->proc; 2636 struct binder_proc *target_proc = t->to_proc; 2637 2638 fd_buf_size = sizeof(u32) * fda->num_fds; 2639 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 2640 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n", 2641 proc->pid, thread->pid, (u64)fda->num_fds); 2642 return -EINVAL; 2643 } 2644 if (fd_buf_size > parent->length || 2645 fda->parent_offset > parent->length - fd_buf_size) { 2646 /* No space for all file descriptors here. */ 2647 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n", 2648 proc->pid, thread->pid, (u64)fda->num_fds); 2649 return -EINVAL; 2650 } 2651 /* 2652 * the source data for binder_buffer_object is visible 2653 * to user-space and the @buffer element is the user 2654 * pointer to the buffer_object containing the fd_array. 2655 * Convert the address to an offset relative to 2656 * the base of the transaction buffer. 2657 */ 2658 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) + 2659 fda->parent_offset; 2660 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) { 2661 binder_user_error("%d:%d parent offset not aligned correctly.\n", 2662 proc->pid, thread->pid); 2663 return -EINVAL; 2664 } 2665 for (fdi = 0; fdi < fda->num_fds; fdi++) { 2666 u32 fd; 2667 int ret; 2668 binder_size_t offset = fda_offset + fdi * sizeof(fd); 2669 2670 ret = binder_alloc_copy_from_buffer(&target_proc->alloc, 2671 &fd, t->buffer, 2672 offset, sizeof(fd)); 2673 if (!ret) 2674 ret = binder_translate_fd(fd, offset, t, thread, 2675 in_reply_to); 2676 if (ret < 0) 2677 return ret; 2678 } 2679 return 0; 2680 } 2681 2682 static int binder_fixup_parent(struct binder_transaction *t, 2683 struct binder_thread *thread, 2684 struct binder_buffer_object *bp, 2685 binder_size_t off_start_offset, 2686 binder_size_t num_valid, 2687 binder_size_t last_fixup_obj_off, 2688 binder_size_t last_fixup_min_off) 2689 { 2690 struct binder_buffer_object *parent; 2691 struct binder_buffer *b = t->buffer; 2692 struct binder_proc *proc = thread->proc; 2693 struct binder_proc *target_proc = t->to_proc; 2694 struct binder_object object; 2695 binder_size_t buffer_offset; 2696 binder_size_t parent_offset; 2697 2698 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT)) 2699 return 0; 2700 2701 parent = binder_validate_ptr(target_proc, b, &object, bp->parent, 2702 off_start_offset, &parent_offset, 2703 num_valid); 2704 if (!parent) { 2705 binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 2706 proc->pid, thread->pid); 2707 return -EINVAL; 2708 } 2709 2710 if (!binder_validate_fixup(target_proc, b, off_start_offset, 2711 parent_offset, bp->parent_offset, 2712 last_fixup_obj_off, 2713 last_fixup_min_off)) { 2714 binder_user_error("%d:%d got 
transaction with out-of-order buffer fixup\n", 2715 proc->pid, thread->pid); 2716 return -EINVAL; 2717 } 2718 2719 if (parent->length < sizeof(binder_uintptr_t) || 2720 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) { 2721 /* No space for a pointer here! */ 2722 binder_user_error("%d:%d got transaction with invalid parent offset\n", 2723 proc->pid, thread->pid); 2724 return -EINVAL; 2725 } 2726 buffer_offset = bp->parent_offset + 2727 (uintptr_t)parent->buffer - (uintptr_t)b->user_data; 2728 if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset, 2729 &bp->buffer, sizeof(bp->buffer))) { 2730 binder_user_error("%d:%d got transaction with invalid parent offset\n", 2731 proc->pid, thread->pid); 2732 return -EINVAL; 2733 } 2734 2735 return 0; 2736 } 2737 2738 /** 2739 * binder_proc_transaction() - sends a transaction to a process and wakes it up 2740 * @t: transaction to send 2741 * @proc: process to send the transaction to 2742 * @thread: thread in @proc to send the transaction to (may be NULL) 2743 * 2744 * This function queues a transaction to the specified process. It will try 2745 * to find a thread in the target process to handle the transaction and 2746 * wake it up. If no thread is found, the work is queued to the proc 2747 * waitqueue. 2748 * 2749 * If the @thread parameter is not NULL, the transaction is always queued 2750 * to the waitlist of that specific thread. 2751 * 2752 * Return: true if the transaction was successfully queued 2753 * false if the target process or thread is dead 2754 */ 2755 static bool binder_proc_transaction(struct binder_transaction *t, 2756 struct binder_proc *proc, 2757 struct binder_thread *thread) 2758 { 2759 struct binder_node *node = t->buffer->target_node; 2760 bool oneway = !!(t->flags & TF_ONE_WAY); 2761 bool pending_async = false; 2762 2763 BUG_ON(!node); 2764 binder_node_lock(node); 2765 if (oneway) { 2766 BUG_ON(thread); 2767 if (node->has_async_transaction) { 2768 pending_async = true; 2769 } else { 2770 node->has_async_transaction = true; 2771 } 2772 } 2773 2774 binder_inner_proc_lock(proc); 2775 2776 if (proc->is_dead || (thread && thread->is_dead)) { 2777 binder_inner_proc_unlock(proc); 2778 binder_node_unlock(node); 2779 return false; 2780 } 2781 2782 if (!thread && !pending_async) 2783 thread = binder_select_thread_ilocked(proc); 2784 2785 if (thread) 2786 binder_enqueue_thread_work_ilocked(thread, &t->work); 2787 else if (!pending_async) 2788 binder_enqueue_work_ilocked(&t->work, &proc->todo); 2789 else 2790 binder_enqueue_work_ilocked(&t->work, &node->async_todo); 2791 2792 if (!pending_async) 2793 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */); 2794 2795 binder_inner_proc_unlock(proc); 2796 binder_node_unlock(node); 2797 2798 return true; 2799 } 2800 2801 /** 2802 * binder_get_node_refs_for_txn() - Get required refs on node for txn 2803 * @node: struct binder_node for which to get refs 2804 * @procp: returns @node->proc if valid 2805 * @error: set to BR_DEAD_REPLY if the node's proc is NULL 2806 * 2807 * User-space normally keeps the node alive when creating a transaction 2808 * since it has a reference to the target. The local strong ref keeps it 2809 * alive if the sending process dies before the target process processes 2810 * the transaction. If the source process is malicious or has a reference 2811 * counting bug, relying on the local strong ref can fail.
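* (For example, a sender could issue BC_RELEASE on its last strong reference while this transaction is still being set up; nothing in user-space is required to prevent that.)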
2812 * 2813 * Since user-space can cause the local strong ref to go away, we also take 2814 * a tmpref on the node to ensure it survives while we are constructing 2815 * the transaction. We also need a tmpref on the proc while we are 2816 * constructing the transaction, so we take that here as well. 2817 * 2818 * Return: The target_node with refs taken, or NULL if @node->proc is NULL. 2819 * Also sets @procp if valid. If @node->proc is NULL, indicating that the 2820 * target proc has died, @error is set to BR_DEAD_REPLY 2821 */ 2822 static struct binder_node *binder_get_node_refs_for_txn( 2823 struct binder_node *node, 2824 struct binder_proc **procp, 2825 uint32_t *error) 2826 { 2827 struct binder_node *target_node = NULL; 2828 2829 binder_node_inner_lock(node); 2830 if (node->proc) { 2831 target_node = node; 2832 binder_inc_node_nilocked(node, 1, 0, NULL); 2833 binder_inc_node_tmpref_ilocked(node); 2834 node->proc->tmp_ref++; 2835 *procp = node->proc; 2836 } else 2837 *error = BR_DEAD_REPLY; 2838 binder_node_inner_unlock(node); 2839 2840 return target_node; 2841 } 2842 2843 static void binder_transaction(struct binder_proc *proc, 2844 struct binder_thread *thread, 2845 struct binder_transaction_data *tr, int reply, 2846 binder_size_t extra_buffers_size) 2847 { 2848 int ret; 2849 struct binder_transaction *t; 2850 struct binder_work *w; 2851 struct binder_work *tcomplete; 2852 binder_size_t buffer_offset = 0; 2853 binder_size_t off_start_offset, off_end_offset; 2854 binder_size_t off_min; 2855 binder_size_t sg_buf_offset, sg_buf_end_offset; 2856 struct binder_proc *target_proc = NULL; 2857 struct binder_thread *target_thread = NULL; 2858 struct binder_node *target_node = NULL; 2859 struct binder_transaction *in_reply_to = NULL; 2860 struct binder_transaction_log_entry *e; 2861 uint32_t return_error = 0; 2862 uint32_t return_error_param = 0; 2863 uint32_t return_error_line = 0; 2864 binder_size_t last_fixup_obj_off = 0; 2865 binder_size_t last_fixup_min_off = 0; 2866 struct binder_context *context = proc->context; 2867 int t_debug_id = atomic_inc_return(&binder_last_id); 2868 char *secctx = NULL; 2869 u32 secctx_sz = 0; 2870 2871 e = binder_transaction_log_add(&binder_transaction_log); 2872 e->debug_id = t_debug_id; 2873 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); 2874 e->from_proc = proc->pid; 2875 e->from_thread = thread->pid; 2876 e->target_handle = tr->target.handle; 2877 e->data_size = tr->data_size; 2878 e->offsets_size = tr->offsets_size; 2879 e->context_name = proc->context->name; 2880 2881 if (reply) { 2882 binder_inner_proc_lock(proc); 2883 in_reply_to = thread->transaction_stack; 2884 if (in_reply_to == NULL) { 2885 binder_inner_proc_unlock(proc); 2886 binder_user_error("%d:%d got reply transaction with no transaction stack\n", 2887 proc->pid, thread->pid); 2888 return_error = BR_FAILED_REPLY; 2889 return_error_param = -EPROTO; 2890 return_error_line = __LINE__; 2891 goto err_empty_call_stack; 2892 } 2893 if (in_reply_to->to_thread != thread) { 2894 spin_lock(&in_reply_to->lock); 2895 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n", 2896 proc->pid, thread->pid, in_reply_to->debug_id, 2897 in_reply_to->to_proc ? 2898 in_reply_to->to_proc->pid : 0, 2899 in_reply_to->to_thread ?
2900 in_reply_to->to_thread->pid : 0); 2901 spin_unlock(&in_reply_to->lock); 2902 binder_inner_proc_unlock(proc); 2903 return_error = BR_FAILED_REPLY; 2904 return_error_param = -EPROTO; 2905 return_error_line = __LINE__; 2906 in_reply_to = NULL; 2907 goto err_bad_call_stack; 2908 } 2909 thread->transaction_stack = in_reply_to->to_parent; 2910 binder_inner_proc_unlock(proc); 2911 binder_set_nice(in_reply_to->saved_priority); 2912 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to); 2913 if (target_thread == NULL) { 2914 /* annotation for sparse */ 2915 __release(&target_thread->proc->inner_lock); 2916 return_error = BR_DEAD_REPLY; 2917 return_error_line = __LINE__; 2918 goto err_dead_binder; 2919 } 2920 if (target_thread->transaction_stack != in_reply_to) { 2921 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n", 2922 proc->pid, thread->pid, 2923 target_thread->transaction_stack ? 2924 target_thread->transaction_stack->debug_id : 0, 2925 in_reply_to->debug_id); 2926 binder_inner_proc_unlock(target_thread->proc); 2927 return_error = BR_FAILED_REPLY; 2928 return_error_param = -EPROTO; 2929 return_error_line = __LINE__; 2930 in_reply_to = NULL; 2931 target_thread = NULL; 2932 goto err_dead_binder; 2933 } 2934 target_proc = target_thread->proc; 2935 target_proc->tmp_ref++; 2936 binder_inner_proc_unlock(target_thread->proc); 2937 } else { 2938 if (tr->target.handle) { 2939 struct binder_ref *ref; 2940 2941 /* 2942 * There must already be a strong ref 2943 * on this node. If so, do a strong 2944 * increment on the node to ensure it 2945 * stays alive until the transaction is 2946 * done. 2947 */ 2948 binder_proc_lock(proc); 2949 ref = binder_get_ref_olocked(proc, tr->target.handle, 2950 true); 2951 if (ref) { 2952 target_node = binder_get_node_refs_for_txn( 2953 ref->node, &target_proc, 2954 &return_error); 2955 } else { 2956 binder_user_error("%d:%d got transaction to invalid handle\n", 2957 proc->pid, thread->pid); 2958 return_error = BR_FAILED_REPLY; 2959 } 2960 binder_proc_unlock(proc); 2961 } else { 2962 mutex_lock(&context->context_mgr_node_lock); 2963 target_node = context->binder_context_mgr_node; 2964 if (target_node) 2965 target_node = binder_get_node_refs_for_txn( 2966 target_node, &target_proc, 2967 &return_error); 2968 else 2969 return_error = BR_DEAD_REPLY; 2970 mutex_unlock(&context->context_mgr_node_lock); 2971 if (target_node && target_proc->pid == proc->pid) { 2972 binder_user_error("%d:%d got transaction to context manager from process owning it\n", 2973 proc->pid, thread->pid); 2974 return_error = BR_FAILED_REPLY; 2975 return_error_param = -EINVAL; 2976 return_error_line = __LINE__; 2977 goto err_invalid_target_handle; 2978 } 2979 } 2980 if (!target_node) { 2981 /* 2982 * return_error is set above 2983 */ 2984 return_error_param = -EINVAL; 2985 return_error_line = __LINE__; 2986 goto err_dead_binder; 2987 } 2988 e->to_node = target_node->debug_id; 2989 if (security_binder_transaction(proc->tsk, 2990 target_proc->tsk) < 0) { 2991 return_error = BR_FAILED_REPLY; 2992 return_error_param = -EPERM; 2993 return_error_line = __LINE__; 2994 goto err_invalid_target_handle; 2995 } 2996 binder_inner_proc_lock(proc); 2997 2998 w = list_first_entry_or_null(&thread->todo, 2999 struct binder_work, entry); 3000 if (!(tr->flags & TF_ONE_WAY) && w && 3001 w->type == BINDER_WORK_TRANSACTION) { 3002 /* 3003 * Do not allow new outgoing transaction from a 3004 * thread that has a transaction at the head of 3005 * its todo list. 
Only need to check the head 3006 * because binder_select_thread_ilocked picks a 3007 * thread from proc->waiting_threads to enqueue 3008 * the transaction, and nothing is queued to the 3009 * todo list while the thread is on waiting_threads. 3010 */ 3011 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n", 3012 proc->pid, thread->pid); 3013 binder_inner_proc_unlock(proc); 3014 return_error = BR_FAILED_REPLY; 3015 return_error_param = -EPROTO; 3016 return_error_line = __LINE__; 3017 goto err_bad_todo_list; 3018 } 3019 3020 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { 3021 struct binder_transaction *tmp; 3022 3023 tmp = thread->transaction_stack; 3024 if (tmp->to_thread != thread) { 3025 spin_lock(&tmp->lock); 3026 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", 3027 proc->pid, thread->pid, tmp->debug_id, 3028 tmp->to_proc ? tmp->to_proc->pid : 0, 3029 tmp->to_thread ? 3030 tmp->to_thread->pid : 0); 3031 spin_unlock(&tmp->lock); 3032 binder_inner_proc_unlock(proc); 3033 return_error = BR_FAILED_REPLY; 3034 return_error_param = -EPROTO; 3035 return_error_line = __LINE__; 3036 goto err_bad_call_stack; 3037 } 3038 while (tmp) { 3039 struct binder_thread *from; 3040 3041 spin_lock(&tmp->lock); 3042 from = tmp->from; 3043 if (from && from->proc == target_proc) { 3044 atomic_inc(&from->tmp_ref); 3045 target_thread = from; 3046 spin_unlock(&tmp->lock); 3047 break; 3048 } 3049 spin_unlock(&tmp->lock); 3050 tmp = tmp->from_parent; 3051 } 3052 } 3053 binder_inner_proc_unlock(proc); 3054 } 3055 if (target_thread) 3056 e->to_thread = target_thread->pid; 3057 e->to_proc = target_proc->pid; 3058 3059 /* TODO: reuse incoming transaction for reply */ 3060 t = kzalloc(sizeof(*t), GFP_KERNEL); 3061 if (t == NULL) { 3062 return_error = BR_FAILED_REPLY; 3063 return_error_param = -ENOMEM; 3064 return_error_line = __LINE__; 3065 goto err_alloc_t_failed; 3066 } 3067 INIT_LIST_HEAD(&t->fd_fixups); 3068 binder_stats_created(BINDER_STAT_TRANSACTION); 3069 spin_lock_init(&t->lock); 3070 3071 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); 3072 if (tcomplete == NULL) { 3073 return_error = BR_FAILED_REPLY; 3074 return_error_param = -ENOMEM; 3075 return_error_line = __LINE__; 3076 goto err_alloc_tcomplete_failed; 3077 } 3078 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); 3079 3080 t->debug_id = t_debug_id; 3081 3082 if (reply) 3083 binder_debug(BINDER_DEBUG_TRANSACTION, 3084 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n", 3085 proc->pid, thread->pid, t->debug_id, 3086 target_proc->pid, target_thread->pid, 3087 (u64)tr->data.ptr.buffer, 3088 (u64)tr->data.ptr.offsets, 3089 (u64)tr->data_size, (u64)tr->offsets_size, 3090 (u64)extra_buffers_size); 3091 else 3092 binder_debug(BINDER_DEBUG_TRANSACTION, 3093 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n", 3094 proc->pid, thread->pid, t->debug_id, 3095 target_proc->pid, target_node->debug_id, 3096 (u64)tr->data.ptr.buffer, 3097 (u64)tr->data.ptr.offsets, 3098 (u64)tr->data_size, (u64)tr->offsets_size, 3099 (u64)extra_buffers_size); 3100 3101 if (!reply && !(tr->flags & TF_ONE_WAY)) 3102 t->from = thread; 3103 else 3104 t->from = NULL; 3105 t->sender_euid = task_euid(proc->tsk); 3106 t->to_proc = target_proc; 3107 t->to_thread = target_thread; 3108 t->code = tr->code; 3109 t->flags = tr->flags; 3110 t->priority = task_nice(current); 3111 3112 if (target_node && 
target_node->txn_security_ctx) { 3113 u32 secid; 3114 size_t added_size; 3115 3116 security_task_getsecid(proc->tsk, &secid); 3117 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz); 3118 if (ret) { 3119 return_error = BR_FAILED_REPLY; 3120 return_error_param = ret; 3121 return_error_line = __LINE__; 3122 goto err_get_secctx_failed; 3123 } 3124 added_size = ALIGN(secctx_sz, sizeof(u64)); 3125 extra_buffers_size += added_size; 3126 if (extra_buffers_size < added_size) { 3127 /* integer overflow of extra_buffers_size */ 3128 return_error = BR_FAILED_REPLY; 3129 return_error_param = -EINVAL; 3130 return_error_line = __LINE__; 3131 goto err_bad_extra_size; 3132 } 3133 } 3134 3135 trace_binder_transaction(reply, t, target_node); 3136 3137 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, 3138 tr->offsets_size, extra_buffers_size, 3139 !reply && (t->flags & TF_ONE_WAY)); 3140 if (IS_ERR(t->buffer)) { 3141 /* 3142 * -ESRCH indicates VMA cleared. The target is dying. 3143 */ 3144 return_error_param = PTR_ERR(t->buffer); 3145 return_error = return_error_param == -ESRCH ? 3146 BR_DEAD_REPLY : BR_FAILED_REPLY; 3147 return_error_line = __LINE__; 3148 t->buffer = NULL; 3149 goto err_binder_alloc_buf_failed; 3150 } 3151 if (secctx) { 3152 int err; 3153 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) + 3154 ALIGN(tr->offsets_size, sizeof(void *)) + 3155 ALIGN(extra_buffers_size, sizeof(void *)) - 3156 ALIGN(secctx_sz, sizeof(u64)); 3157 3158 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset; 3159 err = binder_alloc_copy_to_buffer(&target_proc->alloc, 3160 t->buffer, buf_offset, 3161 secctx, secctx_sz); 3162 if (err) { 3163 t->security_ctx = 0; 3164 WARN_ON(1); 3165 } 3166 security_release_secctx(secctx, secctx_sz); 3167 secctx = NULL; 3168 } 3169 t->buffer->debug_id = t->debug_id; 3170 t->buffer->transaction = t; 3171 t->buffer->target_node = target_node; 3172 trace_binder_transaction_alloc_buf(t->buffer); 3173 3174 if (binder_alloc_copy_user_to_buffer( 3175 &target_proc->alloc, 3176 t->buffer, 0, 3177 (const void __user *) 3178 (uintptr_t)tr->data.ptr.buffer, 3179 tr->data_size)) { 3180 binder_user_error("%d:%d got transaction with invalid data ptr\n", 3181 proc->pid, thread->pid); 3182 return_error = BR_FAILED_REPLY; 3183 return_error_param = -EFAULT; 3184 return_error_line = __LINE__; 3185 goto err_copy_data_failed; 3186 } 3187 if (binder_alloc_copy_user_to_buffer( 3188 &target_proc->alloc, 3189 t->buffer, 3190 ALIGN(tr->data_size, sizeof(void *)), 3191 (const void __user *) 3192 (uintptr_t)tr->data.ptr.offsets, 3193 tr->offsets_size)) { 3194 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 3195 proc->pid, thread->pid); 3196 return_error = BR_FAILED_REPLY; 3197 return_error_param = -EFAULT; 3198 return_error_line = __LINE__; 3199 goto err_copy_data_failed; 3200 } 3201 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) { 3202 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n", 3203 proc->pid, thread->pid, (u64)tr->offsets_size); 3204 return_error = BR_FAILED_REPLY; 3205 return_error_param = -EINVAL; 3206 return_error_line = __LINE__; 3207 goto err_bad_offset; 3208 } 3209 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) { 3210 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n", 3211 proc->pid, thread->pid, 3212 (u64)extra_buffers_size); 3213 return_error = BR_FAILED_REPLY; 3214 return_error_param = -EINVAL; 3215 return_error_line = __LINE__; 3216 goto err_bad_offset; 3217 }
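/* Layout of t->buffer from here on, as implied by the offset arithmetic below (a reading of this code, not a separate ABI statement): the copied data, then the pointer-aligned offset array, then the pointer-aligned scatter-gather buffers, with the u64-aligned security context (if any) at the very end. */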
3218 off_start_offset = ALIGN(tr->data_size, sizeof(void *)); 3219 buffer_offset = off_start_offset; 3220 off_end_offset = off_start_offset + tr->offsets_size; 3221 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *)); 3222 sg_buf_end_offset = sg_buf_offset + extra_buffers_size - 3223 ALIGN(secctx_sz, sizeof(u64)); 3224 off_min = 0; 3225 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; 3226 buffer_offset += sizeof(binder_size_t)) { 3227 struct binder_object_header *hdr; 3228 size_t object_size; 3229 struct binder_object object; 3230 binder_size_t object_offset; 3231 3232 if (binder_alloc_copy_from_buffer(&target_proc->alloc, 3233 &object_offset, 3234 t->buffer, 3235 buffer_offset, 3236 sizeof(object_offset))) { 3237 return_error = BR_FAILED_REPLY; 3238 return_error_param = -EINVAL; 3239 return_error_line = __LINE__; 3240 goto err_bad_offset; 3241 } 3242 object_size = binder_get_object(target_proc, t->buffer, 3243 object_offset, &object); 3244 if (object_size == 0 || object_offset < off_min) { 3245 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n", 3246 proc->pid, thread->pid, 3247 (u64)object_offset, 3248 (u64)off_min, 3249 (u64)t->buffer->data_size); 3250 return_error = BR_FAILED_REPLY; 3251 return_error_param = -EINVAL; 3252 return_error_line = __LINE__; 3253 goto err_bad_offset; 3254 } 3255 3256 hdr = &object.hdr; 3257 off_min = object_offset + object_size; 3258 switch (hdr->type) { 3259 case BINDER_TYPE_BINDER: 3260 case BINDER_TYPE_WEAK_BINDER: { 3261 struct flat_binder_object *fp; 3262 3263 fp = to_flat_binder_object(hdr); 3264 ret = binder_translate_binder(fp, t, thread); 3265 3266 if (ret < 0 || 3267 binder_alloc_copy_to_buffer(&target_proc->alloc, 3268 t->buffer, 3269 object_offset, 3270 fp, sizeof(*fp))) { 3271 return_error = BR_FAILED_REPLY; 3272 return_error_param = ret; 3273 return_error_line = __LINE__; 3274 goto err_translate_failed; 3275 } 3276 } break; 3277 case BINDER_TYPE_HANDLE: 3278 case BINDER_TYPE_WEAK_HANDLE: { 3279 struct flat_binder_object *fp; 3280 3281 fp = to_flat_binder_object(hdr); 3282 ret = binder_translate_handle(fp, t, thread); 3283 if (ret < 0 || 3284 binder_alloc_copy_to_buffer(&target_proc->alloc, 3285 t->buffer, 3286 object_offset, 3287 fp, sizeof(*fp))) { 3288 return_error = BR_FAILED_REPLY; 3289 return_error_param = ret; 3290 return_error_line = __LINE__; 3291 goto err_translate_failed; 3292 } 3293 } break; 3294 3295 case BINDER_TYPE_FD: { 3296 struct binder_fd_object *fp = to_binder_fd_object(hdr); 3297 binder_size_t fd_offset = object_offset + 3298 (uintptr_t)&fp->fd - (uintptr_t)fp; 3299 int ret = binder_translate_fd(fp->fd, fd_offset, t, 3300 thread, in_reply_to); 3301 3302 fp->pad_binder = 0; 3303 if (ret < 0 || 3304 binder_alloc_copy_to_buffer(&target_proc->alloc, 3305 t->buffer, 3306 object_offset, 3307 fp, sizeof(*fp))) { 3308 return_error = BR_FAILED_REPLY; 3309 return_error_param = ret; 3310 return_error_line = __LINE__; 3311 goto err_translate_failed; 3312 } 3313 } break; 3314 case BINDER_TYPE_FDA: { 3315 struct binder_object ptr_object; 3316 binder_size_t parent_offset; 3317 struct binder_fd_array_object *fda = 3318 to_binder_fd_array_object(hdr); 3319 size_t num_valid = (buffer_offset - off_start_offset) / 3320 sizeof(binder_size_t); 3321 struct binder_buffer_object *parent = 3322 binder_validate_ptr(target_proc, t->buffer, 3323 &ptr_object, fda->parent, 3324 off_start_offset, 3325 &parent_offset, 3326 num_valid); 3327 if (!parent) { 3328 binder_user_error("%d:%d
got transaction with invalid parent offset or type\n", 3329 proc->pid, thread->pid); 3330 return_error = BR_FAILED_REPLY; 3331 return_error_param = -EINVAL; 3332 return_error_line = __LINE__; 3333 goto err_bad_parent; 3334 } 3335 if (!binder_validate_fixup(target_proc, t->buffer, 3336 off_start_offset, 3337 parent_offset, 3338 fda->parent_offset, 3339 last_fixup_obj_off, 3340 last_fixup_min_off)) { 3341 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", 3342 proc->pid, thread->pid); 3343 return_error = BR_FAILED_REPLY; 3344 return_error_param = -EINVAL; 3345 return_error_line = __LINE__; 3346 goto err_bad_parent; 3347 } 3348 ret = binder_translate_fd_array(fda, parent, t, thread, 3349 in_reply_to); 3350 if (ret < 0) { 3351 return_error = BR_FAILED_REPLY; 3352 return_error_param = ret; 3353 return_error_line = __LINE__; 3354 goto err_translate_failed; 3355 } 3356 last_fixup_obj_off = parent_offset; 3357 last_fixup_min_off = 3358 fda->parent_offset + sizeof(u32) * fda->num_fds; 3359 } break; 3360 case BINDER_TYPE_PTR: { 3361 struct binder_buffer_object *bp = 3362 to_binder_buffer_object(hdr); 3363 size_t buf_left = sg_buf_end_offset - sg_buf_offset; 3364 size_t num_valid; 3365 3366 if (bp->length > buf_left) { 3367 binder_user_error("%d:%d got transaction with too large buffer\n", 3368 proc->pid, thread->pid); 3369 return_error = BR_FAILED_REPLY; 3370 return_error_param = -EINVAL; 3371 return_error_line = __LINE__; 3372 goto err_bad_offset; 3373 } 3374 if (binder_alloc_copy_user_to_buffer( 3375 &target_proc->alloc, 3376 t->buffer, 3377 sg_buf_offset, 3378 (const void __user *) 3379 (uintptr_t)bp->buffer, 3380 bp->length)) { 3381 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 3382 proc->pid, thread->pid); 3383 return_error_param = -EFAULT; 3384 return_error = BR_FAILED_REPLY; 3385 return_error_line = __LINE__; 3386 goto err_copy_data_failed; 3387 } 3388 /* Fixup buffer pointer to target proc address space */ 3389 bp->buffer = (uintptr_t) 3390 t->buffer->user_data + sg_buf_offset; 3391 sg_buf_offset += ALIGN(bp->length, sizeof(u64)); 3392 3393 num_valid = (buffer_offset - off_start_offset) / 3394 sizeof(binder_size_t); 3395 ret = binder_fixup_parent(t, thread, bp, 3396 off_start_offset, 3397 num_valid, 3398 last_fixup_obj_off, 3399 last_fixup_min_off); 3400 if (ret < 0 || 3401 binder_alloc_copy_to_buffer(&target_proc->alloc, 3402 t->buffer, 3403 object_offset, 3404 bp, sizeof(*bp))) { 3405 return_error = BR_FAILED_REPLY; 3406 return_error_param = ret; 3407 return_error_line = __LINE__; 3408 goto err_translate_failed; 3409 } 3410 last_fixup_obj_off = object_offset; 3411 last_fixup_min_off = 0; 3412 } break; 3413 default: 3414 binder_user_error("%d:%d got transaction with invalid object type, %x\n", 3415 proc->pid, thread->pid, hdr->type); 3416 return_error = BR_FAILED_REPLY; 3417 return_error_param = -EINVAL; 3418 return_error_line = __LINE__; 3419 goto err_bad_object_type; 3420 } 3421 } 3422 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; 3423 t->work.type = BINDER_WORK_TRANSACTION; 3424 3425 if (reply) { 3426 binder_enqueue_thread_work(thread, tcomplete); 3427 binder_inner_proc_lock(target_proc); 3428 if (target_thread->is_dead) { 3429 binder_inner_proc_unlock(target_proc); 3430 goto err_dead_proc_or_thread; 3431 } 3432 BUG_ON(t->buffer->async_transaction != 0); 3433 binder_pop_transaction_ilocked(target_thread, in_reply_to); 3434 binder_enqueue_thread_work_ilocked(target_thread, &t->work); 3435 binder_inner_proc_unlock(target_proc); 3436
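/* The _sync wake-up variant is used because this thread is done with the transaction and about to head back to user space, so the scheduler need not preempt it just to run the woken reply thread. */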
wake_up_interruptible_sync(&target_thread->wait); 3437 binder_free_transaction(in_reply_to); 3438 } else if (!(t->flags & TF_ONE_WAY)) { 3439 BUG_ON(t->buffer->async_transaction != 0); 3440 binder_inner_proc_lock(proc); 3441 /* 3442 * Defer the TRANSACTION_COMPLETE, so we don't return to 3443 * userspace immediately; this allows the target process to 3444 * immediately start processing this transaction, reducing 3445 * latency. We will then return the TRANSACTION_COMPLETE when 3446 * the target replies (or there is an error). 3447 */ 3448 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete); 3449 t->need_reply = 1; 3450 t->from_parent = thread->transaction_stack; 3451 thread->transaction_stack = t; 3452 binder_inner_proc_unlock(proc); 3453 if (!binder_proc_transaction(t, target_proc, target_thread)) { 3454 binder_inner_proc_lock(proc); 3455 binder_pop_transaction_ilocked(thread, t); 3456 binder_inner_proc_unlock(proc); 3457 goto err_dead_proc_or_thread; 3458 } 3459 } else { 3460 BUG_ON(target_node == NULL); 3461 BUG_ON(t->buffer->async_transaction != 1); 3462 binder_enqueue_thread_work(thread, tcomplete); 3463 if (!binder_proc_transaction(t, target_proc, NULL)) 3464 goto err_dead_proc_or_thread; 3465 } 3466 if (target_thread) 3467 binder_thread_dec_tmpref(target_thread); 3468 binder_proc_dec_tmpref(target_proc); 3469 if (target_node) 3470 binder_dec_node_tmpref(target_node); 3471 /* 3472 * write barrier to synchronize with initialization 3473 * of log entry 3474 */ 3475 smp_wmb(); 3476 WRITE_ONCE(e->debug_id_done, t_debug_id); 3477 return; 3478 3479 err_dead_proc_or_thread: 3480 return_error = BR_DEAD_REPLY; 3481 return_error_line = __LINE__; 3482 binder_dequeue_work(proc, tcomplete); 3483 err_translate_failed: 3484 err_bad_object_type: 3485 err_bad_offset: 3486 err_bad_parent: 3487 err_copy_data_failed: 3488 binder_free_txn_fixups(t); 3489 trace_binder_transaction_failed_buffer_release(t->buffer); 3490 binder_transaction_buffer_release(target_proc, t->buffer, 3491 buffer_offset, true); 3492 if (target_node) 3493 binder_dec_node_tmpref(target_node); 3494 target_node = NULL; 3495 t->buffer->transaction = NULL; 3496 binder_alloc_free_buf(&target_proc->alloc, t->buffer); 3497 err_binder_alloc_buf_failed: 3498 err_bad_extra_size: 3499 if (secctx) 3500 security_release_secctx(secctx, secctx_sz); 3501 err_get_secctx_failed: 3502 kfree(tcomplete); 3503 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 3504 err_alloc_tcomplete_failed: 3505 kfree(t); 3506 binder_stats_deleted(BINDER_STAT_TRANSACTION); 3507 err_alloc_t_failed: 3508 err_bad_todo_list: 3509 err_bad_call_stack: 3510 err_empty_call_stack: 3511 err_dead_binder: 3512 err_invalid_target_handle: 3513 if (target_thread) 3514 binder_thread_dec_tmpref(target_thread); 3515 if (target_proc) 3516 binder_proc_dec_tmpref(target_proc); 3517 if (target_node) { 3518 binder_dec_node(target_node, 1, 0); 3519 binder_dec_node_tmpref(target_node); 3520 } 3521 3522 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 3523 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n", 3524 proc->pid, thread->pid, return_error, return_error_param, 3525 (u64)tr->data_size, (u64)tr->offsets_size, 3526 return_error_line); 3527 3528 { 3529 struct binder_transaction_log_entry *fe; 3530 3531 e->return_error = return_error; 3532 e->return_error_param = return_error_param; 3533 e->return_error_line = return_error_line; 3534 fe = binder_transaction_log_add(&binder_transaction_log_failed); 3535 *fe = *e; 3536 /* 3537 * write barrier to synchronize with 
initialization 3538 * of log entry 3539 */ 3540 smp_wmb(); 3541 WRITE_ONCE(e->debug_id_done, t_debug_id); 3542 WRITE_ONCE(fe->debug_id_done, t_debug_id); 3543 } 3544 3545 BUG_ON(thread->return_error.cmd != BR_OK); 3546 if (in_reply_to) { 3547 thread->return_error.cmd = BR_TRANSACTION_COMPLETE; 3548 binder_enqueue_thread_work(thread, &thread->return_error.work); 3549 binder_send_failed_reply(in_reply_to, return_error); 3550 } else { 3551 thread->return_error.cmd = return_error; 3552 binder_enqueue_thread_work(thread, &thread->return_error.work); 3553 } 3554 } 3555 3556 /** 3557 * binder_free_buf() - free the specified buffer 3558 * @proc: binder proc that owns buffer 3559 * @buffer: buffer to be freed 3560 * 3561 * If buffer for an async transaction, enqueue the next async 3562 * transaction from the node. 3563 * 3564 * Cleanup buffer and free it. 3565 */ 3566 static void 3567 binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer) 3568 { 3569 binder_inner_proc_lock(proc); 3570 if (buffer->transaction) { 3571 buffer->transaction->buffer = NULL; 3572 buffer->transaction = NULL; 3573 } 3574 binder_inner_proc_unlock(proc); 3575 if (buffer->async_transaction && buffer->target_node) { 3576 struct binder_node *buf_node; 3577 struct binder_work *w; 3578 3579 buf_node = buffer->target_node; 3580 binder_node_inner_lock(buf_node); 3581 BUG_ON(!buf_node->has_async_transaction); 3582 BUG_ON(buf_node->proc != proc); 3583 w = binder_dequeue_work_head_ilocked( 3584 &buf_node->async_todo); 3585 if (!w) { 3586 buf_node->has_async_transaction = false; 3587 } else { 3588 binder_enqueue_work_ilocked( 3589 w, &proc->todo); 3590 binder_wakeup_proc_ilocked(proc); 3591 } 3592 binder_node_inner_unlock(buf_node); 3593 } 3594 trace_binder_transaction_buffer_release(buffer); 3595 binder_transaction_buffer_release(proc, buffer, 0, false); 3596 binder_alloc_free_buf(&proc->alloc, buffer); 3597 } 3598 3599 static int binder_thread_write(struct binder_proc *proc, 3600 struct binder_thread *thread, 3601 binder_uintptr_t binder_buffer, size_t size, 3602 binder_size_t *consumed) 3603 { 3604 uint32_t cmd; 3605 struct binder_context *context = proc->context; 3606 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 3607 void __user *ptr = buffer + *consumed; 3608 void __user *end = buffer + size; 3609 3610 while (ptr < end && thread->return_error.cmd == BR_OK) { 3611 int ret; 3612 3613 if (get_user(cmd, (uint32_t __user *)ptr)) 3614 return -EFAULT; 3615 ptr += sizeof(uint32_t); 3616 trace_binder_command(cmd); 3617 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { 3618 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]); 3619 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]); 3620 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]); 3621 } 3622 switch (cmd) { 3623 case BC_INCREFS: 3624 case BC_ACQUIRE: 3625 case BC_RELEASE: 3626 case BC_DECREFS: { 3627 uint32_t target; 3628 const char *debug_string; 3629 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE; 3630 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE; 3631 struct binder_ref_data rdata; 3632 3633 if (get_user(target, (uint32_t __user *)ptr)) 3634 return -EFAULT; 3635 3636 ptr += sizeof(uint32_t); 3637 ret = -1; 3638 if (increment && !target) { 3639 struct binder_node *ctx_mgr_node; 3640 mutex_lock(&context->context_mgr_node_lock); 3641 ctx_mgr_node = context->binder_context_mgr_node; 3642 if (ctx_mgr_node) 3643 ret = binder_inc_ref_for_node( 3644 proc, ctx_mgr_node, 3645 strong, NULL, &rdata); 3646 mutex_unlock(&context->context_mgr_node_lock); 3647 } 3648 if 
(ret) 3649 ret = binder_update_ref_for_handle( 3650 proc, target, increment, strong, 3651 &rdata); 3652 if (!ret && rdata.desc != target) { 3653 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n", 3654 proc->pid, thread->pid, 3655 target, rdata.desc); 3656 } 3657 switch (cmd) { 3658 case BC_INCREFS: 3659 debug_string = "IncRefs"; 3660 break; 3661 case BC_ACQUIRE: 3662 debug_string = "Acquire"; 3663 break; 3664 case BC_RELEASE: 3665 debug_string = "Release"; 3666 break; 3667 case BC_DECREFS: 3668 default: 3669 debug_string = "DecRefs"; 3670 break; 3671 } 3672 if (ret) { 3673 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n", 3674 proc->pid, thread->pid, debug_string, 3675 strong, target, ret); 3676 break; 3677 } 3678 binder_debug(BINDER_DEBUG_USER_REFS, 3679 "%d:%d %s ref %d desc %d s %d w %d\n", 3680 proc->pid, thread->pid, debug_string, 3681 rdata.debug_id, rdata.desc, rdata.strong, 3682 rdata.weak); 3683 break; 3684 } 3685 case BC_INCREFS_DONE: 3686 case BC_ACQUIRE_DONE: { 3687 binder_uintptr_t node_ptr; 3688 binder_uintptr_t cookie; 3689 struct binder_node *node; 3690 bool free_node; 3691 3692 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr)) 3693 return -EFAULT; 3694 ptr += sizeof(binder_uintptr_t); 3695 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3696 return -EFAULT; 3697 ptr += sizeof(binder_uintptr_t); 3698 node = binder_get_node(proc, node_ptr); 3699 if (node == NULL) { 3700 binder_user_error("%d:%d %s u%016llx no match\n", 3701 proc->pid, thread->pid, 3702 cmd == BC_INCREFS_DONE ? 3703 "BC_INCREFS_DONE" : 3704 "BC_ACQUIRE_DONE", 3705 (u64)node_ptr); 3706 break; 3707 } 3708 if (cookie != node->cookie) { 3709 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n", 3710 proc->pid, thread->pid, 3711 cmd == BC_INCREFS_DONE ? 3712 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 3713 (u64)node_ptr, node->debug_id, 3714 (u64)cookie, (u64)node->cookie); 3715 binder_put_node(node); 3716 break; 3717 } 3718 binder_node_inner_lock(node); 3719 if (cmd == BC_ACQUIRE_DONE) { 3720 if (node->pending_strong_ref == 0) { 3721 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", 3722 proc->pid, thread->pid, 3723 node->debug_id); 3724 binder_node_inner_unlock(node); 3725 binder_put_node(node); 3726 break; 3727 } 3728 node->pending_strong_ref = 0; 3729 } else { 3730 if (node->pending_weak_ref == 0) { 3731 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", 3732 proc->pid, thread->pid, 3733 node->debug_id); 3734 binder_node_inner_unlock(node); 3735 binder_put_node(node); 3736 break; 3737 } 3738 node->pending_weak_ref = 0; 3739 } 3740 free_node = binder_dec_node_nilocked(node, 3741 cmd == BC_ACQUIRE_DONE, 0); 3742 WARN_ON(free_node); 3743 binder_debug(BINDER_DEBUG_USER_REFS, 3744 "%d:%d %s node %d ls %d lw %d tr %d\n", 3745 proc->pid, thread->pid, 3746 cmd == BC_INCREFS_DONE ? 
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 3747 node->debug_id, node->local_strong_refs, 3748 node->local_weak_refs, node->tmp_refs); 3749 binder_node_inner_unlock(node); 3750 binder_put_node(node); 3751 break; 3752 } 3753 case BC_ATTEMPT_ACQUIRE: 3754 pr_err("BC_ATTEMPT_ACQUIRE not supported\n"); 3755 return -EINVAL; 3756 case BC_ACQUIRE_RESULT: 3757 pr_err("BC_ACQUIRE_RESULT not supported\n"); 3758 return -EINVAL; 3759 3760 case BC_FREE_BUFFER: { 3761 binder_uintptr_t data_ptr; 3762 struct binder_buffer *buffer; 3763 3764 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr)) 3765 return -EFAULT; 3766 ptr += sizeof(binder_uintptr_t); 3767 3768 buffer = binder_alloc_prepare_to_free(&proc->alloc, 3769 data_ptr); 3770 if (IS_ERR_OR_NULL(buffer)) { 3771 if (PTR_ERR(buffer) == -EPERM) { 3772 binder_user_error( 3773 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n", 3774 proc->pid, thread->pid, 3775 (u64)data_ptr); 3776 } else { 3777 binder_user_error( 3778 "%d:%d BC_FREE_BUFFER u%016llx no match\n", 3779 proc->pid, thread->pid, 3780 (u64)data_ptr); 3781 } 3782 break; 3783 } 3784 binder_debug(BINDER_DEBUG_FREE_BUFFER, 3785 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n", 3786 proc->pid, thread->pid, (u64)data_ptr, 3787 buffer->debug_id, 3788 buffer->transaction ? "active" : "finished"); 3789 binder_free_buf(proc, buffer); 3790 break; 3791 } 3792 3793 case BC_TRANSACTION_SG: 3794 case BC_REPLY_SG: { 3795 struct binder_transaction_data_sg tr; 3796 3797 if (copy_from_user(&tr, ptr, sizeof(tr))) 3798 return -EFAULT; 3799 ptr += sizeof(tr); 3800 binder_transaction(proc, thread, &tr.transaction_data, 3801 cmd == BC_REPLY_SG, tr.buffers_size); 3802 break; 3803 } 3804 case BC_TRANSACTION: 3805 case BC_REPLY: { 3806 struct binder_transaction_data tr; 3807 3808 if (copy_from_user(&tr, ptr, sizeof(tr))) 3809 return -EFAULT; 3810 ptr += sizeof(tr); 3811 binder_transaction(proc, thread, &tr, 3812 cmd == BC_REPLY, 0); 3813 break; 3814 } 3815 3816 case BC_REGISTER_LOOPER: 3817 binder_debug(BINDER_DEBUG_THREADS, 3818 "%d:%d BC_REGISTER_LOOPER\n", 3819 proc->pid, thread->pid); 3820 binder_inner_proc_lock(proc); 3821 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { 3822 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3823 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", 3824 proc->pid, thread->pid); 3825 } else if (proc->requested_threads == 0) { 3826 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3827 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n", 3828 proc->pid, thread->pid); 3829 } else { 3830 proc->requested_threads--; 3831 proc->requested_threads_started++; 3832 } 3833 thread->looper |= BINDER_LOOPER_STATE_REGISTERED; 3834 binder_inner_proc_unlock(proc); 3835 break; 3836 case BC_ENTER_LOOPER: 3837 binder_debug(BINDER_DEBUG_THREADS, 3838 "%d:%d BC_ENTER_LOOPER\n", 3839 proc->pid, thread->pid); 3840 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { 3841 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3842 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n", 3843 proc->pid, thread->pid); 3844 } 3845 thread->looper |= BINDER_LOOPER_STATE_ENTERED; 3846 break; 3847 case BC_EXIT_LOOPER: 3848 binder_debug(BINDER_DEBUG_THREADS, 3849 "%d:%d BC_EXIT_LOOPER\n", 3850 proc->pid, thread->pid); 3851 thread->looper |= BINDER_LOOPER_STATE_EXITED; 3852 break; 3853 3854 case BC_REQUEST_DEATH_NOTIFICATION: 3855 case BC_CLEAR_DEATH_NOTIFICATION: { 3856 uint32_t target; 3857 
binder_uintptr_t cookie; 3858 struct binder_ref *ref; 3859 struct binder_ref_death *death = NULL; 3860 3861 if (get_user(target, (uint32_t __user *)ptr)) 3862 return -EFAULT; 3863 ptr += sizeof(uint32_t); 3864 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3865 return -EFAULT; 3866 ptr += sizeof(binder_uintptr_t); 3867 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 3868 /* 3869 * Allocate memory for death notification 3870 * before taking lock 3871 */ 3872 death = kzalloc(sizeof(*death), GFP_KERNEL); 3873 if (death == NULL) { 3874 WARN_ON(thread->return_error.cmd != 3875 BR_OK); 3876 thread->return_error.cmd = BR_ERROR; 3877 binder_enqueue_thread_work( 3878 thread, 3879 &thread->return_error.work); 3880 binder_debug( 3881 BINDER_DEBUG_FAILED_TRANSACTION, 3882 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", 3883 proc->pid, thread->pid); 3884 break; 3885 } 3886 } 3887 binder_proc_lock(proc); 3888 ref = binder_get_ref_olocked(proc, target, false); 3889 if (ref == NULL) { 3890 binder_user_error("%d:%d %s invalid ref %d\n", 3891 proc->pid, thread->pid, 3892 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 3893 "BC_REQUEST_DEATH_NOTIFICATION" : 3894 "BC_CLEAR_DEATH_NOTIFICATION", 3895 target); 3896 binder_proc_unlock(proc); 3897 kfree(death); 3898 break; 3899 } 3900 3901 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 3902 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n", 3903 proc->pid, thread->pid, 3904 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 3905 "BC_REQUEST_DEATH_NOTIFICATION" : 3906 "BC_CLEAR_DEATH_NOTIFICATION", 3907 (u64)cookie, ref->data.debug_id, 3908 ref->data.desc, ref->data.strong, 3909 ref->data.weak, ref->node->debug_id); 3910 3911 binder_node_lock(ref->node); 3912 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 3913 if (ref->death) { 3914 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", 3915 proc->pid, thread->pid); 3916 binder_node_unlock(ref->node); 3917 binder_proc_unlock(proc); 3918 kfree(death); 3919 break; 3920 } 3921 binder_stats_created(BINDER_STAT_DEATH); 3922 INIT_LIST_HEAD(&death->work.entry); 3923 death->cookie = cookie; 3924 ref->death = death; 3925 if (ref->node->proc == NULL) { 3926 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 3927 3928 binder_inner_proc_lock(proc); 3929 binder_enqueue_work_ilocked( 3930 &ref->death->work, &proc->todo); 3931 binder_wakeup_proc_ilocked(proc); 3932 binder_inner_proc_unlock(proc); 3933 } 3934 } else { 3935 if (ref->death == NULL) { 3936 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", 3937 proc->pid, thread->pid); 3938 binder_node_unlock(ref->node); 3939 binder_proc_unlock(proc); 3940 break; 3941 } 3942 death = ref->death; 3943 if (death->cookie != cookie) { 3944 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n", 3945 proc->pid, thread->pid, 3946 (u64)death->cookie, 3947 (u64)cookie); 3948 binder_node_unlock(ref->node); 3949 binder_proc_unlock(proc); 3950 break; 3951 } 3952 ref->death = NULL; 3953 binder_inner_proc_lock(proc); 3954 if (list_empty(&death->work.entry)) { 3955 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 3956 if (thread->looper & 3957 (BINDER_LOOPER_STATE_REGISTERED | 3958 BINDER_LOOPER_STATE_ENTERED)) 3959 binder_enqueue_thread_work_ilocked( 3960 thread, 3961 &death->work); 3962 else { 3963 binder_enqueue_work_ilocked( 3964 &death->work, 3965 &proc->todo); 3966 binder_wakeup_proc_ilocked( 3967 proc); 3968 } 3969 } else { 3970 BUG_ON(death->work.type != 
BINDER_WORK_DEAD_BINDER); 3971 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; 3972 } 3973 binder_inner_proc_unlock(proc); 3974 } 3975 binder_node_unlock(ref->node); 3976 binder_proc_unlock(proc); 3977 } break; 3978 case BC_DEAD_BINDER_DONE: { 3979 struct binder_work *w; 3980 binder_uintptr_t cookie; 3981 struct binder_ref_death *death = NULL; 3982 3983 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3984 return -EFAULT; 3985 3986 ptr += sizeof(cookie); 3987 binder_inner_proc_lock(proc); 3988 list_for_each_entry(w, &proc->delivered_death, 3989 entry) { 3990 struct binder_ref_death *tmp_death = 3991 container_of(w, 3992 struct binder_ref_death, 3993 work); 3994 3995 if (tmp_death->cookie == cookie) { 3996 death = tmp_death; 3997 break; 3998 } 3999 } 4000 binder_debug(BINDER_DEBUG_DEAD_BINDER, 4001 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n", 4002 proc->pid, thread->pid, (u64)cookie, 4003 death); 4004 if (death == NULL) { 4005 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n", 4006 proc->pid, thread->pid, (u64)cookie); 4007 binder_inner_proc_unlock(proc); 4008 break; 4009 } 4010 binder_dequeue_work_ilocked(&death->work); 4011 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { 4012 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 4013 if (thread->looper & 4014 (BINDER_LOOPER_STATE_REGISTERED | 4015 BINDER_LOOPER_STATE_ENTERED)) 4016 binder_enqueue_thread_work_ilocked( 4017 thread, &death->work); 4018 else { 4019 binder_enqueue_work_ilocked( 4020 &death->work, 4021 &proc->todo); 4022 binder_wakeup_proc_ilocked(proc); 4023 } 4024 } 4025 binder_inner_proc_unlock(proc); 4026 } break; 4027 4028 default: 4029 pr_err("%d:%d unknown command %d\n", 4030 proc->pid, thread->pid, cmd); 4031 return -EINVAL; 4032 } 4033 *consumed = ptr - buffer; 4034 } 4035 return 0; 4036 } 4037 4038 static void binder_stat_br(struct binder_proc *proc, 4039 struct binder_thread *thread, uint32_t cmd) 4040 { 4041 trace_binder_return(cmd); 4042 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { 4043 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]); 4044 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]); 4045 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]); 4046 } 4047 } 4048 4049 static int binder_put_node_cmd(struct binder_proc *proc, 4050 struct binder_thread *thread, 4051 void __user **ptrp, 4052 binder_uintptr_t node_ptr, 4053 binder_uintptr_t node_cookie, 4054 int node_debug_id, 4055 uint32_t cmd, const char *cmd_name) 4056 { 4057 void __user *ptr = *ptrp; 4058 4059 if (put_user(cmd, (uint32_t __user *)ptr)) 4060 return -EFAULT; 4061 ptr += sizeof(uint32_t); 4062 4063 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr)) 4064 return -EFAULT; 4065 ptr += sizeof(binder_uintptr_t); 4066 4067 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr)) 4068 return -EFAULT; 4069 ptr += sizeof(binder_uintptr_t); 4070 4071 binder_stat_br(proc, thread, cmd); 4072 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n", 4073 proc->pid, thread->pid, cmd_name, node_debug_id, 4074 (u64)node_ptr, (u64)node_cookie); 4075 4076 *ptrp = ptr; 4077 return 0; 4078 } 4079 4080 static int binder_wait_for_work(struct binder_thread *thread, 4081 bool do_proc_work) 4082 { 4083 DEFINE_WAIT(wait); 4084 struct binder_proc *proc = thread->proc; 4085 int ret = 0; 4086 4087 freezer_do_not_count(); 4088 binder_inner_proc_lock(proc); 4089 for (;;) { 4090 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE); 4091 if (binder_has_work_ilocked(thread, do_proc_work)) 4092 break; 4093 if (do_proc_work) 4094 
list_add(&thread->waiting_thread_node, 4095 &proc->waiting_threads); 4096 binder_inner_proc_unlock(proc); 4097 schedule(); 4098 binder_inner_proc_lock(proc); 4099 list_del_init(&thread->waiting_thread_node); 4100 if (signal_pending(current)) { 4101 ret = -ERESTARTSYS; 4102 break; 4103 } 4104 } 4105 finish_wait(&thread->wait, &wait); 4106 binder_inner_proc_unlock(proc); 4107 freezer_count(); 4108 4109 return ret; 4110 } 4111 4112 /** 4113 * binder_apply_fd_fixups() - finish fd translation 4114 * @proc: binder_proc associated with @t->buffer 4115 * @t: binder transaction with list of fd fixups 4116 * 4117 * Now that we are in the context of the transaction target 4118 * process, we can allocate and install fds. Process the 4119 * list of fds to translate and fix up the buffer with the 4120 * new fds. 4121 * 4122 * If we fail to allocate an fd, then free the resources by 4123 * fput'ing files that have not been processed and ksys_close'ing 4124 * any fds that have already been allocated. 4125 */ 4126 static int binder_apply_fd_fixups(struct binder_proc *proc, 4127 struct binder_transaction *t) 4128 { 4129 struct binder_txn_fd_fixup *fixup, *tmp; 4130 int ret = 0; 4131 4132 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) { 4133 int fd = get_unused_fd_flags(O_CLOEXEC); 4134 4135 if (fd < 0) { 4136 binder_debug(BINDER_DEBUG_TRANSACTION, 4137 "failed fd fixup txn %d fd %d\n", 4138 t->debug_id, fd); 4139 ret = -ENOMEM; 4140 break; 4141 } 4142 binder_debug(BINDER_DEBUG_TRANSACTION, 4143 "fd fixup txn %d fd %d\n", 4144 t->debug_id, fd); 4145 trace_binder_transaction_fd_recv(t, fd, fixup->offset); 4146 fd_install(fd, fixup->file); 4147 fixup->file = NULL; 4148 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer, 4149 fixup->offset, &fd, 4150 sizeof(u32))) { 4151 ret = -EINVAL; 4152 break; 4153 } 4154 } 4155 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { 4156 if (fixup->file) { 4157 fput(fixup->file); 4158 } else if (ret) { 4159 u32 fd; 4160 int err; 4161 4162 err = binder_alloc_copy_from_buffer(&proc->alloc, &fd, 4163 t->buffer, 4164 fixup->offset, 4165 sizeof(fd)); 4166 WARN_ON(err); 4167 if (!err) 4168 binder_deferred_fd_close(fd); 4169 } 4170 list_del(&fixup->fixup_entry); 4171 kfree(fixup); 4172 } 4173 4174 return ret; 4175 } 4176 4177 static int binder_thread_read(struct binder_proc *proc, 4178 struct binder_thread *thread, 4179 binder_uintptr_t binder_buffer, size_t size, 4180 binder_size_t *consumed, int non_block) 4181 { 4182 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 4183 void __user *ptr = buffer + *consumed; 4184 void __user *end = buffer + size; 4185 4186 int ret = 0; 4187 int wait_for_proc_work; 4188 4189 if (*consumed == 0) { 4190 if (put_user(BR_NOOP, (uint32_t __user *)ptr)) 4191 return -EFAULT; 4192 ptr += sizeof(uint32_t); 4193 } 4194 4195 retry: 4196 binder_inner_proc_lock(proc); 4197 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); 4198 binder_inner_proc_unlock(proc); 4199 4200 thread->looper |= BINDER_LOOPER_STATE_WAITING; 4201 4202 trace_binder_wait_for_work(wait_for_proc_work, 4203 !!thread->transaction_stack, 4204 !binder_worklist_empty(proc, &thread->todo)); 4205 if (wait_for_proc_work) { 4206 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 4207 BINDER_LOOPER_STATE_ENTERED))) { 4208 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n", 4209 proc->pid, thread->pid, thread->looper); 4210
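/*
 * Hedged illustration of the handshake this check enforces; a
 * looper thread is expected to announce itself before consuming
 * process work, roughly as follows (userspace fragment; error
 * handling is omitted and the binder_fd name is illustrative):
 *
 *   uint32_t cmd = BC_ENTER_LOOPER;
 *   struct binder_write_read bwr = {
 *           .write_buffer = (binder_uintptr_t)&cmd,
 *           .write_size = sizeof(cmd),
 *   };
 *   ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */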
wait_event_interruptible(binder_user_error_wait, 4211 binder_stop_on_user_error < 2); 4212 } 4213 binder_set_nice(proc->default_priority); 4214 } 4215 4216 if (non_block) { 4217 if (!binder_has_work(thread, wait_for_proc_work)) 4218 ret = -EAGAIN; 4219 } else { 4220 ret = binder_wait_for_work(thread, wait_for_proc_work); 4221 } 4222 4223 thread->looper &= ~BINDER_LOOPER_STATE_WAITING; 4224 4225 if (ret) 4226 return ret; 4227 4228 while (1) { 4229 uint32_t cmd; 4230 struct binder_transaction_data_secctx tr; 4231 struct binder_transaction_data *trd = &tr.transaction_data; 4232 struct binder_work *w = NULL; 4233 struct list_head *list = NULL; 4234 struct binder_transaction *t = NULL; 4235 struct binder_thread *t_from; 4236 size_t trsize = sizeof(*trd); 4237 4238 binder_inner_proc_lock(proc); 4239 if (!binder_worklist_empty_ilocked(&thread->todo)) 4240 list = &thread->todo; 4241 else if (!binder_worklist_empty_ilocked(&proc->todo) && 4242 wait_for_proc_work) 4243 list = &proc->todo; 4244 else { 4245 binder_inner_proc_unlock(proc); 4246 4247 /* no data added */ 4248 if (ptr - buffer == 4 && !thread->looper_need_return) 4249 goto retry; 4250 break; 4251 } 4252 4253 if (end - ptr < sizeof(tr) + 4) { 4254 binder_inner_proc_unlock(proc); 4255 break; 4256 } 4257 w = binder_dequeue_work_head_ilocked(list); 4258 if (binder_worklist_empty_ilocked(&thread->todo)) 4259 thread->process_todo = false; 4260 4261 switch (w->type) { 4262 case BINDER_WORK_TRANSACTION: { 4263 binder_inner_proc_unlock(proc); 4264 t = container_of(w, struct binder_transaction, work); 4265 } break; 4266 case BINDER_WORK_RETURN_ERROR: { 4267 struct binder_error *e = container_of( 4268 w, struct binder_error, work); 4269 4270 WARN_ON(e->cmd == BR_OK); 4271 binder_inner_proc_unlock(proc); 4272 if (put_user(e->cmd, (uint32_t __user *)ptr)) 4273 return -EFAULT; 4274 cmd = e->cmd; 4275 e->cmd = BR_OK; 4276 ptr += sizeof(uint32_t); 4277 4278 binder_stat_br(proc, thread, cmd); 4279 } break; 4280 case BINDER_WORK_TRANSACTION_COMPLETE: { 4281 binder_inner_proc_unlock(proc); 4282 cmd = BR_TRANSACTION_COMPLETE; 4283 kfree(w); 4284 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 4285 if (put_user(cmd, (uint32_t __user *)ptr)) 4286 return -EFAULT; 4287 ptr += sizeof(uint32_t); 4288 4289 binder_stat_br(proc, thread, cmd); 4290 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, 4291 "%d:%d BR_TRANSACTION_COMPLETE\n", 4292 proc->pid, thread->pid); 4293 } break; 4294 case BINDER_WORK_NODE: { 4295 struct binder_node *node = container_of(w, struct binder_node, work); 4296 int strong, weak; 4297 binder_uintptr_t node_ptr = node->ptr; 4298 binder_uintptr_t node_cookie = node->cookie; 4299 int node_debug_id = node->debug_id; 4300 int has_weak_ref; 4301 int has_strong_ref; 4302 void __user *orig_ptr = ptr; 4303 4304 BUG_ON(proc != node->proc); 4305 strong = node->internal_strong_refs || 4306 node->local_strong_refs; 4307 weak = !hlist_empty(&node->refs) || 4308 node->local_weak_refs || 4309 node->tmp_refs || strong; 4310 has_strong_ref = node->has_strong_ref; 4311 has_weak_ref = node->has_weak_ref; 4312 4313 if (weak && !has_weak_ref) { 4314 node->has_weak_ref = 1; 4315 node->pending_weak_ref = 1; 4316 node->local_weak_refs++; 4317 } 4318 if (strong && !has_strong_ref) { 4319 node->has_strong_ref = 1; 4320 node->pending_strong_ref = 1; 4321 node->local_strong_refs++; 4322 } 4323 if (!strong && has_strong_ref) 4324 node->has_strong_ref = 0; 4325 if (!weak && has_weak_ref) 4326 node->has_weak_ref = 0; 4327 if (!weak && !strong) { 4328 
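/*
 * Neither weak nor strong references remain, so the node is torn
 * down below. For the other transitions computed above, the
 * commands pushed to userspace further down are:
 *
 *   weak   0 -> 1 : BR_INCREFS
 *   strong 0 -> 1 : BR_ACQUIRE
 *   strong 1 -> 0 : BR_RELEASE
 *   weak   1 -> 0 : BR_DECREFS
 */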
binder_debug(BINDER_DEBUG_INTERNAL_REFS, 4329 "%d:%d node %d u%016llx c%016llx deleted\n", 4330 proc->pid, thread->pid, 4331 node_debug_id, 4332 (u64)node_ptr, 4333 (u64)node_cookie); 4334 rb_erase(&node->rb_node, &proc->nodes); 4335 binder_inner_proc_unlock(proc); 4336 binder_node_lock(node); 4337 /* 4338 * Acquire the node lock before freeing the 4339 * node to serialize with other threads that 4340 * may have been holding the node lock while 4341 * decrementing this node (avoids race where 4342 * this thread frees while the other thread 4343 * is unlocking the node after the final 4344 * decrement) 4345 */ 4346 binder_node_unlock(node); 4347 binder_free_node(node); 4348 } else 4349 binder_inner_proc_unlock(proc); 4350 4351 if (weak && !has_weak_ref) 4352 ret = binder_put_node_cmd( 4353 proc, thread, &ptr, node_ptr, 4354 node_cookie, node_debug_id, 4355 BR_INCREFS, "BR_INCREFS"); 4356 if (!ret && strong && !has_strong_ref) 4357 ret = binder_put_node_cmd( 4358 proc, thread, &ptr, node_ptr, 4359 node_cookie, node_debug_id, 4360 BR_ACQUIRE, "BR_ACQUIRE"); 4361 if (!ret && !strong && has_strong_ref) 4362 ret = binder_put_node_cmd( 4363 proc, thread, &ptr, node_ptr, 4364 node_cookie, node_debug_id, 4365 BR_RELEASE, "BR_RELEASE"); 4366 if (!ret && !weak && has_weak_ref) 4367 ret = binder_put_node_cmd( 4368 proc, thread, &ptr, node_ptr, 4369 node_cookie, node_debug_id, 4370 BR_DECREFS, "BR_DECREFS"); 4371 if (orig_ptr == ptr) 4372 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 4373 "%d:%d node %d u%016llx c%016llx state unchanged\n", 4374 proc->pid, thread->pid, 4375 node_debug_id, 4376 (u64)node_ptr, 4377 (u64)node_cookie); 4378 if (ret) 4379 return ret; 4380 } break; 4381 case BINDER_WORK_DEAD_BINDER: 4382 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 4383 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 4384 struct binder_ref_death *death; 4385 uint32_t cmd; 4386 binder_uintptr_t cookie; 4387 4388 death = container_of(w, struct binder_ref_death, work); 4389 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) 4390 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; 4391 else 4392 cmd = BR_DEAD_BINDER; 4393 cookie = death->cookie; 4394 4395 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 4396 "%d:%d %s %016llx\n", 4397 proc->pid, thread->pid, 4398 cmd == BR_DEAD_BINDER ? 
4399 "BR_DEAD_BINDER" : 4400 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 4401 (u64)cookie); 4402 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { 4403 binder_inner_proc_unlock(proc); 4404 kfree(death); 4405 binder_stats_deleted(BINDER_STAT_DEATH); 4406 } else { 4407 binder_enqueue_work_ilocked( 4408 w, &proc->delivered_death); 4409 binder_inner_proc_unlock(proc); 4410 } 4411 if (put_user(cmd, (uint32_t __user *)ptr)) 4412 return -EFAULT; 4413 ptr += sizeof(uint32_t); 4414 if (put_user(cookie, 4415 (binder_uintptr_t __user *)ptr)) 4416 return -EFAULT; 4417 ptr += sizeof(binder_uintptr_t); 4418 binder_stat_br(proc, thread, cmd); 4419 if (cmd == BR_DEAD_BINDER) 4420 goto done; /* DEAD_BINDER notifications can cause transactions */ 4421 } break; 4422 default: 4423 binder_inner_proc_unlock(proc); 4424 pr_err("%d:%d: bad work type %d\n", 4425 proc->pid, thread->pid, w->type); 4426 break; 4427 } 4428 4429 if (!t) 4430 continue; 4431 4432 BUG_ON(t->buffer == NULL); 4433 if (t->buffer->target_node) { 4434 struct binder_node *target_node = t->buffer->target_node; 4435 4436 trd->target.ptr = target_node->ptr; 4437 trd->cookie = target_node->cookie; 4438 t->saved_priority = task_nice(current); 4439 if (t->priority < target_node->min_priority && 4440 !(t->flags & TF_ONE_WAY)) 4441 binder_set_nice(t->priority); 4442 else if (!(t->flags & TF_ONE_WAY) || 4443 t->saved_priority > target_node->min_priority) 4444 binder_set_nice(target_node->min_priority); 4445 cmd = BR_TRANSACTION; 4446 } else { 4447 trd->target.ptr = 0; 4448 trd->cookie = 0; 4449 cmd = BR_REPLY; 4450 } 4451 trd->code = t->code; 4452 trd->flags = t->flags; 4453 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid); 4454 4455 t_from = binder_get_txn_from(t); 4456 if (t_from) { 4457 struct task_struct *sender = t_from->proc->tsk; 4458 4459 trd->sender_pid = 4460 task_tgid_nr_ns(sender, 4461 task_active_pid_ns(current)); 4462 } else { 4463 trd->sender_pid = 0; 4464 } 4465 4466 ret = binder_apply_fd_fixups(proc, t); 4467 if (ret) { 4468 struct binder_buffer *buffer = t->buffer; 4469 bool oneway = !!(t->flags & TF_ONE_WAY); 4470 int tid = t->debug_id; 4471 4472 if (t_from) 4473 binder_thread_dec_tmpref(t_from); 4474 buffer->transaction = NULL; 4475 binder_cleanup_transaction(t, "fd fixups failed", 4476 BR_FAILED_REPLY); 4477 binder_free_buf(proc, buffer); 4478 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 4479 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n", 4480 proc->pid, thread->pid, 4481 oneway ? "async " : 4482 (cmd == BR_REPLY ? 
"reply " : ""), 4483 tid, BR_FAILED_REPLY, ret, __LINE__); 4484 if (cmd == BR_REPLY) { 4485 cmd = BR_FAILED_REPLY; 4486 if (put_user(cmd, (uint32_t __user *)ptr)) 4487 return -EFAULT; 4488 ptr += sizeof(uint32_t); 4489 binder_stat_br(proc, thread, cmd); 4490 break; 4491 } 4492 continue; 4493 } 4494 trd->data_size = t->buffer->data_size; 4495 trd->offsets_size = t->buffer->offsets_size; 4496 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data; 4497 trd->data.ptr.offsets = trd->data.ptr.buffer + 4498 ALIGN(t->buffer->data_size, 4499 sizeof(void *)); 4500 4501 tr.secctx = t->security_ctx; 4502 if (t->security_ctx) { 4503 cmd = BR_TRANSACTION_SEC_CTX; 4504 trsize = sizeof(tr); 4505 } 4506 if (put_user(cmd, (uint32_t __user *)ptr)) { 4507 if (t_from) 4508 binder_thread_dec_tmpref(t_from); 4509 4510 binder_cleanup_transaction(t, "put_user failed", 4511 BR_FAILED_REPLY); 4512 4513 return -EFAULT; 4514 } 4515 ptr += sizeof(uint32_t); 4516 if (copy_to_user(ptr, &tr, trsize)) { 4517 if (t_from) 4518 binder_thread_dec_tmpref(t_from); 4519 4520 binder_cleanup_transaction(t, "copy_to_user failed", 4521 BR_FAILED_REPLY); 4522 4523 return -EFAULT; 4524 } 4525 ptr += trsize; 4526 4527 trace_binder_transaction_received(t); 4528 binder_stat_br(proc, thread, cmd); 4529 binder_debug(BINDER_DEBUG_TRANSACTION, 4530 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", 4531 proc->pid, thread->pid, 4532 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : 4533 (cmd == BR_TRANSACTION_SEC_CTX) ? 4534 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY", 4535 t->debug_id, t_from ? t_from->proc->pid : 0, 4536 t_from ? t_from->pid : 0, cmd, 4537 t->buffer->data_size, t->buffer->offsets_size, 4538 (u64)trd->data.ptr.buffer, 4539 (u64)trd->data.ptr.offsets); 4540 4541 if (t_from) 4542 binder_thread_dec_tmpref(t_from); 4543 t->buffer->allow_user_free = 1; 4544 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) { 4545 binder_inner_proc_lock(thread->proc); 4546 t->to_parent = thread->transaction_stack; 4547 t->to_thread = thread; 4548 thread->transaction_stack = t; 4549 binder_inner_proc_unlock(thread->proc); 4550 } else { 4551 binder_free_transaction(t); 4552 } 4553 break; 4554 } 4555 4556 done: 4557 4558 *consumed = ptr - buffer; 4559 binder_inner_proc_lock(proc); 4560 if (proc->requested_threads == 0 && 4561 list_empty(&thread->proc->waiting_threads) && 4562 proc->requested_threads_started < proc->max_threads && 4563 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 4564 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ 4565 /*spawn a new thread if we leave this out */) { 4566 proc->requested_threads++; 4567 binder_inner_proc_unlock(proc); 4568 binder_debug(BINDER_DEBUG_THREADS, 4569 "%d:%d BR_SPAWN_LOOPER\n", 4570 proc->pid, thread->pid); 4571 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) 4572 return -EFAULT; 4573 binder_stat_br(proc, thread, BR_SPAWN_LOOPER); 4574 } else 4575 binder_inner_proc_unlock(proc); 4576 return 0; 4577 } 4578 4579 static void binder_release_work(struct binder_proc *proc, 4580 struct list_head *list) 4581 { 4582 struct binder_work *w; 4583 4584 while (1) { 4585 w = binder_dequeue_work_head(proc, list); 4586 if (!w) 4587 return; 4588 4589 switch (w->type) { 4590 case BINDER_WORK_TRANSACTION: { 4591 struct binder_transaction *t; 4592 4593 t = container_of(w, struct binder_transaction, work); 4594 4595 binder_cleanup_transaction(t, "process died.", 4596 BR_DEAD_REPLY); 4597 } break; 4598 case BINDER_WORK_RETURN_ERROR: { 4599 struct binder_error *e = container_of( 4600 w, struct 
binder_error, work); 4601 4602 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4603 "undelivered TRANSACTION_ERROR: %u\n", 4604 e->cmd); 4605 } break; 4606 case BINDER_WORK_TRANSACTION_COMPLETE: { 4607 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4608 "undelivered TRANSACTION_COMPLETE\n"); 4609 kfree(w); 4610 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 4611 } break; 4612 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 4613 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 4614 struct binder_ref_death *death; 4615 4616 death = container_of(w, struct binder_ref_death, work); 4617 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4618 "undelivered death notification, %016llx\n", 4619 (u64)death->cookie); 4620 kfree(death); 4621 binder_stats_deleted(BINDER_STAT_DEATH); 4622 } break; 4623 default: 4624 pr_err("unexpected work type, %d, not freed\n", 4625 w->type); 4626 break; 4627 } 4628 } 4629 4630 } 4631 4632 static struct binder_thread *binder_get_thread_ilocked( 4633 struct binder_proc *proc, struct binder_thread *new_thread) 4634 { 4635 struct binder_thread *thread = NULL; 4636 struct rb_node *parent = NULL; 4637 struct rb_node **p = &proc->threads.rb_node; 4638 4639 while (*p) { 4640 parent = *p; 4641 thread = rb_entry(parent, struct binder_thread, rb_node); 4642 4643 if (current->pid < thread->pid) 4644 p = &(*p)->rb_left; 4645 else if (current->pid > thread->pid) 4646 p = &(*p)->rb_right; 4647 else 4648 return thread; 4649 } 4650 if (!new_thread) 4651 return NULL; 4652 thread = new_thread; 4653 binder_stats_created(BINDER_STAT_THREAD); 4654 thread->proc = proc; 4655 thread->pid = current->pid; 4656 atomic_set(&thread->tmp_ref, 0); 4657 init_waitqueue_head(&thread->wait); 4658 INIT_LIST_HEAD(&thread->todo); 4659 rb_link_node(&thread->rb_node, parent, p); 4660 rb_insert_color(&thread->rb_node, &proc->threads); 4661 thread->looper_need_return = true; 4662 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR; 4663 thread->return_error.cmd = BR_OK; 4664 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR; 4665 thread->reply_error.cmd = BR_OK; 4666 INIT_LIST_HEAD(&new_thread->waiting_thread_node); 4667 return thread; 4668 } 4669 4670 static struct binder_thread *binder_get_thread(struct binder_proc *proc) 4671 { 4672 struct binder_thread *thread; 4673 struct binder_thread *new_thread; 4674 4675 binder_inner_proc_lock(proc); 4676 thread = binder_get_thread_ilocked(proc, NULL); 4677 binder_inner_proc_unlock(proc); 4678 if (!thread) { 4679 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL); 4680 if (new_thread == NULL) 4681 return NULL; 4682 binder_inner_proc_lock(proc); 4683 thread = binder_get_thread_ilocked(proc, new_thread); 4684 binder_inner_proc_unlock(proc); 4685 if (thread != new_thread) 4686 kfree(new_thread); 4687 } 4688 return thread; 4689 } 4690 4691 static void binder_free_proc(struct binder_proc *proc) 4692 { 4693 BUG_ON(!list_empty(&proc->todo)); 4694 BUG_ON(!list_empty(&proc->delivered_death)); 4695 binder_alloc_deferred_release(&proc->alloc); 4696 put_task_struct(proc->tsk); 4697 binder_stats_deleted(BINDER_STAT_PROC); 4698 kfree(proc); 4699 } 4700 4701 static void binder_free_thread(struct binder_thread *thread) 4702 { 4703 BUG_ON(!list_empty(&thread->todo)); 4704 binder_stats_deleted(BINDER_STAT_THREAD); 4705 binder_proc_dec_tmpref(thread->proc); 4706 kfree(thread); 4707 } 4708 4709 static int binder_thread_release(struct binder_proc *proc, 4710 struct binder_thread *thread) 4711 { 4712 struct binder_transaction *t; 4713 struct binder_transaction *send_reply = NULL; 4714 int 
active_transactions = 0; 4715 struct binder_transaction *last_t = NULL; 4716 4717 binder_inner_proc_lock(thread->proc); 4718 /* 4719 * take a ref on the proc so it survives 4720 * after we remove this thread from proc->threads. 4721 * The corresponding dec is when we actually 4722 * free the thread in binder_free_thread() 4723 */ 4724 proc->tmp_ref++; 4725 /* 4726 * take a ref on this thread to ensure it 4727 * survives while we are releasing it 4728 */ 4729 atomic_inc(&thread->tmp_ref); 4730 rb_erase(&thread->rb_node, &proc->threads); 4731 t = thread->transaction_stack; 4732 if (t) { 4733 spin_lock(&t->lock); 4734 if (t->to_thread == thread) 4735 send_reply = t; 4736 } else { 4737 __acquire(&t->lock); 4738 } 4739 thread->is_dead = true; 4740 4741 while (t) { 4742 last_t = t; 4743 active_transactions++; 4744 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4745 "release %d:%d transaction %d %s, still active\n", 4746 proc->pid, thread->pid, 4747 t->debug_id, 4748 (t->to_thread == thread) ? "in" : "out"); 4749 4750 if (t->to_thread == thread) { 4751 t->to_proc = NULL; 4752 t->to_thread = NULL; 4753 if (t->buffer) { 4754 t->buffer->transaction = NULL; 4755 t->buffer = NULL; 4756 } 4757 t = t->to_parent; 4758 } else if (t->from == thread) { 4759 t->from = NULL; 4760 t = t->from_parent; 4761 } else 4762 BUG(); 4763 spin_unlock(&last_t->lock); 4764 if (t) 4765 spin_lock(&t->lock); 4766 else 4767 __acquire(&t->lock); 4768 } 4769 /* annotation for sparse, lock not acquired in last iteration above */ 4770 __release(&t->lock); 4771 4772 /* 4773 * If this thread used poll, make sure we remove the waitqueue 4774 * from any epoll data structures holding it with POLLFREE. 4775 * waitqueue_active() is safe to use here because we're holding 4776 * the inner lock. 4777 */ 4778 if ((thread->looper & BINDER_LOOPER_STATE_POLL) && 4779 waitqueue_active(&thread->wait)) { 4780 wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE); 4781 } 4782 4783 binder_inner_proc_unlock(thread->proc); 4784 4785 /* 4786 * This is needed to avoid races between wake_up_poll() above and 4787 * ep_remove_waitqueue() called for other reasons (e.g. the epoll file 4788 * descriptor being closed); ep_remove_waitqueue() holds an RCU read 4789 * lock, so we can be sure it's done after calling synchronize_rcu(). 
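 * Note that the wait below also has to happen after
 * binder_inner_proc_unlock() above: synchronize_rcu() can sleep,
 * so it must not be called with the inner spinlock held.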
4790 */ 4791 if (thread->looper & BINDER_LOOPER_STATE_POLL) 4792 synchronize_rcu(); 4793 4794 if (send_reply) 4795 binder_send_failed_reply(send_reply, BR_DEAD_REPLY); 4796 binder_release_work(proc, &thread->todo); 4797 binder_thread_dec_tmpref(thread); 4798 return active_transactions; 4799 } 4800 4801 static __poll_t binder_poll(struct file *filp, 4802 struct poll_table_struct *wait) 4803 { 4804 struct binder_proc *proc = filp->private_data; 4805 struct binder_thread *thread = NULL; 4806 bool wait_for_proc_work; 4807 4808 thread = binder_get_thread(proc); 4809 if (!thread) 4810 return POLLERR; 4811 4812 binder_inner_proc_lock(thread->proc); 4813 thread->looper |= BINDER_LOOPER_STATE_POLL; 4814 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); 4815 4816 binder_inner_proc_unlock(thread->proc); 4817 4818 poll_wait(filp, &thread->wait, wait); 4819 4820 if (binder_has_work(thread, wait_for_proc_work)) 4821 return EPOLLIN; 4822 4823 return 0; 4824 } 4825 4826 static int binder_ioctl_write_read(struct file *filp, 4827 unsigned int cmd, unsigned long arg, 4828 struct binder_thread *thread) 4829 { 4830 int ret = 0; 4831 struct binder_proc *proc = filp->private_data; 4832 unsigned int size = _IOC_SIZE(cmd); 4833 void __user *ubuf = (void __user *)arg; 4834 struct binder_write_read bwr; 4835 4836 if (size != sizeof(struct binder_write_read)) { 4837 ret = -EINVAL; 4838 goto out; 4839 } 4840 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { 4841 ret = -EFAULT; 4842 goto out; 4843 } 4844 binder_debug(BINDER_DEBUG_READ_WRITE, 4845 "%d:%d write %lld at %016llx, read %lld at %016llx\n", 4846 proc->pid, thread->pid, 4847 (u64)bwr.write_size, (u64)bwr.write_buffer, 4848 (u64)bwr.read_size, (u64)bwr.read_buffer); 4849 4850 if (bwr.write_size > 0) { 4851 ret = binder_thread_write(proc, thread, 4852 bwr.write_buffer, 4853 bwr.write_size, 4854 &bwr.write_consumed); 4855 trace_binder_write_done(ret); 4856 if (ret < 0) { 4857 bwr.read_consumed = 0; 4858 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 4859 ret = -EFAULT; 4860 goto out; 4861 } 4862 } 4863 if (bwr.read_size > 0) { 4864 ret = binder_thread_read(proc, thread, bwr.read_buffer, 4865 bwr.read_size, 4866 &bwr.read_consumed, 4867 filp->f_flags & O_NONBLOCK); 4868 trace_binder_read_done(ret); 4869 binder_inner_proc_lock(proc); 4870 if (!binder_worklist_empty_ilocked(&proc->todo)) 4871 binder_wakeup_proc_ilocked(proc); 4872 binder_inner_proc_unlock(proc); 4873 if (ret < 0) { 4874 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 4875 ret = -EFAULT; 4876 goto out; 4877 } 4878 } 4879 binder_debug(BINDER_DEBUG_READ_WRITE, 4880 "%d:%d wrote %lld of %lld, read return %lld of %lld\n", 4881 proc->pid, thread->pid, 4882 (u64)bwr.write_consumed, (u64)bwr.write_size, 4883 (u64)bwr.read_consumed, (u64)bwr.read_size); 4884 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { 4885 ret = -EFAULT; 4886 goto out; 4887 } 4888 out: 4889 return ret; 4890 } 4891 4892 static int binder_ioctl_set_ctx_mgr(struct file *filp, 4893 struct flat_binder_object *fbo) 4894 { 4895 int ret = 0; 4896 struct binder_proc *proc = filp->private_data; 4897 struct binder_context *context = proc->context; 4898 struct binder_node *new_node; 4899 kuid_t curr_euid = current_euid(); 4900 4901 mutex_lock(&context->context_mgr_node_lock); 4902 if (context->binder_context_mgr_node) { 4903 pr_err("BINDER_SET_CONTEXT_MGR already set\n"); 4904 ret = -EBUSY; 4905 goto out; 4906 } 4907 ret = security_binder_set_context_mgr(proc->tsk); 4908 if (ret < 0) 4909 goto out; 4910 if 
(uid_valid(context->binder_context_mgr_uid)) { 4911 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) { 4912 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", 4913 from_kuid(&init_user_ns, curr_euid), 4914 from_kuid(&init_user_ns, 4915 context->binder_context_mgr_uid)); 4916 ret = -EPERM; 4917 goto out; 4918 } 4919 } else { 4920 context->binder_context_mgr_uid = curr_euid; 4921 } 4922 new_node = binder_new_node(proc, fbo); 4923 if (!new_node) { 4924 ret = -ENOMEM; 4925 goto out; 4926 } 4927 binder_node_lock(new_node); 4928 new_node->local_weak_refs++; 4929 new_node->local_strong_refs++; 4930 new_node->has_strong_ref = 1; 4931 new_node->has_weak_ref = 1; 4932 context->binder_context_mgr_node = new_node; 4933 binder_node_unlock(new_node); 4934 binder_put_node(new_node); 4935 out: 4936 mutex_unlock(&context->context_mgr_node_lock); 4937 return ret; 4938 } 4939 4940 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc, 4941 struct binder_node_info_for_ref *info) 4942 { 4943 struct binder_node *node; 4944 struct binder_context *context = proc->context; 4945 __u32 handle = info->handle; 4946 4947 if (info->strong_count || info->weak_count || info->reserved1 || 4948 info->reserved2 || info->reserved3) { 4949 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.", 4950 proc->pid); 4951 return -EINVAL; 4952 } 4953 4954 /* This ioctl may only be used by the context manager */ 4955 mutex_lock(&context->context_mgr_node_lock); 4956 if (!context->binder_context_mgr_node || 4957 context->binder_context_mgr_node->proc != proc) { 4958 mutex_unlock(&context->context_mgr_node_lock); 4959 return -EPERM; 4960 } 4961 mutex_unlock(&context->context_mgr_node_lock); 4962 4963 node = binder_get_node_from_ref(proc, handle, true, NULL); 4964 if (!node) 4965 return -EINVAL; 4966 4967 info->strong_count = node->local_strong_refs + 4968 node->internal_strong_refs; 4969 info->weak_count = node->local_weak_refs; 4970 4971 binder_put_node(node); 4972 4973 return 0; 4974 } 4975 4976 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc, 4977 struct binder_node_debug_info *info) 4978 { 4979 struct rb_node *n; 4980 binder_uintptr_t ptr = info->ptr; 4981 4982 memset(info, 0, sizeof(*info)); 4983 4984 binder_inner_proc_lock(proc); 4985 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 4986 struct binder_node *node = rb_entry(n, struct binder_node, 4987 rb_node); 4988 if (node->ptr > ptr) { 4989 info->ptr = node->ptr; 4990 info->cookie = node->cookie; 4991 info->has_strong_ref = node->has_strong_ref; 4992 info->has_weak_ref = node->has_weak_ref; 4993 break; 4994 } 4995 } 4996 binder_inner_proc_unlock(proc); 4997 4998 return 0; 4999 } 5000 5001 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 5002 { 5003 int ret; 5004 struct binder_proc *proc = filp->private_data; 5005 struct binder_thread *thread; 5006 unsigned int size = _IOC_SIZE(cmd); 5007 void __user *ubuf = (void __user *)arg; 5008 5009 /*pr_info("binder_ioctl: %d:%d %x %lx\n", 5010 proc->pid, current->pid, cmd, arg);*/ 5011 5012 binder_selftest_alloc(&proc->alloc); 5013 5014 trace_binder_ioctl(cmd, arg); 5015 5016 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 5017 if (ret) 5018 goto err_unlocked; 5019 5020 thread = binder_get_thread(proc); 5021 if (thread == NULL) { 5022 ret = -ENOMEM; 5023 goto err; 5024 } 5025 5026 switch (cmd) { 5027 case BINDER_WRITE_READ: 5028 ret = binder_ioctl_write_read(filp, cmd, arg, thread); 
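/*
 * Hedged sketch of the caller's side of this ioctl (illustrative
 * fragment only; out_buf/in_buf and their sizes are made-up names,
 * and error handling is omitted):
 *
 *   struct binder_write_read bwr = {
 *           .write_buffer = (binder_uintptr_t)out_buf,
 *           .write_size   = out_len,
 *           .read_buffer  = (binder_uintptr_t)in_buf,
 *           .read_size    = in_cap,
 *   };
 *   if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) == 0)
 *           parse_returns(in_buf, bwr.read_consumed);
 *
 * On return, write_consumed and read_consumed report how much of
 * each buffer the kernel actually processed or filled.
 */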
5029 if (ret) 5030 goto err; 5031 break; 5032 case BINDER_SET_MAX_THREADS: { 5033 int max_threads; 5034 5035 if (copy_from_user(&max_threads, ubuf, 5036 sizeof(max_threads))) { 5037 ret = -EINVAL; 5038 goto err; 5039 } 5040 binder_inner_proc_lock(proc); 5041 proc->max_threads = max_threads; 5042 binder_inner_proc_unlock(proc); 5043 break; 5044 } 5045 case BINDER_SET_CONTEXT_MGR_EXT: { 5046 struct flat_binder_object fbo; 5047 5048 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) { 5049 ret = -EINVAL; 5050 goto err; 5051 } 5052 ret = binder_ioctl_set_ctx_mgr(filp, &fbo); 5053 if (ret) 5054 goto err; 5055 break; 5056 } 5057 case BINDER_SET_CONTEXT_MGR: 5058 ret = binder_ioctl_set_ctx_mgr(filp, NULL); 5059 if (ret) 5060 goto err; 5061 break; 5062 case BINDER_THREAD_EXIT: 5063 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", 5064 proc->pid, thread->pid); 5065 binder_thread_release(proc, thread); 5066 thread = NULL; 5067 break; 5068 case BINDER_VERSION: { 5069 struct binder_version __user *ver = ubuf; 5070 5071 if (size != sizeof(struct binder_version)) { 5072 ret = -EINVAL; 5073 goto err; 5074 } 5075 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, 5076 &ver->protocol_version)) { 5077 ret = -EINVAL; 5078 goto err; 5079 } 5080 break; 5081 } 5082 case BINDER_GET_NODE_INFO_FOR_REF: { 5083 struct binder_node_info_for_ref info; 5084 5085 if (copy_from_user(&info, ubuf, sizeof(info))) { 5086 ret = -EFAULT; 5087 goto err; 5088 } 5089 5090 ret = binder_ioctl_get_node_info_for_ref(proc, &info); 5091 if (ret < 0) 5092 goto err; 5093 5094 if (copy_to_user(ubuf, &info, sizeof(info))) { 5095 ret = -EFAULT; 5096 goto err; 5097 } 5098 5099 break; 5100 } 5101 case BINDER_GET_NODE_DEBUG_INFO: { 5102 struct binder_node_debug_info info; 5103 5104 if (copy_from_user(&info, ubuf, sizeof(info))) { 5105 ret = -EFAULT; 5106 goto err; 5107 } 5108 5109 ret = binder_ioctl_get_node_debug_info(proc, &info); 5110 if (ret < 0) 5111 goto err; 5112 5113 if (copy_to_user(ubuf, &info, sizeof(info))) { 5114 ret = -EFAULT; 5115 goto err; 5116 } 5117 break; 5118 } 5119 default: 5120 ret = -EINVAL; 5121 goto err; 5122 } 5123 ret = 0; 5124 err: 5125 if (thread) 5126 thread->looper_need_return = false; 5127 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 5128 if (ret && ret != -ERESTARTSYS) 5129 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); 5130 err_unlocked: 5131 trace_binder_ioctl_done(ret); 5132 return ret; 5133 } 5134 5135 static void binder_vma_open(struct vm_area_struct *vma) 5136 { 5137 struct binder_proc *proc = vma->vm_private_data; 5138 5139 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5140 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 5141 proc->pid, vma->vm_start, vma->vm_end, 5142 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 5143 (unsigned long)pgprot_val(vma->vm_page_prot)); 5144 } 5145 5146 static void binder_vma_close(struct vm_area_struct *vma) 5147 { 5148 struct binder_proc *proc = vma->vm_private_data; 5149 5150 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5151 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 5152 proc->pid, vma->vm_start, vma->vm_end, 5153 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 5154 (unsigned long)pgprot_val(vma->vm_page_prot)); 5155 binder_alloc_vma_close(&proc->alloc); 5156 } 5157 5158 static vm_fault_t binder_vm_fault(struct vm_fault *vmf) 5159 { 5160 return VM_FAULT_SIGBUS; 5161 } 5162 5163 static const struct vm_operations_struct binder_vm_ops = { 5164 .open = binder_vma_open, 5165 .close = 
binder_vma_close, 5166 .fault = binder_vm_fault, 5167 }; 5168 5169 static int binder_mmap(struct file *filp, struct vm_area_struct *vma) 5170 { 5171 int ret; 5172 struct binder_proc *proc = filp->private_data; 5173 const char *failure_string; 5174 5175 if (proc->tsk != current->group_leader) 5176 return -EINVAL; 5177 5178 if ((vma->vm_end - vma->vm_start) > SZ_4M) 5179 vma->vm_end = vma->vm_start + SZ_4M; 5180 5181 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5182 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", 5183 __func__, proc->pid, vma->vm_start, vma->vm_end, 5184 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 5185 (unsigned long)pgprot_val(vma->vm_page_prot)); 5186 5187 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { 5188 ret = -EPERM; 5189 failure_string = "bad vm_flags"; 5190 goto err_bad_arg; 5191 } 5192 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP; 5193 vma->vm_flags &= ~VM_MAYWRITE; 5194 5195 vma->vm_ops = &binder_vm_ops; 5196 vma->vm_private_data = proc; 5197 5198 ret = binder_alloc_mmap_handler(&proc->alloc, vma); 5199 if (ret) 5200 return ret; 5201 return 0; 5202 5203 err_bad_arg: 5204 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__, 5205 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); 5206 return ret; 5207 } 5208 5209 static int binder_open(struct inode *nodp, struct file *filp) 5210 { 5211 struct binder_proc *proc; 5212 struct binder_device *binder_dev; 5213 struct binderfs_info *info; 5214 struct dentry *binder_binderfs_dir_entry_proc = NULL; 5215 5216 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__, 5217 current->group_leader->pid, current->pid); 5218 5219 proc = kzalloc(sizeof(*proc), GFP_KERNEL); 5220 if (proc == NULL) 5221 return -ENOMEM; 5222 spin_lock_init(&proc->inner_lock); 5223 spin_lock_init(&proc->outer_lock); 5224 get_task_struct(current->group_leader); 5225 proc->tsk = current->group_leader; 5226 INIT_LIST_HEAD(&proc->todo); 5227 proc->default_priority = task_nice(current); 5228 /* binderfs stashes devices in i_private */ 5229 if (is_binderfs_device(nodp)) { 5230 binder_dev = nodp->i_private; 5231 info = nodp->i_sb->s_fs_info; 5232 binder_binderfs_dir_entry_proc = info->proc_log_dir; 5233 } else { 5234 binder_dev = container_of(filp->private_data, 5235 struct binder_device, miscdev); 5236 } 5237 proc->context = &binder_dev->context; 5238 binder_alloc_init(&proc->alloc); 5239 5240 binder_stats_created(BINDER_STAT_PROC); 5241 proc->pid = current->group_leader->pid; 5242 INIT_LIST_HEAD(&proc->delivered_death); 5243 INIT_LIST_HEAD(&proc->waiting_threads); 5244 filp->private_data = proc; 5245 5246 mutex_lock(&binder_procs_lock); 5247 hlist_add_head(&proc->proc_node, &binder_procs); 5248 mutex_unlock(&binder_procs_lock); 5249 5250 if (binder_debugfs_dir_entry_proc) { 5251 char strbuf[11]; 5252 5253 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 5254 /* 5255 * proc debug entries are shared between contexts, so 5256 * this will fail if the process tries to open the driver 5257 * again with a different context. The printing code will 5258 * anyway print all contexts that a given PID has, so this 5259 * is not a problem. 
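 * (For illustration: with debugfs mounted in its usual place, a
 * process with PID 1234 would get a single entry, typically
 * /sys/kernel/debug/binder/proc/1234, shared by all contexts.)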
5260 */ 5261 proc->debugfs_entry = debugfs_create_file(strbuf, 0444, 5262 binder_debugfs_dir_entry_proc, 5263 (void *)(unsigned long)proc->pid, 5264 &proc_fops); 5265 } 5266 5267 if (binder_binderfs_dir_entry_proc) { 5268 char strbuf[11]; 5269 struct dentry *binderfs_entry; 5270 5271 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 5272 /* 5273 * Similar to debugfs, the process specific log file is shared 5274 * between contexts. If the file has already been created for a 5275 * process, the following binderfs_create_file() call will 5276 * fail with error code EEXIST if another context of the same 5277 * process invoked binder_open(). This is ok since same as 5278 * debugfs, the log file will contain information on all 5279 * contexts of a given PID. 5280 */ 5281 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc, 5282 strbuf, &proc_fops, (void *)(unsigned long)proc->pid); 5283 if (!IS_ERR(binderfs_entry)) { 5284 proc->binderfs_entry = binderfs_entry; 5285 } else { 5286 int error; 5287 5288 error = PTR_ERR(binderfs_entry); 5289 if (error != -EEXIST) { 5290 pr_warn("Unable to create file %s in binderfs (error %d)\n", 5291 strbuf, error); 5292 } 5293 } 5294 } 5295 5296 return 0; 5297 } 5298 5299 static int binder_flush(struct file *filp, fl_owner_t id) 5300 { 5301 struct binder_proc *proc = filp->private_data; 5302 5303 binder_defer_work(proc, BINDER_DEFERRED_FLUSH); 5304 5305 return 0; 5306 } 5307 5308 static void binder_deferred_flush(struct binder_proc *proc) 5309 { 5310 struct rb_node *n; 5311 int wake_count = 0; 5312 5313 binder_inner_proc_lock(proc); 5314 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { 5315 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); 5316 5317 thread->looper_need_return = true; 5318 if (thread->looper & BINDER_LOOPER_STATE_WAITING) { 5319 wake_up_interruptible(&thread->wait); 5320 wake_count++; 5321 } 5322 } 5323 binder_inner_proc_unlock(proc); 5324 5325 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5326 "binder_flush: %d woke %d threads\n", proc->pid, 5327 wake_count); 5328 } 5329 5330 static int binder_release(struct inode *nodp, struct file *filp) 5331 { 5332 struct binder_proc *proc = filp->private_data; 5333 5334 debugfs_remove(proc->debugfs_entry); 5335 5336 if (proc->binderfs_entry) { 5337 binderfs_remove_file(proc->binderfs_entry); 5338 proc->binderfs_entry = NULL; 5339 } 5340 5341 binder_defer_work(proc, BINDER_DEFERRED_RELEASE); 5342 5343 return 0; 5344 } 5345 5346 static int binder_node_release(struct binder_node *node, int refs) 5347 { 5348 struct binder_ref *ref; 5349 int death = 0; 5350 struct binder_proc *proc = node->proc; 5351 5352 binder_release_work(proc, &node->async_todo); 5353 5354 binder_node_lock(node); 5355 binder_inner_proc_lock(proc); 5356 binder_dequeue_work_ilocked(&node->work); 5357 /* 5358 * The caller must have taken a temporary ref on the node, 5359 */ 5360 BUG_ON(!node->tmp_refs); 5361 if (hlist_empty(&node->refs) && node->tmp_refs == 1) { 5362 binder_inner_proc_unlock(proc); 5363 binder_node_unlock(node); 5364 binder_free_node(node); 5365 5366 return refs; 5367 } 5368 5369 node->proc = NULL; 5370 node->local_strong_refs = 0; 5371 node->local_weak_refs = 0; 5372 binder_inner_proc_unlock(proc); 5373 5374 spin_lock(&binder_dead_nodes_lock); 5375 hlist_add_head(&node->dead_node, &binder_dead_nodes); 5376 spin_unlock(&binder_dead_nodes_lock); 5377 5378 hlist_for_each_entry(ref, &node->refs, node_entry) { 5379 refs++; 5380 /* 5381 * Need the node lock to synchronize 5382 * with new 
notification requests and the 5383 * inner lock to synchronize with queued 5384 * death notifications. 5385 */ 5386 binder_inner_proc_lock(ref->proc); 5387 if (!ref->death) { 5388 binder_inner_proc_unlock(ref->proc); 5389 continue; 5390 } 5391 5392 death++; 5393 5394 BUG_ON(!list_empty(&ref->death->work.entry)); 5395 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 5396 binder_enqueue_work_ilocked(&ref->death->work, 5397 &ref->proc->todo); 5398 binder_wakeup_proc_ilocked(ref->proc); 5399 binder_inner_proc_unlock(ref->proc); 5400 } 5401 5402 binder_debug(BINDER_DEBUG_DEAD_BINDER, 5403 "node %d now dead, refs %d, death %d\n", 5404 node->debug_id, refs, death); 5405 binder_node_unlock(node); 5406 binder_put_node(node); 5407 5408 return refs; 5409 } 5410 5411 static void binder_deferred_release(struct binder_proc *proc) 5412 { 5413 struct binder_context *context = proc->context; 5414 struct rb_node *n; 5415 int threads, nodes, incoming_refs, outgoing_refs, active_transactions; 5416 5417 mutex_lock(&binder_procs_lock); 5418 hlist_del(&proc->proc_node); 5419 mutex_unlock(&binder_procs_lock); 5420 5421 mutex_lock(&context->context_mgr_node_lock); 5422 if (context->binder_context_mgr_node && 5423 context->binder_context_mgr_node->proc == proc) { 5424 binder_debug(BINDER_DEBUG_DEAD_BINDER, 5425 "%s: %d context_mgr_node gone\n", 5426 __func__, proc->pid); 5427 context->binder_context_mgr_node = NULL; 5428 } 5429 mutex_unlock(&context->context_mgr_node_lock); 5430 binder_inner_proc_lock(proc); 5431 /* 5432 * Make sure proc stays alive after we 5433 * remove all the threads 5434 */ 5435 proc->tmp_ref++; 5436 5437 proc->is_dead = true; 5438 threads = 0; 5439 active_transactions = 0; 5440 while ((n = rb_first(&proc->threads))) { 5441 struct binder_thread *thread; 5442 5443 thread = rb_entry(n, struct binder_thread, rb_node); 5444 binder_inner_proc_unlock(proc); 5445 threads++; 5446 active_transactions += binder_thread_release(proc, thread); 5447 binder_inner_proc_lock(proc); 5448 } 5449 5450 nodes = 0; 5451 incoming_refs = 0; 5452 while ((n = rb_first(&proc->nodes))) { 5453 struct binder_node *node; 5454 5455 node = rb_entry(n, struct binder_node, rb_node); 5456 nodes++; 5457 /* 5458 * take a temporary ref on the node before 5459 * calling binder_node_release() which will either 5460 * kfree() the node or call binder_put_node() 5461 */ 5462 binder_inc_node_tmpref_ilocked(node); 5463 rb_erase(&node->rb_node, &proc->nodes); 5464 binder_inner_proc_unlock(proc); 5465 incoming_refs = binder_node_release(node, incoming_refs); 5466 binder_inner_proc_lock(proc); 5467 } 5468 binder_inner_proc_unlock(proc); 5469 5470 outgoing_refs = 0; 5471 binder_proc_lock(proc); 5472 while ((n = rb_first(&proc->refs_by_desc))) { 5473 struct binder_ref *ref; 5474 5475 ref = rb_entry(n, struct binder_ref, rb_node_desc); 5476 outgoing_refs++; 5477 binder_cleanup_ref_olocked(ref); 5478 binder_proc_unlock(proc); 5479 binder_free_ref(ref); 5480 binder_proc_lock(proc); 5481 } 5482 binder_proc_unlock(proc); 5483 5484 binder_release_work(proc, &proc->todo); 5485 binder_release_work(proc, &proc->delivered_death); 5486 5487 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5488 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n", 5489 __func__, proc->pid, threads, nodes, incoming_refs, 5490 outgoing_refs, active_transactions); 5491 5492 binder_proc_dec_tmpref(proc); 5493 } 5494 5495 static void binder_deferred_func(struct work_struct *work) 5496 { 5497 struct binder_proc *proc; 5498 5499 int defer; 5500 5501 do { 5502 
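/*
 * Drain loop: pop one proc at a time off binder_deferred_list
 * under binder_deferred_lock, then handle its deferred flags with
 * the lock dropped; a NULL proc terminates the loop below.
 */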
mutex_lock(&binder_deferred_lock); 5503 if (!hlist_empty(&binder_deferred_list)) { 5504 proc = hlist_entry(binder_deferred_list.first, 5505 struct binder_proc, deferred_work_node); 5506 hlist_del_init(&proc->deferred_work_node); 5507 defer = proc->deferred_work; 5508 proc->deferred_work = 0; 5509 } else { 5510 proc = NULL; 5511 defer = 0; 5512 } 5513 mutex_unlock(&binder_deferred_lock); 5514 5515 if (defer & BINDER_DEFERRED_FLUSH) 5516 binder_deferred_flush(proc); 5517 5518 if (defer & BINDER_DEFERRED_RELEASE) 5519 binder_deferred_release(proc); /* frees proc */ 5520 } while (proc); 5521 } 5522 static DECLARE_WORK(binder_deferred_work, binder_deferred_func); 5523 5524 static void 5525 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) 5526 { 5527 mutex_lock(&binder_deferred_lock); 5528 proc->deferred_work |= defer; 5529 if (hlist_unhashed(&proc->deferred_work_node)) { 5530 hlist_add_head(&proc->deferred_work_node, 5531 &binder_deferred_list); 5532 schedule_work(&binder_deferred_work); 5533 } 5534 mutex_unlock(&binder_deferred_lock); 5535 } 5536 5537 static void print_binder_transaction_ilocked(struct seq_file *m, 5538 struct binder_proc *proc, 5539 const char *prefix, 5540 struct binder_transaction *t) 5541 { 5542 struct binder_proc *to_proc; 5543 struct binder_buffer *buffer = t->buffer; 5544 5545 spin_lock(&t->lock); 5546 to_proc = t->to_proc; 5547 seq_printf(m, 5548 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d", 5549 prefix, t->debug_id, t, 5550 t->from ? t->from->proc->pid : 0, 5551 t->from ? t->from->pid : 0, 5552 to_proc ? to_proc->pid : 0, 5553 t->to_thread ? t->to_thread->pid : 0, 5554 t->code, t->flags, t->priority, t->need_reply); 5555 spin_unlock(&t->lock); 5556 5557 if (proc != to_proc) { 5558 /* 5559 * Can only safely deref buffer if we are holding the 5560 * correct proc inner lock for this node 5561 */ 5562 seq_puts(m, "\n"); 5563 return; 5564 } 5565 5566 if (buffer == NULL) { 5567 seq_puts(m, " buffer free\n"); 5568 return; 5569 } 5570 if (buffer->target_node) 5571 seq_printf(m, " node %d", buffer->target_node->debug_id); 5572 seq_printf(m, " size %zd:%zd data %pK\n", 5573 buffer->data_size, buffer->offsets_size, 5574 buffer->user_data); 5575 } 5576 5577 static void print_binder_work_ilocked(struct seq_file *m, 5578 struct binder_proc *proc, 5579 const char *prefix, 5580 const char *transaction_prefix, 5581 struct binder_work *w) 5582 { 5583 struct binder_node *node; 5584 struct binder_transaction *t; 5585 5586 switch (w->type) { 5587 case BINDER_WORK_TRANSACTION: 5588 t = container_of(w, struct binder_transaction, work); 5589 print_binder_transaction_ilocked( 5590 m, proc, transaction_prefix, t); 5591 break; 5592 case BINDER_WORK_RETURN_ERROR: { 5593 struct binder_error *e = container_of( 5594 w, struct binder_error, work); 5595 5596 seq_printf(m, "%stransaction error: %u\n", 5597 prefix, e->cmd); 5598 } break; 5599 case BINDER_WORK_TRANSACTION_COMPLETE: 5600 seq_printf(m, "%stransaction complete\n", prefix); 5601 break; 5602 case BINDER_WORK_NODE: 5603 node = container_of(w, struct binder_node, work); 5604 seq_printf(m, "%snode work %d: u%016llx c%016llx\n", 5605 prefix, node->debug_id, 5606 (u64)node->ptr, (u64)node->cookie); 5607 break; 5608 case BINDER_WORK_DEAD_BINDER: 5609 seq_printf(m, "%shas dead binder\n", prefix); 5610 break; 5611 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 5612 seq_printf(m, "%shas cleared dead binder\n", prefix); 5613 break; 5614 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: 5615 seq_printf(m, "%shas cleared 
static void print_binder_work_ilocked(struct seq_file *m,
				      struct binder_proc *proc,
				      const char *prefix,
				      const char *transaction_prefix,
				      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					"    pending async transaction", w);
	}
}
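
/**
 * print_binder_ref_olocked() - print one binder_ref
 * @m:   seq_file for output
 * @ref: the binder_ref to print
 *
 * Requires proc->outer_lock to be held. Takes and releases the node
 * lock so ref->node->proc can be safely checked to report whether the
 * node is dead.
 */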
"" : "dead ", 5697 ref->node->debug_id, ref->data.strong, 5698 ref->data.weak, ref->death); 5699 binder_node_unlock(ref->node); 5700 } 5701 5702 static void print_binder_proc(struct seq_file *m, 5703 struct binder_proc *proc, int print_all) 5704 { 5705 struct binder_work *w; 5706 struct rb_node *n; 5707 size_t start_pos = m->count; 5708 size_t header_pos; 5709 struct binder_node *last_node = NULL; 5710 5711 seq_printf(m, "proc %d\n", proc->pid); 5712 seq_printf(m, "context %s\n", proc->context->name); 5713 header_pos = m->count; 5714 5715 binder_inner_proc_lock(proc); 5716 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 5717 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread, 5718 rb_node), print_all); 5719 5720 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 5721 struct binder_node *node = rb_entry(n, struct binder_node, 5722 rb_node); 5723 if (!print_all && !node->has_async_transaction) 5724 continue; 5725 5726 /* 5727 * take a temporary reference on the node so it 5728 * survives and isn't removed from the tree 5729 * while we print it. 5730 */ 5731 binder_inc_node_tmpref_ilocked(node); 5732 /* Need to drop inner lock to take node lock */ 5733 binder_inner_proc_unlock(proc); 5734 if (last_node) 5735 binder_put_node(last_node); 5736 binder_node_inner_lock(node); 5737 print_binder_node_nilocked(m, node); 5738 binder_node_inner_unlock(node); 5739 last_node = node; 5740 binder_inner_proc_lock(proc); 5741 } 5742 binder_inner_proc_unlock(proc); 5743 if (last_node) 5744 binder_put_node(last_node); 5745 5746 if (print_all) { 5747 binder_proc_lock(proc); 5748 for (n = rb_first(&proc->refs_by_desc); 5749 n != NULL; 5750 n = rb_next(n)) 5751 print_binder_ref_olocked(m, rb_entry(n, 5752 struct binder_ref, 5753 rb_node_desc)); 5754 binder_proc_unlock(proc); 5755 } 5756 binder_alloc_print_allocated(m, &proc->alloc); 5757 binder_inner_proc_lock(proc); 5758 list_for_each_entry(w, &proc->todo, entry) 5759 print_binder_work_ilocked(m, proc, " ", 5760 " pending transaction", w); 5761 list_for_each_entry(w, &proc->delivered_death, entry) { 5762 seq_puts(m, " has delivered dead binder\n"); 5763 break; 5764 } 5765 binder_inner_proc_unlock(proc); 5766 if (!print_all && m->count == header_pos) 5767 m->count = start_pos; 5768 } 5769 5770 static const char * const binder_return_strings[] = { 5771 "BR_ERROR", 5772 "BR_OK", 5773 "BR_TRANSACTION", 5774 "BR_REPLY", 5775 "BR_ACQUIRE_RESULT", 5776 "BR_DEAD_REPLY", 5777 "BR_TRANSACTION_COMPLETE", 5778 "BR_INCREFS", 5779 "BR_ACQUIRE", 5780 "BR_RELEASE", 5781 "BR_DECREFS", 5782 "BR_ATTEMPT_ACQUIRE", 5783 "BR_NOOP", 5784 "BR_SPAWN_LOOPER", 5785 "BR_FINISHED", 5786 "BR_DEAD_BINDER", 5787 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 5788 "BR_FAILED_REPLY" 5789 }; 5790 5791 static const char * const binder_command_strings[] = { 5792 "BC_TRANSACTION", 5793 "BC_REPLY", 5794 "BC_ACQUIRE_RESULT", 5795 "BC_FREE_BUFFER", 5796 "BC_INCREFS", 5797 "BC_ACQUIRE", 5798 "BC_RELEASE", 5799 "BC_DECREFS", 5800 "BC_INCREFS_DONE", 5801 "BC_ACQUIRE_DONE", 5802 "BC_ATTEMPT_ACQUIRE", 5803 "BC_REGISTER_LOOPER", 5804 "BC_ENTER_LOOPER", 5805 "BC_EXIT_LOOPER", 5806 "BC_REQUEST_DEATH_NOTIFICATION", 5807 "BC_CLEAR_DEATH_NOTIFICATION", 5808 "BC_DEAD_BINDER_DONE", 5809 "BC_TRANSACTION_SG", 5810 "BC_REPLY_SG", 5811 }; 5812 5813 static const char * const binder_objstat_strings[] = { 5814 "proc", 5815 "thread", 5816 "node", 5817 "ref", 5818 "death", 5819 "transaction", 5820 "transaction_complete" 5821 }; 5822 5823 static void print_binder_stats(struct seq_file *m, const char 
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}

static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}
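
/**
 * binder_state_show() - debugfs "state" file: dump all binder state
 * @m:      seq_file for output
 * @unused: unused
 *
 * Prints any nodes whose owning process has died, then the full state
 * of every binder_proc. A temporary reference is taken on each dead
 * node so binder_dead_nodes_lock can be dropped while printing it.
 */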
int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}
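
/**
 * binder_transaction_log_show() - debugfs dump of a transaction log
 * @m:      seq_file for output; m->private is the log to print
 * @unused: unused
 *
 * The log is a fixed-size ring buffer. If the ring has wrapped
 * (log->full), printing starts at the oldest entry and covers the
 * whole ring; otherwise it starts at index 0 and prints only the
 * entries written so far.
 */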
int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}
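
/**
 * binder_init() - driver initialization
 *
 * Registers the shrinker for the binder allocator, creates the debugfs
 * entries, registers one misc device per name in the "devices" module
 * parameter (only when binderfs is not enabled), and initializes
 * binderfs. On failure, any devices registered so far are deregistered
 * and freed before the error is returned.
 */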
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;
	char *device_names = NULL;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
	    strcmp(binder_devices_param, "") != 0) {
		/*
		 * Copy the module_parameter string, because we don't want to
		 * tokenize it in-place.
		 */
		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
		if (!device_names) {
			ret = -ENOMEM;
			goto err_alloc_device_names_failed;
		}

		device_tmp = device_names;
		while ((device_name = strsep(&device_tmp, ","))) {
			ret = init_binder_device(device_name);
			if (ret)
				goto err_init_binder_device_failed;
		}
	}

	ret = init_binderfs();
	if (ret)
		goto err_init_binder_device_failed;

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");