/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
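 *
 * As an illustrative sketch only (not a path the driver takes as-is),
 * code that needed all three locks would have to take and release them
 * in this order:
 *
 *	binder_proc_lock(proc);		<- 1) proc->outer_lock
 *	binder_node_lock(node);		<- 2) node->lock
 *	binder_inner_proc_lock(proc);	<- 3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);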
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>

#include <uapi/linux/android/binder.h>

#include <asm/cacheflush.h>

#include "binder_alloc.h"
#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)
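/*
 * Illustrative use of the macro above (the format string is made up
 * for the example):
 *
 *	binder_debug(BINDER_DEBUG_THREADS, "%d: thread exited\n", pid);
 *
 * prints (ratelimited) only when the BINDER_DEBUG_THREADS bit is set
 * in the debug_mask module parameter. binder_user_error() below
 * additionally sets binder_stop_on_user_error to 2 when the
 * stop_on_user_error parameter is nonzero.
 */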
#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info_ratelimited(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};
struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}

/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
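 * These are proc->todo, thread->todo, and node->async_todo,
 * respectively.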
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @txn_security_ctx:     require sender's security context
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 txn_security_ctx:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:         binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:   list entry for node->refs list in target node
 *                (protected by @node->lock)
 * @proc:         binder_proc containing ref
 * @node:         binder_node of target node. When cleaning up a
 *                ref for deletion in binder_cleanup_ref, a non-NULL
 *                @node indicates the node must be freed
 * @death:        pointer to death notification (ref_death) if requested
 *                (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notification
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};

/**
 * struct binder_txn_fd_fixup - transaction fd fixup list element
 * @fixup_entry:          list entry
 * @file:                 struct file to be associated with new fd
 * @offset:               offset in buffer data to this fixup
 *
 * List element for fd fixups in a transaction. Since file
 * descriptors need to be allocated in the context of the
 * target process, we pass each fd to be processed in this
 * struct.
 */
struct binder_txn_fd_fixup {
	struct list_head fixup_entry;
	struct file *file;
	size_t offset;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	struct list_head fd_fixups;
	binder_uintptr_t security_ctx;
	/**
	 * @lock: protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

/**
 * struct binder_object - union of flat binder object types
 * @hdr:   generic object header
 * @fbo:   binder object (nodes and refs)
 * @fdo:   file descriptor object
 * @bbo:   binder buffer pointer
 * @fdao:  file descriptor array
 *
 * Used for type-independent object copies
 */
struct binder_object {
	union {
		struct binder_object_header hdr;
		struct flat_binder_object fbo;
		struct binder_fd_object fdo;
		struct binder_buffer_object bbo;
		struct binder_fd_array_object fdao;
	};
};
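/*
 * Each lock helper below is wrapped in a macro so the caller's __LINE__
 * is captured and reported when BINDER_DEBUG_SPINLOCKS logging is
 * enabled.
 */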
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release spinlock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields.
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:         binder_proc associated with list
 * @list:         list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}
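/*
 * The deferred variant above is used for BINDER_WORK_NODE items (see
 * binder_inc_node_nilocked() later in this file): node ref bookkeeping
 * is queued without setting @process_todo, so it does not by itself
 * keep the thread from going to sleep.
 */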
/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}
static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread_ilocked() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}
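/*
 * binder_wakeup_proc_ilocked() below is the select-then-wake sequence
 * described above: pick a waiting thread off the list (if any), then
 * wake it, falling back to waking the poll()ing threads when no thread
 * is waiting.
 */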
static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		      current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}
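/*
 * Illustrative usage of the lookup above (not a real call site): the
 * node is returned with a temporary reference held, which the caller
 * must drop when done:
 *
 *	node = binder_get_node(proc, ptr);
 *	if (node) {
 *		...
 *		binder_put_node(node);
 *	}
 */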
static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {

		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}
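/*
 * binder_dec_node_nilocked() below returns true when the node has no
 * remaining refs of any kind, in which case the caller must free it
 * with binder_free_node() once the locks have been dropped.
 */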
static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
				!hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}
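/*
 * Note the pattern in binder_dec_node() above: the node is freed only
 * after both locks have been dropped, since binder_free_node() frees
 * the memory that contains node->lock itself.
 */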
/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node_nilocked() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		      node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
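/*
 * Descriptor allocation above: desc 0 is reserved for refs to the
 * context manager node; for all other nodes the in-order walk of
 * refs_by_desc picks the lowest descriptor not already in use.
 */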
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->data.debug_id,
			      ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}


/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
				   struct binder_node *node,
				   bool strong,
				   struct list_head *target_list,
				   struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}
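/*
 * Illustrative pairing (not a real call site): a tmp reference taken
 * via binder_get_txn_from() below must be dropped with
 * binder_thread_dec_tmpref() above:
 *
 *	from = binder_get_txn_from(t);
 *	if (from) {
 *		...
 *		binder_thread_dec_tmpref(from);
 *	}
 */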
1874 /**
1875 * binder_get_txn_from() - safely extract the "from" thread in transaction
1876 * @t: binder transaction for t->from
1877 *
1878 * Atomically return the "from" thread and increment the tmp_ref
1879 * count for the thread to ensure it stays alive until
1880 * binder_thread_dec_tmpref() is called.
1881 *
1882 * Return: the value of t->from
1883 */
1884 static struct binder_thread *binder_get_txn_from(
1885 struct binder_transaction *t)
1886 {
1887 struct binder_thread *from;
1888
1889 spin_lock(&t->lock);
1890 from = t->from;
1891 if (from)
1892 atomic_inc(&from->tmp_ref);
1893 spin_unlock(&t->lock);
1894 return from;
1895 }
1896
1897 /**
1898 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1899 * @t: binder transaction for t->from
1900 *
1901 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1902 * to guarantee that the thread cannot be released while operating on it.
1903 * The caller must call binder_inner_proc_unlock() to release the inner lock
1904 * as well as call binder_thread_dec_tmpref() to release the reference.
1905 *
1906 * Return: the value of t->from
1907 */
1908 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1909 struct binder_transaction *t)
1910 __acquires(&t->from->proc->inner_lock)
1911 {
1912 struct binder_thread *from;
1913
1914 from = binder_get_txn_from(t);
1915 if (!from) {
1916 __acquire(&from->proc->inner_lock);
1917 return NULL;
1918 }
1919 binder_inner_proc_lock(from->proc);
1920 if (t->from) {
1921 BUG_ON(from != t->from);
1922 return from;
1923 }
1924 binder_inner_proc_unlock(from->proc);
1925 __acquire(&from->proc->inner_lock);
1926 binder_thread_dec_tmpref(from);
1927 return NULL;
1928 }
1929
1930 /**
1931 * binder_free_txn_fixups() - free unprocessed fd fixups
1932 * @t: binder transaction whose fd fixups are to be freed
1933 *
1934 * If the transaction is being torn down prior to being
1935 * processed by the target process, free all of the
1936 * fd fixups and fput the file structs. It is safe to
1937 * call this function after the fixups have been
1938 * processed -- in that case, the list will be empty.
1939 */ 1940 static void binder_free_txn_fixups(struct binder_transaction *t) 1941 { 1942 struct binder_txn_fd_fixup *fixup, *tmp; 1943 1944 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { 1945 fput(fixup->file); 1946 list_del(&fixup->fixup_entry); 1947 kfree(fixup); 1948 } 1949 } 1950 1951 static void binder_free_transaction(struct binder_transaction *t) 1952 { 1953 if (t->buffer) 1954 t->buffer->transaction = NULL; 1955 binder_free_txn_fixups(t); 1956 kfree(t); 1957 binder_stats_deleted(BINDER_STAT_TRANSACTION); 1958 } 1959 1960 static void binder_send_failed_reply(struct binder_transaction *t, 1961 uint32_t error_code) 1962 { 1963 struct binder_thread *target_thread; 1964 struct binder_transaction *next; 1965 1966 BUG_ON(t->flags & TF_ONE_WAY); 1967 while (1) { 1968 target_thread = binder_get_txn_from_and_acq_inner(t); 1969 if (target_thread) { 1970 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 1971 "send failed reply for transaction %d to %d:%d\n", 1972 t->debug_id, 1973 target_thread->proc->pid, 1974 target_thread->pid); 1975 1976 binder_pop_transaction_ilocked(target_thread, t); 1977 if (target_thread->reply_error.cmd == BR_OK) { 1978 target_thread->reply_error.cmd = error_code; 1979 binder_enqueue_thread_work_ilocked( 1980 target_thread, 1981 &target_thread->reply_error.work); 1982 wake_up_interruptible(&target_thread->wait); 1983 } else { 1984 /* 1985 * Cannot get here for normal operation, but 1986 * we can if multiple synchronous transactions 1987 * are sent without blocking for responses. 1988 * Just ignore the 2nd error in this case. 1989 */ 1990 pr_warn("Unexpected reply error: %u\n", 1991 target_thread->reply_error.cmd); 1992 } 1993 binder_inner_proc_unlock(target_thread->proc); 1994 binder_thread_dec_tmpref(target_thread); 1995 binder_free_transaction(t); 1996 return; 1997 } else { 1998 __release(&target_thread->proc->inner_lock); 1999 } 2000 next = t->from_parent; 2001 2002 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 2003 "send failed reply for transaction %d, target dead\n", 2004 t->debug_id); 2005 2006 binder_free_transaction(t); 2007 if (next == NULL) { 2008 binder_debug(BINDER_DEBUG_DEAD_BINDER, 2009 "reply failed, no target thread at root\n"); 2010 return; 2011 } 2012 t = next; 2013 binder_debug(BINDER_DEBUG_DEAD_BINDER, 2014 "reply failed, no target thread -- retry %d\n", 2015 t->debug_id); 2016 } 2017 } 2018 2019 /** 2020 * binder_cleanup_transaction() - cleans up undelivered transaction 2021 * @t: transaction that needs to be cleaned up 2022 * @reason: reason the transaction wasn't delivered 2023 * @error_code: error to return to caller (if synchronous call) 2024 */ 2025 static void binder_cleanup_transaction(struct binder_transaction *t, 2026 const char *reason, 2027 uint32_t error_code) 2028 { 2029 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) { 2030 binder_send_failed_reply(t, error_code); 2031 } else { 2032 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 2033 "undelivered transaction %d, %s\n", 2034 t->debug_id, reason); 2035 binder_free_transaction(t); 2036 } 2037 } 2038 2039 /** 2040 * binder_get_object() - gets object and checks for valid metadata 2041 * @proc: binder_proc owning the buffer 2042 * @buffer: binder_buffer that we're parsing. 2043 * @offset: offset in the @buffer at which to validate an object. 2044 * @object: struct binder_object to read into 2045 * 2046 * Return: If there's a valid metadata object at @offset in @buffer, the 2047 * size of that object. Otherwise, it returns zero. 
The object
2048 * is read into the struct binder_object pointed to by @object.
2049 */
2050 static size_t binder_get_object(struct binder_proc *proc,
2051 struct binder_buffer *buffer,
2052 unsigned long offset,
2053 struct binder_object *object)
2054 {
2055 size_t read_size;
2056 struct binder_object_header *hdr;
2057 size_t object_size = 0;
2058
2059 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
2060 if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
2061 !IS_ALIGNED(offset, sizeof(u32)))
2062 return 0;
2063 binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
2064 offset, read_size);
2065
2066 /* Ok, now see if we read a complete object. */
2067 hdr = &object->hdr;
2068 switch (hdr->type) {
2069 case BINDER_TYPE_BINDER:
2070 case BINDER_TYPE_WEAK_BINDER:
2071 case BINDER_TYPE_HANDLE:
2072 case BINDER_TYPE_WEAK_HANDLE:
2073 object_size = sizeof(struct flat_binder_object);
2074 break;
2075 case BINDER_TYPE_FD:
2076 object_size = sizeof(struct binder_fd_object);
2077 break;
2078 case BINDER_TYPE_PTR:
2079 object_size = sizeof(struct binder_buffer_object);
2080 break;
2081 case BINDER_TYPE_FDA:
2082 object_size = sizeof(struct binder_fd_array_object);
2083 break;
2084 default:
2085 return 0;
2086 }
2087 if (offset <= buffer->data_size - object_size &&
2088 buffer->data_size >= object_size)
2089 return object_size;
2090 else
2091 return 0;
2092 }
2093
2094 /**
2095 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2096 * @proc: binder_proc owning the buffer
2097 * @b: binder_buffer containing the object
2098 * @object: struct binder_object to read into
2099 * @index: index in offset array at which the binder_buffer_object is
2100 * located
2101 * @start_offset: points to the start of the offset array
2102 * @object_offsetp: offset of @object read from @b
2103 * @num_valid: the number of valid offsets in the offset array
2104 *
2105 * Return: If @index is within the valid range of the offset array
2106 * described by @start_offset and @num_valid, and if there's a valid
2107 * binder_buffer_object at the offset found in index @index
2108 * of the offset array, that object is returned. Otherwise,
2109 * %NULL is returned.
2110 * Note that the offset found in index @index itself is not
2111 * verified; this function assumes that @num_valid elements
2112 * from @start_offset were previously verified to have valid offsets.
2113 * If @object_offsetp is non-NULL, then the offset within
2114 * @b is written to it.
2115 */
2116 static struct binder_buffer_object *binder_validate_ptr(
2117 struct binder_proc *proc,
2118 struct binder_buffer *b,
2119 struct binder_object *object,
2120 binder_size_t index,
2121 binder_size_t start_offset,
2122 binder_size_t *object_offsetp,
2123 binder_size_t num_valid)
2124 {
2125 size_t object_size;
2126 binder_size_t object_offset;
2127 unsigned long buffer_offset;
2128
2129 if (index >= num_valid)
2130 return NULL;
2131
2132 buffer_offset = start_offset + sizeof(binder_size_t) * index;
2133 binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2134 b, buffer_offset, sizeof(object_offset));
2135 object_size = binder_get_object(proc, b, object_offset, object);
2136 if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
2137 return NULL;
2138 if (object_offsetp)
2139 *object_offsetp = object_offset;
2140
2141 return &object->bbo;
2142 }
2143
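/*
 * Illustrative sketch, not driver code: callers below combine
 * binder_alloc_copy_from_buffer() and binder_get_object() to walk the
 * offset array safely; buf, off_start and off_end are placeholders:
 *
 *	binder_size_t off, pos;
 *	struct binder_object obj;
 *
 *	for (pos = off_start; pos < off_end; pos += sizeof(binder_size_t)) {
 *		binder_alloc_copy_from_buffer(&proc->alloc, &off, buf,
 *					      pos, sizeof(off));
 *		if (!binder_get_object(proc, buf, off, &obj))
 *			break;		// no valid object at this offset
 *		// dispatch on obj.hdr.type ...
 *	}
 */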
2144 /**
2145 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2146 * @proc: binder_proc owning the buffer
2147 * @b: transaction buffer
2148 * @objects_start_offset: offset to start of objects buffer
2149 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
2150 * @fixup_offset: start offset in @b to fix up
2151 * @last_obj_offset: offset to last binder_buffer_object that we fixed
2152 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
2153 *
2154 * Return: %true if a fixup in buffer @b at offset @fixup_offset is
2155 * allowed.
2156 *
2157 * For safety reasons, we only allow fixups inside a buffer to happen
2158 * at increasing offsets; additionally, we only allow fixup on the last
2159 * buffer object that was verified, or one of its parents.
2160 *
2161 * Example of what is allowed:
2162 *
2163 * A
2164 * B (parent = A, offset = 0)
2165 * C (parent = A, offset = 16)
2166 * D (parent = C, offset = 0)
2167 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2168 *
2169 * Examples of what is not allowed:
2170 *
2171 * Decreasing offsets within the same parent:
2172 * A
2173 * C (parent = A, offset = 16)
2174 * B (parent = A, offset = 0) // decreasing offset within A
2175 *
2176 * Referring to a parent that wasn't the last object or any of its parents:
2177 * A
2178 * B (parent = A, offset = 0)
2179 * C (parent = A, offset = 0)
2180 * C (parent = A, offset = 16)
2181 * D (parent = B, offset = 0) // B is not A or any of A's parents
2182 */
2183 static bool binder_validate_fixup(struct binder_proc *proc,
2184 struct binder_buffer *b,
2185 binder_size_t objects_start_offset,
2186 binder_size_t buffer_obj_offset,
2187 binder_size_t fixup_offset,
2188 binder_size_t last_obj_offset,
2189 binder_size_t last_min_offset)
2190 {
2191 if (!last_obj_offset) {
2192 /* No verified buffer object to fix up in */
2193 return false;
2194 }
2195
2196 while (last_obj_offset != buffer_obj_offset) {
2197 unsigned long buffer_offset;
2198 struct binder_object last_object;
2199 struct binder_buffer_object *last_bbo;
2200 size_t object_size = binder_get_object(proc, b, last_obj_offset,
2201 &last_object);
2202 if (object_size != sizeof(*last_bbo))
2203 return false;
2204
2205 last_bbo = &last_object.bbo;
2206 /*
2207 * Safe to retrieve the parent of last_obj, since it
2208 * was already previously verified by the driver.
2209 */
2210 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2211 return false;
2212 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
2213 buffer_offset = objects_start_offset +
2214 sizeof(binder_size_t) * last_bbo->parent;
2215 binder_alloc_copy_from_buffer(&proc->alloc, &last_obj_offset,
2216 b, buffer_offset,
2217 sizeof(last_obj_offset));
2218 }
2219 return (fixup_offset >= last_min_offset);
2220 }
2221
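/*
 * Illustrative sketch, not driver code: callers thread the last
 * verified object through successive calls. Using the "allowed"
 * example above, once B (parent = A, offset = 0) has been fixed up,
 * walking back to A sets the minimum offset past B's fixup, so a
 * later fixup into A at offset 16 passes while a second fixup at
 * offset 0 would be rejected; offset_of_A and last_* are placeholders:
 *
 *	ok = binder_validate_fixup(proc, b, objs_start,
 *				   offset_of_A, 16,
 *				   last_obj_off, last_min_off);
 */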
2222 /**
2223 * struct binder_task_work_cb - for deferred close
2224 *
2225 * @twork: callback_head for task work
2226 * @file: file to close
2227 *
2228 * Structure to pass task work to be handled after
2229 * returning from binder_ioctl() via task_work_add().
2230 */
2231 struct binder_task_work_cb {
2232 struct callback_head twork;
2233 struct file *file;
2234 };
2235
2236 /**
2237 * binder_do_fd_close() - close a file via task work
2238 * @twork: callback head for task work
2239 *
2240 * It is not safe to call ksys_close() during the binder_ioctl()
2241 * function if there is a chance that binder's own file descriptor
2242 * might be closed. This is to meet the requirements for using
2243 * fdget() (see comments for __fget_light()). Therefore use
2244 * task_work_add() to schedule the close operation once we have
2245 * returned from binder_ioctl(). This function is a callback
2246 * for that mechanism and does the final fput() on the
2247 * given file.
2248 */
2249 static void binder_do_fd_close(struct callback_head *twork)
2250 {
2251 struct binder_task_work_cb *twcb = container_of(twork,
2252 struct binder_task_work_cb, twork);
2253
2254 fput(twcb->file);
2255 kfree(twcb);
2256 }
2257
2258 /**
2259 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2260 * @fd: file-descriptor to close
2261 *
2262 * See comments in binder_do_fd_close(). This function is used to schedule
2263 * a file-descriptor to be closed after returning from binder_ioctl().
2264 */
2265 static void binder_deferred_fd_close(int fd)
2266 {
2267 struct binder_task_work_cb *twcb;
2268
2269 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2270 if (!twcb)
2271 return;
2272 init_task_work(&twcb->twork, binder_do_fd_close);
2273 __close_fd_get_file(fd, &twcb->file);
2274 if (twcb->file)
2275 task_work_add(current, &twcb->twork, true);
2276 else
2277 kfree(twcb);
2278 }
2279
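/*
 * Illustrative sketch, not driver code: the net effect of
 * binder_deferred_fd_close(fd) is roughly the two steps below, with
 * the final fput() delayed via task_work until binder_ioctl() has
 * returned to user space:
 *
 *	struct file *file;
 *
 *	__close_fd_get_file(fd, &file);	// remove fd from the table now
 *	if (file)
 *		fput(file);		// deferred: binder_do_fd_close()
 *
 * Splitting the close this way meets the fdget() requirements noted
 * above, since no release method can run inside binder_ioctl().
 */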
2280 static void binder_transaction_buffer_release(struct binder_proc *proc,
2281 struct binder_buffer *buffer,
2282 binder_size_t failed_at,
2283 bool is_failure)
2284 {
2285 int debug_id = buffer->debug_id;
2286 binder_size_t off_start_offset, buffer_offset, off_end_offset;
2287
2288 binder_debug(BINDER_DEBUG_TRANSACTION,
2289 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2290 proc->pid, buffer->debug_id,
2291 buffer->data_size, buffer->offsets_size,
2292 (unsigned long long)failed_at);
2293
2294 if (buffer->target_node)
2295 binder_dec_node(buffer->target_node, 1, 0);
2296
2297 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2298 off_end_offset = is_failure ? failed_at :
2299 off_start_offset + buffer->offsets_size;
2300 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2301 buffer_offset += sizeof(binder_size_t)) {
2302 struct binder_object_header *hdr;
2303 size_t object_size;
2304 struct binder_object object;
2305 binder_size_t object_offset;
2306
2307 binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2308 buffer, buffer_offset,
2309 sizeof(object_offset));
2310 object_size = binder_get_object(proc, buffer,
2311 object_offset, &object);
2312 if (object_size == 0) {
2313 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2314 debug_id, (u64)object_offset, buffer->data_size);
2315 continue;
2316 }
2317 hdr = &object.hdr;
2318 switch (hdr->type) {
2319 case BINDER_TYPE_BINDER:
2320 case BINDER_TYPE_WEAK_BINDER: {
2321 struct flat_binder_object *fp;
2322 struct binder_node *node;
2323
2324 fp = to_flat_binder_object(hdr);
2325 node = binder_get_node(proc, fp->binder);
2326 if (node == NULL) {
2327 pr_err("transaction release %d bad node %016llx\n",
2328 debug_id, (u64)fp->binder);
2329 break;
2330 }
2331 binder_debug(BINDER_DEBUG_TRANSACTION,
2332 " node %d u%016llx\n",
2333 node->debug_id, (u64)node->ptr);
2334 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2335 0);
2336 binder_put_node(node);
2337 } break;
2338 case BINDER_TYPE_HANDLE:
2339 case BINDER_TYPE_WEAK_HANDLE: {
2340 struct flat_binder_object *fp;
2341 struct binder_ref_data rdata;
2342 int ret;
2343
2344 fp = to_flat_binder_object(hdr);
2345 ret = binder_dec_ref_for_handle(proc, fp->handle,
2346 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2347
2348 if (ret) {
2349 pr_err("transaction release %d bad handle %d, ret = %d\n",
2350 debug_id, fp->handle, ret);
2351 break;
2352 }
2353 binder_debug(BINDER_DEBUG_TRANSACTION,
2354 " ref %d desc %d\n",
2355 rdata.debug_id, rdata.desc);
2356 } break;
2357
2358 case BINDER_TYPE_FD: {
2359 /*
2360 * No need to close the file here since user-space
2361 * closes it for successfully delivered
2362 * transactions. For transactions that weren't
2363 * delivered, the new fd was never allocated so
2364 * there is no need to close it, and the fput on the
2365 * file is done when the transaction is torn
2366 * down.
2367 */
2368 WARN_ON(failed_at &&
2369 proc->tsk == current->group_leader);
2370 } break;
2371 case BINDER_TYPE_PTR:
2372 /*
2373 * Nothing to do here, this will get cleaned up when the
2374 * transaction buffer gets freed
2375 */
2376 break;
2377 case BINDER_TYPE_FDA: {
2378 struct binder_fd_array_object *fda;
2379 struct binder_buffer_object *parent;
2380 struct binder_object ptr_object;
2381 binder_size_t fda_offset;
2382 size_t fd_index;
2383 binder_size_t fd_buf_size;
2384 binder_size_t num_valid;
2385
2386 if (proc->tsk != current->group_leader) {
2387 /*
2388 * Nothing to do if running in sender context.
2389 * The fd fixups have not been applied so no
2390 * fds need to be closed.
2391 */ 2392 continue; 2393 } 2394 2395 num_valid = (buffer_offset - off_start_offset) / 2396 sizeof(binder_size_t); 2397 fda = to_binder_fd_array_object(hdr); 2398 parent = binder_validate_ptr(proc, buffer, &ptr_object, 2399 fda->parent, 2400 off_start_offset, 2401 NULL, 2402 num_valid); 2403 if (!parent) { 2404 pr_err("transaction release %d bad parent offset\n", 2405 debug_id); 2406 continue; 2407 } 2408 fd_buf_size = sizeof(u32) * fda->num_fds; 2409 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 2410 pr_err("transaction release %d invalid number of fds (%lld)\n", 2411 debug_id, (u64)fda->num_fds); 2412 continue; 2413 } 2414 if (fd_buf_size > parent->length || 2415 fda->parent_offset > parent->length - fd_buf_size) { 2416 /* No space for all file descriptors here. */ 2417 pr_err("transaction release %d not enough space for %lld fds in buffer\n", 2418 debug_id, (u64)fda->num_fds); 2419 continue; 2420 } 2421 /* 2422 * the source data for binder_buffer_object is visible 2423 * to user-space and the @buffer element is the user 2424 * pointer to the buffer_object containing the fd_array. 2425 * Convert the address to an offset relative to 2426 * the base of the transaction buffer. 2427 */ 2428 fda_offset = 2429 (parent->buffer - (uintptr_t)buffer->user_data) + 2430 fda->parent_offset; 2431 for (fd_index = 0; fd_index < fda->num_fds; 2432 fd_index++) { 2433 u32 fd; 2434 binder_size_t offset = fda_offset + 2435 fd_index * sizeof(fd); 2436 2437 binder_alloc_copy_from_buffer(&proc->alloc, 2438 &fd, 2439 buffer, 2440 offset, 2441 sizeof(fd)); 2442 binder_deferred_fd_close(fd); 2443 } 2444 } break; 2445 default: 2446 pr_err("transaction release %d bad object type %x\n", 2447 debug_id, hdr->type); 2448 break; 2449 } 2450 } 2451 } 2452 2453 static int binder_translate_binder(struct flat_binder_object *fp, 2454 struct binder_transaction *t, 2455 struct binder_thread *thread) 2456 { 2457 struct binder_node *node; 2458 struct binder_proc *proc = thread->proc; 2459 struct binder_proc *target_proc = t->to_proc; 2460 struct binder_ref_data rdata; 2461 int ret = 0; 2462 2463 node = binder_get_node(proc, fp->binder); 2464 if (!node) { 2465 node = binder_new_node(proc, fp); 2466 if (!node) 2467 return -ENOMEM; 2468 } 2469 if (fp->cookie != node->cookie) { 2470 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n", 2471 proc->pid, thread->pid, (u64)fp->binder, 2472 node->debug_id, (u64)fp->cookie, 2473 (u64)node->cookie); 2474 ret = -EINVAL; 2475 goto done; 2476 } 2477 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { 2478 ret = -EPERM; 2479 goto done; 2480 } 2481 2482 ret = binder_inc_ref_for_node(target_proc, node, 2483 fp->hdr.type == BINDER_TYPE_BINDER, 2484 &thread->todo, &rdata); 2485 if (ret) 2486 goto done; 2487 2488 if (fp->hdr.type == BINDER_TYPE_BINDER) 2489 fp->hdr.type = BINDER_TYPE_HANDLE; 2490 else 2491 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE; 2492 fp->binder = 0; 2493 fp->handle = rdata.desc; 2494 fp->cookie = 0; 2495 2496 trace_binder_transaction_node_to_ref(t, node, &rdata); 2497 binder_debug(BINDER_DEBUG_TRANSACTION, 2498 " node %d u%016llx -> ref %d desc %d\n", 2499 node->debug_id, (u64)node->ptr, 2500 rdata.debug_id, rdata.desc); 2501 done: 2502 binder_put_node(node); 2503 return ret; 2504 } 2505 2506 static int binder_translate_handle(struct flat_binder_object *fp, 2507 struct binder_transaction *t, 2508 struct binder_thread *thread) 2509 { 2510 struct binder_proc *proc = thread->proc; 2511 struct binder_proc *target_proc = t->to_proc; 
2512 struct binder_node *node; 2513 struct binder_ref_data src_rdata; 2514 int ret = 0; 2515 2516 node = binder_get_node_from_ref(proc, fp->handle, 2517 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata); 2518 if (!node) { 2519 binder_user_error("%d:%d got transaction with invalid handle, %d\n", 2520 proc->pid, thread->pid, fp->handle); 2521 return -EINVAL; 2522 } 2523 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { 2524 ret = -EPERM; 2525 goto done; 2526 } 2527 2528 binder_node_lock(node); 2529 if (node->proc == target_proc) { 2530 if (fp->hdr.type == BINDER_TYPE_HANDLE) 2531 fp->hdr.type = BINDER_TYPE_BINDER; 2532 else 2533 fp->hdr.type = BINDER_TYPE_WEAK_BINDER; 2534 fp->binder = node->ptr; 2535 fp->cookie = node->cookie; 2536 if (node->proc) 2537 binder_inner_proc_lock(node->proc); 2538 else 2539 __acquire(&node->proc->inner_lock); 2540 binder_inc_node_nilocked(node, 2541 fp->hdr.type == BINDER_TYPE_BINDER, 2542 0, NULL); 2543 if (node->proc) 2544 binder_inner_proc_unlock(node->proc); 2545 else 2546 __release(&node->proc->inner_lock); 2547 trace_binder_transaction_ref_to_node(t, node, &src_rdata); 2548 binder_debug(BINDER_DEBUG_TRANSACTION, 2549 " ref %d desc %d -> node %d u%016llx\n", 2550 src_rdata.debug_id, src_rdata.desc, node->debug_id, 2551 (u64)node->ptr); 2552 binder_node_unlock(node); 2553 } else { 2554 struct binder_ref_data dest_rdata; 2555 2556 binder_node_unlock(node); 2557 ret = binder_inc_ref_for_node(target_proc, node, 2558 fp->hdr.type == BINDER_TYPE_HANDLE, 2559 NULL, &dest_rdata); 2560 if (ret) 2561 goto done; 2562 2563 fp->binder = 0; 2564 fp->handle = dest_rdata.desc; 2565 fp->cookie = 0; 2566 trace_binder_transaction_ref_to_ref(t, node, &src_rdata, 2567 &dest_rdata); 2568 binder_debug(BINDER_DEBUG_TRANSACTION, 2569 " ref %d desc %d -> ref %d desc %d (node %d)\n", 2570 src_rdata.debug_id, src_rdata.desc, 2571 dest_rdata.debug_id, dest_rdata.desc, 2572 node->debug_id); 2573 } 2574 done: 2575 binder_put_node(node); 2576 return ret; 2577 } 2578 2579 static int binder_translate_fd(u32 fd, binder_size_t fd_offset, 2580 struct binder_transaction *t, 2581 struct binder_thread *thread, 2582 struct binder_transaction *in_reply_to) 2583 { 2584 struct binder_proc *proc = thread->proc; 2585 struct binder_proc *target_proc = t->to_proc; 2586 struct binder_txn_fd_fixup *fixup; 2587 struct file *file; 2588 int ret = 0; 2589 bool target_allows_fd; 2590 2591 if (in_reply_to) 2592 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS); 2593 else 2594 target_allows_fd = t->buffer->target_node->accept_fds; 2595 if (!target_allows_fd) { 2596 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n", 2597 proc->pid, thread->pid, 2598 in_reply_to ? "reply" : "transaction", 2599 fd); 2600 ret = -EPERM; 2601 goto err_fd_not_accepted; 2602 } 2603 2604 file = fget(fd); 2605 if (!file) { 2606 binder_user_error("%d:%d got transaction with invalid fd, %d\n", 2607 proc->pid, thread->pid, fd); 2608 ret = -EBADF; 2609 goto err_fget; 2610 } 2611 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file); 2612 if (ret < 0) { 2613 ret = -EPERM; 2614 goto err_security; 2615 } 2616 2617 /* 2618 * Add fixup record for this transaction. The allocation 2619 * of the fd in the target needs to be done from a 2620 * target thread. 
2621 */ 2622 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL); 2623 if (!fixup) { 2624 ret = -ENOMEM; 2625 goto err_alloc; 2626 } 2627 fixup->file = file; 2628 fixup->offset = fd_offset; 2629 trace_binder_transaction_fd_send(t, fd, fixup->offset); 2630 list_add_tail(&fixup->fixup_entry, &t->fd_fixups); 2631 2632 return ret; 2633 2634 err_alloc: 2635 err_security: 2636 fput(file); 2637 err_fget: 2638 err_fd_not_accepted: 2639 return ret; 2640 } 2641 2642 static int binder_translate_fd_array(struct binder_fd_array_object *fda, 2643 struct binder_buffer_object *parent, 2644 struct binder_transaction *t, 2645 struct binder_thread *thread, 2646 struct binder_transaction *in_reply_to) 2647 { 2648 binder_size_t fdi, fd_buf_size; 2649 binder_size_t fda_offset; 2650 struct binder_proc *proc = thread->proc; 2651 struct binder_proc *target_proc = t->to_proc; 2652 2653 fd_buf_size = sizeof(u32) * fda->num_fds; 2654 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 2655 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n", 2656 proc->pid, thread->pid, (u64)fda->num_fds); 2657 return -EINVAL; 2658 } 2659 if (fd_buf_size > parent->length || 2660 fda->parent_offset > parent->length - fd_buf_size) { 2661 /* No space for all file descriptors here. */ 2662 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n", 2663 proc->pid, thread->pid, (u64)fda->num_fds); 2664 return -EINVAL; 2665 } 2666 /* 2667 * the source data for binder_buffer_object is visible 2668 * to user-space and the @buffer element is the user 2669 * pointer to the buffer_object containing the fd_array. 2670 * Convert the address to an offset relative to 2671 * the base of the transaction buffer. 2672 */ 2673 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) + 2674 fda->parent_offset; 2675 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) { 2676 binder_user_error("%d:%d parent offset not aligned correctly.\n", 2677 proc->pid, thread->pid); 2678 return -EINVAL; 2679 } 2680 for (fdi = 0; fdi < fda->num_fds; fdi++) { 2681 u32 fd; 2682 int ret; 2683 binder_size_t offset = fda_offset + fdi * sizeof(fd); 2684 2685 binder_alloc_copy_from_buffer(&target_proc->alloc, 2686 &fd, t->buffer, 2687 offset, sizeof(fd)); 2688 ret = binder_translate_fd(fd, offset, t, thread, 2689 in_reply_to); 2690 if (ret < 0) 2691 return ret; 2692 } 2693 return 0; 2694 } 2695 2696 static int binder_fixup_parent(struct binder_transaction *t, 2697 struct binder_thread *thread, 2698 struct binder_buffer_object *bp, 2699 binder_size_t off_start_offset, 2700 binder_size_t num_valid, 2701 binder_size_t last_fixup_obj_off, 2702 binder_size_t last_fixup_min_off) 2703 { 2704 struct binder_buffer_object *parent; 2705 struct binder_buffer *b = t->buffer; 2706 struct binder_proc *proc = thread->proc; 2707 struct binder_proc *target_proc = t->to_proc; 2708 struct binder_object object; 2709 binder_size_t buffer_offset; 2710 binder_size_t parent_offset; 2711 2712 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT)) 2713 return 0; 2714 2715 parent = binder_validate_ptr(target_proc, b, &object, bp->parent, 2716 off_start_offset, &parent_offset, 2717 num_valid); 2718 if (!parent) { 2719 binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 2720 proc->pid, thread->pid); 2721 return -EINVAL; 2722 } 2723 2724 if (!binder_validate_fixup(target_proc, b, off_start_offset, 2725 parent_offset, bp->parent_offset, 2726 last_fixup_obj_off, 2727 last_fixup_min_off)) { 2728 binder_user_error("%d:%d got transaction with 
out-of-order buffer fixup\n",
2729 proc->pid, thread->pid);
2730 return -EINVAL;
2731 }
2732
2733 if (parent->length < sizeof(binder_uintptr_t) ||
2734 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2735 /* No space for a pointer here! */
2736 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2737 proc->pid, thread->pid);
2738 return -EINVAL;
2739 }
2740 buffer_offset = bp->parent_offset +
2741 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2742 binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
2743 &bp->buffer, sizeof(bp->buffer));
2744
2745 return 0;
2746 }
2747
2748 /**
2749 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2750 * @t: transaction to send
2751 * @proc: process to send the transaction to
2752 * @thread: thread in @proc to send the transaction to (may be NULL)
2753 *
2754 * This function queues a transaction to the specified process. It will try
2755 * to find a thread in the target process to handle the transaction and
2756 * wake it up. If no thread is found, the work is queued to the proc
2757 * waitqueue.
2758 *
2759 * If the @thread parameter is not NULL, the transaction is always queued
2760 * to the waitlist of that specific thread.
2761 *
2762 * Return: true if the transaction was successfully queued
2763 * false if the target process or thread is dead
2764 */
2765 static bool binder_proc_transaction(struct binder_transaction *t,
2766 struct binder_proc *proc,
2767 struct binder_thread *thread)
2768 {
2769 struct binder_node *node = t->buffer->target_node;
2770 bool oneway = !!(t->flags & TF_ONE_WAY);
2771 bool pending_async = false;
2772
2773 BUG_ON(!node);
2774 binder_node_lock(node);
2775 if (oneway) {
2776 BUG_ON(thread);
2777 if (node->has_async_transaction) {
2778 pending_async = true;
2779 } else {
2780 node->has_async_transaction = true;
2781 }
2782 }
2783
2784 binder_inner_proc_lock(proc);
2785
2786 if (proc->is_dead || (thread && thread->is_dead)) {
2787 binder_inner_proc_unlock(proc);
2788 binder_node_unlock(node);
2789 return false;
2790 }
2791
2792 if (!thread && !pending_async)
2793 thread = binder_select_thread_ilocked(proc);
2794
2795 if (thread)
2796 binder_enqueue_thread_work_ilocked(thread, &t->work);
2797 else if (!pending_async)
2798 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2799 else
2800 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2801
2802 if (!pending_async)
2803 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2804
2805 binder_inner_proc_unlock(proc);
2806 binder_node_unlock(node);
2807
2808 return true;
2809 }
2810
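/*
 * Illustrative decision table, not driver code: the queueing choice
 * made by binder_proc_transaction() above can be summarized as:
 *
 *	@thread given (reply path)	-> thread->todo, wake that thread
 *	oneway, async already pending	-> node->async_todo, no wakeup
 *	waiting thread available	-> thread->todo, wake that thread
 *	otherwise			-> proc->todo, wake some thread
 */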
2811 /**
2812 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2813 * @node: struct binder_node for which to get refs
2814 * @procp: returns @node->proc if valid
2815 * @error: set to BR_DEAD_REPLY if @node->proc is NULL
2816 *
2817 * User-space normally keeps the node alive when creating a transaction
2818 * since it has a reference to the target. The local strong ref keeps it
2819 * alive if the sending process dies before the target process processes
2820 * the transaction. If the source process is malicious or has a reference
2821 * counting bug, relying on the local strong ref can fail.
2822 *
2823 * Since user-space can cause the local strong ref to go away, we also take
2824 * a tmpref on the node to ensure it survives while we are constructing
2825 * the transaction. We also need a tmpref on the proc while we are
2826 * constructing the transaction, so we take that here as well.
2827 *
2828 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2829 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2830 * target proc has died, @error is set to BR_DEAD_REPLY.
2831 */
2832 static struct binder_node *binder_get_node_refs_for_txn(
2833 struct binder_node *node,
2834 struct binder_proc **procp,
2835 uint32_t *error)
2836 {
2837 struct binder_node *target_node = NULL;
2838
2839 binder_node_inner_lock(node);
2840 if (node->proc) {
2841 target_node = node;
2842 binder_inc_node_nilocked(node, 1, 0, NULL);
2843 binder_inc_node_tmpref_ilocked(node);
2844 node->proc->tmp_ref++;
2845 *procp = node->proc;
2846 } else
2847 *error = BR_DEAD_REPLY;
2848 binder_node_inner_unlock(node);
2849
2850 return target_node;
2851 }
2852
2853 static void binder_transaction(struct binder_proc *proc,
2854 struct binder_thread *thread,
2855 struct binder_transaction_data *tr, int reply,
2856 binder_size_t extra_buffers_size)
2857 {
2858 int ret;
2859 struct binder_transaction *t;
2860 struct binder_work *w;
2861 struct binder_work *tcomplete;
2862 binder_size_t buffer_offset = 0;
2863 binder_size_t off_start_offset, off_end_offset;
2864 binder_size_t off_min;
2865 binder_size_t sg_buf_offset, sg_buf_end_offset;
2866 struct binder_proc *target_proc = NULL;
2867 struct binder_thread *target_thread = NULL;
2868 struct binder_node *target_node = NULL;
2869 struct binder_transaction *in_reply_to = NULL;
2870 struct binder_transaction_log_entry *e;
2871 uint32_t return_error = 0;
2872 uint32_t return_error_param = 0;
2873 uint32_t return_error_line = 0;
2874 binder_size_t last_fixup_obj_off = 0;
2875 binder_size_t last_fixup_min_off = 0;
2876 struct binder_context *context = proc->context;
2877 int t_debug_id = atomic_inc_return(&binder_last_id);
2878 char *secctx = NULL;
2879 u32 secctx_sz = 0;
2880
2881 e = binder_transaction_log_add(&binder_transaction_log);
2882 e->debug_id = t_debug_id;
2883 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2884 e->from_proc = proc->pid;
2885 e->from_thread = thread->pid;
2886 e->target_handle = tr->target.handle;
2887 e->data_size = tr->data_size;
2888 e->offsets_size = tr->offsets_size;
2889 e->context_name = proc->context->name;
2890
2891 if (reply) {
2892 binder_inner_proc_lock(proc);
2893 in_reply_to = thread->transaction_stack;
2894 if (in_reply_to == NULL) {
2895 binder_inner_proc_unlock(proc);
2896 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2897 proc->pid, thread->pid);
2898 return_error = BR_FAILED_REPLY;
2899 return_error_param = -EPROTO;
2900 return_error_line = __LINE__;
2901 goto err_empty_call_stack;
2902 }
2903 if (in_reply_to->to_thread != thread) {
2904 spin_lock(&in_reply_to->lock);
2905 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2906 proc->pid, thread->pid, in_reply_to->debug_id,
2907 in_reply_to->to_proc ?
2908 in_reply_to->to_proc->pid : 0,
2909 in_reply_to->to_thread ?
2910 in_reply_to->to_thread->pid : 0); 2911 spin_unlock(&in_reply_to->lock); 2912 binder_inner_proc_unlock(proc); 2913 return_error = BR_FAILED_REPLY; 2914 return_error_param = -EPROTO; 2915 return_error_line = __LINE__; 2916 in_reply_to = NULL; 2917 goto err_bad_call_stack; 2918 } 2919 thread->transaction_stack = in_reply_to->to_parent; 2920 binder_inner_proc_unlock(proc); 2921 binder_set_nice(in_reply_to->saved_priority); 2922 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to); 2923 if (target_thread == NULL) { 2924 /* annotation for sparse */ 2925 __release(&target_thread->proc->inner_lock); 2926 return_error = BR_DEAD_REPLY; 2927 return_error_line = __LINE__; 2928 goto err_dead_binder; 2929 } 2930 if (target_thread->transaction_stack != in_reply_to) { 2931 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n", 2932 proc->pid, thread->pid, 2933 target_thread->transaction_stack ? 2934 target_thread->transaction_stack->debug_id : 0, 2935 in_reply_to->debug_id); 2936 binder_inner_proc_unlock(target_thread->proc); 2937 return_error = BR_FAILED_REPLY; 2938 return_error_param = -EPROTO; 2939 return_error_line = __LINE__; 2940 in_reply_to = NULL; 2941 target_thread = NULL; 2942 goto err_dead_binder; 2943 } 2944 target_proc = target_thread->proc; 2945 target_proc->tmp_ref++; 2946 binder_inner_proc_unlock(target_thread->proc); 2947 } else { 2948 if (tr->target.handle) { 2949 struct binder_ref *ref; 2950 2951 /* 2952 * There must already be a strong ref 2953 * on this node. If so, do a strong 2954 * increment on the node to ensure it 2955 * stays alive until the transaction is 2956 * done. 2957 */ 2958 binder_proc_lock(proc); 2959 ref = binder_get_ref_olocked(proc, tr->target.handle, 2960 true); 2961 if (ref) { 2962 target_node = binder_get_node_refs_for_txn( 2963 ref->node, &target_proc, 2964 &return_error); 2965 } else { 2966 binder_user_error("%d:%d got transaction to invalid handle\n", 2967 proc->pid, thread->pid); 2968 return_error = BR_FAILED_REPLY; 2969 } 2970 binder_proc_unlock(proc); 2971 } else { 2972 mutex_lock(&context->context_mgr_node_lock); 2973 target_node = context->binder_context_mgr_node; 2974 if (target_node) 2975 target_node = binder_get_node_refs_for_txn( 2976 target_node, &target_proc, 2977 &return_error); 2978 else 2979 return_error = BR_DEAD_REPLY; 2980 mutex_unlock(&context->context_mgr_node_lock); 2981 if (target_node && target_proc == proc) { 2982 binder_user_error("%d:%d got transaction to context manager from process owning it\n", 2983 proc->pid, thread->pid); 2984 return_error = BR_FAILED_REPLY; 2985 return_error_param = -EINVAL; 2986 return_error_line = __LINE__; 2987 goto err_invalid_target_handle; 2988 } 2989 } 2990 if (!target_node) { 2991 /* 2992 * return_error is set above 2993 */ 2994 return_error_param = -EINVAL; 2995 return_error_line = __LINE__; 2996 goto err_dead_binder; 2997 } 2998 e->to_node = target_node->debug_id; 2999 if (security_binder_transaction(proc->tsk, 3000 target_proc->tsk) < 0) { 3001 return_error = BR_FAILED_REPLY; 3002 return_error_param = -EPERM; 3003 return_error_line = __LINE__; 3004 goto err_invalid_target_handle; 3005 } 3006 binder_inner_proc_lock(proc); 3007 3008 w = list_first_entry_or_null(&thread->todo, 3009 struct binder_work, entry); 3010 if (!(tr->flags & TF_ONE_WAY) && w && 3011 w->type == BINDER_WORK_TRANSACTION) { 3012 /* 3013 * Do not allow new outgoing transaction from a 3014 * thread that has a transaction at the head of 3015 * its todo list. 
Only need to check the head 3016 * because binder_select_thread_ilocked picks a 3017 * thread from proc->waiting_threads to enqueue 3018 * the transaction, and nothing is queued to the 3019 * todo list while the thread is on waiting_threads. 3020 */ 3021 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n", 3022 proc->pid, thread->pid); 3023 binder_inner_proc_unlock(proc); 3024 return_error = BR_FAILED_REPLY; 3025 return_error_param = -EPROTO; 3026 return_error_line = __LINE__; 3027 goto err_bad_todo_list; 3028 } 3029 3030 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { 3031 struct binder_transaction *tmp; 3032 3033 tmp = thread->transaction_stack; 3034 if (tmp->to_thread != thread) { 3035 spin_lock(&tmp->lock); 3036 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", 3037 proc->pid, thread->pid, tmp->debug_id, 3038 tmp->to_proc ? tmp->to_proc->pid : 0, 3039 tmp->to_thread ? 3040 tmp->to_thread->pid : 0); 3041 spin_unlock(&tmp->lock); 3042 binder_inner_proc_unlock(proc); 3043 return_error = BR_FAILED_REPLY; 3044 return_error_param = -EPROTO; 3045 return_error_line = __LINE__; 3046 goto err_bad_call_stack; 3047 } 3048 while (tmp) { 3049 struct binder_thread *from; 3050 3051 spin_lock(&tmp->lock); 3052 from = tmp->from; 3053 if (from && from->proc == target_proc) { 3054 atomic_inc(&from->tmp_ref); 3055 target_thread = from; 3056 spin_unlock(&tmp->lock); 3057 break; 3058 } 3059 spin_unlock(&tmp->lock); 3060 tmp = tmp->from_parent; 3061 } 3062 } 3063 binder_inner_proc_unlock(proc); 3064 } 3065 if (target_thread) 3066 e->to_thread = target_thread->pid; 3067 e->to_proc = target_proc->pid; 3068 3069 /* TODO: reuse incoming transaction for reply */ 3070 t = kzalloc(sizeof(*t), GFP_KERNEL); 3071 if (t == NULL) { 3072 return_error = BR_FAILED_REPLY; 3073 return_error_param = -ENOMEM; 3074 return_error_line = __LINE__; 3075 goto err_alloc_t_failed; 3076 } 3077 INIT_LIST_HEAD(&t->fd_fixups); 3078 binder_stats_created(BINDER_STAT_TRANSACTION); 3079 spin_lock_init(&t->lock); 3080 3081 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); 3082 if (tcomplete == NULL) { 3083 return_error = BR_FAILED_REPLY; 3084 return_error_param = -ENOMEM; 3085 return_error_line = __LINE__; 3086 goto err_alloc_tcomplete_failed; 3087 } 3088 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); 3089 3090 t->debug_id = t_debug_id; 3091 3092 if (reply) 3093 binder_debug(BINDER_DEBUG_TRANSACTION, 3094 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n", 3095 proc->pid, thread->pid, t->debug_id, 3096 target_proc->pid, target_thread->pid, 3097 (u64)tr->data.ptr.buffer, 3098 (u64)tr->data.ptr.offsets, 3099 (u64)tr->data_size, (u64)tr->offsets_size, 3100 (u64)extra_buffers_size); 3101 else 3102 binder_debug(BINDER_DEBUG_TRANSACTION, 3103 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n", 3104 proc->pid, thread->pid, t->debug_id, 3105 target_proc->pid, target_node->debug_id, 3106 (u64)tr->data.ptr.buffer, 3107 (u64)tr->data.ptr.offsets, 3108 (u64)tr->data_size, (u64)tr->offsets_size, 3109 (u64)extra_buffers_size); 3110 3111 if (!reply && !(tr->flags & TF_ONE_WAY)) 3112 t->from = thread; 3113 else 3114 t->from = NULL; 3115 t->sender_euid = task_euid(proc->tsk); 3116 t->to_proc = target_proc; 3117 t->to_thread = target_thread; 3118 t->code = tr->code; 3119 t->flags = tr->flags; 3120 t->priority = task_nice(current); 3121 3122 if (target_node && 
target_node->txn_security_ctx) { 3123 u32 secid; 3124 3125 security_task_getsecid(proc->tsk, &secid); 3126 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz); 3127 if (ret) { 3128 return_error = BR_FAILED_REPLY; 3129 return_error_param = ret; 3130 return_error_line = __LINE__; 3131 goto err_get_secctx_failed; 3132 } 3133 extra_buffers_size += ALIGN(secctx_sz, sizeof(u64)); 3134 } 3135 3136 trace_binder_transaction(reply, t, target_node); 3137 3138 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, 3139 tr->offsets_size, extra_buffers_size, 3140 !reply && (t->flags & TF_ONE_WAY)); 3141 if (IS_ERR(t->buffer)) { 3142 /* 3143 * -ESRCH indicates VMA cleared. The target is dying. 3144 */ 3145 return_error_param = PTR_ERR(t->buffer); 3146 return_error = return_error_param == -ESRCH ? 3147 BR_DEAD_REPLY : BR_FAILED_REPLY; 3148 return_error_line = __LINE__; 3149 t->buffer = NULL; 3150 goto err_binder_alloc_buf_failed; 3151 } 3152 if (secctx) { 3153 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) + 3154 ALIGN(tr->offsets_size, sizeof(void *)) + 3155 ALIGN(extra_buffers_size, sizeof(void *)) - 3156 ALIGN(secctx_sz, sizeof(u64)); 3157 3158 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset; 3159 binder_alloc_copy_to_buffer(&target_proc->alloc, 3160 t->buffer, buf_offset, 3161 secctx, secctx_sz); 3162 security_release_secctx(secctx, secctx_sz); 3163 secctx = NULL; 3164 } 3165 t->buffer->debug_id = t->debug_id; 3166 t->buffer->transaction = t; 3167 t->buffer->target_node = target_node; 3168 trace_binder_transaction_alloc_buf(t->buffer); 3169 3170 if (binder_alloc_copy_user_to_buffer( 3171 &target_proc->alloc, 3172 t->buffer, 0, 3173 (const void __user *) 3174 (uintptr_t)tr->data.ptr.buffer, 3175 tr->data_size)) { 3176 binder_user_error("%d:%d got transaction with invalid data ptr\n", 3177 proc->pid, thread->pid); 3178 return_error = BR_FAILED_REPLY; 3179 return_error_param = -EFAULT; 3180 return_error_line = __LINE__; 3181 goto err_copy_data_failed; 3182 } 3183 if (binder_alloc_copy_user_to_buffer( 3184 &target_proc->alloc, 3185 t->buffer, 3186 ALIGN(tr->data_size, sizeof(void *)), 3187 (const void __user *) 3188 (uintptr_t)tr->data.ptr.offsets, 3189 tr->offsets_size)) { 3190 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 3191 proc->pid, thread->pid); 3192 return_error = BR_FAILED_REPLY; 3193 return_error_param = -EFAULT; 3194 return_error_line = __LINE__; 3195 goto err_copy_data_failed; 3196 } 3197 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) { 3198 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n", 3199 proc->pid, thread->pid, (u64)tr->offsets_size); 3200 return_error = BR_FAILED_REPLY; 3201 return_error_param = -EINVAL; 3202 return_error_line = __LINE__; 3203 goto err_bad_offset; 3204 } 3205 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) { 3206 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n", 3207 proc->pid, thread->pid, 3208 (u64)extra_buffers_size); 3209 return_error = BR_FAILED_REPLY; 3210 return_error_param = -EINVAL; 3211 return_error_line = __LINE__; 3212 goto err_bad_offset; 3213 } 3214 off_start_offset = ALIGN(tr->data_size, sizeof(void *)); 3215 buffer_offset = off_start_offset; 3216 off_end_offset = off_start_offset + tr->offsets_size; 3217 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *)); 3218 sg_buf_end_offset = sg_buf_offset + extra_buffers_size; 3219 off_min = 0; 3220 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; 3221 
buffer_offset += sizeof(binder_size_t)) {
3222 struct binder_object_header *hdr;
3223 size_t object_size;
3224 struct binder_object object;
3225 binder_size_t object_offset;
3226
3227 binder_alloc_copy_from_buffer(&target_proc->alloc,
3228 &object_offset,
3229 t->buffer,
3230 buffer_offset,
3231 sizeof(object_offset));
3232 object_size = binder_get_object(target_proc, t->buffer,
3233 object_offset, &object);
3234 if (object_size == 0 || object_offset < off_min) {
3235 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3236 proc->pid, thread->pid,
3237 (u64)object_offset,
3238 (u64)off_min,
3239 (u64)t->buffer->data_size);
3240 return_error = BR_FAILED_REPLY;
3241 return_error_param = -EINVAL;
3242 return_error_line = __LINE__;
3243 goto err_bad_offset;
3244 }
3245
3246 hdr = &object.hdr;
3247 off_min = object_offset + object_size;
3248 switch (hdr->type) {
3249 case BINDER_TYPE_BINDER:
3250 case BINDER_TYPE_WEAK_BINDER: {
3251 struct flat_binder_object *fp;
3252
3253 fp = to_flat_binder_object(hdr);
3254 ret = binder_translate_binder(fp, t, thread);
3255 if (ret < 0) {
3256 return_error = BR_FAILED_REPLY;
3257 return_error_param = ret;
3258 return_error_line = __LINE__;
3259 goto err_translate_failed;
3260 }
3261 binder_alloc_copy_to_buffer(&target_proc->alloc,
3262 t->buffer, object_offset,
3263 fp, sizeof(*fp));
3264 } break;
3265 case BINDER_TYPE_HANDLE:
3266 case BINDER_TYPE_WEAK_HANDLE: {
3267 struct flat_binder_object *fp;
3268
3269 fp = to_flat_binder_object(hdr);
3270 ret = binder_translate_handle(fp, t, thread);
3271 if (ret < 0) {
3272 return_error = BR_FAILED_REPLY;
3273 return_error_param = ret;
3274 return_error_line = __LINE__;
3275 goto err_translate_failed;
3276 }
3277 binder_alloc_copy_to_buffer(&target_proc->alloc,
3278 t->buffer, object_offset,
3279 fp, sizeof(*fp));
3280 } break;
3281
3282 case BINDER_TYPE_FD: {
3283 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3284 binder_size_t fd_offset = object_offset +
3285 (uintptr_t)&fp->fd - (uintptr_t)fp;
3286 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3287 thread, in_reply_to);
3288
3289 if (ret < 0) {
3290 return_error = BR_FAILED_REPLY;
3291 return_error_param = ret;
3292 return_error_line = __LINE__;
3293 goto err_translate_failed;
3294 }
3295 fp->pad_binder = 0;
3296 binder_alloc_copy_to_buffer(&target_proc->alloc,
3297 t->buffer, object_offset,
3298 fp, sizeof(*fp));
3299 } break;
3300 case BINDER_TYPE_FDA: {
3301 struct binder_object ptr_object;
3302 binder_size_t parent_offset;
3303 struct binder_fd_array_object *fda =
3304 to_binder_fd_array_object(hdr);
3305 size_t num_valid = (buffer_offset - off_start_offset) /
3306 sizeof(binder_size_t);
3307 struct binder_buffer_object *parent =
3308 binder_validate_ptr(target_proc, t->buffer,
3309 &ptr_object, fda->parent,
3310 off_start_offset,
3311 &parent_offset,
3312 num_valid);
3313 if (!parent) {
3314 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3315 proc->pid, thread->pid);
3316 return_error = BR_FAILED_REPLY;
3317 return_error_param = -EINVAL;
3318 return_error_line = __LINE__;
3319 goto err_bad_parent;
3320 }
3321 if (!binder_validate_fixup(target_proc, t->buffer,
3322 off_start_offset,
3323 parent_offset,
3324 fda->parent_offset,
3325 last_fixup_obj_off,
3326 last_fixup_min_off)) {
3327 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3328 proc->pid, thread->pid);
3329 return_error = BR_FAILED_REPLY;
3330 return_error_param = -EINVAL;
3331 return_error_line = __LINE__;
3332 goto err_bad_parent;
3333 }
3334 ret = binder_translate_fd_array(fda, parent, t, thread,
3335 in_reply_to);
3336 if (ret < 0) {
3337 return_error = BR_FAILED_REPLY;
3338 return_error_param = ret;
3339 return_error_line = __LINE__;
3340 goto err_translate_failed;
3341 }
3342 last_fixup_obj_off = parent_offset;
3343 last_fixup_min_off =
3344 fda->parent_offset + sizeof(u32) * fda->num_fds;
3345 } break;
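/*
 * Illustrative layout, not driver code: a BINDER_TYPE_FDA object
 * points into an earlier BINDER_TYPE_PTR parent in the same offset
 * array, with fda->parent naming the parent's index and
 * fda->parent_offset locating the fd array inside it:
 *
 *	offsets[]:     [PTR(parent)] ... [FDA(parent, parent_offset, num_fds)]
 *	parent buffer: [ ... parent_offset: fd0 fd1 ... ]
 *
 * This is why num_valid above counts only the offsets processed so
 * far: an fd array may only refer to an already-validated parent.
 */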
3346 case BINDER_TYPE_PTR: {
3347 struct binder_buffer_object *bp =
3348 to_binder_buffer_object(hdr);
3349 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3350 size_t num_valid;
3351
3352 if (bp->length > buf_left) {
3353 binder_user_error("%d:%d got transaction with too large buffer\n",
3354 proc->pid, thread->pid);
3355 return_error = BR_FAILED_REPLY;
3356 return_error_param = -EINVAL;
3357 return_error_line = __LINE__;
3358 goto err_bad_offset;
3359 }
3360 if (binder_alloc_copy_user_to_buffer(
3361 &target_proc->alloc,
3362 t->buffer,
3363 sg_buf_offset,
3364 (const void __user *)
3365 (uintptr_t)bp->buffer,
3366 bp->length)) {
3367 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3368 proc->pid, thread->pid);
3369 return_error_param = -EFAULT;
3370 return_error = BR_FAILED_REPLY;
3371 return_error_line = __LINE__;
3372 goto err_copy_data_failed;
3373 }
3374 /* Fixup buffer pointer to target proc address space */
3375 bp->buffer = (uintptr_t)
3376 t->buffer->user_data + sg_buf_offset;
3377 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3378
3379 num_valid = (buffer_offset - off_start_offset) /
3380 sizeof(binder_size_t);
3381 ret = binder_fixup_parent(t, thread, bp,
3382 off_start_offset,
3383 num_valid,
3384 last_fixup_obj_off,
3385 last_fixup_min_off);
3386 if (ret < 0) {
3387 return_error = BR_FAILED_REPLY;
3388 return_error_param = ret;
3389 return_error_line = __LINE__;
3390 goto err_translate_failed;
3391 }
3392 binder_alloc_copy_to_buffer(&target_proc->alloc,
3393 t->buffer, object_offset,
3394 bp, sizeof(*bp));
3395 last_fixup_obj_off = object_offset;
3396 last_fixup_min_off = 0;
3397 } break;
3398 default:
3399 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3400 proc->pid, thread->pid, hdr->type);
3401 return_error = BR_FAILED_REPLY;
3402 return_error_param = -EINVAL;
3403 return_error_line = __LINE__;
3404 goto err_bad_object_type;
3405 }
3406 }
3407 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3408 t->work.type = BINDER_WORK_TRANSACTION;
3409
3410 if (reply) {
3411 binder_enqueue_thread_work(thread, tcomplete);
3412 binder_inner_proc_lock(target_proc);
3413 if (target_thread->is_dead) {
3414 binder_inner_proc_unlock(target_proc);
3415 goto err_dead_proc_or_thread;
3416 }
3417 BUG_ON(t->buffer->async_transaction != 0);
3418 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3419 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3420 binder_inner_proc_unlock(target_proc);
3421 wake_up_interruptible_sync(&target_thread->wait);
3422 binder_free_transaction(in_reply_to);
3423 } else if (!(t->flags & TF_ONE_WAY)) {
3424 BUG_ON(t->buffer->async_transaction != 0);
3425 binder_inner_proc_lock(proc);
3426 /*
3427 * Defer the TRANSACTION_COMPLETE, so we don't return to
3428 * userspace immediately; this allows the target process to
3429 * immediately start processing this transaction, reducing
3430 * latency. We will then return the TRANSACTION_COMPLETE when
3431 * the target replies (or there is an error).
3432 */ 3433 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete); 3434 t->need_reply = 1; 3435 t->from_parent = thread->transaction_stack; 3436 thread->transaction_stack = t; 3437 binder_inner_proc_unlock(proc); 3438 if (!binder_proc_transaction(t, target_proc, target_thread)) { 3439 binder_inner_proc_lock(proc); 3440 binder_pop_transaction_ilocked(thread, t); 3441 binder_inner_proc_unlock(proc); 3442 goto err_dead_proc_or_thread; 3443 } 3444 } else { 3445 BUG_ON(target_node == NULL); 3446 BUG_ON(t->buffer->async_transaction != 1); 3447 binder_enqueue_thread_work(thread, tcomplete); 3448 if (!binder_proc_transaction(t, target_proc, NULL)) 3449 goto err_dead_proc_or_thread; 3450 } 3451 if (target_thread) 3452 binder_thread_dec_tmpref(target_thread); 3453 binder_proc_dec_tmpref(target_proc); 3454 if (target_node) 3455 binder_dec_node_tmpref(target_node); 3456 /* 3457 * write barrier to synchronize with initialization 3458 * of log entry 3459 */ 3460 smp_wmb(); 3461 WRITE_ONCE(e->debug_id_done, t_debug_id); 3462 return; 3463 3464 err_dead_proc_or_thread: 3465 return_error = BR_DEAD_REPLY; 3466 return_error_line = __LINE__; 3467 binder_dequeue_work(proc, tcomplete); 3468 err_translate_failed: 3469 err_bad_object_type: 3470 err_bad_offset: 3471 err_bad_parent: 3472 err_copy_data_failed: 3473 binder_free_txn_fixups(t); 3474 trace_binder_transaction_failed_buffer_release(t->buffer); 3475 binder_transaction_buffer_release(target_proc, t->buffer, 3476 buffer_offset, true); 3477 if (target_node) 3478 binder_dec_node_tmpref(target_node); 3479 target_node = NULL; 3480 t->buffer->transaction = NULL; 3481 binder_alloc_free_buf(&target_proc->alloc, t->buffer); 3482 err_binder_alloc_buf_failed: 3483 if (secctx) 3484 security_release_secctx(secctx, secctx_sz); 3485 err_get_secctx_failed: 3486 kfree(tcomplete); 3487 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 3488 err_alloc_tcomplete_failed: 3489 kfree(t); 3490 binder_stats_deleted(BINDER_STAT_TRANSACTION); 3491 err_alloc_t_failed: 3492 err_bad_todo_list: 3493 err_bad_call_stack: 3494 err_empty_call_stack: 3495 err_dead_binder: 3496 err_invalid_target_handle: 3497 if (target_thread) 3498 binder_thread_dec_tmpref(target_thread); 3499 if (target_proc) 3500 binder_proc_dec_tmpref(target_proc); 3501 if (target_node) { 3502 binder_dec_node(target_node, 1, 0); 3503 binder_dec_node_tmpref(target_node); 3504 } 3505 3506 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 3507 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n", 3508 proc->pid, thread->pid, return_error, return_error_param, 3509 (u64)tr->data_size, (u64)tr->offsets_size, 3510 return_error_line); 3511 3512 { 3513 struct binder_transaction_log_entry *fe; 3514 3515 e->return_error = return_error; 3516 e->return_error_param = return_error_param; 3517 e->return_error_line = return_error_line; 3518 fe = binder_transaction_log_add(&binder_transaction_log_failed); 3519 *fe = *e; 3520 /* 3521 * write barrier to synchronize with initialization 3522 * of log entry 3523 */ 3524 smp_wmb(); 3525 WRITE_ONCE(e->debug_id_done, t_debug_id); 3526 WRITE_ONCE(fe->debug_id_done, t_debug_id); 3527 } 3528 3529 BUG_ON(thread->return_error.cmd != BR_OK); 3530 if (in_reply_to) { 3531 thread->return_error.cmd = BR_TRANSACTION_COMPLETE; 3532 binder_enqueue_thread_work(thread, &thread->return_error.work); 3533 binder_send_failed_reply(in_reply_to, return_error); 3534 } else { 3535 thread->return_error.cmd = return_error; 3536 binder_enqueue_thread_work(thread, &thread->return_error.work); 3537 } 3538 } 
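/*
 * Illustrative sketch, not driver code: user space reaches
 * binder_transaction() by writing a BC_TRANSACTION command through
 * the BINDER_WRITE_READ ioctl; handle, code, size and buf below are
 * placeholders:
 *
 *	struct binder_transaction_data tr = {
 *		.target.handle = handle,
 *		.code = code,
 *		.data_size = size,
 *		.data.ptr.buffer = (binder_uintptr_t)buf,
 *	};
 *	uint32_t cmd = BC_TRANSACTION;
 *	char wbuf[sizeof(cmd) + sizeof(tr)];
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(wbuf),
 *		.write_buffer = (binder_uintptr_t)wbuf,
 *	};
 *
 *	memcpy(wbuf, &cmd, sizeof(cmd));
 *	memcpy(wbuf + sizeof(cmd), &tr, sizeof(tr));
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 * binder_thread_write() below decodes such a command stream and
 * dispatches BC_TRANSACTION/BC_REPLY to binder_transaction().
 */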
3539
3540 /**
3541 * binder_free_buf() - free the specified buffer
3542 * @proc: binder proc that owns buffer
3543 * @buffer: buffer to be freed
3544 *
3545 * If the buffer is for an async transaction, enqueue the next async
3546 * transaction from the node.
3547 *
3548 * Clean up the buffer and free it.
3549 */
3550 static void
3551 binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
3552 {
3553 if (buffer->transaction) {
3554 buffer->transaction->buffer = NULL;
3555 buffer->transaction = NULL;
3556 }
3557 if (buffer->async_transaction && buffer->target_node) {
3558 struct binder_node *buf_node;
3559 struct binder_work *w;
3560
3561 buf_node = buffer->target_node;
3562 binder_node_inner_lock(buf_node);
3563 BUG_ON(!buf_node->has_async_transaction);
3564 BUG_ON(buf_node->proc != proc);
3565 w = binder_dequeue_work_head_ilocked(
3566 &buf_node->async_todo);
3567 if (!w) {
3568 buf_node->has_async_transaction = false;
3569 } else {
3570 binder_enqueue_work_ilocked(
3571 w, &proc->todo);
3572 binder_wakeup_proc_ilocked(proc);
3573 }
3574 binder_node_inner_unlock(buf_node);
3575 }
3576 trace_binder_transaction_buffer_release(buffer);
3577 binder_transaction_buffer_release(proc, buffer, 0, false);
3578 binder_alloc_free_buf(&proc->alloc, buffer);
3579 }
3580
3581 static int binder_thread_write(struct binder_proc *proc,
3582 struct binder_thread *thread,
3583 binder_uintptr_t binder_buffer, size_t size,
3584 binder_size_t *consumed)
3585 {
3586 uint32_t cmd;
3587 struct binder_context *context = proc->context;
3588 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3589 void __user *ptr = buffer + *consumed;
3590 void __user *end = buffer + size;
3591
3592 while (ptr < end && thread->return_error.cmd == BR_OK) {
3593 int ret;
3594
3595 if (get_user(cmd, (uint32_t __user *)ptr))
3596 return -EFAULT;
3597 ptr += sizeof(uint32_t);
3598 trace_binder_command(cmd);
3599 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3600 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3601 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3602 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3603 }
3604 switch (cmd) {
3605 case BC_INCREFS:
3606 case BC_ACQUIRE:
3607 case BC_RELEASE:
3608 case BC_DECREFS: {
3609 uint32_t target;
3610 const char *debug_string;
3611 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3612 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3613 struct binder_ref_data rdata;
3614
3615 if (get_user(target, (uint32_t __user *)ptr))
3616 return -EFAULT;
3617
3618 ptr += sizeof(uint32_t);
3619 ret = -1;
3620 if (increment && !target) {
3621 struct binder_node *ctx_mgr_node;
3622 mutex_lock(&context->context_mgr_node_lock);
3623 ctx_mgr_node = context->binder_context_mgr_node;
3624 if (ctx_mgr_node)
3625 ret = binder_inc_ref_for_node(
3626 proc, ctx_mgr_node,
3627 strong, NULL, &rdata);
3628 mutex_unlock(&context->context_mgr_node_lock);
3629 }
3630 if (ret)
3631 ret = binder_update_ref_for_handle(
3632 proc, target, increment, strong,
3633 &rdata);
3634 if (!ret && rdata.desc != target) {
3635 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3636 proc->pid, thread->pid,
3637 target, rdata.desc);
3638 }
3639 switch (cmd) {
3640 case BC_INCREFS:
3641 debug_string = "IncRefs";
3642 break;
3643 case BC_ACQUIRE:
3644 debug_string = "Acquire";
3645 break;
3646 case BC_RELEASE:
3647 debug_string = "Release";
3648 break;
3649 case BC_DECREFS:
3650 default:
3651 debug_string = "DecRefs";
3652 break;
3653 }
3654 if (ret) {
3655 binder_user_error("%d:%d 
%s %d refcount change on invalid ref %d ret %d\n", 3656 proc->pid, thread->pid, debug_string, 3657 strong, target, ret); 3658 break; 3659 } 3660 binder_debug(BINDER_DEBUG_USER_REFS, 3661 "%d:%d %s ref %d desc %d s %d w %d\n", 3662 proc->pid, thread->pid, debug_string, 3663 rdata.debug_id, rdata.desc, rdata.strong, 3664 rdata.weak); 3665 break; 3666 } 3667 case BC_INCREFS_DONE: 3668 case BC_ACQUIRE_DONE: { 3669 binder_uintptr_t node_ptr; 3670 binder_uintptr_t cookie; 3671 struct binder_node *node; 3672 bool free_node; 3673 3674 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr)) 3675 return -EFAULT; 3676 ptr += sizeof(binder_uintptr_t); 3677 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3678 return -EFAULT; 3679 ptr += sizeof(binder_uintptr_t); 3680 node = binder_get_node(proc, node_ptr); 3681 if (node == NULL) { 3682 binder_user_error("%d:%d %s u%016llx no match\n", 3683 proc->pid, thread->pid, 3684 cmd == BC_INCREFS_DONE ? 3685 "BC_INCREFS_DONE" : 3686 "BC_ACQUIRE_DONE", 3687 (u64)node_ptr); 3688 break; 3689 } 3690 if (cookie != node->cookie) { 3691 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n", 3692 proc->pid, thread->pid, 3693 cmd == BC_INCREFS_DONE ? 3694 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 3695 (u64)node_ptr, node->debug_id, 3696 (u64)cookie, (u64)node->cookie); 3697 binder_put_node(node); 3698 break; 3699 } 3700 binder_node_inner_lock(node); 3701 if (cmd == BC_ACQUIRE_DONE) { 3702 if (node->pending_strong_ref == 0) { 3703 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", 3704 proc->pid, thread->pid, 3705 node->debug_id); 3706 binder_node_inner_unlock(node); 3707 binder_put_node(node); 3708 break; 3709 } 3710 node->pending_strong_ref = 0; 3711 } else { 3712 if (node->pending_weak_ref == 0) { 3713 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", 3714 proc->pid, thread->pid, 3715 node->debug_id); 3716 binder_node_inner_unlock(node); 3717 binder_put_node(node); 3718 break; 3719 } 3720 node->pending_weak_ref = 0; 3721 } 3722 free_node = binder_dec_node_nilocked(node, 3723 cmd == BC_ACQUIRE_DONE, 0); 3724 WARN_ON(free_node); 3725 binder_debug(BINDER_DEBUG_USER_REFS, 3726 "%d:%d %s node %d ls %d lw %d tr %d\n", 3727 proc->pid, thread->pid, 3728 cmd == BC_INCREFS_DONE ? 
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 3729 node->debug_id, node->local_strong_refs, 3730 node->local_weak_refs, node->tmp_refs); 3731 binder_node_inner_unlock(node); 3732 binder_put_node(node); 3733 break; 3734 } 3735 case BC_ATTEMPT_ACQUIRE: 3736 pr_err("BC_ATTEMPT_ACQUIRE not supported\n"); 3737 return -EINVAL; 3738 case BC_ACQUIRE_RESULT: 3739 pr_err("BC_ACQUIRE_RESULT not supported\n"); 3740 return -EINVAL; 3741 3742 case BC_FREE_BUFFER: { 3743 binder_uintptr_t data_ptr; 3744 struct binder_buffer *buffer; 3745 3746 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr)) 3747 return -EFAULT; 3748 ptr += sizeof(binder_uintptr_t); 3749 3750 buffer = binder_alloc_prepare_to_free(&proc->alloc, 3751 data_ptr); 3752 if (IS_ERR_OR_NULL(buffer)) { 3753 if (PTR_ERR(buffer) == -EPERM) { 3754 binder_user_error( 3755 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n", 3756 proc->pid, thread->pid, 3757 (u64)data_ptr); 3758 } else { 3759 binder_user_error( 3760 "%d:%d BC_FREE_BUFFER u%016llx no match\n", 3761 proc->pid, thread->pid, 3762 (u64)data_ptr); 3763 } 3764 break; 3765 } 3766 binder_debug(BINDER_DEBUG_FREE_BUFFER, 3767 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n", 3768 proc->pid, thread->pid, (u64)data_ptr, 3769 buffer->debug_id, 3770 buffer->transaction ? "active" : "finished"); 3771 binder_free_buf(proc, buffer); 3772 break; 3773 } 3774 3775 case BC_TRANSACTION_SG: 3776 case BC_REPLY_SG: { 3777 struct binder_transaction_data_sg tr; 3778 3779 if (copy_from_user(&tr, ptr, sizeof(tr))) 3780 return -EFAULT; 3781 ptr += sizeof(tr); 3782 binder_transaction(proc, thread, &tr.transaction_data, 3783 cmd == BC_REPLY_SG, tr.buffers_size); 3784 break; 3785 } 3786 case BC_TRANSACTION: 3787 case BC_REPLY: { 3788 struct binder_transaction_data tr; 3789 3790 if (copy_from_user(&tr, ptr, sizeof(tr))) 3791 return -EFAULT; 3792 ptr += sizeof(tr); 3793 binder_transaction(proc, thread, &tr, 3794 cmd == BC_REPLY, 0); 3795 break; 3796 } 3797 3798 case BC_REGISTER_LOOPER: 3799 binder_debug(BINDER_DEBUG_THREADS, 3800 "%d:%d BC_REGISTER_LOOPER\n", 3801 proc->pid, thread->pid); 3802 binder_inner_proc_lock(proc); 3803 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { 3804 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3805 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", 3806 proc->pid, thread->pid); 3807 } else if (proc->requested_threads == 0) { 3808 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3809 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n", 3810 proc->pid, thread->pid); 3811 } else { 3812 proc->requested_threads--; 3813 proc->requested_threads_started++; 3814 } 3815 thread->looper |= BINDER_LOOPER_STATE_REGISTERED; 3816 binder_inner_proc_unlock(proc); 3817 break; 3818 case BC_ENTER_LOOPER: 3819 binder_debug(BINDER_DEBUG_THREADS, 3820 "%d:%d BC_ENTER_LOOPER\n", 3821 proc->pid, thread->pid); 3822 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { 3823 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3824 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n", 3825 proc->pid, thread->pid); 3826 } 3827 thread->looper |= BINDER_LOOPER_STATE_ENTERED; 3828 break; 3829 case BC_EXIT_LOOPER: 3830 binder_debug(BINDER_DEBUG_THREADS, 3831 "%d:%d BC_EXIT_LOOPER\n", 3832 proc->pid, thread->pid); 3833 thread->looper |= BINDER_LOOPER_STATE_EXITED; 3834 break; 3835 3836 case BC_REQUEST_DEATH_NOTIFICATION: 3837 case BC_CLEAR_DEATH_NOTIFICATION: { 3838 uint32_t target; 3839 
binder_uintptr_t cookie; 3840 struct binder_ref *ref; 3841 struct binder_ref_death *death = NULL; 3842 3843 if (get_user(target, (uint32_t __user *)ptr)) 3844 return -EFAULT; 3845 ptr += sizeof(uint32_t); 3846 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3847 return -EFAULT; 3848 ptr += sizeof(binder_uintptr_t); 3849 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 3850 /* 3851 * Allocate memory for death notification 3852 * before taking lock 3853 */ 3854 death = kzalloc(sizeof(*death), GFP_KERNEL); 3855 if (death == NULL) { 3856 WARN_ON(thread->return_error.cmd != 3857 BR_OK); 3858 thread->return_error.cmd = BR_ERROR; 3859 binder_enqueue_thread_work( 3860 thread, 3861 &thread->return_error.work); 3862 binder_debug( 3863 BINDER_DEBUG_FAILED_TRANSACTION, 3864 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", 3865 proc->pid, thread->pid); 3866 break; 3867 } 3868 } 3869 binder_proc_lock(proc); 3870 ref = binder_get_ref_olocked(proc, target, false); 3871 if (ref == NULL) { 3872 binder_user_error("%d:%d %s invalid ref %d\n", 3873 proc->pid, thread->pid, 3874 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 3875 "BC_REQUEST_DEATH_NOTIFICATION" : 3876 "BC_CLEAR_DEATH_NOTIFICATION", 3877 target); 3878 binder_proc_unlock(proc); 3879 kfree(death); 3880 break; 3881 } 3882 3883 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 3884 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n", 3885 proc->pid, thread->pid, 3886 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 3887 "BC_REQUEST_DEATH_NOTIFICATION" : 3888 "BC_CLEAR_DEATH_NOTIFICATION", 3889 (u64)cookie, ref->data.debug_id, 3890 ref->data.desc, ref->data.strong, 3891 ref->data.weak, ref->node->debug_id); 3892 3893 binder_node_lock(ref->node); 3894 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 3895 if (ref->death) { 3896 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", 3897 proc->pid, thread->pid); 3898 binder_node_unlock(ref->node); 3899 binder_proc_unlock(proc); 3900 kfree(death); 3901 break; 3902 } 3903 binder_stats_created(BINDER_STAT_DEATH); 3904 INIT_LIST_HEAD(&death->work.entry); 3905 death->cookie = cookie; 3906 ref->death = death; 3907 if (ref->node->proc == NULL) { 3908 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 3909 3910 binder_inner_proc_lock(proc); 3911 binder_enqueue_work_ilocked( 3912 &ref->death->work, &proc->todo); 3913 binder_wakeup_proc_ilocked(proc); 3914 binder_inner_proc_unlock(proc); 3915 } 3916 } else { 3917 if (ref->death == NULL) { 3918 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", 3919 proc->pid, thread->pid); 3920 binder_node_unlock(ref->node); 3921 binder_proc_unlock(proc); 3922 break; 3923 } 3924 death = ref->death; 3925 if (death->cookie != cookie) { 3926 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n", 3927 proc->pid, thread->pid, 3928 (u64)death->cookie, 3929 (u64)cookie); 3930 binder_node_unlock(ref->node); 3931 binder_proc_unlock(proc); 3932 break; 3933 } 3934 ref->death = NULL; 3935 binder_inner_proc_lock(proc); 3936 if (list_empty(&death->work.entry)) { 3937 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 3938 if (thread->looper & 3939 (BINDER_LOOPER_STATE_REGISTERED | 3940 BINDER_LOOPER_STATE_ENTERED)) 3941 binder_enqueue_thread_work_ilocked( 3942 thread, 3943 &death->work); 3944 else { 3945 binder_enqueue_work_ilocked( 3946 &death->work, 3947 &proc->todo); 3948 binder_wakeup_proc_ilocked( 3949 proc); 3950 } 3951 } else { 3952 BUG_ON(death->work.type != 
BINDER_WORK_DEAD_BINDER); 3953 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; 3954 } 3955 binder_inner_proc_unlock(proc); 3956 } 3957 binder_node_unlock(ref->node); 3958 binder_proc_unlock(proc); 3959 } break; 3960 case BC_DEAD_BINDER_DONE: { 3961 struct binder_work *w; 3962 binder_uintptr_t cookie; 3963 struct binder_ref_death *death = NULL; 3964 3965 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3966 return -EFAULT; 3967 3968 ptr += sizeof(cookie); 3969 binder_inner_proc_lock(proc); 3970 list_for_each_entry(w, &proc->delivered_death, 3971 entry) { 3972 struct binder_ref_death *tmp_death = 3973 container_of(w, 3974 struct binder_ref_death, 3975 work); 3976 3977 if (tmp_death->cookie == cookie) { 3978 death = tmp_death; 3979 break; 3980 } 3981 } 3982 binder_debug(BINDER_DEBUG_DEAD_BINDER, 3983 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n", 3984 proc->pid, thread->pid, (u64)cookie, 3985 death); 3986 if (death == NULL) { 3987 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n", 3988 proc->pid, thread->pid, (u64)cookie); 3989 binder_inner_proc_unlock(proc); 3990 break; 3991 } 3992 binder_dequeue_work_ilocked(&death->work); 3993 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { 3994 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 3995 if (thread->looper & 3996 (BINDER_LOOPER_STATE_REGISTERED | 3997 BINDER_LOOPER_STATE_ENTERED)) 3998 binder_enqueue_thread_work_ilocked( 3999 thread, &death->work); 4000 else { 4001 binder_enqueue_work_ilocked( 4002 &death->work, 4003 &proc->todo); 4004 binder_wakeup_proc_ilocked(proc); 4005 } 4006 } 4007 binder_inner_proc_unlock(proc); 4008 } break; 4009 4010 default: 4011 pr_err("%d:%d unknown command %d\n", 4012 proc->pid, thread->pid, cmd); 4013 return -EINVAL; 4014 } 4015 *consumed = ptr - buffer; 4016 } 4017 return 0; 4018 } 4019 4020 static void binder_stat_br(struct binder_proc *proc, 4021 struct binder_thread *thread, uint32_t cmd) 4022 { 4023 trace_binder_return(cmd); 4024 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { 4025 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]); 4026 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]); 4027 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]); 4028 } 4029 } 4030 4031 static int binder_put_node_cmd(struct binder_proc *proc, 4032 struct binder_thread *thread, 4033 void __user **ptrp, 4034 binder_uintptr_t node_ptr, 4035 binder_uintptr_t node_cookie, 4036 int node_debug_id, 4037 uint32_t cmd, const char *cmd_name) 4038 { 4039 void __user *ptr = *ptrp; 4040 4041 if (put_user(cmd, (uint32_t __user *)ptr)) 4042 return -EFAULT; 4043 ptr += sizeof(uint32_t); 4044 4045 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr)) 4046 return -EFAULT; 4047 ptr += sizeof(binder_uintptr_t); 4048 4049 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr)) 4050 return -EFAULT; 4051 ptr += sizeof(binder_uintptr_t); 4052 4053 binder_stat_br(proc, thread, cmd); 4054 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n", 4055 proc->pid, thread->pid, cmd_name, node_debug_id, 4056 (u64)node_ptr, (u64)node_cookie); 4057 4058 *ptrp = ptr; 4059 return 0; 4060 } 4061 4062 static int binder_wait_for_work(struct binder_thread *thread, 4063 bool do_proc_work) 4064 { 4065 DEFINE_WAIT(wait); 4066 struct binder_proc *proc = thread->proc; 4067 int ret = 0; 4068 4069 freezer_do_not_count(); 4070 binder_inner_proc_lock(proc); 4071 for (;;) { 4072 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE); 4073 if (binder_has_work_ilocked(thread, do_proc_work)) 4074 break; 4075 if (do_proc_work) 4076 
list_add(&thread->waiting_thread_node, 4077 &proc->waiting_threads); 4078 binder_inner_proc_unlock(proc); 4079 schedule(); 4080 binder_inner_proc_lock(proc); 4081 list_del_init(&thread->waiting_thread_node); 4082 if (signal_pending(current)) { 4083 ret = -ERESTARTSYS; 4084 break; 4085 } 4086 } 4087 finish_wait(&thread->wait, &wait); 4088 binder_inner_proc_unlock(proc); 4089 freezer_count(); 4090 4091 return ret; 4092 } 4093 4094 /** 4095 * binder_apply_fd_fixups() - finish fd translation 4096 * @proc: binder_proc associated with @t->buffer 4097 * @t: binder transaction with list of fd fixups 4098 * 4099 * Now that we are in the context of the transaction target 4100 * process, we can allocate and install fds. Process the 4101 * list of fds to translate and fix up the buffer with the 4102 * new fds. 4103 * 4104 * If we fail to allocate an fd, then free the resources by 4105 * fput'ing files that have not been processed and closing, via 4106 * binder_deferred_fd_close(), any fds that have already been allocated. 4107 */ 4108 static int binder_apply_fd_fixups(struct binder_proc *proc, 4109 struct binder_transaction *t) 4110 { 4111 struct binder_txn_fd_fixup *fixup, *tmp; 4112 int ret = 0; 4113 4114 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) { 4115 int fd = get_unused_fd_flags(O_CLOEXEC); 4116 4117 if (fd < 0) { 4118 binder_debug(BINDER_DEBUG_TRANSACTION, 4119 "failed fd fixup txn %d fd %d\n", 4120 t->debug_id, fd); 4121 ret = -ENOMEM; 4122 break; 4123 } 4124 binder_debug(BINDER_DEBUG_TRANSACTION, 4125 "fd fixup txn %d fd %d\n", 4126 t->debug_id, fd); 4127 trace_binder_transaction_fd_recv(t, fd, fixup->offset); 4128 fd_install(fd, fixup->file); 4129 fixup->file = NULL; 4130 binder_alloc_copy_to_buffer(&proc->alloc, t->buffer, 4131 fixup->offset, &fd, 4132 sizeof(u32)); 4133 } 4134 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { 4135 if (fixup->file) { 4136 fput(fixup->file); 4137 } else if (ret) { 4138 u32 fd; 4139 4140 binder_alloc_copy_from_buffer(&proc->alloc, &fd, 4141 t->buffer, fixup->offset, 4142 sizeof(fd)); 4143 binder_deferred_fd_close(fd); 4144 } 4145 list_del(&fixup->fixup_entry); 4146 kfree(fixup); 4147 } 4148 4149 return ret; 4150 } 4151 4152 static int binder_thread_read(struct binder_proc *proc, 4153 struct binder_thread *thread, 4154 binder_uintptr_t binder_buffer, size_t size, 4155 binder_size_t *consumed, int non_block) 4156 { 4157 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 4158 void __user *ptr = buffer + *consumed; 4159 void __user *end = buffer + size; 4160 4161 int ret = 0; 4162 int wait_for_proc_work; 4163 4164 if (*consumed == 0) { 4165 if (put_user(BR_NOOP, (uint32_t __user *)ptr)) 4166 return -EFAULT; 4167 ptr += sizeof(uint32_t); 4168 } 4169 4170 retry: 4171 binder_inner_proc_lock(proc); 4172 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); 4173 binder_inner_proc_unlock(proc); 4174 4175 thread->looper |= BINDER_LOOPER_STATE_WAITING; 4176 4177 trace_binder_wait_for_work(wait_for_proc_work, 4178 !!thread->transaction_stack, 4179 !binder_worklist_empty(proc, &thread->todo)); 4180 if (wait_for_proc_work) { 4181 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 4182 BINDER_LOOPER_STATE_ENTERED))) { 4183 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n", 4184 proc->pid, thread->pid, thread->looper); 4185 wait_event_interruptible(binder_user_error_wait, 4186 binder_stop_on_user_error < 2); 4187 } 4188 binder_set_nice(proc->default_priority); 4189 }
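	/*
	 * Editorial sketch, not part of the driver source: the looper
	 * state tested above is driven entirely from user space over
	 * the same fd. A typical handshake (thread roles hypothetical):
	 *
	 *	main thread:     writes BC_ENTER_LOOPER, then blocks in
	 *	                 BINDER_WRITE_READ reads
	 *	driver:          when no registered thread is waiting and
	 *	                 fewer than max_threads have been started,
	 *	                 appends BR_SPAWN_LOOPER (see the end of
	 *	                 this function)
	 *	spawned thread:  writes BC_REGISTER_LOOPER, valid only
	 *	                 after a BR_SPAWN_LOOPER request (checked
	 *	                 in binder_thread_write() above), then
	 *	                 enters the same read loop
	 *
	 * A thread that waits for process work with neither looper state
	 * set trips the binder_user_error() above.
	 */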
4190 4191 if (non_block) { 4192 if (!binder_has_work(thread, wait_for_proc_work)) 4193 ret = -EAGAIN; 4194 } else { 4195 ret = binder_wait_for_work(thread, wait_for_proc_work); 4196 } 4197 4198 thread->looper &= ~BINDER_LOOPER_STATE_WAITING; 4199 4200 if (ret) 4201 return ret; 4202 4203 while (1) { 4204 uint32_t cmd; 4205 struct binder_transaction_data_secctx tr; 4206 struct binder_transaction_data *trd = &tr.transaction_data; 4207 struct binder_work *w = NULL; 4208 struct list_head *list = NULL; 4209 struct binder_transaction *t = NULL; 4210 struct binder_thread *t_from; 4211 size_t trsize = sizeof(*trd); 4212 4213 binder_inner_proc_lock(proc); 4214 if (!binder_worklist_empty_ilocked(&thread->todo)) 4215 list = &thread->todo; 4216 else if (!binder_worklist_empty_ilocked(&proc->todo) && 4217 wait_for_proc_work) 4218 list = &proc->todo; 4219 else { 4220 binder_inner_proc_unlock(proc); 4221 4222 /* no data added */ 4223 if (ptr - buffer == 4 && !thread->looper_need_return) 4224 goto retry; 4225 break; 4226 } 4227 4228 if (end - ptr < sizeof(tr) + 4) { 4229 binder_inner_proc_unlock(proc); 4230 break; 4231 } 4232 w = binder_dequeue_work_head_ilocked(list); 4233 if (binder_worklist_empty_ilocked(&thread->todo)) 4234 thread->process_todo = false; 4235 4236 switch (w->type) { 4237 case BINDER_WORK_TRANSACTION: { 4238 binder_inner_proc_unlock(proc); 4239 t = container_of(w, struct binder_transaction, work); 4240 } break; 4241 case BINDER_WORK_RETURN_ERROR: { 4242 struct binder_error *e = container_of( 4243 w, struct binder_error, work); 4244 4245 WARN_ON(e->cmd == BR_OK); 4246 binder_inner_proc_unlock(proc); 4247 if (put_user(e->cmd, (uint32_t __user *)ptr)) 4248 return -EFAULT; 4249 cmd = e->cmd; 4250 e->cmd = BR_OK; 4251 ptr += sizeof(uint32_t); 4252 4253 binder_stat_br(proc, thread, cmd); 4254 } break; 4255 case BINDER_WORK_TRANSACTION_COMPLETE: { 4256 binder_inner_proc_unlock(proc); 4257 cmd = BR_TRANSACTION_COMPLETE; 4258 if (put_user(cmd, (uint32_t __user *)ptr)) 4259 return -EFAULT; 4260 ptr += sizeof(uint32_t); 4261 4262 binder_stat_br(proc, thread, cmd); 4263 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, 4264 "%d:%d BR_TRANSACTION_COMPLETE\n", 4265 proc->pid, thread->pid); 4266 kfree(w); 4267 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 4268 } break; 4269 case BINDER_WORK_NODE: { 4270 struct binder_node *node = container_of(w, struct binder_node, work); 4271 int strong, weak; 4272 binder_uintptr_t node_ptr = node->ptr; 4273 binder_uintptr_t node_cookie = node->cookie; 4274 int node_debug_id = node->debug_id; 4275 int has_weak_ref; 4276 int has_strong_ref; 4277 void __user *orig_ptr = ptr; 4278 4279 BUG_ON(proc != node->proc); 4280 strong = node->internal_strong_refs || 4281 node->local_strong_refs; 4282 weak = !hlist_empty(&node->refs) || 4283 node->local_weak_refs || 4284 node->tmp_refs || strong; 4285 has_strong_ref = node->has_strong_ref; 4286 has_weak_ref = node->has_weak_ref; 4287 4288 if (weak && !has_weak_ref) { 4289 node->has_weak_ref = 1; 4290 node->pending_weak_ref = 1; 4291 node->local_weak_refs++; 4292 } 4293 if (strong && !has_strong_ref) { 4294 node->has_strong_ref = 1; 4295 node->pending_strong_ref = 1; 4296 node->local_strong_refs++; 4297 } 4298 if (!strong && has_strong_ref) 4299 node->has_strong_ref = 0; 4300 if (!weak && has_weak_ref) 4301 node->has_weak_ref = 0; 4302 if (!weak && !strong) { 4303 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 4304 "%d:%d node %d u%016llx c%016llx deleted\n", 4305 proc->pid, thread->pid, 4306 node_debug_id, 4307 (u64)node_ptr, 
4308 (u64)node_cookie); 4309 rb_erase(&node->rb_node, &proc->nodes); 4310 binder_inner_proc_unlock(proc); 4311 binder_node_lock(node); 4312 /* 4313 * Acquire the node lock before freeing the 4314 * node to serialize with other threads that 4315 * may have been holding the node lock while 4316 * decrementing this node (avoids race where 4317 * this thread frees while the other thread 4318 * is unlocking the node after the final 4319 * decrement) 4320 */ 4321 binder_node_unlock(node); 4322 binder_free_node(node); 4323 } else 4324 binder_inner_proc_unlock(proc); 4325 4326 if (weak && !has_weak_ref) 4327 ret = binder_put_node_cmd( 4328 proc, thread, &ptr, node_ptr, 4329 node_cookie, node_debug_id, 4330 BR_INCREFS, "BR_INCREFS"); 4331 if (!ret && strong && !has_strong_ref) 4332 ret = binder_put_node_cmd( 4333 proc, thread, &ptr, node_ptr, 4334 node_cookie, node_debug_id, 4335 BR_ACQUIRE, "BR_ACQUIRE"); 4336 if (!ret && !strong && has_strong_ref) 4337 ret = binder_put_node_cmd( 4338 proc, thread, &ptr, node_ptr, 4339 node_cookie, node_debug_id, 4340 BR_RELEASE, "BR_RELEASE"); 4341 if (!ret && !weak && has_weak_ref) 4342 ret = binder_put_node_cmd( 4343 proc, thread, &ptr, node_ptr, 4344 node_cookie, node_debug_id, 4345 BR_DECREFS, "BR_DECREFS"); 4346 if (orig_ptr == ptr) 4347 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 4348 "%d:%d node %d u%016llx c%016llx state unchanged\n", 4349 proc->pid, thread->pid, 4350 node_debug_id, 4351 (u64)node_ptr, 4352 (u64)node_cookie); 4353 if (ret) 4354 return ret; 4355 } break; 4356 case BINDER_WORK_DEAD_BINDER: 4357 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 4358 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 4359 struct binder_ref_death *death; 4360 uint32_t cmd; 4361 binder_uintptr_t cookie; 4362 4363 death = container_of(w, struct binder_ref_death, work); 4364 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) 4365 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; 4366 else 4367 cmd = BR_DEAD_BINDER; 4368 cookie = death->cookie; 4369 4370 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 4371 "%d:%d %s %016llx\n", 4372 proc->pid, thread->pid, 4373 cmd == BR_DEAD_BINDER ? 
4374 "BR_DEAD_BINDER" : 4375 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 4376 (u64)cookie); 4377 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { 4378 binder_inner_proc_unlock(proc); 4379 kfree(death); 4380 binder_stats_deleted(BINDER_STAT_DEATH); 4381 } else { 4382 binder_enqueue_work_ilocked( 4383 w, &proc->delivered_death); 4384 binder_inner_proc_unlock(proc); 4385 } 4386 if (put_user(cmd, (uint32_t __user *)ptr)) 4387 return -EFAULT; 4388 ptr += sizeof(uint32_t); 4389 if (put_user(cookie, 4390 (binder_uintptr_t __user *)ptr)) 4391 return -EFAULT; 4392 ptr += sizeof(binder_uintptr_t); 4393 binder_stat_br(proc, thread, cmd); 4394 if (cmd == BR_DEAD_BINDER) 4395 goto done; /* DEAD_BINDER notifications can cause transactions */ 4396 } break; 4397 default: 4398 binder_inner_proc_unlock(proc); 4399 pr_err("%d:%d: bad work type %d\n", 4400 proc->pid, thread->pid, w->type); 4401 break; 4402 } 4403 4404 if (!t) 4405 continue; 4406 4407 BUG_ON(t->buffer == NULL); 4408 if (t->buffer->target_node) { 4409 struct binder_node *target_node = t->buffer->target_node; 4410 4411 trd->target.ptr = target_node->ptr; 4412 trd->cookie = target_node->cookie; 4413 t->saved_priority = task_nice(current); 4414 if (t->priority < target_node->min_priority && 4415 !(t->flags & TF_ONE_WAY)) 4416 binder_set_nice(t->priority); 4417 else if (!(t->flags & TF_ONE_WAY) || 4418 t->saved_priority > target_node->min_priority) 4419 binder_set_nice(target_node->min_priority); 4420 cmd = BR_TRANSACTION; 4421 } else { 4422 trd->target.ptr = 0; 4423 trd->cookie = 0; 4424 cmd = BR_REPLY; 4425 } 4426 trd->code = t->code; 4427 trd->flags = t->flags; 4428 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid); 4429 4430 t_from = binder_get_txn_from(t); 4431 if (t_from) { 4432 struct task_struct *sender = t_from->proc->tsk; 4433 4434 trd->sender_pid = 4435 task_tgid_nr_ns(sender, 4436 task_active_pid_ns(current)); 4437 } else { 4438 trd->sender_pid = 0; 4439 } 4440 4441 ret = binder_apply_fd_fixups(proc, t); 4442 if (ret) { 4443 struct binder_buffer *buffer = t->buffer; 4444 bool oneway = !!(t->flags & TF_ONE_WAY); 4445 int tid = t->debug_id; 4446 4447 if (t_from) 4448 binder_thread_dec_tmpref(t_from); 4449 buffer->transaction = NULL; 4450 binder_cleanup_transaction(t, "fd fixups failed", 4451 BR_FAILED_REPLY); 4452 binder_free_buf(proc, buffer); 4453 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 4454 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n", 4455 proc->pid, thread->pid, 4456 oneway ? "async " : 4457 (cmd == BR_REPLY ?
"reply " : ""), 4458 tid, BR_FAILED_REPLY, ret, __LINE__); 4459 if (cmd == BR_REPLY) { 4460 cmd = BR_FAILED_REPLY; 4461 if (put_user(cmd, (uint32_t __user *)ptr)) 4462 return -EFAULT; 4463 ptr += sizeof(uint32_t); 4464 binder_stat_br(proc, thread, cmd); 4465 break; 4466 } 4467 continue; 4468 } 4469 trd->data_size = t->buffer->data_size; 4470 trd->offsets_size = t->buffer->offsets_size; 4471 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data; 4472 trd->data.ptr.offsets = trd->data.ptr.buffer + 4473 ALIGN(t->buffer->data_size, 4474 sizeof(void *)); 4475 4476 tr.secctx = t->security_ctx; 4477 if (t->security_ctx) { 4478 cmd = BR_TRANSACTION_SEC_CTX; 4479 trsize = sizeof(tr); 4480 } 4481 if (put_user(cmd, (uint32_t __user *)ptr)) { 4482 if (t_from) 4483 binder_thread_dec_tmpref(t_from); 4484 4485 binder_cleanup_transaction(t, "put_user failed", 4486 BR_FAILED_REPLY); 4487 4488 return -EFAULT; 4489 } 4490 ptr += sizeof(uint32_t); 4491 if (copy_to_user(ptr, &tr, trsize)) { 4492 if (t_from) 4493 binder_thread_dec_tmpref(t_from); 4494 4495 binder_cleanup_transaction(t, "copy_to_user failed", 4496 BR_FAILED_REPLY); 4497 4498 return -EFAULT; 4499 } 4500 ptr += trsize; 4501 4502 trace_binder_transaction_received(t); 4503 binder_stat_br(proc, thread, cmd); 4504 binder_debug(BINDER_DEBUG_TRANSACTION, 4505 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", 4506 proc->pid, thread->pid, 4507 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : 4508 (cmd == BR_TRANSACTION_SEC_CTX) ? 4509 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY", 4510 t->debug_id, t_from ? t_from->proc->pid : 0, 4511 t_from ? t_from->pid : 0, cmd, 4512 t->buffer->data_size, t->buffer->offsets_size, 4513 (u64)trd->data.ptr.buffer, 4514 (u64)trd->data.ptr.offsets); 4515 4516 if (t_from) 4517 binder_thread_dec_tmpref(t_from); 4518 t->buffer->allow_user_free = 1; 4519 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) { 4520 binder_inner_proc_lock(thread->proc); 4521 t->to_parent = thread->transaction_stack; 4522 t->to_thread = thread; 4523 thread->transaction_stack = t; 4524 binder_inner_proc_unlock(thread->proc); 4525 } else { 4526 binder_free_transaction(t); 4527 } 4528 break; 4529 } 4530 4531 done: 4532 4533 *consumed = ptr - buffer; 4534 binder_inner_proc_lock(proc); 4535 if (proc->requested_threads == 0 && 4536 list_empty(&thread->proc->waiting_threads) && 4537 proc->requested_threads_started < proc->max_threads && 4538 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 4539 BINDER_LOOPER_STATE_ENTERED)) 4540 /* the user-space code fails to spawn a new thread if we leave this out */) { 4541 proc->requested_threads++; 4542 binder_inner_proc_unlock(proc); 4543 binder_debug(BINDER_DEBUG_THREADS, 4544 "%d:%d BR_SPAWN_LOOPER\n", 4545 proc->pid, thread->pid); 4546 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) 4547 return -EFAULT; 4548 binder_stat_br(proc, thread, BR_SPAWN_LOOPER); 4549 } else 4550 binder_inner_proc_unlock(proc); 4551 return 0; 4552 } 4553 4554 static void binder_release_work(struct binder_proc *proc, 4555 struct list_head *list) 4556 { 4557 struct binder_work *w; 4558 4559 while (1) { 4560 w = binder_dequeue_work_head(proc, list); 4561 if (!w) 4562 return; 4563 4564 switch (w->type) { 4565 case BINDER_WORK_TRANSACTION: { 4566 struct binder_transaction *t; 4567 4568 t = container_of(w, struct binder_transaction, work); 4569 4570 binder_cleanup_transaction(t, "process died.", 4571 BR_DEAD_REPLY); 4572 } break; 4573 case BINDER_WORK_RETURN_ERROR: { 4574 struct binder_error *e = container_of( 4575 w, struct
binder_error, work); 4576 4577 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4578 "undelivered TRANSACTION_ERROR: %u\n", 4579 e->cmd); 4580 } break; 4581 case BINDER_WORK_TRANSACTION_COMPLETE: { 4582 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4583 "undelivered TRANSACTION_COMPLETE\n"); 4584 kfree(w); 4585 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 4586 } break; 4587 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 4588 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 4589 struct binder_ref_death *death; 4590 4591 death = container_of(w, struct binder_ref_death, work); 4592 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4593 "undelivered death notification, %016llx\n", 4594 (u64)death->cookie); 4595 kfree(death); 4596 binder_stats_deleted(BINDER_STAT_DEATH); 4597 } break; 4598 default: 4599 pr_err("unexpected work type, %d, not freed\n", 4600 w->type); 4601 break; 4602 } 4603 } 4604 4605 } 4606 4607 static struct binder_thread *binder_get_thread_ilocked( 4608 struct binder_proc *proc, struct binder_thread *new_thread) 4609 { 4610 struct binder_thread *thread = NULL; 4611 struct rb_node *parent = NULL; 4612 struct rb_node **p = &proc->threads.rb_node; 4613 4614 while (*p) { 4615 parent = *p; 4616 thread = rb_entry(parent, struct binder_thread, rb_node); 4617 4618 if (current->pid < thread->pid) 4619 p = &(*p)->rb_left; 4620 else if (current->pid > thread->pid) 4621 p = &(*p)->rb_right; 4622 else 4623 return thread; 4624 } 4625 if (!new_thread) 4626 return NULL; 4627 thread = new_thread; 4628 binder_stats_created(BINDER_STAT_THREAD); 4629 thread->proc = proc; 4630 thread->pid = current->pid; 4631 atomic_set(&thread->tmp_ref, 0); 4632 init_waitqueue_head(&thread->wait); 4633 INIT_LIST_HEAD(&thread->todo); 4634 rb_link_node(&thread->rb_node, parent, p); 4635 rb_insert_color(&thread->rb_node, &proc->threads); 4636 thread->looper_need_return = true; 4637 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR; 4638 thread->return_error.cmd = BR_OK; 4639 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR; 4640 thread->reply_error.cmd = BR_OK; 4641 INIT_LIST_HEAD(&new_thread->waiting_thread_node); 4642 return thread; 4643 } 4644 4645 static struct binder_thread *binder_get_thread(struct binder_proc *proc) 4646 { 4647 struct binder_thread *thread; 4648 struct binder_thread *new_thread; 4649 4650 binder_inner_proc_lock(proc); 4651 thread = binder_get_thread_ilocked(proc, NULL); 4652 binder_inner_proc_unlock(proc); 4653 if (!thread) { 4654 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL); 4655 if (new_thread == NULL) 4656 return NULL; 4657 binder_inner_proc_lock(proc); 4658 thread = binder_get_thread_ilocked(proc, new_thread); 4659 binder_inner_proc_unlock(proc); 4660 if (thread != new_thread) 4661 kfree(new_thread); 4662 } 4663 return thread; 4664 } 4665 4666 static void binder_free_proc(struct binder_proc *proc) 4667 { 4668 BUG_ON(!list_empty(&proc->todo)); 4669 BUG_ON(!list_empty(&proc->delivered_death)); 4670 binder_alloc_deferred_release(&proc->alloc); 4671 put_task_struct(proc->tsk); 4672 binder_stats_deleted(BINDER_STAT_PROC); 4673 kfree(proc); 4674 } 4675 4676 static void binder_free_thread(struct binder_thread *thread) 4677 { 4678 BUG_ON(!list_empty(&thread->todo)); 4679 binder_stats_deleted(BINDER_STAT_THREAD); 4680 binder_proc_dec_tmpref(thread->proc); 4681 kfree(thread); 4682 } 4683 4684 static int binder_thread_release(struct binder_proc *proc, 4685 struct binder_thread *thread) 4686 { 4687 struct binder_transaction *t; 4688 struct binder_transaction *send_reply = NULL; 4689 int 
active_transactions = 0; 4690 struct binder_transaction *last_t = NULL; 4691 4692 binder_inner_proc_lock(thread->proc); 4693 /* 4694 * take a ref on the proc so it survives 4695 * after we remove this thread from proc->threads. 4696 * The corresponding dec is when we actually 4697 * free the thread in binder_free_thread() 4698 */ 4699 proc->tmp_ref++; 4700 /* 4701 * take a ref on this thread to ensure it 4702 * survives while we are releasing it 4703 */ 4704 atomic_inc(&thread->tmp_ref); 4705 rb_erase(&thread->rb_node, &proc->threads); 4706 t = thread->transaction_stack; 4707 if (t) { 4708 spin_lock(&t->lock); 4709 if (t->to_thread == thread) 4710 send_reply = t; 4711 } else { 4712 __acquire(&t->lock); 4713 } 4714 thread->is_dead = true; 4715 4716 while (t) { 4717 last_t = t; 4718 active_transactions++; 4719 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4720 "release %d:%d transaction %d %s, still active\n", 4721 proc->pid, thread->pid, 4722 t->debug_id, 4723 (t->to_thread == thread) ? "in" : "out"); 4724 4725 if (t->to_thread == thread) { 4726 t->to_proc = NULL; 4727 t->to_thread = NULL; 4728 if (t->buffer) { 4729 t->buffer->transaction = NULL; 4730 t->buffer = NULL; 4731 } 4732 t = t->to_parent; 4733 } else if (t->from == thread) { 4734 t->from = NULL; 4735 t = t->from_parent; 4736 } else 4737 BUG(); 4738 spin_unlock(&last_t->lock); 4739 if (t) 4740 spin_lock(&t->lock); 4741 else 4742 __acquire(&t->lock); 4743 } 4744 /* annotation for sparse, lock not acquired in last iteration above */ 4745 __release(&t->lock); 4746 4747 /* 4748 * If this thread used poll, make sure we remove the waitqueue 4749 * from any epoll data structures holding it with POLLFREE. 4750 * waitqueue_active() is safe to use here because we're holding 4751 * the inner lock. 4752 */ 4753 if ((thread->looper & BINDER_LOOPER_STATE_POLL) && 4754 waitqueue_active(&thread->wait)) { 4755 wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE); 4756 } 4757 4758 binder_inner_proc_unlock(thread->proc); 4759 4760 /* 4761 * This is needed to avoid races between wake_up_poll() above and 4762 * ep_remove_waitqueue() called for other reasons (e.g. the epoll file 4763 * descriptor being closed); ep_remove_waitqueue() holds an RCU read 4764 * lock, so we can be sure it's done after calling synchronize_rcu().
4765 */ 4766 if (thread->looper & BINDER_LOOPER_STATE_POLL) 4767 synchronize_rcu(); 4768 4769 if (send_reply) 4770 binder_send_failed_reply(send_reply, BR_DEAD_REPLY); 4771 binder_release_work(proc, &thread->todo); 4772 binder_thread_dec_tmpref(thread); 4773 return active_transactions; 4774 } 4775 4776 static __poll_t binder_poll(struct file *filp, 4777 struct poll_table_struct *wait) 4778 { 4779 struct binder_proc *proc = filp->private_data; 4780 struct binder_thread *thread = NULL; 4781 bool wait_for_proc_work; 4782 4783 thread = binder_get_thread(proc); 4784 if (!thread) 4785 return POLLERR; 4786 4787 binder_inner_proc_lock(thread->proc); 4788 thread->looper |= BINDER_LOOPER_STATE_POLL; 4789 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); 4790 4791 binder_inner_proc_unlock(thread->proc); 4792 4793 poll_wait(filp, &thread->wait, wait); 4794 4795 if (binder_has_work(thread, wait_for_proc_work)) 4796 return EPOLLIN; 4797 4798 return 0; 4799 } 4800 4801 static int binder_ioctl_write_read(struct file *filp, 4802 unsigned int cmd, unsigned long arg, 4803 struct binder_thread *thread) 4804 { 4805 int ret = 0; 4806 struct binder_proc *proc = filp->private_data; 4807 unsigned int size = _IOC_SIZE(cmd); 4808 void __user *ubuf = (void __user *)arg; 4809 struct binder_write_read bwr; 4810 4811 if (size != sizeof(struct binder_write_read)) { 4812 ret = -EINVAL; 4813 goto out; 4814 } 4815 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { 4816 ret = -EFAULT; 4817 goto out; 4818 } 4819 binder_debug(BINDER_DEBUG_READ_WRITE, 4820 "%d:%d write %lld at %016llx, read %lld at %016llx\n", 4821 proc->pid, thread->pid, 4822 (u64)bwr.write_size, (u64)bwr.write_buffer, 4823 (u64)bwr.read_size, (u64)bwr.read_buffer); 4824 4825 if (bwr.write_size > 0) { 4826 ret = binder_thread_write(proc, thread, 4827 bwr.write_buffer, 4828 bwr.write_size, 4829 &bwr.write_consumed); 4830 trace_binder_write_done(ret); 4831 if (ret < 0) { 4832 bwr.read_consumed = 0; 4833 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 4834 ret = -EFAULT; 4835 goto out; 4836 } 4837 } 4838 if (bwr.read_size > 0) { 4839 ret = binder_thread_read(proc, thread, bwr.read_buffer, 4840 bwr.read_size, 4841 &bwr.read_consumed, 4842 filp->f_flags & O_NONBLOCK); 4843 trace_binder_read_done(ret); 4844 binder_inner_proc_lock(proc); 4845 if (!binder_worklist_empty_ilocked(&proc->todo)) 4846 binder_wakeup_proc_ilocked(proc); 4847 binder_inner_proc_unlock(proc); 4848 if (ret < 0) { 4849 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 4850 ret = -EFAULT; 4851 goto out; 4852 } 4853 } 4854 binder_debug(BINDER_DEBUG_READ_WRITE, 4855 "%d:%d wrote %lld of %lld, read return %lld of %lld\n", 4856 proc->pid, thread->pid, 4857 (u64)bwr.write_consumed, (u64)bwr.write_size, 4858 (u64)bwr.read_consumed, (u64)bwr.read_size); 4859 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { 4860 ret = -EFAULT; 4861 goto out; 4862 } 4863 out: 4864 return ret; 4865 } 4866 4867 static int binder_ioctl_set_ctx_mgr(struct file *filp, 4868 struct flat_binder_object *fbo) 4869 { 4870 int ret = 0; 4871 struct binder_proc *proc = filp->private_data; 4872 struct binder_context *context = proc->context; 4873 struct binder_node *new_node; 4874 kuid_t curr_euid = current_euid(); 4875 4876 mutex_lock(&context->context_mgr_node_lock); 4877 if (context->binder_context_mgr_node) { 4878 pr_err("BINDER_SET_CONTEXT_MGR already set\n"); 4879 ret = -EBUSY; 4880 goto out; 4881 } 4882 ret = security_binder_set_context_mgr(proc->tsk); 4883 if (ret < 0) 4884 goto out; 4885 if 
(uid_valid(context->binder_context_mgr_uid)) { 4886 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) { 4887 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", 4888 from_kuid(&init_user_ns, curr_euid), 4889 from_kuid(&init_user_ns, 4890 context->binder_context_mgr_uid)); 4891 ret = -EPERM; 4892 goto out; 4893 } 4894 } else { 4895 context->binder_context_mgr_uid = curr_euid; 4896 } 4897 new_node = binder_new_node(proc, fbo); 4898 if (!new_node) { 4899 ret = -ENOMEM; 4900 goto out; 4901 } 4902 binder_node_lock(new_node); 4903 new_node->local_weak_refs++; 4904 new_node->local_strong_refs++; 4905 new_node->has_strong_ref = 1; 4906 new_node->has_weak_ref = 1; 4907 context->binder_context_mgr_node = new_node; 4908 binder_node_unlock(new_node); 4909 binder_put_node(new_node); 4910 out: 4911 mutex_unlock(&context->context_mgr_node_lock); 4912 return ret; 4913 } 4914 4915 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc, 4916 struct binder_node_info_for_ref *info) 4917 { 4918 struct binder_node *node; 4919 struct binder_context *context = proc->context; 4920 __u32 handle = info->handle; 4921 4922 if (info->strong_count || info->weak_count || info->reserved1 || 4923 info->reserved2 || info->reserved3) { 4924 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.", 4925 proc->pid); 4926 return -EINVAL; 4927 } 4928 4929 /* This ioctl may only be used by the context manager */ 4930 mutex_lock(&context->context_mgr_node_lock); 4931 if (!context->binder_context_mgr_node || 4932 context->binder_context_mgr_node->proc != proc) { 4933 mutex_unlock(&context->context_mgr_node_lock); 4934 return -EPERM; 4935 } 4936 mutex_unlock(&context->context_mgr_node_lock); 4937 4938 node = binder_get_node_from_ref(proc, handle, true, NULL); 4939 if (!node) 4940 return -EINVAL; 4941 4942 info->strong_count = node->local_strong_refs + 4943 node->internal_strong_refs; 4944 info->weak_count = node->local_weak_refs; 4945 4946 binder_put_node(node); 4947 4948 return 0; 4949 } 4950 4951 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc, 4952 struct binder_node_debug_info *info) 4953 { 4954 struct rb_node *n; 4955 binder_uintptr_t ptr = info->ptr; 4956 4957 memset(info, 0, sizeof(*info)); 4958 4959 binder_inner_proc_lock(proc); 4960 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 4961 struct binder_node *node = rb_entry(n, struct binder_node, 4962 rb_node); 4963 if (node->ptr > ptr) { 4964 info->ptr = node->ptr; 4965 info->cookie = node->cookie; 4966 info->has_strong_ref = node->has_strong_ref; 4967 info->has_weak_ref = node->has_weak_ref; 4968 break; 4969 } 4970 } 4971 binder_inner_proc_unlock(proc); 4972 4973 return 0; 4974 } 4975 4976 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 4977 { 4978 int ret; 4979 struct binder_proc *proc = filp->private_data; 4980 struct binder_thread *thread; 4981 unsigned int size = _IOC_SIZE(cmd); 4982 void __user *ubuf = (void __user *)arg; 4983 4984 /*pr_info("binder_ioctl: %d:%d %x %lx\n", 4985 proc->pid, current->pid, cmd, arg);*/ 4986 4987 binder_selftest_alloc(&proc->alloc); 4988 4989 trace_binder_ioctl(cmd, arg); 4990 4991 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 4992 if (ret) 4993 goto err_unlocked; 4994 4995 thread = binder_get_thread(proc); 4996 if (thread == NULL) { 4997 ret = -ENOMEM; 4998 goto err; 4999 } 5000 5001 switch (cmd) { 5002 case BINDER_WRITE_READ: 5003 ret = binder_ioctl_write_read(filp, cmd, arg, thread); 
5004 if (ret) 5005 goto err; 5006 break; 5007 case BINDER_SET_MAX_THREADS: { 5008 int max_threads; 5009 5010 if (copy_from_user(&max_threads, ubuf, 5011 sizeof(max_threads))) { 5012 ret = -EINVAL; 5013 goto err; 5014 } 5015 binder_inner_proc_lock(proc); 5016 proc->max_threads = max_threads; 5017 binder_inner_proc_unlock(proc); 5018 break; 5019 } 5020 case BINDER_SET_CONTEXT_MGR_EXT: { 5021 struct flat_binder_object fbo; 5022 5023 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) { 5024 ret = -EINVAL; 5025 goto err; 5026 } 5027 ret = binder_ioctl_set_ctx_mgr(filp, &fbo); 5028 if (ret) 5029 goto err; 5030 break; 5031 } 5032 case BINDER_SET_CONTEXT_MGR: 5033 ret = binder_ioctl_set_ctx_mgr(filp, NULL); 5034 if (ret) 5035 goto err; 5036 break; 5037 case BINDER_THREAD_EXIT: 5038 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", 5039 proc->pid, thread->pid); 5040 binder_thread_release(proc, thread); 5041 thread = NULL; 5042 break; 5043 case BINDER_VERSION: { 5044 struct binder_version __user *ver = ubuf; 5045 5046 if (size != sizeof(struct binder_version)) { 5047 ret = -EINVAL; 5048 goto err; 5049 } 5050 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, 5051 &ver->protocol_version)) { 5052 ret = -EINVAL; 5053 goto err; 5054 } 5055 break; 5056 } 5057 case BINDER_GET_NODE_INFO_FOR_REF: { 5058 struct binder_node_info_for_ref info; 5059 5060 if (copy_from_user(&info, ubuf, sizeof(info))) { 5061 ret = -EFAULT; 5062 goto err; 5063 } 5064 5065 ret = binder_ioctl_get_node_info_for_ref(proc, &info); 5066 if (ret < 0) 5067 goto err; 5068 5069 if (copy_to_user(ubuf, &info, sizeof(info))) { 5070 ret = -EFAULT; 5071 goto err; 5072 } 5073 5074 break; 5075 } 5076 case BINDER_GET_NODE_DEBUG_INFO: { 5077 struct binder_node_debug_info info; 5078 5079 if (copy_from_user(&info, ubuf, sizeof(info))) { 5080 ret = -EFAULT; 5081 goto err; 5082 } 5083 5084 ret = binder_ioctl_get_node_debug_info(proc, &info); 5085 if (ret < 0) 5086 goto err; 5087 5088 if (copy_to_user(ubuf, &info, sizeof(info))) { 5089 ret = -EFAULT; 5090 goto err; 5091 } 5092 break; 5093 } 5094 default: 5095 ret = -EINVAL; 5096 goto err; 5097 } 5098 ret = 0; 5099 err: 5100 if (thread) 5101 thread->looper_need_return = false; 5102 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 5103 if (ret && ret != -ERESTARTSYS) 5104 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); 5105 err_unlocked: 5106 trace_binder_ioctl_done(ret); 5107 return ret; 5108 } 5109 5110 static void binder_vma_open(struct vm_area_struct *vma) 5111 { 5112 struct binder_proc *proc = vma->vm_private_data; 5113 5114 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5115 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 5116 proc->pid, vma->vm_start, vma->vm_end, 5117 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 5118 (unsigned long)pgprot_val(vma->vm_page_prot)); 5119 } 5120 5121 static void binder_vma_close(struct vm_area_struct *vma) 5122 { 5123 struct binder_proc *proc = vma->vm_private_data; 5124 5125 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5126 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 5127 proc->pid, vma->vm_start, vma->vm_end, 5128 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 5129 (unsigned long)pgprot_val(vma->vm_page_prot)); 5130 binder_alloc_vma_close(&proc->alloc); 5131 } 5132 5133 static vm_fault_t binder_vm_fault(struct vm_fault *vmf) 5134 { 5135 return VM_FAULT_SIGBUS; 5136 } 5137 5138 static const struct vm_operations_struct binder_vm_ops = { 5139 .open = binder_vma_open, 5140 .close = 
binder_vma_close, 5141 .fault = binder_vm_fault, 5142 }; 5143 5144 static int binder_mmap(struct file *filp, struct vm_area_struct *vma) 5145 { 5146 int ret; 5147 struct binder_proc *proc = filp->private_data; 5148 const char *failure_string; 5149 5150 if (proc->tsk != current->group_leader) 5151 return -EINVAL; 5152 5153 if ((vma->vm_end - vma->vm_start) > SZ_4M) 5154 vma->vm_end = vma->vm_start + SZ_4M; 5155 5156 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5157 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", 5158 __func__, proc->pid, vma->vm_start, vma->vm_end, 5159 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 5160 (unsigned long)pgprot_val(vma->vm_page_prot)); 5161 5162 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { 5163 ret = -EPERM; 5164 failure_string = "bad vm_flags"; 5165 goto err_bad_arg; 5166 } 5167 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP; 5168 vma->vm_flags &= ~VM_MAYWRITE; 5169 5170 vma->vm_ops = &binder_vm_ops; 5171 vma->vm_private_data = proc; 5172 5173 ret = binder_alloc_mmap_handler(&proc->alloc, vma); 5174 if (ret) 5175 return ret; 5176 return 0; 5177 5178 err_bad_arg: 5179 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__, 5180 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); 5181 return ret; 5182 } 5183 5184 static int binder_open(struct inode *nodp, struct file *filp) 5185 { 5186 struct binder_proc *proc; 5187 struct binder_device *binder_dev; 5188 5189 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__, 5190 current->group_leader->pid, current->pid); 5191 5192 proc = kzalloc(sizeof(*proc), GFP_KERNEL); 5193 if (proc == NULL) 5194 return -ENOMEM; 5195 spin_lock_init(&proc->inner_lock); 5196 spin_lock_init(&proc->outer_lock); 5197 get_task_struct(current->group_leader); 5198 proc->tsk = current->group_leader; 5199 INIT_LIST_HEAD(&proc->todo); 5200 proc->default_priority = task_nice(current); 5201 /* binderfs stashes devices in i_private */ 5202 if (is_binderfs_device(nodp)) 5203 binder_dev = nodp->i_private; 5204 else 5205 binder_dev = container_of(filp->private_data, 5206 struct binder_device, miscdev); 5207 proc->context = &binder_dev->context; 5208 binder_alloc_init(&proc->alloc); 5209 5210 binder_stats_created(BINDER_STAT_PROC); 5211 proc->pid = current->group_leader->pid; 5212 INIT_LIST_HEAD(&proc->delivered_death); 5213 INIT_LIST_HEAD(&proc->waiting_threads); 5214 filp->private_data = proc; 5215 5216 mutex_lock(&binder_procs_lock); 5217 hlist_add_head(&proc->proc_node, &binder_procs); 5218 mutex_unlock(&binder_procs_lock); 5219 5220 if (binder_debugfs_dir_entry_proc) { 5221 char strbuf[11]; 5222 5223 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 5224 /* 5225 * proc debug entries are shared between contexts, so 5226 * this will fail if the process tries to open the driver 5227 * again with a different context. The printing code will 5228 * anyway print all contexts that a given PID has, so this 5229 * is not a problem.
5230 */ 5231 proc->debugfs_entry = debugfs_create_file(strbuf, 0444, 5232 binder_debugfs_dir_entry_proc, 5233 (void *)(unsigned long)proc->pid, 5234 &proc_fops); 5235 } 5236 5237 return 0; 5238 } 5239 5240 static int binder_flush(struct file *filp, fl_owner_t id) 5241 { 5242 struct binder_proc *proc = filp->private_data; 5243 5244 binder_defer_work(proc, BINDER_DEFERRED_FLUSH); 5245 5246 return 0; 5247 } 5248 5249 static void binder_deferred_flush(struct binder_proc *proc) 5250 { 5251 struct rb_node *n; 5252 int wake_count = 0; 5253 5254 binder_inner_proc_lock(proc); 5255 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { 5256 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); 5257 5258 thread->looper_need_return = true; 5259 if (thread->looper & BINDER_LOOPER_STATE_WAITING) { 5260 wake_up_interruptible(&thread->wait); 5261 wake_count++; 5262 } 5263 } 5264 binder_inner_proc_unlock(proc); 5265 5266 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5267 "binder_flush: %d woke %d threads\n", proc->pid, 5268 wake_count); 5269 } 5270 5271 static int binder_release(struct inode *nodp, struct file *filp) 5272 { 5273 struct binder_proc *proc = filp->private_data; 5274 5275 debugfs_remove(proc->debugfs_entry); 5276 binder_defer_work(proc, BINDER_DEFERRED_RELEASE); 5277 5278 return 0; 5279 } 5280 5281 static int binder_node_release(struct binder_node *node, int refs) 5282 { 5283 struct binder_ref *ref; 5284 int death = 0; 5285 struct binder_proc *proc = node->proc; 5286 5287 binder_release_work(proc, &node->async_todo); 5288 5289 binder_node_lock(node); 5290 binder_inner_proc_lock(proc); 5291 binder_dequeue_work_ilocked(&node->work); 5292 /* 5293 * The caller must have taken a temporary ref on the node. 5294 */ 5295 BUG_ON(!node->tmp_refs); 5296 if (hlist_empty(&node->refs) && node->tmp_refs == 1) { 5297 binder_inner_proc_unlock(proc); 5298 binder_node_unlock(node); 5299 binder_free_node(node); 5300 5301 return refs; 5302 } 5303 5304 node->proc = NULL; 5305 node->local_strong_refs = 0; 5306 node->local_weak_refs = 0; 5307 binder_inner_proc_unlock(proc); 5308 5309 spin_lock(&binder_dead_nodes_lock); 5310 hlist_add_head(&node->dead_node, &binder_dead_nodes); 5311 spin_unlock(&binder_dead_nodes_lock); 5312 5313 hlist_for_each_entry(ref, &node->refs, node_entry) { 5314 refs++; 5315 /* 5316 * Need the node lock to synchronize 5317 * with new notification requests and the 5318 * inner lock to synchronize with queued 5319 * death notifications.
5320 */ 5321 binder_inner_proc_lock(ref->proc); 5322 if (!ref->death) { 5323 binder_inner_proc_unlock(ref->proc); 5324 continue; 5325 } 5326 5327 death++; 5328 5329 BUG_ON(!list_empty(&ref->death->work.entry)); 5330 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 5331 binder_enqueue_work_ilocked(&ref->death->work, 5332 &ref->proc->todo); 5333 binder_wakeup_proc_ilocked(ref->proc); 5334 binder_inner_proc_unlock(ref->proc); 5335 } 5336 5337 binder_debug(BINDER_DEBUG_DEAD_BINDER, 5338 "node %d now dead, refs %d, death %d\n", 5339 node->debug_id, refs, death); 5340 binder_node_unlock(node); 5341 binder_put_node(node); 5342 5343 return refs; 5344 } 5345 5346 static void binder_deferred_release(struct binder_proc *proc) 5347 { 5348 struct binder_context *context = proc->context; 5349 struct rb_node *n; 5350 int threads, nodes, incoming_refs, outgoing_refs, active_transactions; 5351 5352 mutex_lock(&binder_procs_lock); 5353 hlist_del(&proc->proc_node); 5354 mutex_unlock(&binder_procs_lock); 5355 5356 mutex_lock(&context->context_mgr_node_lock); 5357 if (context->binder_context_mgr_node && 5358 context->binder_context_mgr_node->proc == proc) { 5359 binder_debug(BINDER_DEBUG_DEAD_BINDER, 5360 "%s: %d context_mgr_node gone\n", 5361 __func__, proc->pid); 5362 context->binder_context_mgr_node = NULL; 5363 } 5364 mutex_unlock(&context->context_mgr_node_lock); 5365 binder_inner_proc_lock(proc); 5366 /* 5367 * Make sure proc stays alive after we 5368 * remove all the threads 5369 */ 5370 proc->tmp_ref++; 5371 5372 proc->is_dead = true; 5373 threads = 0; 5374 active_transactions = 0; 5375 while ((n = rb_first(&proc->threads))) { 5376 struct binder_thread *thread; 5377 5378 thread = rb_entry(n, struct binder_thread, rb_node); 5379 binder_inner_proc_unlock(proc); 5380 threads++; 5381 active_transactions += binder_thread_release(proc, thread); 5382 binder_inner_proc_lock(proc); 5383 } 5384 5385 nodes = 0; 5386 incoming_refs = 0; 5387 while ((n = rb_first(&proc->nodes))) { 5388 struct binder_node *node; 5389 5390 node = rb_entry(n, struct binder_node, rb_node); 5391 nodes++; 5392 /* 5393 * take a temporary ref on the node before 5394 * calling binder_node_release() which will either 5395 * kfree() the node or call binder_put_node() 5396 */ 5397 binder_inc_node_tmpref_ilocked(node); 5398 rb_erase(&node->rb_node, &proc->nodes); 5399 binder_inner_proc_unlock(proc); 5400 incoming_refs = binder_node_release(node, incoming_refs); 5401 binder_inner_proc_lock(proc); 5402 } 5403 binder_inner_proc_unlock(proc); 5404 5405 outgoing_refs = 0; 5406 binder_proc_lock(proc); 5407 while ((n = rb_first(&proc->refs_by_desc))) { 5408 struct binder_ref *ref; 5409 5410 ref = rb_entry(n, struct binder_ref, rb_node_desc); 5411 outgoing_refs++; 5412 binder_cleanup_ref_olocked(ref); 5413 binder_proc_unlock(proc); 5414 binder_free_ref(ref); 5415 binder_proc_lock(proc); 5416 } 5417 binder_proc_unlock(proc); 5418 5419 binder_release_work(proc, &proc->todo); 5420 binder_release_work(proc, &proc->delivered_death); 5421 5422 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5423 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n", 5424 __func__, proc->pid, threads, nodes, incoming_refs, 5425 outgoing_refs, active_transactions); 5426 5427 binder_proc_dec_tmpref(proc); 5428 } 5429 5430 static void binder_deferred_func(struct work_struct *work) 5431 { 5432 struct binder_proc *proc; 5433 5434 int defer; 5435 5436 do { 5437 mutex_lock(&binder_deferred_lock); 5438 if (!hlist_empty(&binder_deferred_list)) { 5439 proc = 
hlist_entry(binder_deferred_list.first, 5440 struct binder_proc, deferred_work_node); 5441 hlist_del_init(&proc->deferred_work_node); 5442 defer = proc->deferred_work; 5443 proc->deferred_work = 0; 5444 } else { 5445 proc = NULL; 5446 defer = 0; 5447 } 5448 mutex_unlock(&binder_deferred_lock); 5449 5450 if (defer & BINDER_DEFERRED_FLUSH) 5451 binder_deferred_flush(proc); 5452 5453 if (defer & BINDER_DEFERRED_RELEASE) 5454 binder_deferred_release(proc); /* frees proc */ 5455 } while (proc); 5456 } 5457 static DECLARE_WORK(binder_deferred_work, binder_deferred_func); 5458 5459 static void 5460 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) 5461 { 5462 mutex_lock(&binder_deferred_lock); 5463 proc->deferred_work |= defer; 5464 if (hlist_unhashed(&proc->deferred_work_node)) { 5465 hlist_add_head(&proc->deferred_work_node, 5466 &binder_deferred_list); 5467 schedule_work(&binder_deferred_work); 5468 } 5469 mutex_unlock(&binder_deferred_lock); 5470 } 5471 5472 static void print_binder_transaction_ilocked(struct seq_file *m, 5473 struct binder_proc *proc, 5474 const char *prefix, 5475 struct binder_transaction *t) 5476 { 5477 struct binder_proc *to_proc; 5478 struct binder_buffer *buffer = t->buffer; 5479 5480 spin_lock(&t->lock); 5481 to_proc = t->to_proc; 5482 seq_printf(m, 5483 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d", 5484 prefix, t->debug_id, t, 5485 t->from ? t->from->proc->pid : 0, 5486 t->from ? t->from->pid : 0, 5487 to_proc ? to_proc->pid : 0, 5488 t->to_thread ? t->to_thread->pid : 0, 5489 t->code, t->flags, t->priority, t->need_reply); 5490 spin_unlock(&t->lock); 5491 5492 if (proc != to_proc) { 5493 /* 5494 * Can only safely deref buffer if we are holding the 5495 * correct proc inner lock for this node 5496 */ 5497 seq_puts(m, "\n"); 5498 return; 5499 } 5500 5501 if (buffer == NULL) { 5502 seq_puts(m, " buffer free\n"); 5503 return; 5504 } 5505 if (buffer->target_node) 5506 seq_printf(m, " node %d", buffer->target_node->debug_id); 5507 seq_printf(m, " size %zd:%zd data %pK\n", 5508 buffer->data_size, buffer->offsets_size, 5509 buffer->user_data); 5510 } 5511 5512 static void print_binder_work_ilocked(struct seq_file *m, 5513 struct binder_proc *proc, 5514 const char *prefix, 5515 const char *transaction_prefix, 5516 struct binder_work *w) 5517 { 5518 struct binder_node *node; 5519 struct binder_transaction *t; 5520 5521 switch (w->type) { 5522 case BINDER_WORK_TRANSACTION: 5523 t = container_of(w, struct binder_transaction, work); 5524 print_binder_transaction_ilocked( 5525 m, proc, transaction_prefix, t); 5526 break; 5527 case BINDER_WORK_RETURN_ERROR: { 5528 struct binder_error *e = container_of( 5529 w, struct binder_error, work); 5530 5531 seq_printf(m, "%stransaction error: %u\n", 5532 prefix, e->cmd); 5533 } break; 5534 case BINDER_WORK_TRANSACTION_COMPLETE: 5535 seq_printf(m, "%stransaction complete\n", prefix); 5536 break; 5537 case BINDER_WORK_NODE: 5538 node = container_of(w, struct binder_node, work); 5539 seq_printf(m, "%snode work %d: u%016llx c%016llx\n", 5540 prefix, node->debug_id, 5541 (u64)node->ptr, (u64)node->cookie); 5542 break; 5543 case BINDER_WORK_DEAD_BINDER: 5544 seq_printf(m, "%shas dead binder\n", prefix); 5545 break; 5546 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 5547 seq_printf(m, "%shas cleared dead binder\n", prefix); 5548 break; 5549 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: 5550 seq_printf(m, "%shas cleared death notification\n", prefix); 5551 break; 5552 default: 5553 seq_printf(m, "%sunknown work: 
type %d\n", prefix, w->type); 5554 break; 5555 } 5556 } 5557 5558 static void print_binder_thread_ilocked(struct seq_file *m, 5559 struct binder_thread *thread, 5560 int print_always) 5561 { 5562 struct binder_transaction *t; 5563 struct binder_work *w; 5564 size_t start_pos = m->count; 5565 size_t header_pos; 5566 5567 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n", 5568 thread->pid, thread->looper, 5569 thread->looper_need_return, 5570 atomic_read(&thread->tmp_ref)); 5571 header_pos = m->count; 5572 t = thread->transaction_stack; 5573 while (t) { 5574 if (t->from == thread) { 5575 print_binder_transaction_ilocked(m, thread->proc, 5576 " outgoing transaction", t); 5577 t = t->from_parent; 5578 } else if (t->to_thread == thread) { 5579 print_binder_transaction_ilocked(m, thread->proc, 5580 " incoming transaction", t); 5581 t = t->to_parent; 5582 } else { 5583 print_binder_transaction_ilocked(m, thread->proc, 5584 " bad transaction", t); 5585 t = NULL; 5586 } 5587 } 5588 list_for_each_entry(w, &thread->todo, entry) { 5589 print_binder_work_ilocked(m, thread->proc, " ", 5590 " pending transaction", w); 5591 } 5592 if (!print_always && m->count == header_pos) 5593 m->count = start_pos; 5594 } 5595 5596 static void print_binder_node_nilocked(struct seq_file *m, 5597 struct binder_node *node) 5598 { 5599 struct binder_ref *ref; 5600 struct binder_work *w; 5601 int count; 5602 5603 count = 0; 5604 hlist_for_each_entry(ref, &node->refs, node_entry) 5605 count++; 5606 5607 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d", 5608 node->debug_id, (u64)node->ptr, (u64)node->cookie, 5609 node->has_strong_ref, node->has_weak_ref, 5610 node->local_strong_refs, node->local_weak_refs, 5611 node->internal_strong_refs, count, node->tmp_refs); 5612 if (count) { 5613 seq_puts(m, " proc"); 5614 hlist_for_each_entry(ref, &node->refs, node_entry) 5615 seq_printf(m, " %d", ref->proc->pid); 5616 } 5617 seq_puts(m, "\n"); 5618 if (node->proc) { 5619 list_for_each_entry(w, &node->async_todo, entry) 5620 print_binder_work_ilocked(m, node->proc, " ", 5621 " pending async transaction", w); 5622 } 5623 } 5624 5625 static void print_binder_ref_olocked(struct seq_file *m, 5626 struct binder_ref *ref) 5627 { 5628 binder_node_lock(ref->node); 5629 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n", 5630 ref->data.debug_id, ref->data.desc, 5631 ref->node->proc ? "" : "dead ", 5632 ref->node->debug_id, ref->data.strong, 5633 ref->data.weak, ref->death); 5634 binder_node_unlock(ref->node); 5635 } 5636 5637 static void print_binder_proc(struct seq_file *m, 5638 struct binder_proc *proc, int print_all) 5639 { 5640 struct binder_work *w; 5641 struct rb_node *n; 5642 size_t start_pos = m->count; 5643 size_t header_pos; 5644 struct binder_node *last_node = NULL; 5645 5646 seq_printf(m, "proc %d\n", proc->pid); 5647 seq_printf(m, "context %s\n", proc->context->name); 5648 header_pos = m->count; 5649 5650 binder_inner_proc_lock(proc); 5651 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 5652 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread, 5653 rb_node), print_all); 5654 5655 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 5656 struct binder_node *node = rb_entry(n, struct binder_node, 5657 rb_node); 5658 if (!print_all && !node->has_async_transaction) 5659 continue; 5660 5661 /* 5662 * take a temporary reference on the node so it 5663 * survives and isn't removed from the tree 5664 * while we print it. 
static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					"    pending async transaction", w);
	}
}

static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}
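/*
 * Note on the start_pos/header_pos pattern used above and in
 * print_binder_proc() below: seq_file accumulates output in m->buf with
 * m->count as the write offset, so winding m->count back to start_pos
 * un-prints a speculatively written header when nothing followed it.
 * Minimal sketch of the idiom:
 *
 *	size_t start_pos = m->count;	   remember where we started
 *	seq_puts(m, "header\n");	   print the header eagerly
 *	header_pos = m->count;
 *	...				   maybe print details
 *	if (!print_always && m->count == header_pos)
 *		m->count = start_pos;	   nothing followed: drop header
 */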
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (!print_all && !node->has_async_transaction)
			continue;

		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							     struct binder_ref,
							     rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}

static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
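/*
 * The three tables above decode the counters in struct binder_stats;
 * their order must track the BC_* and BR_* command numbers in
 * uapi/linux/android/binder.h (the stats arrays are indexed by
 * _IOC_NR(cmd) elsewhere in this file).  The BUILD_BUG_ON()s in
 * print_binder_stats() below turn a size mismatch into a compile-time
 * error, e.g.:
 *
 *	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
 *		     ARRAY_SIZE(binder_command_strings));
 */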
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}

static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}
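/*
 * Illustrative (made-up) excerpt of the per-proc block the function
 * above emits into the debugfs "stats" file, assembled from the format
 * strings it uses:
 *
 *	proc 1234
 *	context binder
 *	  threads: 4
 *	  requested threads: 0+2/15
 *	  ready threads 1
 *	  free async space 520192
 *	  nodes: 7
 *	  refs: 12 s 12 w 12
 *	  buffers: 1
 *	  pending transactions: 0
 *	  BC_TRANSACTION: 100
 *	  BR_REPLY: 100
 */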
static int state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}

static int transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

DEFINE_SHOW_ATTRIBUTE(state);
DEFINE_SHOW_ATTRIBUTE(stats);
DEFINE_SHOW_ATTRIBUTE(transactions);
DEFINE_SHOW_ATTRIBUTE(transaction_log);
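/*
 * DEFINE_SHOW_ATTRIBUTE(name) (from linux/seq_file.h) generates the
 * glue between each *_show() routine above and a file_operations.
 * Roughly (simplified sketch, not the exact macro expansion):
 *
 *	static int state_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, state_show, inode->i_private);
 *	}
 *
 *	static const struct file_operations state_fops = {
 *		.owner	 = THIS_MODULE,
 *		.open	 = state_open,
 *		.read	 = seq_read,
 *		.llseek	 = seq_lseek,
 *		.release = single_release,
 *	};
 */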
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}

static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;
	char *device_names = NULL;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &state_fops);
		debugfs_create_file("stats",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &stats_fops);
		debugfs_create_file("transactions",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &transactions_fops);
		debugfs_create_file("transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &transaction_log_fops);
	}

	if (strcmp(binder_devices_param, "") != 0) {
		/*
		 * Copy the module_parameter string, because we don't want to
		 * tokenize it in-place.
		 */
		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
		if (!device_names) {
			ret = -ENOMEM;
			goto err_alloc_device_names_failed;
		}

		device_tmp = device_names;
		while ((device_name = strsep(&device_tmp, ","))) {
			ret = init_binder_device(device_name);
			if (ret)
				goto err_init_binder_device_failed;
		}
	}

	ret = init_binderfs();
	if (ret)
		goto err_init_binder_device_failed;

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");