/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack.
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate the required
 * lock in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
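 *
 * Illustrative sketch only (not a real code path; most callers need
 * just one or two of these locks at a time): a caller honoring the
 * order above would nest the helpers like this:
 *
 *	binder_proc_lock(proc);			(1: proc->outer_lock)
 *	binder_node_lock(node);			(2: node->lock)
 *	binder_inner_proc_lock(proc);		(3: proc->inner_lock)
 *	... access refs, node fields and todo lists ...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);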
50 */ 51 52 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 53 54 #include <linux/fdtable.h> 55 #include <linux/file.h> 56 #include <linux/freezer.h> 57 #include <linux/fs.h> 58 #include <linux/list.h> 59 #include <linux/miscdevice.h> 60 #include <linux/module.h> 61 #include <linux/mutex.h> 62 #include <linux/nsproxy.h> 63 #include <linux/poll.h> 64 #include <linux/debugfs.h> 65 #include <linux/rbtree.h> 66 #include <linux/sched/signal.h> 67 #include <linux/sched/mm.h> 68 #include <linux/seq_file.h> 69 #include <linux/uaccess.h> 70 #include <linux/pid_namespace.h> 71 #include <linux/security.h> 72 #include <linux/spinlock.h> 73 #include <linux/ratelimit.h> 74 #include <linux/syscalls.h> 75 #include <linux/task_work.h> 76 77 #include <uapi/linux/android/binder.h> 78 79 #include <asm/cacheflush.h> 80 81 #include "binder_alloc.h" 82 #include "binder_internal.h" 83 #include "binder_trace.h" 84 85 static HLIST_HEAD(binder_deferred_list); 86 static DEFINE_MUTEX(binder_deferred_lock); 87 88 static HLIST_HEAD(binder_devices); 89 static HLIST_HEAD(binder_procs); 90 static DEFINE_MUTEX(binder_procs_lock); 91 92 static HLIST_HEAD(binder_dead_nodes); 93 static DEFINE_SPINLOCK(binder_dead_nodes_lock); 94 95 static struct dentry *binder_debugfs_dir_entry_root; 96 static struct dentry *binder_debugfs_dir_entry_proc; 97 static atomic_t binder_last_id; 98 99 static int proc_show(struct seq_file *m, void *unused); 100 DEFINE_SHOW_ATTRIBUTE(proc); 101 102 /* This is only defined in include/asm-arm/sizes.h */ 103 #ifndef SZ_1K 104 #define SZ_1K 0x400 105 #endif 106 107 #ifndef SZ_4M 108 #define SZ_4M 0x400000 109 #endif 110 111 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE) 112 113 enum { 114 BINDER_DEBUG_USER_ERROR = 1U << 0, 115 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1, 116 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2, 117 BINDER_DEBUG_OPEN_CLOSE = 1U << 3, 118 BINDER_DEBUG_DEAD_BINDER = 1U << 4, 119 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5, 120 BINDER_DEBUG_READ_WRITE = 1U << 6, 121 BINDER_DEBUG_USER_REFS = 1U << 7, 122 BINDER_DEBUG_THREADS = 1U << 8, 123 BINDER_DEBUG_TRANSACTION = 1U << 9, 124 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10, 125 BINDER_DEBUG_FREE_BUFFER = 1U << 11, 126 BINDER_DEBUG_INTERNAL_REFS = 1U << 12, 127 BINDER_DEBUG_PRIORITY_CAP = 1U << 13, 128 BINDER_DEBUG_SPINLOCKS = 1U << 14, 129 }; 130 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | 131 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; 132 module_param_named(debug_mask, binder_debug_mask, uint, 0644); 133 134 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES; 135 module_param_named(devices, binder_devices_param, charp, 0444); 136 137 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); 138 static int binder_stop_on_user_error; 139 140 static int binder_set_stop_on_user_error(const char *val, 141 const struct kernel_param *kp) 142 { 143 int ret; 144 145 ret = param_set_int(val, kp); 146 if (binder_stop_on_user_error < 2) 147 wake_up(&binder_user_error_wait); 148 return ret; 149 } 150 module_param_call(stop_on_user_error, binder_set_stop_on_user_error, 151 param_get_int, &binder_stop_on_user_error, 0644); 152 153 #define binder_debug(mask, x...) \ 154 do { \ 155 if (binder_debug_mask & mask) \ 156 pr_info_ratelimited(x); \ 157 } while (0) 158 159 #define binder_user_error(x...) 
\ 160 do { \ 161 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \ 162 pr_info_ratelimited(x); \ 163 if (binder_stop_on_user_error) \ 164 binder_stop_on_user_error = 2; \ 165 } while (0) 166 167 #define to_flat_binder_object(hdr) \ 168 container_of(hdr, struct flat_binder_object, hdr) 169 170 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr) 171 172 #define to_binder_buffer_object(hdr) \ 173 container_of(hdr, struct binder_buffer_object, hdr) 174 175 #define to_binder_fd_array_object(hdr) \ 176 container_of(hdr, struct binder_fd_array_object, hdr) 177 178 enum binder_stat_types { 179 BINDER_STAT_PROC, 180 BINDER_STAT_THREAD, 181 BINDER_STAT_NODE, 182 BINDER_STAT_REF, 183 BINDER_STAT_DEATH, 184 BINDER_STAT_TRANSACTION, 185 BINDER_STAT_TRANSACTION_COMPLETE, 186 BINDER_STAT_COUNT 187 }; 188 189 struct binder_stats { 190 atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1]; 191 atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1]; 192 atomic_t obj_created[BINDER_STAT_COUNT]; 193 atomic_t obj_deleted[BINDER_STAT_COUNT]; 194 }; 195 196 static struct binder_stats binder_stats; 197 198 static inline void binder_stats_deleted(enum binder_stat_types type) 199 { 200 atomic_inc(&binder_stats.obj_deleted[type]); 201 } 202 203 static inline void binder_stats_created(enum binder_stat_types type) 204 { 205 atomic_inc(&binder_stats.obj_created[type]); 206 } 207 208 struct binder_transaction_log_entry { 209 int debug_id; 210 int debug_id_done; 211 int call_type; 212 int from_proc; 213 int from_thread; 214 int target_handle; 215 int to_proc; 216 int to_thread; 217 int to_node; 218 int data_size; 219 int offsets_size; 220 int return_error_line; 221 uint32_t return_error; 222 uint32_t return_error_param; 223 const char *context_name; 224 }; 225 struct binder_transaction_log { 226 atomic_t cur; 227 bool full; 228 struct binder_transaction_log_entry entry[32]; 229 }; 230 static struct binder_transaction_log binder_transaction_log; 231 static struct binder_transaction_log binder_transaction_log_failed; 232 233 static struct binder_transaction_log_entry *binder_transaction_log_add( 234 struct binder_transaction_log *log) 235 { 236 struct binder_transaction_log_entry *e; 237 unsigned int cur = atomic_inc_return(&log->cur); 238 239 if (cur >= ARRAY_SIZE(log->entry)) 240 log->full = true; 241 e = &log->entry[cur % ARRAY_SIZE(log->entry)]; 242 WRITE_ONCE(e->debug_id_done, 0); 243 /* 244 * write-barrier to synchronize access to e->debug_id_done. 245 * We make sure the initialized 0 value is seen before 246 * memset() other fields are zeroed by memset. 247 */ 248 smp_wmb(); 249 memset(e, 0, sizeof(*e)); 250 return e; 251 } 252 253 /** 254 * struct binder_work - work enqueued on a worklist 255 * @entry: node enqueued on list 256 * @type: type of work to be performed 257 * 258 * There are separate work lists for proc, thread, and node (async). 
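 *
 * The lists referred to here are the ones named in the locking
 * overview at the top of this file: proc->todo, thread->todo and
 * node->async_todo, all protected by the owning proc->inner_lock.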
259 */ 260 struct binder_work { 261 struct list_head entry; 262 263 enum { 264 BINDER_WORK_TRANSACTION = 1, 265 BINDER_WORK_TRANSACTION_COMPLETE, 266 BINDER_WORK_RETURN_ERROR, 267 BINDER_WORK_NODE, 268 BINDER_WORK_DEAD_BINDER, 269 BINDER_WORK_DEAD_BINDER_AND_CLEAR, 270 BINDER_WORK_CLEAR_DEATH_NOTIFICATION, 271 } type; 272 }; 273 274 struct binder_error { 275 struct binder_work work; 276 uint32_t cmd; 277 }; 278 279 /** 280 * struct binder_node - binder node bookkeeping 281 * @debug_id: unique ID for debugging 282 * (invariant after initialized) 283 * @lock: lock for node fields 284 * @work: worklist element for node work 285 * (protected by @proc->inner_lock) 286 * @rb_node: element for proc->nodes tree 287 * (protected by @proc->inner_lock) 288 * @dead_node: element for binder_dead_nodes list 289 * (protected by binder_dead_nodes_lock) 290 * @proc: binder_proc that owns this node 291 * (invariant after initialized) 292 * @refs: list of references on this node 293 * (protected by @lock) 294 * @internal_strong_refs: used to take strong references when 295 * initiating a transaction 296 * (protected by @proc->inner_lock if @proc 297 * and by @lock) 298 * @local_weak_refs: weak user refs from local process 299 * (protected by @proc->inner_lock if @proc 300 * and by @lock) 301 * @local_strong_refs: strong user refs from local process 302 * (protected by @proc->inner_lock if @proc 303 * and by @lock) 304 * @tmp_refs: temporary kernel refs 305 * (protected by @proc->inner_lock while @proc 306 * is valid, and by binder_dead_nodes_lock 307 * if @proc is NULL. During inc/dec and node release 308 * it is also protected by @lock to provide safety 309 * as the node dies and @proc becomes NULL) 310 * @ptr: userspace pointer for node 311 * (invariant, no lock needed) 312 * @cookie: userspace cookie for node 313 * (invariant, no lock needed) 314 * @has_strong_ref: userspace notified of strong ref 315 * (protected by @proc->inner_lock if @proc 316 * and by @lock) 317 * @pending_strong_ref: userspace has acked notification of strong ref 318 * (protected by @proc->inner_lock if @proc 319 * and by @lock) 320 * @has_weak_ref: userspace notified of weak ref 321 * (protected by @proc->inner_lock if @proc 322 * and by @lock) 323 * @pending_weak_ref: userspace has acked notification of weak ref 324 * (protected by @proc->inner_lock if @proc 325 * and by @lock) 326 * @has_async_transaction: async transaction to node in progress 327 * (protected by @lock) 328 * @accept_fds: file descriptor operations supported for node 329 * (invariant after initialized) 330 * @min_priority: minimum scheduling priority 331 * (invariant after initialized) 332 * @txn_security_ctx: require sender's security context 333 * (invariant after initialized) 334 * @async_todo: list of async work items 335 * (protected by @proc->inner_lock) 336 * 337 * Bookkeeping structure for binder nodes. 
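 *
 * Informally: a node stays allocated while any of the reference
 * counts above are non-zero or @refs is non-empty; the authoritative
 * check lives in binder_dec_node_nilocked() below.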
338 */ 339 struct binder_node { 340 int debug_id; 341 spinlock_t lock; 342 struct binder_work work; 343 union { 344 struct rb_node rb_node; 345 struct hlist_node dead_node; 346 }; 347 struct binder_proc *proc; 348 struct hlist_head refs; 349 int internal_strong_refs; 350 int local_weak_refs; 351 int local_strong_refs; 352 int tmp_refs; 353 binder_uintptr_t ptr; 354 binder_uintptr_t cookie; 355 struct { 356 /* 357 * bitfield elements protected by 358 * proc inner_lock 359 */ 360 u8 has_strong_ref:1; 361 u8 pending_strong_ref:1; 362 u8 has_weak_ref:1; 363 u8 pending_weak_ref:1; 364 }; 365 struct { 366 /* 367 * invariant after initialization 368 */ 369 u8 accept_fds:1; 370 u8 txn_security_ctx:1; 371 u8 min_priority; 372 }; 373 bool has_async_transaction; 374 struct list_head async_todo; 375 }; 376 377 struct binder_ref_death { 378 /** 379 * @work: worklist element for death notifications 380 * (protected by inner_lock of the proc that 381 * this ref belongs to) 382 */ 383 struct binder_work work; 384 binder_uintptr_t cookie; 385 }; 386 387 /** 388 * struct binder_ref_data - binder_ref counts and id 389 * @debug_id: unique ID for the ref 390 * @desc: unique userspace handle for ref 391 * @strong: strong ref count (debugging only if not locked) 392 * @weak: weak ref count (debugging only if not locked) 393 * 394 * Structure to hold ref count and ref id information. Since 395 * the actual ref can only be accessed with a lock, this structure 396 * is used to return information about the ref to callers of 397 * ref inc/dec functions. 398 */ 399 struct binder_ref_data { 400 int debug_id; 401 uint32_t desc; 402 int strong; 403 int weak; 404 }; 405 406 /** 407 * struct binder_ref - struct to track references on nodes 408 * @data: binder_ref_data containing id, handle, and current refcounts 409 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree 410 * @rb_node_node: node for lookup by @node in proc's rb_tree 411 * @node_entry: list entry for node->refs list in target node 412 * (protected by @node->lock) 413 * @proc: binder_proc containing ref 414 * @node: binder_node of target node. When cleaning up a 415 * ref for deletion in binder_cleanup_ref, a non-NULL 416 * @node indicates the node must be freed 417 * @death: pointer to death notification (ref_death) if requested 418 * (protected by @node->lock) 419 * 420 * Structure to track references from procA to target node (on procB). This 421 * structure is unsafe to access without holding @proc->outer_lock. 
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH        = 0x01,
	BINDER_DEFERRED_RELEASE      = 0x02,
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
567 */ 568 struct binder_thread { 569 struct binder_proc *proc; 570 struct rb_node rb_node; 571 struct list_head waiting_thread_node; 572 int pid; 573 int looper; /* only modified by this thread */ 574 bool looper_need_return; /* can be written by other thread */ 575 struct binder_transaction *transaction_stack; 576 struct list_head todo; 577 bool process_todo; 578 struct binder_error return_error; 579 struct binder_error reply_error; 580 wait_queue_head_t wait; 581 struct binder_stats stats; 582 atomic_t tmp_ref; 583 bool is_dead; 584 }; 585 586 /** 587 * struct binder_txn_fd_fixup - transaction fd fixup list element 588 * @fixup_entry: list entry 589 * @file: struct file to be associated with new fd 590 * @offset: offset in buffer data to this fixup 591 * 592 * List element for fd fixups in a transaction. Since file 593 * descriptors need to be allocated in the context of the 594 * target process, we pass each fd to be processed in this 595 * struct. 596 */ 597 struct binder_txn_fd_fixup { 598 struct list_head fixup_entry; 599 struct file *file; 600 size_t offset; 601 }; 602 603 struct binder_transaction { 604 int debug_id; 605 struct binder_work work; 606 struct binder_thread *from; 607 struct binder_transaction *from_parent; 608 struct binder_proc *to_proc; 609 struct binder_thread *to_thread; 610 struct binder_transaction *to_parent; 611 unsigned need_reply:1; 612 /* unsigned is_dead:1; */ /* not used at the moment */ 613 614 struct binder_buffer *buffer; 615 unsigned int code; 616 unsigned int flags; 617 long priority; 618 long saved_priority; 619 kuid_t sender_euid; 620 struct list_head fd_fixups; 621 binder_uintptr_t security_ctx; 622 /** 623 * @lock: protects @from, @to_proc, and @to_thread 624 * 625 * @from, @to_proc, and @to_thread can be set to NULL 626 * during thread teardown 627 */ 628 spinlock_t lock; 629 }; 630 631 /** 632 * struct binder_object - union of flat binder object types 633 * @hdr: generic object header 634 * @fbo: binder object (nodes and refs) 635 * @fdo: file descriptor object 636 * @bbo: binder buffer pointer 637 * @fdao: file descriptor array 638 * 639 * Used for type-independent object copies 640 */ 641 struct binder_object { 642 union { 643 struct binder_object_header hdr; 644 struct flat_binder_object fbo; 645 struct binder_fd_object fdo; 646 struct binder_buffer_object bbo; 647 struct binder_fd_array_object fdao; 648 }; 649 }; 650 651 /** 652 * binder_proc_lock() - Acquire outer lock for given binder_proc 653 * @proc: struct binder_proc to acquire 654 * 655 * Acquires proc->outer_lock. Used to protect binder_ref 656 * structures associated with the given proc. 
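 *
 * Typical pairing (illustrative sketch; see binder_get_node_from_ref()
 * below for a real caller):
 *
 *	binder_proc_lock(proc);
 *	ref = binder_get_ref_olocked(proc, desc, true);
 *	...
 *	binder_proc_unlock(proc);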
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release spinlock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:         binder_proc associated with list
 * @list:         list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
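 *
 * Illustrative sketch of the expected locking around this helper
 * (this is exactly what binder_enqueue_thread_work() below does):
 *
 *	binder_inner_proc_lock(thread->proc);
 *	binder_enqueue_thread_work_ilocked(thread, work);
 *	binder_inner_proc_unlock(thread->proc);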
863 */ 864 static void 865 binder_enqueue_thread_work_ilocked(struct binder_thread *thread, 866 struct binder_work *work) 867 { 868 WARN_ON(!list_empty(&thread->waiting_thread_node)); 869 binder_enqueue_work_ilocked(work, &thread->todo); 870 thread->process_todo = true; 871 } 872 873 /** 874 * binder_enqueue_thread_work() - Add an item to the thread work list 875 * @thread: thread to queue work to 876 * @work: struct binder_work to add to list 877 * 878 * Adds the work to the todo list of the thread, and enables processing 879 * of the todo queue. 880 */ 881 static void 882 binder_enqueue_thread_work(struct binder_thread *thread, 883 struct binder_work *work) 884 { 885 binder_inner_proc_lock(thread->proc); 886 binder_enqueue_thread_work_ilocked(thread, work); 887 binder_inner_proc_unlock(thread->proc); 888 } 889 890 static void 891 binder_dequeue_work_ilocked(struct binder_work *work) 892 { 893 list_del_init(&work->entry); 894 } 895 896 /** 897 * binder_dequeue_work() - Removes an item from the work list 898 * @proc: binder_proc associated with list 899 * @work: struct binder_work to remove from list 900 * 901 * Removes the specified work item from whatever list it is on. 902 * Can safely be called if work is not on any list. 903 */ 904 static void 905 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work) 906 { 907 binder_inner_proc_lock(proc); 908 binder_dequeue_work_ilocked(work); 909 binder_inner_proc_unlock(proc); 910 } 911 912 static struct binder_work *binder_dequeue_work_head_ilocked( 913 struct list_head *list) 914 { 915 struct binder_work *w; 916 917 w = list_first_entry_or_null(list, struct binder_work, entry); 918 if (w) 919 list_del_init(&w->entry); 920 return w; 921 } 922 923 /** 924 * binder_dequeue_work_head() - Dequeues the item at head of list 925 * @proc: binder_proc associated with list 926 * @list: list to dequeue head 927 * 928 * Removes the head of the list if there are items on the list 929 * 930 * Return: pointer dequeued binder_work, NULL if list was empty 931 */ 932 static struct binder_work *binder_dequeue_work_head( 933 struct binder_proc *proc, 934 struct list_head *list) 935 { 936 struct binder_work *w; 937 938 binder_inner_proc_lock(proc); 939 w = binder_dequeue_work_head_ilocked(list); 940 binder_inner_proc_unlock(proc); 941 return w; 942 } 943 944 static void 945 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); 946 static void binder_free_thread(struct binder_thread *thread); 947 static void binder_free_proc(struct binder_proc *proc); 948 static void binder_inc_node_tmpref_ilocked(struct binder_node *node); 949 950 static bool binder_has_work_ilocked(struct binder_thread *thread, 951 bool do_proc_work) 952 { 953 return thread->process_todo || 954 thread->looper_need_return || 955 (do_proc_work && 956 !binder_worklist_empty_ilocked(&thread->proc->todo)); 957 } 958 959 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work) 960 { 961 bool has_work; 962 963 binder_inner_proc_lock(thread->proc); 964 has_work = binder_has_work_ilocked(thread, do_proc_work); 965 binder_inner_proc_unlock(thread->proc); 966 967 return has_work; 968 } 969 970 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread) 971 { 972 return !thread->transaction_stack && 973 binder_worklist_empty_ilocked(&thread->todo) && 974 (thread->looper & (BINDER_LOOPER_STATE_ENTERED | 975 BINDER_LOOPER_STATE_REGISTERED)); 976 } 977 978 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc, 979 
bool sync) 980 { 981 struct rb_node *n; 982 struct binder_thread *thread; 983 984 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { 985 thread = rb_entry(n, struct binder_thread, rb_node); 986 if (thread->looper & BINDER_LOOPER_STATE_POLL && 987 binder_available_for_proc_work_ilocked(thread)) { 988 if (sync) 989 wake_up_interruptible_sync(&thread->wait); 990 else 991 wake_up_interruptible(&thread->wait); 992 } 993 } 994 } 995 996 /** 997 * binder_select_thread_ilocked() - selects a thread for doing proc work. 998 * @proc: process to select a thread from 999 * 1000 * Note that calling this function moves the thread off the waiting_threads 1001 * list, so it can only be woken up by the caller of this function, or a 1002 * signal. Therefore, callers *should* always wake up the thread this function 1003 * returns. 1004 * 1005 * Return: If there's a thread currently waiting for process work, 1006 * returns that thread. Otherwise returns NULL. 1007 */ 1008 static struct binder_thread * 1009 binder_select_thread_ilocked(struct binder_proc *proc) 1010 { 1011 struct binder_thread *thread; 1012 1013 assert_spin_locked(&proc->inner_lock); 1014 thread = list_first_entry_or_null(&proc->waiting_threads, 1015 struct binder_thread, 1016 waiting_thread_node); 1017 1018 if (thread) 1019 list_del_init(&thread->waiting_thread_node); 1020 1021 return thread; 1022 } 1023 1024 /** 1025 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work. 1026 * @proc: process to wake up a thread in 1027 * @thread: specific thread to wake-up (may be NULL) 1028 * @sync: whether to do a synchronous wake-up 1029 * 1030 * This function wakes up a thread in the @proc process. 1031 * The caller may provide a specific thread to wake-up in 1032 * the @thread parameter. If @thread is NULL, this function 1033 * will wake up threads that have called poll(). 1034 * 1035 * Note that for this function to work as expected, callers 1036 * should first call binder_select_thread() to find a thread 1037 * to handle the work (if they don't have a thread already), 1038 * and pass the result into the @thread parameter. 1039 */ 1040 static void binder_wakeup_thread_ilocked(struct binder_proc *proc, 1041 struct binder_thread *thread, 1042 bool sync) 1043 { 1044 assert_spin_locked(&proc->inner_lock); 1045 1046 if (thread) { 1047 if (sync) 1048 wake_up_interruptible_sync(&thread->wait); 1049 else 1050 wake_up_interruptible(&thread->wait); 1051 return; 1052 } 1053 1054 /* Didn't find a thread waiting for proc work; this can happen 1055 * in two scenarios: 1056 * 1. All threads are busy handling transactions 1057 * In that case, one of those threads should call back into 1058 * the kernel driver soon and pick up this work. 1059 * 2. Threads are using the (e)poll interface, in which case 1060 * they may be blocked on the waitqueue without having been 1061 * added to waiting_threads. For this case, we just iterate 1062 * over all threads not handling transaction work, and 1063 * wake them all up. We wake all because we don't know whether 1064 * a thread that called into (e)poll is handling non-binder 1065 * work currently. 
1066 */ 1067 binder_wakeup_poll_threads_ilocked(proc, sync); 1068 } 1069 1070 static void binder_wakeup_proc_ilocked(struct binder_proc *proc) 1071 { 1072 struct binder_thread *thread = binder_select_thread_ilocked(proc); 1073 1074 binder_wakeup_thread_ilocked(proc, thread, /* sync = */false); 1075 } 1076 1077 static void binder_set_nice(long nice) 1078 { 1079 long min_nice; 1080 1081 if (can_nice(current, nice)) { 1082 set_user_nice(current, nice); 1083 return; 1084 } 1085 min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE)); 1086 binder_debug(BINDER_DEBUG_PRIORITY_CAP, 1087 "%d: nice value %ld not allowed use %ld instead\n", 1088 current->pid, nice, min_nice); 1089 set_user_nice(current, min_nice); 1090 if (min_nice <= MAX_NICE) 1091 return; 1092 binder_user_error("%d RLIMIT_NICE not set\n", current->pid); 1093 } 1094 1095 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc, 1096 binder_uintptr_t ptr) 1097 { 1098 struct rb_node *n = proc->nodes.rb_node; 1099 struct binder_node *node; 1100 1101 assert_spin_locked(&proc->inner_lock); 1102 1103 while (n) { 1104 node = rb_entry(n, struct binder_node, rb_node); 1105 1106 if (ptr < node->ptr) 1107 n = n->rb_left; 1108 else if (ptr > node->ptr) 1109 n = n->rb_right; 1110 else { 1111 /* 1112 * take an implicit weak reference 1113 * to ensure node stays alive until 1114 * call to binder_put_node() 1115 */ 1116 binder_inc_node_tmpref_ilocked(node); 1117 return node; 1118 } 1119 } 1120 return NULL; 1121 } 1122 1123 static struct binder_node *binder_get_node(struct binder_proc *proc, 1124 binder_uintptr_t ptr) 1125 { 1126 struct binder_node *node; 1127 1128 binder_inner_proc_lock(proc); 1129 node = binder_get_node_ilocked(proc, ptr); 1130 binder_inner_proc_unlock(proc); 1131 return node; 1132 } 1133 1134 static struct binder_node *binder_init_node_ilocked( 1135 struct binder_proc *proc, 1136 struct binder_node *new_node, 1137 struct flat_binder_object *fp) 1138 { 1139 struct rb_node **p = &proc->nodes.rb_node; 1140 struct rb_node *parent = NULL; 1141 struct binder_node *node; 1142 binder_uintptr_t ptr = fp ? fp->binder : 0; 1143 binder_uintptr_t cookie = fp ? fp->cookie : 0; 1144 __u32 flags = fp ? fp->flags : 0; 1145 1146 assert_spin_locked(&proc->inner_lock); 1147 1148 while (*p) { 1149 1150 parent = *p; 1151 node = rb_entry(parent, struct binder_node, rb_node); 1152 1153 if (ptr < node->ptr) 1154 p = &(*p)->rb_left; 1155 else if (ptr > node->ptr) 1156 p = &(*p)->rb_right; 1157 else { 1158 /* 1159 * A matching node is already in 1160 * the rb tree. Abandon the init 1161 * and return it. 
1162 */ 1163 binder_inc_node_tmpref_ilocked(node); 1164 return node; 1165 } 1166 } 1167 node = new_node; 1168 binder_stats_created(BINDER_STAT_NODE); 1169 node->tmp_refs++; 1170 rb_link_node(&node->rb_node, parent, p); 1171 rb_insert_color(&node->rb_node, &proc->nodes); 1172 node->debug_id = atomic_inc_return(&binder_last_id); 1173 node->proc = proc; 1174 node->ptr = ptr; 1175 node->cookie = cookie; 1176 node->work.type = BINDER_WORK_NODE; 1177 node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK; 1178 node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); 1179 node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX); 1180 spin_lock_init(&node->lock); 1181 INIT_LIST_HEAD(&node->work.entry); 1182 INIT_LIST_HEAD(&node->async_todo); 1183 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1184 "%d:%d node %d u%016llx c%016llx created\n", 1185 proc->pid, current->pid, node->debug_id, 1186 (u64)node->ptr, (u64)node->cookie); 1187 1188 return node; 1189 } 1190 1191 static struct binder_node *binder_new_node(struct binder_proc *proc, 1192 struct flat_binder_object *fp) 1193 { 1194 struct binder_node *node; 1195 struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL); 1196 1197 if (!new_node) 1198 return NULL; 1199 binder_inner_proc_lock(proc); 1200 node = binder_init_node_ilocked(proc, new_node, fp); 1201 binder_inner_proc_unlock(proc); 1202 if (node != new_node) 1203 /* 1204 * The node was already added by another thread 1205 */ 1206 kfree(new_node); 1207 1208 return node; 1209 } 1210 1211 static void binder_free_node(struct binder_node *node) 1212 { 1213 kfree(node); 1214 binder_stats_deleted(BINDER_STAT_NODE); 1215 } 1216 1217 static int binder_inc_node_nilocked(struct binder_node *node, int strong, 1218 int internal, 1219 struct list_head *target_list) 1220 { 1221 struct binder_proc *proc = node->proc; 1222 1223 assert_spin_locked(&node->lock); 1224 if (proc) 1225 assert_spin_locked(&proc->inner_lock); 1226 if (strong) { 1227 if (internal) { 1228 if (target_list == NULL && 1229 node->internal_strong_refs == 0 && 1230 !(node->proc && 1231 node == node->proc->context->binder_context_mgr_node && 1232 node->has_strong_ref)) { 1233 pr_err("invalid inc strong node for %d\n", 1234 node->debug_id); 1235 return -EINVAL; 1236 } 1237 node->internal_strong_refs++; 1238 } else 1239 node->local_strong_refs++; 1240 if (!node->has_strong_ref && target_list) { 1241 struct binder_thread *thread = container_of(target_list, 1242 struct binder_thread, todo); 1243 binder_dequeue_work_ilocked(&node->work); 1244 BUG_ON(&thread->todo != target_list); 1245 binder_enqueue_deferred_thread_work_ilocked(thread, 1246 &node->work); 1247 } 1248 } else { 1249 if (!internal) 1250 node->local_weak_refs++; 1251 if (!node->has_weak_ref && list_empty(&node->work.entry)) { 1252 if (target_list == NULL) { 1253 pr_err("invalid inc weak node for %d\n", 1254 node->debug_id); 1255 return -EINVAL; 1256 } 1257 /* 1258 * See comment above 1259 */ 1260 binder_enqueue_work_ilocked(&node->work, target_list); 1261 } 1262 } 1263 return 0; 1264 } 1265 1266 static int binder_inc_node(struct binder_node *node, int strong, int internal, 1267 struct list_head *target_list) 1268 { 1269 int ret; 1270 1271 binder_node_inner_lock(node); 1272 ret = binder_inc_node_nilocked(node, strong, internal, target_list); 1273 binder_node_inner_unlock(node); 1274 1275 return ret; 1276 } 1277 1278 static bool binder_dec_node_nilocked(struct binder_node *node, 1279 int strong, int internal) 1280 { 1281 struct binder_proc *proc = node->proc; 
1282 1283 assert_spin_locked(&node->lock); 1284 if (proc) 1285 assert_spin_locked(&proc->inner_lock); 1286 if (strong) { 1287 if (internal) 1288 node->internal_strong_refs--; 1289 else 1290 node->local_strong_refs--; 1291 if (node->local_strong_refs || node->internal_strong_refs) 1292 return false; 1293 } else { 1294 if (!internal) 1295 node->local_weak_refs--; 1296 if (node->local_weak_refs || node->tmp_refs || 1297 !hlist_empty(&node->refs)) 1298 return false; 1299 } 1300 1301 if (proc && (node->has_strong_ref || node->has_weak_ref)) { 1302 if (list_empty(&node->work.entry)) { 1303 binder_enqueue_work_ilocked(&node->work, &proc->todo); 1304 binder_wakeup_proc_ilocked(proc); 1305 } 1306 } else { 1307 if (hlist_empty(&node->refs) && !node->local_strong_refs && 1308 !node->local_weak_refs && !node->tmp_refs) { 1309 if (proc) { 1310 binder_dequeue_work_ilocked(&node->work); 1311 rb_erase(&node->rb_node, &proc->nodes); 1312 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1313 "refless node %d deleted\n", 1314 node->debug_id); 1315 } else { 1316 BUG_ON(!list_empty(&node->work.entry)); 1317 spin_lock(&binder_dead_nodes_lock); 1318 /* 1319 * tmp_refs could have changed so 1320 * check it again 1321 */ 1322 if (node->tmp_refs) { 1323 spin_unlock(&binder_dead_nodes_lock); 1324 return false; 1325 } 1326 hlist_del(&node->dead_node); 1327 spin_unlock(&binder_dead_nodes_lock); 1328 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1329 "dead node %d deleted\n", 1330 node->debug_id); 1331 } 1332 return true; 1333 } 1334 } 1335 return false; 1336 } 1337 1338 static void binder_dec_node(struct binder_node *node, int strong, int internal) 1339 { 1340 bool free_node; 1341 1342 binder_node_inner_lock(node); 1343 free_node = binder_dec_node_nilocked(node, strong, internal); 1344 binder_node_inner_unlock(node); 1345 if (free_node) 1346 binder_free_node(node); 1347 } 1348 1349 static void binder_inc_node_tmpref_ilocked(struct binder_node *node) 1350 { 1351 /* 1352 * No call to binder_inc_node() is needed since we 1353 * don't need to inform userspace of any changes to 1354 * tmp_refs 1355 */ 1356 node->tmp_refs++; 1357 } 1358 1359 /** 1360 * binder_inc_node_tmpref() - take a temporary reference on node 1361 * @node: node to reference 1362 * 1363 * Take reference on node to prevent the node from being freed 1364 * while referenced only by a local variable. The inner lock is 1365 * needed to serialize with the node work on the queue (which 1366 * isn't needed after the node is dead). 
If the node is dead 1367 * (node->proc is NULL), use binder_dead_nodes_lock to protect 1368 * node->tmp_refs against dead-node-only cases where the node 1369 * lock cannot be acquired (eg traversing the dead node list to 1370 * print nodes) 1371 */ 1372 static void binder_inc_node_tmpref(struct binder_node *node) 1373 { 1374 binder_node_lock(node); 1375 if (node->proc) 1376 binder_inner_proc_lock(node->proc); 1377 else 1378 spin_lock(&binder_dead_nodes_lock); 1379 binder_inc_node_tmpref_ilocked(node); 1380 if (node->proc) 1381 binder_inner_proc_unlock(node->proc); 1382 else 1383 spin_unlock(&binder_dead_nodes_lock); 1384 binder_node_unlock(node); 1385 } 1386 1387 /** 1388 * binder_dec_node_tmpref() - remove a temporary reference on node 1389 * @node: node to reference 1390 * 1391 * Release temporary reference on node taken via binder_inc_node_tmpref() 1392 */ 1393 static void binder_dec_node_tmpref(struct binder_node *node) 1394 { 1395 bool free_node; 1396 1397 binder_node_inner_lock(node); 1398 if (!node->proc) 1399 spin_lock(&binder_dead_nodes_lock); 1400 else 1401 __acquire(&binder_dead_nodes_lock); 1402 node->tmp_refs--; 1403 BUG_ON(node->tmp_refs < 0); 1404 if (!node->proc) 1405 spin_unlock(&binder_dead_nodes_lock); 1406 else 1407 __release(&binder_dead_nodes_lock); 1408 /* 1409 * Call binder_dec_node() to check if all refcounts are 0 1410 * and cleanup is needed. Calling with strong=0 and internal=1 1411 * causes no actual reference to be released in binder_dec_node(). 1412 * If that changes, a change is needed here too. 1413 */ 1414 free_node = binder_dec_node_nilocked(node, 0, 1); 1415 binder_node_inner_unlock(node); 1416 if (free_node) 1417 binder_free_node(node); 1418 } 1419 1420 static void binder_put_node(struct binder_node *node) 1421 { 1422 binder_dec_node_tmpref(node); 1423 } 1424 1425 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc, 1426 u32 desc, bool need_strong_ref) 1427 { 1428 struct rb_node *n = proc->refs_by_desc.rb_node; 1429 struct binder_ref *ref; 1430 1431 while (n) { 1432 ref = rb_entry(n, struct binder_ref, rb_node_desc); 1433 1434 if (desc < ref->data.desc) { 1435 n = n->rb_left; 1436 } else if (desc > ref->data.desc) { 1437 n = n->rb_right; 1438 } else if (need_strong_ref && !ref->data.strong) { 1439 binder_user_error("tried to use weak ref as strong ref\n"); 1440 return NULL; 1441 } else { 1442 return ref; 1443 } 1444 } 1445 return NULL; 1446 } 1447 1448 /** 1449 * binder_get_ref_for_node_olocked() - get the ref associated with given node 1450 * @proc: binder_proc that owns the ref 1451 * @node: binder_node of target 1452 * @new_ref: newly allocated binder_ref to be initialized or %NULL 1453 * 1454 * Look up the ref for the given node and return it if it exists 1455 * 1456 * If it doesn't exist and the caller provides a newly allocated 1457 * ref, initialize the fields of the newly allocated ref and insert 1458 * into the given proc rb_trees and node refs list. 1459 * 1460 * Return: the ref for node. It is possible that another thread 1461 * allocated/initialized the ref first in which case the 1462 * returned ref would be different than the passed-in 1463 * new_ref. new_ref must be kfree'd by the caller in 1464 * this case. 
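 *
 * Sketch of the expected allocate-and-retry caller pattern, with
 * error handling omitted (this mirrors binder_inc_ref_for_node()
 * below):
 *
 *	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
 *	if (!ref) {
 *		binder_proc_unlock(proc);
 *		new_ref = kzalloc(sizeof(*new_ref), GFP_KERNEL);
 *		binder_proc_lock(proc);
 *		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
 *	}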
1465 */ 1466 static struct binder_ref *binder_get_ref_for_node_olocked( 1467 struct binder_proc *proc, 1468 struct binder_node *node, 1469 struct binder_ref *new_ref) 1470 { 1471 struct binder_context *context = proc->context; 1472 struct rb_node **p = &proc->refs_by_node.rb_node; 1473 struct rb_node *parent = NULL; 1474 struct binder_ref *ref; 1475 struct rb_node *n; 1476 1477 while (*p) { 1478 parent = *p; 1479 ref = rb_entry(parent, struct binder_ref, rb_node_node); 1480 1481 if (node < ref->node) 1482 p = &(*p)->rb_left; 1483 else if (node > ref->node) 1484 p = &(*p)->rb_right; 1485 else 1486 return ref; 1487 } 1488 if (!new_ref) 1489 return NULL; 1490 1491 binder_stats_created(BINDER_STAT_REF); 1492 new_ref->data.debug_id = atomic_inc_return(&binder_last_id); 1493 new_ref->proc = proc; 1494 new_ref->node = node; 1495 rb_link_node(&new_ref->rb_node_node, parent, p); 1496 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node); 1497 1498 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1; 1499 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { 1500 ref = rb_entry(n, struct binder_ref, rb_node_desc); 1501 if (ref->data.desc > new_ref->data.desc) 1502 break; 1503 new_ref->data.desc = ref->data.desc + 1; 1504 } 1505 1506 p = &proc->refs_by_desc.rb_node; 1507 while (*p) { 1508 parent = *p; 1509 ref = rb_entry(parent, struct binder_ref, rb_node_desc); 1510 1511 if (new_ref->data.desc < ref->data.desc) 1512 p = &(*p)->rb_left; 1513 else if (new_ref->data.desc > ref->data.desc) 1514 p = &(*p)->rb_right; 1515 else 1516 BUG(); 1517 } 1518 rb_link_node(&new_ref->rb_node_desc, parent, p); 1519 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc); 1520 1521 binder_node_lock(node); 1522 hlist_add_head(&new_ref->node_entry, &node->refs); 1523 1524 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1525 "%d new ref %d desc %d for node %d\n", 1526 proc->pid, new_ref->data.debug_id, new_ref->data.desc, 1527 node->debug_id); 1528 binder_node_unlock(node); 1529 return new_ref; 1530 } 1531 1532 static void binder_cleanup_ref_olocked(struct binder_ref *ref) 1533 { 1534 bool delete_node = false; 1535 1536 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1537 "%d delete ref %d desc %d for node %d\n", 1538 ref->proc->pid, ref->data.debug_id, ref->data.desc, 1539 ref->node->debug_id); 1540 1541 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc); 1542 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node); 1543 1544 binder_node_inner_lock(ref->node); 1545 if (ref->data.strong) 1546 binder_dec_node_nilocked(ref->node, 1, 1); 1547 1548 hlist_del(&ref->node_entry); 1549 delete_node = binder_dec_node_nilocked(ref->node, 0, 1); 1550 binder_node_inner_unlock(ref->node); 1551 /* 1552 * Clear ref->node unless we want the caller to free the node 1553 */ 1554 if (!delete_node) { 1555 /* 1556 * The caller uses ref->node to determine 1557 * whether the node needs to be freed. Clear 1558 * it since the node is still alive. 
1559 */ 1560 ref->node = NULL; 1561 } 1562 1563 if (ref->death) { 1564 binder_debug(BINDER_DEBUG_DEAD_BINDER, 1565 "%d delete ref %d desc %d has death notification\n", 1566 ref->proc->pid, ref->data.debug_id, 1567 ref->data.desc); 1568 binder_dequeue_work(ref->proc, &ref->death->work); 1569 binder_stats_deleted(BINDER_STAT_DEATH); 1570 } 1571 binder_stats_deleted(BINDER_STAT_REF); 1572 } 1573 1574 /** 1575 * binder_inc_ref_olocked() - increment the ref for given handle 1576 * @ref: ref to be incremented 1577 * @strong: if true, strong increment, else weak 1578 * @target_list: list to queue node work on 1579 * 1580 * Increment the ref. @ref->proc->outer_lock must be held on entry 1581 * 1582 * Return: 0, if successful, else errno 1583 */ 1584 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong, 1585 struct list_head *target_list) 1586 { 1587 int ret; 1588 1589 if (strong) { 1590 if (ref->data.strong == 0) { 1591 ret = binder_inc_node(ref->node, 1, 1, target_list); 1592 if (ret) 1593 return ret; 1594 } 1595 ref->data.strong++; 1596 } else { 1597 if (ref->data.weak == 0) { 1598 ret = binder_inc_node(ref->node, 0, 1, target_list); 1599 if (ret) 1600 return ret; 1601 } 1602 ref->data.weak++; 1603 } 1604 return 0; 1605 } 1606 1607 /** 1608 * binder_dec_ref() - dec the ref for given handle 1609 * @ref: ref to be decremented 1610 * @strong: if true, strong decrement, else weak 1611 * 1612 * Decrement the ref. 1613 * 1614 * Return: true if ref is cleaned up and ready to be freed 1615 */ 1616 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong) 1617 { 1618 if (strong) { 1619 if (ref->data.strong == 0) { 1620 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n", 1621 ref->proc->pid, ref->data.debug_id, 1622 ref->data.desc, ref->data.strong, 1623 ref->data.weak); 1624 return false; 1625 } 1626 ref->data.strong--; 1627 if (ref->data.strong == 0) 1628 binder_dec_node(ref->node, strong, 1); 1629 } else { 1630 if (ref->data.weak == 0) { 1631 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n", 1632 ref->proc->pid, ref->data.debug_id, 1633 ref->data.desc, ref->data.strong, 1634 ref->data.weak); 1635 return false; 1636 } 1637 ref->data.weak--; 1638 } 1639 if (ref->data.strong == 0 && ref->data.weak == 0) { 1640 binder_cleanup_ref_olocked(ref); 1641 return true; 1642 } 1643 return false; 1644 } 1645 1646 /** 1647 * binder_get_node_from_ref() - get the node from the given proc/desc 1648 * @proc: proc containing the ref 1649 * @desc: the handle associated with the ref 1650 * @need_strong_ref: if true, only return node if ref is strong 1651 * @rdata: the id/refcount data for the ref 1652 * 1653 * Given a proc and ref handle, return the associated binder_node 1654 * 1655 * Return: a binder_node or NULL if not found or not strong when strong required 1656 */ 1657 static struct binder_node *binder_get_node_from_ref( 1658 struct binder_proc *proc, 1659 u32 desc, bool need_strong_ref, 1660 struct binder_ref_data *rdata) 1661 { 1662 struct binder_node *node; 1663 struct binder_ref *ref; 1664 1665 binder_proc_lock(proc); 1666 ref = binder_get_ref_olocked(proc, desc, need_strong_ref); 1667 if (!ref) 1668 goto err_no_ref; 1669 node = ref->node; 1670 /* 1671 * Take an implicit reference on the node to ensure 1672 * it stays alive until the call to binder_put_node() 1673 */ 1674 binder_inc_node_tmpref(node); 1675 if (rdata) 1676 *rdata = ref->data; 1677 binder_proc_unlock(proc); 1678 1679 return node; 1680 1681 err_no_ref: 1682 binder_proc_unlock(proc); 
1683 return NULL; 1684 } 1685 1686 /** 1687 * binder_free_ref() - free the binder_ref 1688 * @ref: ref to free 1689 * 1690 * Free the binder_ref. Free the binder_node indicated by ref->node 1691 * (if non-NULL) and the binder_ref_death indicated by ref->death. 1692 */ 1693 static void binder_free_ref(struct binder_ref *ref) 1694 { 1695 if (ref->node) 1696 binder_free_node(ref->node); 1697 kfree(ref->death); 1698 kfree(ref); 1699 } 1700 1701 /** 1702 * binder_update_ref_for_handle() - inc/dec the ref for given handle 1703 * @proc: proc containing the ref 1704 * @desc: the handle associated with the ref 1705 * @increment: true=inc reference, false=dec reference 1706 * @strong: true=strong reference, false=weak reference 1707 * @rdata: the id/refcount data for the ref 1708 * 1709 * Given a proc and ref handle, increment or decrement the ref 1710 * according to "increment" arg. 1711 * 1712 * Return: 0 if successful, else errno 1713 */ 1714 static int binder_update_ref_for_handle(struct binder_proc *proc, 1715 uint32_t desc, bool increment, bool strong, 1716 struct binder_ref_data *rdata) 1717 { 1718 int ret = 0; 1719 struct binder_ref *ref; 1720 bool delete_ref = false; 1721 1722 binder_proc_lock(proc); 1723 ref = binder_get_ref_olocked(proc, desc, strong); 1724 if (!ref) { 1725 ret = -EINVAL; 1726 goto err_no_ref; 1727 } 1728 if (increment) 1729 ret = binder_inc_ref_olocked(ref, strong, NULL); 1730 else 1731 delete_ref = binder_dec_ref_olocked(ref, strong); 1732 1733 if (rdata) 1734 *rdata = ref->data; 1735 binder_proc_unlock(proc); 1736 1737 if (delete_ref) 1738 binder_free_ref(ref); 1739 return ret; 1740 1741 err_no_ref: 1742 binder_proc_unlock(proc); 1743 return ret; 1744 } 1745 1746 /** 1747 * binder_dec_ref_for_handle() - dec the ref for given handle 1748 * @proc: proc containing the ref 1749 * @desc: the handle associated with the ref 1750 * @strong: true=strong reference, false=weak reference 1751 * @rdata: the id/refcount data for the ref 1752 * 1753 * Just calls binder_update_ref_for_handle() to decrement the ref. 1754 * 1755 * Return: 0 if successful, else errno 1756 */ 1757 static int binder_dec_ref_for_handle(struct binder_proc *proc, 1758 uint32_t desc, bool strong, struct binder_ref_data *rdata) 1759 { 1760 return binder_update_ref_for_handle(proc, desc, false, strong, rdata); 1761 } 1762 1763 1764 /** 1765 * binder_inc_ref_for_node() - increment the ref for given proc/node 1766 * @proc: proc containing the ref 1767 * @node: target node 1768 * @strong: true=strong reference, false=weak reference 1769 * @target_list: worklist to use if node is incremented 1770 * @rdata: the id/refcount data for the ref 1771 * 1772 * Given a proc and node, increment the ref. 
Create the ref if it
 * doesn't already exist.
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
				   struct binder_node *node,
				   bool strong,
				   struct list_head *target_list,
				   struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free the thread if appropriate (the thread has been
 * released and no transaction is being processed by the driver).
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * The atomic is enough to protect the counter value as long
	 * as it cannot reach zero or thread->is_dead is false.
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or when the binder_proc is in use by
 * threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (the proc has been released, all threads have
 * been released, and it is not currently in use to process a transaction).
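 *
 * Illustrative lifetime sketch (hypothetical caller; the increment is
 * done under @proc->inner_lock elsewhere in this file):
 *
 *	proc->tmp_ref++;		(while holding the inner lock)
 *	... use proc without holding its locks ...
 *	binder_proc_dec_tmpref(proc);	(may free proc)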
1860 */ 1861 static void binder_proc_dec_tmpref(struct binder_proc *proc) 1862 { 1863 binder_inner_proc_lock(proc); 1864 proc->tmp_ref--; 1865 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) && 1866 !proc->tmp_ref) { 1867 binder_inner_proc_unlock(proc); 1868 binder_free_proc(proc); 1869 return; 1870 } 1871 binder_inner_proc_unlock(proc); 1872 } 1873 1874 /** 1875 * binder_get_txn_from() - safely extract the "from" thread in transaction 1876 * @t: binder transaction for t->from 1877 * 1878 * Atomically return the "from" thread and increment the tmp_ref 1879 * count for the thread to ensure it stays alive until 1880 * binder_thread_dec_tmpref() is called. 1881 * 1882 * Return: the value of t->from 1883 */ 1884 static struct binder_thread *binder_get_txn_from( 1885 struct binder_transaction *t) 1886 { 1887 struct binder_thread *from; 1888 1889 spin_lock(&t->lock); 1890 from = t->from; 1891 if (from) 1892 atomic_inc(&from->tmp_ref); 1893 spin_unlock(&t->lock); 1894 return from; 1895 } 1896 1897 /** 1898 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock 1899 * @t: binder transaction for t->from 1900 * 1901 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock 1902 * to guarantee that the thread cannot be released while operating on it. 1903 * The caller must call binder_inner_proc_unlock() to release the inner lock 1904 * as well as call binder_dec_thread_txn() to release the reference. 1905 * 1906 * Return: the value of t->from 1907 */ 1908 static struct binder_thread *binder_get_txn_from_and_acq_inner( 1909 struct binder_transaction *t) 1910 __acquires(&t->from->proc->inner_lock) 1911 { 1912 struct binder_thread *from; 1913 1914 from = binder_get_txn_from(t); 1915 if (!from) { 1916 __acquire(&from->proc->inner_lock); 1917 return NULL; 1918 } 1919 binder_inner_proc_lock(from->proc); 1920 if (t->from) { 1921 BUG_ON(from != t->from); 1922 return from; 1923 } 1924 binder_inner_proc_unlock(from->proc); 1925 __acquire(&from->proc->inner_lock); 1926 binder_thread_dec_tmpref(from); 1927 return NULL; 1928 } 1929 1930 /** 1931 * binder_free_txn_fixups() - free unprocessed fd fixups 1932 * @t: binder transaction for t->from 1933 * 1934 * If the transaction is being torn down prior to being 1935 * processed by the target process, free all of the 1936 * fd fixups and fput the file structs. It is safe to 1937 * call this function after the fixups have been 1938 * processed -- in that case, the list will be empty. 
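 *
 * For reference, the teardown path that normally reaches this function
 * is roughly binder_cleanup_transaction() -> binder_free_transaction()
 * -> binder_free_txn_fixups(); after a successful delivery the fixup
 * list has already been drained, so the loop below does nothing.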
1939 */ 1940 static void binder_free_txn_fixups(struct binder_transaction *t) 1941 { 1942 struct binder_txn_fd_fixup *fixup, *tmp; 1943 1944 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { 1945 fput(fixup->file); 1946 list_del(&fixup->fixup_entry); 1947 kfree(fixup); 1948 } 1949 } 1950 1951 static void binder_free_transaction(struct binder_transaction *t) 1952 { 1953 if (t->buffer) 1954 t->buffer->transaction = NULL; 1955 binder_free_txn_fixups(t); 1956 kfree(t); 1957 binder_stats_deleted(BINDER_STAT_TRANSACTION); 1958 } 1959 1960 static void binder_send_failed_reply(struct binder_transaction *t, 1961 uint32_t error_code) 1962 { 1963 struct binder_thread *target_thread; 1964 struct binder_transaction *next; 1965 1966 BUG_ON(t->flags & TF_ONE_WAY); 1967 while (1) { 1968 target_thread = binder_get_txn_from_and_acq_inner(t); 1969 if (target_thread) { 1970 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 1971 "send failed reply for transaction %d to %d:%d\n", 1972 t->debug_id, 1973 target_thread->proc->pid, 1974 target_thread->pid); 1975 1976 binder_pop_transaction_ilocked(target_thread, t); 1977 if (target_thread->reply_error.cmd == BR_OK) { 1978 target_thread->reply_error.cmd = error_code; 1979 binder_enqueue_thread_work_ilocked( 1980 target_thread, 1981 &target_thread->reply_error.work); 1982 wake_up_interruptible(&target_thread->wait); 1983 } else { 1984 /* 1985 * Cannot get here for normal operation, but 1986 * we can if multiple synchronous transactions 1987 * are sent without blocking for responses. 1988 * Just ignore the 2nd error in this case. 1989 */ 1990 pr_warn("Unexpected reply error: %u\n", 1991 target_thread->reply_error.cmd); 1992 } 1993 binder_inner_proc_unlock(target_thread->proc); 1994 binder_thread_dec_tmpref(target_thread); 1995 binder_free_transaction(t); 1996 return; 1997 } else { 1998 __release(&target_thread->proc->inner_lock); 1999 } 2000 next = t->from_parent; 2001 2002 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 2003 "send failed reply for transaction %d, target dead\n", 2004 t->debug_id); 2005 2006 binder_free_transaction(t); 2007 if (next == NULL) { 2008 binder_debug(BINDER_DEBUG_DEAD_BINDER, 2009 "reply failed, no target thread at root\n"); 2010 return; 2011 } 2012 t = next; 2013 binder_debug(BINDER_DEBUG_DEAD_BINDER, 2014 "reply failed, no target thread -- retry %d\n", 2015 t->debug_id); 2016 } 2017 } 2018 2019 /** 2020 * binder_cleanup_transaction() - cleans up undelivered transaction 2021 * @t: transaction that needs to be cleaned up 2022 * @reason: reason the transaction wasn't delivered 2023 * @error_code: error to return to caller (if synchronous call) 2024 */ 2025 static void binder_cleanup_transaction(struct binder_transaction *t, 2026 const char *reason, 2027 uint32_t error_code) 2028 { 2029 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) { 2030 binder_send_failed_reply(t, error_code); 2031 } else { 2032 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 2033 "undelivered transaction %d, %s\n", 2034 t->debug_id, reason); 2035 binder_free_transaction(t); 2036 } 2037 } 2038 2039 /** 2040 * binder_get_object() - gets object and checks for valid metadata 2041 * @proc: binder_proc owning the buffer 2042 * @buffer: binder_buffer that we're parsing. 2043 * @offset: offset in the @buffer at which to validate an object. 2044 * @object: struct binder_object to read into 2045 * 2046 * Return: If there's a valid metadata object at @offset in @buffer, the 2047 * size of that object. Otherwise, it returns zero. 
The object 2048 * is read into the struct binder_object pointed to by @object. 2049 */ 2050 static size_t binder_get_object(struct binder_proc *proc, 2051 struct binder_buffer *buffer, 2052 unsigned long offset, 2053 struct binder_object *object) 2054 { 2055 size_t read_size; 2056 struct binder_object_header *hdr; 2057 size_t object_size = 0; 2058 2059 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset); 2060 if (read_size < sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32))) 2061 return 0; 2062 binder_alloc_copy_from_buffer(&proc->alloc, object, buffer, 2063 offset, read_size); 2064 2065 /* Ok, now see if we read a complete object. */ 2066 hdr = &object->hdr; 2067 switch (hdr->type) { 2068 case BINDER_TYPE_BINDER: 2069 case BINDER_TYPE_WEAK_BINDER: 2070 case BINDER_TYPE_HANDLE: 2071 case BINDER_TYPE_WEAK_HANDLE: 2072 object_size = sizeof(struct flat_binder_object); 2073 break; 2074 case BINDER_TYPE_FD: 2075 object_size = sizeof(struct binder_fd_object); 2076 break; 2077 case BINDER_TYPE_PTR: 2078 object_size = sizeof(struct binder_buffer_object); 2079 break; 2080 case BINDER_TYPE_FDA: 2081 object_size = sizeof(struct binder_fd_array_object); 2082 break; 2083 default: 2084 return 0; 2085 } 2086 if (offset <= buffer->data_size - object_size && 2087 buffer->data_size >= object_size) 2088 return object_size; 2089 else 2090 return 0; 2091 } 2092 2093 /** 2094 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer. 2095 * @proc: binder_proc owning the buffer 2096 * @b: binder_buffer containing the object 2097 * @object: struct binder_object to read into 2098 * @index: index in offset array at which the binder_buffer_object is 2099 * located 2100 * @start_offset: points to the start of the offset array 2101 * @object_offsetp: offset of @object read from @b 2102 * @num_valid: the number of valid offsets in the offset array 2103 * 2104 * Return: If @index is within the valid range of the offset array 2105 * described by @start and @num_valid, and if there's a valid 2106 * binder_buffer_object at the offset found in index @index 2107 * of the offset array, that object is returned. Otherwise, 2108 * %NULL is returned. 2109 * Note that the offset found in index @index itself is not 2110 * verified; this function assumes that @num_valid elements 2111 * from @start were previously verified to have valid offsets. 2112 * If @object_offsetp is non-NULL, then the offset within 2113 * @b is written to it. 2114 */ 2115 static struct binder_buffer_object *binder_validate_ptr( 2116 struct binder_proc *proc, 2117 struct binder_buffer *b, 2118 struct binder_object *object, 2119 binder_size_t index, 2120 binder_size_t start_offset, 2121 binder_size_t *object_offsetp, 2122 binder_size_t num_valid) 2123 { 2124 size_t object_size; 2125 binder_size_t object_offset; 2126 unsigned long buffer_offset; 2127 2128 if (index >= num_valid) 2129 return NULL; 2130 2131 buffer_offset = start_offset + sizeof(binder_size_t) * index; 2132 binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, 2133 b, buffer_offset, sizeof(object_offset)); 2134 object_size = binder_get_object(proc, b, object_offset, object); 2135 if (!object_size || object->hdr.type != BINDER_TYPE_PTR) 2136 return NULL; 2137 if (object_offsetp) 2138 *object_offsetp = object_offset; 2139 2140 return &object->bbo; 2141 } 2142 2143 /** 2144 * binder_validate_fixup() - validates pointer/fd fixups happen in order. 
2145 * @proc: binder_proc owning the buffer 2146 * @b: transaction buffer 2147 * @objects_start_offset: offset to start of objects buffer 2148 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up 2149 * @fixup_offset: start offset in @buffer to fix up 2150 * @last_obj_offset: offset to last binder_buffer_object that we fixed 2151 * @last_min_offset: minimum fixup offset in object at @last_obj_offset 2152 * 2153 * Return: %true if a fixup in buffer @buffer at offset @offset is 2154 * allowed. 2155 * 2156 * For safety reasons, we only allow fixups inside a buffer to happen 2157 * at increasing offsets; additionally, we only allow fixup on the last 2158 * buffer object that was verified, or one of its parents. 2159 * 2160 * Example of what is allowed: 2161 * 2162 * A 2163 * B (parent = A, offset = 0) 2164 * C (parent = A, offset = 16) 2165 * D (parent = C, offset = 0) 2166 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset) 2167 * 2168 * Examples of what is not allowed: 2169 * 2170 * Decreasing offsets within the same parent: 2171 * A 2172 * C (parent = A, offset = 16) 2173 * B (parent = A, offset = 0) // decreasing offset within A 2174 * 2175 * Referring to a parent that wasn't the last object or any of its parents: 2176 * A 2177 * B (parent = A, offset = 0) 2178 * C (parent = A, offset = 0) 2179 * C (parent = A, offset = 16) 2180 * D (parent = B, offset = 0) // B is not A or any of A's parents 2181 */ 2182 static bool binder_validate_fixup(struct binder_proc *proc, 2183 struct binder_buffer *b, 2184 binder_size_t objects_start_offset, 2185 binder_size_t buffer_obj_offset, 2186 binder_size_t fixup_offset, 2187 binder_size_t last_obj_offset, 2188 binder_size_t last_min_offset) 2189 { 2190 if (!last_obj_offset) { 2191 /* Nothing to fix up in */ 2192 return false; 2193 } 2194 2195 while (last_obj_offset != buffer_obj_offset) { 2196 unsigned long buffer_offset; 2197 struct binder_object last_object; 2198 struct binder_buffer_object *last_bbo; 2199 size_t object_size = binder_get_object(proc, b, last_obj_offset, 2200 &last_object); 2201 if (object_size != sizeof(*last_bbo)) 2202 return false; 2203 2204 last_bbo = &last_object.bbo; 2205 /* 2206 * Safe to retrieve the parent of last_obj, since it 2207 * was already previously verified by the driver. 2208 */ 2209 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0) 2210 return false; 2211 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t); 2212 buffer_offset = objects_start_offset + 2213 sizeof(binder_size_t) * last_bbo->parent, 2214 binder_alloc_copy_from_buffer(&proc->alloc, &last_obj_offset, 2215 b, buffer_offset, 2216 sizeof(last_obj_offset)); 2217 } 2218 return (fixup_offset >= last_min_offset); 2219 } 2220 2221 /** 2222 * struct binder_task_work_cb - for deferred close 2223 * 2224 * @twork: callback_head for task work 2225 * @fd: fd to close 2226 * 2227 * Structure to pass task work to be handled after 2228 * returning from binder_ioctl() via task_work_add(). 2229 */ 2230 struct binder_task_work_cb { 2231 struct callback_head twork; 2232 struct file *file; 2233 }; 2234 2235 /** 2236 * binder_do_fd_close() - close list of file descriptors 2237 * @twork: callback head for task work 2238 * 2239 * It is not safe to call ksys_close() during the binder_ioctl() 2240 * function if there is a chance that binder's own file descriptor 2241 * might be closed. This is to meet the requirements for using 2242 * fdget() (see comments for __fget_light()). 
Therefore use 2243 * task_work_add() to schedule the close operation once we have
2244 * returned from binder_ioctl(). This function is a callback
2245 * for that mechanism and puts the file reference handed back by
2246 * __close_fd_get_file() in binder_deferred_fd_close().
2247 */
2248 static void binder_do_fd_close(struct callback_head *twork)
2249 {
2250 struct binder_task_work_cb *twcb = container_of(twork,
2251 struct binder_task_work_cb, twork);
2252
2253 fput(twcb->file);
2254 kfree(twcb);
2255 }
2256
2257 /**
2258 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2259 * @fd: file-descriptor to close
2260 *
2261 * See comments in binder_do_fd_close(). This function is used to schedule
2262 * a file-descriptor to be closed after returning from binder_ioctl().
2263 */
2264 static void binder_deferred_fd_close(int fd)
2265 {
2266 struct binder_task_work_cb *twcb;
2267
2268 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2269 if (!twcb)
2270 return;
2271 init_task_work(&twcb->twork, binder_do_fd_close);
2272 __close_fd_get_file(fd, &twcb->file);
2273 if (twcb->file)
2274 task_work_add(current, &twcb->twork, true);
2275 else
2276 kfree(twcb);
2277 }
2278
2279 static void binder_transaction_buffer_release(struct binder_proc *proc,
2280 struct binder_buffer *buffer,
2281 binder_size_t failed_at,
2282 bool is_failure)
2283 {
2284 int debug_id = buffer->debug_id;
2285 binder_size_t off_start_offset, buffer_offset, off_end_offset;
2286
2287 binder_debug(BINDER_DEBUG_TRANSACTION,
2288 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2289 proc->pid, buffer->debug_id,
2290 buffer->data_size, buffer->offsets_size,
2291 (unsigned long long)failed_at);
2292
2293 if (buffer->target_node)
2294 binder_dec_node(buffer->target_node, 1, 0);
2295
2296 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2297 off_end_offset = is_failure ?
failed_at : 2298 off_start_offset + buffer->offsets_size;
2299 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2300 buffer_offset += sizeof(binder_size_t)) {
2301 struct binder_object_header *hdr;
2302 size_t object_size;
2303 struct binder_object object;
2304 binder_size_t object_offset;
2305
2306 binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2307 buffer, buffer_offset,
2308 sizeof(object_offset));
2309 object_size = binder_get_object(proc, buffer,
2310 object_offset, &object);
2311 if (object_size == 0) {
2312 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2313 debug_id, (u64)object_offset, buffer->data_size);
2314 continue;
2315 }
2316 hdr = &object.hdr;
2317 switch (hdr->type) {
2318 case BINDER_TYPE_BINDER:
2319 case BINDER_TYPE_WEAK_BINDER: {
2320 struct flat_binder_object *fp;
2321 struct binder_node *node;
2322
2323 fp = to_flat_binder_object(hdr);
2324 node = binder_get_node(proc, fp->binder);
2325 if (node == NULL) {
2326 pr_err("transaction release %d bad node %016llx\n",
2327 debug_id, (u64)fp->binder);
2328 break;
2329 }
2330 binder_debug(BINDER_DEBUG_TRANSACTION,
2331 " node %d u%016llx\n",
2332 node->debug_id, (u64)node->ptr);
2333 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2334 0);
2335 binder_put_node(node);
2336 } break;
2337 case BINDER_TYPE_HANDLE:
2338 case BINDER_TYPE_WEAK_HANDLE: {
2339 struct flat_binder_object *fp;
2340 struct binder_ref_data rdata;
2341 int ret;
2342
2343 fp = to_flat_binder_object(hdr);
2344 ret = binder_dec_ref_for_handle(proc, fp->handle,
2345 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2346
2347 if (ret) {
2348 pr_err("transaction release %d bad handle %d, ret = %d\n",
2349 debug_id, fp->handle, ret);
2350 break;
2351 }
2352 binder_debug(BINDER_DEBUG_TRANSACTION,
2353 " ref %d desc %d\n",
2354 rdata.debug_id, rdata.desc);
2355 } break;
2356
2357 case BINDER_TYPE_FD: {
2358 /*
2359 * No need to close the file here since user-space
2360 * closes it for successfully delivered
2361 * transactions. For transactions that weren't
2362 * delivered, the new fd was never allocated so
2363 * there is no need to close and the fput on the
2364 * file is done when the transaction is torn
2365 * down.
2366 */
2367 WARN_ON(failed_at &&
2368 proc->tsk == current->group_leader);
2369 } break;
2370 case BINDER_TYPE_PTR:
2371 /*
2372 * Nothing to do here, this will get cleaned up when the
2373 * transaction buffer gets freed
2374 */
2375 break;
2376 case BINDER_TYPE_FDA: {
2377 struct binder_fd_array_object *fda;
2378 struct binder_buffer_object *parent;
2379 struct binder_object ptr_object;
2380 binder_size_t fda_offset;
2381 size_t fd_index;
2382 binder_size_t fd_buf_size;
2383 binder_size_t num_valid;
2384
2385 if (proc->tsk != current->group_leader) {
2386 /*
2387 * Nothing to do if running in sender context.
2388 * The fd fixups have not been applied so no
2389 * fds need to be closed.
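 * (This is the case, for example, when binder_transaction() fails
 * after copying this object and releases the partially-built buffer
 * from the sender's context; the files are then released through
 * binder_free_txn_fixups() instead.)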
2390 */ 2391 continue; 2392 } 2393 2394 num_valid = (buffer_offset - off_start_offset) / 2395 sizeof(binder_size_t); 2396 fda = to_binder_fd_array_object(hdr); 2397 parent = binder_validate_ptr(proc, buffer, &ptr_object, 2398 fda->parent, 2399 off_start_offset, 2400 NULL, 2401 num_valid); 2402 if (!parent) { 2403 pr_err("transaction release %d bad parent offset\n", 2404 debug_id); 2405 continue; 2406 } 2407 fd_buf_size = sizeof(u32) * fda->num_fds; 2408 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 2409 pr_err("transaction release %d invalid number of fds (%lld)\n", 2410 debug_id, (u64)fda->num_fds); 2411 continue; 2412 } 2413 if (fd_buf_size > parent->length || 2414 fda->parent_offset > parent->length - fd_buf_size) { 2415 /* No space for all file descriptors here. */ 2416 pr_err("transaction release %d not enough space for %lld fds in buffer\n", 2417 debug_id, (u64)fda->num_fds); 2418 continue; 2419 } 2420 /* 2421 * the source data for binder_buffer_object is visible 2422 * to user-space and the @buffer element is the user 2423 * pointer to the buffer_object containing the fd_array. 2424 * Convert the address to an offset relative to 2425 * the base of the transaction buffer. 2426 */ 2427 fda_offset = 2428 (parent->buffer - (uintptr_t)buffer->user_data) + 2429 fda->parent_offset; 2430 for (fd_index = 0; fd_index < fda->num_fds; 2431 fd_index++) { 2432 u32 fd; 2433 binder_size_t offset = fda_offset + 2434 fd_index * sizeof(fd); 2435 2436 binder_alloc_copy_from_buffer(&proc->alloc, 2437 &fd, 2438 buffer, 2439 offset, 2440 sizeof(fd)); 2441 binder_deferred_fd_close(fd); 2442 } 2443 } break; 2444 default: 2445 pr_err("transaction release %d bad object type %x\n", 2446 debug_id, hdr->type); 2447 break; 2448 } 2449 } 2450 } 2451 2452 static int binder_translate_binder(struct flat_binder_object *fp, 2453 struct binder_transaction *t, 2454 struct binder_thread *thread) 2455 { 2456 struct binder_node *node; 2457 struct binder_proc *proc = thread->proc; 2458 struct binder_proc *target_proc = t->to_proc; 2459 struct binder_ref_data rdata; 2460 int ret = 0; 2461 2462 node = binder_get_node(proc, fp->binder); 2463 if (!node) { 2464 node = binder_new_node(proc, fp); 2465 if (!node) 2466 return -ENOMEM; 2467 } 2468 if (fp->cookie != node->cookie) { 2469 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n", 2470 proc->pid, thread->pid, (u64)fp->binder, 2471 node->debug_id, (u64)fp->cookie, 2472 (u64)node->cookie); 2473 ret = -EINVAL; 2474 goto done; 2475 } 2476 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { 2477 ret = -EPERM; 2478 goto done; 2479 } 2480 2481 ret = binder_inc_ref_for_node(target_proc, node, 2482 fp->hdr.type == BINDER_TYPE_BINDER, 2483 &thread->todo, &rdata); 2484 if (ret) 2485 goto done; 2486 2487 if (fp->hdr.type == BINDER_TYPE_BINDER) 2488 fp->hdr.type = BINDER_TYPE_HANDLE; 2489 else 2490 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE; 2491 fp->binder = 0; 2492 fp->handle = rdata.desc; 2493 fp->cookie = 0; 2494 2495 trace_binder_transaction_node_to_ref(t, node, &rdata); 2496 binder_debug(BINDER_DEBUG_TRANSACTION, 2497 " node %d u%016llx -> ref %d desc %d\n", 2498 node->debug_id, (u64)node->ptr, 2499 rdata.debug_id, rdata.desc); 2500 done: 2501 binder_put_node(node); 2502 return ret; 2503 } 2504 2505 static int binder_translate_handle(struct flat_binder_object *fp, 2506 struct binder_transaction *t, 2507 struct binder_thread *thread) 2508 { 2509 struct binder_proc *proc = thread->proc; 2510 struct binder_proc *target_proc = t->to_proc; 
2511 struct binder_node *node; 2512 struct binder_ref_data src_rdata; 2513 int ret = 0; 2514 2515 node = binder_get_node_from_ref(proc, fp->handle, 2516 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata); 2517 if (!node) { 2518 binder_user_error("%d:%d got transaction with invalid handle, %d\n", 2519 proc->pid, thread->pid, fp->handle); 2520 return -EINVAL; 2521 } 2522 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { 2523 ret = -EPERM; 2524 goto done; 2525 } 2526 2527 binder_node_lock(node); 2528 if (node->proc == target_proc) { 2529 if (fp->hdr.type == BINDER_TYPE_HANDLE) 2530 fp->hdr.type = BINDER_TYPE_BINDER; 2531 else 2532 fp->hdr.type = BINDER_TYPE_WEAK_BINDER; 2533 fp->binder = node->ptr; 2534 fp->cookie = node->cookie; 2535 if (node->proc) 2536 binder_inner_proc_lock(node->proc); 2537 else 2538 __acquire(&node->proc->inner_lock); 2539 binder_inc_node_nilocked(node, 2540 fp->hdr.type == BINDER_TYPE_BINDER, 2541 0, NULL); 2542 if (node->proc) 2543 binder_inner_proc_unlock(node->proc); 2544 else 2545 __release(&node->proc->inner_lock); 2546 trace_binder_transaction_ref_to_node(t, node, &src_rdata); 2547 binder_debug(BINDER_DEBUG_TRANSACTION, 2548 " ref %d desc %d -> node %d u%016llx\n", 2549 src_rdata.debug_id, src_rdata.desc, node->debug_id, 2550 (u64)node->ptr); 2551 binder_node_unlock(node); 2552 } else { 2553 struct binder_ref_data dest_rdata; 2554 2555 binder_node_unlock(node); 2556 ret = binder_inc_ref_for_node(target_proc, node, 2557 fp->hdr.type == BINDER_TYPE_HANDLE, 2558 NULL, &dest_rdata); 2559 if (ret) 2560 goto done; 2561 2562 fp->binder = 0; 2563 fp->handle = dest_rdata.desc; 2564 fp->cookie = 0; 2565 trace_binder_transaction_ref_to_ref(t, node, &src_rdata, 2566 &dest_rdata); 2567 binder_debug(BINDER_DEBUG_TRANSACTION, 2568 " ref %d desc %d -> ref %d desc %d (node %d)\n", 2569 src_rdata.debug_id, src_rdata.desc, 2570 dest_rdata.debug_id, dest_rdata.desc, 2571 node->debug_id); 2572 } 2573 done: 2574 binder_put_node(node); 2575 return ret; 2576 } 2577 2578 static int binder_translate_fd(u32 fd, binder_size_t fd_offset, 2579 struct binder_transaction *t, 2580 struct binder_thread *thread, 2581 struct binder_transaction *in_reply_to) 2582 { 2583 struct binder_proc *proc = thread->proc; 2584 struct binder_proc *target_proc = t->to_proc; 2585 struct binder_txn_fd_fixup *fixup; 2586 struct file *file; 2587 int ret = 0; 2588 bool target_allows_fd; 2589 2590 if (in_reply_to) 2591 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS); 2592 else 2593 target_allows_fd = t->buffer->target_node->accept_fds; 2594 if (!target_allows_fd) { 2595 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n", 2596 proc->pid, thread->pid, 2597 in_reply_to ? "reply" : "transaction", 2598 fd); 2599 ret = -EPERM; 2600 goto err_fd_not_accepted; 2601 } 2602 2603 file = fget(fd); 2604 if (!file) { 2605 binder_user_error("%d:%d got transaction with invalid fd, %d\n", 2606 proc->pid, thread->pid, fd); 2607 ret = -EBADF; 2608 goto err_fget; 2609 } 2610 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file); 2611 if (ret < 0) { 2612 ret = -EPERM; 2613 goto err_security; 2614 } 2615 2616 /* 2617 * Add fixup record for this transaction. The allocation 2618 * of the fd in the target needs to be done from a 2619 * target thread. 
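	 * A sketch of one fixup's lifetime (assumed flow): the entry queued
	 * on t->fd_fixups below holds the reference taken by fget() above;
	 * when a thread in the target later consumes the transaction, a new
	 * fd is allocated in that process and written into the buffer at
	 * fixup->offset; if the transaction is torn down first,
	 * binder_free_txn_fixups() drops the reference instead.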
2620 */ 2621 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL); 2622 if (!fixup) { 2623 ret = -ENOMEM; 2624 goto err_alloc; 2625 } 2626 fixup->file = file; 2627 fixup->offset = fd_offset; 2628 trace_binder_transaction_fd_send(t, fd, fixup->offset); 2629 list_add_tail(&fixup->fixup_entry, &t->fd_fixups); 2630 2631 return ret; 2632 2633 err_alloc: 2634 err_security: 2635 fput(file); 2636 err_fget: 2637 err_fd_not_accepted: 2638 return ret; 2639 } 2640 2641 static int binder_translate_fd_array(struct binder_fd_array_object *fda, 2642 struct binder_buffer_object *parent, 2643 struct binder_transaction *t, 2644 struct binder_thread *thread, 2645 struct binder_transaction *in_reply_to) 2646 { 2647 binder_size_t fdi, fd_buf_size; 2648 binder_size_t fda_offset; 2649 struct binder_proc *proc = thread->proc; 2650 struct binder_proc *target_proc = t->to_proc; 2651 2652 fd_buf_size = sizeof(u32) * fda->num_fds; 2653 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 2654 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n", 2655 proc->pid, thread->pid, (u64)fda->num_fds); 2656 return -EINVAL; 2657 } 2658 if (fd_buf_size > parent->length || 2659 fda->parent_offset > parent->length - fd_buf_size) { 2660 /* No space for all file descriptors here. */ 2661 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n", 2662 proc->pid, thread->pid, (u64)fda->num_fds); 2663 return -EINVAL; 2664 } 2665 /* 2666 * the source data for binder_buffer_object is visible 2667 * to user-space and the @buffer element is the user 2668 * pointer to the buffer_object containing the fd_array. 2669 * Convert the address to an offset relative to 2670 * the base of the transaction buffer. 2671 */ 2672 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) + 2673 fda->parent_offset; 2674 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) { 2675 binder_user_error("%d:%d parent offset not aligned correctly.\n", 2676 proc->pid, thread->pid); 2677 return -EINVAL; 2678 } 2679 for (fdi = 0; fdi < fda->num_fds; fdi++) { 2680 u32 fd; 2681 int ret; 2682 binder_size_t offset = fda_offset + fdi * sizeof(fd); 2683 2684 binder_alloc_copy_from_buffer(&target_proc->alloc, 2685 &fd, t->buffer, 2686 offset, sizeof(fd)); 2687 ret = binder_translate_fd(fd, offset, t, thread, 2688 in_reply_to); 2689 if (ret < 0) 2690 return ret; 2691 } 2692 return 0; 2693 } 2694 2695 static int binder_fixup_parent(struct binder_transaction *t, 2696 struct binder_thread *thread, 2697 struct binder_buffer_object *bp, 2698 binder_size_t off_start_offset, 2699 binder_size_t num_valid, 2700 binder_size_t last_fixup_obj_off, 2701 binder_size_t last_fixup_min_off) 2702 { 2703 struct binder_buffer_object *parent; 2704 struct binder_buffer *b = t->buffer; 2705 struct binder_proc *proc = thread->proc; 2706 struct binder_proc *target_proc = t->to_proc; 2707 struct binder_object object; 2708 binder_size_t buffer_offset; 2709 binder_size_t parent_offset; 2710 2711 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT)) 2712 return 0; 2713 2714 parent = binder_validate_ptr(target_proc, b, &object, bp->parent, 2715 off_start_offset, &parent_offset, 2716 num_valid); 2717 if (!parent) { 2718 binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 2719 proc->pid, thread->pid); 2720 return -EINVAL; 2721 } 2722 2723 if (!binder_validate_fixup(target_proc, b, off_start_offset, 2724 parent_offset, bp->parent_offset, 2725 last_fixup_obj_off, 2726 last_fixup_min_off)) { 2727 binder_user_error("%d:%d got transaction with 
out-of-order buffer fixup\n", 2728 proc->pid, thread->pid); 2729 return -EINVAL; 2730 } 2731 2732 if (parent->length < sizeof(binder_uintptr_t) || 2733 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) { 2734 /* No space for a pointer here! */ 2735 binder_user_error("%d:%d got transaction with invalid parent offset\n", 2736 proc->pid, thread->pid); 2737 return -EINVAL; 2738 } 2739 buffer_offset = bp->parent_offset + 2740 (uintptr_t)parent->buffer - (uintptr_t)b->user_data; 2741 binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset, 2742 &bp->buffer, sizeof(bp->buffer)); 2743 2744 return 0; 2745 } 2746 2747 /** 2748 * binder_proc_transaction() - sends a transaction to a process and wakes it up 2749 * @t: transaction to send 2750 * @proc: process to send the transaction to 2751 * @thread: thread in @proc to send the transaction to (may be NULL) 2752 * 2753 * This function queues a transaction to the specified process. It will try 2754 * to find a thread in the target process to handle the transaction and 2755 * wake it up. If no thread is found, the work is queued to the proc 2756 * waitqueue. 2757 * 2758 * If the @thread parameter is not NULL, the transaction is always queued 2759 * to the waitlist of that specific thread. 2760 * 2761 * Return: true if the transactions was successfully queued 2762 * false if the target process or thread is dead 2763 */ 2764 static bool binder_proc_transaction(struct binder_transaction *t, 2765 struct binder_proc *proc, 2766 struct binder_thread *thread) 2767 { 2768 struct binder_node *node = t->buffer->target_node; 2769 bool oneway = !!(t->flags & TF_ONE_WAY); 2770 bool pending_async = false; 2771 2772 BUG_ON(!node); 2773 binder_node_lock(node); 2774 if (oneway) { 2775 BUG_ON(thread); 2776 if (node->has_async_transaction) { 2777 pending_async = true; 2778 } else { 2779 node->has_async_transaction = true; 2780 } 2781 } 2782 2783 binder_inner_proc_lock(proc); 2784 2785 if (proc->is_dead || (thread && thread->is_dead)) { 2786 binder_inner_proc_unlock(proc); 2787 binder_node_unlock(node); 2788 return false; 2789 } 2790 2791 if (!thread && !pending_async) 2792 thread = binder_select_thread_ilocked(proc); 2793 2794 if (thread) 2795 binder_enqueue_thread_work_ilocked(thread, &t->work); 2796 else if (!pending_async) 2797 binder_enqueue_work_ilocked(&t->work, &proc->todo); 2798 else 2799 binder_enqueue_work_ilocked(&t->work, &node->async_todo); 2800 2801 if (!pending_async) 2802 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */); 2803 2804 binder_inner_proc_unlock(proc); 2805 binder_node_unlock(node); 2806 2807 return true; 2808 } 2809 2810 /** 2811 * binder_get_node_refs_for_txn() - Get required refs on node for txn 2812 * @node: struct binder_node for which to get refs 2813 * @proc: returns @node->proc if valid 2814 * @error: if no @proc then returns BR_DEAD_REPLY 2815 * 2816 * User-space normally keeps the node alive when creating a transaction 2817 * since it has a reference to the target. The local strong ref keeps it 2818 * alive if the sending process dies before the target process processes 2819 * the transaction. If the source process is malicious or has a reference 2820 * counting bug, relying on the local strong ref can fail. 2821 * 2822 * Since user-space can cause the local strong ref to go away, we also take 2823 * a tmpref on the node to ensure it survives while we are constructing 2824 * the transaction. 
We also need a tmpref on the proc while we are 2825 * constructing the transaction, so we take that here as well.
2826 *
2827 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2828 * Also sets @proc if valid. If the @node->proc is NULL indicating that the
2829 * target proc has died, @error is set to BR_DEAD_REPLY.
2830 */
2831 static struct binder_node *binder_get_node_refs_for_txn(
2832 struct binder_node *node,
2833 struct binder_proc **procp,
2834 uint32_t *error)
2835 {
2836 struct binder_node *target_node = NULL;
2837
2838 binder_node_inner_lock(node);
2839 if (node->proc) {
2840 target_node = node;
2841 binder_inc_node_nilocked(node, 1, 0, NULL);
2842 binder_inc_node_tmpref_ilocked(node);
2843 node->proc->tmp_ref++;
2844 *procp = node->proc;
2845 } else
2846 *error = BR_DEAD_REPLY;
2847 binder_node_inner_unlock(node);
2848
2849 return target_node;
2850 }
2851
2852 static void binder_transaction(struct binder_proc *proc,
2853 struct binder_thread *thread,
2854 struct binder_transaction_data *tr, int reply,
2855 binder_size_t extra_buffers_size)
2856 {
2857 int ret;
2858 struct binder_transaction *t;
2859 struct binder_work *w;
2860 struct binder_work *tcomplete;
2861 binder_size_t buffer_offset = 0;
2862 binder_size_t off_start_offset, off_end_offset;
2863 binder_size_t off_min;
2864 binder_size_t sg_buf_offset, sg_buf_end_offset;
2865 struct binder_proc *target_proc = NULL;
2866 struct binder_thread *target_thread = NULL;
2867 struct binder_node *target_node = NULL;
2868 struct binder_transaction *in_reply_to = NULL;
2869 struct binder_transaction_log_entry *e;
2870 uint32_t return_error = 0;
2871 uint32_t return_error_param = 0;
2872 uint32_t return_error_line = 0;
2873 binder_size_t last_fixup_obj_off = 0;
2874 binder_size_t last_fixup_min_off = 0;
2875 struct binder_context *context = proc->context;
2876 int t_debug_id = atomic_inc_return(&binder_last_id);
2877 char *secctx = NULL;
2878 u32 secctx_sz = 0;
2879
2880 e = binder_transaction_log_add(&binder_transaction_log);
2881 e->debug_id = t_debug_id;
2882 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2883 e->from_proc = proc->pid;
2884 e->from_thread = thread->pid;
2885 e->target_handle = tr->target.handle;
2886 e->data_size = tr->data_size;
2887 e->offsets_size = tr->offsets_size;
2888 e->context_name = proc->context->name;
2889
2890 if (reply) {
2891 binder_inner_proc_lock(proc);
2892 in_reply_to = thread->transaction_stack;
2893 if (in_reply_to == NULL) {
2894 binder_inner_proc_unlock(proc);
2895 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2896 proc->pid, thread->pid);
2897 return_error = BR_FAILED_REPLY;
2898 return_error_param = -EPROTO;
2899 return_error_line = __LINE__;
2900 goto err_empty_call_stack;
2901 }
2902 if (in_reply_to->to_thread != thread) {
2903 spin_lock(&in_reply_to->lock);
2904 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2905 proc->pid, thread->pid, in_reply_to->debug_id,
2906 in_reply_to->to_proc ?
2907 in_reply_to->to_proc->pid : 0,
2908 in_reply_to->to_thread ?
2909 in_reply_to->to_thread->pid : 0); 2910 spin_unlock(&in_reply_to->lock); 2911 binder_inner_proc_unlock(proc); 2912 return_error = BR_FAILED_REPLY; 2913 return_error_param = -EPROTO; 2914 return_error_line = __LINE__; 2915 in_reply_to = NULL; 2916 goto err_bad_call_stack; 2917 } 2918 thread->transaction_stack = in_reply_to->to_parent; 2919 binder_inner_proc_unlock(proc); 2920 binder_set_nice(in_reply_to->saved_priority); 2921 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to); 2922 if (target_thread == NULL) { 2923 /* annotation for sparse */ 2924 __release(&target_thread->proc->inner_lock); 2925 return_error = BR_DEAD_REPLY; 2926 return_error_line = __LINE__; 2927 goto err_dead_binder; 2928 } 2929 if (target_thread->transaction_stack != in_reply_to) { 2930 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n", 2931 proc->pid, thread->pid, 2932 target_thread->transaction_stack ? 2933 target_thread->transaction_stack->debug_id : 0, 2934 in_reply_to->debug_id); 2935 binder_inner_proc_unlock(target_thread->proc); 2936 return_error = BR_FAILED_REPLY; 2937 return_error_param = -EPROTO; 2938 return_error_line = __LINE__; 2939 in_reply_to = NULL; 2940 target_thread = NULL; 2941 goto err_dead_binder; 2942 } 2943 target_proc = target_thread->proc; 2944 target_proc->tmp_ref++; 2945 binder_inner_proc_unlock(target_thread->proc); 2946 } else { 2947 if (tr->target.handle) { 2948 struct binder_ref *ref; 2949 2950 /* 2951 * There must already be a strong ref 2952 * on this node. If so, do a strong 2953 * increment on the node to ensure it 2954 * stays alive until the transaction is 2955 * done. 2956 */ 2957 binder_proc_lock(proc); 2958 ref = binder_get_ref_olocked(proc, tr->target.handle, 2959 true); 2960 if (ref) { 2961 target_node = binder_get_node_refs_for_txn( 2962 ref->node, &target_proc, 2963 &return_error); 2964 } else { 2965 binder_user_error("%d:%d got transaction to invalid handle\n", 2966 proc->pid, thread->pid); 2967 return_error = BR_FAILED_REPLY; 2968 } 2969 binder_proc_unlock(proc); 2970 } else { 2971 mutex_lock(&context->context_mgr_node_lock); 2972 target_node = context->binder_context_mgr_node; 2973 if (target_node) 2974 target_node = binder_get_node_refs_for_txn( 2975 target_node, &target_proc, 2976 &return_error); 2977 else 2978 return_error = BR_DEAD_REPLY; 2979 mutex_unlock(&context->context_mgr_node_lock); 2980 if (target_node && target_proc == proc) { 2981 binder_user_error("%d:%d got transaction to context manager from process owning it\n", 2982 proc->pid, thread->pid); 2983 return_error = BR_FAILED_REPLY; 2984 return_error_param = -EINVAL; 2985 return_error_line = __LINE__; 2986 goto err_invalid_target_handle; 2987 } 2988 } 2989 if (!target_node) { 2990 /* 2991 * return_error is set above 2992 */ 2993 return_error_param = -EINVAL; 2994 return_error_line = __LINE__; 2995 goto err_dead_binder; 2996 } 2997 e->to_node = target_node->debug_id; 2998 if (security_binder_transaction(proc->tsk, 2999 target_proc->tsk) < 0) { 3000 return_error = BR_FAILED_REPLY; 3001 return_error_param = -EPERM; 3002 return_error_line = __LINE__; 3003 goto err_invalid_target_handle; 3004 } 3005 binder_inner_proc_lock(proc); 3006 3007 w = list_first_entry_or_null(&thread->todo, 3008 struct binder_work, entry); 3009 if (!(tr->flags & TF_ONE_WAY) && w && 3010 w->type == BINDER_WORK_TRANSACTION) { 3011 /* 3012 * Do not allow new outgoing transaction from a 3013 * thread that has a transaction at the head of 3014 * its todo list. 
Only need to check the head 3015 * because binder_select_thread_ilocked picks a 3016 * thread from proc->waiting_threads to enqueue 3017 * the transaction, and nothing is queued to the 3018 * todo list while the thread is on waiting_threads. 3019 */ 3020 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n", 3021 proc->pid, thread->pid); 3022 binder_inner_proc_unlock(proc); 3023 return_error = BR_FAILED_REPLY; 3024 return_error_param = -EPROTO; 3025 return_error_line = __LINE__; 3026 goto err_bad_todo_list; 3027 } 3028 3029 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { 3030 struct binder_transaction *tmp; 3031 3032 tmp = thread->transaction_stack; 3033 if (tmp->to_thread != thread) { 3034 spin_lock(&tmp->lock); 3035 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", 3036 proc->pid, thread->pid, tmp->debug_id, 3037 tmp->to_proc ? tmp->to_proc->pid : 0, 3038 tmp->to_thread ? 3039 tmp->to_thread->pid : 0); 3040 spin_unlock(&tmp->lock); 3041 binder_inner_proc_unlock(proc); 3042 return_error = BR_FAILED_REPLY; 3043 return_error_param = -EPROTO; 3044 return_error_line = __LINE__; 3045 goto err_bad_call_stack; 3046 } 3047 while (tmp) { 3048 struct binder_thread *from; 3049 3050 spin_lock(&tmp->lock); 3051 from = tmp->from; 3052 if (from && from->proc == target_proc) { 3053 atomic_inc(&from->tmp_ref); 3054 target_thread = from; 3055 spin_unlock(&tmp->lock); 3056 break; 3057 } 3058 spin_unlock(&tmp->lock); 3059 tmp = tmp->from_parent; 3060 } 3061 } 3062 binder_inner_proc_unlock(proc); 3063 } 3064 if (target_thread) 3065 e->to_thread = target_thread->pid; 3066 e->to_proc = target_proc->pid; 3067 3068 /* TODO: reuse incoming transaction for reply */ 3069 t = kzalloc(sizeof(*t), GFP_KERNEL); 3070 if (t == NULL) { 3071 return_error = BR_FAILED_REPLY; 3072 return_error_param = -ENOMEM; 3073 return_error_line = __LINE__; 3074 goto err_alloc_t_failed; 3075 } 3076 INIT_LIST_HEAD(&t->fd_fixups); 3077 binder_stats_created(BINDER_STAT_TRANSACTION); 3078 spin_lock_init(&t->lock); 3079 3080 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); 3081 if (tcomplete == NULL) { 3082 return_error = BR_FAILED_REPLY; 3083 return_error_param = -ENOMEM; 3084 return_error_line = __LINE__; 3085 goto err_alloc_tcomplete_failed; 3086 } 3087 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); 3088 3089 t->debug_id = t_debug_id; 3090 3091 if (reply) 3092 binder_debug(BINDER_DEBUG_TRANSACTION, 3093 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n", 3094 proc->pid, thread->pid, t->debug_id, 3095 target_proc->pid, target_thread->pid, 3096 (u64)tr->data.ptr.buffer, 3097 (u64)tr->data.ptr.offsets, 3098 (u64)tr->data_size, (u64)tr->offsets_size, 3099 (u64)extra_buffers_size); 3100 else 3101 binder_debug(BINDER_DEBUG_TRANSACTION, 3102 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n", 3103 proc->pid, thread->pid, t->debug_id, 3104 target_proc->pid, target_node->debug_id, 3105 (u64)tr->data.ptr.buffer, 3106 (u64)tr->data.ptr.offsets, 3107 (u64)tr->data_size, (u64)tr->offsets_size, 3108 (u64)extra_buffers_size); 3109 3110 if (!reply && !(tr->flags & TF_ONE_WAY)) 3111 t->from = thread; 3112 else 3113 t->from = NULL; 3114 t->sender_euid = task_euid(proc->tsk); 3115 t->to_proc = target_proc; 3116 t->to_thread = target_thread; 3117 t->code = tr->code; 3118 t->flags = tr->flags; 3119 t->priority = task_nice(current); 3120 3121 if (target_node && 
target_node->txn_security_ctx) { 3122 u32 secid; 3123 3124 security_task_getsecid(proc->tsk, &secid); 3125 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz); 3126 if (ret) { 3127 return_error = BR_FAILED_REPLY; 3128 return_error_param = ret; 3129 return_error_line = __LINE__; 3130 goto err_get_secctx_failed; 3131 } 3132 extra_buffers_size += ALIGN(secctx_sz, sizeof(u64)); 3133 } 3134 3135 trace_binder_transaction(reply, t, target_node); 3136 3137 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, 3138 tr->offsets_size, extra_buffers_size, 3139 !reply && (t->flags & TF_ONE_WAY)); 3140 if (IS_ERR(t->buffer)) { 3141 /* 3142 * -ESRCH indicates VMA cleared. The target is dying. 3143 */ 3144 return_error_param = PTR_ERR(t->buffer); 3145 return_error = return_error_param == -ESRCH ? 3146 BR_DEAD_REPLY : BR_FAILED_REPLY; 3147 return_error_line = __LINE__; 3148 t->buffer = NULL; 3149 goto err_binder_alloc_buf_failed; 3150 } 3151 if (secctx) { 3152 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) + 3153 ALIGN(tr->offsets_size, sizeof(void *)) + 3154 ALIGN(extra_buffers_size, sizeof(void *)) - 3155 ALIGN(secctx_sz, sizeof(u64)); 3156 3157 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset; 3158 binder_alloc_copy_to_buffer(&target_proc->alloc, 3159 t->buffer, buf_offset, 3160 secctx, secctx_sz); 3161 security_release_secctx(secctx, secctx_sz); 3162 secctx = NULL; 3163 } 3164 t->buffer->debug_id = t->debug_id; 3165 t->buffer->transaction = t; 3166 t->buffer->target_node = target_node; 3167 trace_binder_transaction_alloc_buf(t->buffer); 3168 3169 if (binder_alloc_copy_user_to_buffer( 3170 &target_proc->alloc, 3171 t->buffer, 0, 3172 (const void __user *) 3173 (uintptr_t)tr->data.ptr.buffer, 3174 tr->data_size)) { 3175 binder_user_error("%d:%d got transaction with invalid data ptr\n", 3176 proc->pid, thread->pid); 3177 return_error = BR_FAILED_REPLY; 3178 return_error_param = -EFAULT; 3179 return_error_line = __LINE__; 3180 goto err_copy_data_failed; 3181 } 3182 if (binder_alloc_copy_user_to_buffer( 3183 &target_proc->alloc, 3184 t->buffer, 3185 ALIGN(tr->data_size, sizeof(void *)), 3186 (const void __user *) 3187 (uintptr_t)tr->data.ptr.offsets, 3188 tr->offsets_size)) { 3189 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 3190 proc->pid, thread->pid); 3191 return_error = BR_FAILED_REPLY; 3192 return_error_param = -EFAULT; 3193 return_error_line = __LINE__; 3194 goto err_copy_data_failed; 3195 } 3196 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) { 3197 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n", 3198 proc->pid, thread->pid, (u64)tr->offsets_size); 3199 return_error = BR_FAILED_REPLY; 3200 return_error_param = -EINVAL; 3201 return_error_line = __LINE__; 3202 goto err_bad_offset; 3203 } 3204 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) { 3205 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n", 3206 proc->pid, thread->pid, 3207 (u64)extra_buffers_size); 3208 return_error = BR_FAILED_REPLY; 3209 return_error_param = -EINVAL; 3210 return_error_line = __LINE__; 3211 goto err_bad_offset; 3212 } 3213 off_start_offset = ALIGN(tr->data_size, sizeof(void *)); 3214 buffer_offset = off_start_offset; 3215 off_end_offset = off_start_offset + tr->offsets_size; 3216 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *)); 3217 sg_buf_end_offset = sg_buf_offset + extra_buffers_size; 3218 off_min = 0; 3219 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; 3220 
buffer_offset += sizeof(binder_size_t)) { 3221 struct binder_object_header *hdr; 3222 size_t object_size; 3223 struct binder_object object; 3224 binder_size_t object_offset; 3225 3226 binder_alloc_copy_from_buffer(&target_proc->alloc, 3227 &object_offset, 3228 t->buffer, 3229 buffer_offset, 3230 sizeof(object_offset)); 3231 object_size = binder_get_object(target_proc, t->buffer, 3232 object_offset, &object); 3233 if (object_size == 0 || object_offset < off_min) { 3234 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n", 3235 proc->pid, thread->pid, 3236 (u64)object_offset, 3237 (u64)off_min, 3238 (u64)t->buffer->data_size); 3239 return_error = BR_FAILED_REPLY; 3240 return_error_param = -EINVAL; 3241 return_error_line = __LINE__; 3242 goto err_bad_offset; 3243 } 3244 3245 hdr = &object.hdr; 3246 off_min = object_offset + object_size; 3247 switch (hdr->type) { 3248 case BINDER_TYPE_BINDER: 3249 case BINDER_TYPE_WEAK_BINDER: { 3250 struct flat_binder_object *fp; 3251 3252 fp = to_flat_binder_object(hdr); 3253 ret = binder_translate_binder(fp, t, thread); 3254 if (ret < 0) { 3255 return_error = BR_FAILED_REPLY; 3256 return_error_param = ret; 3257 return_error_line = __LINE__; 3258 goto err_translate_failed; 3259 } 3260 binder_alloc_copy_to_buffer(&target_proc->alloc, 3261 t->buffer, object_offset, 3262 fp, sizeof(*fp)); 3263 } break; 3264 case BINDER_TYPE_HANDLE: 3265 case BINDER_TYPE_WEAK_HANDLE: { 3266 struct flat_binder_object *fp; 3267 3268 fp = to_flat_binder_object(hdr); 3269 ret = binder_translate_handle(fp, t, thread); 3270 if (ret < 0) { 3271 return_error = BR_FAILED_REPLY; 3272 return_error_param = ret; 3273 return_error_line = __LINE__; 3274 goto err_translate_failed; 3275 } 3276 binder_alloc_copy_to_buffer(&target_proc->alloc, 3277 t->buffer, object_offset, 3278 fp, sizeof(*fp)); 3279 } break; 3280 3281 case BINDER_TYPE_FD: { 3282 struct binder_fd_object *fp = to_binder_fd_object(hdr); 3283 binder_size_t fd_offset = object_offset + 3284 (uintptr_t)&fp->fd - (uintptr_t)fp; 3285 int ret = binder_translate_fd(fp->fd, fd_offset, t, 3286 thread, in_reply_to); 3287 3288 if (ret < 0) { 3289 return_error = BR_FAILED_REPLY; 3290 return_error_param = ret; 3291 return_error_line = __LINE__; 3292 goto err_translate_failed; 3293 } 3294 fp->pad_binder = 0; 3295 binder_alloc_copy_to_buffer(&target_proc->alloc, 3296 t->buffer, object_offset, 3297 fp, sizeof(*fp)); 3298 } break; 3299 case BINDER_TYPE_FDA: { 3300 struct binder_object ptr_object; 3301 binder_size_t parent_offset; 3302 struct binder_fd_array_object *fda = 3303 to_binder_fd_array_object(hdr); 3304 size_t num_valid = (buffer_offset - off_start_offset) * 3305 sizeof(binder_size_t); 3306 struct binder_buffer_object *parent = 3307 binder_validate_ptr(target_proc, t->buffer, 3308 &ptr_object, fda->parent, 3309 off_start_offset, 3310 &parent_offset, 3311 num_valid); 3312 if (!parent) { 3313 binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 3314 proc->pid, thread->pid); 3315 return_error = BR_FAILED_REPLY; 3316 return_error_param = -EINVAL; 3317 return_error_line = __LINE__; 3318 goto err_bad_parent; 3319 } 3320 if (!binder_validate_fixup(target_proc, t->buffer, 3321 off_start_offset, 3322 parent_offset, 3323 fda->parent_offset, 3324 last_fixup_obj_off, 3325 last_fixup_min_off)) { 3326 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", 3327 proc->pid, thread->pid); 3328 return_error = BR_FAILED_REPLY; 3329 return_error_param = -EINVAL; 3330 
return_error_line = __LINE__; 3331 goto err_bad_parent; 3332 } 3333 ret = binder_translate_fd_array(fda, parent, t, thread, 3334 in_reply_to); 3335 if (ret < 0) { 3336 return_error = BR_FAILED_REPLY; 3337 return_error_param = ret; 3338 return_error_line = __LINE__; 3339 goto err_translate_failed; 3340 } 3341 last_fixup_obj_off = parent_offset; 3342 last_fixup_min_off = 3343 fda->parent_offset + sizeof(u32) * fda->num_fds; 3344 } break; 3345 case BINDER_TYPE_PTR: { 3346 struct binder_buffer_object *bp = 3347 to_binder_buffer_object(hdr); 3348 size_t buf_left = sg_buf_end_offset - sg_buf_offset; 3349 size_t num_valid; 3350 3351 if (bp->length > buf_left) { 3352 binder_user_error("%d:%d got transaction with too large buffer\n", 3353 proc->pid, thread->pid); 3354 return_error = BR_FAILED_REPLY; 3355 return_error_param = -EINVAL; 3356 return_error_line = __LINE__; 3357 goto err_bad_offset; 3358 } 3359 if (binder_alloc_copy_user_to_buffer( 3360 &target_proc->alloc, 3361 t->buffer, 3362 sg_buf_offset, 3363 (const void __user *) 3364 (uintptr_t)bp->buffer, 3365 bp->length)) { 3366 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 3367 proc->pid, thread->pid); 3368 return_error_param = -EFAULT; 3369 return_error = BR_FAILED_REPLY; 3370 return_error_line = __LINE__; 3371 goto err_copy_data_failed; 3372 } 3373 /* Fixup buffer pointer to target proc address space */ 3374 bp->buffer = (uintptr_t) 3375 t->buffer->user_data + sg_buf_offset; 3376 sg_buf_offset += ALIGN(bp->length, sizeof(u64)); 3377 3378 num_valid = (buffer_offset - off_start_offset) * 3379 sizeof(binder_size_t); 3380 ret = binder_fixup_parent(t, thread, bp, 3381 off_start_offset, 3382 num_valid, 3383 last_fixup_obj_off, 3384 last_fixup_min_off); 3385 if (ret < 0) { 3386 return_error = BR_FAILED_REPLY; 3387 return_error_param = ret; 3388 return_error_line = __LINE__; 3389 goto err_translate_failed; 3390 } 3391 binder_alloc_copy_to_buffer(&target_proc->alloc, 3392 t->buffer, object_offset, 3393 bp, sizeof(*bp)); 3394 last_fixup_obj_off = object_offset; 3395 last_fixup_min_off = 0; 3396 } break; 3397 default: 3398 binder_user_error("%d:%d got transaction with invalid object type, %x\n", 3399 proc->pid, thread->pid, hdr->type); 3400 return_error = BR_FAILED_REPLY; 3401 return_error_param = -EINVAL; 3402 return_error_line = __LINE__; 3403 goto err_bad_object_type; 3404 } 3405 } 3406 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; 3407 t->work.type = BINDER_WORK_TRANSACTION; 3408 3409 if (reply) { 3410 binder_enqueue_thread_work(thread, tcomplete); 3411 binder_inner_proc_lock(target_proc); 3412 if (target_thread->is_dead) { 3413 binder_inner_proc_unlock(target_proc); 3414 goto err_dead_proc_or_thread; 3415 } 3416 BUG_ON(t->buffer->async_transaction != 0); 3417 binder_pop_transaction_ilocked(target_thread, in_reply_to); 3418 binder_enqueue_thread_work_ilocked(target_thread, &t->work); 3419 binder_inner_proc_unlock(target_proc); 3420 wake_up_interruptible_sync(&target_thread->wait); 3421 binder_free_transaction(in_reply_to); 3422 } else if (!(t->flags & TF_ONE_WAY)) { 3423 BUG_ON(t->buffer->async_transaction != 0); 3424 binder_inner_proc_lock(proc); 3425 /* 3426 * Defer the TRANSACTION_COMPLETE, so we don't return to 3427 * userspace immediately; this allows the target process to 3428 * immediately start processing this transaction, reducing 3429 * latency. We will then return the TRANSACTION_COMPLETE when 3430 * the target replies (or there is an error). 
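		 * Roughly, for a successful synchronous call the resulting
		 * ordering is (a sketch, not an exact trace):
		 *
		 *	sender:	BC_TRANSACTION
		 *	target:	BR_TRANSACTION, handles it, BC_REPLY
		 *	sender:	BR_TRANSACTION_COMPLETE, then BR_REPLY,
		 *		both seen on the read that waits for the reply.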
3431 */ 3432 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete); 3433 t->need_reply = 1; 3434 t->from_parent = thread->transaction_stack; 3435 thread->transaction_stack = t; 3436 binder_inner_proc_unlock(proc); 3437 if (!binder_proc_transaction(t, target_proc, target_thread)) { 3438 binder_inner_proc_lock(proc); 3439 binder_pop_transaction_ilocked(thread, t); 3440 binder_inner_proc_unlock(proc); 3441 goto err_dead_proc_or_thread; 3442 } 3443 } else { 3444 BUG_ON(target_node == NULL); 3445 BUG_ON(t->buffer->async_transaction != 1); 3446 binder_enqueue_thread_work(thread, tcomplete); 3447 if (!binder_proc_transaction(t, target_proc, NULL)) 3448 goto err_dead_proc_or_thread; 3449 } 3450 if (target_thread) 3451 binder_thread_dec_tmpref(target_thread); 3452 binder_proc_dec_tmpref(target_proc); 3453 if (target_node) 3454 binder_dec_node_tmpref(target_node); 3455 /* 3456 * write barrier to synchronize with initialization 3457 * of log entry 3458 */ 3459 smp_wmb(); 3460 WRITE_ONCE(e->debug_id_done, t_debug_id); 3461 return; 3462 3463 err_dead_proc_or_thread: 3464 return_error = BR_DEAD_REPLY; 3465 return_error_line = __LINE__; 3466 binder_dequeue_work(proc, tcomplete); 3467 err_translate_failed: 3468 err_bad_object_type: 3469 err_bad_offset: 3470 err_bad_parent: 3471 err_copy_data_failed: 3472 binder_free_txn_fixups(t); 3473 trace_binder_transaction_failed_buffer_release(t->buffer); 3474 binder_transaction_buffer_release(target_proc, t->buffer, 3475 buffer_offset, true); 3476 if (target_node) 3477 binder_dec_node_tmpref(target_node); 3478 target_node = NULL; 3479 t->buffer->transaction = NULL; 3480 binder_alloc_free_buf(&target_proc->alloc, t->buffer); 3481 err_binder_alloc_buf_failed: 3482 if (secctx) 3483 security_release_secctx(secctx, secctx_sz); 3484 err_get_secctx_failed: 3485 kfree(tcomplete); 3486 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 3487 err_alloc_tcomplete_failed: 3488 kfree(t); 3489 binder_stats_deleted(BINDER_STAT_TRANSACTION); 3490 err_alloc_t_failed: 3491 err_bad_todo_list: 3492 err_bad_call_stack: 3493 err_empty_call_stack: 3494 err_dead_binder: 3495 err_invalid_target_handle: 3496 if (target_thread) 3497 binder_thread_dec_tmpref(target_thread); 3498 if (target_proc) 3499 binder_proc_dec_tmpref(target_proc); 3500 if (target_node) { 3501 binder_dec_node(target_node, 1, 0); 3502 binder_dec_node_tmpref(target_node); 3503 } 3504 3505 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 3506 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n", 3507 proc->pid, thread->pid, return_error, return_error_param, 3508 (u64)tr->data_size, (u64)tr->offsets_size, 3509 return_error_line); 3510 3511 { 3512 struct binder_transaction_log_entry *fe; 3513 3514 e->return_error = return_error; 3515 e->return_error_param = return_error_param; 3516 e->return_error_line = return_error_line; 3517 fe = binder_transaction_log_add(&binder_transaction_log_failed); 3518 *fe = *e; 3519 /* 3520 * write barrier to synchronize with initialization 3521 * of log entry 3522 */ 3523 smp_wmb(); 3524 WRITE_ONCE(e->debug_id_done, t_debug_id); 3525 WRITE_ONCE(fe->debug_id_done, t_debug_id); 3526 } 3527 3528 BUG_ON(thread->return_error.cmd != BR_OK); 3529 if (in_reply_to) { 3530 thread->return_error.cmd = BR_TRANSACTION_COMPLETE; 3531 binder_enqueue_thread_work(thread, &thread->return_error.work); 3532 binder_send_failed_reply(in_reply_to, return_error); 3533 } else { 3534 thread->return_error.cmd = return_error; 3535 binder_enqueue_thread_work(thread, &thread->return_error.work); 3536 } 3537 } 
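/*
 * Illustrative sketch (not part of the driver): the user-space side of the
 * simplest path through binder_transaction() above -- a synchronous call on
 * a handle with no embedded binder objects. The names binder_fd, handle and
 * args are assumptions for the example, and error handling is omitted.
 *
 *	struct binder_transaction_data tr = {
 *		.target.handle = handle,	// ref obtained earlier
 *		.code = 1,			// application-defined method code
 *		.flags = 0,			// synchronous, not TF_ONE_WAY
 *		.data_size = sizeof(args),
 *		.offsets_size = 0,		// no flat_binder_objects
 *		.data.ptr.buffer = (binder_uintptr_t)(uintptr_t)&args,
 *		.data.ptr.offsets = 0,
 *	};
 *	unsigned char wbuf[sizeof(uint32_t) + sizeof(tr)];
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(wbuf),
 *		.write_buffer = (binder_uintptr_t)(uintptr_t)wbuf,
 *	};
 *
 *	*(uint32_t *)wbuf = BC_TRANSACTION;
 *	memcpy(wbuf + sizeof(uint32_t), &tr, sizeof(tr));
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 * The driver then reaches binder_transaction(proc, thread, &tr, 0, 0) for the
 * calling thread; the reply is consumed by a BINDER_WRITE_READ whose
 * read_buffer receives BR_TRANSACTION_COMPLETE followed by BR_REPLY.
 */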
3538 3539 /** 3540 * binder_free_buf() - free the specified buffer 3541 * @proc: binder proc that owns buffer 3542 * @buffer: buffer to be freed 3543 * 3544 * If buffer for an async transaction, enqueue the next async 3545 * transaction from the node. 3546 * 3547 * Cleanup buffer and free it. 3548 */ 3549 static void 3550 binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer) 3551 { 3552 if (buffer->transaction) { 3553 buffer->transaction->buffer = NULL; 3554 buffer->transaction = NULL; 3555 } 3556 if (buffer->async_transaction && buffer->target_node) { 3557 struct binder_node *buf_node; 3558 struct binder_work *w; 3559 3560 buf_node = buffer->target_node; 3561 binder_node_inner_lock(buf_node); 3562 BUG_ON(!buf_node->has_async_transaction); 3563 BUG_ON(buf_node->proc != proc); 3564 w = binder_dequeue_work_head_ilocked( 3565 &buf_node->async_todo); 3566 if (!w) { 3567 buf_node->has_async_transaction = false; 3568 } else { 3569 binder_enqueue_work_ilocked( 3570 w, &proc->todo); 3571 binder_wakeup_proc_ilocked(proc); 3572 } 3573 binder_node_inner_unlock(buf_node); 3574 } 3575 trace_binder_transaction_buffer_release(buffer); 3576 binder_transaction_buffer_release(proc, buffer, 0, false); 3577 binder_alloc_free_buf(&proc->alloc, buffer); 3578 } 3579 3580 static int binder_thread_write(struct binder_proc *proc, 3581 struct binder_thread *thread, 3582 binder_uintptr_t binder_buffer, size_t size, 3583 binder_size_t *consumed) 3584 { 3585 uint32_t cmd; 3586 struct binder_context *context = proc->context; 3587 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 3588 void __user *ptr = buffer + *consumed; 3589 void __user *end = buffer + size; 3590 3591 while (ptr < end && thread->return_error.cmd == BR_OK) { 3592 int ret; 3593 3594 if (get_user(cmd, (uint32_t __user *)ptr)) 3595 return -EFAULT; 3596 ptr += sizeof(uint32_t); 3597 trace_binder_command(cmd); 3598 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { 3599 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]); 3600 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]); 3601 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]); 3602 } 3603 switch (cmd) { 3604 case BC_INCREFS: 3605 case BC_ACQUIRE: 3606 case BC_RELEASE: 3607 case BC_DECREFS: { 3608 uint32_t target; 3609 const char *debug_string; 3610 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE; 3611 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE; 3612 struct binder_ref_data rdata; 3613 3614 if (get_user(target, (uint32_t __user *)ptr)) 3615 return -EFAULT; 3616 3617 ptr += sizeof(uint32_t); 3618 ret = -1; 3619 if (increment && !target) { 3620 struct binder_node *ctx_mgr_node; 3621 mutex_lock(&context->context_mgr_node_lock); 3622 ctx_mgr_node = context->binder_context_mgr_node; 3623 if (ctx_mgr_node) 3624 ret = binder_inc_ref_for_node( 3625 proc, ctx_mgr_node, 3626 strong, NULL, &rdata); 3627 mutex_unlock(&context->context_mgr_node_lock); 3628 } 3629 if (ret) 3630 ret = binder_update_ref_for_handle( 3631 proc, target, increment, strong, 3632 &rdata); 3633 if (!ret && rdata.desc != target) { 3634 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n", 3635 proc->pid, thread->pid, 3636 target, rdata.desc); 3637 } 3638 switch (cmd) { 3639 case BC_INCREFS: 3640 debug_string = "IncRefs"; 3641 break; 3642 case BC_ACQUIRE: 3643 debug_string = "Acquire"; 3644 break; 3645 case BC_RELEASE: 3646 debug_string = "Release"; 3647 break; 3648 case BC_DECREFS: 3649 default: 3650 debug_string = "DecRefs"; 3651 break; 3652 } 3653 if (ret) { 3654 binder_user_error("%d:%d 
%s %d refcount change on invalid ref %d ret %d\n", 3655 proc->pid, thread->pid, debug_string, 3656 strong, target, ret); 3657 break; 3658 } 3659 binder_debug(BINDER_DEBUG_USER_REFS, 3660 "%d:%d %s ref %d desc %d s %d w %d\n", 3661 proc->pid, thread->pid, debug_string, 3662 rdata.debug_id, rdata.desc, rdata.strong, 3663 rdata.weak); 3664 break; 3665 } 3666 case BC_INCREFS_DONE: 3667 case BC_ACQUIRE_DONE: { 3668 binder_uintptr_t node_ptr; 3669 binder_uintptr_t cookie; 3670 struct binder_node *node; 3671 bool free_node; 3672 3673 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr)) 3674 return -EFAULT; 3675 ptr += sizeof(binder_uintptr_t); 3676 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3677 return -EFAULT; 3678 ptr += sizeof(binder_uintptr_t); 3679 node = binder_get_node(proc, node_ptr); 3680 if (node == NULL) { 3681 binder_user_error("%d:%d %s u%016llx no match\n", 3682 proc->pid, thread->pid, 3683 cmd == BC_INCREFS_DONE ? 3684 "BC_INCREFS_DONE" : 3685 "BC_ACQUIRE_DONE", 3686 (u64)node_ptr); 3687 break; 3688 } 3689 if (cookie != node->cookie) { 3690 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n", 3691 proc->pid, thread->pid, 3692 cmd == BC_INCREFS_DONE ? 3693 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 3694 (u64)node_ptr, node->debug_id, 3695 (u64)cookie, (u64)node->cookie); 3696 binder_put_node(node); 3697 break; 3698 } 3699 binder_node_inner_lock(node); 3700 if (cmd == BC_ACQUIRE_DONE) { 3701 if (node->pending_strong_ref == 0) { 3702 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", 3703 proc->pid, thread->pid, 3704 node->debug_id); 3705 binder_node_inner_unlock(node); 3706 binder_put_node(node); 3707 break; 3708 } 3709 node->pending_strong_ref = 0; 3710 } else { 3711 if (node->pending_weak_ref == 0) { 3712 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", 3713 proc->pid, thread->pid, 3714 node->debug_id); 3715 binder_node_inner_unlock(node); 3716 binder_put_node(node); 3717 break; 3718 } 3719 node->pending_weak_ref = 0; 3720 } 3721 free_node = binder_dec_node_nilocked(node, 3722 cmd == BC_ACQUIRE_DONE, 0); 3723 WARN_ON(free_node); 3724 binder_debug(BINDER_DEBUG_USER_REFS, 3725 "%d:%d %s node %d ls %d lw %d tr %d\n", 3726 proc->pid, thread->pid, 3727 cmd == BC_INCREFS_DONE ? 
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 3728 node->debug_id, node->local_strong_refs, 3729 node->local_weak_refs, node->tmp_refs); 3730 binder_node_inner_unlock(node); 3731 binder_put_node(node); 3732 break; 3733 } 3734 case BC_ATTEMPT_ACQUIRE: 3735 pr_err("BC_ATTEMPT_ACQUIRE not supported\n"); 3736 return -EINVAL; 3737 case BC_ACQUIRE_RESULT: 3738 pr_err("BC_ACQUIRE_RESULT not supported\n"); 3739 return -EINVAL; 3740 3741 case BC_FREE_BUFFER: { 3742 binder_uintptr_t data_ptr; 3743 struct binder_buffer *buffer; 3744 3745 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr)) 3746 return -EFAULT; 3747 ptr += sizeof(binder_uintptr_t); 3748 3749 buffer = binder_alloc_prepare_to_free(&proc->alloc, 3750 data_ptr); 3751 if (IS_ERR_OR_NULL(buffer)) { 3752 if (PTR_ERR(buffer) == -EPERM) { 3753 binder_user_error( 3754 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n", 3755 proc->pid, thread->pid, 3756 (u64)data_ptr); 3757 } else { 3758 binder_user_error( 3759 "%d:%d BC_FREE_BUFFER u%016llx no match\n", 3760 proc->pid, thread->pid, 3761 (u64)data_ptr); 3762 } 3763 break; 3764 } 3765 binder_debug(BINDER_DEBUG_FREE_BUFFER, 3766 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n", 3767 proc->pid, thread->pid, (u64)data_ptr, 3768 buffer->debug_id, 3769 buffer->transaction ? "active" : "finished"); 3770 binder_free_buf(proc, buffer); 3771 break; 3772 } 3773 3774 case BC_TRANSACTION_SG: 3775 case BC_REPLY_SG: { 3776 struct binder_transaction_data_sg tr; 3777 3778 if (copy_from_user(&tr, ptr, sizeof(tr))) 3779 return -EFAULT; 3780 ptr += sizeof(tr); 3781 binder_transaction(proc, thread, &tr.transaction_data, 3782 cmd == BC_REPLY_SG, tr.buffers_size); 3783 break; 3784 } 3785 case BC_TRANSACTION: 3786 case BC_REPLY: { 3787 struct binder_transaction_data tr; 3788 3789 if (copy_from_user(&tr, ptr, sizeof(tr))) 3790 return -EFAULT; 3791 ptr += sizeof(tr); 3792 binder_transaction(proc, thread, &tr, 3793 cmd == BC_REPLY, 0); 3794 break; 3795 } 3796 3797 case BC_REGISTER_LOOPER: 3798 binder_debug(BINDER_DEBUG_THREADS, 3799 "%d:%d BC_REGISTER_LOOPER\n", 3800 proc->pid, thread->pid); 3801 binder_inner_proc_lock(proc); 3802 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { 3803 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3804 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", 3805 proc->pid, thread->pid); 3806 } else if (proc->requested_threads == 0) { 3807 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3808 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n", 3809 proc->pid, thread->pid); 3810 } else { 3811 proc->requested_threads--; 3812 proc->requested_threads_started++; 3813 } 3814 thread->looper |= BINDER_LOOPER_STATE_REGISTERED; 3815 binder_inner_proc_unlock(proc); 3816 break; 3817 case BC_ENTER_LOOPER: 3818 binder_debug(BINDER_DEBUG_THREADS, 3819 "%d:%d BC_ENTER_LOOPER\n", 3820 proc->pid, thread->pid); 3821 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { 3822 thread->looper |= BINDER_LOOPER_STATE_INVALID; 3823 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n", 3824 proc->pid, thread->pid); 3825 } 3826 thread->looper |= BINDER_LOOPER_STATE_ENTERED; 3827 break; 3828 case BC_EXIT_LOOPER: 3829 binder_debug(BINDER_DEBUG_THREADS, 3830 "%d:%d BC_EXIT_LOOPER\n", 3831 proc->pid, thread->pid); 3832 thread->looper |= BINDER_LOOPER_STATE_EXITED; 3833 break; 3834 3835 case BC_REQUEST_DEATH_NOTIFICATION: 3836 case BC_CLEAR_DEATH_NOTIFICATION: { 3837 uint32_t target; 3838 
binder_uintptr_t cookie; 3839 struct binder_ref *ref; 3840 struct binder_ref_death *death = NULL; 3841 3842 if (get_user(target, (uint32_t __user *)ptr)) 3843 return -EFAULT; 3844 ptr += sizeof(uint32_t); 3845 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3846 return -EFAULT; 3847 ptr += sizeof(binder_uintptr_t); 3848 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 3849 /* 3850 * Allocate memory for death notification 3851 * before taking lock 3852 */ 3853 death = kzalloc(sizeof(*death), GFP_KERNEL); 3854 if (death == NULL) { 3855 WARN_ON(thread->return_error.cmd != 3856 BR_OK); 3857 thread->return_error.cmd = BR_ERROR; 3858 binder_enqueue_thread_work( 3859 thread, 3860 &thread->return_error.work); 3861 binder_debug( 3862 BINDER_DEBUG_FAILED_TRANSACTION, 3863 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", 3864 proc->pid, thread->pid); 3865 break; 3866 } 3867 } 3868 binder_proc_lock(proc); 3869 ref = binder_get_ref_olocked(proc, target, false); 3870 if (ref == NULL) { 3871 binder_user_error("%d:%d %s invalid ref %d\n", 3872 proc->pid, thread->pid, 3873 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 3874 "BC_REQUEST_DEATH_NOTIFICATION" : 3875 "BC_CLEAR_DEATH_NOTIFICATION", 3876 target); 3877 binder_proc_unlock(proc); 3878 kfree(death); 3879 break; 3880 } 3881 3882 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 3883 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n", 3884 proc->pid, thread->pid, 3885 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 3886 "BC_REQUEST_DEATH_NOTIFICATION" : 3887 "BC_CLEAR_DEATH_NOTIFICATION", 3888 (u64)cookie, ref->data.debug_id, 3889 ref->data.desc, ref->data.strong, 3890 ref->data.weak, ref->node->debug_id); 3891 3892 binder_node_lock(ref->node); 3893 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 3894 if (ref->death) { 3895 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", 3896 proc->pid, thread->pid); 3897 binder_node_unlock(ref->node); 3898 binder_proc_unlock(proc); 3899 kfree(death); 3900 break; 3901 } 3902 binder_stats_created(BINDER_STAT_DEATH); 3903 INIT_LIST_HEAD(&death->work.entry); 3904 death->cookie = cookie; 3905 ref->death = death; 3906 if (ref->node->proc == NULL) { 3907 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 3908 3909 binder_inner_proc_lock(proc); 3910 binder_enqueue_work_ilocked( 3911 &ref->death->work, &proc->todo); 3912 binder_wakeup_proc_ilocked(proc); 3913 binder_inner_proc_unlock(proc); 3914 } 3915 } else { 3916 if (ref->death == NULL) { 3917 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", 3918 proc->pid, thread->pid); 3919 binder_node_unlock(ref->node); 3920 binder_proc_unlock(proc); 3921 break; 3922 } 3923 death = ref->death; 3924 if (death->cookie != cookie) { 3925 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n", 3926 proc->pid, thread->pid, 3927 (u64)death->cookie, 3928 (u64)cookie); 3929 binder_node_unlock(ref->node); 3930 binder_proc_unlock(proc); 3931 break; 3932 } 3933 ref->death = NULL; 3934 binder_inner_proc_lock(proc); 3935 if (list_empty(&death->work.entry)) { 3936 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 3937 if (thread->looper & 3938 (BINDER_LOOPER_STATE_REGISTERED | 3939 BINDER_LOOPER_STATE_ENTERED)) 3940 binder_enqueue_thread_work_ilocked( 3941 thread, 3942 &death->work); 3943 else { 3944 binder_enqueue_work_ilocked( 3945 &death->work, 3946 &proc->todo); 3947 binder_wakeup_proc_ilocked( 3948 proc); 3949 } 3950 } else { 3951 BUG_ON(death->work.type != 
BINDER_WORK_DEAD_BINDER); 3952 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; 3953 } 3954 binder_inner_proc_unlock(proc); 3955 } 3956 binder_node_unlock(ref->node); 3957 binder_proc_unlock(proc); 3958 } break; 3959 case BC_DEAD_BINDER_DONE: { 3960 struct binder_work *w; 3961 binder_uintptr_t cookie; 3962 struct binder_ref_death *death = NULL; 3963 3964 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 3965 return -EFAULT; 3966 3967 ptr += sizeof(cookie); 3968 binder_inner_proc_lock(proc); 3969 list_for_each_entry(w, &proc->delivered_death, 3970 entry) { 3971 struct binder_ref_death *tmp_death = 3972 container_of(w, 3973 struct binder_ref_death, 3974 work); 3975 3976 if (tmp_death->cookie == cookie) { 3977 death = tmp_death; 3978 break; 3979 } 3980 } 3981 binder_debug(BINDER_DEBUG_DEAD_BINDER, 3982 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n", 3983 proc->pid, thread->pid, (u64)cookie, 3984 death); 3985 if (death == NULL) { 3986 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n", 3987 proc->pid, thread->pid, (u64)cookie); 3988 binder_inner_proc_unlock(proc); 3989 break; 3990 } 3991 binder_dequeue_work_ilocked(&death->work); 3992 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { 3993 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 3994 if (thread->looper & 3995 (BINDER_LOOPER_STATE_REGISTERED | 3996 BINDER_LOOPER_STATE_ENTERED)) 3997 binder_enqueue_thread_work_ilocked( 3998 thread, &death->work); 3999 else { 4000 binder_enqueue_work_ilocked( 4001 &death->work, 4002 &proc->todo); 4003 binder_wakeup_proc_ilocked(proc); 4004 } 4005 } 4006 binder_inner_proc_unlock(proc); 4007 } break; 4008 4009 default: 4010 pr_err("%d:%d unknown command %d\n", 4011 proc->pid, thread->pid, cmd); 4012 return -EINVAL; 4013 } 4014 *consumed = ptr - buffer; 4015 } 4016 return 0; 4017 } 4018 4019 static void binder_stat_br(struct binder_proc *proc, 4020 struct binder_thread *thread, uint32_t cmd) 4021 { 4022 trace_binder_return(cmd); 4023 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { 4024 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]); 4025 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]); 4026 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]); 4027 } 4028 } 4029 4030 static int binder_put_node_cmd(struct binder_proc *proc, 4031 struct binder_thread *thread, 4032 void __user **ptrp, 4033 binder_uintptr_t node_ptr, 4034 binder_uintptr_t node_cookie, 4035 int node_debug_id, 4036 uint32_t cmd, const char *cmd_name) 4037 { 4038 void __user *ptr = *ptrp; 4039 4040 if (put_user(cmd, (uint32_t __user *)ptr)) 4041 return -EFAULT; 4042 ptr += sizeof(uint32_t); 4043 4044 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr)) 4045 return -EFAULT; 4046 ptr += sizeof(binder_uintptr_t); 4047 4048 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr)) 4049 return -EFAULT; 4050 ptr += sizeof(binder_uintptr_t); 4051 4052 binder_stat_br(proc, thread, cmd); 4053 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n", 4054 proc->pid, thread->pid, cmd_name, node_debug_id, 4055 (u64)node_ptr, (u64)node_cookie); 4056 4057 *ptrp = ptr; 4058 return 0; 4059 } 4060 4061 static int binder_wait_for_work(struct binder_thread *thread, 4062 bool do_proc_work) 4063 { 4064 DEFINE_WAIT(wait); 4065 struct binder_proc *proc = thread->proc; 4066 int ret = 0; 4067 4068 freezer_do_not_count(); 4069 binder_inner_proc_lock(proc); 4070 for (;;) { 4071 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE); 4072 if (binder_has_work_ilocked(thread, do_proc_work)) 4073 break; 4074 if (do_proc_work) 4075 
list_add(&thread->waiting_thread_node, 4076 &proc->waiting_threads); 4077 binder_inner_proc_unlock(proc); 4078 schedule(); 4079 binder_inner_proc_lock(proc); 4080 list_del_init(&thread->waiting_thread_node); 4081 if (signal_pending(current)) { 4082 ret = -ERESTARTSYS; 4083 break; 4084 } 4085 } 4086 finish_wait(&thread->wait, &wait); 4087 binder_inner_proc_unlock(proc); 4088 freezer_count(); 4089 4090 return ret; 4091 } 4092 4093 /** 4094 * binder_apply_fd_fixups() - finish fd translation 4095 * @proc: binder_proc associated @t->buffer 4096 * @t: binder transaction with list of fd fixups 4097 * 4098 * Now that we are in the context of the transaction target 4099 * process, we can allocate and install fds. Process the 4100 * list of fds to translate and fixup the buffer with the 4101 * new fds. 4102 * 4103 * If we fail to allocate an fd, then free the resources by 4104 * fput'ing files that have not been processed and ksys_close'ing 4105 * any fds that have already been allocated. 4106 */ 4107 static int binder_apply_fd_fixups(struct binder_proc *proc, 4108 struct binder_transaction *t) 4109 { 4110 struct binder_txn_fd_fixup *fixup, *tmp; 4111 int ret = 0; 4112 4113 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) { 4114 int fd = get_unused_fd_flags(O_CLOEXEC); 4115 4116 if (fd < 0) { 4117 binder_debug(BINDER_DEBUG_TRANSACTION, 4118 "failed fd fixup txn %d fd %d\n", 4119 t->debug_id, fd); 4120 ret = -ENOMEM; 4121 break; 4122 } 4123 binder_debug(BINDER_DEBUG_TRANSACTION, 4124 "fd fixup txn %d fd %d\n", 4125 t->debug_id, fd); 4126 trace_binder_transaction_fd_recv(t, fd, fixup->offset); 4127 fd_install(fd, fixup->file); 4128 fixup->file = NULL; 4129 binder_alloc_copy_to_buffer(&proc->alloc, t->buffer, 4130 fixup->offset, &fd, 4131 sizeof(u32)); 4132 } 4133 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { 4134 if (fixup->file) { 4135 fput(fixup->file); 4136 } else if (ret) { 4137 u32 fd; 4138 4139 binder_alloc_copy_from_buffer(&proc->alloc, &fd, 4140 t->buffer, fixup->offset, 4141 sizeof(fd)); 4142 binder_deferred_fd_close(fd); 4143 } 4144 list_del(&fixup->fixup_entry); 4145 kfree(fixup); 4146 } 4147 4148 return ret; 4149 } 4150 4151 static int binder_thread_read(struct binder_proc *proc, 4152 struct binder_thread *thread, 4153 binder_uintptr_t binder_buffer, size_t size, 4154 binder_size_t *consumed, int non_block) 4155 { 4156 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 4157 void __user *ptr = buffer + *consumed; 4158 void __user *end = buffer + size; 4159 4160 int ret = 0; 4161 int wait_for_proc_work; 4162 4163 if (*consumed == 0) { 4164 if (put_user(BR_NOOP, (uint32_t __user *)ptr)) 4165 return -EFAULT; 4166 ptr += sizeof(uint32_t); 4167 } 4168 4169 retry: 4170 binder_inner_proc_lock(proc); 4171 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); 4172 binder_inner_proc_unlock(proc); 4173 4174 thread->looper |= BINDER_LOOPER_STATE_WAITING; 4175 4176 trace_binder_wait_for_work(wait_for_proc_work, 4177 !!thread->transaction_stack, 4178 !binder_worklist_empty(proc, &thread->todo)); 4179 if (wait_for_proc_work) { 4180 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 4181 BINDER_LOOPER_STATE_ENTERED))) { 4182 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n", 4183 proc->pid, thread->pid, thread->looper); 4184 wait_event_interruptible(binder_user_error_wait, 4185 binder_stop_on_user_error < 2); 4186 } 4187 binder_set_nice(proc->default_priority); 4188 } 
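/*
 * Wait for work: with O_NONBLOCK the read returns -EAGAIN immediately
 * when no work is queued, otherwise the thread sleeps in
 * binder_wait_for_work() until a work item is queued for it (or for the
 * process, when this thread is available for process work) or a signal
 * is pending.
 */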
4189 4190 if (non_block) { 4191 if (!binder_has_work(thread, wait_for_proc_work)) 4192 ret = -EAGAIN; 4193 } else { 4194 ret = binder_wait_for_work(thread, wait_for_proc_work); 4195 } 4196 4197 thread->looper &= ~BINDER_LOOPER_STATE_WAITING; 4198 4199 if (ret) 4200 return ret; 4201 4202 while (1) { 4203 uint32_t cmd; 4204 struct binder_transaction_data_secctx tr; 4205 struct binder_transaction_data *trd = &tr.transaction_data; 4206 struct binder_work *w = NULL; 4207 struct list_head *list = NULL; 4208 struct binder_transaction *t = NULL; 4209 struct binder_thread *t_from; 4210 size_t trsize = sizeof(*trd); 4211 4212 binder_inner_proc_lock(proc); 4213 if (!binder_worklist_empty_ilocked(&thread->todo)) 4214 list = &thread->todo; 4215 else if (!binder_worklist_empty_ilocked(&proc->todo) && 4216 wait_for_proc_work) 4217 list = &proc->todo; 4218 else { 4219 binder_inner_proc_unlock(proc); 4220 4221 /* no data added */ 4222 if (ptr - buffer == 4 && !thread->looper_need_return) 4223 goto retry; 4224 break; 4225 } 4226 4227 if (end - ptr < sizeof(tr) + 4) { 4228 binder_inner_proc_unlock(proc); 4229 break; 4230 } 4231 w = binder_dequeue_work_head_ilocked(list); 4232 if (binder_worklist_empty_ilocked(&thread->todo)) 4233 thread->process_todo = false; 4234 4235 switch (w->type) { 4236 case BINDER_WORK_TRANSACTION: { 4237 binder_inner_proc_unlock(proc); 4238 t = container_of(w, struct binder_transaction, work); 4239 } break; 4240 case BINDER_WORK_RETURN_ERROR: { 4241 struct binder_error *e = container_of( 4242 w, struct binder_error, work); 4243 4244 WARN_ON(e->cmd == BR_OK); 4245 binder_inner_proc_unlock(proc); 4246 if (put_user(e->cmd, (uint32_t __user *)ptr)) 4247 return -EFAULT; 4248 cmd = e->cmd; 4249 e->cmd = BR_OK; 4250 ptr += sizeof(uint32_t); 4251 4252 binder_stat_br(proc, thread, cmd); 4253 } break; 4254 case BINDER_WORK_TRANSACTION_COMPLETE: { 4255 binder_inner_proc_unlock(proc); 4256 cmd = BR_TRANSACTION_COMPLETE; 4257 if (put_user(cmd, (uint32_t __user *)ptr)) 4258 return -EFAULT; 4259 ptr += sizeof(uint32_t); 4260 4261 binder_stat_br(proc, thread, cmd); 4262 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, 4263 "%d:%d BR_TRANSACTION_COMPLETE\n", 4264 proc->pid, thread->pid); 4265 kfree(w); 4266 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 4267 } break; 4268 case BINDER_WORK_NODE: { 4269 struct binder_node *node = container_of(w, struct binder_node, work); 4270 int strong, weak; 4271 binder_uintptr_t node_ptr = node->ptr; 4272 binder_uintptr_t node_cookie = node->cookie; 4273 int node_debug_id = node->debug_id; 4274 int has_weak_ref; 4275 int has_strong_ref; 4276 void __user *orig_ptr = ptr; 4277 4278 BUG_ON(proc != node->proc); 4279 strong = node->internal_strong_refs || 4280 node->local_strong_refs; 4281 weak = !hlist_empty(&node->refs) || 4282 node->local_weak_refs || 4283 node->tmp_refs || strong; 4284 has_strong_ref = node->has_strong_ref; 4285 has_weak_ref = node->has_weak_ref; 4286 4287 if (weak && !has_weak_ref) { 4288 node->has_weak_ref = 1; 4289 node->pending_weak_ref = 1; 4290 node->local_weak_refs++; 4291 } 4292 if (strong && !has_strong_ref) { 4293 node->has_strong_ref = 1; 4294 node->pending_strong_ref = 1; 4295 node->local_strong_refs++; 4296 } 4297 if (!strong && has_strong_ref) 4298 node->has_strong_ref = 0; 4299 if (!weak && has_weak_ref) 4300 node->has_weak_ref = 0; 4301 if (!weak && !strong) { 4302 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 4303 "%d:%d node %d u%016llx c%016llx deleted\n", 4304 proc->pid, thread->pid, 4305 node_debug_id, 4306 (u64)node_ptr, 
4307 (u64)node_cookie); 4308 rb_erase(&node->rb_node, &proc->nodes); 4309 binder_inner_proc_unlock(proc); 4310 binder_node_lock(node); 4311 /* 4312 * Acquire the node lock before freeing the 4313 * node to serialize with other threads that 4314 * may have been holding the node lock while 4315 * decrementing this node (avoids race where 4316 * this thread frees while the other thread 4317 * is unlocking the node after the final 4318 * decrement) 4319 */ 4320 binder_node_unlock(node); 4321 binder_free_node(node); 4322 } else 4323 binder_inner_proc_unlock(proc); 4324 4325 if (weak && !has_weak_ref) 4326 ret = binder_put_node_cmd( 4327 proc, thread, &ptr, node_ptr, 4328 node_cookie, node_debug_id, 4329 BR_INCREFS, "BR_INCREFS"); 4330 if (!ret && strong && !has_strong_ref) 4331 ret = binder_put_node_cmd( 4332 proc, thread, &ptr, node_ptr, 4333 node_cookie, node_debug_id, 4334 BR_ACQUIRE, "BR_ACQUIRE"); 4335 if (!ret && !strong && has_strong_ref) 4336 ret = binder_put_node_cmd( 4337 proc, thread, &ptr, node_ptr, 4338 node_cookie, node_debug_id, 4339 BR_RELEASE, "BR_RELEASE"); 4340 if (!ret && !weak && has_weak_ref) 4341 ret = binder_put_node_cmd( 4342 proc, thread, &ptr, node_ptr, 4343 node_cookie, node_debug_id, 4344 BR_DECREFS, "BR_DECREFS"); 4345 if (orig_ptr == ptr) 4346 binder_debug(BINDER_DEBUG_INTERNAL_REFS, 4347 "%d:%d node %d u%016llx c%016llx state unchanged\n", 4348 proc->pid, thread->pid, 4349 node_debug_id, 4350 (u64)node_ptr, 4351 (u64)node_cookie); 4352 if (ret) 4353 return ret; 4354 } break; 4355 case BINDER_WORK_DEAD_BINDER: 4356 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 4357 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 4358 struct binder_ref_death *death; 4359 uint32_t cmd; 4360 binder_uintptr_t cookie; 4361 4362 death = container_of(w, struct binder_ref_death, work); 4363 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) 4364 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; 4365 else 4366 cmd = BR_DEAD_BINDER; 4367 cookie = death->cookie; 4368 4369 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 4370 "%d:%d %s %016llx\n", 4371 proc->pid, thread->pid, 4372 cmd == BR_DEAD_BINDER ? 
4373 "BR_DEAD_BINDER" : 4374 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 4375 (u64)cookie); 4376 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { 4377 binder_inner_proc_unlock(proc); 4378 kfree(death); 4379 binder_stats_deleted(BINDER_STAT_DEATH); 4380 } else { 4381 binder_enqueue_work_ilocked( 4382 w, &proc->delivered_death); 4383 binder_inner_proc_unlock(proc); 4384 } 4385 if (put_user(cmd, (uint32_t __user *)ptr)) 4386 return -EFAULT; 4387 ptr += sizeof(uint32_t); 4388 if (put_user(cookie, 4389 (binder_uintptr_t __user *)ptr)) 4390 return -EFAULT; 4391 ptr += sizeof(binder_uintptr_t); 4392 binder_stat_br(proc, thread, cmd); 4393 if (cmd == BR_DEAD_BINDER) 4394 goto done; /* DEAD_BINDER notifications can cause transactions */ 4395 } break; 4396 default: 4397 binder_inner_proc_unlock(proc); 4398 pr_err("%d:%d: bad work type %d\n", 4399 proc->pid, thread->pid, w->type); 4400 break; 4401 } 4402 4403 if (!t) 4404 continue; 4405 4406 BUG_ON(t->buffer == NULL); 4407 if (t->buffer->target_node) { 4408 struct binder_node *target_node = t->buffer->target_node; 4409 4410 trd->target.ptr = target_node->ptr; 4411 trd->cookie = target_node->cookie; 4412 t->saved_priority = task_nice(current); 4413 if (t->priority < target_node->min_priority && 4414 !(t->flags & TF_ONE_WAY)) 4415 binder_set_nice(t->priority); 4416 else if (!(t->flags & TF_ONE_WAY) || 4417 t->saved_priority > target_node->min_priority) 4418 binder_set_nice(target_node->min_priority); 4419 cmd = BR_TRANSACTION; 4420 } else { 4421 trd->target.ptr = 0; 4422 trd->cookie = 0; 4423 cmd = BR_REPLY; 4424 } 4425 trd->code = t->code; 4426 trd->flags = t->flags; 4427 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid); 4428 4429 t_from = binder_get_txn_from(t); 4430 if (t_from) { 4431 struct task_struct *sender = t_from->proc->tsk; 4432 4433 trd->sender_pid = 4434 task_tgid_nr_ns(sender, 4435 task_active_pid_ns(current)); 4436 } else { 4437 trd->sender_pid = 0; 4438 } 4439 4440 ret = binder_apply_fd_fixups(proc, t); 4441 if (ret) { 4442 struct binder_buffer *buffer = t->buffer; 4443 bool oneway = !!(t->flags & TF_ONE_WAY); 4444 int tid = t->debug_id; 4445 4446 if (t_from) 4447 binder_thread_dec_tmpref(t_from); 4448 buffer->transaction = NULL; 4449 binder_cleanup_transaction(t, "fd fixups failed", 4450 BR_FAILED_REPLY); 4451 binder_free_buf(proc, buffer); 4452 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 4453 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n", 4454 proc->pid, thread->pid, 4455 oneway ? "async " : 4456 (cmd == BR_REPLY ? 
"reply " : ""), 4457 tid, BR_FAILED_REPLY, ret, __LINE__); 4458 if (cmd == BR_REPLY) { 4459 cmd = BR_FAILED_REPLY; 4460 if (put_user(cmd, (uint32_t __user *)ptr)) 4461 return -EFAULT; 4462 ptr += sizeof(uint32_t); 4463 binder_stat_br(proc, thread, cmd); 4464 break; 4465 } 4466 continue; 4467 } 4468 trd->data_size = t->buffer->data_size; 4469 trd->offsets_size = t->buffer->offsets_size; 4470 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data; 4471 trd->data.ptr.offsets = trd->data.ptr.buffer + 4472 ALIGN(t->buffer->data_size, 4473 sizeof(void *)); 4474 4475 tr.secctx = t->security_ctx; 4476 if (t->security_ctx) { 4477 cmd = BR_TRANSACTION_SEC_CTX; 4478 trsize = sizeof(tr); 4479 } 4480 if (put_user(cmd, (uint32_t __user *)ptr)) { 4481 if (t_from) 4482 binder_thread_dec_tmpref(t_from); 4483 4484 binder_cleanup_transaction(t, "put_user failed", 4485 BR_FAILED_REPLY); 4486 4487 return -EFAULT; 4488 } 4489 ptr += sizeof(uint32_t); 4490 if (copy_to_user(ptr, &tr, trsize)) { 4491 if (t_from) 4492 binder_thread_dec_tmpref(t_from); 4493 4494 binder_cleanup_transaction(t, "copy_to_user failed", 4495 BR_FAILED_REPLY); 4496 4497 return -EFAULT; 4498 } 4499 ptr += trsize; 4500 4501 trace_binder_transaction_received(t); 4502 binder_stat_br(proc, thread, cmd); 4503 binder_debug(BINDER_DEBUG_TRANSACTION, 4504 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", 4505 proc->pid, thread->pid, 4506 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : 4507 (cmd == BR_TRANSACTION_SEC_CTX) ? 4508 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY", 4509 t->debug_id, t_from ? t_from->proc->pid : 0, 4510 t_from ? t_from->pid : 0, cmd, 4511 t->buffer->data_size, t->buffer->offsets_size, 4512 (u64)trd->data.ptr.buffer, 4513 (u64)trd->data.ptr.offsets); 4514 4515 if (t_from) 4516 binder_thread_dec_tmpref(t_from); 4517 t->buffer->allow_user_free = 1; 4518 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) { 4519 binder_inner_proc_lock(thread->proc); 4520 t->to_parent = thread->transaction_stack; 4521 t->to_thread = thread; 4522 thread->transaction_stack = t; 4523 binder_inner_proc_unlock(thread->proc); 4524 } else { 4525 binder_free_transaction(t); 4526 } 4527 break; 4528 } 4529 4530 done: 4531 4532 *consumed = ptr - buffer; 4533 binder_inner_proc_lock(proc); 4534 if (proc->requested_threads == 0 && 4535 list_empty(&thread->proc->waiting_threads) && 4536 proc->requested_threads_started < proc->max_threads && 4537 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 4538 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ 4539 /*spawn a new thread if we leave this out */) { 4540 proc->requested_threads++; 4541 binder_inner_proc_unlock(proc); 4542 binder_debug(BINDER_DEBUG_THREADS, 4543 "%d:%d BR_SPAWN_LOOPER\n", 4544 proc->pid, thread->pid); 4545 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) 4546 return -EFAULT; 4547 binder_stat_br(proc, thread, BR_SPAWN_LOOPER); 4548 } else 4549 binder_inner_proc_unlock(proc); 4550 return 0; 4551 } 4552 4553 static void binder_release_work(struct binder_proc *proc, 4554 struct list_head *list) 4555 { 4556 struct binder_work *w; 4557 4558 while (1) { 4559 w = binder_dequeue_work_head(proc, list); 4560 if (!w) 4561 return; 4562 4563 switch (w->type) { 4564 case BINDER_WORK_TRANSACTION: { 4565 struct binder_transaction *t; 4566 4567 t = container_of(w, struct binder_transaction, work); 4568 4569 binder_cleanup_transaction(t, "process died.", 4570 BR_DEAD_REPLY); 4571 } break; 4572 case BINDER_WORK_RETURN_ERROR: { 4573 struct binder_error *e = container_of( 4574 w, struct 
binder_error, work); 4575 4576 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4577 "undelivered TRANSACTION_ERROR: %u\n", 4578 e->cmd); 4579 } break; 4580 case BINDER_WORK_TRANSACTION_COMPLETE: { 4581 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4582 "undelivered TRANSACTION_COMPLETE\n"); 4583 kfree(w); 4584 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 4585 } break; 4586 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 4587 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 4588 struct binder_ref_death *death; 4589 4590 death = container_of(w, struct binder_ref_death, work); 4591 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4592 "undelivered death notification, %016llx\n", 4593 (u64)death->cookie); 4594 kfree(death); 4595 binder_stats_deleted(BINDER_STAT_DEATH); 4596 } break; 4597 default: 4598 pr_err("unexpected work type, %d, not freed\n", 4599 w->type); 4600 break; 4601 } 4602 } 4603 4604 } 4605 4606 static struct binder_thread *binder_get_thread_ilocked( 4607 struct binder_proc *proc, struct binder_thread *new_thread) 4608 { 4609 struct binder_thread *thread = NULL; 4610 struct rb_node *parent = NULL; 4611 struct rb_node **p = &proc->threads.rb_node; 4612 4613 while (*p) { 4614 parent = *p; 4615 thread = rb_entry(parent, struct binder_thread, rb_node); 4616 4617 if (current->pid < thread->pid) 4618 p = &(*p)->rb_left; 4619 else if (current->pid > thread->pid) 4620 p = &(*p)->rb_right; 4621 else 4622 return thread; 4623 } 4624 if (!new_thread) 4625 return NULL; 4626 thread = new_thread; 4627 binder_stats_created(BINDER_STAT_THREAD); 4628 thread->proc = proc; 4629 thread->pid = current->pid; 4630 atomic_set(&thread->tmp_ref, 0); 4631 init_waitqueue_head(&thread->wait); 4632 INIT_LIST_HEAD(&thread->todo); 4633 rb_link_node(&thread->rb_node, parent, p); 4634 rb_insert_color(&thread->rb_node, &proc->threads); 4635 thread->looper_need_return = true; 4636 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR; 4637 thread->return_error.cmd = BR_OK; 4638 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR; 4639 thread->reply_error.cmd = BR_OK; 4640 INIT_LIST_HEAD(&new_thread->waiting_thread_node); 4641 return thread; 4642 } 4643 4644 static struct binder_thread *binder_get_thread(struct binder_proc *proc) 4645 { 4646 struct binder_thread *thread; 4647 struct binder_thread *new_thread; 4648 4649 binder_inner_proc_lock(proc); 4650 thread = binder_get_thread_ilocked(proc, NULL); 4651 binder_inner_proc_unlock(proc); 4652 if (!thread) { 4653 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL); 4654 if (new_thread == NULL) 4655 return NULL; 4656 binder_inner_proc_lock(proc); 4657 thread = binder_get_thread_ilocked(proc, new_thread); 4658 binder_inner_proc_unlock(proc); 4659 if (thread != new_thread) 4660 kfree(new_thread); 4661 } 4662 return thread; 4663 } 4664 4665 static void binder_free_proc(struct binder_proc *proc) 4666 { 4667 BUG_ON(!list_empty(&proc->todo)); 4668 BUG_ON(!list_empty(&proc->delivered_death)); 4669 binder_alloc_deferred_release(&proc->alloc); 4670 put_task_struct(proc->tsk); 4671 binder_stats_deleted(BINDER_STAT_PROC); 4672 kfree(proc); 4673 } 4674 4675 static void binder_free_thread(struct binder_thread *thread) 4676 { 4677 BUG_ON(!list_empty(&thread->todo)); 4678 binder_stats_deleted(BINDER_STAT_THREAD); 4679 binder_proc_dec_tmpref(thread->proc); 4680 kfree(thread); 4681 } 4682 4683 static int binder_thread_release(struct binder_proc *proc, 4684 struct binder_thread *thread) 4685 { 4686 struct binder_transaction *t; 4687 struct binder_transaction *send_reply = NULL; 4688 int 
active_transactions = 0; 4689 struct binder_transaction *last_t = NULL; 4690 4691 binder_inner_proc_lock(thread->proc); 4692 /* 4693 * take a ref on the proc so it survives 4694 * after we remove this thread from proc->threads. 4695 * The corresponding dec is when we actually 4696 * free the thread in binder_free_thread() 4697 */ 4698 proc->tmp_ref++; 4699 /* 4700 * take a ref on this thread to ensure it 4701 * survives while we are releasing it 4702 */ 4703 atomic_inc(&thread->tmp_ref); 4704 rb_erase(&thread->rb_node, &proc->threads); 4705 t = thread->transaction_stack; 4706 if (t) { 4707 spin_lock(&t->lock); 4708 if (t->to_thread == thread) 4709 send_reply = t; 4710 } else { 4711 __acquire(&t->lock); 4712 } 4713 thread->is_dead = true; 4714 4715 while (t) { 4716 last_t = t; 4717 active_transactions++; 4718 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 4719 "release %d:%d transaction %d %s, still active\n", 4720 proc->pid, thread->pid, 4721 t->debug_id, 4722 (t->to_thread == thread) ? "in" : "out"); 4723 4724 if (t->to_thread == thread) { 4725 t->to_proc = NULL; 4726 t->to_thread = NULL; 4727 if (t->buffer) { 4728 t->buffer->transaction = NULL; 4729 t->buffer = NULL; 4730 } 4731 t = t->to_parent; 4732 } else if (t->from == thread) { 4733 t->from = NULL; 4734 t = t->from_parent; 4735 } else 4736 BUG(); 4737 spin_unlock(&last_t->lock); 4738 if (t) 4739 spin_lock(&t->lock); 4740 else 4741 __acquire(&t->lock); 4742 } 4743 /* annotation for sparse, lock not acquired in last iteration above */ 4744 __release(&t->lock); 4745 4746 /* 4747 * If this thread used poll, make sure we remove the waitqueue 4748 * from any epoll data structures holding it with POLLFREE. 4749 * waitqueue_active() is safe to use here because we're holding 4750 * the inner lock. 4751 */ 4752 if ((thread->looper & BINDER_LOOPER_STATE_POLL) && 4753 waitqueue_active(&thread->wait)) { 4754 wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE); 4755 } 4756 4757 binder_inner_proc_unlock(thread->proc); 4758 4759 /* 4760 * This is needed to avoid races between wake_up_poll() above and 4761 * ep_remove_waitqueue() called for other reasons (eg the epoll file 4762 * descriptor being closed); ep_remove_waitqueue() holds an RCU read 4763 * lock, so we can be sure it's done after calling synchronize_rcu().
4764 */ 4765 if (thread->looper & BINDER_LOOPER_STATE_POLL) 4766 synchronize_rcu(); 4767 4768 if (send_reply) 4769 binder_send_failed_reply(send_reply, BR_DEAD_REPLY); 4770 binder_release_work(proc, &thread->todo); 4771 binder_thread_dec_tmpref(thread); 4772 return active_transactions; 4773 } 4774 4775 static __poll_t binder_poll(struct file *filp, 4776 struct poll_table_struct *wait) 4777 { 4778 struct binder_proc *proc = filp->private_data; 4779 struct binder_thread *thread = NULL; 4780 bool wait_for_proc_work; 4781 4782 thread = binder_get_thread(proc); 4783 if (!thread) 4784 return POLLERR; 4785 4786 binder_inner_proc_lock(thread->proc); 4787 thread->looper |= BINDER_LOOPER_STATE_POLL; 4788 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); 4789 4790 binder_inner_proc_unlock(thread->proc); 4791 4792 poll_wait(filp, &thread->wait, wait); 4793 4794 if (binder_has_work(thread, wait_for_proc_work)) 4795 return EPOLLIN; 4796 4797 return 0; 4798 } 4799 4800 static int binder_ioctl_write_read(struct file *filp, 4801 unsigned int cmd, unsigned long arg, 4802 struct binder_thread *thread) 4803 { 4804 int ret = 0; 4805 struct binder_proc *proc = filp->private_data; 4806 unsigned int size = _IOC_SIZE(cmd); 4807 void __user *ubuf = (void __user *)arg; 4808 struct binder_write_read bwr; 4809 4810 if (size != sizeof(struct binder_write_read)) { 4811 ret = -EINVAL; 4812 goto out; 4813 } 4814 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { 4815 ret = -EFAULT; 4816 goto out; 4817 } 4818 binder_debug(BINDER_DEBUG_READ_WRITE, 4819 "%d:%d write %lld at %016llx, read %lld at %016llx\n", 4820 proc->pid, thread->pid, 4821 (u64)bwr.write_size, (u64)bwr.write_buffer, 4822 (u64)bwr.read_size, (u64)bwr.read_buffer); 4823 4824 if (bwr.write_size > 0) { 4825 ret = binder_thread_write(proc, thread, 4826 bwr.write_buffer, 4827 bwr.write_size, 4828 &bwr.write_consumed); 4829 trace_binder_write_done(ret); 4830 if (ret < 0) { 4831 bwr.read_consumed = 0; 4832 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 4833 ret = -EFAULT; 4834 goto out; 4835 } 4836 } 4837 if (bwr.read_size > 0) { 4838 ret = binder_thread_read(proc, thread, bwr.read_buffer, 4839 bwr.read_size, 4840 &bwr.read_consumed, 4841 filp->f_flags & O_NONBLOCK); 4842 trace_binder_read_done(ret); 4843 binder_inner_proc_lock(proc); 4844 if (!binder_worklist_empty_ilocked(&proc->todo)) 4845 binder_wakeup_proc_ilocked(proc); 4846 binder_inner_proc_unlock(proc); 4847 if (ret < 0) { 4848 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 4849 ret = -EFAULT; 4850 goto out; 4851 } 4852 } 4853 binder_debug(BINDER_DEBUG_READ_WRITE, 4854 "%d:%d wrote %lld of %lld, read return %lld of %lld\n", 4855 proc->pid, thread->pid, 4856 (u64)bwr.write_consumed, (u64)bwr.write_size, 4857 (u64)bwr.read_consumed, (u64)bwr.read_size); 4858 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { 4859 ret = -EFAULT; 4860 goto out; 4861 } 4862 out: 4863 return ret; 4864 } 4865 4866 static int binder_ioctl_set_ctx_mgr(struct file *filp, 4867 struct flat_binder_object *fbo) 4868 { 4869 int ret = 0; 4870 struct binder_proc *proc = filp->private_data; 4871 struct binder_context *context = proc->context; 4872 struct binder_node *new_node; 4873 kuid_t curr_euid = current_euid(); 4874 4875 mutex_lock(&context->context_mgr_node_lock); 4876 if (context->binder_context_mgr_node) { 4877 pr_err("BINDER_SET_CONTEXT_MGR already set\n"); 4878 ret = -EBUSY; 4879 goto out; 4880 } 4881 ret = security_binder_set_context_mgr(proc->tsk); 4882 if (ret < 0) 4883 goto out; 4884 if 
(uid_valid(context->binder_context_mgr_uid)) { 4885 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) { 4886 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", 4887 from_kuid(&init_user_ns, curr_euid), 4888 from_kuid(&init_user_ns, 4889 context->binder_context_mgr_uid)); 4890 ret = -EPERM; 4891 goto out; 4892 } 4893 } else { 4894 context->binder_context_mgr_uid = curr_euid; 4895 } 4896 new_node = binder_new_node(proc, fbo); 4897 if (!new_node) { 4898 ret = -ENOMEM; 4899 goto out; 4900 } 4901 binder_node_lock(new_node); 4902 new_node->local_weak_refs++; 4903 new_node->local_strong_refs++; 4904 new_node->has_strong_ref = 1; 4905 new_node->has_weak_ref = 1; 4906 context->binder_context_mgr_node = new_node; 4907 binder_node_unlock(new_node); 4908 binder_put_node(new_node); 4909 out: 4910 mutex_unlock(&context->context_mgr_node_lock); 4911 return ret; 4912 } 4913 4914 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc, 4915 struct binder_node_info_for_ref *info) 4916 { 4917 struct binder_node *node; 4918 struct binder_context *context = proc->context; 4919 __u32 handle = info->handle; 4920 4921 if (info->strong_count || info->weak_count || info->reserved1 || 4922 info->reserved2 || info->reserved3) { 4923 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.", 4924 proc->pid); 4925 return -EINVAL; 4926 } 4927 4928 /* This ioctl may only be used by the context manager */ 4929 mutex_lock(&context->context_mgr_node_lock); 4930 if (!context->binder_context_mgr_node || 4931 context->binder_context_mgr_node->proc != proc) { 4932 mutex_unlock(&context->context_mgr_node_lock); 4933 return -EPERM; 4934 } 4935 mutex_unlock(&context->context_mgr_node_lock); 4936 4937 node = binder_get_node_from_ref(proc, handle, true, NULL); 4938 if (!node) 4939 return -EINVAL; 4940 4941 info->strong_count = node->local_strong_refs + 4942 node->internal_strong_refs; 4943 info->weak_count = node->local_weak_refs; 4944 4945 binder_put_node(node); 4946 4947 return 0; 4948 } 4949 4950 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc, 4951 struct binder_node_debug_info *info) 4952 { 4953 struct rb_node *n; 4954 binder_uintptr_t ptr = info->ptr; 4955 4956 memset(info, 0, sizeof(*info)); 4957 4958 binder_inner_proc_lock(proc); 4959 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 4960 struct binder_node *node = rb_entry(n, struct binder_node, 4961 rb_node); 4962 if (node->ptr > ptr) { 4963 info->ptr = node->ptr; 4964 info->cookie = node->cookie; 4965 info->has_strong_ref = node->has_strong_ref; 4966 info->has_weak_ref = node->has_weak_ref; 4967 break; 4968 } 4969 } 4970 binder_inner_proc_unlock(proc); 4971 4972 return 0; 4973 } 4974 4975 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 4976 { 4977 int ret; 4978 struct binder_proc *proc = filp->private_data; 4979 struct binder_thread *thread; 4980 unsigned int size = _IOC_SIZE(cmd); 4981 void __user *ubuf = (void __user *)arg; 4982 4983 /*pr_info("binder_ioctl: %d:%d %x %lx\n", 4984 proc->pid, current->pid, cmd, arg);*/ 4985 4986 binder_selftest_alloc(&proc->alloc); 4987 4988 trace_binder_ioctl(cmd, arg); 4989 4990 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 4991 if (ret) 4992 goto err_unlocked; 4993 4994 thread = binder_get_thread(proc); 4995 if (thread == NULL) { 4996 ret = -ENOMEM; 4997 goto err; 4998 } 4999 5000 switch (cmd) { 5001 case BINDER_WRITE_READ: 5002 ret = binder_ioctl_write_read(filp, cmd, arg, thread); 
5003 if (ret) 5004 goto err; 5005 break; 5006 case BINDER_SET_MAX_THREADS: { 5007 int max_threads; 5008 5009 if (copy_from_user(&max_threads, ubuf, 5010 sizeof(max_threads))) { 5011 ret = -EINVAL; 5012 goto err; 5013 } 5014 binder_inner_proc_lock(proc); 5015 proc->max_threads = max_threads; 5016 binder_inner_proc_unlock(proc); 5017 break; 5018 } 5019 case BINDER_SET_CONTEXT_MGR_EXT: { 5020 struct flat_binder_object fbo; 5021 5022 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) { 5023 ret = -EINVAL; 5024 goto err; 5025 } 5026 ret = binder_ioctl_set_ctx_mgr(filp, &fbo); 5027 if (ret) 5028 goto err; 5029 break; 5030 } 5031 case BINDER_SET_CONTEXT_MGR: 5032 ret = binder_ioctl_set_ctx_mgr(filp, NULL); 5033 if (ret) 5034 goto err; 5035 break; 5036 case BINDER_THREAD_EXIT: 5037 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", 5038 proc->pid, thread->pid); 5039 binder_thread_release(proc, thread); 5040 thread = NULL; 5041 break; 5042 case BINDER_VERSION: { 5043 struct binder_version __user *ver = ubuf; 5044 5045 if (size != sizeof(struct binder_version)) { 5046 ret = -EINVAL; 5047 goto err; 5048 } 5049 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, 5050 &ver->protocol_version)) { 5051 ret = -EINVAL; 5052 goto err; 5053 } 5054 break; 5055 } 5056 case BINDER_GET_NODE_INFO_FOR_REF: { 5057 struct binder_node_info_for_ref info; 5058 5059 if (copy_from_user(&info, ubuf, sizeof(info))) { 5060 ret = -EFAULT; 5061 goto err; 5062 } 5063 5064 ret = binder_ioctl_get_node_info_for_ref(proc, &info); 5065 if (ret < 0) 5066 goto err; 5067 5068 if (copy_to_user(ubuf, &info, sizeof(info))) { 5069 ret = -EFAULT; 5070 goto err; 5071 } 5072 5073 break; 5074 } 5075 case BINDER_GET_NODE_DEBUG_INFO: { 5076 struct binder_node_debug_info info; 5077 5078 if (copy_from_user(&info, ubuf, sizeof(info))) { 5079 ret = -EFAULT; 5080 goto err; 5081 } 5082 5083 ret = binder_ioctl_get_node_debug_info(proc, &info); 5084 if (ret < 0) 5085 goto err; 5086 5087 if (copy_to_user(ubuf, &info, sizeof(info))) { 5088 ret = -EFAULT; 5089 goto err; 5090 } 5091 break; 5092 } 5093 default: 5094 ret = -EINVAL; 5095 goto err; 5096 } 5097 ret = 0; 5098 err: 5099 if (thread) 5100 thread->looper_need_return = false; 5101 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 5102 if (ret && ret != -ERESTARTSYS) 5103 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); 5104 err_unlocked: 5105 trace_binder_ioctl_done(ret); 5106 return ret; 5107 } 5108 5109 static void binder_vma_open(struct vm_area_struct *vma) 5110 { 5111 struct binder_proc *proc = vma->vm_private_data; 5112 5113 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5114 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 5115 proc->pid, vma->vm_start, vma->vm_end, 5116 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 5117 (unsigned long)pgprot_val(vma->vm_page_prot)); 5118 } 5119 5120 static void binder_vma_close(struct vm_area_struct *vma) 5121 { 5122 struct binder_proc *proc = vma->vm_private_data; 5123 5124 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5125 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 5126 proc->pid, vma->vm_start, vma->vm_end, 5127 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 5128 (unsigned long)pgprot_val(vma->vm_page_prot)); 5129 binder_alloc_vma_close(&proc->alloc); 5130 } 5131 5132 static vm_fault_t binder_vm_fault(struct vm_fault *vmf) 5133 { 5134 return VM_FAULT_SIGBUS; 5135 } 5136 5137 static const struct vm_operations_struct binder_vm_ops = { 5138 .open = binder_vma_open, 5139 .close = 
binder_vma_close, 5140 .fault = binder_vm_fault, 5141 }; 5142 5143 static int binder_mmap(struct file *filp, struct vm_area_struct *vma) 5144 { 5145 int ret; 5146 struct binder_proc *proc = filp->private_data; 5147 const char *failure_string; 5148 5149 if (proc->tsk != current->group_leader) 5150 return -EINVAL; 5151 5152 if ((vma->vm_end - vma->vm_start) > SZ_4M) 5153 vma->vm_end = vma->vm_start + SZ_4M; 5154 5155 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5156 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", 5157 __func__, proc->pid, vma->vm_start, vma->vm_end, 5158 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 5159 (unsigned long)pgprot_val(vma->vm_page_prot)); 5160 5161 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { 5162 ret = -EPERM; 5163 failure_string = "bad vm_flags"; 5164 goto err_bad_arg; 5165 } 5166 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP; 5167 vma->vm_flags &= ~VM_MAYWRITE; 5168 5169 vma->vm_ops = &binder_vm_ops; 5170 vma->vm_private_data = proc; 5171 5172 ret = binder_alloc_mmap_handler(&proc->alloc, vma); 5173 if (ret) 5174 return ret; 5175 return 0; 5176 5177 err_bad_arg: 5178 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__, 5179 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); 5180 return ret; 5181 } 5182 5183 static int binder_open(struct inode *nodp, struct file *filp) 5184 { 5185 struct binder_proc *proc; 5186 struct binder_device *binder_dev; 5187 5188 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__, 5189 current->group_leader->pid, current->pid); 5190 5191 proc = kzalloc(sizeof(*proc), GFP_KERNEL); 5192 if (proc == NULL) 5193 return -ENOMEM; 5194 spin_lock_init(&proc->inner_lock); 5195 spin_lock_init(&proc->outer_lock); 5196 get_task_struct(current->group_leader); 5197 proc->tsk = current->group_leader; 5198 INIT_LIST_HEAD(&proc->todo); 5199 proc->default_priority = task_nice(current); 5200 /* binderfs stashes devices in i_private */ 5201 if (is_binderfs_device(nodp)) 5202 binder_dev = nodp->i_private; 5203 else 5204 binder_dev = container_of(filp->private_data, 5205 struct binder_device, miscdev); 5206 proc->context = &binder_dev->context; 5207 binder_alloc_init(&proc->alloc); 5208 5209 binder_stats_created(BINDER_STAT_PROC); 5210 proc->pid = current->group_leader->pid; 5211 INIT_LIST_HEAD(&proc->delivered_death); 5212 INIT_LIST_HEAD(&proc->waiting_threads); 5213 filp->private_data = proc; 5214 5215 mutex_lock(&binder_procs_lock); 5216 hlist_add_head(&proc->proc_node, &binder_procs); 5217 mutex_unlock(&binder_procs_lock); 5218 5219 if (binder_debugfs_dir_entry_proc) { 5220 char strbuf[11]; 5221 5222 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 5223 /* 5224 * proc debug entries are shared between contexts, so 5225 * this will fail if the process tries to open the driver 5226 * again with a different context. The printing code will 5227 * anyway print all contexts that a given PID has, so this 5228 * is not a problem.
5229 */ 5230 proc->debugfs_entry = debugfs_create_file(strbuf, 0444, 5231 binder_debugfs_dir_entry_proc, 5232 (void *)(unsigned long)proc->pid, 5233 &proc_fops); 5234 } 5235 5236 return 0; 5237 } 5238 5239 static int binder_flush(struct file *filp, fl_owner_t id) 5240 { 5241 struct binder_proc *proc = filp->private_data; 5242 5243 binder_defer_work(proc, BINDER_DEFERRED_FLUSH); 5244 5245 return 0; 5246 } 5247 5248 static void binder_deferred_flush(struct binder_proc *proc) 5249 { 5250 struct rb_node *n; 5251 int wake_count = 0; 5252 5253 binder_inner_proc_lock(proc); 5254 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { 5255 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); 5256 5257 thread->looper_need_return = true; 5258 if (thread->looper & BINDER_LOOPER_STATE_WAITING) { 5259 wake_up_interruptible(&thread->wait); 5260 wake_count++; 5261 } 5262 } 5263 binder_inner_proc_unlock(proc); 5264 5265 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5266 "binder_flush: %d woke %d threads\n", proc->pid, 5267 wake_count); 5268 } 5269 5270 static int binder_release(struct inode *nodp, struct file *filp) 5271 { 5272 struct binder_proc *proc = filp->private_data; 5273 5274 debugfs_remove(proc->debugfs_entry); 5275 binder_defer_work(proc, BINDER_DEFERRED_RELEASE); 5276 5277 return 0; 5278 } 5279 5280 static int binder_node_release(struct binder_node *node, int refs) 5281 { 5282 struct binder_ref *ref; 5283 int death = 0; 5284 struct binder_proc *proc = node->proc; 5285 5286 binder_release_work(proc, &node->async_todo); 5287 5288 binder_node_lock(node); 5289 binder_inner_proc_lock(proc); 5290 binder_dequeue_work_ilocked(&node->work); 5291 /* 5292 * The caller must have taken a temporary ref on the node, 5293 */ 5294 BUG_ON(!node->tmp_refs); 5295 if (hlist_empty(&node->refs) && node->tmp_refs == 1) { 5296 binder_inner_proc_unlock(proc); 5297 binder_node_unlock(node); 5298 binder_free_node(node); 5299 5300 return refs; 5301 } 5302 5303 node->proc = NULL; 5304 node->local_strong_refs = 0; 5305 node->local_weak_refs = 0; 5306 binder_inner_proc_unlock(proc); 5307 5308 spin_lock(&binder_dead_nodes_lock); 5309 hlist_add_head(&node->dead_node, &binder_dead_nodes); 5310 spin_unlock(&binder_dead_nodes_lock); 5311 5312 hlist_for_each_entry(ref, &node->refs, node_entry) { 5313 refs++; 5314 /* 5315 * Need the node lock to synchronize 5316 * with new notification requests and the 5317 * inner lock to synchronize with queued 5318 * death notifications. 
5319 */ 5320 binder_inner_proc_lock(ref->proc); 5321 if (!ref->death) { 5322 binder_inner_proc_unlock(ref->proc); 5323 continue; 5324 } 5325 5326 death++; 5327 5328 BUG_ON(!list_empty(&ref->death->work.entry)); 5329 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 5330 binder_enqueue_work_ilocked(&ref->death->work, 5331 &ref->proc->todo); 5332 binder_wakeup_proc_ilocked(ref->proc); 5333 binder_inner_proc_unlock(ref->proc); 5334 } 5335 5336 binder_debug(BINDER_DEBUG_DEAD_BINDER, 5337 "node %d now dead, refs %d, death %d\n", 5338 node->debug_id, refs, death); 5339 binder_node_unlock(node); 5340 binder_put_node(node); 5341 5342 return refs; 5343 } 5344 5345 static void binder_deferred_release(struct binder_proc *proc) 5346 { 5347 struct binder_context *context = proc->context; 5348 struct rb_node *n; 5349 int threads, nodes, incoming_refs, outgoing_refs, active_transactions; 5350 5351 mutex_lock(&binder_procs_lock); 5352 hlist_del(&proc->proc_node); 5353 mutex_unlock(&binder_procs_lock); 5354 5355 mutex_lock(&context->context_mgr_node_lock); 5356 if (context->binder_context_mgr_node && 5357 context->binder_context_mgr_node->proc == proc) { 5358 binder_debug(BINDER_DEBUG_DEAD_BINDER, 5359 "%s: %d context_mgr_node gone\n", 5360 __func__, proc->pid); 5361 context->binder_context_mgr_node = NULL; 5362 } 5363 mutex_unlock(&context->context_mgr_node_lock); 5364 binder_inner_proc_lock(proc); 5365 /* 5366 * Make sure proc stays alive after we 5367 * remove all the threads 5368 */ 5369 proc->tmp_ref++; 5370 5371 proc->is_dead = true; 5372 threads = 0; 5373 active_transactions = 0; 5374 while ((n = rb_first(&proc->threads))) { 5375 struct binder_thread *thread; 5376 5377 thread = rb_entry(n, struct binder_thread, rb_node); 5378 binder_inner_proc_unlock(proc); 5379 threads++; 5380 active_transactions += binder_thread_release(proc, thread); 5381 binder_inner_proc_lock(proc); 5382 } 5383 5384 nodes = 0; 5385 incoming_refs = 0; 5386 while ((n = rb_first(&proc->nodes))) { 5387 struct binder_node *node; 5388 5389 node = rb_entry(n, struct binder_node, rb_node); 5390 nodes++; 5391 /* 5392 * take a temporary ref on the node before 5393 * calling binder_node_release() which will either 5394 * kfree() the node or call binder_put_node() 5395 */ 5396 binder_inc_node_tmpref_ilocked(node); 5397 rb_erase(&node->rb_node, &proc->nodes); 5398 binder_inner_proc_unlock(proc); 5399 incoming_refs = binder_node_release(node, incoming_refs); 5400 binder_inner_proc_lock(proc); 5401 } 5402 binder_inner_proc_unlock(proc); 5403 5404 outgoing_refs = 0; 5405 binder_proc_lock(proc); 5406 while ((n = rb_first(&proc->refs_by_desc))) { 5407 struct binder_ref *ref; 5408 5409 ref = rb_entry(n, struct binder_ref, rb_node_desc); 5410 outgoing_refs++; 5411 binder_cleanup_ref_olocked(ref); 5412 binder_proc_unlock(proc); 5413 binder_free_ref(ref); 5414 binder_proc_lock(proc); 5415 } 5416 binder_proc_unlock(proc); 5417 5418 binder_release_work(proc, &proc->todo); 5419 binder_release_work(proc, &proc->delivered_death); 5420 5421 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 5422 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n", 5423 __func__, proc->pid, threads, nodes, incoming_refs, 5424 outgoing_refs, active_transactions); 5425 5426 binder_proc_dec_tmpref(proc); 5427 } 5428 5429 static void binder_deferred_func(struct work_struct *work) 5430 { 5431 struct binder_proc *proc; 5432 5433 int defer; 5434 5435 do { 5436 mutex_lock(&binder_deferred_lock); 5437 if (!hlist_empty(&binder_deferred_list)) { 5438 proc = 
hlist_entry(binder_deferred_list.first, 5439 struct binder_proc, deferred_work_node); 5440 hlist_del_init(&proc->deferred_work_node); 5441 defer = proc->deferred_work; 5442 proc->deferred_work = 0; 5443 } else { 5444 proc = NULL; 5445 defer = 0; 5446 } 5447 mutex_unlock(&binder_deferred_lock); 5448 5449 if (defer & BINDER_DEFERRED_FLUSH) 5450 binder_deferred_flush(proc); 5451 5452 if (defer & BINDER_DEFERRED_RELEASE) 5453 binder_deferred_release(proc); /* frees proc */ 5454 } while (proc); 5455 } 5456 static DECLARE_WORK(binder_deferred_work, binder_deferred_func); 5457 5458 static void 5459 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) 5460 { 5461 mutex_lock(&binder_deferred_lock); 5462 proc->deferred_work |= defer; 5463 if (hlist_unhashed(&proc->deferred_work_node)) { 5464 hlist_add_head(&proc->deferred_work_node, 5465 &binder_deferred_list); 5466 schedule_work(&binder_deferred_work); 5467 } 5468 mutex_unlock(&binder_deferred_lock); 5469 } 5470 5471 static void print_binder_transaction_ilocked(struct seq_file *m, 5472 struct binder_proc *proc, 5473 const char *prefix, 5474 struct binder_transaction *t) 5475 { 5476 struct binder_proc *to_proc; 5477 struct binder_buffer *buffer = t->buffer; 5478 5479 spin_lock(&t->lock); 5480 to_proc = t->to_proc; 5481 seq_printf(m, 5482 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d", 5483 prefix, t->debug_id, t, 5484 t->from ? t->from->proc->pid : 0, 5485 t->from ? t->from->pid : 0, 5486 to_proc ? to_proc->pid : 0, 5487 t->to_thread ? t->to_thread->pid : 0, 5488 t->code, t->flags, t->priority, t->need_reply); 5489 spin_unlock(&t->lock); 5490 5491 if (proc != to_proc) { 5492 /* 5493 * Can only safely deref buffer if we are holding the 5494 * correct proc inner lock for this node 5495 */ 5496 seq_puts(m, "\n"); 5497 return; 5498 } 5499 5500 if (buffer == NULL) { 5501 seq_puts(m, " buffer free\n"); 5502 return; 5503 } 5504 if (buffer->target_node) 5505 seq_printf(m, " node %d", buffer->target_node->debug_id); 5506 seq_printf(m, " size %zd:%zd data %pK\n", 5507 buffer->data_size, buffer->offsets_size, 5508 buffer->user_data); 5509 } 5510 5511 static void print_binder_work_ilocked(struct seq_file *m, 5512 struct binder_proc *proc, 5513 const char *prefix, 5514 const char *transaction_prefix, 5515 struct binder_work *w) 5516 { 5517 struct binder_node *node; 5518 struct binder_transaction *t; 5519 5520 switch (w->type) { 5521 case BINDER_WORK_TRANSACTION: 5522 t = container_of(w, struct binder_transaction, work); 5523 print_binder_transaction_ilocked( 5524 m, proc, transaction_prefix, t); 5525 break; 5526 case BINDER_WORK_RETURN_ERROR: { 5527 struct binder_error *e = container_of( 5528 w, struct binder_error, work); 5529 5530 seq_printf(m, "%stransaction error: %u\n", 5531 prefix, e->cmd); 5532 } break; 5533 case BINDER_WORK_TRANSACTION_COMPLETE: 5534 seq_printf(m, "%stransaction complete\n", prefix); 5535 break; 5536 case BINDER_WORK_NODE: 5537 node = container_of(w, struct binder_node, work); 5538 seq_printf(m, "%snode work %d: u%016llx c%016llx\n", 5539 prefix, node->debug_id, 5540 (u64)node->ptr, (u64)node->cookie); 5541 break; 5542 case BINDER_WORK_DEAD_BINDER: 5543 seq_printf(m, "%shas dead binder\n", prefix); 5544 break; 5545 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 5546 seq_printf(m, "%shas cleared dead binder\n", prefix); 5547 break; 5548 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: 5549 seq_printf(m, "%shas cleared death notification\n", prefix); 5550 break; 5551 default: 5552 seq_printf(m, "%sunknown work: 
type %d\n", prefix, w->type); 5553 break; 5554 } 5555 } 5556 5557 static void print_binder_thread_ilocked(struct seq_file *m, 5558 struct binder_thread *thread, 5559 int print_always) 5560 { 5561 struct binder_transaction *t; 5562 struct binder_work *w; 5563 size_t start_pos = m->count; 5564 size_t header_pos; 5565 5566 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n", 5567 thread->pid, thread->looper, 5568 thread->looper_need_return, 5569 atomic_read(&thread->tmp_ref)); 5570 header_pos = m->count; 5571 t = thread->transaction_stack; 5572 while (t) { 5573 if (t->from == thread) { 5574 print_binder_transaction_ilocked(m, thread->proc, 5575 " outgoing transaction", t); 5576 t = t->from_parent; 5577 } else if (t->to_thread == thread) { 5578 print_binder_transaction_ilocked(m, thread->proc, 5579 " incoming transaction", t); 5580 t = t->to_parent; 5581 } else { 5582 print_binder_transaction_ilocked(m, thread->proc, 5583 " bad transaction", t); 5584 t = NULL; 5585 } 5586 } 5587 list_for_each_entry(w, &thread->todo, entry) { 5588 print_binder_work_ilocked(m, thread->proc, " ", 5589 " pending transaction", w); 5590 } 5591 if (!print_always && m->count == header_pos) 5592 m->count = start_pos; 5593 } 5594 5595 static void print_binder_node_nilocked(struct seq_file *m, 5596 struct binder_node *node) 5597 { 5598 struct binder_ref *ref; 5599 struct binder_work *w; 5600 int count; 5601 5602 count = 0; 5603 hlist_for_each_entry(ref, &node->refs, node_entry) 5604 count++; 5605 5606 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d", 5607 node->debug_id, (u64)node->ptr, (u64)node->cookie, 5608 node->has_strong_ref, node->has_weak_ref, 5609 node->local_strong_refs, node->local_weak_refs, 5610 node->internal_strong_refs, count, node->tmp_refs); 5611 if (count) { 5612 seq_puts(m, " proc"); 5613 hlist_for_each_entry(ref, &node->refs, node_entry) 5614 seq_printf(m, " %d", ref->proc->pid); 5615 } 5616 seq_puts(m, "\n"); 5617 if (node->proc) { 5618 list_for_each_entry(w, &node->async_todo, entry) 5619 print_binder_work_ilocked(m, node->proc, " ", 5620 " pending async transaction", w); 5621 } 5622 } 5623 5624 static void print_binder_ref_olocked(struct seq_file *m, 5625 struct binder_ref *ref) 5626 { 5627 binder_node_lock(ref->node); 5628 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n", 5629 ref->data.debug_id, ref->data.desc, 5630 ref->node->proc ? "" : "dead ", 5631 ref->node->debug_id, ref->data.strong, 5632 ref->data.weak, ref->death); 5633 binder_node_unlock(ref->node); 5634 } 5635 5636 static void print_binder_proc(struct seq_file *m, 5637 struct binder_proc *proc, int print_all) 5638 { 5639 struct binder_work *w; 5640 struct rb_node *n; 5641 size_t start_pos = m->count; 5642 size_t header_pos; 5643 struct binder_node *last_node = NULL; 5644 5645 seq_printf(m, "proc %d\n", proc->pid); 5646 seq_printf(m, "context %s\n", proc->context->name); 5647 header_pos = m->count; 5648 5649 binder_inner_proc_lock(proc); 5650 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 5651 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread, 5652 rb_node), print_all); 5653 5654 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 5655 struct binder_node *node = rb_entry(n, struct binder_node, 5656 rb_node); 5657 if (!print_all && !node->has_async_transaction) 5658 continue; 5659 5660 /* 5661 * take a temporary reference on the node so it 5662 * survives and isn't removed from the tree 5663 * while we print it. 
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (!print_all && !node->has_async_transaction)
			continue;

		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							     struct binder_ref,
							     rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}

static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
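
/*
 * Print the BC_ and BR_ command counters and the object statistics.
 * The BUILD_BUG_ON()s keep the string tables above in sync with the
 * counter arrays in struct binder_stats.  For object stats, "active"
 * is simply created - deleted; e.g. 10 transactions created and 7
 * freed would (illustratively) print "transaction: active 3 total 10".
 */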
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}

static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}
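
/*
 * debugfs "state" file: dump the dead-node list and then every proc.
 * Each dead node gets a temporary reference (node->tmp_refs) so it
 * survives while binder_dead_nodes_lock is dropped to take the node
 * lock; the previous node is only released on the next iteration so
 * the hlist cursor remains valid.
 */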
static int state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}
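
/*
 * The transaction log is a fixed-size ring buffer indexed by the atomic
 * log->cur.  Until the buffer wraps (log->full is set), entries are
 * printed from index 0; once it has wrapped, the dump starts at the
 * oldest entry, (log->cur + 1) % ARRAY_SIZE(log->entry), and prints the
 * whole array.  For example (illustrative, assuming a 32-entry array):
 * after 40 transactions log->cur is 39, so the dump starts at index 8
 * and prints 32 entries, ending with the newest at index 7.
 */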
static int transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

DEFINE_SHOW_ATTRIBUTE(state);
DEFINE_SHOW_ATTRIBUTE(stats);
DEFINE_SHOW_ATTRIBUTE(transactions);
DEFINE_SHOW_ATTRIBUTE(transaction_log);

static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}
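
/*
 * Module init: set up the shrinker for the buffer allocator, create the
 * debugfs entries, register one misc character device per name in the
 * "devices" module parameter (a comma-separated list; e.g.
 * devices="binder,hwbinder,vndbinder" would create /dev/binder,
 * /dev/hwbinder and /dev/vndbinder), and finally initialize binderfs.
 * The device names here are only illustrative; the built-in default
 * comes from CONFIG_ANDROID_BINDER_DEVICES.
 */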
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;
	char *device_names = NULL;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &state_fops);
		debugfs_create_file("stats",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &stats_fops);
		debugfs_create_file("transactions",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &transactions_fops);
		debugfs_create_file("transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &transaction_log_fops);
	}

	if (strcmp(binder_devices_param, "") != 0) {
		/*
		 * Copy the module_parameter string, because we don't want to
		 * tokenize it in-place.
		 */
		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
		if (!device_names) {
			ret = -ENOMEM;
			goto err_alloc_device_names_failed;
		}

		device_tmp = device_names;
		while ((device_name = strsep(&device_tmp, ","))) {
			ret = init_binder_device(device_name);
			if (ret)
				goto err_init_binder_device_failed;
		}
	}

	ret = init_binderfs();
	if (ret)
		goto err_init_binder_device_failed;

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");