/*
 * User-space Probes (UProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/export.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
#include <linux/swap.h>		/* try_to_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include "../../mm/internal.h"	/* munlock_vma_page */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE		(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS	UINSNS_PER_PAGE

static struct rb_root uprobes_tree = RB_ROOT;
/*
 * allows us to skip the uprobe_mmap if there are no uprobe events active
 * at this time.  Probably a fine grained per inode count is better?
 */
#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */

#define UPROBES_HASH_SZ	13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

static struct percpu_rw_semaphore dup_mmap_sem;

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN	0

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	atomic_t		ref;
	struct rw_semaphore	register_rwsem;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct uprobe_consumer	*consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	loff_t			offset;
	unsigned long		flags;

	/*
	 * The generic code assumes that it has two members of unknown type
	 * owned by the arch-specific code:
	 *
	 * insn -	copy_insn() saves the original instruction here for
	 *		arch_uprobe_analyze_insn().
	 *
	 * ixol -	potentially modified instruction to execute out of
	 *		line, copied to xol_area by xol_get_insn_slot().
	 */
	struct arch_uprobe	arch;
};
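/*
 * Reference counting in a nutshell (summarizing the rules spelled out at
 * insert_uprobe() and uprobe_register() below): every lookup that hands a
 * uprobe to a caller takes an "access" reference via get_uprobe(), dropped
 * with put_uprobe().  On top of that, the first uprobe_register() for an
 * inode:offset pair holds an extra "creation" reference, which is released
 * only when the last consumer unregisters and delete_uprobe() removes the
 * node from the rbtree.
 */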
/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, the thread contends for a slot.  It frees the
 * slot after singlestep.  Currently a fixed number of slots is
 * allocated.
 */
struct xol_area {
	wait_queue_head_t		wq;		/* if all slots are busy */
	atomic_t			slot_count;	/* number of in-use slots */
	unsigned long			*bitmap;	/* 0 = free slot */

	struct vm_special_mapping	xol_mapping;
	struct page			*pages[2];
	/*
	 * We keep the vma's vm_start rather than a pointer to the vma
	 * itself.  The probed process or a naughty kernel module could make
	 * the vma go away, and we must handle that reasonably gracefully.
	 */
	unsigned long			vaddr;		/* Page(s) of instruction slots */
};

/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return true if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

	if (is_register)
		flags |= VM_WRITE;

	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
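/*
 * Worked example for the two helpers above (illustrative numbers only,
 * assuming 4K pages): a vma with vm_start == 0x400000 and vm_pgoff == 0x10
 * maps the file starting at offset 0x10000.  A probe at file offset
 * 0x10123 then lives at
 *
 *	offset_to_vaddr() == 0x400000 + 0x10123 - 0x10000 == 0x400123
 *
 * and vaddr_to_offset(vma, 0x400123) recovers 0x10123.
 */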
/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @old_page is mapped at
 * @old_page: the COWed page we are replacing by @new_page
 * @new_page: the modified page we replace @old_page by
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *old_page, struct page *new_page)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page_vma_mapped_walk pvmw = {
		.page = old_page,
		.vma = vma,
		.address = addr,
	};
	int err;
	/* For mmu_notifiers */
	const unsigned long mmun_start = addr;
	const unsigned long mmun_end = addr + PAGE_SIZE;
	struct mem_cgroup *memcg;

	VM_BUG_ON_PAGE(PageTransHuge(old_page), old_page);

	err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
			false);
	if (err)
		return err;

	/* For try_to_free_swap() and munlock_vma_page() below */
	lock_page(old_page);

	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	err = -EAGAIN;
	if (!page_vma_mapped_walk(&pvmw)) {
		mem_cgroup_cancel_charge(new_page, memcg, false);
		goto unlock;
	}
	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);

	get_page(new_page);
	page_add_new_anon_rmap(new_page, vma, addr, false);
	mem_cgroup_commit_charge(new_page, memcg, false, false);
	lru_cache_add_active_or_unevictable(new_page, vma);

	if (!PageAnon(old_page)) {
		dec_mm_counter(mm, mm_counter_file(old_page));
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
	ptep_clear_flush_notify(vma, addr, pvmw.pte);
	set_pte_at_notify(mm, addr, pvmw.pte,
			mk_pte(new_page, vma->vm_page_prot));

	page_remove_rmap(old_page, false);
	if (!page_mapped(old_page))
		try_to_free_swap(old_page);
	page_vma_mapped_walk_done(&pvmw);

	if (vma->vm_flags & VM_LOCKED)
		munlock_vma_page(old_page);
	put_page(old_page);

	err = 0;
unlock:
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	unlock_page(old_page);
	return err;
}

/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}

/**
 * is_trap_insn - check if instruction is a trap instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_trap_insn
 * Returns true if @insn is a trap instruction.
 *
 * This function is needed for the case where an architecture has multiple
 * trap instructions (like powerpc).
 */
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
	return is_swbp_insn(insn);
}

static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
	kunmap_atomic(kaddr);
}

static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
	kunmap_atomic(kaddr);
}
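/*
 * Both helpers above copy through the kernel mapping of @page; only the
 * offset within the page (vaddr & ~PAGE_MASK) matters.  As a sketch of how
 * this is used below, uprobe_write_opcode() pokes the breakpoint into a
 * freshly allocated copy of the original page:
 *
 *	uprobe_opcode_t opcode = UPROBE_SWBP_INSN;
 *	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
 *
 * before swapping the pages with __replace_page().
 */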
static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
	uprobe_opcode_t old_opcode;
	bool is_swbp;

	/*
	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
	 * We do not check if it is any other 'trap variant' which could
	 * be a conditional trap instruction such as the one powerpc supports.
	 *
	 * The logic is that we do not care if the underlying instruction
	 * is a trap variant; uprobes always wins over any other (gdb)
	 * breakpoint.
	 */
	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
	is_swbp = is_swbp_insn(&old_opcode);

	if (is_swbp_insn(new_opcode)) {
		if (is_swbp)		/* register: already installed? */
			return 0;
	} else {
		if (!is_swbp)		/* unregister: was it changed by us? */
			return 0;
	}

	return 1;
}

/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture.  If an arch has variable-length instructions and the
 * breakpoint instruction is not the smallest-length instruction supported by
 * that architecture, then we need to modify is_trap_at_addr() and
 * uprobe_write_opcode() accordingly.  This would never be a problem for archs
 * that have fixed-length instructions.
 *
 * uprobe_write_opcode - write the opcode at a given virtual address.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_sem held for write.
 * Return 0 (success) or a negative errno.
 */
int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
			uprobe_opcode_t opcode)
{
	struct page *old_page, *new_page;
	struct vm_area_struct *vma;
	int ret;

retry:
	/* Read the page with vaddr into memory */
	ret = get_user_pages_remote(NULL, mm, vaddr, 1,
			FOLL_FORCE | FOLL_SPLIT, &old_page, &vma, NULL);
	if (ret <= 0)
		return ret;

	ret = verify_opcode(old_page, vaddr, &opcode);
	if (ret <= 0)
		goto put_old;

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_old;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);
	copy_highpage(new_page, old_page);
	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

	ret = __replace_page(vma, vaddr, old_page, new_page);
	put_page(new_page);
put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}
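/*
 * Note on the retry above: __replace_page() returns -EAGAIN when
 * page_vma_mapped_walk() finds that the pte was unmapped or changed
 * underneath us (e.g. by reclaim or a racing fault), in which case the
 * whole get-verify-replace sequence is simply restarted on a fresh page.
 */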
/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)&auprobe->insn);
}

static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
	atomic_inc(&uprobe->ref);
	return uprobe;
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (atomic_dec_and_test(&uprobe->ref))
		kfree(uprobe);
}

static int match_uprobe(struct uprobe *l, struct uprobe *r)
{
	if (l->inode < r->inode)
		return -1;

	if (l->inode > r->inode)
		return 1;

	if (l->offset < r->offset)
		return -1;

	if (l->offset > r->offset)
		return 1;

	return 0;
}

static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe u = { .inode = inode, .offset = offset };
	struct rb_node *n = uprobes_tree.rb_node;
	struct uprobe *uprobe;
	int match;

	while (n) {
		uprobe = rb_entry(n, struct uprobe, rb_node);
		match = match_uprobe(&u, uprobe);
		if (!match)
			return get_uprobe(uprobe);

		if (match < 0)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	return NULL;
}

/*
 * Find a uprobe corresponding to a given inode:offset
 * Acquires uprobes_treelock
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe;

	spin_lock(&uprobes_treelock);
	uprobe = __find_uprobe(inode, offset);
	spin_unlock(&uprobes_treelock);

	return uprobe;
}
static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node **p = &uprobes_tree.rb_node;
	struct rb_node *parent = NULL;
	struct uprobe *u;
	int match;

	while (*p) {
		parent = *p;
		u = rb_entry(parent, struct uprobe, rb_node);
		match = match_uprobe(uprobe, u);
		if (!match)
			return get_uprobe(u);

		if (match < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	u = NULL;
	rb_link_node(&uprobe->rb_node, parent, p);
	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
	/* get access + creation ref */
	atomic_set(&uprobe->ref, 2);

	return u;
}

/*
 * Acquire uprobes_treelock, then:
 *
 * If a matching uprobe already exists in the rbtree, increment its
 * refcount (access ref) and return the matching uprobe.
 *
 * If there is no matching uprobe, insert @uprobe into the rbtree,
 * take a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	spin_lock(&uprobes_treelock);
	u = __insert_uprobe(uprobe);
	spin_unlock(&uprobes_treelock);

	return u;
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return NULL;

	uprobe->inode = inode;
	uprobe->offset = offset;
	init_rwsem(&uprobe->register_rwsem);
	init_rwsem(&uprobe->consumer_rwsem);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);
	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe) {
		kfree(uprobe);
		uprobe = cur_uprobe;
	}

	return uprobe;
}

static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	uc->next = uprobe->consumers;
	uprobe->consumers = uc;
	up_write(&uprobe->consumer_rwsem);
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if @uc was found in @uprobe's consumer list and deleted;
 * false otherwise.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	struct uprobe_consumer **con;
	bool ret = false;

	down_write(&uprobe->consumer_rwsem);
	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
		if (*con == uc) {
			*con = uc->next;
			ret = true;
			break;
		}
	}
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static int __copy_insn(struct address_space *mapping, struct file *filp,
			void *insn, int nbytes, loff_t offset)
{
	struct page *page;
	/*
	 * Ensure that the page that has the original instruction is populated
	 * and in page-cache.  If ->readpage == NULL it must be shmem_mapping(),
	 * see uprobe_register().
	 */
	if (mapping->a_ops->readpage)
		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
	else
		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
	if (IS_ERR(page))
		return PTR_ERR(page);

	copy_from_page(page, offset, insn, nbytes);
	put_page(page);

	return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping = uprobe->inode->i_mapping;
	loff_t offs = uprobe->offset;
	void *insn = &uprobe->arch.insn;
	int size = sizeof(uprobe->arch.insn);
	int len, err = -EIO;

	/* Copy only available bytes, -EIO if nothing was read */
	do {
		if (offs >= i_size_read(uprobe->inode))
			break;

		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
		err = __copy_insn(mapping, filp, insn, len, offs);
		if (err)
			break;

		insn += len;
		offs += len;
		size -= len;
	} while (size);

	return err;
}
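/*
 * Note that copy_insn() loops because the instruction buffer in
 * uprobe->arch.insn may cross a page boundary in the underlying file;
 * each __copy_insn() call copies at most up to the end of one page
 * (PAGE_SIZE - (offs & ~PAGE_MASK) bytes).
 */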
static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
				struct mm_struct *mm, unsigned long vaddr)
{
	int ret = 0;

	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		return ret;

	/* TODO: move this into _register, until then we abuse this sem. */
	down_write(&uprobe->consumer_rwsem);
	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		goto out;

	ret = copy_insn(uprobe, file);
	if (ret)
		goto out;

	ret = -ENOTSUPP;
	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
		goto out;

	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
	if (ret)
		goto out;

	/* uprobe_write_opcode() assumes we don't cross page boundary */
	BUG_ON((uprobe->offset & ~PAGE_MASK) +
			UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);

	smp_wmb(); /* pairs with rmb() in handle_swbp() */
	set_bit(UPROBE_COPY_INSN, &uprobe->flags);

out:
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static inline bool consumer_filter(struct uprobe_consumer *uc,
				   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	return !uc->filter || uc->filter(uc, ctx, mm);
}

static bool filter_chain(struct uprobe *uprobe,
			 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct uprobe_consumer *uc;
	bool ret = false;

	down_read(&uprobe->consumer_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		ret = consumer_filter(uc, ctx, mm);
		if (ret)
			break;
	}
	up_read(&uprobe->consumer_rwsem);

	return ret;
}
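/*
 * Filter semantics, spelled out: a consumer without a ->filter callback
 * matches every mm (consumer_filter() returns true), and filter_chain()
 * returns true as soon as any one consumer matches.  Consequently a uprobe
 * whose consumer list is empty filters everything out, which is what
 * uprobe_mmap() relies on when racing with uprobe_unregister().
 */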
static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	bool first_uprobe;
	int ret;

	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
	if (ret)
		return ret;

	/*
	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
	 * the task can hit this breakpoint right after __replace_page().
	 */
	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
	if (first_uprobe)
		set_bit(MMF_HAS_UPROBES, &mm->flags);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		clear_bit(MMF_HAS_UPROBES, &mm->flags);

	return ret;
}

static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
	set_bit(MMF_RECALC_UPROBES, &mm->flags);
	return set_orig_insn(&uprobe->arch, mm, vaddr);
}

static inline bool uprobe_is_active(struct uprobe *uprobe)
{
	return !RB_EMPTY_NODE(&uprobe->rb_node);
}

/*
 * There could be threads that have already hit the breakpoint.  They
 * will recheck the current insn and restart if find_uprobe() fails.
 * See find_active_uprobe().
 */
static void delete_uprobe(struct uprobe *uprobe)
{
	if (WARN_ON(!uprobe_is_active(uprobe)))
		return;

	spin_lock(&uprobes_treelock);
	rb_erase(&uprobe->rb_node, &uprobes_tree);
	spin_unlock(&uprobes_treelock);
	RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
	put_uprobe(uprobe);
}

struct map_info {
	struct map_info *next;
	struct mm_struct *mm;
	unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
	struct map_info *next = info->next;
	kfree(info);
	return next;
}

static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct map_info *curr = NULL;
	struct map_info *prev = NULL;
	struct map_info *info;
	int more = 0;

again:
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		if (!prev && !more) {
			/*
			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
			 * reclaim.  This is optimistic, no harm done if it fails.
			 */
			prev = kmalloc(sizeof(struct map_info),
					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (prev)
				prev->next = NULL;
		}
		if (!prev) {
			more++;
			continue;
		}

		if (!mmget_not_zero(vma->vm_mm))
			continue;

		info = prev;
		prev = prev->next;
		info->next = curr;
		curr = info;

		info->mm = vma->vm_mm;
		info->vaddr = offset_to_vaddr(vma, offset);
	}
	i_mmap_unlock_read(mapping);

	if (!more)
		goto out;

	prev = curr;
	while (curr) {
		mmput(curr->mm);
		curr = curr->next;
	}

	do {
		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
		if (!info) {
			curr = ERR_PTR(-ENOMEM);
			goto out;
		}
		info->next = prev;
		prev = info;
	} while (--more);

	goto again;
out:
	while (prev)
		prev = free_map_info(prev);
	return curr;
}

static int
register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
{
	bool is_register = !!new;
	struct map_info *info;
	int err = 0;

	percpu_down_write(&dup_mmap_sem);
	info = build_map_info(uprobe->inode->i_mapping,
					uprobe->offset, is_register);
	if (IS_ERR(info)) {
		err = PTR_ERR(info);
		goto out;
	}

	while (info) {
		struct mm_struct *mm = info->mm;
		struct vm_area_struct *vma;

		if (err && is_register)
			goto free;

		down_write(&mm->mmap_sem);
		vma = find_vma(mm, info->vaddr);
		if (!vma || !valid_vma(vma, is_register) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			goto unlock;

		if (vma->vm_start > info->vaddr ||
		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
			goto unlock;

		if (is_register) {
			/* consult only the "caller", new consumer. */
			if (consumer_filter(new,
					UPROBE_FILTER_REGISTER, mm))
				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
			if (!filter_chain(uprobe,
					UPROBE_FILTER_UNREGISTER, mm))
				err |= remove_breakpoint(uprobe, mm, info->vaddr);
		}

unlock:
		up_write(&mm->mmap_sem);
free:
		mmput(mm);
		info = free_map_info(info);
	}
out:
	percpu_up_write(&dup_mmap_sem);
	return err;
}
static int __uprobe_register(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	consumer_add(uprobe, uc);
	return register_for_each_vma(uprobe, uc);
}

static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	int err;

	if (WARN_ON(!consumer_del(uprobe, uc)))
		return;

	err = register_for_each_vma(uprobe, NULL);
	/* TODO: can't unregister?  Schedule a worker thread. */
	if (!uprobe->consumers && !err)
		delete_uprobe(uprobe);
}

/*
 * uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e. it is the first consumer for a given
 * @inode:@offset tuple).  The creation refcount stops uprobe_unregister
 * from freeing the @uprobe even before the register operation is complete.
 * The creation refcount is released when the last @uc for the @uprobe
 * unregisters.  The caller of uprobe_register() is required to keep @inode
 * (and the containing mount) referenced.
 *
 * Return errno if it cannot successfully install probes,
 * else return 0 (success).
 */
int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	/* Uprobe must have at least one set consumer */
	if (!uc->handler && !uc->ret_handler)
		return -EINVAL;

	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
	if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping))
		return -EIO;
	/* Racy, just to catch the obvious mistakes */
	if (offset > i_size_read(inode))
		return -EINVAL;

retry:
	uprobe = alloc_uprobe(inode, offset);
	if (!uprobe)
		return -ENOMEM;
	/*
	 * We can race with uprobe_unregister()->delete_uprobe().
	 * Check uprobe_is_active() and retry if it is false.
	 */
	down_write(&uprobe->register_rwsem);
	ret = -EAGAIN;
	if (likely(uprobe_is_active(uprobe))) {
		ret = __uprobe_register(uprobe, uc);
		if (ret)
			__uprobe_unregister(uprobe, uc);
	}
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}
EXPORT_SYMBOL_GPL(uprobe_register);
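/*
 * Usage sketch (not part of this file; error handling elided and the
 * handler body is hypothetical).  A kernel-side client supplies a
 * uprobe_consumer and registers it against an inode:offset pair:
 *
 *	static int my_handler(struct uprobe_consumer *uc, struct pt_regs *regs)
 *	{
 *		return 0;		// keep the breakpoint installed
 *	}
 *
 *	static struct uprobe_consumer my_consumer = {
 *		.handler = my_handler,
 *	};
 *
 *	err = uprobe_register(inode, offset, &my_consumer);
 *	...
 *	uprobe_unregister(inode, offset, &my_consumer);
 *
 * This is essentially how kernel/trace/trace_uprobe.c drives this API.
 */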
/*
 * uprobe_apply - add or remove the breakpoints of an already registered probe.
 * @inode: the file in which the probe has to be adjusted.
 * @offset: offset from the start of the file.
 * @uc: consumer which wants to add more or remove some breakpoints
 * @add: add or remove the breakpoints
 */
int uprobe_apply(struct inode *inode, loff_t offset,
			struct uprobe_consumer *uc, bool add)
{
	struct uprobe *uprobe;
	struct uprobe_consumer *con;
	int ret = -ENOENT;

	uprobe = find_uprobe(inode, offset);
	if (WARN_ON(!uprobe))
		return ret;

	down_write(&uprobe->register_rwsem);
	for (con = uprobe->consumers; con && con != uc; con = con->next)
		;
	if (con)
		ret = register_for_each_vma(uprobe, add ? uc : NULL);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);

	return ret;
}

/*
 * uprobe_unregister - unregister an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;

	uprobe = find_uprobe(inode, offset);
	if (WARN_ON(!uprobe))
		return;

	down_write(&uprobe->register_rwsem);
	__uprobe_unregister(uprobe, uc);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister);

static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	int err = 0;

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long vaddr;
		loff_t offset;

		if (!valid_vma(vma, false) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			continue;

		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
		if (uprobe->offset < offset ||
		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
			continue;

		vaddr = offset_to_vaddr(vma, uprobe->offset);
		err |= remove_breakpoint(uprobe, mm, vaddr);
	}
	up_read(&mm->mmap_sem);

	return err;
}

static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
	struct rb_node *n = uprobes_tree.rb_node;

	while (n) {
		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);

		if (inode < u->inode) {
			n = n->rb_left;
		} else if (inode > u->inode) {
			n = n->rb_right;
		} else {
			if (max < u->offset)
				n = n->rb_left;
			else if (min > u->offset)
				n = n->rb_right;
			else
				break;
		}
	}

	return n;
}
/*
 * For a given range in vma, build a list of probes that need to be inserted.
 */
static void build_probe_list(struct inode *inode,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct list_head *head)
{
	loff_t min, max;
	struct rb_node *n, *t;
	struct uprobe *u;

	INIT_LIST_HEAD(head);
	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	if (n) {
		for (t = n; t; t = rb_prev(t)) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset < min)
				break;
			list_add(&u->pending_list, head);
			get_uprobe(u);
		}
		for (t = n; (t = rb_next(t)); ) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset > max)
				break;
			list_add(&u->pending_list, head);
			get_uprobe(u);
		}
	}
	spin_unlock(&uprobes_treelock);
}
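/*
 * On the two loops above: find_node_in_range() returns *some* node within
 * [min, max], not necessarily the first one, so build_probe_list() walks
 * rb_prev() and then rb_next() from that node to collect every uprobe of
 * this inode whose offset falls inside the range, taking an access ref on
 * each (dropped by the caller, uprobe_mmap()).
 */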
/*
 * Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
 *
 * Currently we ignore all errors and always return 0; the callers
 * can't handle the failure anyway.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;

	if (no_uprobe_events() || !valid_vma(vma, true))
		return 0;

	inode = file_inode(vma->vm_file);
	if (!inode)
		return 0;

	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
	/*
	 * We can race with uprobe_unregister(); this uprobe can already be
	 * removed.  But in this case filter_chain() must return false: all
	 * consumers have gone away.
	 */
	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		if (!fatal_signal_pending(current) &&
		    filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
		}
		put_uprobe(uprobe);
	}
	mutex_unlock(uprobes_mmap_hash(inode));

	return 0;
}

static bool
vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	loff_t min, max;
	struct inode *inode;
	struct rb_node *n;

	inode = file_inode(vma->vm_file);

	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	spin_unlock(&uprobes_treelock);

	return !!n;
}

/*
 * Called in context of a munmap of a vma.
 */
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (no_uprobe_events() || !valid_vma(vma, false))
		return;

	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput()? */
		return;

	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
		return;

	if (vma_has_uprobes(vma, start, end))
		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
}

/* Slot allocation for XOL */
static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
{
	struct vm_area_struct *vma;
	int ret;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	if (mm->uprobes_state.xol_area) {
		ret = -EALREADY;
		goto fail;
	}

	if (!area->vaddr) {
		/* Try to map as high as possible, this is only a hint. */
		area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
						PAGE_SIZE, 0, 0);
		if (area->vaddr & ~PAGE_MASK) {
			ret = area->vaddr;
			goto fail;
		}
	}

	vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
				&area->xol_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto fail;
	}

	ret = 0;
	/* pairs with get_xol_area() */
	smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
fail:
	up_write(&mm->mmap_sem);

	return ret;
}
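/*
 * Note that the special mapping above is VM_DONTCOPY: a fork() does not
 * inherit the parent's XOL vma (hence the unconditional MMF_RECALC_UPROBES
 * in uprobe_dup_mmap() below), and the child gets its own area recreated
 * on demand via dup_xol_work()/__create_xol_area().
 */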
static struct xol_area *__create_xol_area(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	uprobe_opcode_t insn = UPROBE_SWBP_INSN;
	struct xol_area *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		goto out;

	area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
	if (!area->bitmap)
		goto free_area;

	area->xol_mapping.name = "[uprobes]";
	area->xol_mapping.fault = NULL;
	area->xol_mapping.pages = area->pages;
	area->pages[0] = alloc_page(GFP_HIGHUSER);
	if (!area->pages[0])
		goto free_bitmap;
	area->pages[1] = NULL;

	area->vaddr = vaddr;
	init_waitqueue_head(&area->wq);
	/* Reserve the 1st slot for get_trampoline_vaddr() */
	set_bit(0, area->bitmap);
	atomic_set(&area->slot_count, 1);
	arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);

	if (!xol_add_vma(mm, area))
		return area;

	__free_page(area->pages[0]);
free_bitmap:
	kfree(area->bitmap);
free_area:
	kfree(area);
out:
	return NULL;
}

/*
 * get_xol_area - Allocate process's xol_area if necessary.
 * This area will be used for storing instructions for execution out of line.
 *
 * Returns the allocated area or NULL.
 */
static struct xol_area *get_xol_area(void)
{
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	if (!mm->uprobes_state.xol_area)
		__create_xol_area(0);

	/* Pairs with xol_add_vma() smp_store_release() */
	area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */
	return area;
}

/*
 * uprobe_clear_state - Free the area allocated for slots.
 */
void uprobe_clear_state(struct mm_struct *mm)
{
	struct xol_area *area = mm->uprobes_state.xol_area;

	if (!area)
		return;

	put_page(area->pages[0]);
	kfree(area->bitmap);
	kfree(area);
}

void uprobe_start_dup_mmap(void)
{
	percpu_down_read(&dup_mmap_sem);
}

void uprobe_end_dup_mmap(void)
{
	percpu_up_read(&dup_mmap_sem);
}

void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
		set_bit(MMF_HAS_UPROBES, &newmm->flags);
		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
	}
}

/*
 * Search the bitmap for a free slot; sleep on area->wq if all slots
 * are currently busy.
 */
static unsigned long xol_take_insn_slot(struct xol_area *area)
{
	unsigned long slot_addr;
	int slot_nr;

	do {
		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
		if (slot_nr < UINSNS_PER_PAGE) {
			if (!test_and_set_bit(slot_nr, area->bitmap))
				break;

			slot_nr = UINSNS_PER_PAGE;
			continue;
		}
		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
	} while (slot_nr >= UINSNS_PER_PAGE);

	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
	atomic_inc(&area->slot_count);

	return slot_addr;
}

/*
 * xol_get_insn_slot - allocate a slot for xol.
 * Returns the allocated slot address or 0.
 */
static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
{
	struct xol_area *area;
	unsigned long xol_vaddr;

	area = get_xol_area();
	if (!area)
		return 0;

	xol_vaddr = xol_take_insn_slot(area);
	if (unlikely(!xol_vaddr))
		return 0;

	arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));

	return xol_vaddr;
}

/*
 * xol_free_insn_slot - If slot was earlier allocated by
 * xol_get_insn_slot(), make the slot available for
 * subsequent requests.
 */
static void xol_free_insn_slot(struct task_struct *tsk)
{
	struct xol_area *area;
	unsigned long vma_end;
	unsigned long slot_addr;

	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
		return;

	slot_addr = tsk->utask->xol_vaddr;
	if (unlikely(!slot_addr))
		return;

	area = tsk->mm->uprobes_state.xol_area;
	vma_end = area->vaddr + PAGE_SIZE;
	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
		unsigned long offset;
		int slot_nr;

		offset = slot_addr - area->vaddr;
		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
		if (slot_nr >= UINSNS_PER_PAGE)
			return;

		clear_bit(slot_nr, area->bitmap);
		atomic_dec(&area->slot_count);
		smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
		if (waitqueue_active(&area->wq))
			wake_up(&area->wq);

		tsk->utask->xol_vaddr = 0;
	}
}
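/*
 * Slot arithmetic, with example numbers (assuming 4K pages and 128-byte
 * slots, so UINSNS_PER_PAGE == 32): slot n lives at
 *
 *	area->vaddr + n * UPROBE_XOL_SLOT_BYTES
 *
 * and xol_free_insn_slot() inverts this to recover n from the slot
 * address.  Slot 0 is permanently reserved for the uretprobe trampoline
 * (see __create_xol_area() and get_trampoline_vaddr()).
 */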
void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
				  void *src, unsigned long len)
{
	/* Initialize the slot */
	copy_to_page(page, vaddr, src, len);

	/*
	 * We probably need flush_icache_user_range() but it needs vma.
	 * This should work on most of architectures by default.  If
	 * an architecture needs to do something different it can define
	 * its own version of the function.
	 */
	flush_dcache_page(page);
}

/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */
unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
}

unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (unlikely(utask && utask->active_uprobe))
		return utask->vaddr;

	return instruction_pointer(regs);
}

static struct return_instance *free_ret_instance(struct return_instance *ri)
{
	struct return_instance *next = ri->next;
	put_uprobe(ri->uprobe);
	kfree(ri);
	return next;
}

/*
 * Called with no locks held.
 * Called in context of an exiting or an exec-ing thread.
 */
void uprobe_free_utask(struct task_struct *t)
{
	struct uprobe_task *utask = t->utask;
	struct return_instance *ri;

	if (!utask)
		return;

	if (utask->active_uprobe)
		put_uprobe(utask->active_uprobe);

	ri = utask->return_instances;
	while (ri)
		ri = free_ret_instance(ri);

	xol_free_insn_slot(t);
	kfree(utask);
	t->utask = NULL;
}

/*
 * Allocate a uprobe_task object for the task if necessary.
 * Called when the thread hits a breakpoint.
 *
 * Returns:
 * - pointer to new uprobe_task on success
 * - NULL otherwise
 */
static struct uprobe_task *get_utask(void)
{
	if (!current->utask)
		current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
	return current->utask;
}

static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
{
	struct uprobe_task *n_utask;
	struct return_instance **p, *o, *n;

	n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
	if (!n_utask)
		return -ENOMEM;
	t->utask = n_utask;

	p = &n_utask->return_instances;
	for (o = o_utask->return_instances; o; o = o->next) {
		n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
		if (!n)
			return -ENOMEM;

		*n = *o;
		get_uprobe(n->uprobe);
		n->next = NULL;

		*p = n;
		p = &n->next;
		n_utask->depth++;
	}

	return 0;
}

static void uprobe_warn(struct task_struct *t, const char *msg)
{
	pr_warn("uprobe: %s:%d failed to %s\n", t->comm, t->pid, msg);
}

static void dup_xol_work(struct callback_head *work)
{
	if (current->flags & PF_EXITING)
		return;

	if (!__create_xol_area(current->utask->dup_xol_addr) &&
	    !fatal_signal_pending(current))
		uprobe_warn(current, "dup xol area");
}
/*
 * Called in context of a new clone/fork from copy_process.
 */
void uprobe_copy_process(struct task_struct *t, unsigned long flags)
{
	struct uprobe_task *utask = current->utask;
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	t->utask = NULL;

	if (!utask || !utask->return_instances)
		return;

	if (mm == t->mm && !(flags & CLONE_VFORK))
		return;

	if (dup_utask(t, utask))
		return uprobe_warn(t, "dup ret instances");

	/* The task can fork() after dup_xol_work() fails */
	area = mm->uprobes_state.xol_area;
	if (!area)
		return uprobe_warn(t, "dup xol area");

	if (mm == t->mm)
		return;

	t->utask->dup_xol_addr = area->vaddr;
	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
	task_work_add(t, &t->utask->dup_xol_work, true);
}

/*
 * The current area->vaddr notion assumes the trampoline address is
 * always equal to area->vaddr.
 *
 * Returns -1 in case the xol_area is not allocated.
 */
static unsigned long get_trampoline_vaddr(void)
{
	struct xol_area *area;
	unsigned long trampoline_vaddr = -1;

	/* Pairs with xol_add_vma() smp_store_release() */
	area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */
	if (area)
		trampoline_vaddr = area->vaddr;

	return trampoline_vaddr;
}

static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
					struct pt_regs *regs)
{
	struct return_instance *ri = utask->return_instances;
	enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;

	while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
		ri = free_ret_instance(ri);
		utask->depth--;
	}
	utask->return_instances = ri;
}
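/*
 * A note on "chaining" in prepare_uretprobe() below: when the hijacked
 * return address already points at the trampoline, the probed function was
 * itself called from a uretprobe'd function, so the real return address is
 * no longer on the stack; the new return_instance is marked chained and
 * reuses the orig_ret_vaddr recorded by the instance below it.
 * utask->depth caps the stack of pending instances at MAX_URETPROBE_DEPTH.
 */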
static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct return_instance *ri;
	struct uprobe_task *utask;
	unsigned long orig_ret_vaddr, trampoline_vaddr;
	bool chained;

	if (!get_xol_area())
		return;

	utask = get_utask();
	if (!utask)
		return;

	if (utask->depth >= MAX_URETPROBE_DEPTH) {
		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
				" nestedness limit pid/tgid=%d/%d\n",
				current->pid, current->tgid);
		return;
	}

	ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
	if (!ri)
		return;

	trampoline_vaddr = get_trampoline_vaddr();
	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
	if (orig_ret_vaddr == -1)
		goto fail;

	/* drop the entries invalidated by longjmp() */
	chained = (orig_ret_vaddr == trampoline_vaddr);
	cleanup_return_instances(utask, chained, regs);

	/*
	 * We don't want to keep the trampoline address on the stack; rather,
	 * we keep the original return address of the first caller through all
	 * the subsequent instances.  This also makes breakpoint unwrapping
	 * easier.
	 */
	if (chained) {
		if (!utask->return_instances) {
			/*
			 * This situation is not possible; likely we have an
			 * attack from user-space.
			 */
			uprobe_warn(current, "handle tail call");
			goto fail;
		}
		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
	}

	ri->uprobe = get_uprobe(uprobe);
	ri->func = instruction_pointer(regs);
	ri->stack = user_stack_pointer(regs);
	ri->orig_ret_vaddr = orig_ret_vaddr;
	ri->chained = chained;

	utask->depth++;
	ri->next = utask->return_instances;
	utask->return_instances = ri;

	return;
fail:
	kfree(ri);
}

/* Prepare to single-step probed instruction out of line. */
static int
pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
{
	struct uprobe_task *utask;
	unsigned long xol_vaddr;
	int err;

	utask = get_utask();
	if (!utask)
		return -ENOMEM;

	xol_vaddr = xol_get_insn_slot(uprobe);
	if (!xol_vaddr)
		return -ENOMEM;

	utask->xol_vaddr = xol_vaddr;
	utask->vaddr = bp_vaddr;

	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
	if (unlikely(err)) {
		xol_free_insn_slot(current);
		return err;
	}

	utask->active_uprobe = uprobe;
	utask->state = UTASK_SSTEP;
	return 0;
}

/*
 * If we are singlestepping, then ensure this thread is not connected to
 * non-fatal signals until completion of singlestep.  When xol insn itself
 * triggers the signal, restart the original insn even if the task is
 * already SIGKILL'ed (since coredump should report the correct ip).  This
 * is even more important if the task has a handler for SIGSEGV/etc: the
 * _same_ instruction should be repeated again after return from the signal
 * handler, and SSTEP can never finish in this case.
 */
bool uprobe_deny_signal(void)
{
	struct task_struct *t = current;
	struct uprobe_task *utask = t->utask;

	if (likely(!utask || !utask->active_uprobe))
		return false;

	WARN_ON_ONCE(utask->state != UTASK_SSTEP);

	if (signal_pending(t)) {
		spin_lock_irq(&t->sighand->siglock);
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
		spin_unlock_irq(&t->sighand->siglock);

		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
			utask->state = UTASK_SSTEP_TRAPPED;
			set_tsk_thread_flag(t, TIF_UPROBE);
		}
	}

	return true;
}
static void mmf_recalc_uprobes(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!valid_vma(vma, false))
			continue;
		/*
		 * This is not strictly accurate, we can race with
		 * uprobe_unregister() and see the already removed
		 * uprobe if delete_uprobe() was not yet called.
		 * Or this uprobe can be filtered out.
		 */
		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
			return;
	}

	clear_bit(MMF_HAS_UPROBES, &mm->flags);
}

static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
	struct page *page;
	uprobe_opcode_t opcode;
	int result;

	pagefault_disable();
	result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
	pagefault_enable();

	if (likely(result == 0))
		goto out;

	/*
	 * The NULL 'tsk' here ensures that any faults that occur here
	 * will not be accounted to the task.  'mm' *is* current->mm,
	 * but we treat this as a 'remote' access since it is
	 * essentially a kernel access to the memory.
	 */
	result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page,
			NULL, NULL);
	if (result < 0)
		return result;

	copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
	put_page(page);
out:
	/* This needs to return true for any variant of the trap insn */
	return is_trap_insn(&opcode);
}

static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
{
	struct mm_struct *mm = current->mm;
	struct uprobe *uprobe = NULL;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, bp_vaddr);
	if (vma && vma->vm_start <= bp_vaddr) {
		if (valid_vma(vma, false)) {
			struct inode *inode = file_inode(vma->vm_file);
			loff_t offset = vaddr_to_offset(vma, bp_vaddr);

			uprobe = find_uprobe(inode, offset);
		}

		if (!uprobe)
			*is_swbp = is_trap_at_addr(mm, bp_vaddr);
	} else {
		*is_swbp = -EFAULT;
	}

	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
		mmf_recalc_uprobes(mm);
	up_read(&mm->mmap_sem);

	return uprobe;
}

static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct uprobe_consumer *uc;
	int remove = UPROBE_HANDLER_REMOVE;
	bool need_prep = false; /* prepare return uprobe, when needed */

	down_read(&uprobe->register_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		int rc = 0;

		if (uc->handler) {
			rc = uc->handler(uc, regs);
			WARN(rc & ~UPROBE_HANDLER_MASK,
				"bad rc=0x%x from %pf()\n", rc, uc->handler);
		}

		if (uc->ret_handler)
			need_prep = true;

		remove &= rc;
	}

	if (need_prep && !remove)
		prepare_uretprobe(uprobe, regs); /* put bp at return */

	if (remove && uprobe->consumers) {
		WARN_ON(!uprobe_is_active(uprobe));
		unapply_uprobe(uprobe, current->mm);
	}
	up_read(&uprobe->register_rwsem);
}
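/*
 * On handler return values: a consumer's ->handler() may return
 * UPROBE_HANDLER_REMOVE to request that the breakpoint be removed from
 * this mm; as the "remove &= rc" above shows, the probe is unapplied only
 * if every consumer agrees.  A hypothetical one-shot handler would look
 * like:
 *
 *	static int once_handler(struct uprobe_consumer *uc, struct pt_regs *regs)
 *	{
 *		return UPROBE_HANDLER_REMOVE;
 *	}
 */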
static void
handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
{
	struct uprobe *uprobe = ri->uprobe;
	struct uprobe_consumer *uc;

	down_read(&uprobe->register_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		if (uc->ret_handler)
			uc->ret_handler(uc, ri->func, regs);
	}
	up_read(&uprobe->register_rwsem);
}

static struct return_instance *find_next_ret_chain(struct return_instance *ri)
{
	bool chained;

	do {
		chained = ri->chained;
		ri = ri->next;	/* can't be NULL if chained */
	} while (chained);

	return ri;
}

static void handle_trampoline(struct pt_regs *regs)
{
	struct uprobe_task *utask;
	struct return_instance *ri, *next;
	bool valid;

	utask = current->utask;
	if (!utask)
		goto sigill;

	ri = utask->return_instances;
	if (!ri)
		goto sigill;

	do {
		/*
		 * We should throw out the frames invalidated by longjmp().
		 * If this chain is valid, then the next one should be alive
		 * or NULL; the latter case means that nobody but ri->func
		 * could hit this trampoline on return.  TODO: sigaltstack().
		 */
		next = find_next_ret_chain(ri);
		valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);

		instruction_pointer_set(regs, ri->orig_ret_vaddr);
		do {
			if (valid)
				handle_uretprobe_chain(ri, regs);
			ri = free_ret_instance(ri);
			utask->depth--;
		} while (ri != next);
	} while (!valid);

	utask->return_instances = ri;
	return;

sigill:
	uprobe_warn(current, "handle uretprobe, sending SIGILL.");
	force_sig_info(SIGILL, SEND_SIG_FORCED, current);
}

bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
{
	return false;
}

bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
					struct pt_regs *regs)
{
	return true;
}

/*
 * Run handler and ask thread to singlestep.
 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
 */
static void handle_swbp(struct pt_regs *regs)
{
	struct uprobe *uprobe;
	unsigned long bp_vaddr;
	int uninitialized_var(is_swbp);

	bp_vaddr = uprobe_get_swbp_addr(regs);
	if (bp_vaddr == get_trampoline_vaddr())
		return handle_trampoline(regs);

	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
	if (!uprobe) {
		if (is_swbp > 0) {
			/* No matching uprobe; signal SIGTRAP. */
			send_sig(SIGTRAP, current, 0);
		} else {
			/*
			 * Either we raced with uprobe_unregister() or we can't
			 * access this memory.  The latter is only possible if
			 * another thread plays with our ->mm.  In both cases
			 * we can simply restart.  If this vma was unmapped we
			 * can pretend this insn was not executed yet and get
			 * the (correct) SIGSEGV after restart.
			 */
			instruction_pointer_set(regs, bp_vaddr);
		}
		return;
	}

	/* change it in advance for ->handler() and restart */
	instruction_pointer_set(regs, bp_vaddr);

	/*
	 * TODO: move copy_insn/etc into _register and remove this hack.
	 * After we hit the bp, _unregister + _register can install the
	 * new and not-yet-analyzed uprobe at the same address, restart.
	 */
	smp_rmb(); /* pairs with wmb() in prepare_uprobe() */
	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
		goto out;

	/* Tracing handlers use ->utask to communicate with fetch methods */
	if (!get_utask())
		goto out;

	if (arch_uprobe_ignore(&uprobe->arch, regs))
		goto out;

	handler_chain(uprobe, regs);

	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
		goto out;

	if (!pre_ssout(uprobe, regs, bp_vaddr))
		return;

	/* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */
out:
	put_uprobe(uprobe);
}
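/*
 * Single-step state machine, for reference: pre_ssout() sets
 * utask->state = UTASK_SSTEP; the singlestep exception moves it to
 * UTASK_SSTEP_ACK (uprobe_post_sstep_notifier()), or uprobe_deny_signal()
 * moves it to UTASK_SSTEP_TRAPPED if the XOL insn trapped; and
 * handle_singlestep() below consumes either state and returns the task to
 * UTASK_RUNNING.
 */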
/*
 * Perform required fix-ups and disable singlestep.
 * Allow pending signals to take effect.
 */
static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
{
	struct uprobe *uprobe;
	int err = 0;

	uprobe = utask->active_uprobe;
	if (utask->state == UTASK_SSTEP_ACK)
		err = arch_uprobe_post_xol(&uprobe->arch, regs);
	else if (utask->state == UTASK_SSTEP_TRAPPED)
		arch_uprobe_abort_xol(&uprobe->arch, regs);
	else
		WARN_ON_ONCE(1);

	put_uprobe(uprobe);
	utask->active_uprobe = NULL;
	utask->state = UTASK_RUNNING;
	xol_free_insn_slot(current);

	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending(); /* see uprobe_deny_signal() */
	spin_unlock_irq(&current->sighand->siglock);

	if (unlikely(err)) {
		uprobe_warn(current, "execute the probed insn, sending SIGILL.");
		force_sig_info(SIGILL, SEND_SIG_FORCED, current);
	}
}

/*
 * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
 * allows the thread to return from interrupt.  After that handle_swbp()
 * sets utask->active_uprobe.
 *
 * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
 * and allows the thread to return from interrupt.
 *
 * While returning to userspace, thread notices the TIF_UPROBE flag and calls
 * uprobe_notify_resume().
 */
void uprobe_notify_resume(struct pt_regs *regs)
{
	struct uprobe_task *utask;

	clear_thread_flag(TIF_UPROBE);

	utask = current->utask;
	if (utask && utask->active_uprobe)
		handle_singlestep(utask, regs);
	else
		handle_swbp(regs);
}

/*
 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
 * notifier mechanism.  Set TIF_UPROBE flag and indicate breakpoint hit.
 */
int uprobe_pre_sstep_notifier(struct pt_regs *regs)
{
	if (!current->mm)
		return 0;

	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
	    (!current->utask || !current->utask->return_instances))
		return 0;

	set_thread_flag(TIF_UPROBE);
	return 1;
}

/*
 * uprobe_post_sstep_notifier gets called in interrupt context as part of
 * notifier mechanism.  Set TIF_UPROBE flag and indicate completion of
 * singlestep.
 */
int uprobe_post_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (!current->mm || !utask || !utask->active_uprobe)
		/* task is currently not uprobed */
		return 0;

	utask->state = UTASK_SSTEP_ACK;
	set_thread_flag(TIF_UPROBE);
	return 1;
}

static struct notifier_block uprobe_exception_nb = {
	.notifier_call		= arch_uprobe_exception_notify,
	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
};

static int __init init_uprobes(void)
{
	int i;

	for (i = 0; i < UPROBES_HASH_SZ; i++)
		mutex_init(&uprobes_mmap_mutex[i]);

	if (percpu_init_rwsem(&dup_mmap_sem))
		return -ENOMEM;

	return register_die_notifier(&uprobe_exception_nb);
}
__initcall(init_uprobes);