/*
 * User-space Probes (UProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
#include <linux/swap.h>		/* try_to_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include "../../mm/internal.h"	/* munlock_vma_page */

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE		(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS	UINSNS_PER_PAGE

static struct rb_root uprobes_tree = RB_ROOT;

static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */

#define UPROBES_HASH_SZ	13

/*
 * We need separate register/unregister and mmap/munmap lock hashes because
 * of mmap_sem nesting.
 *
 * uprobe_register() needs to install probes on (potentially) all processes
 * and thus needs to acquire multiple mmap_sems (consecutively, not
 * concurrently), whereas uprobe_mmap() is called while holding mmap_sem
 * for the particular process doing the mmap.
 *
 * uprobe_register()->register_for_each_vma() needs to drop/acquire mmap_sem
 * because of lock order against i_mmap_mutex.
 * This means there's a hole in
 * the register vma iteration where a mmap() can happen.
 *
 * Thus uprobe_register() can race with uprobe_mmap() and we can try and
 * install a probe where one is already installed.
 */

/* serialize (un)register */
static struct mutex uprobes_mutex[UPROBES_HASH_SZ];

#define uprobes_hash(v)		(&uprobes_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

/*
 * uprobe_events allows us to skip the uprobe_mmap if there are no uprobe
 * events active at this time.  Probably a fine grained per inode count is
 * better?
 */
static atomic_t uprobe_events = ATOMIC_INIT(0);

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	atomic_t		ref;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct uprobe_consumer	*consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	loff_t			offset;
	int			flags;
	struct arch_uprobe	arch;
};

/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return 1 if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_SHARED;

	if (is_register)
		flags |= VM_WRITE;

	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
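
/*
 * Worked example for the two helpers above (illustrative numbers only, not
 * from the original sources): on a 4K-page system, a vma with
 * vm_start == 0x400000 and vm_pgoff == 2 maps file offset 0x2000 at its
 * first byte.  Then offset_to_vaddr(vma, 0x2464) == 0x400464 and
 * vaddr_to_offset(vma, 0x400464) == 0x2464; the two conversions are exact
 * inverses over the mapped range.
 */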

/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @page is mapped at
 * @page:     the cowed page we are replacing by kpage
 * @kpage:    the modified page we replace page by
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *page, struct page *kpage)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pte_t *ptep;
	int err;

	/* For try_to_free_swap() and munlock_vma_page() below */
	lock_page(page);

	err = -EAGAIN;
	ptep = page_check_address(page, mm, addr, &ptl, 0);
	if (!ptep)
		goto unlock;

	get_page(kpage);
	page_add_new_anon_rmap(kpage, vma, addr);

	if (!PageAnon(page)) {
		dec_mm_counter(mm, MM_FILEPAGES);
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	ptep_clear_flush(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));

	page_remove_rmap(page);
	if (!page_mapped(page))
		try_to_free_swap(page);
	pte_unmap_unlock(ptep, ptl);

	if (vma->vm_flags & VM_LOCKED)
		munlock_vma_page(page);
	put_page(page);

	err = 0;
 unlock:
	unlock_page(page);
	return err;
}

/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}

static void copy_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *opcode)
{
	void *kaddr = kmap_atomic(page);
	memcpy(opcode, kaddr + (vaddr & ~PAGE_MASK), UPROBE_SWBP_INSN_SIZE);
	kunmap_atomic(kaddr);
}

static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
	uprobe_opcode_t old_opcode;
	bool is_swbp;

	copy_opcode(page, vaddr, &old_opcode);
	is_swbp = is_swbp_insn(&old_opcode);

	if (is_swbp_insn(new_opcode)) {
		if (is_swbp)		/* register: already installed? */
			return 0;
	} else {
		if (!is_swbp)		/* unregister: was it changed by us? */
			return -EINVAL;
	}

	return 1;
}

/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable length instruction and the
 * breakpoint instruction is not of the smallest length instruction
 * supported by that architecture then we need to modify is_swbp_at_addr and
 * write_opcode accordingly. This would never be a problem for archs that
 * have fixed length instructions.
 */

/*
 * write_opcode - write the opcode at a given virtual address.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_sem held (for read and with a reference to
 * mm).
 *
 * For mm @mm, write the opcode at @vaddr.
 * Return 0 (success) or a negative errno.
 */
static int write_opcode(struct mm_struct *mm, unsigned long vaddr,
			uprobe_opcode_t opcode)
{
	struct page *old_page, *new_page;
	void *vaddr_old, *vaddr_new;
	struct vm_area_struct *vma;
	int ret;

retry:
	/* Read the page with vaddr into memory */
	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma);
	if (ret <= 0)
		return ret;

	ret = verify_opcode(old_page, vaddr, &opcode);
	if (ret <= 0)
		goto put_old;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);

	/* copy the page now that we've got it stable */
	vaddr_old = kmap_atomic(old_page);
	vaddr_new = kmap_atomic(new_page);

	memcpy(vaddr_new, vaddr_old, PAGE_SIZE);
	memcpy(vaddr_new + (vaddr & ~PAGE_MASK), &opcode, UPROBE_SWBP_INSN_SIZE);

	kunmap_atomic(vaddr_new);
	kunmap_atomic(vaddr_old);

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_new;

	ret = __replace_page(vma, vaddr, old_page, new_page);

put_new:
	page_cache_release(new_page);
put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return write_opcode(mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
}

static int match_uprobe(struct uprobe *l, struct uprobe *r)
{
	if (l->inode < r->inode)
		return -1;

	if (l->inode > r->inode)
		return 1;

	if (l->offset < r->offset)
		return -1;

	if (l->offset > r->offset)
		return 1;

	return 0;
}

static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe u = { .inode = inode, .offset = offset };
	struct rb_node *n = uprobes_tree.rb_node;
	struct uprobe *uprobe;
	int match;

	while (n) {
		uprobe = rb_entry(n, struct uprobe, rb_node);
		match = match_uprobe(&u, uprobe);
		if (!match) {
			atomic_inc(&uprobe->ref);
			return uprobe;
		}

		if (match < 0)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	return NULL;
}

/*
 * Find a uprobe corresponding to a given inode:offset
 * Acquires uprobes_treelock
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe;

	spin_lock(&uprobes_treelock);
	uprobe = __find_uprobe(inode, offset);
	spin_unlock(&uprobes_treelock);

	return uprobe;
}

static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node **p = &uprobes_tree.rb_node;
	struct rb_node *parent = NULL;
	struct uprobe *u;
	int match;

	while (*p) {
		parent = *p;
		u = rb_entry(parent, struct uprobe, rb_node);
		match = match_uprobe(uprobe, u);
		if (!match) {
			atomic_inc(&u->ref);
			return u;
		}

		if (match < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	u = NULL;
	rb_link_node(&uprobe->rb_node, parent, p);
	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
	/* get access + creation ref */
	atomic_set(&uprobe->ref, 2);

	return u;
}

/*
 * Acquire uprobes_treelock.
 * Matching uprobe already exists in rbtree;
 *	increment (access refcount) and return the matching uprobe.
 *
 * No matching uprobe; insert the uprobe in rb_tree;
 *	get a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	spin_lock(&uprobes_treelock);
	u = __insert_uprobe(uprobe);
	spin_unlock(&uprobes_treelock);

	/* For now assume that the instruction need not be single-stepped */
	uprobe->flags |= UPROBE_SKIP_SSTEP;

	return u;
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (atomic_dec_and_test(&uprobe->ref))
		kfree(uprobe);
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return NULL;

	uprobe->inode = igrab(inode);
	uprobe->offset = offset;
	init_rwsem(&uprobe->consumer_rwsem);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);

	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe) {
		kfree(uprobe);
		uprobe = cur_uprobe;
		iput(inode);
	} else {
		atomic_inc(&uprobe_events);
	}

	return uprobe;
}

static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct uprobe_consumer *uc;

	if (!(uprobe->flags & UPROBE_RUN_HANDLER))
		return;

	down_read(&uprobe->consumer_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		if (!uc->filter || uc->filter(uc, current))
			uc->handler(uc, regs);
	}
	up_read(&uprobe->consumer_rwsem);
}

/* Returns the previous consumer */
static struct uprobe_consumer *
consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	uc->next = uprobe->consumers;
	uprobe->consumers = uc;
	up_write(&uprobe->consumer_rwsem);

	return uc->next;
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if the @uc is deleted successfully
 * or return false.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	struct uprobe_consumer **con;
	bool ret = false;

	down_write(&uprobe->consumer_rwsem);
	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
		if (*con == uc) {
			*con = uc->next;
			ret = true;
			break;
		}
	}
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static int
__copy_insn(struct address_space *mapping, struct file *filp, char *insn,
			unsigned long nbytes, loff_t offset)
{
	struct page *page;
	void *vaddr;
	unsigned long off;
	pgoff_t idx;

	if (!filp)
		return -EINVAL;

	if (!mapping->a_ops->readpage)
		return -EIO;

	idx = offset >> PAGE_CACHE_SHIFT;
	off = offset & ~PAGE_MASK;

	/*
	 * Ensure that the page that has the original instruction is
	 * populated and in page-cache.
	 */
	page = read_mapping_page(mapping, idx, filp);
	if (IS_ERR(page))
		return PTR_ERR(page);

	vaddr = kmap_atomic(page);
	memcpy(insn, vaddr + off, nbytes);
	kunmap_atomic(vaddr);
	page_cache_release(page);

	return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping;
	unsigned long nbytes;
	int bytes;

	nbytes = PAGE_SIZE - (uprobe->offset & ~PAGE_MASK);
	mapping = uprobe->inode->i_mapping;

	/* Instruction at end of binary; copy only available bytes */
	if (uprobe->offset + MAX_UINSN_BYTES > uprobe->inode->i_size)
		bytes = uprobe->inode->i_size - uprobe->offset;
	else
		bytes = MAX_UINSN_BYTES;

	/* Instruction at the page-boundary; copy bytes in second page */
	if (nbytes < bytes) {
		int err = __copy_insn(mapping, filp, uprobe->arch.insn + nbytes,
				bytes - nbytes, uprobe->offset + nbytes);
		if (err)
			return err;
		bytes = nbytes;
	}
	return __copy_insn(mapping, filp, uprobe->arch.insn, bytes, uprobe->offset);
}

static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	bool first_uprobe;
	int ret;

	/*
	 * If the probe is being deleted, the unregistering thread could
	 * already be done with its rmap walk over the vmas. Adding a probe
	 * now would be fatal, since nobody would be able to clean it up.
	 * We could also be on the fork or mremap path, where the probe
	 * might already have been inserted. Hence behave as if the probe
	 * already existed.
	 */
	if (!uprobe->consumers)
		return 0;

	if (!(uprobe->flags & UPROBE_COPY_INSN)) {
		ret = copy_insn(uprobe, vma->vm_file);
		if (ret)
			return ret;

		if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
			return -ENOTSUPP;

		ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
		if (ret)
			return ret;

		/* write_opcode() assumes we don't cross page boundary */
		BUG_ON((uprobe->offset & ~PAGE_MASK) +
				UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);

		uprobe->flags |= UPROBE_COPY_INSN;
	}

	/*
	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
	 * the task can hit this breakpoint right after __replace_page().
	 */
	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
	if (first_uprobe)
		set_bit(MMF_HAS_UPROBES, &mm->flags);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		clear_bit(MMF_HAS_UPROBES, &mm->flags);

	return ret;
}

static void
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
	/* can happen if uprobe_register() fails */
	if (!test_bit(MMF_HAS_UPROBES, &mm->flags))
		return;

	set_bit(MMF_RECALC_UPROBES, &mm->flags);
	set_orig_insn(&uprobe->arch, mm, vaddr);
}
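
/*
 * The mm->flags protocol above, in short: MMF_HAS_UPROBES is set *before*
 * the first breakpoint is written, so uprobe_pre_sstep_notifier() can never
 * see a breakpoint hit in an unflagged mm, while MMF_RECALC_UPROBES marks
 * the flag as possibly stale (e.g. after remove_breakpoint()) so that it
 * can be recomputed lazily.
 */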

/*
 * There could be threads that have already hit the breakpoint. They
 * will recheck the current insn and restart if find_uprobe() fails.
 * See find_active_uprobe().
 */
static void delete_uprobe(struct uprobe *uprobe)
{
	spin_lock(&uprobes_treelock);
	rb_erase(&uprobe->rb_node, &uprobes_tree);
	spin_unlock(&uprobes_treelock);
	iput(uprobe->inode);
	put_uprobe(uprobe);
	atomic_dec(&uprobe_events);
}

struct map_info {
	struct map_info *next;
	struct mm_struct *mm;
	unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
	struct map_info *next = info->next;
	kfree(info);
	return next;
}

static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct prio_tree_iter iter;
	struct vm_area_struct *vma;
	struct map_info *curr = NULL;
	struct map_info *prev = NULL;
	struct map_info *info;
	int more = 0;

 again:
	mutex_lock(&mapping->i_mmap_mutex);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		if (!prev && !more) {
			/*
			 * Needs GFP_NOWAIT to avoid i_mmap_mutex recursion through
			 * reclaim. This is optimistic, no harm done if it fails.
			 */
			prev = kmalloc(sizeof(struct map_info),
					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (prev)
				prev->next = NULL;
		}
		if (!prev) {
			more++;
			continue;
		}

		if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
			continue;

		info = prev;
		prev = prev->next;
		info->next = curr;
		curr = info;

		info->mm = vma->vm_mm;
		info->vaddr = offset_to_vaddr(vma, offset);
	}
	mutex_unlock(&mapping->i_mmap_mutex);

	if (!more)
		goto out;

	prev = curr;
	while (curr) {
		mmput(curr->mm);
		curr = curr->next;
	}

	do {
		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
		if (!info) {
			curr = ERR_PTR(-ENOMEM);
			goto out;
		}
		info->next = prev;
		prev = info;
	} while (--more);

	goto again;
 out:
	while (prev)
		prev = free_map_info(prev);
	return curr;
}
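
/*
 * The retry scheme above, in short: under i_mmap_mutex we may only allocate
 * with GFP_NOWAIT, so on allocation failure the shortfall is counted in
 * 'more'; we then drop the lock, mmput() the mms gathered so far, allocate
 * the missing map_info entries with GFP_KERNEL, and jump back to 'again' to
 * rescan the mapping.
 */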

static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
{
	struct map_info *info;
	int err = 0;

	info = build_map_info(uprobe->inode->i_mapping,
					uprobe->offset, is_register);
	if (IS_ERR(info))
		return PTR_ERR(info);

	while (info) {
		struct mm_struct *mm = info->mm;
		struct vm_area_struct *vma;

		if (err)
			goto free;

		down_write(&mm->mmap_sem);
		vma = find_vma(mm, info->vaddr);
		if (!vma || !valid_vma(vma, is_register) ||
		    vma->vm_file->f_mapping->host != uprobe->inode)
			goto unlock;

		if (vma->vm_start > info->vaddr ||
		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
			goto unlock;

		if (is_register)
			err = install_breakpoint(uprobe, mm, vma, info->vaddr);
		else
			remove_breakpoint(uprobe, mm, info->vaddr);

 unlock:
		up_write(&mm->mmap_sem);
 free:
		mmput(mm);
		info = free_map_info(info);
	}

	return err;
}

static int __uprobe_register(struct uprobe *uprobe)
{
	return register_for_each_vma(uprobe, true);
}

static void __uprobe_unregister(struct uprobe *uprobe)
{
	if (!register_for_each_vma(uprobe, false))
		delete_uprobe(uprobe);

	/* TODO : can't unregister? schedule a worker thread */
}

/*
 * uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e. first consumer for an @inode:@offset
 * tuple). The creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. The creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters.
 *
 * Return errno if it cannot successfully install probes
 * else return 0 (success)
 */
int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	if (!inode || !uc || uc->next)
		return -EINVAL;

	if (offset > i_size_read(inode))
		return -EINVAL;

	ret = 0;
	mutex_lock(uprobes_hash(inode));
	uprobe = alloc_uprobe(inode, offset);

	if (!uprobe) {
		ret = -ENOMEM;
	} else if (!consumer_add(uprobe, uc)) {
		ret = __uprobe_register(uprobe);
		if (ret) {
			uprobe->consumers = NULL;
			__uprobe_unregister(uprobe);
		} else {
			uprobe->flags |= UPROBE_RUN_HANDLER;
		}
	}

	mutex_unlock(uprobes_hash(inode));
	if (uprobe)
		put_uprobe(uprobe);

	return ret;
}
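
/*
 * Minimal usage sketch for the register/unregister API (illustrative only,
 * not part of the original file; my_handler and my_consumer are
 * hypothetical names):
 *
 *	static int my_handler(struct uprobe_consumer *self, struct pt_regs *regs)
 *	{
 *		pr_info("probe hit, ip=%lx\n", instruction_pointer(regs));
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer my_consumer = { .handler = my_handler };
 *
 *	err = uprobe_register(inode, offset, &my_consumer);
 *	...
 *	uprobe_unregister(inode, offset, &my_consumer);
 *
 * Note that uprobe_register() rejects a consumer whose ->next is non-NULL,
 * so a uprobe_consumer can be registered at only one inode:offset at a time.
 */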

/*
 * uprobe_unregister - unregister an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;

	if (!inode || !uc)
		return;

	uprobe = find_uprobe(inode, offset);
	if (!uprobe)
		return;

	mutex_lock(uprobes_hash(inode));

	if (consumer_del(uprobe, uc)) {
		if (!uprobe->consumers) {
			__uprobe_unregister(uprobe);
			uprobe->flags &= ~UPROBE_RUN_HANDLER;
		}
	}

	mutex_unlock(uprobes_hash(inode));
	if (uprobe)
		put_uprobe(uprobe);
}

static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
	struct rb_node *n = uprobes_tree.rb_node;

	while (n) {
		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);

		if (inode < u->inode) {
			n = n->rb_left;
		} else if (inode > u->inode) {
			n = n->rb_right;
		} else {
			if (max < u->offset)
				n = n->rb_left;
			else if (min > u->offset)
				n = n->rb_right;
			else
				break;
		}
	}

	return n;
}

/*
 * For a given range in vma, build a list of probes that need to be inserted.
 */
static void build_probe_list(struct inode *inode,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct list_head *head)
{
	loff_t min, max;
	struct rb_node *n, *t;
	struct uprobe *u;

	INIT_LIST_HEAD(head);
	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	if (n) {
		for (t = n; t; t = rb_prev(t)) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset < min)
				break;
			list_add(&u->pending_list, head);
			atomic_inc(&u->ref);
		}
		for (t = n; (t = rb_next(t)); ) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset > max)
				break;
			list_add(&u->pending_list, head);
			atomic_inc(&u->ref);
		}
	}
	spin_unlock(&uprobes_treelock);
}

/*
 * Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
 *
 * Currently we ignore all errors and always return 0, the callers
 * can't handle the failure anyway.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;

	if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
		return 0;

	inode = vma->vm_file->f_mapping->host;
	if (!inode)
		return 0;

	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);

	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		if (!fatal_signal_pending(current)) {
			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
		}
		put_uprobe(uprobe);
	}
	mutex_unlock(uprobes_mmap_hash(inode));

	return 0;
}

static bool
vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	loff_t min, max;
	struct inode *inode;
	struct rb_node *n;

	inode = vma->vm_file->f_mapping->host;

	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	spin_unlock(&uprobes_treelock);

	return !!n;
}

/*
 * Called in context of a munmap of a vma.
 */
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
		return;

	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ?
 */
		return;

	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
		return;

	if (vma_has_uprobes(vma, start, end))
		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
}

/* Slot allocation for XOL */
static int xol_add_vma(struct xol_area *area)
{
	struct mm_struct *mm;
	int ret;

	area->page = alloc_page(GFP_HIGHUSER);
	if (!area->page)
		return -ENOMEM;

	ret = -EALREADY;
	mm = current->mm;

	down_write(&mm->mmap_sem);
	if (mm->uprobes_state.xol_area)
		goto fail;

	ret = -ENOMEM;

	/* Try to map as high as possible, this is only a hint. */
	area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
	if (area->vaddr & ~PAGE_MASK) {
		ret = area->vaddr;
		goto fail;
	}

	ret = install_special_mapping(mm, area->vaddr, PAGE_SIZE,
				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, &area->page);
	if (ret)
		goto fail;

	smp_wmb();	/* pairs with get_xol_area() */
	mm->uprobes_state.xol_area = area;
	ret = 0;

 fail:
	up_write(&mm->mmap_sem);
	if (ret)
		__free_page(area->page);

	return ret;
}

static struct xol_area *get_xol_area(struct mm_struct *mm)
{
	struct xol_area *area;

	area = mm->uprobes_state.xol_area;
	smp_read_barrier_depends();	/* pairs with wmb in xol_add_vma() */

	return area;
}

/*
 * xol_alloc_area - Allocate process's xol_area.
 * This area will be used for storing instructions for execution out of
 * line.
 *
 * Returns the allocated area or NULL.
 */
static struct xol_area *xol_alloc_area(void)
{
	struct xol_area *area;

	area = kzalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		return NULL;

	area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
	if (!area->bitmap)
		goto fail;

	init_waitqueue_head(&area->wq);
	if (!xol_add_vma(area))
		return area;

 fail:
	kfree(area->bitmap);
	kfree(area);

	return get_xol_area(current->mm);
}

/*
 * uprobe_clear_state - Free the area allocated for slots.
 */
void uprobe_clear_state(struct mm_struct *mm)
{
	struct xol_area *area = mm->uprobes_state.xol_area;

	if (!area)
		return;

	put_page(area->page);
	kfree(area->bitmap);
	kfree(area);
}

void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
	newmm->uprobes_state.xol_area = NULL;

	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
		set_bit(MMF_HAS_UPROBES, &newmm->flags);
		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
	}
}
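
/*
 * Sizing illustration for the XOL area (assuming typical values, e.g.
 * PAGE_SIZE == 4096 and UPROBE_XOL_SLOT_BYTES == 128): UINSNS_PER_PAGE
 * would be 32, i.e. the single XOL page gives each mm 32 out-of-line
 * instruction slots, handed out by the bitmap logic below.
 */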

/*
 * - search for a free slot.
 */
static unsigned long xol_take_insn_slot(struct xol_area *area)
{
	unsigned long slot_addr;
	int slot_nr;

	do {
		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
		if (slot_nr < UINSNS_PER_PAGE) {
			if (!test_and_set_bit(slot_nr, area->bitmap))
				break;

			slot_nr = UINSNS_PER_PAGE;
			continue;
		}
		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
	} while (slot_nr >= UINSNS_PER_PAGE);

	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
	atomic_inc(&area->slot_count);

	return slot_addr;
}

/*
 * xol_get_insn_slot - If the current task was not yet allocated a slot,
 * allocate one.
 * Returns the allocated slot address or 0.
 */
static unsigned long xol_get_insn_slot(struct uprobe *uprobe, unsigned long slot_addr)
{
	struct xol_area *area;
	unsigned long offset;
	void *vaddr;

	area = get_xol_area(current->mm);
	if (!area) {
		area = xol_alloc_area();
		if (!area)
			return 0;
	}
	current->utask->xol_vaddr = xol_take_insn_slot(area);

	/*
	 * Initialize the slot if xol_vaddr points to valid
	 * instruction slot.
	 */
	if (unlikely(!current->utask->xol_vaddr))
		return 0;

	current->utask->vaddr = slot_addr;
	offset = current->utask->xol_vaddr & ~PAGE_MASK;
	vaddr = kmap_atomic(area->page);
	memcpy(vaddr + offset, uprobe->arch.insn, MAX_UINSN_BYTES);
	kunmap_atomic(vaddr);

	return current->utask->xol_vaddr;
}

/*
 * xol_free_insn_slot - If a slot was earlier allocated by
 * @xol_get_insn_slot(), make the slot available for
 * subsequent requests.

/*
 * xol_free_insn_slot - if a slot was earlier allocated by
 * xol_get_insn_slot(), make it available for subsequent requests.
 */
static void xol_free_insn_slot(struct task_struct *tsk)
{
	struct xol_area *area;
	unsigned long vma_end;
	unsigned long slot_addr;

	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
		return;

	slot_addr = tsk->utask->xol_vaddr;

	if (unlikely(!slot_addr || IS_ERR_VALUE(slot_addr)))
		return;

	area = tsk->mm->uprobes_state.xol_area;
	vma_end = area->vaddr + PAGE_SIZE;
	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
		unsigned long offset;
		int slot_nr;

		offset = slot_addr - area->vaddr;
		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
		if (slot_nr >= UINSNS_PER_PAGE)
			return;

		clear_bit(slot_nr, area->bitmap);
		atomic_dec(&area->slot_count);
		if (waitqueue_active(&area->wq))
			wake_up(&area->wq);

		tsk->utask->xol_vaddr = 0;
	}
}

/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */
unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
}

/*
 * Called with no locks held.
 * Called in the context of an exiting or an exec-ing thread.
 */
void uprobe_free_utask(struct task_struct *t)
{
	struct uprobe_task *utask = t->utask;

	if (!utask)
		return;

	if (utask->active_uprobe)
		put_uprobe(utask->active_uprobe);

	xol_free_insn_slot(t);
	kfree(utask);
	t->utask = NULL;
}
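
/*
 * Worked example (guarded out): the address arithmetic that ties
 * xol_take_insn_slot() and xol_free_insn_slot() together. With 128-byte
 * slots, slot 3 lives at area->vaddr + 384, and dividing the offset back
 * down recovers the bitmap index. DEMO_SLOT_BYTES is a hypothetical
 * stand-in for UPROBE_XOL_SLOT_BYTES.
 */
#if 0
#include <assert.h>

#define DEMO_SLOT_BYTES 128UL

int main(void)
{
	unsigned long base = 0x7f0000000000UL;			/* area->vaddr */
	unsigned long addr = base + 3 * DEMO_SLOT_BYTES;	/* take slot 3 */

	assert((addr - base) / DEMO_SLOT_BYTES == 3);	/* free recovers 3 */
	return 0;
}
#endif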

/*
 * Called in the context of a new clone/fork from copy_process().
 */
void uprobe_copy_process(struct task_struct *t)
{
	t->utask = NULL;
}

/*
 * Allocate a uprobe_task object for the task.
 * Called when the thread hits a breakpoint for the first time.
 *
 * Returns:
 * - pointer to new uprobe_task on success
 * - NULL otherwise
 */
static struct uprobe_task *add_utask(void)
{
	struct uprobe_task *utask;

	utask = kzalloc(sizeof(*utask), GFP_KERNEL);
	if (unlikely(!utask))
		return NULL;

	current->utask = utask;
	return utask;
}

/* Prepare to single-step the probed instruction out of line. */
static int
pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long vaddr)
{
	if (xol_get_insn_slot(uprobe, vaddr) && !arch_uprobe_pre_xol(&uprobe->arch, regs))
		return 0;

	return -EFAULT;
}
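
/*
 * Illustrative sketch (guarded out): the lazy allocate-on-first-hit
 * pattern of add_utask(), as a userspace analogue using thread-local
 * storage (GCC __thread) in place of current->utask. "demo_" names are
 * hypothetical.
 */
#if 0
#include <stdlib.h>

struct demo_utask { unsigned long xol_vaddr; };
static __thread struct demo_utask *demo_utask;

static struct demo_utask *demo_get_utask(void)
{
	if (!demo_utask)
		demo_utask = calloc(1, sizeof(*demo_utask));
	return demo_utask;	/* NULL means: re-execute and retry later */
}
#endif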

/*
 * If we are singlestepping, then ensure this thread is not connected to
 * non-fatal signals until completion of singlestep. When the xol insn
 * itself triggers the signal, restart the original insn even if the task
 * is already SIGKILL'ed (since coredump should report the correct ip).
 * This is even more important if the task has a handler for SIGSEGV/etc:
 * the _same_ instruction should be repeated again after return from the
 * signal handler, and SSTEP can never finish in this case.
 */
bool uprobe_deny_signal(void)
{
	struct task_struct *t = current;
	struct uprobe_task *utask = t->utask;

	if (likely(!utask || !utask->active_uprobe))
		return false;

	WARN_ON_ONCE(utask->state != UTASK_SSTEP);

	if (signal_pending(t)) {
		spin_lock_irq(&t->sighand->siglock);
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
		spin_unlock_irq(&t->sighand->siglock);

		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
			utask->state = UTASK_SSTEP_TRAPPED;
			set_tsk_thread_flag(t, TIF_UPROBE);
			set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
		}
	}

	return true;
}

/*
 * Avoid singlestepping the original instruction if it is a NOP or can
 * be emulated.
 */
static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs)
{
	if (uprobe->flags & UPROBE_SKIP_SSTEP) {
		if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
			return true;
		uprobe->flags &= ~UPROBE_SKIP_SSTEP;
	}
	return false;
}
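
/*
 * Illustrative sketch (guarded out): the one-way hint in can_skip_sstep().
 * UPROBE_SKIP_SSTEP starts set; the first time the arch code declines to
 * emulate, the flag is cleared so subsequent hits skip the arch call
 * entirely. "demo_" names are hypothetical and the emulation stub here
 * always declines.
 */
#if 0
#define DEMO_SKIP_SSTEP 0x1

struct demo_probe { int flags; };

static int demo_arch_skip_sstep(struct demo_probe *p) { return 0; }

static int demo_can_skip(struct demo_probe *p)
{
	if (p->flags & DEMO_SKIP_SSTEP) {
		if (demo_arch_skip_sstep(p))
			return 1;
		p->flags &= ~DEMO_SKIP_SSTEP;	/* never ask again */
	}
	return 0;
}
#endif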

static void mmf_recalc_uprobes(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!valid_vma(vma, false))
			continue;
		/*
		 * This is not strictly accurate, we can race with
		 * uprobe_unregister() and see the already removed
		 * uprobe if delete_uprobe() was not yet called.
		 */
		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
			return;
	}

	clear_bit(MMF_HAS_UPROBES, &mm->flags);
}

static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
	struct page *page;
	uprobe_opcode_t opcode;
	int result;

	pagefault_disable();
	result = __copy_from_user_inatomic(&opcode, (void __user *)vaddr,
							sizeof(opcode));
	pagefault_enable();

	if (likely(result == 0))
		goto out;

	result = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
	if (result < 0)
		return result;

	copy_opcode(page, vaddr, &opcode);
	put_page(page);
out:
	return is_swbp_insn(&opcode);
}

static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
{
	struct mm_struct *mm = current->mm;
	struct uprobe *uprobe = NULL;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, bp_vaddr);
	if (vma && vma->vm_start <= bp_vaddr) {
		if (valid_vma(vma, false)) {
			struct inode *inode = vma->vm_file->f_mapping->host;
			loff_t offset = vaddr_to_offset(vma, bp_vaddr);

			uprobe = find_uprobe(inode, offset);
		}

		if (!uprobe)
			*is_swbp = is_swbp_at_addr(mm, bp_vaddr);
	} else {
		*is_swbp = -EFAULT;
	}

	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
		mmf_recalc_uprobes(mm);
	up_read(&mm->mmap_sem);

	return uprobe;
}

void __weak arch_uprobe_enable_step(struct arch_uprobe *arch)
{
	user_enable_single_step(current);
}

void __weak arch_uprobe_disable_step(struct arch_uprobe *arch)
{
	user_disable_single_step(current);
}
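
/*
 * Illustrative sketch (guarded out): the fast path of is_swbp_at_addr()
 * boils down to "read one opcode-sized chunk and compare it against the
 * breakpoint instruction". On x86, uprobe_opcode_t is a single 0xcc
 * (int3) byte. This userspace analogue omits the pagefault_disable()/
 * get_user_pages() fallback; "demo_" names are hypothetical.
 */
#if 0
#include <string.h>

typedef unsigned char demo_opcode_t;
#define DEMO_SWBP_INSN 0xcc		/* x86 int3 */

static int demo_is_swbp_at(const void *addr)
{
	demo_opcode_t opcode;

	memcpy(&opcode, addr, sizeof(opcode));
	return opcode == DEMO_SWBP_INSN;
}
#endif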

/*
 * Run the handlers and ask the thread to single-step.
 * Ensure all non-fatal signals cannot interrupt the thread while it
 * single-steps.
 */
static void handle_swbp(struct pt_regs *regs)
{
	struct uprobe_task *utask;
	struct uprobe *uprobe;
	unsigned long bp_vaddr;
	int uninitialized_var(is_swbp);

	bp_vaddr = uprobe_get_swbp_addr(regs);
	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);

	if (!uprobe) {
		if (is_swbp > 0) {
			/* No matching uprobe; signal SIGTRAP. */
			send_sig(SIGTRAP, current, 0);
		} else {
			/*
			 * Either we raced with uprobe_unregister() or we can't
			 * access this memory. The latter is only possible if
			 * another thread plays with our ->mm. In both cases
			 * we can simply restart. If this vma was unmapped we
			 * can pretend this insn was not executed yet and get
			 * the (correct) SIGSEGV after restart.
			 */
			instruction_pointer_set(regs, bp_vaddr);
		}
		return;
	}

	utask = current->utask;
	if (!utask) {
		utask = add_utask();
		/* Cannot allocate; re-execute the instruction. */
		if (!utask)
			goto restart;
	}

	handler_chain(uprobe, regs);
	if (can_skip_sstep(uprobe, regs))
		goto out;

	if (!pre_ssout(uprobe, regs, bp_vaddr)) {
		arch_uprobe_enable_step(&uprobe->arch);
		utask->active_uprobe = uprobe;
		utask->state = UTASK_SSTEP;
		return;
	}

restart:
	/*
	 * cannot singlestep; cannot skip instruction;
	 * re-execute the instruction.
	 */
	instruction_pointer_set(regs, bp_vaddr);
out:
	put_uprobe(uprobe);
}
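
/*
 * Illustrative sketch (guarded out): what "re-execute the instruction"
 * means in practice. A userspace SIGTRAP handler on x86-64/glibc can do
 * the same ip rewind as instruction_pointer_set(regs, bp_vaddr): the CPU
 * reports int3 with the saved ip pointing past the 1-byte 0xcc, so
 * stepping back by the breakpoint insn size (1 here) lands back on the
 * trap. The handler name is hypothetical.
 */
#if 0
#define _GNU_SOURCE
#include <signal.h>
#include <ucontext.h>

static void demo_trap_handler(int sig, siginfo_t *info, void *uc_void)
{
	ucontext_t *uc = uc_void;

	uc->uc_mcontext.gregs[REG_RIP] -= 1;	/* back onto the 0xcc byte */
}
#endif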

/*
 * Perform required fix-ups and disable singlestep.
 * Allow pending signals to take effect.
 */
static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
{
	struct uprobe *uprobe;

	uprobe = utask->active_uprobe;
	if (utask->state == UTASK_SSTEP_ACK)
		arch_uprobe_post_xol(&uprobe->arch, regs);
	else if (utask->state == UTASK_SSTEP_TRAPPED)
		arch_uprobe_abort_xol(&uprobe->arch, regs);
	else
		WARN_ON_ONCE(1);

	arch_uprobe_disable_step(&uprobe->arch);
	put_uprobe(uprobe);
	utask->active_uprobe = NULL;
	utask->state = UTASK_RUNNING;
	xol_free_insn_slot(current);

	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending(); /* see uprobe_deny_signal() */
	spin_unlock_irq(&current->sighand->siglock);
}

/*
 * On breakpoint hit, the breakpoint notifier sets the TIF_UPROBE flag and
 * allows the thread to return from interrupt. After that handle_swbp()
 * sets utask->active_uprobe.
 *
 * On singlestep exception, the singlestep notifier sets the TIF_UPROBE
 * flag and allows the thread to return from interrupt.
 *
 * While returning to userspace, the thread notices the TIF_UPROBE flag
 * and calls uprobe_notify_resume().
 */
void uprobe_notify_resume(struct pt_regs *regs)
{
	struct uprobe_task *utask;

	clear_thread_flag(TIF_UPROBE);

	utask = current->utask;
	if (utask && utask->active_uprobe)
		handle_singlestep(utask, regs);
	else
		handle_swbp(regs);
}
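
/*
 * Illustrative sketch (guarded out): the uprobe_notify_resume() dispatch
 * in miniature. One flag covers two events; whether a single-step is in
 * flight (active_uprobe set) decides which handler runs. "demo_" names
 * are hypothetical.
 */
#if 0
#include <stdbool.h>

struct demo_task { bool tif_uprobe; void *active_uprobe; };

static void demo_handle_singlestep(struct demo_task *t) { }
static void demo_handle_swbp(struct demo_task *t) { }

static void demo_notify_resume(struct demo_task *t)
{
	t->tif_uprobe = false;		/* clear_thread_flag(TIF_UPROBE) */
	if (t->active_uprobe)
		demo_handle_singlestep(t);
	else
		demo_handle_swbp(t);
}
#endif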

/*
 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
 * the notifier mechanism. Set the TIF_UPROBE flag and indicate a
 * breakpoint hit.
 */
int uprobe_pre_sstep_notifier(struct pt_regs *regs)
{
	if (!current->mm || !test_bit(MMF_HAS_UPROBES, &current->mm->flags))
		return 0;

	set_thread_flag(TIF_UPROBE);
	return 1;
}

/*
 * uprobe_post_sstep_notifier gets called in interrupt context as part of
 * the notifier mechanism. Set the TIF_UPROBE flag and indicate completion
 * of singlestep.
 */
int uprobe_post_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (!current->mm || !utask || !utask->active_uprobe)
		/* task is currently not uprobed */
		return 0;

	utask->state = UTASK_SSTEP_ACK;
	set_thread_flag(TIF_UPROBE);
	return 1;
}

static struct notifier_block uprobe_exception_nb = {
	.notifier_call		= arch_uprobe_exception_notify,
	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
};

static int __init init_uprobes(void)
{
	int i;

	for (i = 0; i < UPROBES_HASH_SZ; i++) {
		mutex_init(&uprobes_mutex[i]);
		mutex_init(&uprobes_mmap_mutex[i]);
	}

	return register_die_notifier(&uprobe_exception_nb);
}
module_init(init_uprobes);

static void __exit exit_uprobes(void)
{
}
module_exit(exit_uprobes);
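
/*
 * Illustrative sketch (guarded out): the bucketed locking that
 * init_uprobes() sets up. A pointer is hashed modulo the table size to
 * pick one of the UPROBES_HASH_SZ mutexes, so unrelated inodes rarely
 * contend on the same lock. Userspace analogue with pthreads; "demo_"
 * names are hypothetical.
 */
#if 0
#include <pthread.h>

#define DEMO_HASH_SZ 13

static pthread_mutex_t demo_locks[DEMO_HASH_SZ];

static void demo_init(void)
{
	int i;

	for (i = 0; i < DEMO_HASH_SZ; i++)
		pthread_mutex_init(&demo_locks[i], NULL);
}

/* uprobes_hash() analogue: pick a bucket lock by hashing the pointer. */
static pthread_mutex_t *demo_hash(const void *inode)
{
	return &demo_locks[(unsigned long)inode % DEMO_HASH_SZ];
}
#endif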