/*
 * User-space Probes (UProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
#include <linux/swap.h>		/* try_to_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include "../../mm/internal.h"	/* munlock_vma_page */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE

static struct rb_root uprobes_tree = RB_ROOT;
/*
 * Allows us to skip uprobe_mmap() if there are no uprobe events active
 * at this time.  A fine-grained per-inode count would probably be better?
 */
#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */

#define UPROBES_HASH_SZ	13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

static struct percpu_rw_semaphore dup_mmap_sem;

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN	0

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	atomic_t		ref;
	struct rw_semaphore	register_rwsem;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct uprobe_consumer	*consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	loff_t			offset;
	unsigned long		flags;

	/*
	 * The generic code assumes that it has two members of unknown type
	 * owned by the arch-specific code:
	 *
	 * insn -	copy_insn() saves the original instruction here for
	 *		arch_uprobe_analyze_insn().
	 *
	 * ixol -	potentially modified instruction to execute out of
	 *		line, copied to xol_area by xol_get_insn_slot().
	 */
	struct arch_uprobe	arch;
};

/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, the thread contends for a slot.  It frees the
 * slot after singlestep.  Currently a fixed number of slots are
 * allocated.
 */
struct xol_area {
	wait_queue_head_t		wq;		/* if all slots are busy */
	atomic_t			slot_count;	/* number of in-use slots */
	unsigned long			*bitmap;	/* 0 = free slot */

	struct vm_special_mapping	xol_mapping;
	struct page			*pages[2];
	/*
	 * We keep the vma's vm_start rather than a pointer to the vma
	 * itself.  The probed process or a naughty kernel module could make
	 * the vma go away, and we must handle that reasonably gracefully.
	 */
	unsigned long			vaddr;		/* Page(s) of instruction slots */
};

/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return 1 if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

	if (is_register)
		flags |= VM_WRITE;

	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
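/*
 * These two helpers are exact inverses.  For example (illustrative numbers
 * only): with vm_start == 0x400000, vm_pgoff == 2 and PAGE_SIZE == 4096,
 * the file offset 0x3010 maps to the vaddr
 * 0x400000 + 0x3010 - 0x2000 == 0x401010, and vaddr_to_offset() maps
 * 0x401010 back to 0x3010.
 */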
/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @page is mapped at
 * @page:     the cowed page we are replacing by kpage
 * @kpage:    the modified page we replace page by
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *page, struct page *kpage)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pte_t *ptep;
	int err;
	/* For mmu_notifiers */
	const unsigned long mmun_start = addr;
	const unsigned long mmun_end   = addr + PAGE_SIZE;
	struct mem_cgroup *memcg;

	err = mem_cgroup_try_charge(kpage, vma->vm_mm, GFP_KERNEL, &memcg);
	if (err)
		return err;

	/* For try_to_free_swap() and munlock_vma_page() below */
	lock_page(page);

	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	err = -EAGAIN;
	ptep = page_check_address(page, mm, addr, &ptl, 0);
	if (!ptep)
		goto unlock;

	get_page(kpage);
	page_add_new_anon_rmap(kpage, vma, addr);
	mem_cgroup_commit_charge(kpage, memcg, false);
	lru_cache_add_active_or_unevictable(kpage, vma);

	if (!PageAnon(page)) {
		dec_mm_counter(mm, MM_FILEPAGES);
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	ptep_clear_flush_notify(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));

	page_remove_rmap(page);
	if (!page_mapped(page))
		try_to_free_swap(page);
	pte_unmap_unlock(ptep, ptl);

	if (vma->vm_flags & VM_LOCKED)
		munlock_vma_page(page);
	put_page(page);

	err = 0;
 unlock:
	mem_cgroup_cancel_charge(kpage, memcg);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	unlock_page(page);
	return err;
}
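/*
 * __replace_page() fails with -EAGAIN when the pte it expected has changed
 * under it (page_check_address() returns NULL); its caller,
 * uprobe_write_opcode() below, simply restarts the whole sequence in that
 * case.
 */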
/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}

/**
 * is_trap_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_trap_insn
 * Returns true if @insn is a breakpoint instruction.
 *
 * This function is needed for the case where an architecture has multiple
 * trap instructions (like powerpc).
 */
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
	return is_swbp_insn(insn);
}

static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
	kunmap_atomic(kaddr);
}

static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
	kunmap_atomic(kaddr);
}
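/*
 * verify_opcode() below returns 1 when @new_opcode should actually be
 * written, and 0 when the target already holds the desired state: the
 * breakpoint is already installed (register), or the instruction was
 * already changed behind our back (unregister).  uprobe_write_opcode()
 * treats a return <= 0 as "nothing left to do".
 */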
static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
	uprobe_opcode_t old_opcode;
	bool is_swbp;

	/*
	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
	 * We do not check if it is any other 'trap variant' which could
	 * be conditional trap instruction such as the one powerpc supports.
	 *
	 * The logic is that we do not care if the underlying instruction
	 * is a trap variant; uprobes always wins over any other (gdb)
	 * breakpoint.
	 */
	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
	is_swbp = is_swbp_insn(&old_opcode);

	if (is_swbp_insn(new_opcode)) {
		if (is_swbp)		/* register: already installed? */
			return 0;
	} else {
		if (!is_swbp)		/* unregister: was it changed by us? */
			return 0;
	}

	return 1;
}

/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture.  If an arch has variable-length instructions and the
 * breakpoint instruction is not the smallest-length instruction supported
 * by that architecture, then we need to modify is_trap_at_addr() and
 * uprobe_write_opcode() accordingly.  This would never be a problem for
 * archs that have fixed-length instructions.
 *
 * uprobe_write_opcode - write the opcode at a given virtual address.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_sem held for write.
 * Return 0 (success) or a negative errno.
 */
int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
			uprobe_opcode_t opcode)
{
	struct page *old_page, *new_page;
	struct vm_area_struct *vma;
	int ret;

retry:
	/* Read the page with vaddr into memory */
	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma);
	if (ret <= 0)
		return ret;

	ret = verify_opcode(old_page, vaddr, &opcode);
	if (ret <= 0)
		goto put_old;

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_old;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);
	copy_highpage(new_page, old_page);
	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

	ret = __replace_page(vma, vaddr, old_page, new_page);
	page_cache_release(new_page);
put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}
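/*
 * set_swbp() and set_orig_insn() below are __weak so that an architecture
 * can override them; the default implementations just write the breakpoint
 * or the saved original instruction through uprobe_write_opcode().
 */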
/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)&auprobe->insn);
}

static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
	atomic_inc(&uprobe->ref);
	return uprobe;
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (atomic_dec_and_test(&uprobe->ref))
		kfree(uprobe);
}

static int match_uprobe(struct uprobe *l, struct uprobe *r)
{
	if (l->inode < r->inode)
		return -1;

	if (l->inode > r->inode)
		return 1;

	if (l->offset < r->offset)
		return -1;

	if (l->offset > r->offset)
		return 1;

	return 0;
}

static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe u = { .inode = inode, .offset = offset };
	struct rb_node *n = uprobes_tree.rb_node;
	struct uprobe *uprobe;
	int match;

	while (n) {
		uprobe = rb_entry(n, struct uprobe, rb_node);
		match = match_uprobe(&u, uprobe);
		if (!match)
			return get_uprobe(uprobe);

		if (match < 0)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	return NULL;
}
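/*
 * uprobes_tree is kept in the total order match_uprobe() defines: first by
 * inode (compared by address), then by offset within that inode.  The
 * lookup and insert helpers below all rely on this ordering.
 */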
/*
 * Find a uprobe corresponding to a given inode:offset
 * Acquires uprobes_treelock
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe;

	spin_lock(&uprobes_treelock);
	uprobe = __find_uprobe(inode, offset);
	spin_unlock(&uprobes_treelock);

	return uprobe;
}

static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node **p = &uprobes_tree.rb_node;
	struct rb_node *parent = NULL;
	struct uprobe *u;
	int match;

	while (*p) {
		parent = *p;
		u = rb_entry(parent, struct uprobe, rb_node);
		match = match_uprobe(uprobe, u);
		if (!match)
			return get_uprobe(u);

		if (match < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;

	}

	u = NULL;
	rb_link_node(&uprobe->rb_node, parent, p);
	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
	/* get access + creation ref */
	atomic_set(&uprobe->ref, 2);

	return u;
}

/*
 * Acquire uprobes_treelock.
 * Matching uprobe already exists in rbtree;
 *	increment (access refcount) and return the matching uprobe.
 *
 * No matching uprobe; insert the uprobe in rb_tree;
 *	get a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	spin_lock(&uprobes_treelock);
	u = __insert_uprobe(uprobe);
	spin_unlock(&uprobes_treelock);

	return u;
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return NULL;

	uprobe->inode = igrab(inode);
	uprobe->offset = offset;
	init_rwsem(&uprobe->register_rwsem);
	init_rwsem(&uprobe->consumer_rwsem);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);
	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe) {
		kfree(uprobe);
		uprobe = cur_uprobe;
		iput(inode);
	}

	return uprobe;
}

static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	uc->next = uprobe->consumers;
	uprobe->consumers = uc;
	up_write(&uprobe->consumer_rwsem);
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if the @uc is deleted successfully
 * or return false.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	struct uprobe_consumer **con;
	bool ret = false;

	down_write(&uprobe->consumer_rwsem);
	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
		if (*con == uc) {
			*con = uc->next;
			ret = true;
			break;
		}
	}
	up_write(&uprobe->consumer_rwsem);

	return ret;
}
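/*
 * uprobe->consumers is a singly linked list: consumer_add() and
 * consumer_del() above modify it with consumer_rwsem held for write,
 * while filter_chain() below walks it with the semaphore held only for
 * read.
 */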
static int __copy_insn(struct address_space *mapping, struct file *filp,
			void *insn, int nbytes, loff_t offset)
{
	struct page *page;
	/*
	 * Ensure that the page that has the original instruction is populated
	 * and in page-cache.  If ->readpage == NULL it must be shmem_mapping(),
	 * see uprobe_register().
	 */
	if (mapping->a_ops->readpage)
		page = read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT, filp);
	else
		page = shmem_read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT);
	if (IS_ERR(page))
		return PTR_ERR(page);

	copy_from_page(page, offset, insn, nbytes);
	page_cache_release(page);

	return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping = uprobe->inode->i_mapping;
	loff_t offs = uprobe->offset;
	void *insn = &uprobe->arch.insn;
	int size = sizeof(uprobe->arch.insn);
	int len, err = -EIO;

	/* Copy only available bytes, -EIO if nothing was read */
	do {
		if (offs >= i_size_read(uprobe->inode))
			break;

		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
		err = __copy_insn(mapping, filp, insn, len, offs);
		if (err)
			break;

		insn += len;
		offs += len;
		size -= len;
	} while (size);

	return err;
}
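/*
 * Note that the sizeof(uprobe->arch.insn) bytes copy_insn() wants may
 * cross a page boundary; the loop above therefore copies at most the rest
 * of the current page per iteration and continues on the next page until
 * the buffer is full or the file ends.
 */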
static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
				struct mm_struct *mm, unsigned long vaddr)
{
	int ret = 0;

	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		return ret;

	/* TODO: move this into _register, until then we abuse this sem. */
	down_write(&uprobe->consumer_rwsem);
	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		goto out;

	ret = copy_insn(uprobe, file);
	if (ret)
		goto out;

	ret = -ENOTSUPP;
	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
		goto out;

	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
	if (ret)
		goto out;

	/* uprobe_write_opcode() assumes we don't cross page boundary */
	BUG_ON((uprobe->offset & ~PAGE_MASK) +
			UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);

	smp_wmb(); /* pairs with rmb() in find_active_uprobe() */
	set_bit(UPROBE_COPY_INSN, &uprobe->flags);

 out:
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static inline bool consumer_filter(struct uprobe_consumer *uc,
				   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	return !uc->filter || uc->filter(uc, ctx, mm);
}

static bool filter_chain(struct uprobe *uprobe,
			 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct uprobe_consumer *uc;
	bool ret = false;

	down_read(&uprobe->consumer_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		ret = consumer_filter(uc, ctx, mm);
		if (ret)
			break;
	}
	up_read(&uprobe->consumer_rwsem);

	return ret;
}
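/*
 * A consumer with no ->filter callback accepts every mm, so filter_chain()
 * returns true as soon as any registered consumer wants the probe in @mm.
 */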
static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	bool first_uprobe;
	int ret;

	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
	if (ret)
		return ret;

	/*
	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
	 * the task can hit this breakpoint right after __replace_page().
	 */
	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
	if (first_uprobe)
		set_bit(MMF_HAS_UPROBES, &mm->flags);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		clear_bit(MMF_HAS_UPROBES, &mm->flags);

	return ret;
}

static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
	set_bit(MMF_RECALC_UPROBES, &mm->flags);
	return set_orig_insn(&uprobe->arch, mm, vaddr);
}

static inline bool uprobe_is_active(struct uprobe *uprobe)
{
	return !RB_EMPTY_NODE(&uprobe->rb_node);
}
/*
 * There could be threads that have already hit the breakpoint. They
 * will recheck the current insn and restart if find_uprobe() fails.
 * See find_active_uprobe().
 */
static void delete_uprobe(struct uprobe *uprobe)
{
	if (WARN_ON(!uprobe_is_active(uprobe)))
		return;

	spin_lock(&uprobes_treelock);
	rb_erase(&uprobe->rb_node, &uprobes_tree);
	spin_unlock(&uprobes_treelock);
	RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
	iput(uprobe->inode);
	put_uprobe(uprobe);
}

struct map_info {
	struct map_info *next;
	struct mm_struct *mm;
	unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
	struct map_info *next = info->next;
	kfree(info);
	return next;
}
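/*
 * build_map_info() gathers an (mm, vaddr) entry for every vma that
 * currently maps the probed range.  It cannot sleep inside
 * i_mmap_lock_read(), so it tries opportunistic GFP_NOWAIT allocations
 * first, counts the vmas it could not record in 'more', then drops the
 * lock, allocates the missing entries with GFP_KERNEL and retries the
 * whole walk.
 */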
static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct map_info *curr = NULL;
	struct map_info *prev = NULL;
	struct map_info *info;
	int more = 0;

 again:
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		if (!prev && !more) {
			/*
			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
			 * reclaim. This is optimistic, no harm done if it fails.
			 */
			prev = kmalloc(sizeof(struct map_info),
					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (prev)
				prev->next = NULL;
		}
		if (!prev) {
			more++;
			continue;
		}

		if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
			continue;

		info = prev;
		prev = prev->next;
		info->next = curr;
		curr = info;

		info->mm = vma->vm_mm;
		info->vaddr = offset_to_vaddr(vma, offset);
	}
	i_mmap_unlock_read(mapping);

	if (!more)
		goto out;

	prev = curr;
	while (curr) {
		mmput(curr->mm);
		curr = curr->next;
	}

	do {
		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
		if (!info) {
			curr = ERR_PTR(-ENOMEM);
			goto out;
		}
		info->next = prev;
		prev = info;
	} while (--more);

	goto again;
 out:
	while (prev)
		prev = free_map_info(prev);
	return curr;
}
static int
register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
{
	bool is_register = !!new;
	struct map_info *info;
	int err = 0;

	percpu_down_write(&dup_mmap_sem);
	info = build_map_info(uprobe->inode->i_mapping,
					uprobe->offset, is_register);
	if (IS_ERR(info)) {
		err = PTR_ERR(info);
		goto out;
	}

	while (info) {
		struct mm_struct *mm = info->mm;
		struct vm_area_struct *vma;

		if (err && is_register)
			goto free;

		down_write(&mm->mmap_sem);
		vma = find_vma(mm, info->vaddr);
		if (!vma || !valid_vma(vma, is_register) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			goto unlock;

		if (vma->vm_start > info->vaddr ||
		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
			goto unlock;

		if (is_register) {
			/* consult only the "caller", new consumer. */
			if (consumer_filter(new,
					UPROBE_FILTER_REGISTER, mm))
				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
			if (!filter_chain(uprobe,
					UPROBE_FILTER_UNREGISTER, mm))
				err |= remove_breakpoint(uprobe, mm, info->vaddr);
		}

 unlock:
		up_write(&mm->mmap_sem);
 free:
		mmput(mm);
		info = free_map_info(info);
	}
 out:
	percpu_up_write(&dup_mmap_sem);
	return err;
}

static int __uprobe_register(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	consumer_add(uprobe, uc);
	return register_for_each_vma(uprobe, uc);
}

static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	int err;

	if (WARN_ON(!consumer_del(uprobe, uc)))
		return;

	err = register_for_each_vma(uprobe, NULL);
	/* TODO: can't unregister? schedule a worker thread */
	if (!uprobe->consumers && !err)
		delete_uprobe(uprobe);
}
/*
 * uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e. first consumer for a @inode:@offset
 * tuple).  Creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete.  Creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters.
 *
 * Return errno if it cannot successfully install probes
 * else return 0 (success)
 */
int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	/* Uprobe must have at least one set consumer */
	if (!uc->handler && !uc->ret_handler)
		return -EINVAL;

	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
	if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping))
		return -EIO;
	/* Racy, just to catch the obvious mistakes */
	if (offset > i_size_read(inode))
		return -EINVAL;

 retry:
	uprobe = alloc_uprobe(inode, offset);
	if (!uprobe)
		return -ENOMEM;
	/*
	 * We can race with uprobe_unregister()->delete_uprobe().
	 * Check uprobe_is_active() and retry if it is false.
	 */
	down_write(&uprobe->register_rwsem);
	ret = -EAGAIN;
	if (likely(uprobe_is_active(uprobe))) {
		ret = __uprobe_register(uprobe, uc);
		if (ret)
			__uprobe_unregister(uprobe, uc);
	}
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}
EXPORT_SYMBOL_GPL(uprobe_register);
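/*
 * A minimal usage sketch (hypothetical consumer, for illustration only;
 * assumes the uprobe_consumer layout from <linux/uprobes.h>):
 *
 *	static int my_handler(struct uprobe_consumer *self, struct pt_regs *regs)
 *	{
 *		return 0;	(or UPROBE_HANDLER_REMOVE to drop the probe)
 *	}
 *
 *	static struct uprobe_consumer my_uc = { .handler = my_handler };
 *
 *	err = uprobe_register(inode, offset, &my_uc);
 *	...
 *	uprobe_unregister(inode, offset, &my_uc);
 */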
/*
 * uprobe_apply - add or remove the breakpoints of an already registered probe.
 * @inode: the file in which the probe resides.
 * @offset: offset from the start of the file.
 * @uc: consumer which wants to add more or remove some breakpoints
 * @add: add or remove the breakpoints
 */
int uprobe_apply(struct inode *inode, loff_t offset,
			struct uprobe_consumer *uc, bool add)
{
	struct uprobe *uprobe;
	struct uprobe_consumer *con;
	int ret = -ENOENT;

	uprobe = find_uprobe(inode, offset);
	if (WARN_ON(!uprobe))
		return ret;

	down_write(&uprobe->register_rwsem);
	for (con = uprobe->consumers; con && con != uc ; con = con->next)
		;
	if (con)
		ret = register_for_each_vma(uprobe, add ? uc : NULL);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);

	return ret;
}

/*
 * uprobe_unregister - unregister an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;

	uprobe = find_uprobe(inode, offset);
	if (WARN_ON(!uprobe))
		return;

	down_write(&uprobe->register_rwsem);
	__uprobe_unregister(uprobe, uc);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister);

static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	int err = 0;

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long vaddr;
		loff_t offset;

		if (!valid_vma(vma, false) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			continue;

		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
		if (uprobe->offset <  offset ||
		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
			continue;

		vaddr = offset_to_vaddr(vma, uprobe->offset);
		err |= remove_breakpoint(uprobe, mm, vaddr);
	}
	up_read(&mm->mmap_sem);

	return err;
}

static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
	struct rb_node *n = uprobes_tree.rb_node;

	while (n) {
		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);

		if (inode < u->inode) {
			n = n->rb_left;
		} else if (inode > u->inode) {
			n = n->rb_right;
		} else {
			if (max < u->offset)
				n = n->rb_left;
			else if (min > u->offset)
				n = n->rb_right;
			else
				break;
		}
	}

	return n;
}
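/*
 * find_node_in_range() stops at whichever node it first finds whose offset
 * lies inside [min, max] for @inode; it is not necessarily the leftmost
 * match.  build_probe_list() below therefore walks rb_prev() and rb_next()
 * from that node to collect the whole range.
 */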
/*
 * For a given range in vma, build a list of probes that need to be inserted.
 */
static void build_probe_list(struct inode *inode,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct list_head *head)
{
	loff_t min, max;
	struct rb_node *n, *t;
	struct uprobe *u;

	INIT_LIST_HEAD(head);
	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	if (n) {
		for (t = n; t; t = rb_prev(t)) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset < min)
				break;
			list_add(&u->pending_list, head);
			get_uprobe(u);
		}
		for (t = n; (t = rb_next(t)); ) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset > max)
				break;
			list_add(&u->pending_list, head);
			get_uprobe(u);
		}
	}
	spin_unlock(&uprobes_treelock);
}

/*
 * Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
 *
 * Currently we ignore all errors and always return 0, the callers
 * can't handle the failure anyway.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;

	if (no_uprobe_events() || !valid_vma(vma, true))
		return 0;

	inode = file_inode(vma->vm_file);
	if (!inode)
		return 0;

	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
	/*
	 * We can race with uprobe_unregister(), this uprobe can be already
	 * removed. But in this case filter_chain() must return false, all
	 * consumers have gone away.
	 */
	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		if (!fatal_signal_pending(current) &&
		    filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
		}
		put_uprobe(uprobe);
	}
	mutex_unlock(uprobes_mmap_hash(inode));

	return 0;
}

static bool
vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	loff_t min, max;
	struct inode *inode;
	struct rb_node *n;

	inode = file_inode(vma->vm_file);

	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	spin_unlock(&uprobes_treelock);

	return !!n;
}

/*
 * Called in context of a munmap of a vma.
 */
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (no_uprobe_events() || !valid_vma(vma, false))
		return;

	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
		return;

	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
		return;

	if (vma_has_uprobes(vma, start, end))
		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
}

/* Slot allocation for XOL */
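/*
 * The XOL area is a single page carved into UINSNS_PER_PAGE slots of
 * UPROBE_XOL_SLOT_BYTES each.  area->bitmap tracks which slots are in use
 * (0 = free) and waiters sleep on area->wq when every slot is busy.
 */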
static struct xol_area *__create_xol_area(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	uprobe_opcode_t insn = UPROBE_SWBP_INSN;
	struct xol_area *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		goto out;

	area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
	if (!area->bitmap)
		goto free_area;

	area->xol_mapping.name = "[uprobes]";
	area->xol_mapping.pages = area->pages;
	area->pages[0] = alloc_page(GFP_HIGHUSER);
	if (!area->pages[0])
		goto free_bitmap;
	area->pages[1] = NULL;

	area->vaddr = vaddr;
	init_waitqueue_head(&area->wq);
	/* Reserve the 1st slot for get_trampoline_vaddr() */
	set_bit(0, area->bitmap);
	atomic_set(&area->slot_count, 1);
	copy_to_page(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);

	if (!xol_add_vma(mm, area))
		return area;

	__free_page(area->pages[0]);
 free_bitmap:
	kfree(area->bitmap);
 free_area:
	kfree(area);
 out:
	return NULL;
}
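/*
 * Editor's note: the XOL area is a single page carved into fixed-size
 * slots. With the definitions at the top of this file,
 *
 *	UINSNS_PER_PAGE = PAGE_SIZE / UPROBE_XOL_SLOT_BYTES
 *
 * so, assuming a 4096-byte page and (as on x86) 128-byte slots, the area
 * holds 32 slots; slot 0 is reserved above for the uretprobe trampoline,
 * leaving 31 slots for out-of-line single-stepping. A slot's address is
 *
 *	slot_addr = area->vaddr + slot_nr * UPROBE_XOL_SLOT_BYTES;
 *
 * which is exactly the computation xol_take_insn_slot() performs below.
 */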
/*
 * get_xol_area - Allocate process's xol_area if necessary.
 * This area will be used for storing instructions for execution out of line.
 *
 * Returns the allocated area or NULL.
 */
static struct xol_area *get_xol_area(void)
{
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	if (!mm->uprobes_state.xol_area)
		__create_xol_area(0);

	area = mm->uprobes_state.xol_area;
	smp_read_barrier_depends();	/* pairs with wmb in xol_add_vma() */
	return area;
}

/*
 * uprobe_clear_state - Free the area allocated for slots.
 */
void uprobe_clear_state(struct mm_struct *mm)
{
	struct xol_area *area = mm->uprobes_state.xol_area;

	if (!area)
		return;

	put_page(area->pages[0]);
	kfree(area->bitmap);
	kfree(area);
}

void uprobe_start_dup_mmap(void)
{
	percpu_down_read(&dup_mmap_sem);
}

void uprobe_end_dup_mmap(void)
{
	percpu_up_read(&dup_mmap_sem);
}

void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
	newmm->uprobes_state.xol_area = NULL;

	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
		set_bit(MMF_HAS_UPROBES, &newmm->flags);
		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
	}
}
/*
 * xol_take_insn_slot - search for a free slot; wait if they are all busy.
 */
static unsigned long xol_take_insn_slot(struct xol_area *area)
{
	unsigned long slot_addr;
	int slot_nr;

	do {
		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
		if (slot_nr < UINSNS_PER_PAGE) {
			if (!test_and_set_bit(slot_nr, area->bitmap))
				break;

			slot_nr = UINSNS_PER_PAGE;
			continue;
		}
		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
	} while (slot_nr >= UINSNS_PER_PAGE);

	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
	atomic_inc(&area->slot_count);

	return slot_addr;
}

/*
 * xol_get_insn_slot - allocate a slot for xol.
 * Returns the allocated slot address or 0.
 */
static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
{
	struct xol_area *area;
	unsigned long xol_vaddr;

	area = get_xol_area();
	if (!area)
		return 0;

	xol_vaddr = xol_take_insn_slot(area);
	if (unlikely(!xol_vaddr))
		return 0;

	arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));

	return xol_vaddr;
}
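/*
 * Editor's note: xol_take_insn_slot() claims a slot without taking a lock:
 * find_first_zero_bit() only *finds* a candidate, two threads can race to
 * the same bit, and the atomic test_and_set_bit() is what actually decides
 * the winner; the loser restarts the scan. A minimal sketch of the same
 * pattern in isolation (names hypothetical):
 *
 *	for (;;) {
 *		int nr = find_first_zero_bit(bitmap, NBITS);
 *		if (nr >= NBITS)
 *			break;			// everything is claimed
 *		if (!test_and_set_bit(nr, bitmap))
 *			return nr;		// we won the race for bit nr
 *		// somebody else took it first: rescan
 *	}
 */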
/*
 * xol_free_insn_slot - If slot was earlier allocated by
 * @xol_get_insn_slot(), make the slot available for
 * subsequent requests.
 */
static void xol_free_insn_slot(struct task_struct *tsk)
{
	struct xol_area *area;
	unsigned long vma_end;
	unsigned long slot_addr;

	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
		return;

	slot_addr = tsk->utask->xol_vaddr;
	if (unlikely(!slot_addr))
		return;

	area = tsk->mm->uprobes_state.xol_area;
	vma_end = area->vaddr + PAGE_SIZE;
	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
		unsigned long offset;
		int slot_nr;

		offset = slot_addr - area->vaddr;
		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
		if (slot_nr >= UINSNS_PER_PAGE)
			return;

		clear_bit(slot_nr, area->bitmap);
		atomic_dec(&area->slot_count);
		if (waitqueue_active(&area->wq))
			wake_up(&area->wq);

		tsk->utask->xol_vaddr = 0;
	}
}

void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
				  void *src, unsigned long len)
{
	/* Initialize the slot */
	copy_to_page(page, vaddr, src, len);

	/*
	 * We probably need flush_icache_user_range() but it needs vma.
	 * This should work on most architectures by default. If an
	 * architecture needs to do something different it can define
	 * its own version of the function.
	 */
	flush_dcache_page(page);
}
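/*
 * Editor's note: the __weak definition above is only a default; an
 * architecture overrides it simply by providing a non-weak function with
 * the same signature in its own code, which the linker then prefers. A
 * hypothetical sketch of such an override (the flush helper is a
 * stand-in, not a real API):
 *
 *	void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
 *				   void *src, unsigned long len)
 *	{
 *		copy_to_page(page, vaddr, src, len);
 *		my_arch_sync_icache(page, vaddr, len);	// hypothetical
 *	}
 */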
/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */
unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
}

unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (unlikely(utask && utask->active_uprobe))
		return utask->vaddr;

	return instruction_pointer(regs);
}
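/*
 * Editor's note: uprobe_get_swbp_addr() relies on the breakpoint trap
 * leaving the instruction pointer just past the breakpoint instruction.
 * On x86, for example, UPROBE_SWBP_INSN is the one-byte int3 (0xcc), so
 * after the trap the saved ip points one byte past the probed address and
 *
 *	bp_vaddr = instruction_pointer(regs) - 1;
 *
 * Architectures whose trap reports the ip differently override this
 * __weak helper.
 */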
static struct return_instance *free_ret_instance(struct return_instance *ri)
{
	struct return_instance *next = ri->next;
	put_uprobe(ri->uprobe);
	kfree(ri);
	return next;
}

/*
 * Called with no locks held.
 * Called in context of an exiting or an exec-ing thread.
 */
void uprobe_free_utask(struct task_struct *t)
{
	struct uprobe_task *utask = t->utask;
	struct return_instance *ri;

	if (!utask)
		return;

	if (utask->active_uprobe)
		put_uprobe(utask->active_uprobe);

	ri = utask->return_instances;
	while (ri)
		ri = free_ret_instance(ri);

	xol_free_insn_slot(t);
	kfree(utask);
	t->utask = NULL;
}

/*
 * Allocate a uprobe_task object for the task if necessary.
 * Called when the thread hits a breakpoint.
 *
 * Returns:
 * - pointer to new uprobe_task on success
 * - NULL otherwise
 */
static struct uprobe_task *get_utask(void)
{
	if (!current->utask)
		current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
	return current->utask;
}

static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
{
	struct uprobe_task *n_utask;
	struct return_instance **p, *o, *n;

	n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
	if (!n_utask)
		return -ENOMEM;
	t->utask = n_utask;

	p = &n_utask->return_instances;
	for (o = o_utask->return_instances; o; o = o->next) {
		n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
		if (!n)
			return -ENOMEM;

		*n = *o;
		get_uprobe(n->uprobe);
		n->next = NULL;

		*p = n;
		p = &n->next;
		n_utask->depth++;
	}

	return 0;
}

static void uprobe_warn(struct task_struct *t, const char *msg)
{
	pr_warn("uprobe: %s:%d failed to %s\n",
			current->comm, current->pid, msg);
}

static void dup_xol_work(struct callback_head *work)
{
	if (current->flags & PF_EXITING)
		return;

	if (!__create_xol_area(current->utask->dup_xol_addr))
		uprobe_warn(current, "dup xol area");
}
/*
 * Called in context of a new clone/fork from copy_process.
 */
void uprobe_copy_process(struct task_struct *t, unsigned long flags)
{
	struct uprobe_task *utask = current->utask;
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	t->utask = NULL;

	if (!utask || !utask->return_instances)
		return;

	if (mm == t->mm && !(flags & CLONE_VFORK))
		return;

	if (dup_utask(t, utask))
		return uprobe_warn(t, "dup ret instances");

	/* The task can fork() after dup_xol_work() fails */
	area = mm->uprobes_state.xol_area;
	if (!area)
		return uprobe_warn(t, "dup xol area");

	if (mm == t->mm)
		return;

	t->utask->dup_xol_addr = area->vaddr;
	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
	task_work_add(t, &t->utask->dup_xol_work, true);
}

/*
 * The current notion of area->vaddr assumes that the trampoline address
 * always equals area->vaddr.
 *
 * Returns -1 in case the xol_area is not allocated.
 */
static unsigned long get_trampoline_vaddr(void)
{
	struct xol_area *area;
	unsigned long trampoline_vaddr = -1;

	area = current->mm->uprobes_state.xol_area;
	smp_read_barrier_depends();
	if (area)
		trampoline_vaddr = area->vaddr;

	return trampoline_vaddr;
}
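/*
 * Editor's note: a minimal sketch (not part of this file) of the consumer
 * side that the uretprobe machinery below serves. A consumer that sets
 * ->ret_handler is called when the probed function *returns*, via the
 * trampoline slot whose address get_trampoline_vaddr() reports. Names
 * other than the uprobe_consumer fields and uprobe_register() are
 * hypothetical:
 *
 *	static int my_ret_handler(struct uprobe_consumer *uc,
 *				  unsigned long func, struct pt_regs *regs)
 *	{
 *		// func is the entry address of the returning function
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer my_uc = { .ret_handler = my_ret_handler };
 *	err = uprobe_register(inode, my_offset, &my_uc);
 */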
static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
					struct pt_regs *regs)
{
	struct return_instance *ri = utask->return_instances;
	enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;

	while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
		ri = free_ret_instance(ri);
		utask->depth--;
	}
	utask->return_instances = ri;
}

static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct return_instance *ri;
	struct uprobe_task *utask;
	unsigned long orig_ret_vaddr, trampoline_vaddr;
	bool chained;

	if (!get_xol_area())
		return;

	utask = get_utask();
	if (!utask)
		return;

	if (utask->depth >= MAX_URETPROBE_DEPTH) {
		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
				" nestedness limit pid/tgid=%d/%d\n",
				current->pid, current->tgid);
		return;
	}

	ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
	if (!ri)
		return;

	trampoline_vaddr = get_trampoline_vaddr();
	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
	if (orig_ret_vaddr == -1)
		goto fail;

	/* drop the entries invalidated by longjmp() */
	chained = (orig_ret_vaddr == trampoline_vaddr);
	cleanup_return_instances(utask, chained, regs);

	/*
	 * We don't want to keep the trampoline address on the stack; rather,
	 * we keep the original return address of the first caller through
	 * all the subsequent instances. This also makes breakpoint unwinding
	 * easier.
	 */
	if (chained) {
		if (!utask->return_instances) {
			/*
			 * This situation is not possible. Likely we have an
			 * attack from user-space.
			 */
			uprobe_warn(current, "handle tail call");
			goto fail;
		}
		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
	}

	ri->uprobe = get_uprobe(uprobe);
	ri->func = instruction_pointer(regs);
	ri->stack = user_stack_pointer(regs);
	ri->orig_ret_vaddr = orig_ret_vaddr;
	ri->chained = chained;

	utask->depth++;
	ri->next = utask->return_instances;
	utask->return_instances = ri;

	return;
 fail:
	kfree(ri);
}
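/*
 * Editor's note: a worked example of the "chained" flag above, assuming
 * uretprobes on both f1() and f2() and a tail call from f1() to f2():
 *
 *	caller() calls f1()	-> f1's return address is hijacked;
 *				   orig_ret_vaddr = return site in caller()
 *	f1() tail-calls f2()	-> f2 reuses f1's return slot, which already
 *				   holds the trampoline, so the hijack sees
 *				   orig_ret_vaddr == trampoline_vaddr, marks
 *				   the new instance chained, and copies the
 *				   real return address from the instance
 *				   below it.
 *
 * This is why a chained instance with an empty return_instances list can
 * only mean corruption (or an attack) and is rejected above.
 */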
/* Prepare to single-step probed instruction out of line. */
static int
pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
{
	struct uprobe_task *utask;
	unsigned long xol_vaddr;
	int err;

	utask = get_utask();
	if (!utask)
		return -ENOMEM;

	xol_vaddr = xol_get_insn_slot(uprobe);
	if (!xol_vaddr)
		return -ENOMEM;

	utask->xol_vaddr = xol_vaddr;
	utask->vaddr = bp_vaddr;

	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
	if (unlikely(err)) {
		xol_free_insn_slot(current);
		return err;
	}

	utask->active_uprobe = uprobe;
	utask->state = UTASK_SSTEP;
	return 0;
}
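/*
 * Editor's note: a compact sketch of the single-step state machine that
 * pre_ssout() starts (the states are the real UTASK_* values; the
 * transition labels summarize the code in this file):
 *
 *	UTASK_RUNNING --handle_swbp()/pre_ssout()--> UTASK_SSTEP
 *	UTASK_SSTEP --trap after the xol insn--> UTASK_SSTEP_ACK
 *	UTASK_SSTEP --fatal signal or xol insn trapped--> UTASK_SSTEP_TRAPPED
 *
 * handle_singlestep() then runs arch_uprobe_post_xol() for the ACK case
 * or arch_uprobe_abort_xol() for the TRAPPED case; both paths end in
 * UTASK_RUNNING with the xol slot freed.
 */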
/*
 * If we are singlestepping, then ensure this thread is not connected to
 * non-fatal signals until completion of singlestep. When xol insn itself
 * triggers the signal, restart the original insn even if the task is
 * already SIGKILL'ed (since coredump should report the correct ip). This
 * is even more important if the task has a handler for SIGSEGV/etc: the
 * _same_ instruction should be repeated again after return from the signal
 * handler, and SSTEP can never finish in this case.
 */
bool uprobe_deny_signal(void)
{
	struct task_struct *t = current;
	struct uprobe_task *utask = t->utask;

	if (likely(!utask || !utask->active_uprobe))
		return false;

	WARN_ON_ONCE(utask->state != UTASK_SSTEP);

	if (signal_pending(t)) {
		spin_lock_irq(&t->sighand->siglock);
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
		spin_unlock_irq(&t->sighand->siglock);

		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
			utask->state = UTASK_SSTEP_TRAPPED;
			set_tsk_thread_flag(t, TIF_UPROBE);
		}
	}

	return true;
}

static void mmf_recalc_uprobes(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!valid_vma(vma, false))
			continue;
		/*
		 * This is not strictly accurate; we can race with
		 * uprobe_unregister() and see the already removed
		 * uprobe if delete_uprobe() was not yet called.
		 * Or this uprobe can be filtered out.
		 */
		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
			return;
	}

	clear_bit(MMF_HAS_UPROBES, &mm->flags);
}
static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
	struct page *page;
	uprobe_opcode_t opcode;
	int result;

	pagefault_disable();
	result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
							sizeof(opcode));
	pagefault_enable();

	if (likely(result == 0))
		goto out;

	result = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
	if (result < 0)
		return result;

	copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
	put_page(page);
 out:
	/* This needs to return true for any variant of the trap insn */
	return is_trap_insn(&opcode);
}

static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
{
	struct mm_struct *mm = current->mm;
	struct uprobe *uprobe = NULL;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, bp_vaddr);
	if (vma && vma->vm_start <= bp_vaddr) {
		if (valid_vma(vma, false)) {
			struct inode *inode = file_inode(vma->vm_file);
			loff_t offset = vaddr_to_offset(vma, bp_vaddr);

			uprobe = find_uprobe(inode, offset);
		}

		if (!uprobe)
			*is_swbp = is_trap_at_addr(mm, bp_vaddr);
	} else {
		*is_swbp = -EFAULT;
	}

	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
		mmf_recalc_uprobes(mm);
	up_read(&mm->mmap_sem);

	return uprobe;
}
static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct uprobe_consumer *uc;
	int remove = UPROBE_HANDLER_REMOVE;
	bool need_prep = false; /* prepare return uprobe, when needed */

	down_read(&uprobe->register_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		int rc = 0;

		if (uc->handler) {
			rc = uc->handler(uc, regs);
			WARN(rc & ~UPROBE_HANDLER_MASK,
				"bad rc=0x%x from %pf()\n", rc, uc->handler);
		}

		if (uc->ret_handler)
			need_prep = true;

		remove &= rc;
	}

	if (need_prep && !remove)
		prepare_uretprobe(uprobe, regs); /* put bp at return */

	if (remove && uprobe->consumers) {
		WARN_ON(!uprobe_is_active(uprobe));
		unapply_uprobe(uprobe, current->mm);
	}
	up_read(&uprobe->register_rwsem);
}

static void
handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
{
	struct uprobe *uprobe = ri->uprobe;
	struct uprobe_consumer *uc;

	down_read(&uprobe->register_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		if (uc->ret_handler)
			uc->ret_handler(uc, ri->func, regs);
	}
	up_read(&uprobe->register_rwsem);
}

static struct return_instance *find_next_ret_chain(struct return_instance *ri)
{
	bool chained;

	do {
		chained = ri->chained;
		ri = ri->next;	/* can't be NULL if chained */
	} while (chained);

	return ri;
}
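/*
 * Editor's note: handler_chain() ANDs the consumers' return codes into
 * "remove", which starts as UPROBE_HANDLER_REMOVE. The breakpoint is thus
 * unapplied from this mm only if *every* consumer asked for removal; a
 * single consumer returning 0 keeps it alive. Sketch of a self-removing
 * consumer (everything except the uprobe API names is hypothetical):
 *
 *	static int once_handler(struct uprobe_consumer *uc, struct pt_regs *regs)
 *	{
 *		my_record_hit(regs);			// hypothetical
 *		return UPROBE_HANDLER_REMOVE;		// done with this mm
 *	}
 */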
static void handle_trampoline(struct pt_regs *regs)
{
	struct uprobe_task *utask;
	struct return_instance *ri, *next;
	bool valid;

	utask = current->utask;
	if (!utask)
		goto sigill;

	ri = utask->return_instances;
	if (!ri)
		goto sigill;

	do {
		/*
		 * We should throw out the frames invalidated by longjmp().
		 * If this chain is valid, then the next one should be alive
		 * or NULL; the latter case means that nobody but ri->func
		 * could hit this trampoline on return. TODO: sigaltstack().
		 */
		next = find_next_ret_chain(ri);
		valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);

		instruction_pointer_set(regs, ri->orig_ret_vaddr);
		do {
			if (valid)
				handle_uretprobe_chain(ri, regs);
			ri = free_ret_instance(ri);
			utask->depth--;
		} while (ri != next);
	} while (!valid);

	utask->return_instances = ri;
	return;

 sigill:
	uprobe_warn(current, "handle uretprobe, sending SIGILL.");
	force_sig_info(SIGILL, SEND_SIG_FORCED, current);
}

bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
{
	return false;
}

bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
				struct pt_regs *regs)
{
	return true;
}
/*
 * Run handler and ask thread to singlestep.
 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
 */
static void handle_swbp(struct pt_regs *regs)
{
	struct uprobe *uprobe;
	unsigned long bp_vaddr;
	int uninitialized_var(is_swbp);

	bp_vaddr = uprobe_get_swbp_addr(regs);
	if (bp_vaddr == get_trampoline_vaddr())
		return handle_trampoline(regs);

	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
	if (!uprobe) {
		if (is_swbp > 0) {
			/* No matching uprobe; signal SIGTRAP. */
			send_sig(SIGTRAP, current, 0);
		} else {
			/*
			 * Either we raced with uprobe_unregister() or we can't
			 * access this memory. The latter is only possible if
			 * another thread plays with our ->mm. In both cases
			 * we can simply restart. If this vma was unmapped we
			 * can pretend this insn was not executed yet and get
			 * the (correct) SIGSEGV after restart.
			 */
			instruction_pointer_set(regs, bp_vaddr);
		}
		return;
	}

	/* change it in advance for ->handler() and restart */
	instruction_pointer_set(regs, bp_vaddr);

	/*
	 * TODO: move copy_insn/etc into _register and remove this hack.
	 * After we hit the bp, _unregister + _register can install the
	 * new and not-yet-analyzed uprobe at the same address, restart.
	 */
	smp_rmb(); /* pairs with wmb() in install_breakpoint() */
	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
		goto out;

	/* Tracing handlers use ->utask to communicate with fetch methods */
	if (!get_utask())
		goto out;

	if (arch_uprobe_ignore(&uprobe->arch, regs))
		goto out;

	handler_chain(uprobe, regs);

	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
		goto out;

	if (!pre_ssout(uprobe, regs, bp_vaddr))
		return;

	/* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */
 out:
	put_uprobe(uprobe);
}
/*
 * Perform required fix-ups and disable singlestep.
 * Allow pending signals to take effect.
 */
static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
{
	struct uprobe *uprobe;
	int err = 0;

	uprobe = utask->active_uprobe;
	if (utask->state == UTASK_SSTEP_ACK)
		err = arch_uprobe_post_xol(&uprobe->arch, regs);
	else if (utask->state == UTASK_SSTEP_TRAPPED)
		arch_uprobe_abort_xol(&uprobe->arch, regs);
	else
		WARN_ON_ONCE(1);

	put_uprobe(uprobe);
	utask->active_uprobe = NULL;
	utask->state = UTASK_RUNNING;
	xol_free_insn_slot(current);

	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending(); /* see uprobe_deny_signal() */
	spin_unlock_irq(&current->sighand->siglock);

	if (unlikely(err)) {
		uprobe_warn(current, "execute the probed insn, sending SIGILL.");
		force_sig_info(SIGILL, SEND_SIG_FORCED, current);
	}
}

/*
 * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
 * allows the thread to return from interrupt. After that handle_swbp()
 * sets utask->active_uprobe.
 *
 * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
 * and allows the thread to return from interrupt.
 *
 * While returning to userspace, thread notices the TIF_UPROBE flag and calls
 * uprobe_notify_resume().
 */
void uprobe_notify_resume(struct pt_regs *regs)
{
	struct uprobe_task *utask;

	clear_thread_flag(TIF_UPROBE);

	utask = current->utask;
	if (utask && utask->active_uprobe)
		handle_singlestep(utask, regs);
	else
		handle_swbp(regs);
}
/*
 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
 * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
 */
int uprobe_pre_sstep_notifier(struct pt_regs *regs)
{
	if (!current->mm)
		return 0;

	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
	    (!current->utask || !current->utask->return_instances))
		return 0;

	set_thread_flag(TIF_UPROBE);
	return 1;
}

/*
 * uprobe_post_sstep_notifier gets called in interrupt context as part of notifier
 * mechanism. Set TIF_UPROBE flag and indicate completion of singlestep.
 */
int uprobe_post_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (!current->mm || !utask || !utask->active_uprobe)
		/* task is currently not uprobed */
		return 0;

	utask->state = UTASK_SSTEP_ACK;
	set_thread_flag(TIF_UPROBE);
	return 1;
}

static struct notifier_block uprobe_exception_nb = {
	.notifier_call		= arch_uprobe_exception_notify,
	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
};

static int __init init_uprobes(void)
{
	int i;

	for (i = 0; i < UPROBES_HASH_SZ; i++)
		mutex_init(&uprobes_mmap_mutex[i]);

	if (percpu_init_rwsem(&dup_mmap_sem))
		return -ENOMEM;

	return register_die_notifier(&uprobe_exception_nb);
}
__initcall(init_uprobes);