// SPDX-License-Identifier: GPL-2.0+
/*
 * User-space Probes (UProbes)
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/export.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
#include <linux/swap.h>		/* try_to_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include "../../mm/internal.h"	/* munlock_vma_page */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE

static struct rb_root uprobes_tree = RB_ROOT;
/*
 * Allows us to skip the uprobe_mmap if there are no uprobe events active
 * at this time.  Probably a fine-grained per-inode count is better?
 */
#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */

#define UPROBES_HASH_SZ	13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

static struct percpu_rw_semaphore dup_mmap_sem;

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN	0

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	refcount_t		ref;
	struct rw_semaphore	register_rwsem;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct uprobe_consumer	*consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	loff_t			offset;
	loff_t			ref_ctr_offset;
	unsigned long		flags;

	/*
	 * The generic code assumes that it has two members of unknown type
	 * owned by the arch-specific code:
	 *
	 *	insn -	copy_insn() saves the original instruction here for
	 *		arch_uprobe_analyze_insn().
	 *
	 *	ixol -	potentially modified instruction to execute out of
	 *		line, copied to xol_area by xol_get_insn_slot().
	 */
	struct arch_uprobe	arch;
};

struct delayed_uprobe {
	struct list_head list;
	struct uprobe *uprobe;
	struct mm_struct *mm;
};

static DEFINE_MUTEX(delayed_uprobe_lock);
static LIST_HEAD(delayed_uprobe_list);

/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, a thread contends for a slot.  It frees the
 * slot after singlestep.  Currently a fixed number of slots are
 * allocated.
 */
struct xol_area {
	wait_queue_head_t		wq;		/* if all slots are busy */
	atomic_t			slot_count;	/* number of in-use slots */
	unsigned long			*bitmap;	/* 0 = free slot */

	struct vm_special_mapping	xol_mapping;
	struct page			*pages[2];
	/*
	 * We keep the vma's vm_start rather than a pointer to the vma
	 * itself.  The probed process or a naughty kernel module could make
	 * the vma go away, and we must handle that reasonably gracefully.
	 */
	unsigned long			vaddr;		/* Page(s) of instruction slots */
};

/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return true if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

	if (is_register)
		flags |= VM_WRITE;

	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
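/*
 * Worked example of the offset<->vaddr math above (illustrative values,
 * not taken from this file): with PAGE_SHIFT == 12, a vma with
 * vm_start == 0x7f0000002000 and vm_pgoff == 2 maps file offset 0x3010 to
 *
 *	0x7f0000002000 + 0x3010 - (2 << 12) == 0x7f0000003010,
 *
 * and vaddr_to_offset() is the exact inverse of that computation.
 */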
/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @old_page is mapped at
 * @old_page: the cowed page we are replacing by @new_page
 * @new_page: the modified page we replace @old_page by
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *old_page, struct page *new_page)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page_vma_mapped_walk pvmw = {
		.page = old_page,
		.vma = vma,
		.address = addr,
	};
	int err;
	struct mmu_notifier_range range;
	struct mem_cgroup *memcg;

	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, addr,
				addr + PAGE_SIZE);

	VM_BUG_ON_PAGE(PageTransHuge(old_page), old_page);

	err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
			false);
	if (err)
		return err;

	/* For try_to_free_swap() and munlock_vma_page() below */
	lock_page(old_page);

	mmu_notifier_invalidate_range_start(&range);
	err = -EAGAIN;
	if (!page_vma_mapped_walk(&pvmw)) {
		mem_cgroup_cancel_charge(new_page, memcg, false);
		goto unlock;
	}
	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);

	get_page(new_page);
	page_add_new_anon_rmap(new_page, vma, addr, false);
	mem_cgroup_commit_charge(new_page, memcg, false, false);
	lru_cache_add_active_or_unevictable(new_page, vma);

	if (!PageAnon(old_page)) {
		dec_mm_counter(mm, mm_counter_file(old_page));
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
	ptep_clear_flush_notify(vma, addr, pvmw.pte);
	set_pte_at_notify(mm, addr, pvmw.pte,
			mk_pte(new_page, vma->vm_page_prot));

	page_remove_rmap(old_page, false);
	if (!page_mapped(old_page))
		try_to_free_swap(old_page);
	page_vma_mapped_walk_done(&pvmw);

	if (vma->vm_flags & VM_LOCKED)
		munlock_vma_page(old_page);
	put_page(old_page);

	err = 0;
 unlock:
	mmu_notifier_invalidate_range_end(&range);
	unlock_page(old_page);
	return err;
}

/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}

/**
 * is_trap_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_trap_insn
 * Returns true if @insn is a breakpoint instruction.
 *
 * This function is needed for the case where an architecture has multiple
 * trap instructions (like powerpc).
 */
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
	return is_swbp_insn(insn);
}

static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
	kunmap_atomic(kaddr);
}

static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
	kunmap_atomic(kaddr);
}

static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
	uprobe_opcode_t old_opcode;
	bool is_swbp;

	/*
	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
	 * We do not check if it is any other 'trap variant' which could
	 * be a conditional trap instruction such as the one powerpc supports.
	 *
	 * The logic is that we do not care if the underlying instruction
	 * is a trap variant; uprobes always wins over any other (gdb)
	 * breakpoint.
	 */
	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
	is_swbp = is_swbp_insn(&old_opcode);

	if (is_swbp_insn(new_opcode)) {
		if (is_swbp)		/* register: already installed? */
			return 0;
	} else {
		if (!is_swbp)		/* unregister: was it changed by us? */
			return 0;
	}

	return 1;
}
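/*
 * Return convention of verify_opcode(), summarized here for reference:
 * 1 means the opcode at @vaddr still has to be (re)written, 0 means the
 * page already holds the desired opcode and the write can be skipped.
 * uprobe_write_opcode() below treats any value <= 0 as "do not write".
 */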
static struct delayed_uprobe *
delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	list_for_each_entry(du, &delayed_uprobe_list, list)
		if (du->uprobe == uprobe && du->mm == mm)
			return du;
	return NULL;
}

static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	if (delayed_uprobe_check(uprobe, mm))
		return 0;

	du = kzalloc(sizeof(*du), GFP_KERNEL);
	if (!du)
		return -ENOMEM;

	du->uprobe = uprobe;
	du->mm = mm;
	list_add(&du->list, &delayed_uprobe_list);
	return 0;
}

static void delayed_uprobe_delete(struct delayed_uprobe *du)
{
	if (WARN_ON(!du))
		return;
	list_del(&du->list);
	kfree(du);
}

static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct list_head *pos, *q;
	struct delayed_uprobe *du;

	if (!uprobe && !mm)
		return;

	list_for_each_safe(pos, q, &delayed_uprobe_list) {
		du = list_entry(pos, struct delayed_uprobe, list);

		if (uprobe && du->uprobe != uprobe)
			continue;
		if (mm && du->mm != mm)
			continue;

		delayed_uprobe_delete(du);
	}
}

static bool valid_ref_ctr_vma(struct uprobe *uprobe,
			      struct vm_area_struct *vma)
{
	unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset);

	return uprobe->ref_ctr_offset &&
		vma->vm_file &&
		file_inode(vma->vm_file) == uprobe->inode &&
		(vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
		vma->vm_start <= vaddr &&
		vma->vm_end > vaddr;
}

static struct vm_area_struct *
find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct vm_area_struct *tmp;

	for (tmp = mm->mmap; tmp; tmp = tmp->vm_next)
		if (valid_ref_ctr_vma(uprobe, tmp))
			return tmp;

	return NULL;
}

static int
__update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
{
	void *kaddr;
	struct page *page;
	struct vm_area_struct *vma;
	int ret;
	short *ptr;

	if (!vaddr || !d)
		return -EINVAL;

	ret = get_user_pages_remote(NULL, mm, vaddr, 1,
			FOLL_WRITE, &page, &vma, NULL);
	if (unlikely(ret <= 0)) {
		/*
		 * We are asking for 1 page. If get_user_pages_remote() fails,
		 * it may return 0, in that case we have to return error.
		 */
		return ret == 0 ? -EBUSY : ret;
	}

	kaddr = kmap_atomic(page);
	ptr = kaddr + (vaddr & ~PAGE_MASK);

	if (unlikely(*ptr + d < 0)) {
		pr_warn("ref_ctr going negative. vaddr: 0x%lx, "
			"curr val: %d, delta: %d\n", vaddr, *ptr, d);
		ret = -EINVAL;
		goto out;
	}

	*ptr += d;
	ret = 0;
out:
	kunmap_atomic(kaddr);
	put_page(page);
	return ret;
}
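/*
 * The reference counter is typically an SDT/USDT semaphore: a
 * zero-initialized short in a writable section of the probed binary that
 * user space tests before preparing tracepoint arguments.  An illustrative
 * user-space sketch (the names are hypothetical, not part of this file):
 *
 *	static volatile unsigned short my_probe_semaphore;
 *
 *	if (my_probe_semaphore)		// non-zero while a probe is attached
 *		expensive_argument_setup();
 *
 * __update_ref_ctr() is what makes that short non-zero (d == 1) when a
 * probe is installed, and zero again (d == -1) when it is removed.
 */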
"increment" : "decrement", uprobe->inode->i_ino, 4101cc33161SRavi Bangoria (unsigned long long) uprobe->offset, 4111cc33161SRavi Bangoria (unsigned long long) uprobe->ref_ctr_offset, mm); 4121cc33161SRavi Bangoria } 4131cc33161SRavi Bangoria 4141cc33161SRavi Bangoria static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm, 4151cc33161SRavi Bangoria short d) 4161cc33161SRavi Bangoria { 4171cc33161SRavi Bangoria struct vm_area_struct *rc_vma; 4181cc33161SRavi Bangoria unsigned long rc_vaddr; 4191cc33161SRavi Bangoria int ret = 0; 4201cc33161SRavi Bangoria 4211cc33161SRavi Bangoria rc_vma = find_ref_ctr_vma(uprobe, mm); 4221cc33161SRavi Bangoria 4231cc33161SRavi Bangoria if (rc_vma) { 4241cc33161SRavi Bangoria rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset); 4251cc33161SRavi Bangoria ret = __update_ref_ctr(mm, rc_vaddr, d); 4261cc33161SRavi Bangoria if (ret) 4271cc33161SRavi Bangoria update_ref_ctr_warn(uprobe, mm, d); 4281cc33161SRavi Bangoria 4291cc33161SRavi Bangoria if (d > 0) 4301cc33161SRavi Bangoria return ret; 4311cc33161SRavi Bangoria } 4321cc33161SRavi Bangoria 4331cc33161SRavi Bangoria mutex_lock(&delayed_uprobe_lock); 4341cc33161SRavi Bangoria if (d > 0) 4351cc33161SRavi Bangoria ret = delayed_uprobe_add(uprobe, mm); 4361cc33161SRavi Bangoria else 4371cc33161SRavi Bangoria delayed_uprobe_remove(uprobe, mm); 4381cc33161SRavi Bangoria mutex_unlock(&delayed_uprobe_lock); 4391cc33161SRavi Bangoria 4401cc33161SRavi Bangoria return ret; 4411cc33161SRavi Bangoria } 4421cc33161SRavi Bangoria 443a5f4374aSIngo Molnar /* 444a5f4374aSIngo Molnar * NOTE: 445a5f4374aSIngo Molnar * Expect the breakpoint instruction to be the smallest size instruction for 446a5f4374aSIngo Molnar * the architecture. If an arch has variable length instruction and the 447a5f4374aSIngo Molnar * breakpoint instruction is not of the smallest length instruction 4480908ad6eSAnanth N Mavinakayanahalli * supported by that architecture then we need to modify is_trap_at_addr and 449f72d41faSOleg Nesterov * uprobe_write_opcode accordingly. This would never be a problem for archs 450f72d41faSOleg Nesterov * that have fixed length instructions. 45129dedee0SOleg Nesterov * 452f72d41faSOleg Nesterov * uprobe_write_opcode - write the opcode at a given virtual address. 453a5f4374aSIngo Molnar * @mm: the probed process address space. 454a5f4374aSIngo Molnar * @vaddr: the virtual address to store the opcode. 455a5f4374aSIngo Molnar * @opcode: opcode to be written at @vaddr. 456a5f4374aSIngo Molnar * 45729dedee0SOleg Nesterov * Called with mm->mmap_sem held for write. 458a5f4374aSIngo Molnar * Return 0 (success) or a negative errno. 459a5f4374aSIngo Molnar */ 4606d43743eSRavi Bangoria int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, 4616d43743eSRavi Bangoria unsigned long vaddr, uprobe_opcode_t opcode) 462a5f4374aSIngo Molnar { 4631cc33161SRavi Bangoria struct uprobe *uprobe; 464a5f4374aSIngo Molnar struct page *old_page, *new_page; 465a5f4374aSIngo Molnar struct vm_area_struct *vma; 4661cc33161SRavi Bangoria int ret, is_register, ref_ctr_updated = 0; 4671cc33161SRavi Bangoria 4681cc33161SRavi Bangoria is_register = is_swbp_insn(&opcode); 4691cc33161SRavi Bangoria uprobe = container_of(auprobe, struct uprobe, arch); 470f403072cSOleg Nesterov 4715323ce71SOleg Nesterov retry: 472a5f4374aSIngo Molnar /* Read the page with vaddr into memory */ 473c8394812SKirill A. Shutemov ret = get_user_pages_remote(NULL, mm, vaddr, 1, 474c8394812SKirill A. 
	if (ret <= 0)
		return ret;

	ret = verify_opcode(old_page, vaddr, &opcode);
	if (ret <= 0)
		goto put_old;

	/* We are going to replace instruction, update ref_ctr. */
	if (!ref_ctr_updated && uprobe->ref_ctr_offset) {
		ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
		if (ret)
			goto put_old;

		ref_ctr_updated = 1;
	}

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_old;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);
	copy_highpage(new_page, old_page);
	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

	ret = __replace_page(vma, vaddr, old_page, new_page);
	put_page(new_page);
put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;

	/* Revert back reference counter if instruction update failed. */
	if (ret && is_register && ref_ctr_updated)
		update_ref_ctr(uprobe, mm, -1);

	return ret;
}

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr,
			*(uprobe_opcode_t *)&auprobe->insn);
}

static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
	refcount_inc(&uprobe->ref);
	return uprobe;
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (refcount_dec_and_test(&uprobe->ref)) {
		/*
		 * If application munmap(exec_vma) before uprobe_unregister()
		 * gets called, we don't get a chance to remove uprobe from
		 * delayed_uprobe_list from remove_breakpoint(). Do it here.
		 */
		mutex_lock(&delayed_uprobe_lock);
		delayed_uprobe_remove(uprobe, NULL);
		mutex_unlock(&delayed_uprobe_lock);
		kfree(uprobe);
	}
}

static int match_uprobe(struct uprobe *l, struct uprobe *r)
{
	if (l->inode < r->inode)
		return -1;

	if (l->inode > r->inode)
		return 1;

	if (l->offset < r->offset)
		return -1;

	if (l->offset > r->offset)
		return 1;

	return 0;
}

static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe u = { .inode = inode, .offset = offset };
	struct rb_node *n = uprobes_tree.rb_node;
	struct uprobe *uprobe;
	int match;

	while (n) {
		uprobe = rb_entry(n, struct uprobe, rb_node);
		match = match_uprobe(&u, uprobe);
		if (!match)
			return get_uprobe(uprobe);

		if (match < 0)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	return NULL;
}

/*
 * Find a uprobe corresponding to a given inode:offset
 * Acquires uprobes_treelock
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe;

	spin_lock(&uprobes_treelock);
	uprobe = __find_uprobe(inode, offset);
	spin_unlock(&uprobes_treelock);

	return uprobe;
}
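/*
 * Note on the ordering above: match_uprobe() compares (inode, offset)
 * lexicographically, so all probes on one inode form a contiguous run in
 * the rbtree, sorted by offset.  find_node_in_range() and
 * build_probe_list() further down rely on exactly this property for
 * their range scans.
 */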
static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node **p = &uprobes_tree.rb_node;
	struct rb_node *parent = NULL;
	struct uprobe *u;
	int match;

	while (*p) {
		parent = *p;
		u = rb_entry(parent, struct uprobe, rb_node);
		match = match_uprobe(uprobe, u);
		if (!match)
			return get_uprobe(u);

		if (match < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	u = NULL;
	rb_link_node(&uprobe->rb_node, parent, p);
	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
	/* get access + creation ref */
	refcount_set(&uprobe->ref, 2);

	return u;
}

/*
 * Acquire uprobes_treelock.
 * Matching uprobe already exists in rbtree;
 *	increment (access refcount) and return the matching uprobe.
 *
 * No matching uprobe; insert the uprobe in rb_tree;
 *	get a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	spin_lock(&uprobes_treelock);
	u = __insert_uprobe(uprobe);
	spin_unlock(&uprobes_treelock);

	return u;
}
static void
ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe)
{
	pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx "
		"ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n",
		uprobe->inode->i_ino, (unsigned long long) uprobe->offset,
		(unsigned long long) cur_uprobe->ref_ctr_offset,
		(unsigned long long) uprobe->ref_ctr_offset);
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
				   loff_t ref_ctr_offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return NULL;

	uprobe->inode = inode;
	uprobe->offset = offset;
	uprobe->ref_ctr_offset = ref_ctr_offset;
	init_rwsem(&uprobe->register_rwsem);
	init_rwsem(&uprobe->consumer_rwsem);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);
	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe) {
		if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) {
			ref_ctr_mismatch_warn(cur_uprobe, uprobe);
			put_uprobe(cur_uprobe);
			kfree(uprobe);
			return ERR_PTR(-EINVAL);
		}
		kfree(uprobe);
		uprobe = cur_uprobe;
	}

	return uprobe;
}

static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	uc->next = uprobe->consumers;
	uprobe->consumers = uc;
	up_write(&uprobe->consumer_rwsem);
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if @uc was deleted successfully, false otherwise.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	struct uprobe_consumer **con;
	bool ret = false;

	down_write(&uprobe->consumer_rwsem);
	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
		if (*con == uc) {
			*con = uc->next;
			ret = true;
			break;
		}
	}
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static int __copy_insn(struct address_space *mapping, struct file *filp,
			void *insn, int nbytes, loff_t offset)
{
	struct page *page;
	/*
	 * Ensure that the page that has the original instruction is populated
	 * and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
	 * see uprobe_register().
	 */
	if (mapping->a_ops->readpage)
		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
	else
		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
	if (IS_ERR(page))
		return PTR_ERR(page);

	copy_from_page(page, offset, insn, nbytes);
	put_page(page);

	return 0;
}
static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping = uprobe->inode->i_mapping;
	loff_t offs = uprobe->offset;
	void *insn = &uprobe->arch.insn;
	int size = sizeof(uprobe->arch.insn);
	int len, err = -EIO;

	/* Copy only available bytes, -EIO if nothing was read */
	do {
		if (offs >= i_size_read(uprobe->inode))
			break;

		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
		err = __copy_insn(mapping, filp, insn, len, offs);
		if (err)
			break;

		insn += len;
		offs += len;
		size -= len;
	} while (size);

	return err;
}
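/*
 * Worked example of the chunking above (illustrative numbers): with
 * PAGE_SIZE == 4096 and a 16-byte insn buffer starting at offs == 0x1ffa,
 * the first iteration copies min(16, 0x1000 - 0xffa) == 6 bytes from the
 * first page and the second copies the remaining 10 bytes from offset
 * 0x2000, so an instruction straddling a page boundary is read correctly.
 */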
static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
				struct mm_struct *mm, unsigned long vaddr)
{
	int ret = 0;

	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		return ret;

	/* TODO: move this into _register, until then we abuse this sem. */
	down_write(&uprobe->consumer_rwsem);
	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		goto out;

	ret = copy_insn(uprobe, file);
	if (ret)
		goto out;

	ret = -ENOTSUPP;
	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
		goto out;

	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
	if (ret)
		goto out;

	/* uprobe_write_opcode() assumes we don't cross page boundary */
	BUG_ON((uprobe->offset & ~PAGE_MASK) +
			UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);

	smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
	set_bit(UPROBE_COPY_INSN, &uprobe->flags);

 out:
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static inline bool consumer_filter(struct uprobe_consumer *uc,
				   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	return !uc->filter || uc->filter(uc, ctx, mm);
}

static bool filter_chain(struct uprobe *uprobe,
			 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct uprobe_consumer *uc;
	bool ret = false;

	down_read(&uprobe->consumer_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		ret = consumer_filter(uc, ctx, mm);
		if (ret)
			break;
	}
	up_read(&uprobe->consumer_rwsem);

	return ret;
}
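/*
 * Filtering semantics, for reference: a consumer with a NULL ->filter is
 * interested in every mm, so consumer_filter() returns true for it
 * unconditionally.  filter_chain() is effectively an OR over all
 * registered consumers: one interested consumer is enough to keep (or
 * install) the breakpoint in a given mm.
 */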
static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	bool first_uprobe;
	int ret;

	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
	if (ret)
		return ret;

	/*
	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
	 * the task can hit this breakpoint right after __replace_page().
	 */
	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
	if (first_uprobe)
		set_bit(MMF_HAS_UPROBES, &mm->flags);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		clear_bit(MMF_HAS_UPROBES, &mm->flags);

	return ret;
}

static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
	set_bit(MMF_RECALC_UPROBES, &mm->flags);
	return set_orig_insn(&uprobe->arch, mm, vaddr);
}

static inline bool uprobe_is_active(struct uprobe *uprobe)
{
	return !RB_EMPTY_NODE(&uprobe->rb_node);
}
/*
 * There could be threads that have already hit the breakpoint. They
 * will recheck the current insn and restart if find_uprobe() fails.
 * See find_active_uprobe().
 */
static void delete_uprobe(struct uprobe *uprobe)
{
	if (WARN_ON(!uprobe_is_active(uprobe)))
		return;

	spin_lock(&uprobes_treelock);
	rb_erase(&uprobe->rb_node, &uprobes_tree);
	spin_unlock(&uprobes_treelock);
	RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
	put_uprobe(uprobe);
}

struct map_info {
	struct map_info *next;
	struct mm_struct *mm;
	unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
	struct map_info *next = info->next;
	kfree(info);
	return next;
}
static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct map_info *curr = NULL;
	struct map_info *prev = NULL;
	struct map_info *info;
	int more = 0;

 again:
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		if (!prev && !more) {
			/*
			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
			 * reclaim. This is optimistic, no harm done if it fails.
			 */
			prev = kmalloc(sizeof(struct map_info),
					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (prev)
				prev->next = NULL;
		}
		if (!prev) {
			more++;
			continue;
		}

		if (!mmget_not_zero(vma->vm_mm))
			continue;

		info = prev;
		prev = prev->next;
		info->next = curr;
		curr = info;

		info->mm = vma->vm_mm;
		info->vaddr = offset_to_vaddr(vma, offset);
	}
	i_mmap_unlock_read(mapping);

	if (!more)
		goto out;

	prev = curr;
	while (curr) {
		mmput(curr->mm);
		curr = curr->next;
	}

	do {
		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
		if (!info) {
			curr = ERR_PTR(-ENOMEM);
			goto out;
		}
		info->next = prev;
		prev = info;
	} while (--more);

	goto again;
 out:
	while (prev)
		prev = free_map_info(prev);
	return curr;
}
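/*
 * Allocation strategy of build_map_info(), summarized: under
 * i_mmap_lock_read() only opportunistic GFP_NOWAIT allocations are made;
 * every vma that could not get a map_info is merely counted in 'more'.
 * If anything was missed, the lock is dropped, the partial result is
 * undone (mmput), 'more' entries are allocated with GFP_KERNEL, and the
 * walk restarts from scratch.  This avoids sleeping allocations while
 * holding i_mmap_rwsem, at the cost of an occasional extra pass.
 */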
static int
register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
{
	bool is_register = !!new;
	struct map_info *info;
	int err = 0;

	percpu_down_write(&dup_mmap_sem);
	info = build_map_info(uprobe->inode->i_mapping,
					uprobe->offset, is_register);
	if (IS_ERR(info)) {
		err = PTR_ERR(info);
		goto out;
	}

	while (info) {
		struct mm_struct *mm = info->mm;
		struct vm_area_struct *vma;

		if (err && is_register)
			goto free;

		down_write(&mm->mmap_sem);
		vma = find_vma(mm, info->vaddr);
		if (!vma || !valid_vma(vma, is_register) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			goto unlock;

		if (vma->vm_start > info->vaddr ||
		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
			goto unlock;

		if (is_register) {
			/* consult only the "caller", new consumer. */
			if (consumer_filter(new,
					UPROBE_FILTER_REGISTER, mm))
				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
			if (!filter_chain(uprobe,
					UPROBE_FILTER_UNREGISTER, mm))
				err |= remove_breakpoint(uprobe, mm, info->vaddr);
		}

 unlock:
		up_write(&mm->mmap_sem);
 free:
		mmput(mm);
		info = free_map_info(info);
	}
 out:
	percpu_up_write(&dup_mmap_sem);
	return err;
}

static void
__uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	int err;

	if (WARN_ON(!consumer_del(uprobe, uc)))
		return;

	err = register_for_each_vma(uprobe, NULL);
	/* TODO : can't unregister? schedule a worker thread */
	if (!uprobe->consumers && !err)
		delete_uprobe(uprobe);
}

/*
 * uprobe_unregister - unregister an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;

	uprobe = find_uprobe(inode, offset);
	if (WARN_ON(!uprobe))
		return;

	down_write(&uprobe->register_rwsem);
	__uprobe_unregister(uprobe, uc);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister);
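/*
 * Minimal usage sketch (illustrative only, not part of this file): how an
 * in-kernel client might attach a consumer to <inode, offset>.  The names
 * my_handler/my_uc are hypothetical; only the uprobe_consumer layout and
 * the register/unregister calls are real.
 *
 *	static int my_handler(struct uprobe_consumer *uc, struct pt_regs *regs)
 *	{
 *		// called in task context each time the probed insn is hit
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer my_uc = { .handler = my_handler };
 *
 *	err = uprobe_register(inode, offset, &my_uc);
 *	...
 *	uprobe_unregister(inode, offset, &my_uc);
 */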
/*
 * __uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, __uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e first consumer for a @inode:@offset
 * tuple).  Creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. Creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters. Caller of __uprobe_register() is required to keep @inode
 * (and the containing mount) referenced.
 *
 * Return errno if it cannot successfully install probes
 * else return 0 (success)
 */
static int __uprobe_register(struct inode *inode, loff_t offset,
			     loff_t ref_ctr_offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	/* Uprobe must have at least one set consumer */
	if (!uc->handler && !uc->ret_handler)
		return -EINVAL;

	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
	if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping))
		return -EIO;
	/* Racy, just to catch the obvious mistakes */
	if (offset > i_size_read(inode))
		return -EINVAL;

 retry:
	uprobe = alloc_uprobe(inode, offset, ref_ctr_offset);
	if (!uprobe)
		return -ENOMEM;
	if (IS_ERR(uprobe))
		return PTR_ERR(uprobe);

	/*
	 * We can race with uprobe_unregister()->delete_uprobe().
	 * Check uprobe_is_active() and retry if it is false.
	 */
	down_write(&uprobe->register_rwsem);
	ret = -EAGAIN;
	if (likely(uprobe_is_active(uprobe))) {
		consumer_add(uprobe, uc);
		ret = register_for_each_vma(uprobe, uc);
		if (ret)
			__uprobe_unregister(uprobe, uc);
	}
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}

int uprobe_register(struct inode *inode, loff_t offset,
		    struct uprobe_consumer *uc)
{
	return __uprobe_register(inode, offset, 0, uc);
}
EXPORT_SYMBOL_GPL(uprobe_register);

int uprobe_register_refctr(struct inode *inode, loff_t offset,
			   loff_t ref_ctr_offset, struct uprobe_consumer *uc)
{
	return __uprobe_register(inode, offset, ref_ctr_offset, uc);
}
EXPORT_SYMBOL_GPL(uprobe_register_refctr);

/*
 * uprobe_apply - add or remove the breakpoints of an already registered
 * probe, according to @add.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: consumer which wants to add more or remove some breakpoints
 * @add: add or remove the breakpoints
 */
int uprobe_apply(struct inode *inode, loff_t offset,
			struct uprobe_consumer *uc, bool add)
{
	struct uprobe *uprobe;
	struct uprobe_consumer *con;
	int ret = -ENOENT;

	uprobe = find_uprobe(inode, offset);
	if (WARN_ON(!uprobe))
		return ret;

	down_write(&uprobe->register_rwsem);
	for (con = uprobe->consumers; con && con != uc ; con = con->next)
		;
	if (con)
		ret = register_for_each_vma(uprobe, add ? uc : NULL);
uc : NULL); 1181bdf8647cSOleg Nesterov up_write(&uprobe->register_rwsem); 1182bdf8647cSOleg Nesterov put_uprobe(uprobe); 1183bdf8647cSOleg Nesterov 1184bdf8647cSOleg Nesterov return ret; 1185bdf8647cSOleg Nesterov } 1186bdf8647cSOleg Nesterov 1187da1816b1SOleg Nesterov static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm) 1188da1816b1SOleg Nesterov { 1189da1816b1SOleg Nesterov struct vm_area_struct *vma; 1190da1816b1SOleg Nesterov int err = 0; 1191da1816b1SOleg Nesterov 1192da1816b1SOleg Nesterov down_read(&mm->mmap_sem); 1193da1816b1SOleg Nesterov for (vma = mm->mmap; vma; vma = vma->vm_next) { 1194da1816b1SOleg Nesterov unsigned long vaddr; 1195da1816b1SOleg Nesterov loff_t offset; 1196da1816b1SOleg Nesterov 1197da1816b1SOleg Nesterov if (!valid_vma(vma, false) || 1198f281769eSOleg Nesterov file_inode(vma->vm_file) != uprobe->inode) 1199da1816b1SOleg Nesterov continue; 1200da1816b1SOleg Nesterov 1201da1816b1SOleg Nesterov offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT; 1202da1816b1SOleg Nesterov if (uprobe->offset < offset || 1203da1816b1SOleg Nesterov uprobe->offset >= offset + vma->vm_end - vma->vm_start) 1204da1816b1SOleg Nesterov continue; 1205da1816b1SOleg Nesterov 1206da1816b1SOleg Nesterov vaddr = offset_to_vaddr(vma, uprobe->offset); 1207da1816b1SOleg Nesterov err |= remove_breakpoint(uprobe, mm, vaddr); 1208da1816b1SOleg Nesterov } 1209da1816b1SOleg Nesterov up_read(&mm->mmap_sem); 1210da1816b1SOleg Nesterov 1211da1816b1SOleg Nesterov return err; 1212da1816b1SOleg Nesterov } 1213da1816b1SOleg Nesterov 1214891c3970SOleg Nesterov static struct rb_node * 1215891c3970SOleg Nesterov find_node_in_range(struct inode *inode, loff_t min, loff_t max) 1216a5f4374aSIngo Molnar { 1217a5f4374aSIngo Molnar struct rb_node *n = uprobes_tree.rb_node; 1218a5f4374aSIngo Molnar 1219a5f4374aSIngo Molnar while (n) { 1220891c3970SOleg Nesterov struct uprobe *u = rb_entry(n, struct uprobe, rb_node); 1221a5f4374aSIngo Molnar 1222891c3970SOleg Nesterov if (inode < u->inode) { 1223a5f4374aSIngo Molnar n = n->rb_left; 1224891c3970SOleg Nesterov } else if (inode > u->inode) { 1225a5f4374aSIngo Molnar n = n->rb_right; 1226891c3970SOleg Nesterov } else { 1227891c3970SOleg Nesterov if (max < u->offset) 1228891c3970SOleg Nesterov n = n->rb_left; 1229891c3970SOleg Nesterov else if (min > u->offset) 1230891c3970SOleg Nesterov n = n->rb_right; 1231891c3970SOleg Nesterov else 1232891c3970SOleg Nesterov break; 1233891c3970SOleg Nesterov } 1234a5f4374aSIngo Molnar } 1235a5f4374aSIngo Molnar 1236891c3970SOleg Nesterov return n; 1237a5f4374aSIngo Molnar } 1238a5f4374aSIngo Molnar 1239a5f4374aSIngo Molnar /* 1240891c3970SOleg Nesterov * For a given range in vma, build a list of probes that need to be inserted. 
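 *
 * Worked example with illustrative numbers (PAGE_SHIFT == 12): for a vma
 * with vm_start == 0x400000 and vm_pgoff == 0x10, a range with
 * start == 0x401000 and end == 0x403000 maps to the file offsets
 *
 *	min = (0x10 << 12) + (0x401000 - 0x400000) = 0x11000
 *	max = min + (end - start) - 1              = 0x12fff
 *
 * and every uprobe of @inode whose ->offset falls in [min, max] is
 * collected on @head.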
1241a5f4374aSIngo Molnar */ 1242891c3970SOleg Nesterov static void build_probe_list(struct inode *inode, 1243891c3970SOleg Nesterov struct vm_area_struct *vma, 1244891c3970SOleg Nesterov unsigned long start, unsigned long end, 1245891c3970SOleg Nesterov struct list_head *head) 1246a5f4374aSIngo Molnar { 1247891c3970SOleg Nesterov loff_t min, max; 1248891c3970SOleg Nesterov struct rb_node *n, *t; 1249891c3970SOleg Nesterov struct uprobe *u; 1250891c3970SOleg Nesterov 1251891c3970SOleg Nesterov INIT_LIST_HEAD(head); 1252cb113b47SOleg Nesterov min = vaddr_to_offset(vma, start); 1253891c3970SOleg Nesterov max = min + (end - start) - 1; 1254a5f4374aSIngo Molnar 12556f47caa0SOleg Nesterov spin_lock(&uprobes_treelock); 1256891c3970SOleg Nesterov n = find_node_in_range(inode, min, max); 1257891c3970SOleg Nesterov if (n) { 1258891c3970SOleg Nesterov for (t = n; t; t = rb_prev(t)) { 1259891c3970SOleg Nesterov u = rb_entry(t, struct uprobe, rb_node); 1260891c3970SOleg Nesterov if (u->inode != inode || u->offset < min) 1261a5f4374aSIngo Molnar break; 1262891c3970SOleg Nesterov list_add(&u->pending_list, head); 1263f231722aSOleg Nesterov get_uprobe(u); 1264a5f4374aSIngo Molnar } 1265891c3970SOleg Nesterov for (t = n; (t = rb_next(t)); ) { 1266891c3970SOleg Nesterov u = rb_entry(t, struct uprobe, rb_node); 1267891c3970SOleg Nesterov if (u->inode != inode || u->offset > max) 1268891c3970SOleg Nesterov break; 1269891c3970SOleg Nesterov list_add(&u->pending_list, head); 1270f231722aSOleg Nesterov get_uprobe(u); 1271891c3970SOleg Nesterov } 1272891c3970SOleg Nesterov } 12736f47caa0SOleg Nesterov spin_unlock(&uprobes_treelock); 1274a5f4374aSIngo Molnar } 1275a5f4374aSIngo Molnar 12761cc33161SRavi Bangoria /* @vma contains reference counter, not the probed instruction. */ 12771cc33161SRavi Bangoria static int delayed_ref_ctr_inc(struct vm_area_struct *vma) 12781cc33161SRavi Bangoria { 12791cc33161SRavi Bangoria struct list_head *pos, *q; 12801cc33161SRavi Bangoria struct delayed_uprobe *du; 12811cc33161SRavi Bangoria unsigned long vaddr; 12821cc33161SRavi Bangoria int ret = 0, err = 0; 12831cc33161SRavi Bangoria 12841cc33161SRavi Bangoria mutex_lock(&delayed_uprobe_lock); 12851cc33161SRavi Bangoria list_for_each_safe(pos, q, &delayed_uprobe_list) { 12861cc33161SRavi Bangoria du = list_entry(pos, struct delayed_uprobe, list); 12871cc33161SRavi Bangoria 12881cc33161SRavi Bangoria if (du->mm != vma->vm_mm || 12891cc33161SRavi Bangoria !valid_ref_ctr_vma(du->uprobe, vma)) 12901cc33161SRavi Bangoria continue; 12911cc33161SRavi Bangoria 12921cc33161SRavi Bangoria vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset); 12931cc33161SRavi Bangoria ret = __update_ref_ctr(vma->vm_mm, vaddr, 1); 12941cc33161SRavi Bangoria if (ret) { 12951cc33161SRavi Bangoria update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1); 12961cc33161SRavi Bangoria if (!err) 12971cc33161SRavi Bangoria err = ret; 12981cc33161SRavi Bangoria } 12991cc33161SRavi Bangoria delayed_uprobe_delete(du); 13001cc33161SRavi Bangoria } 13011cc33161SRavi Bangoria mutex_unlock(&delayed_uprobe_lock); 13021cc33161SRavi Bangoria return err; 13031cc33161SRavi Bangoria } 13041cc33161SRavi Bangoria 1305a5f4374aSIngo Molnar /* 13065e5be71aSOleg Nesterov * Called from mmap_region/vma_adjust with mm->mmap_sem acquired. 1307a5f4374aSIngo Molnar * 13085e5be71aSOleg Nesterov * Currently we ignore all errors and always return 0, the callers 13095e5be71aSOleg Nesterov * can't handle the failure anyway. 
1310a5f4374aSIngo Molnar */ 1311a5f4374aSIngo Molnar int uprobe_mmap(struct vm_area_struct *vma) 1312a5f4374aSIngo Molnar { 1313a5f4374aSIngo Molnar struct list_head tmp_list; 1314665605a2SOleg Nesterov struct uprobe *uprobe, *u; 1315a5f4374aSIngo Molnar struct inode *inode; 1316a5f4374aSIngo Molnar 13171cc33161SRavi Bangoria if (no_uprobe_events()) 13181cc33161SRavi Bangoria return 0; 13191cc33161SRavi Bangoria 13201cc33161SRavi Bangoria if (vma->vm_file && 13211cc33161SRavi Bangoria (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE && 13221cc33161SRavi Bangoria test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags)) 13231cc33161SRavi Bangoria delayed_ref_ctr_inc(vma); 13241cc33161SRavi Bangoria 13251cc33161SRavi Bangoria if (!valid_vma(vma, true)) 1326a5f4374aSIngo Molnar return 0; 1327a5f4374aSIngo Molnar 1328f281769eSOleg Nesterov inode = file_inode(vma->vm_file); 1329a5f4374aSIngo Molnar if (!inode) 1330a5f4374aSIngo Molnar return 0; 1331a5f4374aSIngo Molnar 1332a5f4374aSIngo Molnar mutex_lock(uprobes_mmap_hash(inode)); 1333891c3970SOleg Nesterov build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list); 1334806a98bdSOleg Nesterov /* 1335806a98bdSOleg Nesterov * We can race with uprobe_unregister(), this uprobe can be already 1336806a98bdSOleg Nesterov * removed. But in this case filter_chain() must return false, all 1337806a98bdSOleg Nesterov * consumers have gone away. 1338806a98bdSOleg Nesterov */ 1339665605a2SOleg Nesterov list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { 1340806a98bdSOleg Nesterov if (!fatal_signal_pending(current) && 13418a7f2fa0SOleg Nesterov filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) { 134257683f72SOleg Nesterov unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); 13435e5be71aSOleg Nesterov install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); 1344a5f4374aSIngo Molnar } 1345a5f4374aSIngo Molnar put_uprobe(uprobe); 1346a5f4374aSIngo Molnar } 1347a5f4374aSIngo Molnar mutex_unlock(uprobes_mmap_hash(inode)); 1348a5f4374aSIngo Molnar 13495e5be71aSOleg Nesterov return 0; 1350a5f4374aSIngo Molnar } 1351a5f4374aSIngo Molnar 13529f68f672SOleg Nesterov static bool 13539f68f672SOleg Nesterov vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end) 13549f68f672SOleg Nesterov { 13559f68f672SOleg Nesterov loff_t min, max; 13569f68f672SOleg Nesterov struct inode *inode; 13579f68f672SOleg Nesterov struct rb_node *n; 13589f68f672SOleg Nesterov 1359f281769eSOleg Nesterov inode = file_inode(vma->vm_file); 13609f68f672SOleg Nesterov 13619f68f672SOleg Nesterov min = vaddr_to_offset(vma, start); 13629f68f672SOleg Nesterov max = min + (end - start) - 1; 13639f68f672SOleg Nesterov 13649f68f672SOleg Nesterov spin_lock(&uprobes_treelock); 13659f68f672SOleg Nesterov n = find_node_in_range(inode, min, max); 13669f68f672SOleg Nesterov spin_unlock(&uprobes_treelock); 13679f68f672SOleg Nesterov 13689f68f672SOleg Nesterov return !!n; 13699f68f672SOleg Nesterov } 13709f68f672SOleg Nesterov 1371682968e0SSrikar Dronamraju /* 1372682968e0SSrikar Dronamraju * Called in context of a munmap of a vma. 1373682968e0SSrikar Dronamraju */ 1374cbc91f71SSrikar Dronamraju void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) 1375682968e0SSrikar Dronamraju { 1376441f1eb7SOleg Nesterov if (no_uprobe_events() || !valid_vma(vma, false)) 1377682968e0SSrikar Dronamraju return; 1378682968e0SSrikar Dronamraju 13792fd611a9SOleg Nesterov if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? 
*/ 13802fd611a9SOleg Nesterov return; 13812fd611a9SOleg Nesterov 13829f68f672SOleg Nesterov if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) || 13839f68f672SOleg Nesterov test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags)) 1384f8ac4ec9SOleg Nesterov return; 1385f8ac4ec9SOleg Nesterov 13869f68f672SOleg Nesterov if (vma_has_uprobes(vma, start, end)) 13879f68f672SOleg Nesterov set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags); 1388682968e0SSrikar Dronamraju } 1389682968e0SSrikar Dronamraju 1390d4b3b638SSrikar Dronamraju /* Slot allocation for XOL */ 13916441ec8bSOleg Nesterov static int xol_add_vma(struct mm_struct *mm, struct xol_area *area) 1392d4b3b638SSrikar Dronamraju { 1393704bde3cSOleg Nesterov struct vm_area_struct *vma; 1394704bde3cSOleg Nesterov int ret; 1395d4b3b638SSrikar Dronamraju 1396598fdc1dSMichal Hocko if (down_write_killable(&mm->mmap_sem)) 1397598fdc1dSMichal Hocko return -EINTR; 1398598fdc1dSMichal Hocko 1399704bde3cSOleg Nesterov if (mm->uprobes_state.xol_area) { 1400704bde3cSOleg Nesterov ret = -EALREADY; 1401d4b3b638SSrikar Dronamraju goto fail; 1402704bde3cSOleg Nesterov } 1403d4b3b638SSrikar Dronamraju 1404af0d95afSOleg Nesterov if (!area->vaddr) { 1405d4b3b638SSrikar Dronamraju /* Try to map as high as possible, this is only a hint. */ 1406af0d95afSOleg Nesterov area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, 1407af0d95afSOleg Nesterov PAGE_SIZE, 0, 0); 1408d4b3b638SSrikar Dronamraju if (area->vaddr & ~PAGE_MASK) { 1409d4b3b638SSrikar Dronamraju ret = area->vaddr; 1410d4b3b638SSrikar Dronamraju goto fail; 1411d4b3b638SSrikar Dronamraju } 1412af0d95afSOleg Nesterov } 1413d4b3b638SSrikar Dronamraju 1414704bde3cSOleg Nesterov vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE, 1415704bde3cSOleg Nesterov VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, 1416704bde3cSOleg Nesterov &area->xol_mapping); 1417704bde3cSOleg Nesterov if (IS_ERR(vma)) { 1418704bde3cSOleg Nesterov ret = PTR_ERR(vma); 1419d4b3b638SSrikar Dronamraju goto fail; 1420704bde3cSOleg Nesterov } 1421d4b3b638SSrikar Dronamraju 1422704bde3cSOleg Nesterov ret = 0; 14235c6338b4SPaul E. McKenney /* pairs with get_xol_area() */ 14245c6338b4SPaul E. 
McKenney smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */ 1425d4b3b638SSrikar Dronamraju fail: 1426d4b3b638SSrikar Dronamraju up_write(&mm->mmap_sem); 1427d4b3b638SSrikar Dronamraju 1428d4b3b638SSrikar Dronamraju return ret; 1429d4b3b638SSrikar Dronamraju } 1430d4b3b638SSrikar Dronamraju 1431af0d95afSOleg Nesterov static struct xol_area *__create_xol_area(unsigned long vaddr) 1432d4b3b638SSrikar Dronamraju { 14339b545df8SOleg Nesterov struct mm_struct *mm = current->mm; 1434e78aebfdSAnton Arapov uprobe_opcode_t insn = UPROBE_SWBP_INSN; 14356441ec8bSOleg Nesterov struct xol_area *area; 14369b545df8SOleg Nesterov 1437af0d95afSOleg Nesterov area = kmalloc(sizeof(*area), GFP_KERNEL); 1438d4b3b638SSrikar Dronamraju if (unlikely(!area)) 1439c8a82538SOleg Nesterov goto out; 1440d4b3b638SSrikar Dronamraju 14416396bb22SKees Cook area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long), 14426396bb22SKees Cook GFP_KERNEL); 1443d4b3b638SSrikar Dronamraju if (!area->bitmap) 1444c8a82538SOleg Nesterov goto free_area; 1445c8a82538SOleg Nesterov 1446704bde3cSOleg Nesterov area->xol_mapping.name = "[uprobes]"; 1447869ae761SOleg Nesterov area->xol_mapping.fault = NULL; 1448704bde3cSOleg Nesterov area->xol_mapping.pages = area->pages; 1449f58bea2fSOleg Nesterov area->pages[0] = alloc_page(GFP_HIGHUSER); 1450f58bea2fSOleg Nesterov if (!area->pages[0]) 1451c8a82538SOleg Nesterov goto free_bitmap; 1452f58bea2fSOleg Nesterov area->pages[1] = NULL; 1453d4b3b638SSrikar Dronamraju 1454af0d95afSOleg Nesterov area->vaddr = vaddr; 1455d4b3b638SSrikar Dronamraju init_waitqueue_head(&area->wq); 14566441ec8bSOleg Nesterov /* Reserve the 1st slot for get_trampoline_vaddr() */ 14576441ec8bSOleg Nesterov set_bit(0, area->bitmap); 14586441ec8bSOleg Nesterov atomic_set(&area->slot_count, 1); 1459297e765eSMarcin Nowakowski arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE); 1460e78aebfdSAnton Arapov 14616441ec8bSOleg Nesterov if (!xol_add_vma(mm, area)) 1462d4b3b638SSrikar Dronamraju return area; 1463d4b3b638SSrikar Dronamraju 1464f58bea2fSOleg Nesterov __free_page(area->pages[0]); 1465c8a82538SOleg Nesterov free_bitmap: 1466d4b3b638SSrikar Dronamraju kfree(area->bitmap); 1467c8a82538SOleg Nesterov free_area: 1468d4b3b638SSrikar Dronamraju kfree(area); 1469c8a82538SOleg Nesterov out: 14706441ec8bSOleg Nesterov return NULL; 14716441ec8bSOleg Nesterov } 14726441ec8bSOleg Nesterov 14736441ec8bSOleg Nesterov /* 14746441ec8bSOleg Nesterov * get_xol_area - Allocate process's xol_area if necessary. 14756441ec8bSOleg Nesterov * This area will be used for storing instructions for execution out of line. 14766441ec8bSOleg Nesterov * 14776441ec8bSOleg Nesterov * Returns the allocated area or NULL. 14786441ec8bSOleg Nesterov */ 14796441ec8bSOleg Nesterov static struct xol_area *get_xol_area(void) 14806441ec8bSOleg Nesterov { 14816441ec8bSOleg Nesterov struct mm_struct *mm = current->mm; 14826441ec8bSOleg Nesterov struct xol_area *area; 14836441ec8bSOleg Nesterov 14846441ec8bSOleg Nesterov if (!mm->uprobes_state.xol_area) 1485af0d95afSOleg Nesterov __create_xol_area(0); 14866441ec8bSOleg Nesterov 14875c6338b4SPaul E. McKenney /* Pairs with xol_add_vma() smp_store_release() */ 14885c6338b4SPaul E. McKenney area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */ 14899b545df8SOleg Nesterov return area; 1490d4b3b638SSrikar Dronamraju } 1491d4b3b638SSrikar Dronamraju 1492d4b3b638SSrikar Dronamraju /* 1493d4b3b638SSrikar Dronamraju * uprobe_clear_state - Free the area allocated for slots. 
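 * Also drops any delayed-uprobe entries still recorded against @mm.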
1494d4b3b638SSrikar Dronamraju */ 1495d4b3b638SSrikar Dronamraju void uprobe_clear_state(struct mm_struct *mm) 1496d4b3b638SSrikar Dronamraju { 1497d4b3b638SSrikar Dronamraju struct xol_area *area = mm->uprobes_state.xol_area; 1498d4b3b638SSrikar Dronamraju 14991cc33161SRavi Bangoria mutex_lock(&delayed_uprobe_lock); 15001cc33161SRavi Bangoria delayed_uprobe_remove(NULL, mm); 15011cc33161SRavi Bangoria mutex_unlock(&delayed_uprobe_lock); 15021cc33161SRavi Bangoria 1503d4b3b638SSrikar Dronamraju if (!area) 1504d4b3b638SSrikar Dronamraju return; 1505d4b3b638SSrikar Dronamraju 1506f58bea2fSOleg Nesterov put_page(area->pages[0]); 1507d4b3b638SSrikar Dronamraju kfree(area->bitmap); 1508d4b3b638SSrikar Dronamraju kfree(area); 1509d4b3b638SSrikar Dronamraju } 1510d4b3b638SSrikar Dronamraju 151132cdba1eSOleg Nesterov void uprobe_start_dup_mmap(void) 151232cdba1eSOleg Nesterov { 151332cdba1eSOleg Nesterov percpu_down_read(&dup_mmap_sem); 151432cdba1eSOleg Nesterov } 151532cdba1eSOleg Nesterov 151632cdba1eSOleg Nesterov void uprobe_end_dup_mmap(void) 151732cdba1eSOleg Nesterov { 151832cdba1eSOleg Nesterov percpu_up_read(&dup_mmap_sem); 151932cdba1eSOleg Nesterov } 152032cdba1eSOleg Nesterov 1521f8ac4ec9SOleg Nesterov void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm) 1522f8ac4ec9SOleg Nesterov { 15239f68f672SOleg Nesterov if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) { 1524f8ac4ec9SOleg Nesterov set_bit(MMF_HAS_UPROBES, &newmm->flags); 15259f68f672SOleg Nesterov /* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */ 15269f68f672SOleg Nesterov set_bit(MMF_RECALC_UPROBES, &newmm->flags); 15279f68f672SOleg Nesterov } 1528f8ac4ec9SOleg Nesterov } 1529f8ac4ec9SOleg Nesterov 1530d4b3b638SSrikar Dronamraju /* 1531d4b3b638SSrikar Dronamraju * xol_take_insn_slot - search for a free slot. 1532d4b3b638SSrikar Dronamraju */ 1533d4b3b638SSrikar Dronamraju static unsigned long xol_take_insn_slot(struct xol_area *area) 1534d4b3b638SSrikar Dronamraju { 1535d4b3b638SSrikar Dronamraju unsigned long slot_addr; 1536d4b3b638SSrikar Dronamraju int slot_nr; 1537d4b3b638SSrikar Dronamraju 1538d4b3b638SSrikar Dronamraju do { 1539d4b3b638SSrikar Dronamraju slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE); 1540d4b3b638SSrikar Dronamraju if (slot_nr < UINSNS_PER_PAGE) { 1541d4b3b638SSrikar Dronamraju if (!test_and_set_bit(slot_nr, area->bitmap)) 1542d4b3b638SSrikar Dronamraju break; 1543d4b3b638SSrikar Dronamraju 1544d4b3b638SSrikar Dronamraju slot_nr = UINSNS_PER_PAGE; 1545d4b3b638SSrikar Dronamraju continue; 1546d4b3b638SSrikar Dronamraju } 1547d4b3b638SSrikar Dronamraju wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE)); 1548d4b3b638SSrikar Dronamraju } while (slot_nr >= UINSNS_PER_PAGE); 1549d4b3b638SSrikar Dronamraju 1550d4b3b638SSrikar Dronamraju slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES); 1551d4b3b638SSrikar Dronamraju atomic_inc(&area->slot_count); 1552d4b3b638SSrikar Dronamraju 1553d4b3b638SSrikar Dronamraju return slot_addr; 1554d4b3b638SSrikar Dronamraju } 1555d4b3b638SSrikar Dronamraju 1556d4b3b638SSrikar Dronamraju /* 1557a6cb3f6dSOleg Nesterov * xol_get_insn_slot - allocate a slot for xol. 1558d4b3b638SSrikar Dronamraju * Returns the allocated slot address or 0.
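 *
 * Slot arithmetic, with illustrative values (PAGE_SIZE 4096 and
 * UPROBE_XOL_SLOT_BYTES 128, i.e. UINSNS_PER_PAGE == 32):
 *
 *	slot_addr = area->vaddr + slot_nr * UPROBE_XOL_SLOT_BYTES
 *
 * so slot 3 of an area at 0x7f0000000000 is 0x7f0000000180; slot 0 is
 * reserved for the uretprobe trampoline.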
1559d4b3b638SSrikar Dronamraju */ 1560a6cb3f6dSOleg Nesterov static unsigned long xol_get_insn_slot(struct uprobe *uprobe) 1561d4b3b638SSrikar Dronamraju { 1562d4b3b638SSrikar Dronamraju struct xol_area *area; 1563a6cb3f6dSOleg Nesterov unsigned long xol_vaddr; 1564d4b3b638SSrikar Dronamraju 15659b545df8SOleg Nesterov area = get_xol_area(); 1566d4b3b638SSrikar Dronamraju if (!area) 1567d4b3b638SSrikar Dronamraju return 0; 1568d4b3b638SSrikar Dronamraju 1569a6cb3f6dSOleg Nesterov xol_vaddr = xol_take_insn_slot(area); 1570a6cb3f6dSOleg Nesterov if (unlikely(!xol_vaddr)) 1571d4b3b638SSrikar Dronamraju return 0; 1572d4b3b638SSrikar Dronamraju 1573f58bea2fSOleg Nesterov arch_uprobe_copy_ixol(area->pages[0], xol_vaddr, 1574803200e2SOleg Nesterov &uprobe->arch.ixol, sizeof(uprobe->arch.ixol)); 1575d4b3b638SSrikar Dronamraju 1576a6cb3f6dSOleg Nesterov return xol_vaddr; 1577d4b3b638SSrikar Dronamraju } 1578d4b3b638SSrikar Dronamraju 1579d4b3b638SSrikar Dronamraju /* 1580d4b3b638SSrikar Dronamraju * xol_free_insn_slot - If slot was earlier allocated by 1581d4b3b638SSrikar Dronamraju * @xol_get_insn_slot(), make the slot available for 1582d4b3b638SSrikar Dronamraju * subsequent requests. 1583d4b3b638SSrikar Dronamraju */ 1584d4b3b638SSrikar Dronamraju static void xol_free_insn_slot(struct task_struct *tsk) 1585d4b3b638SSrikar Dronamraju { 1586d4b3b638SSrikar Dronamraju struct xol_area *area; 1587d4b3b638SSrikar Dronamraju unsigned long vma_end; 1588d4b3b638SSrikar Dronamraju unsigned long slot_addr; 1589d4b3b638SSrikar Dronamraju 1590d4b3b638SSrikar Dronamraju if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask) 1591d4b3b638SSrikar Dronamraju return; 1592d4b3b638SSrikar Dronamraju 1593d4b3b638SSrikar Dronamraju slot_addr = tsk->utask->xol_vaddr; 1594af4355e9SOleg Nesterov if (unlikely(!slot_addr)) 1595d4b3b638SSrikar Dronamraju return; 1596d4b3b638SSrikar Dronamraju 1597d4b3b638SSrikar Dronamraju area = tsk->mm->uprobes_state.xol_area; 1598d4b3b638SSrikar Dronamraju vma_end = area->vaddr + PAGE_SIZE; 1599d4b3b638SSrikar Dronamraju if (area->vaddr <= slot_addr && slot_addr < vma_end) { 1600d4b3b638SSrikar Dronamraju unsigned long offset; 1601d4b3b638SSrikar Dronamraju int slot_nr; 1602d4b3b638SSrikar Dronamraju 1603d4b3b638SSrikar Dronamraju offset = slot_addr - area->vaddr; 1604d4b3b638SSrikar Dronamraju slot_nr = offset / UPROBE_XOL_SLOT_BYTES; 1605d4b3b638SSrikar Dronamraju if (slot_nr >= UINSNS_PER_PAGE) 1606d4b3b638SSrikar Dronamraju return; 1607d4b3b638SSrikar Dronamraju 1608d4b3b638SSrikar Dronamraju clear_bit(slot_nr, area->bitmap); 1609d4b3b638SSrikar Dronamraju atomic_dec(&area->slot_count); 16102a742cedSOleg Nesterov smp_mb__after_atomic(); /* pairs with prepare_to_wait() */ 1611d4b3b638SSrikar Dronamraju if (waitqueue_active(&area->wq)) 1612d4b3b638SSrikar Dronamraju wake_up(&area->wq); 1613d4b3b638SSrikar Dronamraju 1614d4b3b638SSrikar Dronamraju tsk->utask->xol_vaddr = 0; 1615d4b3b638SSrikar Dronamraju } 1616d4b3b638SSrikar Dronamraju } 1617d4b3b638SSrikar Dronamraju 161872e6ae28SVictor Kamensky void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, 161972e6ae28SVictor Kamensky void *src, unsigned long len) 162072e6ae28SVictor Kamensky { 162172e6ae28SVictor Kamensky /* Initialize the slot */ 162272e6ae28SVictor Kamensky copy_to_page(page, vaddr, src, len); 162372e6ae28SVictor Kamensky 162472e6ae28SVictor Kamensky /* 162572e6ae28SVictor Kamensky * We probably need flush_icache_user_range() but it needs vma. 
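 * An arch with a non-coherent icache would typically provide its own
 * version, roughly (illustrative sketch only, not real arch code):
 *
 *	void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
 *				   void *src, unsigned long len)
 *	{
 *		copy_to_page(page, vaddr, src, len);
 *		flush_icache_range(...);	<- arch-specific flush
 *	}
 *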
162672e6ae28SVictor Kamensky * This should work on most architectures by default. If an 162772e6ae28SVictor Kamensky * architecture needs to do something different it can define 162872e6ae28SVictor Kamensky * its own version of the function. 162972e6ae28SVictor Kamensky */ 163072e6ae28SVictor Kamensky flush_dcache_page(page); 163172e6ae28SVictor Kamensky } 163272e6ae28SVictor Kamensky 16330326f5a9SSrikar Dronamraju /** 16340326f5a9SSrikar Dronamraju * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs 16350326f5a9SSrikar Dronamraju * @regs: Reflects the saved state of the task after it has hit a breakpoint 16360326f5a9SSrikar Dronamraju * instruction. 16370326f5a9SSrikar Dronamraju * Return the address of the breakpoint instruction. 16380326f5a9SSrikar Dronamraju */ 16390326f5a9SSrikar Dronamraju unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs) 16400326f5a9SSrikar Dronamraju { 16410326f5a9SSrikar Dronamraju return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE; 16420326f5a9SSrikar Dronamraju } 16430326f5a9SSrikar Dronamraju 1644b02ef20aSOleg Nesterov unsigned long uprobe_get_trap_addr(struct pt_regs *regs) 1645b02ef20aSOleg Nesterov { 1646b02ef20aSOleg Nesterov struct uprobe_task *utask = current->utask; 1647b02ef20aSOleg Nesterov 1648b02ef20aSOleg Nesterov if (unlikely(utask && utask->active_uprobe)) 1649b02ef20aSOleg Nesterov return utask->vaddr; 1650b02ef20aSOleg Nesterov 1651b02ef20aSOleg Nesterov return instruction_pointer(regs); 1652b02ef20aSOleg Nesterov } 1653b02ef20aSOleg Nesterov 16542bb5e840SOleg Nesterov static struct return_instance *free_ret_instance(struct return_instance *ri) 16552bb5e840SOleg Nesterov { 16562bb5e840SOleg Nesterov struct return_instance *next = ri->next; 16572bb5e840SOleg Nesterov put_uprobe(ri->uprobe); 16582bb5e840SOleg Nesterov kfree(ri); 16592bb5e840SOleg Nesterov return next; 16602bb5e840SOleg Nesterov } 16612bb5e840SOleg Nesterov 16620326f5a9SSrikar Dronamraju /* 16630326f5a9SSrikar Dronamraju * Called with no locks held. 1664788faab7STobias Tefke * Called in context of an exiting or an exec-ing thread. 16650326f5a9SSrikar Dronamraju */ 16660326f5a9SSrikar Dronamraju void uprobe_free_utask(struct task_struct *t) 16670326f5a9SSrikar Dronamraju { 16680326f5a9SSrikar Dronamraju struct uprobe_task *utask = t->utask; 16692bb5e840SOleg Nesterov struct return_instance *ri; 16700326f5a9SSrikar Dronamraju 16710326f5a9SSrikar Dronamraju if (!utask) 16720326f5a9SSrikar Dronamraju return; 16730326f5a9SSrikar Dronamraju 16740326f5a9SSrikar Dronamraju if (utask->active_uprobe) 16750326f5a9SSrikar Dronamraju put_uprobe(utask->active_uprobe); 16760326f5a9SSrikar Dronamraju 16770dfd0eb8SAnton Arapov ri = utask->return_instances; 16782bb5e840SOleg Nesterov while (ri) 16792bb5e840SOleg Nesterov ri = free_ret_instance(ri); 16800dfd0eb8SAnton Arapov 1681d4b3b638SSrikar Dronamraju xol_free_insn_slot(t); 16820326f5a9SSrikar Dronamraju kfree(utask); 16830326f5a9SSrikar Dronamraju t->utask = NULL; 16840326f5a9SSrikar Dronamraju } 16850326f5a9SSrikar Dronamraju 16860326f5a9SSrikar Dronamraju /* 16875a2df662SOleg Nesterov * Allocate a uprobe_task object for the task if necessary. 16885a2df662SOleg Nesterov * Called when the thread hits a breakpoint.
16890326f5a9SSrikar Dronamraju * 16900326f5a9SSrikar Dronamraju * Returns: 16910326f5a9SSrikar Dronamraju * - pointer to new uprobe_task on success 16920326f5a9SSrikar Dronamraju * - NULL otherwise 16930326f5a9SSrikar Dronamraju */ 16945a2df662SOleg Nesterov static struct uprobe_task *get_utask(void) 16950326f5a9SSrikar Dronamraju { 16965a2df662SOleg Nesterov if (!current->utask) 16975a2df662SOleg Nesterov current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL); 16985a2df662SOleg Nesterov return current->utask; 16990326f5a9SSrikar Dronamraju } 17000326f5a9SSrikar Dronamraju 1701248d3a7bSOleg Nesterov static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask) 1702248d3a7bSOleg Nesterov { 1703248d3a7bSOleg Nesterov struct uprobe_task *n_utask; 1704248d3a7bSOleg Nesterov struct return_instance **p, *o, *n; 1705248d3a7bSOleg Nesterov 1706248d3a7bSOleg Nesterov n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL); 1707248d3a7bSOleg Nesterov if (!n_utask) 1708248d3a7bSOleg Nesterov return -ENOMEM; 1709248d3a7bSOleg Nesterov t->utask = n_utask; 1710248d3a7bSOleg Nesterov 1711248d3a7bSOleg Nesterov p = &n_utask->return_instances; 1712248d3a7bSOleg Nesterov for (o = o_utask->return_instances; o; o = o->next) { 1713248d3a7bSOleg Nesterov n = kmalloc(sizeof(struct return_instance), GFP_KERNEL); 1714248d3a7bSOleg Nesterov if (!n) 1715248d3a7bSOleg Nesterov return -ENOMEM; 1716248d3a7bSOleg Nesterov 1717248d3a7bSOleg Nesterov *n = *o; 1718f231722aSOleg Nesterov get_uprobe(n->uprobe); 1719248d3a7bSOleg Nesterov n->next = NULL; 1720248d3a7bSOleg Nesterov 1721248d3a7bSOleg Nesterov *p = n; 1722248d3a7bSOleg Nesterov p = &n->next; 1723248d3a7bSOleg Nesterov n_utask->depth++; 1724248d3a7bSOleg Nesterov } 1725248d3a7bSOleg Nesterov 1726248d3a7bSOleg Nesterov return 0; 1727248d3a7bSOleg Nesterov } 1728248d3a7bSOleg Nesterov 1729248d3a7bSOleg Nesterov static void uprobe_warn(struct task_struct *t, const char *msg) 1730248d3a7bSOleg Nesterov { 1731248d3a7bSOleg Nesterov pr_warn("uprobe: %s:%d failed to %s\n", 1732248d3a7bSOleg Nesterov current->comm, current->pid, msg); 1733248d3a7bSOleg Nesterov } 1734248d3a7bSOleg Nesterov 1735aa59c53fSOleg Nesterov static void dup_xol_work(struct callback_head *work) 1736aa59c53fSOleg Nesterov { 1737aa59c53fSOleg Nesterov if (current->flags & PF_EXITING) 1738aa59c53fSOleg Nesterov return; 1739aa59c53fSOleg Nesterov 1740598fdc1dSMichal Hocko if (!__create_xol_area(current->utask->dup_xol_addr) && 1741598fdc1dSMichal Hocko !fatal_signal_pending(current)) 1742aa59c53fSOleg Nesterov uprobe_warn(current, "dup xol area"); 1743aa59c53fSOleg Nesterov } 1744aa59c53fSOleg Nesterov 1745e78aebfdSAnton Arapov /* 1746b68e0749SOleg Nesterov * Called in context of a new clone/fork from copy_process. 
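 *
 * Summary of what is duplicated (derived from the code below):
 *  - nothing, unless the parent has pending return_instances;
 *  - ordinary threads (same mm, no CLONE_VFORK) share everything;
 *  - otherwise the return_instance chain is copied via dup_utask();
 *  - a child with a new mm additionally gets its xol area re-created
 *    at the same vaddr by dup_xol_work(), scheduled as task_work.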
1747b68e0749SOleg Nesterov */ 17483ab67966SOleg Nesterov void uprobe_copy_process(struct task_struct *t, unsigned long flags) 1749b68e0749SOleg Nesterov { 1750248d3a7bSOleg Nesterov struct uprobe_task *utask = current->utask; 1751248d3a7bSOleg Nesterov struct mm_struct *mm = current->mm; 1752aa59c53fSOleg Nesterov struct xol_area *area; 1753248d3a7bSOleg Nesterov 1754b68e0749SOleg Nesterov t->utask = NULL; 1755248d3a7bSOleg Nesterov 17563ab67966SOleg Nesterov if (!utask || !utask->return_instances) 17573ab67966SOleg Nesterov return; 17583ab67966SOleg Nesterov 17593ab67966SOleg Nesterov if (mm == t->mm && !(flags & CLONE_VFORK)) 1760248d3a7bSOleg Nesterov return; 1761248d3a7bSOleg Nesterov 1762248d3a7bSOleg Nesterov if (dup_utask(t, utask)) 1763248d3a7bSOleg Nesterov return uprobe_warn(t, "dup ret instances"); 1764aa59c53fSOleg Nesterov 1765aa59c53fSOleg Nesterov /* The task can fork() after dup_xol_work() fails */ 1766aa59c53fSOleg Nesterov area = mm->uprobes_state.xol_area; 1767aa59c53fSOleg Nesterov if (!area) 1768aa59c53fSOleg Nesterov return uprobe_warn(t, "dup xol area"); 1769aa59c53fSOleg Nesterov 17703ab67966SOleg Nesterov if (mm == t->mm) 17713ab67966SOleg Nesterov return; 17723ab67966SOleg Nesterov 177332473431SOleg Nesterov t->utask->dup_xol_addr = area->vaddr; 177432473431SOleg Nesterov init_task_work(&t->utask->dup_xol_work, dup_xol_work); 177532473431SOleg Nesterov task_work_add(t, &t->utask->dup_xol_work, true); 1776b68e0749SOleg Nesterov } 1777b68e0749SOleg Nesterov 1778b68e0749SOleg Nesterov /* 1779e78aebfdSAnton Arapov * The current area->vaddr notion assumes the trampoline address is always 1780e78aebfdSAnton Arapov * equal to area->vaddr. 1781e78aebfdSAnton Arapov * 1782e78aebfdSAnton Arapov * Returns -1 in case the xol_area is not allocated. 1783e78aebfdSAnton Arapov */ 1784e78aebfdSAnton Arapov static unsigned long get_trampoline_vaddr(void) 1785e78aebfdSAnton Arapov { 1786e78aebfdSAnton Arapov struct xol_area *area; 1787e78aebfdSAnton Arapov unsigned long trampoline_vaddr = -1; 1788e78aebfdSAnton Arapov 17895c6338b4SPaul E. McKenney /* Pairs with xol_add_vma() smp_store_release() */ 17905c6338b4SPaul E. McKenney area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */ 1791e78aebfdSAnton Arapov if (area) 1792e78aebfdSAnton Arapov trampoline_vaddr = area->vaddr; 1793e78aebfdSAnton Arapov 1794e78aebfdSAnton Arapov return trampoline_vaddr; 1795e78aebfdSAnton Arapov } 1796e78aebfdSAnton Arapov 1797db087ef6SOleg Nesterov static void cleanup_return_instances(struct uprobe_task *utask, bool chained, 1798db087ef6SOleg Nesterov struct pt_regs *regs) 1799a5b7e1a8SOleg Nesterov { 1800a5b7e1a8SOleg Nesterov struct return_instance *ri = utask->return_instances; 1801db087ef6SOleg Nesterov enum rp_check ctx = chained ? 
RP_CHECK_CHAIN_CALL : RP_CHECK_CALL; 180286dcb702SOleg Nesterov 180386dcb702SOleg Nesterov while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) { 1804a5b7e1a8SOleg Nesterov ri = free_ret_instance(ri); 1805a5b7e1a8SOleg Nesterov utask->depth--; 1806a5b7e1a8SOleg Nesterov } 1807a5b7e1a8SOleg Nesterov utask->return_instances = ri; 1808a5b7e1a8SOleg Nesterov } 1809a5b7e1a8SOleg Nesterov 18100dfd0eb8SAnton Arapov static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs) 18110dfd0eb8SAnton Arapov { 18120dfd0eb8SAnton Arapov struct return_instance *ri; 18130dfd0eb8SAnton Arapov struct uprobe_task *utask; 18140dfd0eb8SAnton Arapov unsigned long orig_ret_vaddr, trampoline_vaddr; 1815db087ef6SOleg Nesterov bool chained; 18160dfd0eb8SAnton Arapov 18170dfd0eb8SAnton Arapov if (!get_xol_area()) 18180dfd0eb8SAnton Arapov return; 18190dfd0eb8SAnton Arapov 18200dfd0eb8SAnton Arapov utask = get_utask(); 18210dfd0eb8SAnton Arapov if (!utask) 18220dfd0eb8SAnton Arapov return; 18230dfd0eb8SAnton Arapov 1824ded49c55SAnton Arapov if (utask->depth >= MAX_URETPROBE_DEPTH) { 1825ded49c55SAnton Arapov printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to" 1826ded49c55SAnton Arapov " nestedness limit pid/tgid=%d/%d\n", 1827ded49c55SAnton Arapov current->pid, current->tgid); 1828ded49c55SAnton Arapov return; 1829ded49c55SAnton Arapov } 1830ded49c55SAnton Arapov 18316c58d0e4SOleg Nesterov ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL); 18320dfd0eb8SAnton Arapov if (!ri) 18336c58d0e4SOleg Nesterov return; 18340dfd0eb8SAnton Arapov 18350dfd0eb8SAnton Arapov trampoline_vaddr = get_trampoline_vaddr(); 18360dfd0eb8SAnton Arapov orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs); 18370dfd0eb8SAnton Arapov if (orig_ret_vaddr == -1) 18380dfd0eb8SAnton Arapov goto fail; 18390dfd0eb8SAnton Arapov 1840a5b7e1a8SOleg Nesterov /* drop the entries invalidated by longjmp() */ 1841db087ef6SOleg Nesterov chained = (orig_ret_vaddr == trampoline_vaddr); 1842db087ef6SOleg Nesterov cleanup_return_instances(utask, chained, regs); 1843a5b7e1a8SOleg Nesterov 18440dfd0eb8SAnton Arapov /* 18450dfd0eb8SAnton Arapov * We don't want to keep the trampoline address on the stack; rather, 18460dfd0eb8SAnton Arapov * keep the original return address of the first caller through all the 18470dfd0eb8SAnton Arapov * subsequent instances. This also makes breakpoint unwinding easier. 18480dfd0eb8SAnton Arapov */ 1849db087ef6SOleg Nesterov if (chained) { 18500dfd0eb8SAnton Arapov if (!utask->return_instances) { 18510dfd0eb8SAnton Arapov /* 18520dfd0eb8SAnton Arapov * This situation is not possible. Likely we have an 18530dfd0eb8SAnton Arapov * attack from user-space.
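 *
 * (For contrast, legitimate chaining: when a probed function tail-calls
 * another probed function, the return address was already replaced by
 * the trampoline, so orig_ret_vaddr == trampoline_vaddr here and a
 * previous instance must exist to supply the real return address.)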
18540dfd0eb8SAnton Arapov */ 18556c58d0e4SOleg Nesterov uprobe_warn(current, "handle tail call"); 18560dfd0eb8SAnton Arapov goto fail; 18570dfd0eb8SAnton Arapov } 18580dfd0eb8SAnton Arapov orig_ret_vaddr = utask->return_instances->orig_ret_vaddr; 18590dfd0eb8SAnton Arapov } 18600dfd0eb8SAnton Arapov 1861f231722aSOleg Nesterov ri->uprobe = get_uprobe(uprobe); 18620dfd0eb8SAnton Arapov ri->func = instruction_pointer(regs); 18637b868e48SOleg Nesterov ri->stack = user_stack_pointer(regs); 18640dfd0eb8SAnton Arapov ri->orig_ret_vaddr = orig_ret_vaddr; 18650dfd0eb8SAnton Arapov ri->chained = chained; 18660dfd0eb8SAnton Arapov 1867ded49c55SAnton Arapov utask->depth++; 18680dfd0eb8SAnton Arapov ri->next = utask->return_instances; 18690dfd0eb8SAnton Arapov utask->return_instances = ri; 18700dfd0eb8SAnton Arapov 18710dfd0eb8SAnton Arapov return; 18720dfd0eb8SAnton Arapov fail: 18730dfd0eb8SAnton Arapov kfree(ri); 18740dfd0eb8SAnton Arapov } 18750dfd0eb8SAnton Arapov 18760326f5a9SSrikar Dronamraju /* Prepare to single-step probed instruction out of line. */ 18770326f5a9SSrikar Dronamraju static int 1878a6cb3f6dSOleg Nesterov pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr) 18790326f5a9SSrikar Dronamraju { 1880a6cb3f6dSOleg Nesterov struct uprobe_task *utask; 1881a6cb3f6dSOleg Nesterov unsigned long xol_vaddr; 1882aba51024SOleg Nesterov int err; 1883d4b3b638SSrikar Dronamraju 1884608e7427SOleg Nesterov utask = get_utask(); 1885608e7427SOleg Nesterov if (!utask) 1886608e7427SOleg Nesterov return -ENOMEM; 1887a6cb3f6dSOleg Nesterov 1888a6cb3f6dSOleg Nesterov xol_vaddr = xol_get_insn_slot(uprobe); 1889a6cb3f6dSOleg Nesterov if (!xol_vaddr) 1890a6cb3f6dSOleg Nesterov return -ENOMEM; 1891a6cb3f6dSOleg Nesterov 1892a6cb3f6dSOleg Nesterov utask->xol_vaddr = xol_vaddr; 1893a6cb3f6dSOleg Nesterov utask->vaddr = bp_vaddr; 1894a6cb3f6dSOleg Nesterov 1895aba51024SOleg Nesterov err = arch_uprobe_pre_xol(&uprobe->arch, regs); 1896aba51024SOleg Nesterov if (unlikely(err)) { 1897aba51024SOleg Nesterov xol_free_insn_slot(current); 1898aba51024SOleg Nesterov return err; 1899aba51024SOleg Nesterov } 1900aba51024SOleg Nesterov 1901608e7427SOleg Nesterov utask->active_uprobe = uprobe; 1902608e7427SOleg Nesterov utask->state = UTASK_SSTEP; 1903aba51024SOleg Nesterov return 0; 19040326f5a9SSrikar Dronamraju } 19050326f5a9SSrikar Dronamraju 19060326f5a9SSrikar Dronamraju /* 19070326f5a9SSrikar Dronamraju * If we are singlestepping, then ensure this thread is not interrupted by 19080326f5a9SSrikar Dronamraju * non-fatal signals until completion of singlestep. When the xol insn itself 19090326f5a9SSrikar Dronamraju * triggers the signal, restart the original insn even if the task is 19100326f5a9SSrikar Dronamraju * already SIGKILL'ed (since coredump should report the correct ip). This 19110326f5a9SSrikar Dronamraju * is even more important if the task has a handler for SIGSEGV/etc: the 19120326f5a9SSrikar Dronamraju * _same_ instruction should be repeated again after return from the signal 19130326f5a9SSrikar Dronamraju * handler, and SSTEP can never finish in this case.
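 *
 * Illustrative example: if SIGTERM arrives while the thread single-steps
 * in its xol slot, TIF_SIGPENDING is cleared below and the signal is
 * re-discovered by the recalc_sigpending() call in handle_singlestep()
 * once the step has completed.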
19140326f5a9SSrikar Dronamraju */ 19150326f5a9SSrikar Dronamraju bool uprobe_deny_signal(void) 19160326f5a9SSrikar Dronamraju { 19170326f5a9SSrikar Dronamraju struct task_struct *t = current; 19180326f5a9SSrikar Dronamraju struct uprobe_task *utask = t->utask; 19190326f5a9SSrikar Dronamraju 19200326f5a9SSrikar Dronamraju if (likely(!utask || !utask->active_uprobe)) 19210326f5a9SSrikar Dronamraju return false; 19220326f5a9SSrikar Dronamraju 19230326f5a9SSrikar Dronamraju WARN_ON_ONCE(utask->state != UTASK_SSTEP); 19240326f5a9SSrikar Dronamraju 19250326f5a9SSrikar Dronamraju if (signal_pending(t)) { 19260326f5a9SSrikar Dronamraju spin_lock_irq(&t->sighand->siglock); 19270326f5a9SSrikar Dronamraju clear_tsk_thread_flag(t, TIF_SIGPENDING); 19280326f5a9SSrikar Dronamraju spin_unlock_irq(&t->sighand->siglock); 19290326f5a9SSrikar Dronamraju 19300326f5a9SSrikar Dronamraju if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) { 19310326f5a9SSrikar Dronamraju utask->state = UTASK_SSTEP_TRAPPED; 19320326f5a9SSrikar Dronamraju set_tsk_thread_flag(t, TIF_UPROBE); 19330326f5a9SSrikar Dronamraju } 19340326f5a9SSrikar Dronamraju } 19350326f5a9SSrikar Dronamraju 19360326f5a9SSrikar Dronamraju return true; 19370326f5a9SSrikar Dronamraju } 19380326f5a9SSrikar Dronamraju 1939499a4f3eSOleg Nesterov static void mmf_recalc_uprobes(struct mm_struct *mm) 1940499a4f3eSOleg Nesterov { 1941499a4f3eSOleg Nesterov struct vm_area_struct *vma; 1942499a4f3eSOleg Nesterov 1943499a4f3eSOleg Nesterov for (vma = mm->mmap; vma; vma = vma->vm_next) { 1944499a4f3eSOleg Nesterov if (!valid_vma(vma, false)) 1945499a4f3eSOleg Nesterov continue; 1946499a4f3eSOleg Nesterov /* 1947499a4f3eSOleg Nesterov * This is not strictly accurate, we can race with 1948499a4f3eSOleg Nesterov * uprobe_unregister() and see the already removed 1949499a4f3eSOleg Nesterov * uprobe if delete_uprobe() was not yet called. 195063633cbfSOleg Nesterov * Or this uprobe can be filtered out. 1951499a4f3eSOleg Nesterov */ 1952499a4f3eSOleg Nesterov if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end)) 1953499a4f3eSOleg Nesterov return; 1954499a4f3eSOleg Nesterov } 1955499a4f3eSOleg Nesterov 1956499a4f3eSOleg Nesterov clear_bit(MMF_HAS_UPROBES, &mm->flags); 1957499a4f3eSOleg Nesterov } 1958499a4f3eSOleg Nesterov 19590908ad6eSAnanth N Mavinakayanahalli static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) 1960ec75fba9SOleg Nesterov { 1961ec75fba9SOleg Nesterov struct page *page; 1962ec75fba9SOleg Nesterov uprobe_opcode_t opcode; 1963ec75fba9SOleg Nesterov int result; 1964ec75fba9SOleg Nesterov 1965ec75fba9SOleg Nesterov pagefault_disable(); 1966bd28b145SLinus Torvalds result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr); 1967ec75fba9SOleg Nesterov pagefault_enable(); 1968ec75fba9SOleg Nesterov 1969ec75fba9SOleg Nesterov if (likely(result == 0)) 1970ec75fba9SOleg Nesterov goto out; 1971ec75fba9SOleg Nesterov 19721e987790SDave Hansen /* 19731e987790SDave Hansen * The NULL 'tsk' here ensures that any faults that occur here 19741e987790SDave Hansen * will not be accounted to the task. 'mm' *is* current->mm, 19751e987790SDave Hansen * but we treat this as a 'remote' access since it is 19761e987790SDave Hansen * essentially a kernel access to the memory. 
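 *
 * (Note the two-step scheme above: the fast path reads the opcode with
 * __get_user() under pagefault_disable(); only if that fails do we fall
 * back to this slower, sleeping get_user_pages path.)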
19771e987790SDave Hansen */ 19789beae1eaSLorenzo Stoakes result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page, 19795b56d49fSLorenzo Stoakes NULL, NULL); 1980ec75fba9SOleg Nesterov if (result < 0) 1981ec75fba9SOleg Nesterov return result; 1982ec75fba9SOleg Nesterov 1983ab0d805cSOleg Nesterov copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE); 1984ec75fba9SOleg Nesterov put_page(page); 1985ec75fba9SOleg Nesterov out: 19860908ad6eSAnanth N Mavinakayanahalli /* This needs to return true for any variant of the trap insn */ 19870908ad6eSAnanth N Mavinakayanahalli return is_trap_insn(&opcode); 1988ec75fba9SOleg Nesterov } 1989ec75fba9SOleg Nesterov 1990d790d346SOleg Nesterov static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp) 19910326f5a9SSrikar Dronamraju { 19923a9ea052SOleg Nesterov struct mm_struct *mm = current->mm; 19933a9ea052SOleg Nesterov struct uprobe *uprobe = NULL; 19940326f5a9SSrikar Dronamraju struct vm_area_struct *vma; 19950326f5a9SSrikar Dronamraju 19960326f5a9SSrikar Dronamraju down_read(&mm->mmap_sem); 19970326f5a9SSrikar Dronamraju vma = find_vma(mm, bp_vaddr); 19983a9ea052SOleg Nesterov if (vma && vma->vm_start <= bp_vaddr) { 19993a9ea052SOleg Nesterov if (valid_vma(vma, false)) { 2000f281769eSOleg Nesterov struct inode *inode = file_inode(vma->vm_file); 2001cb113b47SOleg Nesterov loff_t offset = vaddr_to_offset(vma, bp_vaddr); 20020326f5a9SSrikar Dronamraju 20030326f5a9SSrikar Dronamraju uprobe = find_uprobe(inode, offset); 20040326f5a9SSrikar Dronamraju } 2005d790d346SOleg Nesterov 2006d790d346SOleg Nesterov if (!uprobe) 20070908ad6eSAnanth N Mavinakayanahalli *is_swbp = is_trap_at_addr(mm, bp_vaddr); 2008d790d346SOleg Nesterov } else { 2009d790d346SOleg Nesterov *is_swbp = -EFAULT; 20103a9ea052SOleg Nesterov } 2011499a4f3eSOleg Nesterov 2012499a4f3eSOleg Nesterov if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags)) 2013499a4f3eSOleg Nesterov mmf_recalc_uprobes(mm); 20140326f5a9SSrikar Dronamraju up_read(&mm->mmap_sem); 20150326f5a9SSrikar Dronamraju 20163a9ea052SOleg Nesterov return uprobe; 20173a9ea052SOleg Nesterov } 20183a9ea052SOleg Nesterov 2019da1816b1SOleg Nesterov static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) 2020da1816b1SOleg Nesterov { 2021da1816b1SOleg Nesterov struct uprobe_consumer *uc; 2022da1816b1SOleg Nesterov int remove = UPROBE_HANDLER_REMOVE; 20230dfd0eb8SAnton Arapov bool need_prep = false; /* prepare return uprobe, when needed */ 2024da1816b1SOleg Nesterov 2025da1816b1SOleg Nesterov down_read(&uprobe->register_rwsem); 2026da1816b1SOleg Nesterov for (uc = uprobe->consumers; uc; uc = uc->next) { 2027ea024870SAnton Arapov int rc = 0; 2028da1816b1SOleg Nesterov 2029ea024870SAnton Arapov if (uc->handler) { 2030ea024870SAnton Arapov rc = uc->handler(uc, regs); 2031da1816b1SOleg Nesterov WARN(rc & ~UPROBE_HANDLER_MASK, 2032d75f773cSSakari Ailus "bad rc=0x%x from %ps()\n", rc, uc->handler); 2033ea024870SAnton Arapov } 20340dfd0eb8SAnton Arapov 20350dfd0eb8SAnton Arapov if (uc->ret_handler) 20360dfd0eb8SAnton Arapov need_prep = true; 20370dfd0eb8SAnton Arapov 2038da1816b1SOleg Nesterov remove &= rc; 2039da1816b1SOleg Nesterov } 2040da1816b1SOleg Nesterov 20410dfd0eb8SAnton Arapov if (need_prep && !remove) 20420dfd0eb8SAnton Arapov prepare_uretprobe(uprobe, regs); /* put bp at return */ 20430dfd0eb8SAnton Arapov 2044da1816b1SOleg Nesterov if (remove && uprobe->consumers) { 2045da1816b1SOleg Nesterov WARN_ON(!uprobe_is_active(uprobe)); 2046da1816b1SOleg Nesterov 
unapply_uprobe(uprobe, current->mm); 2047da1816b1SOleg Nesterov } 2048da1816b1SOleg Nesterov up_read(&uprobe->register_rwsem); 2049da1816b1SOleg Nesterov } 2050da1816b1SOleg Nesterov 2051fec8898dSAnton Arapov static void 2052fec8898dSAnton Arapov handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs) 2053fec8898dSAnton Arapov { 2054fec8898dSAnton Arapov struct uprobe *uprobe = ri->uprobe; 2055fec8898dSAnton Arapov struct uprobe_consumer *uc; 2056fec8898dSAnton Arapov 2057fec8898dSAnton Arapov down_read(&uprobe->register_rwsem); 2058fec8898dSAnton Arapov for (uc = uprobe->consumers; uc; uc = uc->next) { 2059fec8898dSAnton Arapov if (uc->ret_handler) 2060fec8898dSAnton Arapov uc->ret_handler(uc, ri->func, regs); 2061fec8898dSAnton Arapov } 2062fec8898dSAnton Arapov up_read(&uprobe->register_rwsem); 2063fec8898dSAnton Arapov } 2064fec8898dSAnton Arapov 2065a83cfeb9SOleg Nesterov static struct return_instance *find_next_ret_chain(struct return_instance *ri) 2066a83cfeb9SOleg Nesterov { 2067a83cfeb9SOleg Nesterov bool chained; 2068a83cfeb9SOleg Nesterov 2069a83cfeb9SOleg Nesterov do { 2070a83cfeb9SOleg Nesterov chained = ri->chained; 2071a83cfeb9SOleg Nesterov ri = ri->next; /* can't be NULL if chained */ 2072a83cfeb9SOleg Nesterov } while (chained); 2073a83cfeb9SOleg Nesterov 2074a83cfeb9SOleg Nesterov return ri; 2075a83cfeb9SOleg Nesterov } 2076a83cfeb9SOleg Nesterov 20770b5256c7SOleg Nesterov static void handle_trampoline(struct pt_regs *regs) 2078fec8898dSAnton Arapov { 2079fec8898dSAnton Arapov struct uprobe_task *utask; 2080a83cfeb9SOleg Nesterov struct return_instance *ri, *next; 20815eeb50deSOleg Nesterov bool valid; 2082fec8898dSAnton Arapov 2083fec8898dSAnton Arapov utask = current->utask; 2084fec8898dSAnton Arapov if (!utask) 20850b5256c7SOleg Nesterov goto sigill; 2086fec8898dSAnton Arapov 2087fec8898dSAnton Arapov ri = utask->return_instances; 2088fec8898dSAnton Arapov if (!ri) 20890b5256c7SOleg Nesterov goto sigill; 2090fec8898dSAnton Arapov 20915eeb50deSOleg Nesterov do { 2092fec8898dSAnton Arapov /* 20935eeb50deSOleg Nesterov * We should throw out the frames invalidated by longjmp(). 20945eeb50deSOleg Nesterov * If this chain is valid, then the next one should be alive 20955eeb50deSOleg Nesterov * or NULL; the latter case means that nobody but ri->func 20965eeb50deSOleg Nesterov * could hit this trampoline on return. TODO: sigaltstack(). 2097fec8898dSAnton Arapov */ 20985eeb50deSOleg Nesterov next = find_next_ret_chain(ri); 209986dcb702SOleg Nesterov valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs); 21005eeb50deSOleg Nesterov 2101fec8898dSAnton Arapov instruction_pointer_set(regs, ri->orig_ret_vaddr); 2102a83cfeb9SOleg Nesterov do { 21035eeb50deSOleg Nesterov if (valid) 2104fec8898dSAnton Arapov handle_uretprobe_chain(ri, regs); 21052bb5e840SOleg Nesterov ri = free_ret_instance(ri); 2106878b5a6eSOleg Nesterov utask->depth--; 2107a83cfeb9SOleg Nesterov } while (ri != next); 21085eeb50deSOleg Nesterov } while (!valid); 2109fec8898dSAnton Arapov 2110fec8898dSAnton Arapov utask->return_instances = ri; 21110b5256c7SOleg Nesterov return; 2112fec8898dSAnton Arapov 21130b5256c7SOleg Nesterov sigill: 21140b5256c7SOleg Nesterov uprobe_warn(current, "handle uretprobe, sending SIGILL."); 211555a3235fSEric W. Biederman force_sig(SIGILL, current); 21160b5256c7SOleg Nesterov 2117fec8898dSAnton Arapov } 2118fec8898dSAnton Arapov 21196fe50a28SDavid A. Long bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs) 21206fe50a28SDavid A. 
Long { 21216fe50a28SDavid A. Long return false; 21226fe50a28SDavid A. Long } 21236fe50a28SDavid A. Long 212486dcb702SOleg Nesterov bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, 212586dcb702SOleg Nesterov struct pt_regs *regs) 212697da8976SOleg Nesterov { 212797da8976SOleg Nesterov return true; 212897da8976SOleg Nesterov } 212997da8976SOleg Nesterov 21303a9ea052SOleg Nesterov /* 21313a9ea052SOleg Nesterov * Run handler and ask thread to singlestep. 21323a9ea052SOleg Nesterov * Ensure all non-fatal signals cannot interrupt thread while it singlesteps. 21333a9ea052SOleg Nesterov */ 21343a9ea052SOleg Nesterov static void handle_swbp(struct pt_regs *regs) 21353a9ea052SOleg Nesterov { 21363a9ea052SOleg Nesterov struct uprobe *uprobe; 21373a9ea052SOleg Nesterov unsigned long bp_vaddr; 213856bb4cf6SOleg Nesterov int uninitialized_var(is_swbp); 21393a9ea052SOleg Nesterov 21403a9ea052SOleg Nesterov bp_vaddr = uprobe_get_swbp_addr(regs); 21410b5256c7SOleg Nesterov if (bp_vaddr == get_trampoline_vaddr()) 21420b5256c7SOleg Nesterov return handle_trampoline(regs); 2143fec8898dSAnton Arapov 2144fec8898dSAnton Arapov uprobe = find_active_uprobe(bp_vaddr, &is_swbp); 21450326f5a9SSrikar Dronamraju if (!uprobe) { 214656bb4cf6SOleg Nesterov if (is_swbp > 0) { 21470326f5a9SSrikar Dronamraju /* No matching uprobe; signal SIGTRAP. */ 21480326f5a9SSrikar Dronamraju send_sig(SIGTRAP, current, 0); 214956bb4cf6SOleg Nesterov } else { 215056bb4cf6SOleg Nesterov /* 215156bb4cf6SOleg Nesterov * Either we raced with uprobe_unregister() or we can't 215256bb4cf6SOleg Nesterov * access this memory. The latter is only possible if 215356bb4cf6SOleg Nesterov * another thread plays with our ->mm. In both cases 215456bb4cf6SOleg Nesterov * we can simply restart. If this vma was unmapped we 215556bb4cf6SOleg Nesterov * can pretend this insn was not executed yet and get 215656bb4cf6SOleg Nesterov * the (correct) SIGSEGV after restart. 215756bb4cf6SOleg Nesterov */ 215856bb4cf6SOleg Nesterov instruction_pointer_set(regs, bp_vaddr); 215956bb4cf6SOleg Nesterov } 21600326f5a9SSrikar Dronamraju return; 21610326f5a9SSrikar Dronamraju } 216274e59dfcSOleg Nesterov 216374e59dfcSOleg Nesterov /* change it in advance for ->handler() and restart */ 216474e59dfcSOleg Nesterov instruction_pointer_set(regs, bp_vaddr); 216574e59dfcSOleg Nesterov 2166142b18ddSOleg Nesterov /* 2167142b18ddSOleg Nesterov * TODO: move copy_insn/etc into _register and remove this hack. 2168142b18ddSOleg Nesterov * After we hit the bp, _unregister + _register can install the 2169142b18ddSOleg Nesterov * new and not-yet-analyzed uprobe at the same address, restart. 2170142b18ddSOleg Nesterov */ 217171434f2fSOleg Nesterov if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags))) 217274e59dfcSOleg Nesterov goto out; 21730326f5a9SSrikar Dronamraju 217409d3f015SAndrea Parri /* 217509d3f015SAndrea Parri * Pairs with the smp_wmb() in prepare_uprobe(). 217609d3f015SAndrea Parri * 217709d3f015SAndrea Parri * Guarantees that if we see the UPROBE_COPY_INSN bit set, then 217809d3f015SAndrea Parri * we must also see the stores to &uprobe->arch performed by the 217909d3f015SAndrea Parri * prepare_uprobe() call. 218009d3f015SAndrea Parri */ 218109d3f015SAndrea Parri smp_rmb(); 218209d3f015SAndrea Parri 218372fd293aSOleg Nesterov /* Tracing handlers use ->utask to communicate with fetch methods */ 218472fd293aSOleg Nesterov if (!get_utask()) 218572fd293aSOleg Nesterov goto out; 218672fd293aSOleg Nesterov 21876fe50a28SDavid A. 
Long if (arch_uprobe_ignore(&uprobe->arch, regs)) 21886fe50a28SDavid A. Long goto out; 21896fe50a28SDavid A. Long 21900326f5a9SSrikar Dronamraju handler_chain(uprobe, regs); 21916fe50a28SDavid A. Long 21928a6b1732SOleg Nesterov if (arch_uprobe_skip_sstep(&uprobe->arch, regs)) 21930578a970SOleg Nesterov goto out; 21940326f5a9SSrikar Dronamraju 2195608e7427SOleg Nesterov if (!pre_ssout(uprobe, regs, bp_vaddr)) 21960326f5a9SSrikar Dronamraju return; 21970326f5a9SSrikar Dronamraju 21988a6b1732SOleg Nesterov /* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */ 21990578a970SOleg Nesterov out: 22000326f5a9SSrikar Dronamraju put_uprobe(uprobe); 22010326f5a9SSrikar Dronamraju } 22020326f5a9SSrikar Dronamraju 22030326f5a9SSrikar Dronamraju /* 22040326f5a9SSrikar Dronamraju * Perform required fix-ups and disable singlestep. 22050326f5a9SSrikar Dronamraju * Allow pending signals to take effect. 22060326f5a9SSrikar Dronamraju */ 22070326f5a9SSrikar Dronamraju static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs) 22080326f5a9SSrikar Dronamraju { 22090326f5a9SSrikar Dronamraju struct uprobe *uprobe; 2210014940baSOleg Nesterov int err = 0; 22110326f5a9SSrikar Dronamraju 22120326f5a9SSrikar Dronamraju uprobe = utask->active_uprobe; 22130326f5a9SSrikar Dronamraju if (utask->state == UTASK_SSTEP_ACK) 2214014940baSOleg Nesterov err = arch_uprobe_post_xol(&uprobe->arch, regs); 22150326f5a9SSrikar Dronamraju else if (utask->state == UTASK_SSTEP_TRAPPED) 22160326f5a9SSrikar Dronamraju arch_uprobe_abort_xol(&uprobe->arch, regs); 22170326f5a9SSrikar Dronamraju else 22180326f5a9SSrikar Dronamraju WARN_ON_ONCE(1); 22190326f5a9SSrikar Dronamraju 22200326f5a9SSrikar Dronamraju put_uprobe(uprobe); 22210326f5a9SSrikar Dronamraju utask->active_uprobe = NULL; 22220326f5a9SSrikar Dronamraju utask->state = UTASK_RUNNING; 2223d4b3b638SSrikar Dronamraju xol_free_insn_slot(current); 22240326f5a9SSrikar Dronamraju 22250326f5a9SSrikar Dronamraju spin_lock_irq(¤t->sighand->siglock); 22260326f5a9SSrikar Dronamraju recalc_sigpending(); /* see uprobe_deny_signal() */ 22270326f5a9SSrikar Dronamraju spin_unlock_irq(¤t->sighand->siglock); 2228014940baSOleg Nesterov 2229014940baSOleg Nesterov if (unlikely(err)) { 2230014940baSOleg Nesterov uprobe_warn(current, "execute the probed insn, sending SIGILL."); 223155a3235fSEric W. Biederman force_sig(SIGILL, current); 2232014940baSOleg Nesterov } 22330326f5a9SSrikar Dronamraju } 22340326f5a9SSrikar Dronamraju 22350326f5a9SSrikar Dronamraju /* 22361b08e907SOleg Nesterov * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and 22371b08e907SOleg Nesterov * allows the thread to return from interrupt. After that handle_swbp() 22381b08e907SOleg Nesterov * sets utask->active_uprobe. 22390326f5a9SSrikar Dronamraju * 22401b08e907SOleg Nesterov * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag 22411b08e907SOleg Nesterov * and allows the thread to return from interrupt. 22420326f5a9SSrikar Dronamraju * 22430326f5a9SSrikar Dronamraju * While returning to userspace, thread notices the TIF_UPROBE flag and calls 22440326f5a9SSrikar Dronamraju * uprobe_notify_resume(). 
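 *
 * Sketch of the two paths (illustrative):
 *
 *	breakpoint hit:  uprobe_pre_sstep_notifier() sets TIF_UPROBE;
 *			 uprobe_notify_resume() -> handle_swbp()
 *	sstep complete:  uprobe_post_sstep_notifier() sets UTASK_SSTEP_ACK
 *			 and TIF_UPROBE; uprobe_notify_resume() ->
 *			 handle_singlestep()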
22450326f5a9SSrikar Dronamraju */ 22460326f5a9SSrikar Dronamraju void uprobe_notify_resume(struct pt_regs *regs) 22470326f5a9SSrikar Dronamraju { 22480326f5a9SSrikar Dronamraju struct uprobe_task *utask; 22490326f5a9SSrikar Dronamraju 2250db023ea5SOleg Nesterov clear_thread_flag(TIF_UPROBE); 2251db023ea5SOleg Nesterov 22520326f5a9SSrikar Dronamraju utask = current->utask; 22531b08e907SOleg Nesterov if (utask && utask->active_uprobe) 22540326f5a9SSrikar Dronamraju handle_singlestep(utask, regs); 22551b08e907SOleg Nesterov else 22561b08e907SOleg Nesterov handle_swbp(regs); 22570326f5a9SSrikar Dronamraju } 22580326f5a9SSrikar Dronamraju 22590326f5a9SSrikar Dronamraju /* 22600326f5a9SSrikar Dronamraju * uprobe_pre_sstep_notifier gets called from interrupt context as part of 22610326f5a9SSrikar Dronamraju * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit. 22620326f5a9SSrikar Dronamraju */ 22630326f5a9SSrikar Dronamraju int uprobe_pre_sstep_notifier(struct pt_regs *regs) 22640326f5a9SSrikar Dronamraju { 22650dfd0eb8SAnton Arapov if (!current->mm) 22660dfd0eb8SAnton Arapov return 0; 22670dfd0eb8SAnton Arapov 22680dfd0eb8SAnton Arapov if (!test_bit(MMF_HAS_UPROBES, ¤t->mm->flags) && 22690dfd0eb8SAnton Arapov (!current->utask || !current->utask->return_instances)) 22700326f5a9SSrikar Dronamraju return 0; 22710326f5a9SSrikar Dronamraju 22720326f5a9SSrikar Dronamraju set_thread_flag(TIF_UPROBE); 22730326f5a9SSrikar Dronamraju return 1; 22740326f5a9SSrikar Dronamraju } 22750326f5a9SSrikar Dronamraju 22760326f5a9SSrikar Dronamraju /* 22770326f5a9SSrikar Dronamraju * uprobe_post_sstep_notifier gets called in interrupt context as part of notifier 22780326f5a9SSrikar Dronamraju * mechanism. Set TIF_UPROBE flag and indicate completion of singlestep. 22790326f5a9SSrikar Dronamraju */ 22800326f5a9SSrikar Dronamraju int uprobe_post_sstep_notifier(struct pt_regs *regs) 22810326f5a9SSrikar Dronamraju { 22820326f5a9SSrikar Dronamraju struct uprobe_task *utask = current->utask; 22830326f5a9SSrikar Dronamraju 22840326f5a9SSrikar Dronamraju if (!current->mm || !utask || !utask->active_uprobe) 22850326f5a9SSrikar Dronamraju /* task is currently not uprobed */ 22860326f5a9SSrikar Dronamraju return 0; 22870326f5a9SSrikar Dronamraju 22880326f5a9SSrikar Dronamraju utask->state = UTASK_SSTEP_ACK; 22890326f5a9SSrikar Dronamraju set_thread_flag(TIF_UPROBE); 22900326f5a9SSrikar Dronamraju return 1; 22910326f5a9SSrikar Dronamraju } 22920326f5a9SSrikar Dronamraju 22930326f5a9SSrikar Dronamraju static struct notifier_block uprobe_exception_nb = { 22940326f5a9SSrikar Dronamraju .notifier_call = arch_uprobe_exception_notify, 22950326f5a9SSrikar Dronamraju .priority = INT_MAX-1, /* notified after kprobes, kgdb */ 22960326f5a9SSrikar Dronamraju }; 22970326f5a9SSrikar Dronamraju 2298aad42dd4SNadav Amit void __init uprobes_init(void) 2299a5f4374aSIngo Molnar { 2300a5f4374aSIngo Molnar int i; 2301a5f4374aSIngo Molnar 230266d06dffSOleg Nesterov for (i = 0; i < UPROBES_HASH_SZ; i++) 2303a5f4374aSIngo Molnar mutex_init(&uprobes_mmap_mutex[i]); 23040326f5a9SSrikar Dronamraju 2305aad42dd4SNadav Amit BUG_ON(percpu_init_rwsem(&dup_mmap_sem)); 230632cdba1eSOleg Nesterov 2307aad42dd4SNadav Amit BUG_ON(register_die_notifier(&uprobe_exception_nb)); 2308a5f4374aSIngo Molnar } 2309