// SPDX-License-Identifier: GPL-2.0+
/*
 * User-space Probes (UProbes)
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/export.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
#include <linux/swap.h>		/* folio_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>
#include <linux/khugepaged.h>

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE

static struct rb_root uprobes_tree = RB_ROOT;
/*
 * allows us to skip the uprobe_mmap if there are no uprobe events active
 * at this time.  Probably a fine grained per inode count is better?
 */
#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */

#define UPROBES_HASH_SZ	13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem);

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN	0

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	refcount_t		ref;
	struct rw_semaphore	register_rwsem;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct uprobe_consumer	*consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	loff_t			offset;
	loff_t			ref_ctr_offset;
	unsigned long		flags;

	/*
	 * The generic code assumes that it has two members of unknown type
	 * owned by the arch-specific code:
	 *
	 *	insn -	copy_insn() saves the original instruction here for
	 *		arch_uprobe_analyze_insn().
	 *
	 *	ixol -	potentially modified instruction to execute out of
	 *		line, copied to xol_area by xol_get_insn_slot().
	 */
	struct arch_uprobe	arch;
};

struct delayed_uprobe {
	struct list_head list;
	struct uprobe *uprobe;
	struct mm_struct *mm;
};

static DEFINE_MUTEX(delayed_uprobe_lock);
static LIST_HEAD(delayed_uprobe_list);

/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, thread contests for a slot.  It frees the
 * slot after singlestep. Currently a fixed number of slots are
 * allocated.
 */
struct xol_area {
	wait_queue_head_t		wq;		/* if all slots are busy */
	atomic_t			slot_count;	/* number of in-use slots */
	unsigned long			*bitmap;	/* 0 = free slot */

	struct vm_special_mapping	xol_mapping;
	struct page			*pages[2];
	/*
	 * We keep the vma's vm_start rather than a pointer to the vma
	 * itself.  The probed process or a naughty kernel module could make
	 * the vma go away, and we must handle that reasonably gracefully.
	 */
	unsigned long			vaddr;		/* Page(s) of instruction slots */
};

/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return 1 if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

	if (is_register)
		flags |= VM_WRITE;

	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}

/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @page is mapped at
 * @old_page: the page we are replacing by new_page
 * @new_page: the modified page we replace page by
 *
 * If @new_page is NULL, only unmap @old_page.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *old_page, struct page *new_page)
{
	struct folio *old_folio = page_folio(old_page);
	struct folio *new_folio;
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);
	int err;
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
				addr + PAGE_SIZE);

	if (new_page) {
		new_folio = page_folio(new_page);
		err = mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL);
		if (err)
			return err;
	}

	/* For folio_free_swap() below */
	folio_lock(old_folio);

	mmu_notifier_invalidate_range_start(&range);
	err = -EAGAIN;
	if (!page_vma_mapped_walk(&pvmw))
		goto unlock;
	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);

	if (new_page) {
		folio_get(new_folio);
		page_add_new_anon_rmap(new_page, vma, addr);
		folio_add_lru_vma(new_folio, vma);
	} else
		/* no new page, just dec_mm_counter for old_page */
		dec_mm_counter(mm, MM_ANONPAGES);

	if (!folio_test_anon(old_folio)) {
		dec_mm_counter(mm, mm_counter_file(old_page));
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte)));
	ptep_clear_flush(vma, addr, pvmw.pte);
	if (new_page)
		set_pte_at_notify(mm, addr, pvmw.pte,
				  mk_pte(new_page, vma->vm_page_prot));

	page_remove_rmap(old_page, vma, false);
	if (!folio_mapped(old_folio))
		folio_free_swap(old_folio);
	page_vma_mapped_walk_done(&pvmw);
	folio_put(old_folio);

	err = 0;
 unlock:
	mmu_notifier_invalidate_range_end(&range);
	folio_unlock(old_folio);
	return err;
}

/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}

/**
 * is_trap_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_trap_insn
 * Returns true if @insn is a breakpoint instruction.
 *
 * This function is needed for the case where an architecture has multiple
 * trap instructions (like powerpc).
 */
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
	return is_swbp_insn(insn);
}

static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
	kunmap_atomic(kaddr);
}

static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
	kunmap_atomic(kaddr);
}

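/*
 * Check whether the opcode at @vaddr in @page still needs to be changed
 * to @new_opcode. Returns 1 if the write should proceed, or 0 if the page
 * already holds the desired state (breakpoint already installed when
 * registering, or original instruction already restored when unregistering).
 */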
static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
	uprobe_opcode_t old_opcode;
	bool is_swbp;

	/*
	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
	 * We do not check if it is any other 'trap variant' which could
	 * be conditional trap instruction such as the one powerpc supports.
	 *
	 * The logic is that we do not care if the underlying instruction
	 * is a trap variant; uprobes always wins over any other (gdb)
	 * breakpoint.
	 */
	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
	is_swbp = is_swbp_insn(&old_opcode);

	if (is_swbp_insn(new_opcode)) {
		if (is_swbp)		/* register: already installed? */
			return 0;
	} else {
		if (!is_swbp)		/* unregister: was it changed by us? */
			return 0;
	}

	return 1;
}

static struct delayed_uprobe *
delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	list_for_each_entry(du, &delayed_uprobe_list, list)
		if (du->uprobe == uprobe && du->mm == mm)
			return du;
	return NULL;
}

static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	if (delayed_uprobe_check(uprobe, mm))
		return 0;

	du = kzalloc(sizeof(*du), GFP_KERNEL);
	if (!du)
		return -ENOMEM;

	du->uprobe = uprobe;
	du->mm = mm;
	list_add(&du->list, &delayed_uprobe_list);
	return 0;
}

static void delayed_uprobe_delete(struct delayed_uprobe *du)
{
	if (WARN_ON(!du))
		return;
	list_del(&du->list);
	kfree(du);
}

static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct list_head *pos, *q;
	struct delayed_uprobe *du;

	if (!uprobe && !mm)
		return;

	list_for_each_safe(pos, q, &delayed_uprobe_list) {
		du = list_entry(pos, struct delayed_uprobe, list);

		if (uprobe && du->uprobe != uprobe)
			continue;
		if (mm && du->mm != mm)
			continue;

		delayed_uprobe_delete(du);
	}
}

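/*
 * Check whether @vma is the writable, non-shared, file-backed mapping of
 * uprobe->inode that contains the reference counter at ref_ctr_offset.
 */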
static bool valid_ref_ctr_vma(struct uprobe *uprobe,
			      struct vm_area_struct *vma)
{
	unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset);

	return uprobe->ref_ctr_offset &&
		vma->vm_file &&
		file_inode(vma->vm_file) == uprobe->inode &&
		(vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
		vma->vm_start <= vaddr &&
		vma->vm_end > vaddr;
}

static struct vm_area_struct *
find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *tmp;

	for_each_vma(vmi, tmp)
		if (valid_ref_ctr_vma(uprobe, tmp))
			return tmp;

	return NULL;
}

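/*
 * Add @d (+1 or -1) to the reference counter at @vaddr in @mm. The counter
 * page is pinned with get_user_pages_remote() and updated through a
 * temporary kernel mapping; the update is refused if it would drive the
 * counter negative.
 */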
static int
__update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
{
	void *kaddr;
	struct page *page;
	int ret;
	short *ptr;

	if (!vaddr || !d)
		return -EINVAL;

	ret = get_user_pages_remote(mm, vaddr, 1,
				    FOLL_WRITE, &page, NULL);
	if (unlikely(ret <= 0)) {
		/*
		 * We are asking for 1 page. If get_user_pages_remote() fails,
		 * it may return 0, in that case we have to return error.
		 */
		return ret == 0 ? -EBUSY : ret;
	}

	kaddr = kmap_atomic(page);
	ptr = kaddr + (vaddr & ~PAGE_MASK);

	if (unlikely(*ptr + d < 0)) {
		pr_warn("ref_ctr going negative. vaddr: 0x%lx, "
			"curr val: %d, delta: %d\n", vaddr, *ptr, d);
		ret = -EINVAL;
		goto out;
	}

	*ptr += d;
	ret = 0;
out:
	kunmap_atomic(kaddr);
	put_page(page);
	return ret;
}

static void update_ref_ctr_warn(struct uprobe *uprobe,
				struct mm_struct *mm, short d)
{
	pr_warn("ref_ctr %s failed for inode: 0x%lx offset: "
		"0x%llx ref_ctr_offset: 0x%llx of mm: 0x%pK\n",
		d > 0 ? "increment" : "decrement", uprobe->inode->i_ino,
		(unsigned long long) uprobe->offset,
		(unsigned long long) uprobe->ref_ctr_offset, mm);
}

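/*
 * Update the reference counter of @uprobe in @mm by @d. If the counter
 * area is not (yet) mapped, a pending increment is recorded on
 * delayed_uprobe_list so it can be applied once the mapping shows up;
 * a decrement drops any pending entry instead.
 */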
static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
			  short d)
{
	struct vm_area_struct *rc_vma;
	unsigned long rc_vaddr;
	int ret = 0;

	rc_vma = find_ref_ctr_vma(uprobe, mm);

	if (rc_vma) {
		rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset);
		ret = __update_ref_ctr(mm, rc_vaddr, d);
		if (ret)
			update_ref_ctr_warn(uprobe, mm, d);

		if (d > 0)
			return ret;
	}

	mutex_lock(&delayed_uprobe_lock);
	if (d > 0)
		ret = delayed_uprobe_add(uprobe, mm);
	else
		delayed_uprobe_remove(uprobe, mm);
	mutex_unlock(&delayed_uprobe_lock);

	return ret;
}

/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable length instruction and the
 * breakpoint instruction is not of the smallest length instruction
 * supported by that architecture then we need to modify is_trap_at_addr and
 * uprobe_write_opcode accordingly. This would never be a problem for archs
 * that have fixed length instructions.
 *
 * uprobe_write_opcode - write the opcode at a given virtual address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_lock held for write.
 * Return 0 (success) or a negative errno.
 */
int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
			unsigned long vaddr, uprobe_opcode_t opcode)
{
	struct uprobe *uprobe;
	struct page *old_page, *new_page;
	struct vm_area_struct *vma;
	int ret, is_register, ref_ctr_updated = 0;
	bool orig_page_huge = false;
	unsigned int gup_flags = FOLL_FORCE;

	is_register = is_swbp_insn(&opcode);
	uprobe = container_of(auprobe, struct uprobe, arch);

retry:
	if (is_register)
		gup_flags |= FOLL_SPLIT_PMD;
	/* Read the page with vaddr into memory */
	old_page = get_user_page_vma_remote(mm, vaddr, gup_flags, &vma);
	if (IS_ERR_OR_NULL(old_page))
		return old_page ? PTR_ERR(old_page) : 0;

	ret = verify_opcode(old_page, vaddr, &opcode);
	if (ret <= 0)
		goto put_old;

	if (is_zero_page(old_page)) {
		ret = -EINVAL;
		goto put_old;
	}

	if (WARN(!is_register && PageCompound(old_page),
		 "uprobe unregister should never work on compound page\n")) {
		ret = -EINVAL;
		goto put_old;
	}

	/* We are going to replace instruction, update ref_ctr. */
	if (!ref_ctr_updated && uprobe->ref_ctr_offset) {
		ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
		if (ret)
			goto put_old;

		ref_ctr_updated = 1;
	}

	ret = 0;
	if (!is_register && !PageAnon(old_page))
		goto put_old;

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_old;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);
	copy_highpage(new_page, old_page);
	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

	if (!is_register) {
		struct page *orig_page;
		pgoff_t index;

		VM_BUG_ON_PAGE(!PageAnon(old_page), old_page);

		index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
		orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
					  index);

		if (orig_page) {
			if (PageUptodate(orig_page) &&
			    pages_identical(new_page, orig_page)) {
				/* let go new_page */
				put_page(new_page);
				new_page = NULL;

				if (PageCompound(orig_page))
					orig_page_huge = true;
			}
			put_page(orig_page);
		}
	}

	ret = __replace_page(vma, vaddr, old_page, new_page);
	if (new_page)
		put_page(new_page);
put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;

	/* Revert back reference counter if instruction update failed. */
	if (ret && is_register && ref_ctr_updated)
		update_ref_ctr(uprobe, mm, -1);

	/* try collapse pmd for compound page */
	if (!ret && orig_page_huge)
		collapse_pte_mapped_thp(mm, vaddr, false);

	return ret;
}

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr,
			*(uprobe_opcode_t *)&auprobe->insn);
}

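/*
 * Refcounting helpers: get_uprobe() takes an extra reference on @uprobe;
 * put_uprobe() drops one and, on the last reference, purges any pending
 * delayed_uprobe_list entries and frees the uprobe.
 */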
static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
	refcount_inc(&uprobe->ref);
	return uprobe;
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (refcount_dec_and_test(&uprobe->ref)) {
		/*
		 * If application munmap(exec_vma) before uprobe_unregister()
		 * gets called, we don't get a chance to remove uprobe from
		 * delayed_uprobe_list from remove_breakpoint(). Do it here.
		 */
		mutex_lock(&delayed_uprobe_lock);
		delayed_uprobe_remove(uprobe, NULL);
		mutex_unlock(&delayed_uprobe_lock);
		kfree(uprobe);
	}
}

static __always_inline
int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset,
	       const struct uprobe *r)
{
	if (l_inode < r->inode)
		return -1;

	if (l_inode > r->inode)
		return 1;

	if (l_offset < r->offset)
		return -1;

	if (l_offset > r->offset)
		return 1;

	return 0;
}

#define __node_2_uprobe(node) \
	rb_entry((node), struct uprobe, rb_node)

struct __uprobe_key {
	struct inode *inode;
	loff_t offset;
};

static inline int __uprobe_cmp_key(const void *key, const struct rb_node *b)
{
	const struct __uprobe_key *a = key;
	return uprobe_cmp(a->inode, a->offset, __node_2_uprobe(b));
}

static inline int __uprobe_cmp(struct rb_node *a, const struct rb_node *b)
{
	struct uprobe *u = __node_2_uprobe(a);
	return uprobe_cmp(u->inode, u->offset, __node_2_uprobe(b));
}

static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
	struct __uprobe_key key = {
		.inode = inode,
		.offset = offset,
	};
	struct rb_node *node = rb_find(&key, &uprobes_tree, __uprobe_cmp_key);

	if (node)
		return get_uprobe(__node_2_uprobe(node));

	return NULL;
}

/*
 * Find a uprobe corresponding to a given inode:offset
 * Acquires uprobes_treelock
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe;

	spin_lock(&uprobes_treelock);
	uprobe = __find_uprobe(inode, offset);
	spin_unlock(&uprobes_treelock);

	return uprobe;
}

static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node *node;

	node = rb_find_add(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp);
	if (node)
		return get_uprobe(__node_2_uprobe(node));

	/* get access + creation ref */
	refcount_set(&uprobe->ref, 2);
	return NULL;
}

/*
 * Acquire uprobes_treelock.
 * Matching uprobe already exists in rbtree;
 *	increment (access refcount) and return the matching uprobe.
 *
 * No matching uprobe; insert the uprobe in rb_tree;
 *	get a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	spin_lock(&uprobes_treelock);
	u = __insert_uprobe(uprobe);
	spin_unlock(&uprobes_treelock);

	return u;
}

static void
ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe)
{
	pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx "
		"ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n",
		uprobe->inode->i_ino, (unsigned long long) uprobe->offset,
		(unsigned long long) cur_uprobe->ref_ctr_offset,
		(unsigned long long) uprobe->ref_ctr_offset);
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
				   loff_t ref_ctr_offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return NULL;

	uprobe->inode = inode;
	uprobe->offset = offset;
	uprobe->ref_ctr_offset = ref_ctr_offset;
	init_rwsem(&uprobe->register_rwsem);
	init_rwsem(&uprobe->consumer_rwsem);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);
	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe) {
		if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) {
			ref_ctr_mismatch_warn(cur_uprobe, uprobe);
			put_uprobe(cur_uprobe);
			kfree(uprobe);
			return ERR_PTR(-EINVAL);
		}
		kfree(uprobe);
		uprobe = cur_uprobe;
	}

	return uprobe;
}

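/*
 * Add consumer @uc at the head of @uprobe's consumer list, serialized
 * against other list updates by consumer_rwsem.
 */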
static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	uc->next = uprobe->consumers;
	uprobe->consumers = uc;
	up_write(&uprobe->consumer_rwsem);
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if the @uc is deleted successfully
 * or return false.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	struct uprobe_consumer **con;
	bool ret = false;

	down_write(&uprobe->consumer_rwsem);
	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
		if (*con == uc) {
			*con = uc->next;
			ret = true;
			break;
		}
	}
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static int __copy_insn(struct address_space *mapping, struct file *filp,
			void *insn, int nbytes, loff_t offset)
{
	struct page *page;
	/*
	 * Ensure that the page that has the original instruction is populated
	 * and in page-cache. If ->read_folio == NULL it must be shmem_mapping(),
	 * see uprobe_register().
	 */
	if (mapping->a_ops->read_folio)
		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
	else
		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
	if (IS_ERR(page))
		return PTR_ERR(page);

	copy_from_page(page, offset, insn, nbytes);
	put_page(page);

	return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping = uprobe->inode->i_mapping;
	loff_t offs = uprobe->offset;
	void *insn = &uprobe->arch.insn;
	int size = sizeof(uprobe->arch.insn);
	int len, err = -EIO;

	/* Copy only available bytes, -EIO if nothing was read */
	do {
		if (offs >= i_size_read(uprobe->inode))
			break;

		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
		err = __copy_insn(mapping, filp, insn, len, offs);
		if (err)
			break;

		insn += len;
		offs += len;
		size -= len;
	} while (size);

	return err;
}

static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
				struct mm_struct *mm, unsigned long vaddr)
{
	int ret = 0;

	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		return ret;

	/* TODO: move this into _register, until then we abuse this sem. */
	down_write(&uprobe->consumer_rwsem);
	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		goto out;

	ret = copy_insn(uprobe, file);
	if (ret)
		goto out;

	ret = -ENOTSUPP;
	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
		goto out;

	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
	if (ret)
		goto out;

	smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
	set_bit(UPROBE_COPY_INSN, &uprobe->flags);

 out:
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static inline bool consumer_filter(struct uprobe_consumer *uc,
				   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	return !uc->filter || uc->filter(uc, ctx, mm);
}

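/*
 * Ask each registered consumer whether it is interested in @mm for the
 * given @ctx. Returns true as soon as one consumer accepts, false if all
 * of them decline.
 */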
static bool filter_chain(struct uprobe *uprobe,
			 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct uprobe_consumer *uc;
	bool ret = false;

	down_read(&uprobe->consumer_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		ret = consumer_filter(uc, ctx, mm);
		if (ret)
			break;
	}
	up_read(&uprobe->consumer_rwsem);

	return ret;
}

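/*
 * Prepare the original instruction (copy + arch analysis) if that has not
 * been done yet, then store the breakpoint instruction in @mm at @vaddr.
 */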
static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	bool first_uprobe;
	int ret;

	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
	if (ret)
		return ret;

	/*
	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
	 * the task can hit this breakpoint right after __replace_page().
	 */
	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
	if (first_uprobe)
		set_bit(MMF_HAS_UPROBES, &mm->flags);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		clear_bit(MMF_HAS_UPROBES, &mm->flags);

	return ret;
}

static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
	set_bit(MMF_RECALC_UPROBES, &mm->flags);
	return set_orig_insn(&uprobe->arch, mm, vaddr);
}

static inline bool uprobe_is_active(struct uprobe *uprobe)
{
	return !RB_EMPTY_NODE(&uprobe->rb_node);
}
/*
 * There could be threads that have already hit the breakpoint. They
 * will recheck the current insn and restart if find_uprobe() fails.
 * See find_active_uprobe().
 */
static void delete_uprobe(struct uprobe *uprobe)
{
	if (WARN_ON(!uprobe_is_active(uprobe)))
		return;

	spin_lock(&uprobes_treelock);
	rb_erase(&uprobe->rb_node, &uprobes_tree);
	spin_unlock(&uprobes_treelock);
	RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
	put_uprobe(uprobe);
}

struct map_info {
	struct map_info *next;
	struct mm_struct *mm;
	unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
	struct map_info *next = info->next;
	kfree(info);
	return next;
}

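/*
 * Build a list of (mm, vaddr) pairs for every vma that maps
 * @mapping:@offset and is valid for this (un)register operation. The
 * interval tree is walked under i_mmap_lock_read(); map_info entries that
 * could not be allocated with GFP_NOWAIT inside the lock are allocated
 * afterwards and the walk is retried.
 */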
static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct map_info *curr = NULL;
	struct map_info *prev = NULL;
	struct map_info *info;
	int more = 0;

 again:
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		if (!prev && !more) {
			/*
			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
			 * reclaim. This is optimistic, no harm done if it fails.
			 */
			prev = kmalloc(sizeof(struct map_info),
					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (prev)
				prev->next = NULL;
		}
		if (!prev) {
			more++;
			continue;
		}

		if (!mmget_not_zero(vma->vm_mm))
			continue;

		info = prev;
		prev = prev->next;
		info->next = curr;
		curr = info;

		info->mm = vma->vm_mm;
		info->vaddr = offset_to_vaddr(vma, offset);
	}
	i_mmap_unlock_read(mapping);

	if (!more)
		goto out;

	prev = curr;
	while (curr) {
		mmput(curr->mm);
		curr = curr->next;
	}

	do {
		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
		if (!info) {
			curr = ERR_PTR(-ENOMEM);
			goto out;
		}
		info->next = prev;
		prev = info;
	} while (--more);

	goto again;
 out:
	while (prev)
		prev = free_map_info(prev);
	return curr;
}

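/*
 * For every mm that currently maps uprobe->inode:offset, install the
 * breakpoint (when registering the new consumer @new) or remove it (when
 * @new is NULL and no remaining consumer wants this mm). dup_mmap_sem is
 * held for write to keep fork()'s dup_mmap() from copying the mapping
 * while we work on it.
 */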
1033bdf8647cSOleg Nesterov static int
register_for_each_vma(struct uprobe * uprobe,struct uprobe_consumer * new)1034bdf8647cSOleg Nesterov register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
1035a5f4374aSIngo Molnar {
1036bdf8647cSOleg Nesterov bool is_register = !!new;
103726872090SOleg Nesterov struct map_info *info;
103826872090SOleg Nesterov int err = 0;
103926872090SOleg Nesterov
104032cdba1eSOleg Nesterov percpu_down_write(&dup_mmap_sem);
104126872090SOleg Nesterov info = build_map_info(uprobe->inode->i_mapping,
104226872090SOleg Nesterov uprobe->offset, is_register);
104332cdba1eSOleg Nesterov if (IS_ERR(info)) {
104432cdba1eSOleg Nesterov err = PTR_ERR(info);
104532cdba1eSOleg Nesterov goto out;
104632cdba1eSOleg Nesterov }
104726872090SOleg Nesterov
104826872090SOleg Nesterov while (info) {
104926872090SOleg Nesterov struct mm_struct *mm = info->mm;
1050a5f4374aSIngo Molnar struct vm_area_struct *vma;
1051a5f4374aSIngo Molnar
1052076a365bSOleg Nesterov if (err && is_register)
105326872090SOleg Nesterov goto free;
1054a5f4374aSIngo Molnar
1055d8ed45c5SMichel Lespinasse mmap_write_lock(mm);
1056f4d6dfe5SOleg Nesterov vma = find_vma(mm, info->vaddr);
1057f4d6dfe5SOleg Nesterov if (!vma || !valid_vma(vma, is_register) ||
1058f281769eSOleg Nesterov file_inode(vma->vm_file) != uprobe->inode)
105926872090SOleg Nesterov goto unlock;
106026872090SOleg Nesterov
1061f4d6dfe5SOleg Nesterov if (vma->vm_start > info->vaddr ||
1062f4d6dfe5SOleg Nesterov vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
106326872090SOleg Nesterov goto unlock;
1064a5f4374aSIngo Molnar
1065806a98bdSOleg Nesterov if (is_register) {
1066806a98bdSOleg Nesterov /* consult only the "caller", new consumer. */
1067bdf8647cSOleg Nesterov if (consumer_filter(new,
10688a7f2fa0SOleg Nesterov UPROBE_FILTER_REGISTER, mm))
106926872090SOleg Nesterov err = install_breakpoint(uprobe, mm, vma, info->vaddr);
1070806a98bdSOleg Nesterov } else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
10718a7f2fa0SOleg Nesterov if (!filter_chain(uprobe,
10728a7f2fa0SOleg Nesterov UPROBE_FILTER_UNREGISTER, mm))
1073076a365bSOleg Nesterov err |= remove_breakpoint(uprobe, mm, info->vaddr);
1074806a98bdSOleg Nesterov }
107578f74116SOleg Nesterov
107626872090SOleg Nesterov unlock:
1077d8ed45c5SMichel Lespinasse mmap_write_unlock(mm);
107826872090SOleg Nesterov free:
107926872090SOleg Nesterov mmput(mm);
108026872090SOleg Nesterov info = free_map_info(info);
1081a5f4374aSIngo Molnar }
108232cdba1eSOleg Nesterov out:
108332cdba1eSOleg Nesterov percpu_up_write(&dup_mmap_sem);
108426872090SOleg Nesterov return err;
1085a5f4374aSIngo Molnar }
1086a5f4374aSIngo Molnar
108738e967aeSRavi Bangoria static void
108838e967aeSRavi Bangoria __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
1089a5f4374aSIngo Molnar {
109004aab9b2SOleg Nesterov int err;
1091a5f4374aSIngo Molnar
109206d07139SOleg Nesterov if (WARN_ON(!consumer_del(uprobe, uc)))
109304aab9b2SOleg Nesterov return;
109404aab9b2SOleg Nesterov
1095bdf8647cSOleg Nesterov err = register_for_each_vma(uprobe, NULL);
1096a5f4374aSIngo Molnar /* TODO : can't unregister? schedule a worker thread */
1097bb929284SOleg Nesterov if (!uprobe->consumers && !err)
109804aab9b2SOleg Nesterov delete_uprobe(uprobe);
109904aab9b2SOleg Nesterov }
1100a5f4374aSIngo Molnar
1101a5f4374aSIngo Molnar /*
11027140ad38SLinus Torvalds * uprobe_unregister - unregister an already registered probe.
110338e967aeSRavi Bangoria * @inode: the file in which the probe has to be removed.
110438e967aeSRavi Bangoria * @offset: offset from the start of the file.
110538e967aeSRavi Bangoria * @uc: identify which probe if multiple probes are colocated.
110638e967aeSRavi Bangoria */
110738e967aeSRavi Bangoria void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
110838e967aeSRavi Bangoria {
110938e967aeSRavi Bangoria struct uprobe *uprobe;
111038e967aeSRavi Bangoria
111138e967aeSRavi Bangoria uprobe = find_uprobe(inode, offset);
111238e967aeSRavi Bangoria if (WARN_ON(!uprobe))
111338e967aeSRavi Bangoria return;
111438e967aeSRavi Bangoria
111538e967aeSRavi Bangoria down_write(&uprobe->register_rwsem);
111638e967aeSRavi Bangoria __uprobe_unregister(uprobe, uc);
111738e967aeSRavi Bangoria up_write(&uprobe->register_rwsem);
111838e967aeSRavi Bangoria put_uprobe(uprobe);
111938e967aeSRavi Bangoria }
112038e967aeSRavi Bangoria EXPORT_SYMBOL_GPL(uprobe_unregister);
112138e967aeSRavi Bangoria
112238e967aeSRavi Bangoria /*
112338e967aeSRavi Bangoria * __uprobe_register - register a probe
1124a5f4374aSIngo Molnar * @inode: the file in which the probe has to be placed.
1125a5f4374aSIngo Molnar * @offset: offset from the start of the file.
1126e3343e6aSSrikar Dronamraju * @uc: information on how to handle the probe.
1127a5f4374aSIngo Molnar *
112838e967aeSRavi Bangoria * Apart from the access refcount, __uprobe_register() takes a creation
1129a5f4374aSIngo Molnar * refcount (through alloc_uprobe) if and only if this @uprobe is getting
1130a5f4374aSIngo Molnar * inserted into the rbtree (i.e. first consumer for a @inode:@offset
1131a5f4374aSIngo Molnar * tuple). Creation refcount stops uprobe_unregister from freeing the
1132a5f4374aSIngo Molnar * @uprobe even before the register operation is complete. Creation
1133e3343e6aSSrikar Dronamraju * refcount is released when the last @uc for the @uprobe
113438e967aeSRavi Bangoria * unregisters. Caller of __uprobe_register() is required to keep @inode
113561f94203SSong Liu * (and the containing mount) referenced.
1136a5f4374aSIngo Molnar *
1137a5f4374aSIngo Molnar * Return errno if it cannot successfully install probes
1138a5f4374aSIngo Molnar * else return 0 (success)
1139a5f4374aSIngo Molnar */
114038e967aeSRavi Bangoria static int __uprobe_register(struct inode *inode, loff_t offset,
11411cc33161SRavi Bangoria loff_t ref_ctr_offset, struct uprobe_consumer *uc)
1142a5f4374aSIngo Molnar {
1143a5f4374aSIngo Molnar struct uprobe *uprobe;
1144a5f4374aSIngo Molnar int ret;
1145a5f4374aSIngo Molnar
1146ea024870SAnton Arapov /* Uprobe must have at least one set consumer */
1147ea024870SAnton Arapov if (!uc->handler && !uc->ret_handler)
1148ea024870SAnton Arapov return -EINVAL;
1149ea024870SAnton Arapov
115040814f68SOleg Nesterov /* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
11515efe7448SMatthew Wilcox (Oracle) if (!inode->i_mapping->a_ops->read_folio &&
11525efe7448SMatthew Wilcox (Oracle) !shmem_mapping(inode->i_mapping))
115341ccba02SOleg Nesterov return -EIO;
1154f0744af7SOleg Nesterov /* Racy, just to catch the obvious mistakes */
1155a5f4374aSIngo Molnar if (offset > i_size_read(inode))
1156a5f4374aSIngo Molnar return -EINVAL;
1157a5f4374aSIngo Molnar
1158013b2debSOleg Nesterov /*
1159013b2debSOleg Nesterov * This ensures that copy_from_page(), copy_to_page() and
1160013b2debSOleg Nesterov * __update_ref_ctr() can't cross page boundary.
1161013b2debSOleg Nesterov */
1162013b2debSOleg Nesterov if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE))
1163013b2debSOleg Nesterov return -EINVAL;
1164013b2debSOleg Nesterov if (!IS_ALIGNED(ref_ctr_offset, sizeof(short)))
1165013b2debSOleg Nesterov return -EINVAL;
1166013b2debSOleg Nesterov
116766d06dffSOleg Nesterov retry:
11681cc33161SRavi Bangoria uprobe = alloc_uprobe(inode, offset, ref_ctr_offset);
116966d06dffSOleg Nesterov if (!uprobe)
117066d06dffSOleg Nesterov return -ENOMEM;
117122bad382SRavi Bangoria if (IS_ERR(uprobe))
117222bad382SRavi Bangoria return PTR_ERR(uprobe);
117322bad382SRavi Bangoria
117466d06dffSOleg Nesterov /*
117566d06dffSOleg Nesterov * We can race with uprobe_unregister()->delete_uprobe().
117666d06dffSOleg Nesterov * Check uprobe_is_active() and retry if it is false.
117766d06dffSOleg Nesterov */
1178e591c8d7SOleg Nesterov down_write(&uprobe->register_rwsem);
117966d06dffSOleg Nesterov ret = -EAGAIN;
118066d06dffSOleg Nesterov if (likely(uprobe_is_active(uprobe))) {
118138e967aeSRavi Bangoria consumer_add(uprobe, uc);
118238e967aeSRavi Bangoria ret = register_for_each_vma(uprobe, uc);
11839a98e03cSOleg Nesterov if (ret)
118404aab9b2SOleg Nesterov __uprobe_unregister(uprobe, uc);
1185a5f4374aSIngo Molnar }
118666d06dffSOleg Nesterov up_write(&uprobe->register_rwsem);
1187a5f4374aSIngo Molnar put_uprobe(uprobe);
1188a5f4374aSIngo Molnar
118966d06dffSOleg Nesterov if (unlikely(ret == -EAGAIN))
119066d06dffSOleg Nesterov goto retry;
1191a5f4374aSIngo Molnar return ret;
1192a5f4374aSIngo Molnar }
119338e967aeSRavi Bangoria
119438e967aeSRavi Bangoria int uprobe_register(struct inode *inode, loff_t offset,
119538e967aeSRavi Bangoria struct uprobe_consumer *uc)
119638e967aeSRavi Bangoria {
11971cc33161SRavi Bangoria return __uprobe_register(inode, offset, 0, uc);
119838e967aeSRavi Bangoria }
1199e8440c14SJosh Stone EXPORT_SYMBOL_GPL(uprobe_register);
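/*
 * A minimal usage sketch (not part of this file): an in-kernel user that
 * wants to probe one instruction supplies a uprobe_consumer and passes the
 * probed file's inode plus the instruction's file offset.  The names
 * my_handler/my_consumer/my_attach/my_detach below are hypothetical.
 */
#if 0
static int my_handler(struct uprobe_consumer *self, struct pt_regs *regs)
{
	/* runs in the context of the task that hit the breakpoint */
	return 0;		/* 0: keep the breakpoint installed */
}

static struct uprobe_consumer my_consumer = {
	.handler	= my_handler,
};

static int my_attach(struct inode *inode, loff_t offset)
{
	/* the caller must keep @inode (and its mount) referenced */
	return uprobe_register(inode, offset, &my_consumer);
}

static void my_detach(struct inode *inode, loff_t offset)
{
	uprobe_unregister(inode, offset, &my_consumer);
}
#endif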
1200a5f4374aSIngo Molnar
12011cc33161SRavi Bangoria int uprobe_register_refctr(struct inode *inode, loff_t offset,
12021cc33161SRavi Bangoria loff_t ref_ctr_offset, struct uprobe_consumer *uc)
12031cc33161SRavi Bangoria {
12041cc33161SRavi Bangoria return __uprobe_register(inode, offset, ref_ctr_offset, uc);
12051cc33161SRavi Bangoria }
12061cc33161SRavi Bangoria EXPORT_SYMBOL_GPL(uprobe_register_refctr);
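/*
 * Note on @ref_ctr_offset (a sketch of the intended use, not a guarantee of
 * any particular tracer's behaviour): it is the file offset of an SDT-style
 * semaphore, a 16-bit counter in the probed binary.  While a probe on that
 * site is armed the kernel bumps the counter via __update_ref_ctr(), so
 * userspace can guard expensive argument setup with a cheap
 * "if (my_probe_semaphore) ..." test.  Hence the sizeof(short) alignment
 * check in __uprobe_register().
 */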
12071cc33161SRavi Bangoria
1208a5f4374aSIngo Molnar /*
1209788faab7STobias Tefke * uprobe_apply - add or remove the breakpoints for an already registered probe.
1210bdf8647cSOleg Nesterov * @inode: the file in which the probe resides.
1211bdf8647cSOleg Nesterov * @offset: offset from the start of the file.
1212bdf8647cSOleg Nesterov * @uc: consumer which wants to add more or remove some breakpoints
1213bdf8647cSOleg Nesterov * @add: add or remove the breakpoints
1214bdf8647cSOleg Nesterov */
1215bdf8647cSOleg Nesterov int uprobe_apply(struct inode *inode, loff_t offset,
1216bdf8647cSOleg Nesterov struct uprobe_consumer *uc, bool add)
1217bdf8647cSOleg Nesterov {
1218bdf8647cSOleg Nesterov struct uprobe *uprobe;
1219bdf8647cSOleg Nesterov struct uprobe_consumer *con;
1220bdf8647cSOleg Nesterov int ret = -ENOENT;
1221bdf8647cSOleg Nesterov
1222bdf8647cSOleg Nesterov uprobe = find_uprobe(inode, offset);
122306d07139SOleg Nesterov if (WARN_ON(!uprobe))
1224bdf8647cSOleg Nesterov return ret;
1225bdf8647cSOleg Nesterov
1226bdf8647cSOleg Nesterov down_write(&uprobe->register_rwsem);
1227bdf8647cSOleg Nesterov for (con = uprobe->consumers; con && con != uc ; con = con->next)
1228bdf8647cSOleg Nesterov ;
1229bdf8647cSOleg Nesterov if (con)
1230bdf8647cSOleg Nesterov ret = register_for_each_vma(uprobe, add ? uc : NULL);
1231bdf8647cSOleg Nesterov up_write(&uprobe->register_rwsem);
1232bdf8647cSOleg Nesterov put_uprobe(uprobe);
1233bdf8647cSOleg Nesterov
1234bdf8647cSOleg Nesterov return ret;
1235bdf8647cSOleg Nesterov }
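/*
 * Usage sketch for uprobe_apply() (illustrative, not taken from a specific
 * caller): a consumer whose ->filter decision has changed, e.g. a tracer now
 * restricted to a single process, can toggle just the breakpoints without a
 * full unregister/register cycle:
 *
 *	uprobe_apply(inode, offset, &my_consumer, false);  remove from mms that
 *							    no longer pass any filter
 *	uprobe_apply(inode, offset, &my_consumer, true);   (re)install where the
 *							    consumer's filter matches
 *
 * The consumer must already be on the uprobe's list; otherwise -ENOENT is
 * returned.
 */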
1236bdf8647cSOleg Nesterov
1237da1816b1SOleg Nesterov static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
1238da1816b1SOleg Nesterov {
1239fcb72a58SMatthew Wilcox (Oracle) VMA_ITERATOR(vmi, mm, 0);
1240da1816b1SOleg Nesterov struct vm_area_struct *vma;
1241da1816b1SOleg Nesterov int err = 0;
1242da1816b1SOleg Nesterov
1243d8ed45c5SMichel Lespinasse mmap_read_lock(mm);
1244fcb72a58SMatthew Wilcox (Oracle) for_each_vma(vmi, vma) {
1245da1816b1SOleg Nesterov unsigned long vaddr;
1246da1816b1SOleg Nesterov loff_t offset;
1247da1816b1SOleg Nesterov
1248da1816b1SOleg Nesterov if (!valid_vma(vma, false) ||
1249f281769eSOleg Nesterov file_inode(vma->vm_file) != uprobe->inode)
1250da1816b1SOleg Nesterov continue;
1251da1816b1SOleg Nesterov
1252da1816b1SOleg Nesterov offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
1253da1816b1SOleg Nesterov if (uprobe->offset < offset ||
1254da1816b1SOleg Nesterov uprobe->offset >= offset + vma->vm_end - vma->vm_start)
1255da1816b1SOleg Nesterov continue;
1256da1816b1SOleg Nesterov
1257da1816b1SOleg Nesterov vaddr = offset_to_vaddr(vma, uprobe->offset);
1258da1816b1SOleg Nesterov err |= remove_breakpoint(uprobe, mm, vaddr);
1259da1816b1SOleg Nesterov }
1260d8ed45c5SMichel Lespinasse mmap_read_unlock(mm);
1261da1816b1SOleg Nesterov
1262da1816b1SOleg Nesterov return err;
1263da1816b1SOleg Nesterov }
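/*
 * Worked example of the range check above (illustrative numbers, 4K pages):
 * a VMA with vm_pgoff == 0x10 and a length of 0x2000 bytes maps file offsets
 * [0x10000, 0x12000).  A uprobe at uprobe->offset == 0x11008 falls inside
 * that window, so the breakpoint to remove sits at
 * vm_start + (0x11008 - 0x10000), which is what offset_to_vaddr() computes.
 * A uprobe at 0x12000 or beyond is skipped.
 */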
1264da1816b1SOleg Nesterov
1265891c3970SOleg Nesterov static struct rb_node *
1266891c3970SOleg Nesterov find_node_in_range(struct inode *inode, loff_t min, loff_t max)
1267a5f4374aSIngo Molnar {
1268a5f4374aSIngo Molnar struct rb_node *n = uprobes_tree.rb_node;
1269a5f4374aSIngo Molnar
1270a5f4374aSIngo Molnar while (n) {
1271891c3970SOleg Nesterov struct uprobe *u = rb_entry(n, struct uprobe, rb_node);
1272a5f4374aSIngo Molnar
1273891c3970SOleg Nesterov if (inode < u->inode) {
1274a5f4374aSIngo Molnar n = n->rb_left;
1275891c3970SOleg Nesterov } else if (inode > u->inode) {
1276a5f4374aSIngo Molnar n = n->rb_right;
1277891c3970SOleg Nesterov } else {
1278891c3970SOleg Nesterov if (max < u->offset)
1279891c3970SOleg Nesterov n = n->rb_left;
1280891c3970SOleg Nesterov else if (min > u->offset)
1281891c3970SOleg Nesterov n = n->rb_right;
1282891c3970SOleg Nesterov else
1283891c3970SOleg Nesterov break;
1284891c3970SOleg Nesterov }
1285a5f4374aSIngo Molnar }
1286a5f4374aSIngo Molnar
1287891c3970SOleg Nesterov return n;
1288a5f4374aSIngo Molnar }
1289a5f4374aSIngo Molnar
1290a5f4374aSIngo Molnar /*
1291891c3970SOleg Nesterov * For a given range in vma, build a list of probes that need to be inserted.
1292a5f4374aSIngo Molnar */
1293891c3970SOleg Nesterov static void build_probe_list(struct inode *inode,
1294891c3970SOleg Nesterov struct vm_area_struct *vma,
1295891c3970SOleg Nesterov unsigned long start, unsigned long end,
1296891c3970SOleg Nesterov struct list_head *head)
1297a5f4374aSIngo Molnar {
1298891c3970SOleg Nesterov loff_t min, max;
1299891c3970SOleg Nesterov struct rb_node *n, *t;
1300891c3970SOleg Nesterov struct uprobe *u;
1301891c3970SOleg Nesterov
1302891c3970SOleg Nesterov INIT_LIST_HEAD(head);
1303cb113b47SOleg Nesterov min = vaddr_to_offset(vma, start);
1304891c3970SOleg Nesterov max = min + (end - start) - 1;
1305a5f4374aSIngo Molnar
13066f47caa0SOleg Nesterov spin_lock(&uprobes_treelock);
1307891c3970SOleg Nesterov n = find_node_in_range(inode, min, max);
1308891c3970SOleg Nesterov if (n) {
1309891c3970SOleg Nesterov for (t = n; t; t = rb_prev(t)) {
1310891c3970SOleg Nesterov u = rb_entry(t, struct uprobe, rb_node);
1311891c3970SOleg Nesterov if (u->inode != inode || u->offset < min)
1312a5f4374aSIngo Molnar break;
1313891c3970SOleg Nesterov list_add(&u->pending_list, head);
1314f231722aSOleg Nesterov get_uprobe(u);
1315a5f4374aSIngo Molnar }
1316891c3970SOleg Nesterov for (t = n; (t = rb_next(t)); ) {
1317891c3970SOleg Nesterov u = rb_entry(t, struct uprobe, rb_node);
1318891c3970SOleg Nesterov if (u->inode != inode || u->offset > max)
1319891c3970SOleg Nesterov break;
1320891c3970SOleg Nesterov list_add(&u->pending_list, head);
1321f231722aSOleg Nesterov get_uprobe(u);
1322891c3970SOleg Nesterov }
1323891c3970SOleg Nesterov }
13246f47caa0SOleg Nesterov spin_unlock(&uprobes_treelock);
1325a5f4374aSIngo Molnar }
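/*
 * Worked example for the [min, max] window above (illustrative, 4K pages):
 * for a VMA with vm_start == 0x400000, vm_pgoff == 0 and a [start, end)
 * range covering one page, vaddr_to_offset() gives min == 0 and max == 0xfff.
 * find_node_in_range() then lands on any uprobe of this inode inside that
 * window, and the rb_prev()/rb_next() walks collect its neighbours that
 * still satisfy min <= offset <= max.
 */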
1326a5f4374aSIngo Molnar
13271cc33161SRavi Bangoria /* @vma contains reference counter, not the probed instruction. */
13281cc33161SRavi Bangoria static int delayed_ref_ctr_inc(struct vm_area_struct *vma)
13291cc33161SRavi Bangoria {
13301cc33161SRavi Bangoria struct list_head *pos, *q;
13311cc33161SRavi Bangoria struct delayed_uprobe *du;
13321cc33161SRavi Bangoria unsigned long vaddr;
13331cc33161SRavi Bangoria int ret = 0, err = 0;
13341cc33161SRavi Bangoria
13351cc33161SRavi Bangoria mutex_lock(&delayed_uprobe_lock);
13361cc33161SRavi Bangoria list_for_each_safe(pos, q, &delayed_uprobe_list) {
13371cc33161SRavi Bangoria du = list_entry(pos, struct delayed_uprobe, list);
13381cc33161SRavi Bangoria
13391cc33161SRavi Bangoria if (du->mm != vma->vm_mm ||
13401cc33161SRavi Bangoria !valid_ref_ctr_vma(du->uprobe, vma))
13411cc33161SRavi Bangoria continue;
13421cc33161SRavi Bangoria
13431cc33161SRavi Bangoria vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset);
13441cc33161SRavi Bangoria ret = __update_ref_ctr(vma->vm_mm, vaddr, 1);
13451cc33161SRavi Bangoria if (ret) {
13461cc33161SRavi Bangoria update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1);
13471cc33161SRavi Bangoria if (!err)
13481cc33161SRavi Bangoria err = ret;
13491cc33161SRavi Bangoria }
13501cc33161SRavi Bangoria delayed_uprobe_delete(du);
13511cc33161SRavi Bangoria }
13521cc33161SRavi Bangoria mutex_unlock(&delayed_uprobe_lock);
13531cc33161SRavi Bangoria return err;
13541cc33161SRavi Bangoria }
13551cc33161SRavi Bangoria
1356a5f4374aSIngo Molnar /*
13570503ea8fSLiam R. Howlett * Called from mmap_region/vma_merge with mm->mmap_lock acquired.
1358a5f4374aSIngo Molnar *
13595e5be71aSOleg Nesterov * Currently we ignore all errors and always return 0, the callers
13605e5be71aSOleg Nesterov * can't handle the failure anyway.
1361a5f4374aSIngo Molnar */
1362a5f4374aSIngo Molnar int uprobe_mmap(struct vm_area_struct *vma)
1363a5f4374aSIngo Molnar {
1364a5f4374aSIngo Molnar struct list_head tmp_list;
1365665605a2SOleg Nesterov struct uprobe *uprobe, *u;
1366a5f4374aSIngo Molnar struct inode *inode;
1367a5f4374aSIngo Molnar
13681cc33161SRavi Bangoria if (no_uprobe_events())
13691cc33161SRavi Bangoria return 0;
13701cc33161SRavi Bangoria
13711cc33161SRavi Bangoria if (vma->vm_file &&
13721cc33161SRavi Bangoria (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
13731cc33161SRavi Bangoria test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags))
13741cc33161SRavi Bangoria delayed_ref_ctr_inc(vma);
13751cc33161SRavi Bangoria
13761cc33161SRavi Bangoria if (!valid_vma(vma, true))
1377a5f4374aSIngo Molnar return 0;
1378a5f4374aSIngo Molnar
1379f281769eSOleg Nesterov inode = file_inode(vma->vm_file);
1380a5f4374aSIngo Molnar if (!inode)
1381a5f4374aSIngo Molnar return 0;
1382a5f4374aSIngo Molnar
1383a5f4374aSIngo Molnar mutex_lock(uprobes_mmap_hash(inode));
1384891c3970SOleg Nesterov build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
1385806a98bdSOleg Nesterov /*
1386806a98bdSOleg Nesterov * We can race with uprobe_unregister(); this uprobe can already be
1387806a98bdSOleg Nesterov * removed. But in that case filter_chain() must return false, as all
1388806a98bdSOleg Nesterov * consumers have gone away.
1389806a98bdSOleg Nesterov */
1390665605a2SOleg Nesterov list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
1391806a98bdSOleg Nesterov if (!fatal_signal_pending(current) &&
13928a7f2fa0SOleg Nesterov filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
139357683f72SOleg Nesterov unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
13945e5be71aSOleg Nesterov install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
1395a5f4374aSIngo Molnar }
1396a5f4374aSIngo Molnar put_uprobe(uprobe);
1397a5f4374aSIngo Molnar }
1398a5f4374aSIngo Molnar mutex_unlock(uprobes_mmap_hash(inode));
1399a5f4374aSIngo Molnar
14005e5be71aSOleg Nesterov return 0;
1401a5f4374aSIngo Molnar }
1402a5f4374aSIngo Molnar
14039f68f672SOleg Nesterov static bool
14049f68f672SOleg Nesterov vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
14059f68f672SOleg Nesterov {
14069f68f672SOleg Nesterov loff_t min, max;
14079f68f672SOleg Nesterov struct inode *inode;
14089f68f672SOleg Nesterov struct rb_node *n;
14099f68f672SOleg Nesterov
1410f281769eSOleg Nesterov inode = file_inode(vma->vm_file);
14119f68f672SOleg Nesterov
14129f68f672SOleg Nesterov min = vaddr_to_offset(vma, start);
14139f68f672SOleg Nesterov max = min + (end - start) - 1;
14149f68f672SOleg Nesterov
14159f68f672SOleg Nesterov spin_lock(&uprobes_treelock);
14169f68f672SOleg Nesterov n = find_node_in_range(inode, min, max);
14179f68f672SOleg Nesterov spin_unlock(&uprobes_treelock);
14189f68f672SOleg Nesterov
14199f68f672SOleg Nesterov return !!n;
14209f68f672SOleg Nesterov }
14219f68f672SOleg Nesterov
1422682968e0SSrikar Dronamraju /*
1423682968e0SSrikar Dronamraju * Called in context of a munmap of a vma.
1424682968e0SSrikar Dronamraju */
1425cbc91f71SSrikar Dronamraju void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1426682968e0SSrikar Dronamraju {
1427441f1eb7SOleg Nesterov if (no_uprobe_events() || !valid_vma(vma, false))
1428682968e0SSrikar Dronamraju return;
1429682968e0SSrikar Dronamraju
14302fd611a9SOleg Nesterov if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
14312fd611a9SOleg Nesterov return;
14322fd611a9SOleg Nesterov
14339f68f672SOleg Nesterov if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
14349f68f672SOleg Nesterov test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
1435f8ac4ec9SOleg Nesterov return;
1436f8ac4ec9SOleg Nesterov
14379f68f672SOleg Nesterov if (vma_has_uprobes(vma, start, end))
14389f68f672SOleg Nesterov set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
1439682968e0SSrikar Dronamraju }
1440682968e0SSrikar Dronamraju
1441d4b3b638SSrikar Dronamraju /* Slot allocation for XOL */
14426441ec8bSOleg Nesterov static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
1443d4b3b638SSrikar Dronamraju {
1444704bde3cSOleg Nesterov struct vm_area_struct *vma;
1445704bde3cSOleg Nesterov int ret;
1446d4b3b638SSrikar Dronamraju
1447d8ed45c5SMichel Lespinasse if (mmap_write_lock_killable(mm))
1448598fdc1dSMichal Hocko return -EINTR;
1449598fdc1dSMichal Hocko
1450704bde3cSOleg Nesterov if (mm->uprobes_state.xol_area) {
1451704bde3cSOleg Nesterov ret = -EALREADY;
1452d4b3b638SSrikar Dronamraju goto fail;
1453704bde3cSOleg Nesterov }
1454d4b3b638SSrikar Dronamraju
1455af0d95afSOleg Nesterov if (!area->vaddr) {
1456d4b3b638SSrikar Dronamraju /* Try to map as high as possible, this is only a hint. */
1457af0d95afSOleg Nesterov area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
1458af0d95afSOleg Nesterov PAGE_SIZE, 0, 0);
1459ff68dac6SGaowei Pu if (IS_ERR_VALUE(area->vaddr)) {
1460d4b3b638SSrikar Dronamraju ret = area->vaddr;
1461d4b3b638SSrikar Dronamraju goto fail;
1462d4b3b638SSrikar Dronamraju }
1463af0d95afSOleg Nesterov }
1464d4b3b638SSrikar Dronamraju
1465704bde3cSOleg Nesterov vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
1466704bde3cSOleg Nesterov VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
1467704bde3cSOleg Nesterov &area->xol_mapping);
1468704bde3cSOleg Nesterov if (IS_ERR(vma)) {
1469704bde3cSOleg Nesterov ret = PTR_ERR(vma);
1470d4b3b638SSrikar Dronamraju goto fail;
1471704bde3cSOleg Nesterov }
1472d4b3b638SSrikar Dronamraju
1473704bde3cSOleg Nesterov ret = 0;
14745c6338b4SPaul E. McKenney /* pairs with get_xol_area() */
14755c6338b4SPaul E. McKenney smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
1476d4b3b638SSrikar Dronamraju fail:
1477d8ed45c5SMichel Lespinasse mmap_write_unlock(mm);
1478d4b3b638SSrikar Dronamraju
1479d4b3b638SSrikar Dronamraju return ret;
1480d4b3b638SSrikar Dronamraju }
1481d4b3b638SSrikar Dronamraju
1482af0d95afSOleg Nesterov static struct xol_area *__create_xol_area(unsigned long vaddr)
1483d4b3b638SSrikar Dronamraju {
14849b545df8SOleg Nesterov struct mm_struct *mm = current->mm;
1485e78aebfdSAnton Arapov uprobe_opcode_t insn = UPROBE_SWBP_INSN;
14866441ec8bSOleg Nesterov struct xol_area *area;
14879b545df8SOleg Nesterov
14889faed52bSSven Schnelle area = kzalloc(sizeof(*area), GFP_KERNEL);
1489d4b3b638SSrikar Dronamraju if (unlikely(!area))
1490c8a82538SOleg Nesterov goto out;
1491d4b3b638SSrikar Dronamraju
14926396bb22SKees Cook area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long),
14936396bb22SKees Cook GFP_KERNEL);
1494d4b3b638SSrikar Dronamraju if (!area->bitmap)
1495c8a82538SOleg Nesterov goto free_area;
1496c8a82538SOleg Nesterov
1497704bde3cSOleg Nesterov area->xol_mapping.name = "[uprobes]";
1498704bde3cSOleg Nesterov area->xol_mapping.pages = area->pages;
14995b981d83SOleg Nesterov area->pages[0] = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
1500f58bea2fSOleg Nesterov if (!area->pages[0])
1501c8a82538SOleg Nesterov goto free_bitmap;
1502f58bea2fSOleg Nesterov area->pages[1] = NULL;
1503d4b3b638SSrikar Dronamraju
1504af0d95afSOleg Nesterov area->vaddr = vaddr;
1505d4b3b638SSrikar Dronamraju init_waitqueue_head(&area->wq);
15066441ec8bSOleg Nesterov /* Reserve the 1st slot for get_trampoline_vaddr() */
15076441ec8bSOleg Nesterov set_bit(0, area->bitmap);
15086441ec8bSOleg Nesterov atomic_set(&area->slot_count, 1);
1509297e765eSMarcin Nowakowski arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);
1510e78aebfdSAnton Arapov
15116441ec8bSOleg Nesterov if (!xol_add_vma(mm, area))
1512d4b3b638SSrikar Dronamraju return area;
1513d4b3b638SSrikar Dronamraju
1514f58bea2fSOleg Nesterov __free_page(area->pages[0]);
1515c8a82538SOleg Nesterov free_bitmap:
1516d4b3b638SSrikar Dronamraju kfree(area->bitmap);
1517c8a82538SOleg Nesterov free_area:
1518d4b3b638SSrikar Dronamraju kfree(area);
1519c8a82538SOleg Nesterov out:
15206441ec8bSOleg Nesterov return NULL;
15216441ec8bSOleg Nesterov }
15226441ec8bSOleg Nesterov
15236441ec8bSOleg Nesterov /*
15246441ec8bSOleg Nesterov * get_xol_area - Allocate process's xol_area if necessary.
15256441ec8bSOleg Nesterov * This area will be used for storing instructions for execution out of line.
15266441ec8bSOleg Nesterov *
15276441ec8bSOleg Nesterov * Returns the allocated area or NULL.
15286441ec8bSOleg Nesterov */
15296441ec8bSOleg Nesterov static struct xol_area *get_xol_area(void)
15306441ec8bSOleg Nesterov {
15316441ec8bSOleg Nesterov struct mm_struct *mm = current->mm;
15326441ec8bSOleg Nesterov struct xol_area *area;
15336441ec8bSOleg Nesterov
15346441ec8bSOleg Nesterov if (!mm->uprobes_state.xol_area)
1535af0d95afSOleg Nesterov __create_xol_area(0);
15366441ec8bSOleg Nesterov
15375c6338b4SPaul E. McKenney /* Pairs with xol_add_vma() smp_store_release() */
15385c6338b4SPaul E. McKenney area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */
15399b545df8SOleg Nesterov return area;
1540d4b3b638SSrikar Dronamraju }
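/*
 * The release/acquire pairing used here, condensed (a sketch of the pattern
 * only; both sides appear in full in xol_add_vma() and the lockless readers
 * above and in get_trampoline_vaddr()).  A reader observes either NULL or a
 * fully constructed area, never a half-initialized one.
 */
#if 0
	/* writer, xol_add_vma(), after the area is fully initialized: */
	smp_store_release(&mm->uprobes_state.xol_area, area);

	/* lockless reader, e.g. get_trampoline_vaddr(): */
	struct xol_area *a = READ_ONCE(current->mm->uprobes_state.xol_area);
	if (a)
		trampoline_vaddr = a->vaddr;
#endif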
1541d4b3b638SSrikar Dronamraju
1542d4b3b638SSrikar Dronamraju /*
1543d4b3b638SSrikar Dronamraju * uprobe_clear_state - Free the area allocated for slots.
1544d4b3b638SSrikar Dronamraju */
1545d4b3b638SSrikar Dronamraju void uprobe_clear_state(struct mm_struct *mm)
1546d4b3b638SSrikar Dronamraju {
1547d4b3b638SSrikar Dronamraju struct xol_area *area = mm->uprobes_state.xol_area;
1548d4b3b638SSrikar Dronamraju
15491cc33161SRavi Bangoria mutex_lock(&delayed_uprobe_lock);
15501cc33161SRavi Bangoria delayed_uprobe_remove(NULL, mm);
15511cc33161SRavi Bangoria mutex_unlock(&delayed_uprobe_lock);
15521cc33161SRavi Bangoria
1553d4b3b638SSrikar Dronamraju if (!area)
1554d4b3b638SSrikar Dronamraju return;
1555d4b3b638SSrikar Dronamraju
1556f58bea2fSOleg Nesterov put_page(area->pages[0]);
1557d4b3b638SSrikar Dronamraju kfree(area->bitmap);
1558d4b3b638SSrikar Dronamraju kfree(area);
1559d4b3b638SSrikar Dronamraju }
1560d4b3b638SSrikar Dronamraju
156132cdba1eSOleg Nesterov void uprobe_start_dup_mmap(void)
156232cdba1eSOleg Nesterov {
156332cdba1eSOleg Nesterov percpu_down_read(&dup_mmap_sem);
156432cdba1eSOleg Nesterov }
156532cdba1eSOleg Nesterov
156632cdba1eSOleg Nesterov void uprobe_end_dup_mmap(void)
156732cdba1eSOleg Nesterov {
156832cdba1eSOleg Nesterov percpu_up_read(&dup_mmap_sem);
156932cdba1eSOleg Nesterov }
157032cdba1eSOleg Nesterov
1571f8ac4ec9SOleg Nesterov void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
1572f8ac4ec9SOleg Nesterov {
15739f68f672SOleg Nesterov if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
1574f8ac4ec9SOleg Nesterov set_bit(MMF_HAS_UPROBES, &newmm->flags);
15759f68f672SOleg Nesterov /* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
15769f68f672SOleg Nesterov set_bit(MMF_RECALC_UPROBES, &newmm->flags);
15779f68f672SOleg Nesterov }
1578f8ac4ec9SOleg Nesterov }
1579f8ac4ec9SOleg Nesterov
1580d4b3b638SSrikar Dronamraju /*
1581d4b3b638SSrikar Dronamraju * - search for a free slot.
1582d4b3b638SSrikar Dronamraju */
1583d4b3b638SSrikar Dronamraju static unsigned long xol_take_insn_slot(struct xol_area *area)
1584d4b3b638SSrikar Dronamraju {
1585d4b3b638SSrikar Dronamraju unsigned long slot_addr;
1586d4b3b638SSrikar Dronamraju int slot_nr;
1587d4b3b638SSrikar Dronamraju
1588d4b3b638SSrikar Dronamraju do {
1589d4b3b638SSrikar Dronamraju slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
1590d4b3b638SSrikar Dronamraju if (slot_nr < UINSNS_PER_PAGE) {
1591d4b3b638SSrikar Dronamraju if (!test_and_set_bit(slot_nr, area->bitmap))
1592d4b3b638SSrikar Dronamraju break;
1593d4b3b638SSrikar Dronamraju
1594d4b3b638SSrikar Dronamraju slot_nr = UINSNS_PER_PAGE;
1595d4b3b638SSrikar Dronamraju continue;
1596d4b3b638SSrikar Dronamraju }
1597d4b3b638SSrikar Dronamraju wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
1598d4b3b638SSrikar Dronamraju } while (slot_nr >= UINSNS_PER_PAGE);
1599d4b3b638SSrikar Dronamraju
1600d4b3b638SSrikar Dronamraju slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
1601d4b3b638SSrikar Dronamraju atomic_inc(&area->slot_count);
1602d4b3b638SSrikar Dronamraju
1603d4b3b638SSrikar Dronamraju return slot_addr;
1604d4b3b638SSrikar Dronamraju }
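/*
 * Slot arithmetic, worked through (assuming the usual 128-byte
 * UPROBE_XOL_SLOT_BYTES and 4K pages): UINSNS_PER_PAGE is 4096/128 = 32, so
 * the bitmap has 32 slots and slot_nr 5 yields area->vaddr + 5 * 128 =
 * area->vaddr + 640.  Slot 0 is never handed out here because
 * __create_xol_area() reserves it for the uretprobe trampoline.
 */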
1605d4b3b638SSrikar Dronamraju
1606d4b3b638SSrikar Dronamraju /*
1607a6cb3f6dSOleg Nesterov * xol_get_insn_slot - allocate a slot for xol.
1608d4b3b638SSrikar Dronamraju * Returns the allocated slot address or 0.
1609d4b3b638SSrikar Dronamraju */
1610a6cb3f6dSOleg Nesterov static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
1611d4b3b638SSrikar Dronamraju {
1612d4b3b638SSrikar Dronamraju struct xol_area *area;
1613a6cb3f6dSOleg Nesterov unsigned long xol_vaddr;
1614d4b3b638SSrikar Dronamraju
16159b545df8SOleg Nesterov area = get_xol_area();
1616d4b3b638SSrikar Dronamraju if (!area)
1617d4b3b638SSrikar Dronamraju return 0;
1618d4b3b638SSrikar Dronamraju
1619a6cb3f6dSOleg Nesterov xol_vaddr = xol_take_insn_slot(area);
1620a6cb3f6dSOleg Nesterov if (unlikely(!xol_vaddr))
1621d4b3b638SSrikar Dronamraju return 0;
1622d4b3b638SSrikar Dronamraju
1623f58bea2fSOleg Nesterov arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
1624803200e2SOleg Nesterov &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
1625d4b3b638SSrikar Dronamraju
1626a6cb3f6dSOleg Nesterov return xol_vaddr;
1627d4b3b638SSrikar Dronamraju }
1628d4b3b638SSrikar Dronamraju
1629d4b3b638SSrikar Dronamraju /*
1630d4b3b638SSrikar Dronamraju * xol_free_insn_slot - If slot was earlier allocated by
1631d4b3b638SSrikar Dronamraju * @xol_get_insn_slot(), make the slot available for
1632d4b3b638SSrikar Dronamraju * subsequent requests.
1633d4b3b638SSrikar Dronamraju */
1634d4b3b638SSrikar Dronamraju static void xol_free_insn_slot(struct task_struct *tsk)
1635d4b3b638SSrikar Dronamraju {
1636d4b3b638SSrikar Dronamraju struct xol_area *area;
1637d4b3b638SSrikar Dronamraju unsigned long vma_end;
1638d4b3b638SSrikar Dronamraju unsigned long slot_addr;
1639d4b3b638SSrikar Dronamraju
1640d4b3b638SSrikar Dronamraju if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
1641d4b3b638SSrikar Dronamraju return;
1642d4b3b638SSrikar Dronamraju
1643d4b3b638SSrikar Dronamraju slot_addr = tsk->utask->xol_vaddr;
1644af4355e9SOleg Nesterov if (unlikely(!slot_addr))
1645d4b3b638SSrikar Dronamraju return;
1646d4b3b638SSrikar Dronamraju
1647d4b3b638SSrikar Dronamraju area = tsk->mm->uprobes_state.xol_area;
1648d4b3b638SSrikar Dronamraju vma_end = area->vaddr + PAGE_SIZE;
1649d4b3b638SSrikar Dronamraju if (area->vaddr <= slot_addr && slot_addr < vma_end) {
1650d4b3b638SSrikar Dronamraju unsigned long offset;
1651d4b3b638SSrikar Dronamraju int slot_nr;
1652d4b3b638SSrikar Dronamraju
1653d4b3b638SSrikar Dronamraju offset = slot_addr - area->vaddr;
1654d4b3b638SSrikar Dronamraju slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
1655d4b3b638SSrikar Dronamraju if (slot_nr >= UINSNS_PER_PAGE)
1656d4b3b638SSrikar Dronamraju return;
1657d4b3b638SSrikar Dronamraju
1658d4b3b638SSrikar Dronamraju clear_bit(slot_nr, area->bitmap);
1659d4b3b638SSrikar Dronamraju atomic_dec(&area->slot_count);
16602a742cedSOleg Nesterov smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
1661d4b3b638SSrikar Dronamraju if (waitqueue_active(&area->wq))
1662d4b3b638SSrikar Dronamraju wake_up(&area->wq);
1663d4b3b638SSrikar Dronamraju
1664d4b3b638SSrikar Dronamraju tsk->utask->xol_vaddr = 0;
1665d4b3b638SSrikar Dronamraju }
1666d4b3b638SSrikar Dronamraju }
1667d4b3b638SSrikar Dronamraju
166872e6ae28SVictor Kamensky void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
166972e6ae28SVictor Kamensky void *src, unsigned long len)
167072e6ae28SVictor Kamensky {
167172e6ae28SVictor Kamensky /* Initialize the slot */
167272e6ae28SVictor Kamensky copy_to_page(page, vaddr, src, len);
167372e6ae28SVictor Kamensky
167472e6ae28SVictor Kamensky /*
1675885f7f8eSChristoph Hellwig * We probably need flush_icache_user_page() but it needs vma.
167672e6ae28SVictor Kamensky * This should work on most of architectures by default. If
167772e6ae28SVictor Kamensky * architecture needs to do something different it can define
167872e6ae28SVictor Kamensky * its own version of the function.
167972e6ae28SVictor Kamensky */
168072e6ae28SVictor Kamensky flush_dcache_page(page);
168172e6ae28SVictor Kamensky }
168272e6ae28SVictor Kamensky
16830326f5a9SSrikar Dronamraju /**
16840326f5a9SSrikar Dronamraju * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
16850326f5a9SSrikar Dronamraju * @regs: Reflects the saved state of the task after it has hit a breakpoint
16860326f5a9SSrikar Dronamraju * instruction.
16870326f5a9SSrikar Dronamraju * Return the address of the breakpoint instruction.
16880326f5a9SSrikar Dronamraju */
16890326f5a9SSrikar Dronamraju unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
16900326f5a9SSrikar Dronamraju {
16910326f5a9SSrikar Dronamraju return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
16920326f5a9SSrikar Dronamraju }
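/*
 * Example (x86): UPROBE_SWBP_INSN is the one-byte int3, so after the trap
 * the saved ip points just past it and the probed instruction's address is
 * instruction_pointer(regs) - 1.  Architectures whose breakpoint reporting
 * differs override this __weak helper.
 */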
16930326f5a9SSrikar Dronamraju
1694b02ef20aSOleg Nesterov unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
1695b02ef20aSOleg Nesterov {
1696b02ef20aSOleg Nesterov struct uprobe_task *utask = current->utask;
1697b02ef20aSOleg Nesterov
1698b02ef20aSOleg Nesterov if (unlikely(utask && utask->active_uprobe))
1699b02ef20aSOleg Nesterov return utask->vaddr;
1700b02ef20aSOleg Nesterov
1701b02ef20aSOleg Nesterov return instruction_pointer(regs);
1702b02ef20aSOleg Nesterov }
1703b02ef20aSOleg Nesterov
17042bb5e840SOleg Nesterov static struct return_instance *free_ret_instance(struct return_instance *ri)
17052bb5e840SOleg Nesterov {
17062bb5e840SOleg Nesterov struct return_instance *next = ri->next;
17072bb5e840SOleg Nesterov put_uprobe(ri->uprobe);
17082bb5e840SOleg Nesterov kfree(ri);
17092bb5e840SOleg Nesterov return next;
17102bb5e840SOleg Nesterov }
17112bb5e840SOleg Nesterov
17120326f5a9SSrikar Dronamraju /*
17130326f5a9SSrikar Dronamraju * Called with no locks held.
1714788faab7STobias Tefke * Called in context of an exiting or an exec-ing thread.
17150326f5a9SSrikar Dronamraju */
17160326f5a9SSrikar Dronamraju void uprobe_free_utask(struct task_struct *t)
17170326f5a9SSrikar Dronamraju {
17180326f5a9SSrikar Dronamraju struct uprobe_task *utask = t->utask;
17192bb5e840SOleg Nesterov struct return_instance *ri;
17200326f5a9SSrikar Dronamraju
17210326f5a9SSrikar Dronamraju if (!utask)
17220326f5a9SSrikar Dronamraju return;
17230326f5a9SSrikar Dronamraju
1724*eff00c5eSJiri Olsa t->utask = NULL;
17250326f5a9SSrikar Dronamraju if (utask->active_uprobe)
17260326f5a9SSrikar Dronamraju put_uprobe(utask->active_uprobe);
17270326f5a9SSrikar Dronamraju
17280dfd0eb8SAnton Arapov ri = utask->return_instances;
17292bb5e840SOleg Nesterov while (ri)
17302bb5e840SOleg Nesterov ri = free_ret_instance(ri);
17310dfd0eb8SAnton Arapov
1732d4b3b638SSrikar Dronamraju xol_free_insn_slot(t);
17330326f5a9SSrikar Dronamraju kfree(utask);
17340326f5a9SSrikar Dronamraju }
17350326f5a9SSrikar Dronamraju
17360326f5a9SSrikar Dronamraju /*
1737c034f48eSRandy Dunlap * Allocate a uprobe_task object for the task if necessary.
17385a2df662SOleg Nesterov * Called when the thread hits a breakpoint.
17390326f5a9SSrikar Dronamraju *
17400326f5a9SSrikar Dronamraju * Returns:
17410326f5a9SSrikar Dronamraju * - pointer to new uprobe_task on success
17420326f5a9SSrikar Dronamraju * - NULL otherwise
17430326f5a9SSrikar Dronamraju */
17445a2df662SOleg Nesterov static struct uprobe_task *get_utask(void)
17450326f5a9SSrikar Dronamraju {
17465a2df662SOleg Nesterov if (!current->utask)
17475a2df662SOleg Nesterov current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
17485a2df662SOleg Nesterov return current->utask;
17490326f5a9SSrikar Dronamraju }
17500326f5a9SSrikar Dronamraju
1751248d3a7bSOleg Nesterov static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
1752248d3a7bSOleg Nesterov {
1753248d3a7bSOleg Nesterov struct uprobe_task *n_utask;
1754248d3a7bSOleg Nesterov struct return_instance **p, *o, *n;
1755248d3a7bSOleg Nesterov
1756248d3a7bSOleg Nesterov n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1757248d3a7bSOleg Nesterov if (!n_utask)
1758248d3a7bSOleg Nesterov return -ENOMEM;
1759248d3a7bSOleg Nesterov t->utask = n_utask;
1760248d3a7bSOleg Nesterov
1761248d3a7bSOleg Nesterov p = &n_utask->return_instances;
1762248d3a7bSOleg Nesterov for (o = o_utask->return_instances; o; o = o->next) {
1763248d3a7bSOleg Nesterov n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
1764248d3a7bSOleg Nesterov if (!n)
1765248d3a7bSOleg Nesterov return -ENOMEM;
1766248d3a7bSOleg Nesterov
1767248d3a7bSOleg Nesterov *n = *o;
1768f231722aSOleg Nesterov get_uprobe(n->uprobe);
1769248d3a7bSOleg Nesterov n->next = NULL;
1770248d3a7bSOleg Nesterov
1771248d3a7bSOleg Nesterov *p = n;
1772248d3a7bSOleg Nesterov p = &n->next;
1773248d3a7bSOleg Nesterov n_utask->depth++;
1774248d3a7bSOleg Nesterov }
1775248d3a7bSOleg Nesterov
1776248d3a7bSOleg Nesterov return 0;
1777248d3a7bSOleg Nesterov }
1778248d3a7bSOleg Nesterov
1779248d3a7bSOleg Nesterov static void uprobe_warn(struct task_struct *t, const char *msg)
1780248d3a7bSOleg Nesterov {
1781248d3a7bSOleg Nesterov pr_warn("uprobe: %s:%d failed to %s\n",
1782248d3a7bSOleg Nesterov current->comm, current->pid, msg);
1783248d3a7bSOleg Nesterov }
1784248d3a7bSOleg Nesterov
1785aa59c53fSOleg Nesterov static void dup_xol_work(struct callback_head *work)
1786aa59c53fSOleg Nesterov {
1787aa59c53fSOleg Nesterov if (current->flags & PF_EXITING)
1788aa59c53fSOleg Nesterov return;
1789aa59c53fSOleg Nesterov
1790598fdc1dSMichal Hocko if (!__create_xol_area(current->utask->dup_xol_addr) &&
1791598fdc1dSMichal Hocko !fatal_signal_pending(current))
1792aa59c53fSOleg Nesterov uprobe_warn(current, "dup xol area");
1793aa59c53fSOleg Nesterov }
1794aa59c53fSOleg Nesterov
1795e78aebfdSAnton Arapov /*
1796b68e0749SOleg Nesterov * Called in context of a new clone/fork from copy_process.
1797b68e0749SOleg Nesterov */
17983ab67966SOleg Nesterov void uprobe_copy_process(struct task_struct *t, unsigned long flags)
1799b68e0749SOleg Nesterov {
1800248d3a7bSOleg Nesterov struct uprobe_task *utask = current->utask;
1801248d3a7bSOleg Nesterov struct mm_struct *mm = current->mm;
1802aa59c53fSOleg Nesterov struct xol_area *area;
1803248d3a7bSOleg Nesterov
1804b68e0749SOleg Nesterov t->utask = NULL;
1805248d3a7bSOleg Nesterov
18063ab67966SOleg Nesterov if (!utask || !utask->return_instances)
18073ab67966SOleg Nesterov return;
18083ab67966SOleg Nesterov
18093ab67966SOleg Nesterov if (mm == t->mm && !(flags & CLONE_VFORK))
1810248d3a7bSOleg Nesterov return;
1811248d3a7bSOleg Nesterov
1812248d3a7bSOleg Nesterov if (dup_utask(t, utask))
1813248d3a7bSOleg Nesterov return uprobe_warn(t, "dup ret instances");
1814aa59c53fSOleg Nesterov
1815aa59c53fSOleg Nesterov /* The task can fork() after dup_xol_work() fails */
1816aa59c53fSOleg Nesterov area = mm->uprobes_state.xol_area;
1817aa59c53fSOleg Nesterov if (!area)
1818aa59c53fSOleg Nesterov return uprobe_warn(t, "dup xol area");
1819aa59c53fSOleg Nesterov
18203ab67966SOleg Nesterov if (mm == t->mm)
18213ab67966SOleg Nesterov return;
18223ab67966SOleg Nesterov
182332473431SOleg Nesterov t->utask->dup_xol_addr = area->vaddr;
182432473431SOleg Nesterov init_task_work(&t->utask->dup_xol_work, dup_xol_work);
182591989c70SJens Axboe task_work_add(t, &t->utask->dup_xol_work, TWA_RESUME);
1826b68e0749SOleg Nesterov }
1827b68e0749SOleg Nesterov
1828b68e0749SOleg Nesterov /*
1829e78aebfdSAnton Arapov * The current area->vaddr notion assumes the trampoline address is always
1830e78aebfdSAnton Arapov * equal to area->vaddr.
1831e78aebfdSAnton Arapov *
1832e78aebfdSAnton Arapov * Returns -1 in case the xol_area is not allocated.
1833e78aebfdSAnton Arapov */
1834e78aebfdSAnton Arapov static unsigned long get_trampoline_vaddr(void)
1835e78aebfdSAnton Arapov {
1836e78aebfdSAnton Arapov struct xol_area *area;
1837e78aebfdSAnton Arapov unsigned long trampoline_vaddr = -1;
1838e78aebfdSAnton Arapov
18395c6338b4SPaul E. McKenney /* Pairs with xol_add_vma() smp_store_release() */
18405c6338b4SPaul E. McKenney area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */
1841e78aebfdSAnton Arapov if (area)
1842e78aebfdSAnton Arapov trampoline_vaddr = area->vaddr;
1843e78aebfdSAnton Arapov
1844e78aebfdSAnton Arapov return trampoline_vaddr;
1845e78aebfdSAnton Arapov }
1846e78aebfdSAnton Arapov
1847db087ef6SOleg Nesterov static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
1848db087ef6SOleg Nesterov struct pt_regs *regs)
1849a5b7e1a8SOleg Nesterov {
1850a5b7e1a8SOleg Nesterov struct return_instance *ri = utask->return_instances;
1851db087ef6SOleg Nesterov enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;
185286dcb702SOleg Nesterov
185386dcb702SOleg Nesterov while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
1854a5b7e1a8SOleg Nesterov ri = free_ret_instance(ri);
1855a5b7e1a8SOleg Nesterov utask->depth--;
1856a5b7e1a8SOleg Nesterov }
1857a5b7e1a8SOleg Nesterov utask->return_instances = ri;
1858a5b7e1a8SOleg Nesterov }
1859a5b7e1a8SOleg Nesterov
18600dfd0eb8SAnton Arapov static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
18610dfd0eb8SAnton Arapov {
18620dfd0eb8SAnton Arapov struct return_instance *ri;
18630dfd0eb8SAnton Arapov struct uprobe_task *utask;
18640dfd0eb8SAnton Arapov unsigned long orig_ret_vaddr, trampoline_vaddr;
1865db087ef6SOleg Nesterov bool chained;
18660dfd0eb8SAnton Arapov
18670dfd0eb8SAnton Arapov if (!get_xol_area())
18680dfd0eb8SAnton Arapov return;
18690dfd0eb8SAnton Arapov
18700dfd0eb8SAnton Arapov utask = get_utask();
18710dfd0eb8SAnton Arapov if (!utask)
18720dfd0eb8SAnton Arapov return;
18730dfd0eb8SAnton Arapov
1874ded49c55SAnton Arapov if (utask->depth >= MAX_URETPROBE_DEPTH) {
1875ded49c55SAnton Arapov printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
1876ded49c55SAnton Arapov " nestedness limit pid/tgid=%d/%d\n",
1877ded49c55SAnton Arapov current->pid, current->tgid);
1878ded49c55SAnton Arapov return;
1879ded49c55SAnton Arapov }
1880ded49c55SAnton Arapov
18816c58d0e4SOleg Nesterov ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
18820dfd0eb8SAnton Arapov if (!ri)
18836c58d0e4SOleg Nesterov return;
18840dfd0eb8SAnton Arapov
18850dfd0eb8SAnton Arapov trampoline_vaddr = get_trampoline_vaddr();
18860dfd0eb8SAnton Arapov orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
18870dfd0eb8SAnton Arapov if (orig_ret_vaddr == -1)
18880dfd0eb8SAnton Arapov goto fail;
18890dfd0eb8SAnton Arapov
1890a5b7e1a8SOleg Nesterov /* drop the entries invalidated by longjmp() */
1891db087ef6SOleg Nesterov chained = (orig_ret_vaddr == trampoline_vaddr);
1892db087ef6SOleg Nesterov cleanup_return_instances(utask, chained, regs);
1893a5b7e1a8SOleg Nesterov
18940dfd0eb8SAnton Arapov /*
18950dfd0eb8SAnton Arapov * We don't want to keep the trampoline address on the stack, rather keep the
18960dfd0eb8SAnton Arapov * original return address of the first caller through all the subsequent
18970dfd0eb8SAnton Arapov * instances. This also makes breakpoint unwrapping easier.
18980dfd0eb8SAnton Arapov */
1899db087ef6SOleg Nesterov if (chained) {
19000dfd0eb8SAnton Arapov if (!utask->return_instances) {
19010dfd0eb8SAnton Arapov /*
19020dfd0eb8SAnton Arapov * This situation is not possible. Likely we have an
19030dfd0eb8SAnton Arapov * attack from user-space.
19040dfd0eb8SAnton Arapov */
19056c58d0e4SOleg Nesterov uprobe_warn(current, "handle tail call");
19060dfd0eb8SAnton Arapov goto fail;
19070dfd0eb8SAnton Arapov }
19080dfd0eb8SAnton Arapov orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
19090dfd0eb8SAnton Arapov }
19100dfd0eb8SAnton Arapov
1911f231722aSOleg Nesterov ri->uprobe = get_uprobe(uprobe);
19120dfd0eb8SAnton Arapov ri->func = instruction_pointer(regs);
19137b868e48SOleg Nesterov ri->stack = user_stack_pointer(regs);
19140dfd0eb8SAnton Arapov ri->orig_ret_vaddr = orig_ret_vaddr;
19150dfd0eb8SAnton Arapov ri->chained = chained;
19160dfd0eb8SAnton Arapov
1917ded49c55SAnton Arapov utask->depth++;
19180dfd0eb8SAnton Arapov ri->next = utask->return_instances;
19190dfd0eb8SAnton Arapov utask->return_instances = ri;
19200dfd0eb8SAnton Arapov
19210dfd0eb8SAnton Arapov return;
19220dfd0eb8SAnton Arapov fail:
19230dfd0eb8SAnton Arapov kfree(ri);
19240dfd0eb8SAnton Arapov }
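/*
 * Worked example of the "chained" case above: suppose f() is uretprobed and
 * tail-calls g(), which is uretprobed too.  f()'s prepare_uretprobe() already
 * replaced the return address on the stack with the trampoline, so when g()
 * is probed arch_uretprobe_hijack_return_addr() reads back the trampoline
 * address itself: orig_ret_vaddr == trampoline_vaddr, "chained" is true, and
 * the new return_instance records f()'s original return address instead of
 * the trampoline, keeping the unwind sane.
 */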
19250dfd0eb8SAnton Arapov
19260326f5a9SSrikar Dronamraju /* Prepare to single-step probed instruction out of line. */
19270326f5a9SSrikar Dronamraju static int
1928a6cb3f6dSOleg Nesterov pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
19290326f5a9SSrikar Dronamraju {
1930a6cb3f6dSOleg Nesterov struct uprobe_task *utask;
1931a6cb3f6dSOleg Nesterov unsigned long xol_vaddr;
1932aba51024SOleg Nesterov int err;
1933d4b3b638SSrikar Dronamraju
1934608e7427SOleg Nesterov utask = get_utask();
1935608e7427SOleg Nesterov if (!utask)
1936608e7427SOleg Nesterov return -ENOMEM;
1937a6cb3f6dSOleg Nesterov
1938a6cb3f6dSOleg Nesterov xol_vaddr = xol_get_insn_slot(uprobe);
1939a6cb3f6dSOleg Nesterov if (!xol_vaddr)
1940a6cb3f6dSOleg Nesterov return -ENOMEM;
1941a6cb3f6dSOleg Nesterov
1942a6cb3f6dSOleg Nesterov utask->xol_vaddr = xol_vaddr;
1943a6cb3f6dSOleg Nesterov utask->vaddr = bp_vaddr;
1944a6cb3f6dSOleg Nesterov
1945aba51024SOleg Nesterov err = arch_uprobe_pre_xol(&uprobe->arch, regs);
1946aba51024SOleg Nesterov if (unlikely(err)) {
1947aba51024SOleg Nesterov xol_free_insn_slot(current);
1948aba51024SOleg Nesterov return err;
1949aba51024SOleg Nesterov }
1950aba51024SOleg Nesterov
1951608e7427SOleg Nesterov utask->active_uprobe = uprobe;
1952608e7427SOleg Nesterov utask->state = UTASK_SSTEP;
1953aba51024SOleg Nesterov return 0;
19540326f5a9SSrikar Dronamraju }
19550326f5a9SSrikar Dronamraju
19560326f5a9SSrikar Dronamraju /*
19570326f5a9SSrikar Dronamraju * If we are singlestepping, then ensure this thread is not connected to
19580326f5a9SSrikar Dronamraju * non-fatal signals until completion of singlestep. When xol insn itself
19590326f5a9SSrikar Dronamraju * triggers the signal, restart the original insn even if the task is
19600326f5a9SSrikar Dronamraju * already SIGKILL'ed (since coredump should report the correct ip). This
19610326f5a9SSrikar Dronamraju * is even more important if the task has a handler for SIGSEGV/etc.: the
19620326f5a9SSrikar Dronamraju * _same_ instruction should be repeated again after return from the signal
19630326f5a9SSrikar Dronamraju * handler, and SSTEP can never finish in this case.
19640326f5a9SSrikar Dronamraju */
19650326f5a9SSrikar Dronamraju bool uprobe_deny_signal(void)
19660326f5a9SSrikar Dronamraju {
19670326f5a9SSrikar Dronamraju struct task_struct *t = current;
19680326f5a9SSrikar Dronamraju struct uprobe_task *utask = t->utask;
19690326f5a9SSrikar Dronamraju
19700326f5a9SSrikar Dronamraju if (likely(!utask || !utask->active_uprobe))
19710326f5a9SSrikar Dronamraju return false;
19720326f5a9SSrikar Dronamraju
19730326f5a9SSrikar Dronamraju WARN_ON_ONCE(utask->state != UTASK_SSTEP);
19740326f5a9SSrikar Dronamraju
19755c251e9dSJens Axboe if (task_sigpending(t)) {
19760326f5a9SSrikar Dronamraju spin_lock_irq(&t->sighand->siglock);
19770326f5a9SSrikar Dronamraju clear_tsk_thread_flag(t, TIF_SIGPENDING);
19780326f5a9SSrikar Dronamraju spin_unlock_irq(&t->sighand->siglock);
19790326f5a9SSrikar Dronamraju
19800326f5a9SSrikar Dronamraju if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
19810326f5a9SSrikar Dronamraju utask->state = UTASK_SSTEP_TRAPPED;
19820326f5a9SSrikar Dronamraju set_tsk_thread_flag(t, TIF_UPROBE);
19830326f5a9SSrikar Dronamraju }
19840326f5a9SSrikar Dronamraju }
19850326f5a9SSrikar Dronamraju
19860326f5a9SSrikar Dronamraju return true;
19870326f5a9SSrikar Dronamraju }
19880326f5a9SSrikar Dronamraju
1989499a4f3eSOleg Nesterov static void mmf_recalc_uprobes(struct mm_struct *mm)
1990499a4f3eSOleg Nesterov {
1991fcb72a58SMatthew Wilcox (Oracle) VMA_ITERATOR(vmi, mm, 0);
1992499a4f3eSOleg Nesterov struct vm_area_struct *vma;
1993499a4f3eSOleg Nesterov
1994fcb72a58SMatthew Wilcox (Oracle) for_each_vma(vmi, vma) {
1995499a4f3eSOleg Nesterov if (!valid_vma(vma, false))
1996499a4f3eSOleg Nesterov continue;
1997499a4f3eSOleg Nesterov /*
1998499a4f3eSOleg Nesterov * This is not strictly accurate, we can race with
1999499a4f3eSOleg Nesterov * uprobe_unregister() and see the already removed
2000499a4f3eSOleg Nesterov * uprobe if delete_uprobe() was not yet called.
200163633cbfSOleg Nesterov * Or this uprobe can be filtered out.
2002499a4f3eSOleg Nesterov */
2003499a4f3eSOleg Nesterov if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
2004499a4f3eSOleg Nesterov return;
2005499a4f3eSOleg Nesterov }
2006499a4f3eSOleg Nesterov
2007499a4f3eSOleg Nesterov clear_bit(MMF_HAS_UPROBES, &mm->flags);
2008499a4f3eSOleg Nesterov }
2009499a4f3eSOleg Nesterov
20100908ad6eSAnanth N Mavinakayanahalli static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
2011ec75fba9SOleg Nesterov {
2012ec75fba9SOleg Nesterov struct page *page;
2013ec75fba9SOleg Nesterov uprobe_opcode_t opcode;
2014ec75fba9SOleg Nesterov int result;
2015ec75fba9SOleg Nesterov
2016013b2debSOleg Nesterov if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE)))
2017013b2debSOleg Nesterov return -EINVAL;
2018013b2debSOleg Nesterov
2019ec75fba9SOleg Nesterov pagefault_disable();
2020bd28b145SLinus Torvalds result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
2021ec75fba9SOleg Nesterov pagefault_enable();
2022ec75fba9SOleg Nesterov
2023ec75fba9SOleg Nesterov if (likely(result == 0))
2024ec75fba9SOleg Nesterov goto out;
2025ec75fba9SOleg Nesterov
20261e987790SDave Hansen /*
20271e987790SDave Hansen * The NULL 'tsk' here ensures that any faults that occur here
20281e987790SDave Hansen * will not be accounted to the task. 'mm' *is* current->mm,
20291e987790SDave Hansen * but we treat this as a 'remote' access since it is
20301e987790SDave Hansen * essentially a kernel access to the memory.
20311e987790SDave Hansen */
2032ca5e8632SLorenzo Stoakes result = get_user_pages_remote(mm, vaddr, 1, FOLL_FORCE, &page, NULL);
2033ec75fba9SOleg Nesterov if (result < 0)
2034ec75fba9SOleg Nesterov return result;
2035ec75fba9SOleg Nesterov
2036ab0d805cSOleg Nesterov copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
2037ec75fba9SOleg Nesterov put_page(page);
2038ec75fba9SOleg Nesterov out:
20390908ad6eSAnanth N Mavinakayanahalli /* This needs to return true for any variant of the trap insn */
20400908ad6eSAnanth N Mavinakayanahalli return is_trap_insn(&opcode);
2041ec75fba9SOleg Nesterov }
2042ec75fba9SOleg Nesterov
2043d790d346SOleg Nesterov static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
20440326f5a9SSrikar Dronamraju {
20453a9ea052SOleg Nesterov struct mm_struct *mm = current->mm;
20463a9ea052SOleg Nesterov struct uprobe *uprobe = NULL;
20470326f5a9SSrikar Dronamraju struct vm_area_struct *vma;
20480326f5a9SSrikar Dronamraju
2049d8ed45c5SMichel Lespinasse mmap_read_lock(mm);
20509016ddedSLiam Howlett vma = vma_lookup(mm, bp_vaddr);
20519016ddedSLiam Howlett if (vma) {
20523a9ea052SOleg Nesterov if (valid_vma(vma, false)) {
2053f281769eSOleg Nesterov struct inode *inode = file_inode(vma->vm_file);
2054cb113b47SOleg Nesterov loff_t offset = vaddr_to_offset(vma, bp_vaddr);
20550326f5a9SSrikar Dronamraju
20560326f5a9SSrikar Dronamraju uprobe = find_uprobe(inode, offset);
20570326f5a9SSrikar Dronamraju }
2058d790d346SOleg Nesterov
2059d790d346SOleg Nesterov if (!uprobe)
20600908ad6eSAnanth N Mavinakayanahalli *is_swbp = is_trap_at_addr(mm, bp_vaddr);
2061d790d346SOleg Nesterov } else {
2062d790d346SOleg Nesterov *is_swbp = -EFAULT;
20633a9ea052SOleg Nesterov }
2064499a4f3eSOleg Nesterov
2065499a4f3eSOleg Nesterov if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
2066499a4f3eSOleg Nesterov mmf_recalc_uprobes(mm);
2067d8ed45c5SMichel Lespinasse mmap_read_unlock(mm);
20680326f5a9SSrikar Dronamraju
20693a9ea052SOleg Nesterov return uprobe;
20703a9ea052SOleg Nesterov }
20713a9ea052SOleg Nesterov
2072da1816b1SOleg Nesterov static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
2073da1816b1SOleg Nesterov {
2074da1816b1SOleg Nesterov struct uprobe_consumer *uc;
2075da1816b1SOleg Nesterov int remove = UPROBE_HANDLER_REMOVE;
20760dfd0eb8SAnton Arapov bool need_prep = false; /* prepare return uprobe, when needed */
2077da1816b1SOleg Nesterov
2078da1816b1SOleg Nesterov down_read(&uprobe->register_rwsem);
2079adf290feSAndrii Nakryiko current->utask->auprobe = &uprobe->arch;
2080da1816b1SOleg Nesterov for (uc = uprobe->consumers; uc; uc = uc->next) {
2081ea024870SAnton Arapov int rc = 0;
2082da1816b1SOleg Nesterov
2083ea024870SAnton Arapov if (uc->handler) {
2084ea024870SAnton Arapov rc = uc->handler(uc, regs);
2085da1816b1SOleg Nesterov WARN(rc & ~UPROBE_HANDLER_MASK,
2086d75f773cSSakari Ailus "bad rc=0x%x from %ps()\n", rc, uc->handler);
2087ea024870SAnton Arapov }
20880dfd0eb8SAnton Arapov
20890dfd0eb8SAnton Arapov if (uc->ret_handler)
20900dfd0eb8SAnton Arapov need_prep = true;
20910dfd0eb8SAnton Arapov
2092da1816b1SOleg Nesterov remove &= rc;
2093da1816b1SOleg Nesterov }
2094adf290feSAndrii Nakryiko current->utask->auprobe = NULL;
2095da1816b1SOleg Nesterov
20960dfd0eb8SAnton Arapov if (need_prep && !remove)
20970dfd0eb8SAnton Arapov prepare_uretprobe(uprobe, regs); /* put bp at return */
20980dfd0eb8SAnton Arapov
2099da1816b1SOleg Nesterov if (remove && uprobe->consumers) {
2100da1816b1SOleg Nesterov WARN_ON(!uprobe_is_active(uprobe));
2101da1816b1SOleg Nesterov unapply_uprobe(uprobe, current->mm);
2102da1816b1SOleg Nesterov }
2103da1816b1SOleg Nesterov up_read(&uprobe->register_rwsem);
2104da1816b1SOleg Nesterov }
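/*
 * Consumer return values, illustrated (hypothetical handler, not part of
 * this file): UPROBE_HANDLER_REMOVE asks to drop the breakpoints from the
 * current mm, but because of the "remove &= rc" accumulation above that only
 * happens if every consumer with a ->handler agrees.
 */
#if 0
static int oneshot_handler(struct uprobe_consumer *self, struct pt_regs *regs)
{
	pr_info("uprobe hit at %lx\n", instruction_pointer(regs));
	return UPROBE_HANDLER_REMOVE;	/* detach from this mm after one hit */
}
#endif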
2105da1816b1SOleg Nesterov
2106fec8898dSAnton Arapov static void
2107fec8898dSAnton Arapov handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
2108fec8898dSAnton Arapov {
2109fec8898dSAnton Arapov struct uprobe *uprobe = ri->uprobe;
2110fec8898dSAnton Arapov struct uprobe_consumer *uc;
2111fec8898dSAnton Arapov
2112fec8898dSAnton Arapov down_read(&uprobe->register_rwsem);
2113fec8898dSAnton Arapov for (uc = uprobe->consumers; uc; uc = uc->next) {
2114fec8898dSAnton Arapov if (uc->ret_handler)
2115fec8898dSAnton Arapov uc->ret_handler(uc, ri->func, regs);
2116fec8898dSAnton Arapov }
2117fec8898dSAnton Arapov up_read(&uprobe->register_rwsem);
2118fec8898dSAnton Arapov }
2119fec8898dSAnton Arapov
2120a83cfeb9SOleg Nesterov static struct return_instance *find_next_ret_chain(struct return_instance *ri)
2121a83cfeb9SOleg Nesterov {
2122a83cfeb9SOleg Nesterov bool chained;
2123a83cfeb9SOleg Nesterov
2124a83cfeb9SOleg Nesterov do {
2125a83cfeb9SOleg Nesterov chained = ri->chained;
2126a83cfeb9SOleg Nesterov ri = ri->next; /* can't be NULL if chained */
2127a83cfeb9SOleg Nesterov } while (chained);
2128a83cfeb9SOleg Nesterov
2129a83cfeb9SOleg Nesterov return ri;
2130a83cfeb9SOleg Nesterov }
2131a83cfeb9SOleg Nesterov
21320b5256c7SOleg Nesterov static void handle_trampoline(struct pt_regs *regs)
2133fec8898dSAnton Arapov {
2134fec8898dSAnton Arapov struct uprobe_task *utask;
2135a83cfeb9SOleg Nesterov struct return_instance *ri, *next;
21365eeb50deSOleg Nesterov bool valid;
2137fec8898dSAnton Arapov
2138fec8898dSAnton Arapov utask = current->utask;
2139fec8898dSAnton Arapov if (!utask)
21400b5256c7SOleg Nesterov goto sigill;
2141fec8898dSAnton Arapov
2142fec8898dSAnton Arapov ri = utask->return_instances;
2143fec8898dSAnton Arapov if (!ri)
21440b5256c7SOleg Nesterov goto sigill;
2145fec8898dSAnton Arapov
21465eeb50deSOleg Nesterov do {
2147fec8898dSAnton Arapov /*
21485eeb50deSOleg Nesterov * We should throw out the frames invalidated by longjmp().
21495eeb50deSOleg Nesterov * If this chain is valid, then the next one should be alive
21505eeb50deSOleg Nesterov * or NULL; the latter case means that nobody but ri->func
21515eeb50deSOleg Nesterov * could hit this trampoline on return. TODO: sigaltstack().
2152fec8898dSAnton Arapov */
21535eeb50deSOleg Nesterov next = find_next_ret_chain(ri);
215486dcb702SOleg Nesterov valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);
21555eeb50deSOleg Nesterov
2156fec8898dSAnton Arapov instruction_pointer_set(regs, ri->orig_ret_vaddr);
2157a83cfeb9SOleg Nesterov do {
21585eeb50deSOleg Nesterov if (valid)
2159fec8898dSAnton Arapov handle_uretprobe_chain(ri, regs);
21602bb5e840SOleg Nesterov ri = free_ret_instance(ri);
2161878b5a6eSOleg Nesterov utask->depth--;
2162a83cfeb9SOleg Nesterov } while (ri != next);
21635eeb50deSOleg Nesterov } while (!valid);
2164fec8898dSAnton Arapov
2165fec8898dSAnton Arapov utask->return_instances = ri;
21660b5256c7SOleg Nesterov return;
2167fec8898dSAnton Arapov
21680b5256c7SOleg Nesterov sigill:
21690b5256c7SOleg Nesterov uprobe_warn(current, "handle uretprobe, sending SIGILL.");
21703cf5d076SEric W. Biederman force_sig(SIGILL);
21710b5256c7SOleg Nesterov
2172fec8898dSAnton Arapov }
2173fec8898dSAnton Arapov
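/*
 * Descriptive note (added): weak default; an architecture may override this
 * to tell handle_swbp() to skip the handlers for this hit (e.g. for a
 * conditionally executed instruction whose condition is false).  Returning
 * false means the probe is handled normally.
 */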
21746fe50a28SDavid A. Long bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
21756fe50a28SDavid A. Long {
21766fe50a28SDavid A. Long return false;
21776fe50a28SDavid A. Long }
21786fe50a28SDavid A. Long
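/*
 * Descriptive note (added): weak default; an architecture may override this
 * to detect return frames invalidated e.g. by longjmp(), typically by
 * comparing the recorded stack pointer with the current one.  The default
 * assumes every frame is still alive.
 */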
217986dcb702SOleg Nesterov bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
218086dcb702SOleg Nesterov struct pt_regs *regs)
218197da8976SOleg Nesterov {
218297da8976SOleg Nesterov return true;
218397da8976SOleg Nesterov }
218497da8976SOleg Nesterov
21853a9ea052SOleg Nesterov /*
21863a9ea052SOleg Nesterov * Run handler and ask thread to singlestep.
21873a9ea052SOleg Nesterov * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
21883a9ea052SOleg Nesterov */
21893a9ea052SOleg Nesterov static void handle_swbp(struct pt_regs *regs)
21903a9ea052SOleg Nesterov {
21913a9ea052SOleg Nesterov struct uprobe *uprobe;
21923a9ea052SOleg Nesterov unsigned long bp_vaddr;
21933f649ab7SKees Cook int is_swbp;
21943a9ea052SOleg Nesterov
21953a9ea052SOleg Nesterov bp_vaddr = uprobe_get_swbp_addr(regs);
21960b5256c7SOleg Nesterov if (bp_vaddr == get_trampoline_vaddr())
21970b5256c7SOleg Nesterov return handle_trampoline(regs);
2198fec8898dSAnton Arapov
2199fec8898dSAnton Arapov uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
22000326f5a9SSrikar Dronamraju if (!uprobe) {
220156bb4cf6SOleg Nesterov if (is_swbp > 0) {
22020326f5a9SSrikar Dronamraju /* No matching uprobe; signal SIGTRAP. */
2203fe5ed7abSOleg Nesterov force_sig(SIGTRAP);
220456bb4cf6SOleg Nesterov } else {
220556bb4cf6SOleg Nesterov /*
220656bb4cf6SOleg Nesterov * Either we raced with uprobe_unregister() or we can't
220756bb4cf6SOleg Nesterov * access this memory. The latter is only possible if
220856bb4cf6SOleg Nesterov * another thread plays with our ->mm. In both cases
220956bb4cf6SOleg Nesterov * we can simply restart. If this vma was unmapped we
221056bb4cf6SOleg Nesterov * can pretend this insn was not executed yet and get
221156bb4cf6SOleg Nesterov * the (correct) SIGSEGV after restart.
221256bb4cf6SOleg Nesterov */
221356bb4cf6SOleg Nesterov instruction_pointer_set(regs, bp_vaddr);
221456bb4cf6SOleg Nesterov }
22150326f5a9SSrikar Dronamraju return;
22160326f5a9SSrikar Dronamraju }
221774e59dfcSOleg Nesterov
221874e59dfcSOleg Nesterov /* change it in advance for ->handler() and restart */
221974e59dfcSOleg Nesterov instruction_pointer_set(regs, bp_vaddr);
222074e59dfcSOleg Nesterov
2221142b18ddSOleg Nesterov /*
2222142b18ddSOleg Nesterov * TODO: move copy_insn/etc into _register and remove this hack.
2223142b18ddSOleg Nesterov * After we hit the bp, _unregister + _register can install the
2224142b18ddSOleg Nesterov * new and not-yet-analyzed uprobe at the same address, restart.
2225142b18ddSOleg Nesterov */
222671434f2fSOleg Nesterov if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
222774e59dfcSOleg Nesterov goto out;
22280326f5a9SSrikar Dronamraju
222909d3f015SAndrea Parri /*
223009d3f015SAndrea Parri * Pairs with the smp_wmb() in prepare_uprobe().
223109d3f015SAndrea Parri *
223209d3f015SAndrea Parri * Guarantees that if we see the UPROBE_COPY_INSN bit set, then
223309d3f015SAndrea Parri * we must also see the stores to &uprobe->arch performed by the
223409d3f015SAndrea Parri * prepare_uprobe() call.
223509d3f015SAndrea Parri */
223609d3f015SAndrea Parri smp_rmb();
223709d3f015SAndrea Parri
223872fd293aSOleg Nesterov /* Tracing handlers use ->utask to communicate with fetch methods */
223972fd293aSOleg Nesterov if (!get_utask())
224072fd293aSOleg Nesterov goto out;
224172fd293aSOleg Nesterov
22426fe50a28SDavid A. Long if (arch_uprobe_ignore(&uprobe->arch, regs))
22436fe50a28SDavid A. Long goto out;
22446fe50a28SDavid A. Long
22450326f5a9SSrikar Dronamraju handler_chain(uprobe, regs);
22466fe50a28SDavid A. Long
22478a6b1732SOleg Nesterov if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
22480578a970SOleg Nesterov goto out;
22490326f5a9SSrikar Dronamraju
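	/*
	 * Descriptive note (added): set up an XOL slot and switch the thread
	 * to single-step mode; on success we return with the uprobe ref held
	 * until handle_singlestep() drops it.
	 */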
2250608e7427SOleg Nesterov if (!pre_ssout(uprobe, regs, bp_vaddr))
22510326f5a9SSrikar Dronamraju return;
22520326f5a9SSrikar Dronamraju
22538a6b1732SOleg Nesterov /* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */
22540578a970SOleg Nesterov out:
22550326f5a9SSrikar Dronamraju put_uprobe(uprobe);
22560326f5a9SSrikar Dronamraju }
22570326f5a9SSrikar Dronamraju
22580326f5a9SSrikar Dronamraju /*
22590326f5a9SSrikar Dronamraju * Perform required fix-ups and disable singlestep.
22600326f5a9SSrikar Dronamraju * Allow pending signals to take effect.
22610326f5a9SSrikar Dronamraju */
22620326f5a9SSrikar Dronamraju static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
22630326f5a9SSrikar Dronamraju {
22640326f5a9SSrikar Dronamraju struct uprobe *uprobe;
2265014940baSOleg Nesterov int err = 0;
22660326f5a9SSrikar Dronamraju
22670326f5a9SSrikar Dronamraju uprobe = utask->active_uprobe;
22680326f5a9SSrikar Dronamraju if (utask->state == UTASK_SSTEP_ACK)
2269014940baSOleg Nesterov err = arch_uprobe_post_xol(&uprobe->arch, regs);
22700326f5a9SSrikar Dronamraju else if (utask->state == UTASK_SSTEP_TRAPPED)
22710326f5a9SSrikar Dronamraju arch_uprobe_abort_xol(&uprobe->arch, regs);
22720326f5a9SSrikar Dronamraju else
22730326f5a9SSrikar Dronamraju WARN_ON_ONCE(1);
22740326f5a9SSrikar Dronamraju
22750326f5a9SSrikar Dronamraju put_uprobe(uprobe);
22760326f5a9SSrikar Dronamraju utask->active_uprobe = NULL;
22770326f5a9SSrikar Dronamraju utask->state = UTASK_RUNNING;
2278d4b3b638SSrikar Dronamraju xol_free_insn_slot(current);
22790326f5a9SSrikar Dronamraju
22800326f5a9SSrikar Dronamraju spin_lock_irq(&current->sighand->siglock);
22810326f5a9SSrikar Dronamraju recalc_sigpending(); /* see uprobe_deny_signal() */
22820326f5a9SSrikar Dronamraju spin_unlock_irq(&current->sighand->siglock);
2283014940baSOleg Nesterov
2284014940baSOleg Nesterov if (unlikely(err)) {
2285014940baSOleg Nesterov uprobe_warn(current, "execute the probed insn, sending SIGILL.");
22863cf5d076SEric W. Biederman force_sig(SIGILL);
2287014940baSOleg Nesterov }
22880326f5a9SSrikar Dronamraju }
22890326f5a9SSrikar Dronamraju
22900326f5a9SSrikar Dronamraju /*
22911b08e907SOleg Nesterov * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
22921b08e907SOleg Nesterov * allows the thread to return from interrupt. After that handle_swbp()
22931b08e907SOleg Nesterov * sets utask->active_uprobe.
22940326f5a9SSrikar Dronamraju *
22951b08e907SOleg Nesterov * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
22961b08e907SOleg Nesterov * and allows the thread to return from interrupt.
22970326f5a9SSrikar Dronamraju *
22980326f5a9SSrikar Dronamraju * While returning to userspace, thread notices the TIF_UPROBE flag and calls
22990326f5a9SSrikar Dronamraju * uprobe_notify_resume().
23000326f5a9SSrikar Dronamraju */
23010326f5a9SSrikar Dronamraju void uprobe_notify_resume(struct pt_regs *regs)
23020326f5a9SSrikar Dronamraju {
23030326f5a9SSrikar Dronamraju struct uprobe_task *utask;
23040326f5a9SSrikar Dronamraju
2305db023ea5SOleg Nesterov clear_thread_flag(TIF_UPROBE);
2306db023ea5SOleg Nesterov
23070326f5a9SSrikar Dronamraju utask = current->utask;
23081b08e907SOleg Nesterov if (utask && utask->active_uprobe)
23090326f5a9SSrikar Dronamraju handle_singlestep(utask, regs);
23101b08e907SOleg Nesterov else
23111b08e907SOleg Nesterov handle_swbp(regs);
23120326f5a9SSrikar Dronamraju }
23130326f5a9SSrikar Dronamraju
23140326f5a9SSrikar Dronamraju /*
23150326f5a9SSrikar Dronamraju * uprobe_pre_sstep_notifier gets called from interrupt context as part of
23160326f5a9SSrikar Dronamraju * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
23170326f5a9SSrikar Dronamraju */
23180326f5a9SSrikar Dronamraju int uprobe_pre_sstep_notifier(struct pt_regs *regs)
23190326f5a9SSrikar Dronamraju {
23200dfd0eb8SAnton Arapov if (!current->mm)
23210dfd0eb8SAnton Arapov return 0;
23220dfd0eb8SAnton Arapov
23230dfd0eb8SAnton Arapov if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
23240dfd0eb8SAnton Arapov (!current->utask || !current->utask->return_instances))
23250326f5a9SSrikar Dronamraju return 0;
23260326f5a9SSrikar Dronamraju
23270326f5a9SSrikar Dronamraju set_thread_flag(TIF_UPROBE);
23280326f5a9SSrikar Dronamraju return 1;
23290326f5a9SSrikar Dronamraju }
23300326f5a9SSrikar Dronamraju
23310326f5a9SSrikar Dronamraju /*
23320326f5a9SSrikar Dronamraju * uprobe_post_sstep_notifier gets called in interrupt context as part of notifier
23330326f5a9SSrikar Dronamraju * mechanism. Set TIF_UPROBE flag and indicate completion of singlestep.
23340326f5a9SSrikar Dronamraju */
23350326f5a9SSrikar Dronamraju int uprobe_post_sstep_notifier(struct pt_regs *regs)
23360326f5a9SSrikar Dronamraju {
23370326f5a9SSrikar Dronamraju struct uprobe_task *utask = current->utask;
23380326f5a9SSrikar Dronamraju
23390326f5a9SSrikar Dronamraju if (!current->mm || !utask || !utask->active_uprobe)
23400326f5a9SSrikar Dronamraju /* task is currently not uprobed */
23410326f5a9SSrikar Dronamraju return 0;
23420326f5a9SSrikar Dronamraju
23430326f5a9SSrikar Dronamraju utask->state = UTASK_SSTEP_ACK;
23440326f5a9SSrikar Dronamraju set_thread_flag(TIF_UPROBE);
23450326f5a9SSrikar Dronamraju return 1;
23460326f5a9SSrikar Dronamraju }
23470326f5a9SSrikar Dronamraju
23480326f5a9SSrikar Dronamraju static struct notifier_block uprobe_exception_nb = {
23490326f5a9SSrikar Dronamraju .notifier_call = arch_uprobe_exception_notify,
23500326f5a9SSrikar Dronamraju .priority = INT_MAX-1, /* notified after kprobes, kgdb */
23510326f5a9SSrikar Dronamraju };
23520326f5a9SSrikar Dronamraju
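/*
 * Descriptive note (added): called once at boot to initialize the per-bucket
 * mmap mutexes and register the die notifier, so breakpoint and single-step
 * exceptions reach arch_uprobe_exception_notify().
 */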
2353aad42dd4SNadav Amit void __init uprobes_init(void)
2354a5f4374aSIngo Molnar {
2355a5f4374aSIngo Molnar int i;
2356a5f4374aSIngo Molnar
235766d06dffSOleg Nesterov for (i = 0; i < UPROBES_HASH_SZ; i++)
2358a5f4374aSIngo Molnar mutex_init(&uprobes_mmap_mutex[i]);
23590326f5a9SSrikar Dronamraju
2360aad42dd4SNadav Amit BUG_ON(register_die_notifier(&uprobe_exception_nb));
2361a5f4374aSIngo Molnar }
2362