/* xref: /openbmc/linux/kernel/events/uprobes.c (revision 7d4a8be0) */
// SPDX-License-Identifier: GPL-2.0+
/*
 * User-space Probes (UProbes)
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/export.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
#include <linux/swap.h>		/* folio_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include "../../mm/internal.h"	/* munlock_vma_page */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>
#include <linux/khugepaged.h>

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE

static struct rb_root uprobes_tree = RB_ROOT;
/*
 * allows us to skip the uprobe_mmap if there are no uprobe events active
 * at this time.  Probably a fine grained per inode count is better?
 */
#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */

#define UPROBES_HASH_SZ	13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem);

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN	0

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	refcount_t		ref;
	struct rw_semaphore	register_rwsem;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct uprobe_consumer	*consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	loff_t			offset;
	loff_t			ref_ctr_offset;
	unsigned long		flags;

	/*
	 * The generic code assumes that it has two members of unknown type
	 * owned by the arch-specific code:
	 *
	 * 	insn -	copy_insn() saves the original instruction here for
	 *		arch_uprobe_analyze_insn().
	 *
	 *	ixol -	potentially modified instruction to execute out of
	 *		line, copied to xol_area by xol_get_insn_slot().
	 */
	struct arch_uprobe	arch;
};

struct delayed_uprobe {
	struct list_head list;
	struct uprobe *uprobe;
	struct mm_struct *mm;
};

static DEFINE_MUTEX(delayed_uprobe_lock);
static LIST_HEAD(delayed_uprobe_list);

/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, thread contends for a slot.  It frees the
 * slot after singlestep. Currently a fixed number of slots are
 * allocated.
 */
struct xol_area {
	wait_queue_head_t 		wq;		/* if all slots are busy */
	atomic_t 			slot_count;	/* number of in-use slots */
	unsigned long 			*bitmap;	/* 0 = free slot */

	struct vm_special_mapping	xol_mapping;
	struct page 			*pages[2];
	/*
	 * We keep the vma's vm_start rather than a pointer to the vma
	 * itself.  The probed process or a naughty kernel module could make
	 * the vma go away, and we must handle that reasonably gracefully.
	 */
	unsigned long 			vaddr;		/* Page(s) of instruction slots */
};

/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return true if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

	if (is_register)
		flags |= VM_WRITE;

	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
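
/*
 * Worked example of the two translations above, with made-up numbers:
 * for a vma with vm_start == 0x400000 and vm_pgoff == 2 (4K pages),
 * file offset 0x3050 maps to 0x400000 + 0x3050 - 0x2000 == 0x401050,
 * and vaddr_to_offset() inverts it:
 * 0x2000 + (0x401050 - 0x400000) == 0x3050.
 */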

/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @page is mapped at
 * @old_page: the page we are replacing
 * @new_page: the modified page that replaces @old_page
 *
 * If @new_page is NULL, only unmap @old_page.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *old_page, struct page *new_page)
{
	struct folio *old_folio = page_folio(old_page);
	struct folio *new_folio;
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);
	int err;
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
				addr + PAGE_SIZE);

	if (new_page) {
		new_folio = page_folio(new_page);
		err = mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL);
		if (err)
			return err;
	}

	/* For folio_free_swap() below */
	folio_lock(old_folio);

	mmu_notifier_invalidate_range_start(&range);
	err = -EAGAIN;
	if (!page_vma_mapped_walk(&pvmw))
		goto unlock;
	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);

	if (new_page) {
		folio_get(new_folio);
		page_add_new_anon_rmap(new_page, vma, addr);
		folio_add_lru_vma(new_folio, vma);
	} else
		/* no new page, just dec_mm_counter for old_page */
		dec_mm_counter(mm, MM_ANONPAGES);

	if (!folio_test_anon(old_folio)) {
		dec_mm_counter(mm, mm_counter_file(old_page));
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
	ptep_clear_flush_notify(vma, addr, pvmw.pte);
	if (new_page)
		set_pte_at_notify(mm, addr, pvmw.pte,
				  mk_pte(new_page, vma->vm_page_prot));

	page_remove_rmap(old_page, vma, false);
	if (!folio_mapped(old_folio))
		folio_free_swap(old_folio);
	page_vma_mapped_walk_done(&pvmw);
	folio_put(old_folio);

	err = 0;
 unlock:
	mmu_notifier_invalidate_range_end(&range);
	folio_unlock(old_folio);
	return err;
}

/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}

/**
 * is_trap_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_trap_insn
 * Returns true if @insn is a breakpoint instruction.
 *
 * This function is needed for the case where an architecture has multiple
 * trap instructions (like powerpc).
 */
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
	return is_swbp_insn(insn);
}

static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
	kunmap_atomic(kaddr);
}

static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
	kunmap_atomic(kaddr);
}

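/*
 * Decide whether uprobe_write_opcode() has any work to do: returns 1 if
 * the byte(s) at @vaddr must be (re)written, 0 if the page already matches
 * the requested state (breakpoint already installed on register, or
 * already restored on unregister).
 */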
static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
	uprobe_opcode_t old_opcode;
	bool is_swbp;

	/*
	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
	 * We do not check if it is any other 'trap variant' which could
	 * be conditional trap instruction such as the one powerpc supports.
	 *
	 * The logic is that we do not care if the underlying instruction
	 * is a trap variant; uprobes always wins over any other (gdb)
	 * breakpoint.
	 */
	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
	is_swbp = is_swbp_insn(&old_opcode);

	if (is_swbp_insn(new_opcode)) {
		if (is_swbp)		/* register: already installed? */
			return 0;
	} else {
		if (!is_swbp)		/* unregister: was it changed by us? */
			return 0;
	}

	return 1;
}

static struct delayed_uprobe *
delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	list_for_each_entry(du, &delayed_uprobe_list, list)
		if (du->uprobe == uprobe && du->mm == mm)
			return du;
	return NULL;
}

static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	if (delayed_uprobe_check(uprobe, mm))
		return 0;

	du  = kzalloc(sizeof(*du), GFP_KERNEL);
	if (!du)
		return -ENOMEM;

	du->uprobe = uprobe;
	du->mm = mm;
	list_add(&du->list, &delayed_uprobe_list);
	return 0;
}

static void delayed_uprobe_delete(struct delayed_uprobe *du)
{
	if (WARN_ON(!du))
		return;
	list_del(&du->list);
	kfree(du);
}

static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct list_head *pos, *q;
	struct delayed_uprobe *du;

	if (!uprobe && !mm)
		return;

	list_for_each_safe(pos, q, &delayed_uprobe_list) {
		du = list_entry(pos, struct delayed_uprobe, list);

		if (uprobe && du->uprobe != uprobe)
			continue;
		if (mm && du->mm != mm)
			continue;

		delayed_uprobe_delete(du);
	}
}

static bool valid_ref_ctr_vma(struct uprobe *uprobe,
			      struct vm_area_struct *vma)
{
	unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset);

	return uprobe->ref_ctr_offset &&
		vma->vm_file &&
		file_inode(vma->vm_file) == uprobe->inode &&
		(vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
		vma->vm_start <= vaddr &&
		vma->vm_end > vaddr;
}

static struct vm_area_struct *
find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *tmp;

	for_each_vma(vmi, tmp)
		if (valid_ref_ctr_vma(uprobe, tmp))
			return tmp;

	return NULL;
}

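/*
 * Adjust the reference counter (an SDT-style semaphore) at @vaddr in @mm
 * by @d (+1/-1): pin the page with get_user_pages_remote(), update the
 * 16-bit counter in place, and refuse updates that would make it go
 * negative.
 */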
static int
__update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
{
	void *kaddr;
	struct page *page;
	struct vm_area_struct *vma;
	int ret;
	short *ptr;

	if (!vaddr || !d)
		return -EINVAL;

	ret = get_user_pages_remote(mm, vaddr, 1,
			FOLL_WRITE, &page, &vma, NULL);
	if (unlikely(ret <= 0)) {
		/*
		 * We are asking for 1 page. If get_user_pages_remote() fails,
		 * it may return 0, in which case we return an error.
		 */
		return ret == 0 ? -EBUSY : ret;
	}

	kaddr = kmap_atomic(page);
	ptr = kaddr + (vaddr & ~PAGE_MASK);

	if (unlikely(*ptr + d < 0)) {
		pr_warn("ref_ctr going negative. vaddr: 0x%lx, "
			"curr val: %d, delta: %d\n", vaddr, *ptr, d);
		ret = -EINVAL;
		goto out;
	}

	*ptr += d;
	ret = 0;
out:
	kunmap_atomic(kaddr);
	put_page(page);
	return ret;
}

static void update_ref_ctr_warn(struct uprobe *uprobe,
				struct mm_struct *mm, short d)
{
	pr_warn("ref_ctr %s failed for inode: 0x%lx offset: "
		"0x%llx ref_ctr_offset: 0x%llx of mm: 0x%pK\n",
		d > 0 ? "increment" : "decrement", uprobe->inode->i_ino,
		(unsigned long long) uprobe->offset,
		(unsigned long long) uprobe->ref_ctr_offset, mm);
}

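/*
 * If the vma holding the reference counter is currently mapped, update the
 * counter directly. Otherwise an increment is parked on
 * delayed_uprobe_list so it can be applied later, once the counter area
 * gets mapped, while a decrement just drops any pending entry for this
 * uprobe/mm pair.
 */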
static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
			  short d)
{
	struct vm_area_struct *rc_vma;
	unsigned long rc_vaddr;
	int ret = 0;

	rc_vma = find_ref_ctr_vma(uprobe, mm);

	if (rc_vma) {
		rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset);
		ret = __update_ref_ctr(mm, rc_vaddr, d);
		if (ret)
			update_ref_ctr_warn(uprobe, mm, d);

		if (d > 0)
			return ret;
	}

	mutex_lock(&delayed_uprobe_lock);
	if (d > 0)
		ret = delayed_uprobe_add(uprobe, mm);
	else
		delayed_uprobe_remove(uprobe, mm);
	mutex_unlock(&delayed_uprobe_lock);

	return ret;
}

/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable-length instructions and the
 * breakpoint instruction is not the smallest one supported, then
 * is_trap_at_addr() and uprobe_write_opcode() need to be modified
 * accordingly. This is never a problem for archs with fixed-length
 * instructions.
 *
 * uprobe_write_opcode - write the opcode at a given virtual address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_lock held for write.
 * Return 0 (success) or a negative errno.
 */
int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
			unsigned long vaddr, uprobe_opcode_t opcode)
{
	struct uprobe *uprobe;
	struct page *old_page, *new_page;
	struct vm_area_struct *vma;
	int ret, is_register, ref_ctr_updated = 0;
	bool orig_page_huge = false;
	unsigned int gup_flags = FOLL_FORCE;

	is_register = is_swbp_insn(&opcode);
	uprobe = container_of(auprobe, struct uprobe, arch);

retry:
	if (is_register)
		gup_flags |= FOLL_SPLIT_PMD;
	/* Read the page with vaddr into memory */
	ret = get_user_pages_remote(mm, vaddr, 1, gup_flags,
				    &old_page, &vma, NULL);
	if (ret <= 0)
		return ret;

	ret = verify_opcode(old_page, vaddr, &opcode);
	if (ret <= 0)
		goto put_old;

	if (WARN(!is_register && PageCompound(old_page),
		 "uprobe unregister should never work on compound page\n")) {
		ret = -EINVAL;
		goto put_old;
	}

	/* We are going to replace instruction, update ref_ctr. */
	if (!ref_ctr_updated && uprobe->ref_ctr_offset) {
		ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
		if (ret)
			goto put_old;

		ref_ctr_updated = 1;
	}

	ret = 0;
	if (!is_register && !PageAnon(old_page))
		goto put_old;

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_old;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);
	copy_highpage(new_page, old_page);
	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

	if (!is_register) {
		struct page *orig_page;
		pgoff_t index;

		VM_BUG_ON_PAGE(!PageAnon(old_page), old_page);

		index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
		orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
					  index);

		if (orig_page) {
			if (PageUptodate(orig_page) &&
			    pages_identical(new_page, orig_page)) {
				/* let go of new_page */
				put_page(new_page);
				new_page = NULL;

				if (PageCompound(orig_page))
					orig_page_huge = true;
			}
			put_page(orig_page);
		}
	}

	ret = __replace_page(vma, vaddr, old_page, new_page);
	if (new_page)
		put_page(new_page);
put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;

	/* Revert the reference counter if the instruction update failed. */
	if (ret && is_register && ref_ctr_updated)
		update_ref_ctr(uprobe, mm, -1);

	/* try to collapse the pmd for a compound page */
	if (!ret && orig_page_huge)
		collapse_pte_mapped_thp(mm, vaddr, false);

	return ret;
}

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr,
			*(uprobe_opcode_t *)&auprobe->insn);
}

static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
	refcount_inc(&uprobe->ref);
	return uprobe;
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (refcount_dec_and_test(&uprobe->ref)) {
		/*
		 * If the application munmap()s the exec vma before
		 * uprobe_unregister() is called, we don't get a chance to
		 * remove the uprobe from delayed_uprobe_list in
		 * remove_breakpoint(). Do it here.
		 */
		mutex_lock(&delayed_uprobe_lock);
		delayed_uprobe_remove(uprobe, NULL);
		mutex_unlock(&delayed_uprobe_lock);
		kfree(uprobe);
	}
}

static __always_inline
int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset,
	       const struct uprobe *r)
{
	if (l_inode < r->inode)
		return -1;

	if (l_inode > r->inode)
		return 1;

	if (l_offset < r->offset)
		return -1;

	if (l_offset > r->offset)
		return 1;

	return 0;
}

#define __node_2_uprobe(node) \
	rb_entry((node), struct uprobe, rb_node)

struct __uprobe_key {
	struct inode *inode;
	loff_t offset;
};

static inline int __uprobe_cmp_key(const void *key, const struct rb_node *b)
{
	const struct __uprobe_key *a = key;
	return uprobe_cmp(a->inode, a->offset, __node_2_uprobe(b));
}

static inline int __uprobe_cmp(struct rb_node *a, const struct rb_node *b)
{
	struct uprobe *u = __node_2_uprobe(a);
	return uprobe_cmp(u->inode, u->offset, __node_2_uprobe(b));
}

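/*
 * The rbtree is ordered by (inode, offset): __uprobe_cmp_key() above
 * compares a lookup key against a node for rb_find(), while __uprobe_cmp()
 * compares two nodes for rb_find_add().
 */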
static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
	struct __uprobe_key key = {
		.inode = inode,
		.offset = offset,
	};
	struct rb_node *node = rb_find(&key, &uprobes_tree, __uprobe_cmp_key);

	if (node)
		return get_uprobe(__node_2_uprobe(node));

	return NULL;
}

/*
 * Find a uprobe corresponding to a given inode:offset
 * Acquires uprobes_treelock
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe;

	spin_lock(&uprobes_treelock);
	uprobe = __find_uprobe(inode, offset);
	spin_unlock(&uprobes_treelock);

	return uprobe;
}

static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node *node;

	node = rb_find_add(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp);
	if (node)
		return get_uprobe(__node_2_uprobe(node));

	/* get access + creation ref */
	refcount_set(&uprobe->ref, 2);
	return NULL;
}

/*
 * Acquire uprobes_treelock.
 * Matching uprobe already exists in rbtree;
 *	increment (access refcount) and return the matching uprobe.
 *
 * No matching uprobe; insert the uprobe in rb_tree;
 *	get a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	spin_lock(&uprobes_treelock);
	u = __insert_uprobe(uprobe);
	spin_unlock(&uprobes_treelock);

	return u;
}

static void
ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe)
{
	pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx "
		"ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n",
		uprobe->inode->i_ino, (unsigned long long) uprobe->offset,
		(unsigned long long) cur_uprobe->ref_ctr_offset,
		(unsigned long long) uprobe->ref_ctr_offset);
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
				   loff_t ref_ctr_offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return NULL;

	uprobe->inode = inode;
	uprobe->offset = offset;
	uprobe->ref_ctr_offset = ref_ctr_offset;
	init_rwsem(&uprobe->register_rwsem);
	init_rwsem(&uprobe->consumer_rwsem);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);
	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe) {
		if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) {
			ref_ctr_mismatch_warn(cur_uprobe, uprobe);
			put_uprobe(cur_uprobe);
			kfree(uprobe);
			return ERR_PTR(-EINVAL);
		}
		kfree(uprobe);
		uprobe = cur_uprobe;
	}

	return uprobe;
}

static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	uc->next = uprobe->consumers;
	uprobe->consumers = uc;
	up_write(&uprobe->consumer_rwsem);
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if @uc was found and deleted,
 * false otherwise.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	struct uprobe_consumer **con;
	bool ret = false;

	down_write(&uprobe->consumer_rwsem);
	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
		if (*con == uc) {
			*con = uc->next;
			ret = true;
			break;
		}
	}
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static int __copy_insn(struct address_space *mapping, struct file *filp,
			void *insn, int nbytes, loff_t offset)
{
	struct page *page;
	/*
	 * Ensure that the page that has the original instruction is populated
	 * and in page-cache. If ->read_folio == NULL it must be shmem_mapping(),
	 * see uprobe_register().
	 */
	if (mapping->a_ops->read_folio)
		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
	else
		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
	if (IS_ERR(page))
		return PTR_ERR(page);

	copy_from_page(page, offset, insn, nbytes);
	put_page(page);

	return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping = uprobe->inode->i_mapping;
	loff_t offs = uprobe->offset;
	void *insn = &uprobe->arch.insn;
	int size = sizeof(uprobe->arch.insn);
	int len, err = -EIO;

	/* Copy only available bytes, -EIO if nothing was read */
	do {
		if (offs >= i_size_read(uprobe->inode))
			break;

		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
		err = __copy_insn(mapping, filp, insn, len, offs);
		if (err)
			break;

		insn += len;
		offs += len;
		size -= len;
	} while (size);

	return err;
}

static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
				struct mm_struct *mm, unsigned long vaddr)
{
	int ret = 0;

	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		return ret;

	/* TODO: move this into _register, until then we abuse this sem. */
	down_write(&uprobe->consumer_rwsem);
	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		goto out;

	ret = copy_insn(uprobe, file);
	if (ret)
		goto out;

	ret = -ENOTSUPP;
	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
		goto out;

	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
	if (ret)
		goto out;

	smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
	set_bit(UPROBE_COPY_INSN, &uprobe->flags);

 out:
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static inline bool consumer_filter(struct uprobe_consumer *uc,
				   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	return !uc->filter || uc->filter(uc, ctx, mm);
}

static bool filter_chain(struct uprobe *uprobe,
			 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct uprobe_consumer *uc;
	bool ret = false;

	down_read(&uprobe->consumer_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		ret = consumer_filter(uc, ctx, mm);
		if (ret)
			break;
	}
	up_read(&uprobe->consumer_rwsem);

	return ret;
}

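/*
 * Copy and analyze the probed instruction if not done yet, mark the mm as
 * containing uprobes (MMF_HAS_UPROBES) and write the breakpoint. The flag
 * is set before set_swbp() so that a task hitting the breakpoint right
 * after __replace_page() already sees it; it is rolled back if this was
 * the first uprobe in the mm and the write failed.
 */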
static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	bool first_uprobe;
	int ret;

	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
	if (ret)
		return ret;

	/*
	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
	 * the task can hit this breakpoint right after __replace_page().
	 */
	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
	if (first_uprobe)
		set_bit(MMF_HAS_UPROBES, &mm->flags);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		clear_bit(MMF_HAS_UPROBES, &mm->flags);

	return ret;
}

static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
	set_bit(MMF_RECALC_UPROBES, &mm->flags);
	return set_orig_insn(&uprobe->arch, mm, vaddr);
}

static inline bool uprobe_is_active(struct uprobe *uprobe)
{
	return !RB_EMPTY_NODE(&uprobe->rb_node);
}
/*
 * There could be threads that have already hit the breakpoint. They
 * will recheck the current insn and restart if find_uprobe() fails.
 * See find_active_uprobe().
 */
static void delete_uprobe(struct uprobe *uprobe)
{
	if (WARN_ON(!uprobe_is_active(uprobe)))
		return;

	spin_lock(&uprobes_treelock);
	rb_erase(&uprobe->rb_node, &uprobes_tree);
	spin_unlock(&uprobes_treelock);
	RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
	put_uprobe(uprobe);
}

struct map_info {
	struct map_info *next;
	struct mm_struct *mm;
	unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
	struct map_info *next = info->next;
	kfree(info);
	return next;
}

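/*
 * Collect one map_info per vma that currently maps @offset of @mapping.
 * The first pass allocates opportunistically (GFP_NOWAIT) while holding
 * i_mmap_lock_read(); if any allocation was missed, the lock is dropped,
 * the missing entries are allocated with GFP_KERNEL and the walk is
 * retried from scratch.
 */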
static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct map_info *curr = NULL;
	struct map_info *prev = NULL;
	struct map_info *info;
	int more = 0;

 again:
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		if (!prev && !more) {
			/*
			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
			 * reclaim. This is optimistic, no harm done if it fails.
			 */
			prev = kmalloc(sizeof(struct map_info),
					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (prev)
				prev->next = NULL;
		}
		if (!prev) {
			more++;
			continue;
		}

		if (!mmget_not_zero(vma->vm_mm))
			continue;

		info = prev;
		prev = prev->next;
		info->next = curr;
		curr = info;

		info->mm = vma->vm_mm;
		info->vaddr = offset_to_vaddr(vma, offset);
	}
	i_mmap_unlock_read(mapping);

	if (!more)
		goto out;

	prev = curr;
	while (curr) {
		mmput(curr->mm);
		curr = curr->next;
	}

	do {
		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
		if (!info) {
			curr = ERR_PTR(-ENOMEM);
			goto out;
		}
		info->next = prev;
		prev = info;
	} while (--more);

	goto again;
 out:
	while (prev)
		prev = free_map_info(prev);
	return curr;
}

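/*
 * Walk every mm that maps uprobe->inode:offset and either install
 * (@new != NULL) or remove (@new == NULL) the breakpoint, consulting only
 * the new consumer's filter on register and the whole filter chain on
 * unregister. dup_mmap_sem is held for writing to keep fork() from copying
 * a mapping while it is being (un)patched.
 */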
static int
register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
{
	bool is_register = !!new;
	struct map_info *info;
	int err = 0;

	percpu_down_write(&dup_mmap_sem);
	info = build_map_info(uprobe->inode->i_mapping,
					uprobe->offset, is_register);
	if (IS_ERR(info)) {
		err = PTR_ERR(info);
		goto out;
	}

	while (info) {
		struct mm_struct *mm = info->mm;
		struct vm_area_struct *vma;

		if (err && is_register)
			goto free;

		mmap_write_lock(mm);
		vma = find_vma(mm, info->vaddr);
		if (!vma || !valid_vma(vma, is_register) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			goto unlock;

		if (vma->vm_start > info->vaddr ||
		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
			goto unlock;

		if (is_register) {
			/* consult only the "caller", new consumer. */
			if (consumer_filter(new,
					UPROBE_FILTER_REGISTER, mm))
				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
			if (!filter_chain(uprobe,
					UPROBE_FILTER_UNREGISTER, mm))
				err |= remove_breakpoint(uprobe, mm, info->vaddr);
		}

 unlock:
		mmap_write_unlock(mm);
 free:
		mmput(mm);
		info = free_map_info(info);
	}
 out:
	percpu_up_write(&dup_mmap_sem);
	return err;
}

static void
__uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	int err;

	if (WARN_ON(!consumer_del(uprobe, uc)))
		return;

	err = register_for_each_vma(uprobe, NULL);
	/* TODO: can't unregister? schedule a worker thread */
	if (!uprobe->consumers && !err)
		delete_uprobe(uprobe);
}

/*
 * uprobe_unregister - unregister an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;

	uprobe = find_uprobe(inode, offset);
	if (WARN_ON(!uprobe))
		return;

	down_write(&uprobe->register_rwsem);
	__uprobe_unregister(uprobe, uc);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister);

/*
112138e967aeSRavi Bangoria  * __uprobe_register - register a probe
1122a5f4374aSIngo Molnar  * @inode: the file in which the probe has to be placed.
1123a5f4374aSIngo Molnar  * @offset: offset from the start of the file.
1124e3343e6aSSrikar Dronamraju  * @uc: information on how to handle the probe.
1125a5f4374aSIngo Molnar  *
112638e967aeSRavi Bangoria  * Apart from the access refcount, __uprobe_register() takes a creation
1127a5f4374aSIngo Molnar  * refcount (through alloc_uprobe) if and only if this @uprobe is getting
1128a5f4374aSIngo Molnar  * inserted into the rbtree (i.e. the first consumer for an @inode:@offset
1129a5f4374aSIngo Molnar  * tuple).  Creation refcount stops uprobe_unregister from freeing the
1130a5f4374aSIngo Molnar  * @uprobe even before the register operation is complete. Creation
1131e3343e6aSSrikar Dronamraju  * refcount is released when the last @uc for the @uprobe
113238e967aeSRavi Bangoria  * unregisters. Caller of __uprobe_register() is required to keep @inode
113361f94203SSong Liu  * (and the containing mount) referenced.
1134a5f4374aSIngo Molnar  *
1135a5f4374aSIngo Molnar  * Return errno if it cannot successfully install probes,
1136a5f4374aSIngo Molnar  * else return 0 (success)
1137a5f4374aSIngo Molnar  */
113838e967aeSRavi Bangoria static int __uprobe_register(struct inode *inode, loff_t offset,
11391cc33161SRavi Bangoria 			     loff_t ref_ctr_offset, struct uprobe_consumer *uc)
1140a5f4374aSIngo Molnar {
1141a5f4374aSIngo Molnar 	struct uprobe *uprobe;
1142a5f4374aSIngo Molnar 	int ret;
1143a5f4374aSIngo Molnar 
1144ea024870SAnton Arapov 	/* The consumer must have at least one handler set */
1145ea024870SAnton Arapov 	if (!uc->handler && !uc->ret_handler)
1146ea024870SAnton Arapov 		return -EINVAL;
1147ea024870SAnton Arapov 
114840814f68SOleg Nesterov 	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
11495efe7448SMatthew Wilcox (Oracle) 	if (!inode->i_mapping->a_ops->read_folio &&
11505efe7448SMatthew Wilcox (Oracle) 	    !shmem_mapping(inode->i_mapping))
115141ccba02SOleg Nesterov 		return -EIO;
1152f0744af7SOleg Nesterov 	/* Racy, just to catch the obvious mistakes */
1153a5f4374aSIngo Molnar 	if (offset > i_size_read(inode))
1154a5f4374aSIngo Molnar 		return -EINVAL;
1155a5f4374aSIngo Molnar 
1156013b2debSOleg Nesterov 	/*
1157013b2debSOleg Nesterov 	 * This ensures that copy_from_page(), copy_to_page() and
1158013b2debSOleg Nesterov 	 * __update_ref_ctr() can't cross a page boundary.
1159013b2debSOleg Nesterov 	 */
1160013b2debSOleg Nesterov 	if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE))
1161013b2debSOleg Nesterov 		return -EINVAL;
1162013b2debSOleg Nesterov 	if (!IS_ALIGNED(ref_ctr_offset, sizeof(short)))
1163013b2debSOleg Nesterov 		return -EINVAL;
1164013b2debSOleg Nesterov 
116566d06dffSOleg Nesterov  retry:
11661cc33161SRavi Bangoria 	uprobe = alloc_uprobe(inode, offset, ref_ctr_offset);
116766d06dffSOleg Nesterov 	if (!uprobe)
116866d06dffSOleg Nesterov 		return -ENOMEM;
116922bad382SRavi Bangoria 	if (IS_ERR(uprobe))
117022bad382SRavi Bangoria 		return PTR_ERR(uprobe);
117122bad382SRavi Bangoria 
117266d06dffSOleg Nesterov 	/*
117366d06dffSOleg Nesterov 	 * We can race with uprobe_unregister()->delete_uprobe().
117466d06dffSOleg Nesterov 	 * Check uprobe_is_active() and retry if it is false.
117566d06dffSOleg Nesterov 	 */
1176e591c8d7SOleg Nesterov 	down_write(&uprobe->register_rwsem);
117766d06dffSOleg Nesterov 	ret = -EAGAIN;
117866d06dffSOleg Nesterov 	if (likely(uprobe_is_active(uprobe))) {
117938e967aeSRavi Bangoria 		consumer_add(uprobe, uc);
118038e967aeSRavi Bangoria 		ret = register_for_each_vma(uprobe, uc);
11819a98e03cSOleg Nesterov 		if (ret)
118204aab9b2SOleg Nesterov 			__uprobe_unregister(uprobe, uc);
1183a5f4374aSIngo Molnar 	}
118466d06dffSOleg Nesterov 	up_write(&uprobe->register_rwsem);
1185a5f4374aSIngo Molnar 	put_uprobe(uprobe);
1186a5f4374aSIngo Molnar 
118766d06dffSOleg Nesterov 	if (unlikely(ret == -EAGAIN))
118866d06dffSOleg Nesterov 		goto retry;
1189a5f4374aSIngo Molnar 	return ret;
1190a5f4374aSIngo Molnar }
119138e967aeSRavi Bangoria 
119238e967aeSRavi Bangoria int uprobe_register(struct inode *inode, loff_t offset,
119338e967aeSRavi Bangoria 		    struct uprobe_consumer *uc)
119438e967aeSRavi Bangoria {
11951cc33161SRavi Bangoria 	return __uprobe_register(inode, offset, 0, uc);
119638e967aeSRavi Bangoria }
1197e8440c14SJosh Stone EXPORT_SYMBOL_GPL(uprobe_register);
1198a5f4374aSIngo Molnar 
11991cc33161SRavi Bangoria int uprobe_register_refctr(struct inode *inode, loff_t offset,
12001cc33161SRavi Bangoria 			   loff_t ref_ctr_offset, struct uprobe_consumer *uc)
12011cc33161SRavi Bangoria {
12021cc33161SRavi Bangoria 	return __uprobe_register(inode, offset, ref_ctr_offset, uc);
12031cc33161SRavi Bangoria }
12041cc33161SRavi Bangoria EXPORT_SYMBOL_GPL(uprobe_register_refctr);
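
/*
 * Editor's note: a minimal, hypothetical usage sketch for the
 * registration API above. The consumer name and handler bodies are
 * illustrative assumptions and not part of this file; only
 * uprobe_register(), uprobe_unregister() and the uprobe_consumer
 * callback signatures are taken from the surrounding code.
 */
#if 0	/* illustrative only, not compiled */
static int my_handler(struct uprobe_consumer *self, struct pt_regs *regs)
{
	/* runs when a task hits the breakpoint; returning 0 keeps the probe */
	return 0;
}

static int my_ret_handler(struct uprobe_consumer *self, unsigned long func,
			  struct pt_regs *regs)
{
	/* runs when the probed function returns, via the uretprobe trampoline */
	return 0;
}

static struct uprobe_consumer my_consumer = {
	.handler	= my_handler,
	.ret_handler	= my_ret_handler,
};

static int attach_probe(struct inode *inode, loff_t offset)
{
	/* the caller must keep @inode (and its mount) referenced, see above */
	return uprobe_register(inode, offset, &my_consumer);
}

static void detach_probe(struct inode *inode, loff_t offset)
{
	uprobe_unregister(inode, offset, &my_consumer);
}
#endif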
12051cc33161SRavi Bangoria 
1206a5f4374aSIngo Molnar /*
1207788faab7STobias Tefke  * uprobe_apply - add or remove the breakpoints of an already registered probe.
1208bdf8647cSOleg Nesterov  * @inode: the file in which the probe is placed.
1209bdf8647cSOleg Nesterov  * @offset: offset from the start of the file.
1210bdf8647cSOleg Nesterov  * @uc: consumer which wants to add more or remove some breakpoints
1211bdf8647cSOleg Nesterov  * @add: add or remove the breakpoints
1212bdf8647cSOleg Nesterov  */
1213bdf8647cSOleg Nesterov int uprobe_apply(struct inode *inode, loff_t offset,
1214bdf8647cSOleg Nesterov 			struct uprobe_consumer *uc, bool add)
1215bdf8647cSOleg Nesterov {
1216bdf8647cSOleg Nesterov 	struct uprobe *uprobe;
1217bdf8647cSOleg Nesterov 	struct uprobe_consumer *con;
1218bdf8647cSOleg Nesterov 	int ret = -ENOENT;
1219bdf8647cSOleg Nesterov 
1220bdf8647cSOleg Nesterov 	uprobe = find_uprobe(inode, offset);
122106d07139SOleg Nesterov 	if (WARN_ON(!uprobe))
1222bdf8647cSOleg Nesterov 		return ret;
1223bdf8647cSOleg Nesterov 
1224bdf8647cSOleg Nesterov 	down_write(&uprobe->register_rwsem);
1225bdf8647cSOleg Nesterov 	for (con = uprobe->consumers; con && con != uc ; con = con->next)
1226bdf8647cSOleg Nesterov 		;
1227bdf8647cSOleg Nesterov 	if (con)
1228bdf8647cSOleg Nesterov 		ret = register_for_each_vma(uprobe, add ? uc : NULL);
1229bdf8647cSOleg Nesterov 	up_write(&uprobe->register_rwsem);
1230bdf8647cSOleg Nesterov 	put_uprobe(uprobe);
1231bdf8647cSOleg Nesterov 
1232bdf8647cSOleg Nesterov 	return ret;
1233bdf8647cSOleg Nesterov }
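
/*
 * Editor's note: a hypothetical sketch of how a tracer could use
 * uprobe_apply() to toggle the breakpoints of a consumer that is
 * already registered (reusing &my_consumer from the illustrative
 * sketch above) without a full unregister/register cycle.
 */
#if 0	/* illustrative only, not compiled */
static int set_probe_enabled(struct inode *inode, loff_t offset, bool on)
{
	return uprobe_apply(inode, offset, &my_consumer, on);
}
#endif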
1234bdf8647cSOleg Nesterov 
1235da1816b1SOleg Nesterov static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
1236da1816b1SOleg Nesterov {
1237fcb72a58SMatthew Wilcox (Oracle) 	VMA_ITERATOR(vmi, mm, 0);
1238da1816b1SOleg Nesterov 	struct vm_area_struct *vma;
1239da1816b1SOleg Nesterov 	int err = 0;
1240da1816b1SOleg Nesterov 
1241d8ed45c5SMichel Lespinasse 	mmap_read_lock(mm);
1242fcb72a58SMatthew Wilcox (Oracle) 	for_each_vma(vmi, vma) {
1243da1816b1SOleg Nesterov 		unsigned long vaddr;
1244da1816b1SOleg Nesterov 		loff_t offset;
1245da1816b1SOleg Nesterov 
1246da1816b1SOleg Nesterov 		if (!valid_vma(vma, false) ||
1247f281769eSOleg Nesterov 		    file_inode(vma->vm_file) != uprobe->inode)
1248da1816b1SOleg Nesterov 			continue;
1249da1816b1SOleg Nesterov 
1250da1816b1SOleg Nesterov 		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
1251da1816b1SOleg Nesterov 		if (uprobe->offset <  offset ||
1252da1816b1SOleg Nesterov 		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
1253da1816b1SOleg Nesterov 			continue;
1254da1816b1SOleg Nesterov 
1255da1816b1SOleg Nesterov 		vaddr = offset_to_vaddr(vma, uprobe->offset);
1256da1816b1SOleg Nesterov 		err |= remove_breakpoint(uprobe, mm, vaddr);
1257da1816b1SOleg Nesterov 	}
1258d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
1259da1816b1SOleg Nesterov 
1260da1816b1SOleg Nesterov 	return err;
1261da1816b1SOleg Nesterov }
1262da1816b1SOleg Nesterov 
1263891c3970SOleg Nesterov static struct rb_node *
1264891c3970SOleg Nesterov find_node_in_range(struct inode *inode, loff_t min, loff_t max)
1265a5f4374aSIngo Molnar {
1266a5f4374aSIngo Molnar 	struct rb_node *n = uprobes_tree.rb_node;
1267a5f4374aSIngo Molnar 
1268a5f4374aSIngo Molnar 	while (n) {
1269891c3970SOleg Nesterov 		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);
1270a5f4374aSIngo Molnar 
1271891c3970SOleg Nesterov 		if (inode < u->inode) {
1272a5f4374aSIngo Molnar 			n = n->rb_left;
1273891c3970SOleg Nesterov 		} else if (inode > u->inode) {
1274a5f4374aSIngo Molnar 			n = n->rb_right;
1275891c3970SOleg Nesterov 		} else {
1276891c3970SOleg Nesterov 			if (max < u->offset)
1277891c3970SOleg Nesterov 				n = n->rb_left;
1278891c3970SOleg Nesterov 			else if (min > u->offset)
1279891c3970SOleg Nesterov 				n = n->rb_right;
1280891c3970SOleg Nesterov 			else
1281891c3970SOleg Nesterov 				break;
1282891c3970SOleg Nesterov 		}
1283a5f4374aSIngo Molnar 	}
1284a5f4374aSIngo Molnar 
1285891c3970SOleg Nesterov 	return n;
1286a5f4374aSIngo Molnar }
1287a5f4374aSIngo Molnar 
1288a5f4374aSIngo Molnar /*
1289891c3970SOleg Nesterov  * For a given range in vma, build a list of probes that need to be inserted.
1290a5f4374aSIngo Molnar  */
1291891c3970SOleg Nesterov static void build_probe_list(struct inode *inode,
1292891c3970SOleg Nesterov 				struct vm_area_struct *vma,
1293891c3970SOleg Nesterov 				unsigned long start, unsigned long end,
1294891c3970SOleg Nesterov 				struct list_head *head)
1295a5f4374aSIngo Molnar {
1296891c3970SOleg Nesterov 	loff_t min, max;
1297891c3970SOleg Nesterov 	struct rb_node *n, *t;
1298891c3970SOleg Nesterov 	struct uprobe *u;
1299891c3970SOleg Nesterov 
1300891c3970SOleg Nesterov 	INIT_LIST_HEAD(head);
1301cb113b47SOleg Nesterov 	min = vaddr_to_offset(vma, start);
1302891c3970SOleg Nesterov 	max = min + (end - start) - 1;
1303a5f4374aSIngo Molnar 
13046f47caa0SOleg Nesterov 	spin_lock(&uprobes_treelock);
1305891c3970SOleg Nesterov 	n = find_node_in_range(inode, min, max);
1306891c3970SOleg Nesterov 	if (n) {
1307891c3970SOleg Nesterov 		for (t = n; t; t = rb_prev(t)) {
1308891c3970SOleg Nesterov 			u = rb_entry(t, struct uprobe, rb_node);
1309891c3970SOleg Nesterov 			if (u->inode != inode || u->offset < min)
1310a5f4374aSIngo Molnar 				break;
1311891c3970SOleg Nesterov 			list_add(&u->pending_list, head);
1312f231722aSOleg Nesterov 			get_uprobe(u);
1313a5f4374aSIngo Molnar 		}
1314891c3970SOleg Nesterov 		for (t = n; (t = rb_next(t)); ) {
1315891c3970SOleg Nesterov 			u = rb_entry(t, struct uprobe, rb_node);
1316891c3970SOleg Nesterov 			if (u->inode != inode || u->offset > max)
1317891c3970SOleg Nesterov 				break;
1318891c3970SOleg Nesterov 			list_add(&u->pending_list, head);
1319f231722aSOleg Nesterov 			get_uprobe(u);
1320891c3970SOleg Nesterov 		}
1321891c3970SOleg Nesterov 	}
13226f47caa0SOleg Nesterov 	spin_unlock(&uprobes_treelock);
1323a5f4374aSIngo Molnar }
1324a5f4374aSIngo Molnar 
13251cc33161SRavi Bangoria /* @vma contains the reference counter, not the probed instruction. */
13261cc33161SRavi Bangoria static int delayed_ref_ctr_inc(struct vm_area_struct *vma)
13271cc33161SRavi Bangoria {
13281cc33161SRavi Bangoria 	struct list_head *pos, *q;
13291cc33161SRavi Bangoria 	struct delayed_uprobe *du;
13301cc33161SRavi Bangoria 	unsigned long vaddr;
13311cc33161SRavi Bangoria 	int ret = 0, err = 0;
13321cc33161SRavi Bangoria 
13331cc33161SRavi Bangoria 	mutex_lock(&delayed_uprobe_lock);
13341cc33161SRavi Bangoria 	list_for_each_safe(pos, q, &delayed_uprobe_list) {
13351cc33161SRavi Bangoria 		du = list_entry(pos, struct delayed_uprobe, list);
13361cc33161SRavi Bangoria 
13371cc33161SRavi Bangoria 		if (du->mm != vma->vm_mm ||
13381cc33161SRavi Bangoria 		    !valid_ref_ctr_vma(du->uprobe, vma))
13391cc33161SRavi Bangoria 			continue;
13401cc33161SRavi Bangoria 
13411cc33161SRavi Bangoria 		vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset);
13421cc33161SRavi Bangoria 		ret = __update_ref_ctr(vma->vm_mm, vaddr, 1);
13431cc33161SRavi Bangoria 		if (ret) {
13441cc33161SRavi Bangoria 			update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1);
13451cc33161SRavi Bangoria 			if (!err)
13461cc33161SRavi Bangoria 				err = ret;
13471cc33161SRavi Bangoria 		}
13481cc33161SRavi Bangoria 		delayed_uprobe_delete(du);
13491cc33161SRavi Bangoria 	}
13501cc33161SRavi Bangoria 	mutex_unlock(&delayed_uprobe_lock);
13511cc33161SRavi Bangoria 	return err;
13521cc33161SRavi Bangoria }
13531cc33161SRavi Bangoria 
1354a5f4374aSIngo Molnar /*
1355c1e8d7c6SMichel Lespinasse  * Called from mmap_region/vma_adjust with mm->mmap_lock acquired.
1356a5f4374aSIngo Molnar  *
13575e5be71aSOleg Nesterov  * Currently we ignore all errors and always return 0; the callers
13585e5be71aSOleg Nesterov  * can't handle the failure anyway.
1359a5f4374aSIngo Molnar  */
1360a5f4374aSIngo Molnar int uprobe_mmap(struct vm_area_struct *vma)
1361a5f4374aSIngo Molnar {
1362a5f4374aSIngo Molnar 	struct list_head tmp_list;
1363665605a2SOleg Nesterov 	struct uprobe *uprobe, *u;
1364a5f4374aSIngo Molnar 	struct inode *inode;
1365a5f4374aSIngo Molnar 
13661cc33161SRavi Bangoria 	if (no_uprobe_events())
13671cc33161SRavi Bangoria 		return 0;
13681cc33161SRavi Bangoria 
13691cc33161SRavi Bangoria 	if (vma->vm_file &&
13701cc33161SRavi Bangoria 	    (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
13711cc33161SRavi Bangoria 	    test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags))
13721cc33161SRavi Bangoria 		delayed_ref_ctr_inc(vma);
13731cc33161SRavi Bangoria 
13741cc33161SRavi Bangoria 	if (!valid_vma(vma, true))
1375a5f4374aSIngo Molnar 		return 0;
1376a5f4374aSIngo Molnar 
1377f281769eSOleg Nesterov 	inode = file_inode(vma->vm_file);
1378a5f4374aSIngo Molnar 	if (!inode)
1379a5f4374aSIngo Molnar 		return 0;
1380a5f4374aSIngo Molnar 
1381a5f4374aSIngo Molnar 	mutex_lock(uprobes_mmap_hash(inode));
1382891c3970SOleg Nesterov 	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
1383806a98bdSOleg Nesterov 	/*
1384806a98bdSOleg Nesterov 	 * We can race with uprobe_unregister(); this uprobe can already be
1385806a98bdSOleg Nesterov 	 * removed. But in this case filter_chain() must return false, as all
1386806a98bdSOleg Nesterov 	 * consumers have gone away.
1387806a98bdSOleg Nesterov 	 */
1388665605a2SOleg Nesterov 	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
1389806a98bdSOleg Nesterov 		if (!fatal_signal_pending(current) &&
13908a7f2fa0SOleg Nesterov 		    filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
139157683f72SOleg Nesterov 			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
13925e5be71aSOleg Nesterov 			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
1393a5f4374aSIngo Molnar 		}
1394a5f4374aSIngo Molnar 		put_uprobe(uprobe);
1395a5f4374aSIngo Molnar 	}
1396a5f4374aSIngo Molnar 	mutex_unlock(uprobes_mmap_hash(inode));
1397a5f4374aSIngo Molnar 
13985e5be71aSOleg Nesterov 	return 0;
1399a5f4374aSIngo Molnar }
1400a5f4374aSIngo Molnar 
14019f68f672SOleg Nesterov static bool
14029f68f672SOleg Nesterov vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
14039f68f672SOleg Nesterov {
14049f68f672SOleg Nesterov 	loff_t min, max;
14059f68f672SOleg Nesterov 	struct inode *inode;
14069f68f672SOleg Nesterov 	struct rb_node *n;
14079f68f672SOleg Nesterov 
1408f281769eSOleg Nesterov 	inode = file_inode(vma->vm_file);
14099f68f672SOleg Nesterov 
14109f68f672SOleg Nesterov 	min = vaddr_to_offset(vma, start);
14119f68f672SOleg Nesterov 	max = min + (end - start) - 1;
14129f68f672SOleg Nesterov 
14139f68f672SOleg Nesterov 	spin_lock(&uprobes_treelock);
14149f68f672SOleg Nesterov 	n = find_node_in_range(inode, min, max);
14159f68f672SOleg Nesterov 	spin_unlock(&uprobes_treelock);
14169f68f672SOleg Nesterov 
14179f68f672SOleg Nesterov 	return !!n;
14189f68f672SOleg Nesterov }
14199f68f672SOleg Nesterov 
1420682968e0SSrikar Dronamraju /*
1421682968e0SSrikar Dronamraju  * Called in context of a munmap of a vma.
1422682968e0SSrikar Dronamraju  */
1423cbc91f71SSrikar Dronamraju void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1424682968e0SSrikar Dronamraju {
1425441f1eb7SOleg Nesterov 	if (no_uprobe_events() || !valid_vma(vma, false))
1426682968e0SSrikar Dronamraju 		return;
1427682968e0SSrikar Dronamraju 
14282fd611a9SOleg Nesterov 	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
14292fd611a9SOleg Nesterov 		return;
14302fd611a9SOleg Nesterov 
14319f68f672SOleg Nesterov 	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
14329f68f672SOleg Nesterov 	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
1433f8ac4ec9SOleg Nesterov 		return;
1434f8ac4ec9SOleg Nesterov 
14359f68f672SOleg Nesterov 	if (vma_has_uprobes(vma, start, end))
14369f68f672SOleg Nesterov 		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
1437682968e0SSrikar Dronamraju }
1438682968e0SSrikar Dronamraju 
1439d4b3b638SSrikar Dronamraju /* Slot allocation for XOL */
14406441ec8bSOleg Nesterov static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
1441d4b3b638SSrikar Dronamraju {
1442704bde3cSOleg Nesterov 	struct vm_area_struct *vma;
1443704bde3cSOleg Nesterov 	int ret;
1444d4b3b638SSrikar Dronamraju 
1445d8ed45c5SMichel Lespinasse 	if (mmap_write_lock_killable(mm))
1446598fdc1dSMichal Hocko 		return -EINTR;
1447598fdc1dSMichal Hocko 
1448704bde3cSOleg Nesterov 	if (mm->uprobes_state.xol_area) {
1449704bde3cSOleg Nesterov 		ret = -EALREADY;
1450d4b3b638SSrikar Dronamraju 		goto fail;
1451704bde3cSOleg Nesterov 	}
1452d4b3b638SSrikar Dronamraju 
1453af0d95afSOleg Nesterov 	if (!area->vaddr) {
1454d4b3b638SSrikar Dronamraju 		/* Try to map as high as possible; this is only a hint. */
1455af0d95afSOleg Nesterov 		area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
1456af0d95afSOleg Nesterov 						PAGE_SIZE, 0, 0);
1457ff68dac6SGaowei Pu 		if (IS_ERR_VALUE(area->vaddr)) {
1458d4b3b638SSrikar Dronamraju 			ret = area->vaddr;
1459d4b3b638SSrikar Dronamraju 			goto fail;
1460d4b3b638SSrikar Dronamraju 		}
1461af0d95afSOleg Nesterov 	}
1462d4b3b638SSrikar Dronamraju 
1463704bde3cSOleg Nesterov 	vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
1464704bde3cSOleg Nesterov 				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
1465704bde3cSOleg Nesterov 				&area->xol_mapping);
1466704bde3cSOleg Nesterov 	if (IS_ERR(vma)) {
1467704bde3cSOleg Nesterov 		ret = PTR_ERR(vma);
1468d4b3b638SSrikar Dronamraju 		goto fail;
1469704bde3cSOleg Nesterov 	}
1470d4b3b638SSrikar Dronamraju 
1471704bde3cSOleg Nesterov 	ret = 0;
14725c6338b4SPaul E. McKenney 	/* pairs with get_xol_area() */
14735c6338b4SPaul E. McKenney 	smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
1474d4b3b638SSrikar Dronamraju  fail:
1475d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
1476d4b3b638SSrikar Dronamraju 
1477d4b3b638SSrikar Dronamraju 	return ret;
1478d4b3b638SSrikar Dronamraju }
1479d4b3b638SSrikar Dronamraju 
1480af0d95afSOleg Nesterov static struct xol_area *__create_xol_area(unsigned long vaddr)
1481d4b3b638SSrikar Dronamraju {
14829b545df8SOleg Nesterov 	struct mm_struct *mm = current->mm;
1483e78aebfdSAnton Arapov 	uprobe_opcode_t insn = UPROBE_SWBP_INSN;
14846441ec8bSOleg Nesterov 	struct xol_area *area;
14859b545df8SOleg Nesterov 
1486af0d95afSOleg Nesterov 	area = kmalloc(sizeof(*area), GFP_KERNEL);
1487d4b3b638SSrikar Dronamraju 	if (unlikely(!area))
1488c8a82538SOleg Nesterov 		goto out;
1489d4b3b638SSrikar Dronamraju 
14906396bb22SKees Cook 	area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long),
14916396bb22SKees Cook 			       GFP_KERNEL);
1492d4b3b638SSrikar Dronamraju 	if (!area->bitmap)
1493c8a82538SOleg Nesterov 		goto free_area;
1494c8a82538SOleg Nesterov 
1495704bde3cSOleg Nesterov 	area->xol_mapping.name = "[uprobes]";
1496869ae761SOleg Nesterov 	area->xol_mapping.fault = NULL;
1497704bde3cSOleg Nesterov 	area->xol_mapping.pages = area->pages;
1498f58bea2fSOleg Nesterov 	area->pages[0] = alloc_page(GFP_HIGHUSER);
1499f58bea2fSOleg Nesterov 	if (!area->pages[0])
1500c8a82538SOleg Nesterov 		goto free_bitmap;
1501f58bea2fSOleg Nesterov 	area->pages[1] = NULL;
1502d4b3b638SSrikar Dronamraju 
1503af0d95afSOleg Nesterov 	area->vaddr = vaddr;
1504d4b3b638SSrikar Dronamraju 	init_waitqueue_head(&area->wq);
15056441ec8bSOleg Nesterov 	/* Reserve the 1st slot for get_trampoline_vaddr() */
15066441ec8bSOleg Nesterov 	set_bit(0, area->bitmap);
15076441ec8bSOleg Nesterov 	atomic_set(&area->slot_count, 1);
1508297e765eSMarcin Nowakowski 	arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);
1509e78aebfdSAnton Arapov 
15106441ec8bSOleg Nesterov 	if (!xol_add_vma(mm, area))
1511d4b3b638SSrikar Dronamraju 		return area;
1512d4b3b638SSrikar Dronamraju 
1513f58bea2fSOleg Nesterov 	__free_page(area->pages[0]);
1514c8a82538SOleg Nesterov  free_bitmap:
1515d4b3b638SSrikar Dronamraju 	kfree(area->bitmap);
1516c8a82538SOleg Nesterov  free_area:
1517d4b3b638SSrikar Dronamraju 	kfree(area);
1518c8a82538SOleg Nesterov  out:
15196441ec8bSOleg Nesterov 	return NULL;
15206441ec8bSOleg Nesterov }
15216441ec8bSOleg Nesterov 
15226441ec8bSOleg Nesterov /*
15236441ec8bSOleg Nesterov  * get_xol_area - Allocate process's xol_area if necessary.
15246441ec8bSOleg Nesterov  * This area will be used for storing instructions for execution out of line.
15256441ec8bSOleg Nesterov  *
15266441ec8bSOleg Nesterov  * Returns the allocated area or NULL.
15276441ec8bSOleg Nesterov  */
15286441ec8bSOleg Nesterov static struct xol_area *get_xol_area(void)
15296441ec8bSOleg Nesterov {
15306441ec8bSOleg Nesterov 	struct mm_struct *mm = current->mm;
15316441ec8bSOleg Nesterov 	struct xol_area *area;
15326441ec8bSOleg Nesterov 
15336441ec8bSOleg Nesterov 	if (!mm->uprobes_state.xol_area)
1534af0d95afSOleg Nesterov 		__create_xol_area(0);
15356441ec8bSOleg Nesterov 
15365c6338b4SPaul E. McKenney 	/* Pairs with xol_add_vma() smp_store_release() */
15375c6338b4SPaul E. McKenney 	area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */
15389b545df8SOleg Nesterov 	return area;
1539d4b3b638SSrikar Dronamraju }
1540d4b3b638SSrikar Dronamraju 
1541d4b3b638SSrikar Dronamraju /*
1542d4b3b638SSrikar Dronamraju  * uprobe_clear_state - Free the area allocated for slots.
1543d4b3b638SSrikar Dronamraju  */
1544d4b3b638SSrikar Dronamraju void uprobe_clear_state(struct mm_struct *mm)
1545d4b3b638SSrikar Dronamraju {
1546d4b3b638SSrikar Dronamraju 	struct xol_area *area = mm->uprobes_state.xol_area;
1547d4b3b638SSrikar Dronamraju 
15481cc33161SRavi Bangoria 	mutex_lock(&delayed_uprobe_lock);
15491cc33161SRavi Bangoria 	delayed_uprobe_remove(NULL, mm);
15501cc33161SRavi Bangoria 	mutex_unlock(&delayed_uprobe_lock);
15511cc33161SRavi Bangoria 
1552d4b3b638SSrikar Dronamraju 	if (!area)
1553d4b3b638SSrikar Dronamraju 		return;
1554d4b3b638SSrikar Dronamraju 
1555f58bea2fSOleg Nesterov 	put_page(area->pages[0]);
1556d4b3b638SSrikar Dronamraju 	kfree(area->bitmap);
1557d4b3b638SSrikar Dronamraju 	kfree(area);
1558d4b3b638SSrikar Dronamraju }
1559d4b3b638SSrikar Dronamraju 
156032cdba1eSOleg Nesterov void uprobe_start_dup_mmap(void)
156132cdba1eSOleg Nesterov {
156232cdba1eSOleg Nesterov 	percpu_down_read(&dup_mmap_sem);
156332cdba1eSOleg Nesterov }
156432cdba1eSOleg Nesterov 
156532cdba1eSOleg Nesterov void uprobe_end_dup_mmap(void)
156632cdba1eSOleg Nesterov {
156732cdba1eSOleg Nesterov 	percpu_up_read(&dup_mmap_sem);
156832cdba1eSOleg Nesterov }
156932cdba1eSOleg Nesterov 
1570f8ac4ec9SOleg Nesterov void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
1571f8ac4ec9SOleg Nesterov {
15729f68f672SOleg Nesterov 	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
1573f8ac4ec9SOleg Nesterov 		set_bit(MMF_HAS_UPROBES, &newmm->flags);
15749f68f672SOleg Nesterov 		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
15759f68f672SOleg Nesterov 		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
15769f68f672SOleg Nesterov 	}
1577f8ac4ec9SOleg Nesterov }
1578f8ac4ec9SOleg Nesterov 
1579d4b3b638SSrikar Dronamraju /*
1580d4b3b638SSrikar Dronamraju  * xol_take_insn_slot - search for a free slot.
1581d4b3b638SSrikar Dronamraju  */
1582d4b3b638SSrikar Dronamraju static unsigned long xol_take_insn_slot(struct xol_area *area)
1583d4b3b638SSrikar Dronamraju {
1584d4b3b638SSrikar Dronamraju 	unsigned long slot_addr;
1585d4b3b638SSrikar Dronamraju 	int slot_nr;
1586d4b3b638SSrikar Dronamraju 
1587d4b3b638SSrikar Dronamraju 	do {
1588d4b3b638SSrikar Dronamraju 		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
1589d4b3b638SSrikar Dronamraju 		if (slot_nr < UINSNS_PER_PAGE) {
1590d4b3b638SSrikar Dronamraju 			if (!test_and_set_bit(slot_nr, area->bitmap))
1591d4b3b638SSrikar Dronamraju 				break;
1592d4b3b638SSrikar Dronamraju 
1593d4b3b638SSrikar Dronamraju 			slot_nr = UINSNS_PER_PAGE;
1594d4b3b638SSrikar Dronamraju 			continue;
1595d4b3b638SSrikar Dronamraju 		}
1596d4b3b638SSrikar Dronamraju 		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
1597d4b3b638SSrikar Dronamraju 	} while (slot_nr >= UINSNS_PER_PAGE);
1598d4b3b638SSrikar Dronamraju 
1599d4b3b638SSrikar Dronamraju 	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
1600d4b3b638SSrikar Dronamraju 	atomic_inc(&area->slot_count);
1601d4b3b638SSrikar Dronamraju 
1602d4b3b638SSrikar Dronamraju 	return slot_addr;
1603d4b3b638SSrikar Dronamraju }
1604d4b3b638SSrikar Dronamraju 
1605d4b3b638SSrikar Dronamraju /*
1606a6cb3f6dSOleg Nesterov  * xol_get_insn_slot - allocate a slot for xol.
1607d4b3b638SSrikar Dronamraju  * Returns the allocated slot address or 0.
1608d4b3b638SSrikar Dronamraju  */
1609a6cb3f6dSOleg Nesterov static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
1610d4b3b638SSrikar Dronamraju {
1611d4b3b638SSrikar Dronamraju 	struct xol_area *area;
1612a6cb3f6dSOleg Nesterov 	unsigned long xol_vaddr;
1613d4b3b638SSrikar Dronamraju 
16149b545df8SOleg Nesterov 	area = get_xol_area();
1615d4b3b638SSrikar Dronamraju 	if (!area)
1616d4b3b638SSrikar Dronamraju 		return 0;
1617d4b3b638SSrikar Dronamraju 
1618a6cb3f6dSOleg Nesterov 	xol_vaddr = xol_take_insn_slot(area);
1619a6cb3f6dSOleg Nesterov 	if (unlikely(!xol_vaddr))
1620d4b3b638SSrikar Dronamraju 		return 0;
1621d4b3b638SSrikar Dronamraju 
1622f58bea2fSOleg Nesterov 	arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
1623803200e2SOleg Nesterov 			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
1624d4b3b638SSrikar Dronamraju 
1625a6cb3f6dSOleg Nesterov 	return xol_vaddr;
1626d4b3b638SSrikar Dronamraju }
1627d4b3b638SSrikar Dronamraju 
1628d4b3b638SSrikar Dronamraju /*
1629d4b3b638SSrikar Dronamraju  * xol_free_insn_slot - If the slot was earlier allocated by
1630d4b3b638SSrikar Dronamraju  * @xol_get_insn_slot(), make the slot available for
1631d4b3b638SSrikar Dronamraju  * subsequent requests.
1632d4b3b638SSrikar Dronamraju  */
1633d4b3b638SSrikar Dronamraju static void xol_free_insn_slot(struct task_struct *tsk)
1634d4b3b638SSrikar Dronamraju {
1635d4b3b638SSrikar Dronamraju 	struct xol_area *area;
1636d4b3b638SSrikar Dronamraju 	unsigned long vma_end;
1637d4b3b638SSrikar Dronamraju 	unsigned long slot_addr;
1638d4b3b638SSrikar Dronamraju 
1639d4b3b638SSrikar Dronamraju 	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
1640d4b3b638SSrikar Dronamraju 		return;
1641d4b3b638SSrikar Dronamraju 
1642d4b3b638SSrikar Dronamraju 	slot_addr = tsk->utask->xol_vaddr;
1643af4355e9SOleg Nesterov 	if (unlikely(!slot_addr))
1644d4b3b638SSrikar Dronamraju 		return;
1645d4b3b638SSrikar Dronamraju 
1646d4b3b638SSrikar Dronamraju 	area = tsk->mm->uprobes_state.xol_area;
1647d4b3b638SSrikar Dronamraju 	vma_end = area->vaddr + PAGE_SIZE;
1648d4b3b638SSrikar Dronamraju 	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
1649d4b3b638SSrikar Dronamraju 		unsigned long offset;
1650d4b3b638SSrikar Dronamraju 		int slot_nr;
1651d4b3b638SSrikar Dronamraju 
1652d4b3b638SSrikar Dronamraju 		offset = slot_addr - area->vaddr;
1653d4b3b638SSrikar Dronamraju 		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
1654d4b3b638SSrikar Dronamraju 		if (slot_nr >= UINSNS_PER_PAGE)
1655d4b3b638SSrikar Dronamraju 			return;
1656d4b3b638SSrikar Dronamraju 
1657d4b3b638SSrikar Dronamraju 		clear_bit(slot_nr, area->bitmap);
1658d4b3b638SSrikar Dronamraju 		atomic_dec(&area->slot_count);
16592a742cedSOleg Nesterov 		smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
1660d4b3b638SSrikar Dronamraju 		if (waitqueue_active(&area->wq))
1661d4b3b638SSrikar Dronamraju 			wake_up(&area->wq);
1662d4b3b638SSrikar Dronamraju 
1663d4b3b638SSrikar Dronamraju 		tsk->utask->xol_vaddr = 0;
1664d4b3b638SSrikar Dronamraju 	}
1665d4b3b638SSrikar Dronamraju }
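
/*
 * Editor's note: a worked example of the slot arithmetic above, assuming
 * the (architecture-dependent) values PAGE_SIZE == 4096 and
 * UPROBE_XOL_SLOT_BYTES == 128, so UINSNS_PER_PAGE == 32. Slot 5 then
 * starts at area->vaddr + 5 * 128 == area->vaddr + 0x280, and
 * xol_free_insn_slot() recovers its number as
 * (slot_addr - area->vaddr) / UPROBE_XOL_SLOT_BYTES == 0x280 / 128 == 5.
 */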
1666d4b3b638SSrikar Dronamraju 
166772e6ae28SVictor Kamensky void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
166872e6ae28SVictor Kamensky 				  void *src, unsigned long len)
166972e6ae28SVictor Kamensky {
167072e6ae28SVictor Kamensky 	/* Initialize the slot */
167172e6ae28SVictor Kamensky 	copy_to_page(page, vaddr, src, len);
167272e6ae28SVictor Kamensky 
167372e6ae28SVictor Kamensky 	/*
1674885f7f8eSChristoph Hellwig 	 * We probably need flush_icache_user_page() but it needs a vma.
167572e6ae28SVictor Kamensky 	 * This should work on most architectures by default. If an
167672e6ae28SVictor Kamensky 	 * architecture needs to do something different, it can define
167772e6ae28SVictor Kamensky 	 * its own version of this function.
167872e6ae28SVictor Kamensky 	 */
167972e6ae28SVictor Kamensky 	flush_dcache_page(page);
168072e6ae28SVictor Kamensky }
168172e6ae28SVictor Kamensky 
16820326f5a9SSrikar Dronamraju /**
16830326f5a9SSrikar Dronamraju  * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
16840326f5a9SSrikar Dronamraju  * @regs: Reflects the saved state of the task after it has hit a breakpoint
16850326f5a9SSrikar Dronamraju  * instruction.
16860326f5a9SSrikar Dronamraju  * Return the address of the breakpoint instruction.
16870326f5a9SSrikar Dronamraju  */
16880326f5a9SSrikar Dronamraju unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
16890326f5a9SSrikar Dronamraju {
16900326f5a9SSrikar Dronamraju 	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
16910326f5a9SSrikar Dronamraju }
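
/*
 * Editor's note: for example, on x86 the breakpoint is the one-byte
 * int3 instruction (UPROBE_SWBP_INSN_SIZE == 1), so a trap taken with
 * regs->ip == 0x401005 corresponds to a breakpoint placed at 0x401004.
 */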
16920326f5a9SSrikar Dronamraju 
1693b02ef20aSOleg Nesterov unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
1694b02ef20aSOleg Nesterov {
1695b02ef20aSOleg Nesterov 	struct uprobe_task *utask = current->utask;
1696b02ef20aSOleg Nesterov 
1697b02ef20aSOleg Nesterov 	if (unlikely(utask && utask->active_uprobe))
1698b02ef20aSOleg Nesterov 		return utask->vaddr;
1699b02ef20aSOleg Nesterov 
1700b02ef20aSOleg Nesterov 	return instruction_pointer(regs);
1701b02ef20aSOleg Nesterov }
1702b02ef20aSOleg Nesterov 
17032bb5e840SOleg Nesterov static struct return_instance *free_ret_instance(struct return_instance *ri)
17042bb5e840SOleg Nesterov {
17052bb5e840SOleg Nesterov 	struct return_instance *next = ri->next;
17062bb5e840SOleg Nesterov 	put_uprobe(ri->uprobe);
17072bb5e840SOleg Nesterov 	kfree(ri);
17082bb5e840SOleg Nesterov 	return next;
17092bb5e840SOleg Nesterov }
17102bb5e840SOleg Nesterov 
17110326f5a9SSrikar Dronamraju /*
17120326f5a9SSrikar Dronamraju  * Called with no locks held.
1713788faab7STobias Tefke  * Called in context of an exiting or an exec-ing thread.
17140326f5a9SSrikar Dronamraju  */
17150326f5a9SSrikar Dronamraju void uprobe_free_utask(struct task_struct *t)
17160326f5a9SSrikar Dronamraju {
17170326f5a9SSrikar Dronamraju 	struct uprobe_task *utask = t->utask;
17182bb5e840SOleg Nesterov 	struct return_instance *ri;
17190326f5a9SSrikar Dronamraju 
17200326f5a9SSrikar Dronamraju 	if (!utask)
17210326f5a9SSrikar Dronamraju 		return;
17220326f5a9SSrikar Dronamraju 
17230326f5a9SSrikar Dronamraju 	if (utask->active_uprobe)
17240326f5a9SSrikar Dronamraju 		put_uprobe(utask->active_uprobe);
17250326f5a9SSrikar Dronamraju 
17260dfd0eb8SAnton Arapov 	ri = utask->return_instances;
17272bb5e840SOleg Nesterov 	while (ri)
17282bb5e840SOleg Nesterov 		ri = free_ret_instance(ri);
17290dfd0eb8SAnton Arapov 
1730d4b3b638SSrikar Dronamraju 	xol_free_insn_slot(t);
17310326f5a9SSrikar Dronamraju 	kfree(utask);
17320326f5a9SSrikar Dronamraju 	t->utask = NULL;
17330326f5a9SSrikar Dronamraju }
17340326f5a9SSrikar Dronamraju 
17350326f5a9SSrikar Dronamraju /*
1736c034f48eSRandy Dunlap  * Allocate a uprobe_task object for the task if necessary.
17375a2df662SOleg Nesterov  * Called when the thread hits a breakpoint.
17380326f5a9SSrikar Dronamraju  *
17390326f5a9SSrikar Dronamraju  * Returns:
17400326f5a9SSrikar Dronamraju  * - pointer to new uprobe_task on success
17410326f5a9SSrikar Dronamraju  * - NULL otherwise
17420326f5a9SSrikar Dronamraju  */
17435a2df662SOleg Nesterov static struct uprobe_task *get_utask(void)
17440326f5a9SSrikar Dronamraju {
17455a2df662SOleg Nesterov 	if (!current->utask)
17465a2df662SOleg Nesterov 		current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
17475a2df662SOleg Nesterov 	return current->utask;
17480326f5a9SSrikar Dronamraju }
17490326f5a9SSrikar Dronamraju 
1750248d3a7bSOleg Nesterov static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
1751248d3a7bSOleg Nesterov {
1752248d3a7bSOleg Nesterov 	struct uprobe_task *n_utask;
1753248d3a7bSOleg Nesterov 	struct return_instance **p, *o, *n;
1754248d3a7bSOleg Nesterov 
1755248d3a7bSOleg Nesterov 	n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1756248d3a7bSOleg Nesterov 	if (!n_utask)
1757248d3a7bSOleg Nesterov 		return -ENOMEM;
1758248d3a7bSOleg Nesterov 	t->utask = n_utask;
1759248d3a7bSOleg Nesterov 
1760248d3a7bSOleg Nesterov 	p = &n_utask->return_instances;
1761248d3a7bSOleg Nesterov 	for (o = o_utask->return_instances; o; o = o->next) {
1762248d3a7bSOleg Nesterov 		n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
1763248d3a7bSOleg Nesterov 		if (!n)
1764248d3a7bSOleg Nesterov 			return -ENOMEM;
1765248d3a7bSOleg Nesterov 
1766248d3a7bSOleg Nesterov 		*n = *o;
1767f231722aSOleg Nesterov 		get_uprobe(n->uprobe);
1768248d3a7bSOleg Nesterov 		n->next = NULL;
1769248d3a7bSOleg Nesterov 
1770248d3a7bSOleg Nesterov 		*p = n;
1771248d3a7bSOleg Nesterov 		p = &n->next;
1772248d3a7bSOleg Nesterov 		n_utask->depth++;
1773248d3a7bSOleg Nesterov 	}
1774248d3a7bSOleg Nesterov 
1775248d3a7bSOleg Nesterov 	return 0;
1776248d3a7bSOleg Nesterov }
1777248d3a7bSOleg Nesterov 
1778248d3a7bSOleg Nesterov static void uprobe_warn(struct task_struct *t, const char *msg)
1779248d3a7bSOleg Nesterov {
1780248d3a7bSOleg Nesterov 	pr_warn("uprobe: %s:%d failed to %s\n",
1781248d3a7bSOleg Nesterov 			current->comm, current->pid, msg);
1782248d3a7bSOleg Nesterov }
1783248d3a7bSOleg Nesterov 
1784aa59c53fSOleg Nesterov static void dup_xol_work(struct callback_head *work)
1785aa59c53fSOleg Nesterov {
1786aa59c53fSOleg Nesterov 	if (current->flags & PF_EXITING)
1787aa59c53fSOleg Nesterov 		return;
1788aa59c53fSOleg Nesterov 
1789598fdc1dSMichal Hocko 	if (!__create_xol_area(current->utask->dup_xol_addr) &&
1790598fdc1dSMichal Hocko 			!fatal_signal_pending(current))
1791aa59c53fSOleg Nesterov 		uprobe_warn(current, "dup xol area");
1792aa59c53fSOleg Nesterov }
1793aa59c53fSOleg Nesterov 
1794e78aebfdSAnton Arapov /*
1795b68e0749SOleg Nesterov  * Called in context of a new clone/fork from copy_process.
1796b68e0749SOleg Nesterov  */
17973ab67966SOleg Nesterov void uprobe_copy_process(struct task_struct *t, unsigned long flags)
1798b68e0749SOleg Nesterov {
1799248d3a7bSOleg Nesterov 	struct uprobe_task *utask = current->utask;
1800248d3a7bSOleg Nesterov 	struct mm_struct *mm = current->mm;
1801aa59c53fSOleg Nesterov 	struct xol_area *area;
1802248d3a7bSOleg Nesterov 
1803b68e0749SOleg Nesterov 	t->utask = NULL;
1804248d3a7bSOleg Nesterov 
18053ab67966SOleg Nesterov 	if (!utask || !utask->return_instances)
18063ab67966SOleg Nesterov 		return;
18073ab67966SOleg Nesterov 
18083ab67966SOleg Nesterov 	if (mm == t->mm && !(flags & CLONE_VFORK))
1809248d3a7bSOleg Nesterov 		return;
1810248d3a7bSOleg Nesterov 
1811248d3a7bSOleg Nesterov 	if (dup_utask(t, utask))
1812248d3a7bSOleg Nesterov 		return uprobe_warn(t, "dup ret instances");
1813aa59c53fSOleg Nesterov 
1814aa59c53fSOleg Nesterov 	/* The task can fork() after dup_xol_work() fails */
1815aa59c53fSOleg Nesterov 	area = mm->uprobes_state.xol_area;
1816aa59c53fSOleg Nesterov 	if (!area)
1817aa59c53fSOleg Nesterov 		return uprobe_warn(t, "dup xol area");
1818aa59c53fSOleg Nesterov 
18193ab67966SOleg Nesterov 	if (mm == t->mm)
18203ab67966SOleg Nesterov 		return;
18213ab67966SOleg Nesterov 
182232473431SOleg Nesterov 	t->utask->dup_xol_addr = area->vaddr;
182332473431SOleg Nesterov 	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
182491989c70SJens Axboe 	task_work_add(t, &t->utask->dup_xol_work, TWA_RESUME);
1825b68e0749SOleg Nesterov }
1826b68e0749SOleg Nesterov 
1827b68e0749SOleg Nesterov /*
1828e78aebfdSAnton Arapov  * The current area->vaddr notion assumes the trampoline address is always
1829e78aebfdSAnton Arapov  * equal to area->vaddr.
1830e78aebfdSAnton Arapov  *
1831e78aebfdSAnton Arapov  * Returns -1 in case the xol_area is not allocated.
1832e78aebfdSAnton Arapov  */
1833e78aebfdSAnton Arapov static unsigned long get_trampoline_vaddr(void)
1834e78aebfdSAnton Arapov {
1835e78aebfdSAnton Arapov 	struct xol_area *area;
1836e78aebfdSAnton Arapov 	unsigned long trampoline_vaddr = -1;
1837e78aebfdSAnton Arapov 
18385c6338b4SPaul E. McKenney 	/* Pairs with xol_add_vma() smp_store_release() */
18395c6338b4SPaul E. McKenney 	area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */
1840e78aebfdSAnton Arapov 	if (area)
1841e78aebfdSAnton Arapov 		trampoline_vaddr = area->vaddr;
1842e78aebfdSAnton Arapov 
1843e78aebfdSAnton Arapov 	return trampoline_vaddr;
1844e78aebfdSAnton Arapov }
1845e78aebfdSAnton Arapov 
1846db087ef6SOleg Nesterov static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
1847db087ef6SOleg Nesterov 					struct pt_regs *regs)
1848a5b7e1a8SOleg Nesterov {
1849a5b7e1a8SOleg Nesterov 	struct return_instance *ri = utask->return_instances;
1850db087ef6SOleg Nesterov 	enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;
185186dcb702SOleg Nesterov 
185286dcb702SOleg Nesterov 	while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
1853a5b7e1a8SOleg Nesterov 		ri = free_ret_instance(ri);
1854a5b7e1a8SOleg Nesterov 		utask->depth--;
1855a5b7e1a8SOleg Nesterov 	}
1856a5b7e1a8SOleg Nesterov 	utask->return_instances = ri;
1857a5b7e1a8SOleg Nesterov }
1858a5b7e1a8SOleg Nesterov 
18590dfd0eb8SAnton Arapov static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
18600dfd0eb8SAnton Arapov {
18610dfd0eb8SAnton Arapov 	struct return_instance *ri;
18620dfd0eb8SAnton Arapov 	struct uprobe_task *utask;
18630dfd0eb8SAnton Arapov 	unsigned long orig_ret_vaddr, trampoline_vaddr;
1864db087ef6SOleg Nesterov 	bool chained;
18650dfd0eb8SAnton Arapov 
18660dfd0eb8SAnton Arapov 	if (!get_xol_area())
18670dfd0eb8SAnton Arapov 		return;
18680dfd0eb8SAnton Arapov 
18690dfd0eb8SAnton Arapov 	utask = get_utask();
18700dfd0eb8SAnton Arapov 	if (!utask)
18710dfd0eb8SAnton Arapov 		return;
18720dfd0eb8SAnton Arapov 
1873ded49c55SAnton Arapov 	if (utask->depth >= MAX_URETPROBE_DEPTH) {
1874ded49c55SAnton Arapov 		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
1875ded49c55SAnton Arapov 				" nesting limit pid/tgid=%d/%d\n",
1876ded49c55SAnton Arapov 				current->pid, current->tgid);
1877ded49c55SAnton Arapov 		return;
1878ded49c55SAnton Arapov 	}
1879ded49c55SAnton Arapov 
18806c58d0e4SOleg Nesterov 	ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
18810dfd0eb8SAnton Arapov 	if (!ri)
18826c58d0e4SOleg Nesterov 		return;
18830dfd0eb8SAnton Arapov 
18840dfd0eb8SAnton Arapov 	trampoline_vaddr = get_trampoline_vaddr();
18850dfd0eb8SAnton Arapov 	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
18860dfd0eb8SAnton Arapov 	if (orig_ret_vaddr == -1)
18870dfd0eb8SAnton Arapov 		goto fail;
18880dfd0eb8SAnton Arapov 
1889a5b7e1a8SOleg Nesterov 	/* drop the entries invalidated by longjmp() */
1890db087ef6SOleg Nesterov 	chained = (orig_ret_vaddr == trampoline_vaddr);
1891db087ef6SOleg Nesterov 	cleanup_return_instances(utask, chained, regs);
1892a5b7e1a8SOleg Nesterov 
18930dfd0eb8SAnton Arapov 	/*
18940dfd0eb8SAnton Arapov 	 * We don't want to keep the trampoline address on the stack; rather,
18950dfd0eb8SAnton Arapov 	 * keep the original return address of the first caller through all the
18960dfd0eb8SAnton Arapov 	 * subsequent instances. This also makes breakpoint unwrapping easier.
18970dfd0eb8SAnton Arapov 	 */
1898db087ef6SOleg Nesterov 	if (chained) {
18990dfd0eb8SAnton Arapov 		if (!utask->return_instances) {
19000dfd0eb8SAnton Arapov 			/*
19010dfd0eb8SAnton Arapov 			 * This situation is not possible. Likely we have an
19020dfd0eb8SAnton Arapov 			 * attack from user-space.
19030dfd0eb8SAnton Arapov 			 */
19046c58d0e4SOleg Nesterov 			uprobe_warn(current, "handle tail call");
19050dfd0eb8SAnton Arapov 			goto fail;
19060dfd0eb8SAnton Arapov 		}
19070dfd0eb8SAnton Arapov 		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
19080dfd0eb8SAnton Arapov 	}
19090dfd0eb8SAnton Arapov 
1910f231722aSOleg Nesterov 	ri->uprobe = get_uprobe(uprobe);
19110dfd0eb8SAnton Arapov 	ri->func = instruction_pointer(regs);
19127b868e48SOleg Nesterov 	ri->stack = user_stack_pointer(regs);
19130dfd0eb8SAnton Arapov 	ri->orig_ret_vaddr = orig_ret_vaddr;
19140dfd0eb8SAnton Arapov 	ri->chained = chained;
19150dfd0eb8SAnton Arapov 
1916ded49c55SAnton Arapov 	utask->depth++;
19170dfd0eb8SAnton Arapov 	ri->next = utask->return_instances;
19180dfd0eb8SAnton Arapov 	utask->return_instances = ri;
19190dfd0eb8SAnton Arapov 
19200dfd0eb8SAnton Arapov 	return;
19210dfd0eb8SAnton Arapov  fail:
19220dfd0eb8SAnton Arapov 	kfree(ri);
19230dfd0eb8SAnton Arapov }
19240dfd0eb8SAnton Arapov 
19250326f5a9SSrikar Dronamraju /* Prepare to single-step probed instruction out of line. */
19260326f5a9SSrikar Dronamraju static int
1927a6cb3f6dSOleg Nesterov pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
19280326f5a9SSrikar Dronamraju {
1929a6cb3f6dSOleg Nesterov 	struct uprobe_task *utask;
1930a6cb3f6dSOleg Nesterov 	unsigned long xol_vaddr;
1931aba51024SOleg Nesterov 	int err;
1932d4b3b638SSrikar Dronamraju 
1933608e7427SOleg Nesterov 	utask = get_utask();
1934608e7427SOleg Nesterov 	if (!utask)
1935608e7427SOleg Nesterov 		return -ENOMEM;
1936a6cb3f6dSOleg Nesterov 
1937a6cb3f6dSOleg Nesterov 	xol_vaddr = xol_get_insn_slot(uprobe);
1938a6cb3f6dSOleg Nesterov 	if (!xol_vaddr)
1939a6cb3f6dSOleg Nesterov 		return -ENOMEM;
1940a6cb3f6dSOleg Nesterov 
1941a6cb3f6dSOleg Nesterov 	utask->xol_vaddr = xol_vaddr;
1942a6cb3f6dSOleg Nesterov 	utask->vaddr = bp_vaddr;
1943a6cb3f6dSOleg Nesterov 
1944aba51024SOleg Nesterov 	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
1945aba51024SOleg Nesterov 	if (unlikely(err)) {
1946aba51024SOleg Nesterov 		xol_free_insn_slot(current);
1947aba51024SOleg Nesterov 		return err;
1948aba51024SOleg Nesterov 	}
1949aba51024SOleg Nesterov 
1950608e7427SOleg Nesterov 	utask->active_uprobe = uprobe;
1951608e7427SOleg Nesterov 	utask->state = UTASK_SSTEP;
1952aba51024SOleg Nesterov 	return 0;
19530326f5a9SSrikar Dronamraju }
19540326f5a9SSrikar Dronamraju 
19550326f5a9SSrikar Dronamraju /*
19560326f5a9SSrikar Dronamraju  * If we are singlestepping, then ensure this thread is not connected to
19570326f5a9SSrikar Dronamraju  * non-fatal signals until completion of singlestep.  When xol insn itself
19580326f5a9SSrikar Dronamraju  * triggers the signal,  restart the original insn even if the task is
19590326f5a9SSrikar Dronamraju  * already SIGKILL'ed (since coredump should report the correct ip).  This
19600326f5a9SSrikar Dronamraju  * is even more important if the task has a handler for SIGSEGV/etc: the
19610326f5a9SSrikar Dronamraju  * _same_ instruction should be repeated after return from the signal
19620326f5a9SSrikar Dronamraju  * handler, and SSTEP can never finish in this case.
19630326f5a9SSrikar Dronamraju  */
19640326f5a9SSrikar Dronamraju bool uprobe_deny_signal(void)
19650326f5a9SSrikar Dronamraju {
19660326f5a9SSrikar Dronamraju 	struct task_struct *t = current;
19670326f5a9SSrikar Dronamraju 	struct uprobe_task *utask = t->utask;
19680326f5a9SSrikar Dronamraju 
19690326f5a9SSrikar Dronamraju 	if (likely(!utask || !utask->active_uprobe))
19700326f5a9SSrikar Dronamraju 		return false;
19710326f5a9SSrikar Dronamraju 
19720326f5a9SSrikar Dronamraju 	WARN_ON_ONCE(utask->state != UTASK_SSTEP);
19730326f5a9SSrikar Dronamraju 
19745c251e9dSJens Axboe 	if (task_sigpending(t)) {
19750326f5a9SSrikar Dronamraju 		spin_lock_irq(&t->sighand->siglock);
19760326f5a9SSrikar Dronamraju 		clear_tsk_thread_flag(t, TIF_SIGPENDING);
19770326f5a9SSrikar Dronamraju 		spin_unlock_irq(&t->sighand->siglock);
19780326f5a9SSrikar Dronamraju 
19790326f5a9SSrikar Dronamraju 		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
19800326f5a9SSrikar Dronamraju 			utask->state = UTASK_SSTEP_TRAPPED;
19810326f5a9SSrikar Dronamraju 			set_tsk_thread_flag(t, TIF_UPROBE);
19820326f5a9SSrikar Dronamraju 		}
19830326f5a9SSrikar Dronamraju 	}
19840326f5a9SSrikar Dronamraju 
19850326f5a9SSrikar Dronamraju 	return true;
19860326f5a9SSrikar Dronamraju }
19870326f5a9SSrikar Dronamraju 
1988499a4f3eSOleg Nesterov static void mmf_recalc_uprobes(struct mm_struct *mm)
1989499a4f3eSOleg Nesterov {
1990fcb72a58SMatthew Wilcox (Oracle) 	VMA_ITERATOR(vmi, mm, 0);
1991499a4f3eSOleg Nesterov 	struct vm_area_struct *vma;
1992499a4f3eSOleg Nesterov 
1993fcb72a58SMatthew Wilcox (Oracle) 	for_each_vma(vmi, vma) {
1994499a4f3eSOleg Nesterov 		if (!valid_vma(vma, false))
1995499a4f3eSOleg Nesterov 			continue;
1996499a4f3eSOleg Nesterov 		/*
1997499a4f3eSOleg Nesterov 		 * This is not strictly accurate; we can race with
1998499a4f3eSOleg Nesterov 		 * uprobe_unregister() and see the already removed
1999499a4f3eSOleg Nesterov 		 * uprobe if delete_uprobe() was not yet called.
200063633cbfSOleg Nesterov 		 * Or this uprobe can be filtered out.
2001499a4f3eSOleg Nesterov 		 */
2002499a4f3eSOleg Nesterov 		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
2003499a4f3eSOleg Nesterov 			return;
2004499a4f3eSOleg Nesterov 	}
2005499a4f3eSOleg Nesterov 
2006499a4f3eSOleg Nesterov 	clear_bit(MMF_HAS_UPROBES, &mm->flags);
2007499a4f3eSOleg Nesterov }
2008499a4f3eSOleg Nesterov 
20090908ad6eSAnanth N Mavinakayanahalli static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
2010ec75fba9SOleg Nesterov {
2011ec75fba9SOleg Nesterov 	struct page *page;
2012ec75fba9SOleg Nesterov 	uprobe_opcode_t opcode;
2013ec75fba9SOleg Nesterov 	int result;
2014ec75fba9SOleg Nesterov 
2015013b2debSOleg Nesterov 	if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE)))
2016013b2debSOleg Nesterov 		return -EINVAL;
2017013b2debSOleg Nesterov 
2018ec75fba9SOleg Nesterov 	pagefault_disable();
2019bd28b145SLinus Torvalds 	result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
2020ec75fba9SOleg Nesterov 	pagefault_enable();
2021ec75fba9SOleg Nesterov 
2022ec75fba9SOleg Nesterov 	if (likely(result == 0))
2023ec75fba9SOleg Nesterov 		goto out;
2024ec75fba9SOleg Nesterov 
20251e987790SDave Hansen 	/*
20261e987790SDave Hansen 	 * The NULL 'tsk' here ensures that any faults that occur here
20271e987790SDave Hansen 	 * will not be accounted to the task.  'mm' *is* current->mm,
20281e987790SDave Hansen 	 * but we treat this as a 'remote' access since it is
20291e987790SDave Hansen 	 * essentially a kernel access to the memory.
20301e987790SDave Hansen 	 */
203164019a2eSPeter Xu 	result = get_user_pages_remote(mm, vaddr, 1, FOLL_FORCE, &page,
20325b56d49fSLorenzo Stoakes 			NULL, NULL);
2033ec75fba9SOleg Nesterov 	if (result < 0)
2034ec75fba9SOleg Nesterov 		return result;
2035ec75fba9SOleg Nesterov 
2036ab0d805cSOleg Nesterov 	copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
2037ec75fba9SOleg Nesterov 	put_page(page);
2038ec75fba9SOleg Nesterov  out:
20390908ad6eSAnanth N Mavinakayanahalli 	/* This needs to return true for any variant of the trap insn */
20400908ad6eSAnanth N Mavinakayanahalli 	return is_trap_insn(&opcode);
2041ec75fba9SOleg Nesterov }
2042ec75fba9SOleg Nesterov 
2043d790d346SOleg Nesterov static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
20440326f5a9SSrikar Dronamraju {
20453a9ea052SOleg Nesterov 	struct mm_struct *mm = current->mm;
20463a9ea052SOleg Nesterov 	struct uprobe *uprobe = NULL;
20470326f5a9SSrikar Dronamraju 	struct vm_area_struct *vma;
20480326f5a9SSrikar Dronamraju 
2049d8ed45c5SMichel Lespinasse 	mmap_read_lock(mm);
20509016ddedSLiam Howlett 	vma = vma_lookup(mm, bp_vaddr);
20519016ddedSLiam Howlett 	if (vma) {
20523a9ea052SOleg Nesterov 		if (valid_vma(vma, false)) {
2053f281769eSOleg Nesterov 			struct inode *inode = file_inode(vma->vm_file);
2054cb113b47SOleg Nesterov 			loff_t offset = vaddr_to_offset(vma, bp_vaddr);
20550326f5a9SSrikar Dronamraju 
20560326f5a9SSrikar Dronamraju 			uprobe = find_uprobe(inode, offset);
20570326f5a9SSrikar Dronamraju 		}
2058d790d346SOleg Nesterov 
2059d790d346SOleg Nesterov 		if (!uprobe)
20600908ad6eSAnanth N Mavinakayanahalli 			*is_swbp = is_trap_at_addr(mm, bp_vaddr);
2061d790d346SOleg Nesterov 	} else {
2062d790d346SOleg Nesterov 		*is_swbp = -EFAULT;
20633a9ea052SOleg Nesterov 	}
2064499a4f3eSOleg Nesterov 
2065499a4f3eSOleg Nesterov 	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
2066499a4f3eSOleg Nesterov 		mmf_recalc_uprobes(mm);
2067d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
20680326f5a9SSrikar Dronamraju 
20693a9ea052SOleg Nesterov 	return uprobe;
20703a9ea052SOleg Nesterov }
20713a9ea052SOleg Nesterov 
2072da1816b1SOleg Nesterov static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
2073da1816b1SOleg Nesterov {
2074da1816b1SOleg Nesterov 	struct uprobe_consumer *uc;
2075da1816b1SOleg Nesterov 	int remove = UPROBE_HANDLER_REMOVE;
20760dfd0eb8SAnton Arapov 	bool need_prep = false; /* prepare return uprobe, when needed */
2077da1816b1SOleg Nesterov 
2078da1816b1SOleg Nesterov 	down_read(&uprobe->register_rwsem);
2079da1816b1SOleg Nesterov 	for (uc = uprobe->consumers; uc; uc = uc->next) {
2080ea024870SAnton Arapov 		int rc = 0;
2081da1816b1SOleg Nesterov 
2082ea024870SAnton Arapov 		if (uc->handler) {
2083ea024870SAnton Arapov 			rc = uc->handler(uc, regs);
2084da1816b1SOleg Nesterov 			WARN(rc & ~UPROBE_HANDLER_MASK,
2085d75f773cSSakari Ailus 				"bad rc=0x%x from %ps()\n", rc, uc->handler);
2086ea024870SAnton Arapov 		}
20870dfd0eb8SAnton Arapov 
20880dfd0eb8SAnton Arapov 		if (uc->ret_handler)
20890dfd0eb8SAnton Arapov 			need_prep = true;
20900dfd0eb8SAnton Arapov 
2091da1816b1SOleg Nesterov 		remove &= rc;
2092da1816b1SOleg Nesterov 	}
2093da1816b1SOleg Nesterov 
20940dfd0eb8SAnton Arapov 	if (need_prep && !remove)
20950dfd0eb8SAnton Arapov 		prepare_uretprobe(uprobe, regs); /* put bp at return */
20960dfd0eb8SAnton Arapov 
2097da1816b1SOleg Nesterov 	if (remove && uprobe->consumers) {
2098da1816b1SOleg Nesterov 		WARN_ON(!uprobe_is_active(uprobe));
2099da1816b1SOleg Nesterov 		unapply_uprobe(uprobe, current->mm);
2100da1816b1SOleg Nesterov 	}
2101da1816b1SOleg Nesterov 	up_read(&uprobe->register_rwsem);
2102da1816b1SOleg Nesterov }
2103da1816b1SOleg Nesterov 
2104fec8898dSAnton Arapov static void
2105fec8898dSAnton Arapov handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
2106fec8898dSAnton Arapov {
2107fec8898dSAnton Arapov 	struct uprobe *uprobe = ri->uprobe;
2108fec8898dSAnton Arapov 	struct uprobe_consumer *uc;
2109fec8898dSAnton Arapov 
2110fec8898dSAnton Arapov 	down_read(&uprobe->register_rwsem);
2111fec8898dSAnton Arapov 	for (uc = uprobe->consumers; uc; uc = uc->next) {
2112fec8898dSAnton Arapov 		if (uc->ret_handler)
2113fec8898dSAnton Arapov 			uc->ret_handler(uc, ri->func, regs);
2114fec8898dSAnton Arapov 	}
2115fec8898dSAnton Arapov 	up_read(&uprobe->register_rwsem);
2116fec8898dSAnton Arapov }
2117fec8898dSAnton Arapov 
2118a83cfeb9SOleg Nesterov static struct return_instance *find_next_ret_chain(struct return_instance *ri)
2119a83cfeb9SOleg Nesterov {
2120a83cfeb9SOleg Nesterov 	bool chained;
2121a83cfeb9SOleg Nesterov 
2122a83cfeb9SOleg Nesterov 	do {
2123a83cfeb9SOleg Nesterov 		chained = ri->chained;
2124a83cfeb9SOleg Nesterov 		ri = ri->next;	/* can't be NULL if chained */
2125a83cfeb9SOleg Nesterov 	} while (chained);
2126a83cfeb9SOleg Nesterov 
2127a83cfeb9SOleg Nesterov 	return ri;
2128a83cfeb9SOleg Nesterov }
2129a83cfeb9SOleg Nesterov 
21300b5256c7SOleg Nesterov static void handle_trampoline(struct pt_regs *regs)
2131fec8898dSAnton Arapov {
2132fec8898dSAnton Arapov 	struct uprobe_task *utask;
2133a83cfeb9SOleg Nesterov 	struct return_instance *ri, *next;
21345eeb50deSOleg Nesterov 	bool valid;
2135fec8898dSAnton Arapov 
2136fec8898dSAnton Arapov 	utask = current->utask;
2137fec8898dSAnton Arapov 	if (!utask)
21380b5256c7SOleg Nesterov 		goto sigill;
2139fec8898dSAnton Arapov 
2140fec8898dSAnton Arapov 	ri = utask->return_instances;
2141fec8898dSAnton Arapov 	if (!ri)
21420b5256c7SOleg Nesterov 		goto sigill;
2143fec8898dSAnton Arapov 
21445eeb50deSOleg Nesterov 	do {
2145fec8898dSAnton Arapov 		/*
21465eeb50deSOleg Nesterov 		 * We should throw out the frames invalidated by longjmp().
21475eeb50deSOleg Nesterov 		 * If this chain is valid, then the next one should be alive
21485eeb50deSOleg Nesterov 		 * or NULL; the latter case means that nobody but ri->func
21495eeb50deSOleg Nesterov 		 * could hit this trampoline on return. TODO: sigaltstack().
2150fec8898dSAnton Arapov 		 */
21515eeb50deSOleg Nesterov 		next = find_next_ret_chain(ri);
215286dcb702SOleg Nesterov 		valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);
21535eeb50deSOleg Nesterov 
2154fec8898dSAnton Arapov 		instruction_pointer_set(regs, ri->orig_ret_vaddr);
2155a83cfeb9SOleg Nesterov 		do {
21565eeb50deSOleg Nesterov 			if (valid)
2157fec8898dSAnton Arapov 				handle_uretprobe_chain(ri, regs);
21582bb5e840SOleg Nesterov 			ri = free_ret_instance(ri);
2159878b5a6eSOleg Nesterov 			utask->depth--;
2160a83cfeb9SOleg Nesterov 		} while (ri != next);
21615eeb50deSOleg Nesterov 	} while (!valid);
2162fec8898dSAnton Arapov 
2163fec8898dSAnton Arapov 	utask->return_instances = ri;
21640b5256c7SOleg Nesterov 	return;
2165fec8898dSAnton Arapov 
21660b5256c7SOleg Nesterov  sigill:
21670b5256c7SOleg Nesterov 	uprobe_warn(current, "handle uretprobe, sending SIGILL.");
21683cf5d076SEric W. Biederman 	force_sig(SIGILL);
21690b5256c7SOleg Nesterov 
2170fec8898dSAnton Arapov }
2171fec8898dSAnton Arapov 
21726fe50a28SDavid A. Long bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
21736fe50a28SDavid A. Long {
21746fe50a28SDavid A. Long 	return false;
21756fe50a28SDavid A. Long }
21766fe50a28SDavid A. Long 
217786dcb702SOleg Nesterov bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
217886dcb702SOleg Nesterov 					struct pt_regs *regs)
217997da8976SOleg Nesterov {
218097da8976SOleg Nesterov 	return true;
218197da8976SOleg Nesterov }
218297da8976SOleg Nesterov 
21833a9ea052SOleg Nesterov /*
21843a9ea052SOleg Nesterov  * Run handler and ask thread to singlestep.
21853a9ea052SOleg Nesterov  * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
21863a9ea052SOleg Nesterov  */
21873a9ea052SOleg Nesterov static void handle_swbp(struct pt_regs *regs)
21883a9ea052SOleg Nesterov {
21893a9ea052SOleg Nesterov 	struct uprobe *uprobe;
21903a9ea052SOleg Nesterov 	unsigned long bp_vaddr;
21913f649ab7SKees Cook 	int is_swbp;
21923a9ea052SOleg Nesterov 
21933a9ea052SOleg Nesterov 	bp_vaddr = uprobe_get_swbp_addr(regs);
21940b5256c7SOleg Nesterov 	if (bp_vaddr == get_trampoline_vaddr())
21950b5256c7SOleg Nesterov 		return handle_trampoline(regs);
2196fec8898dSAnton Arapov 
2197fec8898dSAnton Arapov 	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
21980326f5a9SSrikar Dronamraju 	if (!uprobe) {
219956bb4cf6SOleg Nesterov 		if (is_swbp > 0) {
22000326f5a9SSrikar Dronamraju 			/* No matching uprobe; signal SIGTRAP. */
2201fe5ed7abSOleg Nesterov 			force_sig(SIGTRAP);
220256bb4cf6SOleg Nesterov 		} else {
220356bb4cf6SOleg Nesterov 			/*
220456bb4cf6SOleg Nesterov 			 * Either we raced with uprobe_unregister() or we can't
220556bb4cf6SOleg Nesterov 			 * access this memory. The latter is only possible if
220656bb4cf6SOleg Nesterov 			 * another thread plays with our ->mm. In both cases
220756bb4cf6SOleg Nesterov 			 * we can simply restart. If this vma was unmapped we
220856bb4cf6SOleg Nesterov 			 * can pretend this insn was not executed yet and get
220956bb4cf6SOleg Nesterov 			 * the (correct) SIGSEGV after restart.
221056bb4cf6SOleg Nesterov 			 */
221156bb4cf6SOleg Nesterov 			instruction_pointer_set(regs, bp_vaddr);
221256bb4cf6SOleg Nesterov 		}
22130326f5a9SSrikar Dronamraju 		return;
22140326f5a9SSrikar Dronamraju 	}
221574e59dfcSOleg Nesterov 
221674e59dfcSOleg Nesterov 	/* change it in advance for ->handler() and restart */
221774e59dfcSOleg Nesterov 	instruction_pointer_set(regs, bp_vaddr);
221874e59dfcSOleg Nesterov 
2219142b18ddSOleg Nesterov 	/*
2220142b18ddSOleg Nesterov 	 * TODO: move copy_insn/etc into _register and remove this hack.
2221142b18ddSOleg Nesterov 	 * After we hit the bp, _unregister + _register can install the
2222142b18ddSOleg Nesterov 	 * new and not-yet-analyzed uprobe at the same address, restart.
2223142b18ddSOleg Nesterov 	 */
222471434f2fSOleg Nesterov 	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
222574e59dfcSOleg Nesterov 		goto out;
22260326f5a9SSrikar Dronamraju 
222709d3f015SAndrea Parri 	/*
222809d3f015SAndrea Parri 	 * Pairs with the smp_wmb() in prepare_uprobe().
222909d3f015SAndrea Parri 	 *
223009d3f015SAndrea Parri 	 * Guarantees that if we see the UPROBE_COPY_INSN bit set, then
223109d3f015SAndrea Parri 	 * we must also see the stores to &uprobe->arch performed by the
223209d3f015SAndrea Parri 	 * prepare_uprobe() call.
223309d3f015SAndrea Parri 	 */
223409d3f015SAndrea Parri 	smp_rmb();
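	/*
	 * For reference, a simplified sketch (not a verbatim quote) of the
	 * publisher side in prepare_uprobe():
	 *
	 *	copy_insn(uprobe, file);
	 *	arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
	 *	smp_wmb();				<- orders ->arch stores
	 *	set_bit(UPROBE_COPY_INSN, &uprobe->flags);  <- before the flag
	 */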
223509d3f015SAndrea Parri 
223672fd293aSOleg Nesterov 	/* Tracing handlers use ->utask to communicate with fetch methods */
223772fd293aSOleg Nesterov 	if (!get_utask())
223872fd293aSOleg Nesterov 		goto out;
223972fd293aSOleg Nesterov 
22406fe50a28SDavid A. Long 	if (arch_uprobe_ignore(&uprobe->arch, regs))
22416fe50a28SDavid A. Long 		goto out;
22426fe50a28SDavid A. Long 
22430326f5a9SSrikar Dronamraju 	handler_chain(uprobe, regs);
22446fe50a28SDavid A. Long 
22458a6b1732SOleg Nesterov 	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
22460578a970SOleg Nesterov 		goto out;
22470326f5a9SSrikar Dronamraju 
2248608e7427SOleg Nesterov 	if (!pre_ssout(uprobe, regs, bp_vaddr))
22490326f5a9SSrikar Dronamraju 		return;
22500326f5a9SSrikar Dronamraju 
22518a6b1732SOleg Nesterov 	/* arch_uprobe_skip_sstep() succeeded, or restart if we can't singlestep */
22520578a970SOleg Nesterov out:
22530326f5a9SSrikar Dronamraju 	put_uprobe(uprobe);
22540326f5a9SSrikar Dronamraju }
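/*
 * For context, a minimal consumer sketch (hypothetical names, simplified):
 * handler_chain() above walks uprobe->consumers and invokes each consumer's
 * ->handler.  A kernel-side user attaches one with uprobe_register():
 */
#if 0
static int my_handler(struct uprobe_consumer *self, struct pt_regs *regs)
{
	pr_info("probe hit, ip=%lx\n", instruction_pointer(regs));
	return 0;	/* 0: keep; UPROBE_HANDLER_REMOVE: drop this consumer */
}

static struct uprobe_consumer my_consumer = {
	.handler = my_handler,
};

static int attach_probe(struct inode *inode, loff_t offset)
{
	/* inode + offset identify the probed instruction within the file */
	return uprobe_register(inode, offset, &my_consumer);
}
#endif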
22550326f5a9SSrikar Dronamraju 
22560326f5a9SSrikar Dronamraju /*
22570326f5a9SSrikar Dronamraju  * Perform required fix-ups and disable singlestep.
22580326f5a9SSrikar Dronamraju  * Allow pending signals to take effect.
22590326f5a9SSrikar Dronamraju  */
22600326f5a9SSrikar Dronamraju static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
22610326f5a9SSrikar Dronamraju {
22620326f5a9SSrikar Dronamraju 	struct uprobe *uprobe;
2263014940baSOleg Nesterov 	int err = 0;
22640326f5a9SSrikar Dronamraju 
22650326f5a9SSrikar Dronamraju 	uprobe = utask->active_uprobe;
22660326f5a9SSrikar Dronamraju 	if (utask->state == UTASK_SSTEP_ACK)
2267014940baSOleg Nesterov 		err = arch_uprobe_post_xol(&uprobe->arch, regs);
22680326f5a9SSrikar Dronamraju 	else if (utask->state == UTASK_SSTEP_TRAPPED)
22690326f5a9SSrikar Dronamraju 		arch_uprobe_abort_xol(&uprobe->arch, regs);
22700326f5a9SSrikar Dronamraju 	else
22710326f5a9SSrikar Dronamraju 		WARN_ON_ONCE(1);
22720326f5a9SSrikar Dronamraju 
22730326f5a9SSrikar Dronamraju 	put_uprobe(uprobe);
22740326f5a9SSrikar Dronamraju 	utask->active_uprobe = NULL;
22750326f5a9SSrikar Dronamraju 	utask->state = UTASK_RUNNING;
2276d4b3b638SSrikar Dronamraju 	xol_free_insn_slot(current);
22770326f5a9SSrikar Dronamraju 
22780326f5a9SSrikar Dronamraju 	spin_lock_irq(&current->sighand->siglock);
22790326f5a9SSrikar Dronamraju 	recalc_sigpending(); /* see uprobe_deny_signal() */
22800326f5a9SSrikar Dronamraju 	spin_unlock_irq(&current->sighand->siglock);
2281014940baSOleg Nesterov 
2282014940baSOleg Nesterov 	if (unlikely(err)) {
2283014940baSOleg Nesterov 		uprobe_warn(current, "execute the probed insn, sending SIGILL.");
22843cf5d076SEric W. Biederman 		force_sig(SIGILL);
2285014940baSOleg Nesterov 	}
22860326f5a9SSrikar Dronamraju }
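/*
 * For reference (simplified sketch): UTASK_SSTEP_ACK is set by
 * uprobe_post_sstep_notifier() below once the singlestep trap arrives;
 * UTASK_SSTEP_TRAPPED is set by uprobe_deny_signal() when a fatal signal
 * or a trap inside the XOL slot interrupts the step, roughly:
 *
 *	if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
 *		utask->state = UTASK_SSTEP_TRAPPED;
 *		set_tsk_thread_flag(t, TIF_UPROBE);
 *	}
 */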
22870326f5a9SSrikar Dronamraju 
22880326f5a9SSrikar Dronamraju /*
22891b08e907SOleg Nesterov  * On a breakpoint hit, the breakpoint notifier sets the TIF_UPROBE flag and
22901b08e907SOleg Nesterov  * allows the thread to return from interrupt. After that handle_swbp()
22911b08e907SOleg Nesterov  * sets utask->active_uprobe.
22920326f5a9SSrikar Dronamraju  *
22931b08e907SOleg Nesterov  * On a singlestep exception, the singlestep notifier sets the TIF_UPROBE
22941b08e907SOleg Nesterov  * flag and allows the thread to return from interrupt.
22950326f5a9SSrikar Dronamraju  *
22960326f5a9SSrikar Dronamraju  * While returning to userspace, thread notices the TIF_UPROBE flag and calls
22970326f5a9SSrikar Dronamraju  * uprobe_notify_resume().
22980326f5a9SSrikar Dronamraju  */
22990326f5a9SSrikar Dronamraju void uprobe_notify_resume(struct pt_regs *regs)
23000326f5a9SSrikar Dronamraju {
23010326f5a9SSrikar Dronamraju 	struct uprobe_task *utask;
23020326f5a9SSrikar Dronamraju 
2303db023ea5SOleg Nesterov 	clear_thread_flag(TIF_UPROBE);
2304db023ea5SOleg Nesterov 
23050326f5a9SSrikar Dronamraju 	utask = current->utask;
23061b08e907SOleg Nesterov 	if (utask && utask->active_uprobe)
23070326f5a9SSrikar Dronamraju 		handle_singlestep(utask, regs);
23081b08e907SOleg Nesterov 	else
23091b08e907SOleg Nesterov 		handle_swbp(regs);
23100326f5a9SSrikar Dronamraju }
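/*
 * Call-site sketch (simplified from the generic entry code,
 * kernel/entry/common.c): the exit-to-user work loop invokes this
 * function whenever TIF_UPROBE is set:
 *
 *	if (ti_work & _TIF_UPROBE)
 *		uprobe_notify_resume(regs);
 */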
23110326f5a9SSrikar Dronamraju 
23120326f5a9SSrikar Dronamraju /*
23130326f5a9SSrikar Dronamraju  * uprobe_pre_sstep_notifier gets called from interrupt context as part of
23140326f5a9SSrikar Dronamraju  * the notifier mechanism. Set the TIF_UPROBE flag and indicate a breakpoint hit.
23150326f5a9SSrikar Dronamraju  */
23160326f5a9SSrikar Dronamraju int uprobe_pre_sstep_notifier(struct pt_regs *regs)
23170326f5a9SSrikar Dronamraju {
23180dfd0eb8SAnton Arapov 	if (!current->mm)
23190dfd0eb8SAnton Arapov 		return 0;
23200dfd0eb8SAnton Arapov 
23210dfd0eb8SAnton Arapov 	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
23220dfd0eb8SAnton Arapov 	    (!current->utask || !current->utask->return_instances))
23230326f5a9SSrikar Dronamraju 		return 0;
23240326f5a9SSrikar Dronamraju 
23250326f5a9SSrikar Dronamraju 	set_thread_flag(TIF_UPROBE);
23260326f5a9SSrikar Dronamraju 	return 1;
23270326f5a9SSrikar Dronamraju }
23280326f5a9SSrikar Dronamraju 
23290326f5a9SSrikar Dronamraju /*
23300326f5a9SSrikar Dronamraju  * uprobe_post_sstep_notifier gets called in interrupt context as part of the
23310326f5a9SSrikar Dronamraju  * notifier mechanism. Set the TIF_UPROBE flag and indicate completion of the singlestep.
23320326f5a9SSrikar Dronamraju  */
23330326f5a9SSrikar Dronamraju int uprobe_post_sstep_notifier(struct pt_regs *regs)
23340326f5a9SSrikar Dronamraju {
23350326f5a9SSrikar Dronamraju 	struct uprobe_task *utask = current->utask;
23360326f5a9SSrikar Dronamraju 
23370326f5a9SSrikar Dronamraju 	if (!current->mm || !utask || !utask->active_uprobe)
23380326f5a9SSrikar Dronamraju 		/* task is currently not uprobed */
23390326f5a9SSrikar Dronamraju 		return 0;
23400326f5a9SSrikar Dronamraju 
23410326f5a9SSrikar Dronamraju 	utask->state = UTASK_SSTEP_ACK;
23420326f5a9SSrikar Dronamraju 	set_thread_flag(TIF_UPROBE);
23430326f5a9SSrikar Dronamraju 	return 1;
23440326f5a9SSrikar Dronamraju }
23450326f5a9SSrikar Dronamraju 
23460326f5a9SSrikar Dronamraju static struct notifier_block uprobe_exception_nb = {
23470326f5a9SSrikar Dronamraju 	.notifier_call		= arch_uprobe_exception_notify,
23480326f5a9SSrikar Dronamraju 	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
23490326f5a9SSrikar Dronamraju };
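/*
 * For reference, a sketch of the arch-provided notifier callback named
 * above, modeled on the x86 implementation (simplified, illustrative):
 */
#if 0
int arch_uprobe_exception_notify(struct notifier_block *self,
				 unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	/* we are only interested in userspace traps */
	if (regs && !user_mode(regs))
		return NOTIFY_DONE;

	switch (val) {
	case DIE_INT3:
		if (uprobe_pre_sstep_notifier(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (uprobe_post_sstep_notifier(regs))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}

	return ret;
}
#endif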
23500326f5a9SSrikar Dronamraju 
2351aad42dd4SNadav Amit void __init uprobes_init(void)
2352a5f4374aSIngo Molnar {
2353a5f4374aSIngo Molnar 	int i;
2354a5f4374aSIngo Molnar 
235566d06dffSOleg Nesterov 	for (i = 0; i < UPROBES_HASH_SZ; i++)
2356a5f4374aSIngo Molnar 		mutex_init(&uprobes_mmap_mutex[i]);
23570326f5a9SSrikar Dronamraju 
2358aad42dd4SNadav Amit 	BUG_ON(register_die_notifier(&uprobe_exception_nb));
2359a5f4374aSIngo Molnar }
2360