xref: /openbmc/linux/kernel/events/uprobes.c (revision d9eb1ea2)
1720e596aSThomas Gleixner // SPDX-License-Identifier: GPL-2.0+
2a5f4374aSIngo Molnar /*
3a5f4374aSIngo Molnar  * User-space Probes (UProbes)
4a5f4374aSIngo Molnar  *
535aa621bSIngo Molnar  * Copyright (C) IBM Corporation, 2008-2012
6a5f4374aSIngo Molnar  * Authors:
7a5f4374aSIngo Molnar  *	Srikar Dronamraju
8a5f4374aSIngo Molnar  *	Jim Keniston
990eec103SPeter Zijlstra  * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
10a5f4374aSIngo Molnar  */
11a5f4374aSIngo Molnar 
12a5f4374aSIngo Molnar #include <linux/kernel.h>
13a5f4374aSIngo Molnar #include <linux/highmem.h>
14a5f4374aSIngo Molnar #include <linux/pagemap.h>	/* read_mapping_page */
15a5f4374aSIngo Molnar #include <linux/slab.h>
16a5f4374aSIngo Molnar #include <linux/sched.h>
176e84f315SIngo Molnar #include <linux/sched/mm.h>
18f7ccbae4SIngo Molnar #include <linux/sched/coredump.h>
19e8440c14SJosh Stone #include <linux/export.h>
20a5f4374aSIngo Molnar #include <linux/rmap.h>		/* anon_vma_prepare */
21a5f4374aSIngo Molnar #include <linux/mmu_notifier.h>	/* set_pte_at_notify */
22a5f4374aSIngo Molnar #include <linux/swap.h>		/* try_to_free_swap */
230326f5a9SSrikar Dronamraju #include <linux/ptrace.h>	/* user_enable_single_step */
240326f5a9SSrikar Dronamraju #include <linux/kdebug.h>	/* notifier mechanism */
25194f8dcbSOleg Nesterov #include "../../mm/internal.h"	/* munlock_vma_page */
2632cdba1eSOleg Nesterov #include <linux/percpu-rwsem.h>
27aa59c53fSOleg Nesterov #include <linux/task_work.h>
2840814f68SOleg Nesterov #include <linux/shmem_fs.h>
29f385cb85SSong Liu #include <linux/khugepaged.h>
30a5f4374aSIngo Molnar 
31a5f4374aSIngo Molnar #include <linux/uprobes.h>
32a5f4374aSIngo Molnar 
33d4b3b638SSrikar Dronamraju #define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
34d4b3b638SSrikar Dronamraju #define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE
35d4b3b638SSrikar Dronamraju 
36a5f4374aSIngo Molnar static struct rb_root uprobes_tree = RB_ROOT;
37441f1eb7SOleg Nesterov /*
38441f1eb7SOleg Nesterov  * Allows us to skip uprobe_mmap() if there are no uprobe events active
39441f1eb7SOleg Nesterov  * at this time. A fine-grained per-inode count would probably be better.
40441f1eb7SOleg Nesterov  */
41441f1eb7SOleg Nesterov #define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)
42a5f4374aSIngo Molnar 
43a5f4374aSIngo Molnar static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */
44a5f4374aSIngo Molnar 
45a5f4374aSIngo Molnar #define UPROBES_HASH_SZ	13
46a5f4374aSIngo Molnar /* serialize uprobe->pending_list */
47a5f4374aSIngo Molnar static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
48a5f4374aSIngo Molnar #define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
49a5f4374aSIngo Molnar 
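/*
 * Illustrative sketch (not part of the original file): every uprobe of a
 * given inode hashes to the same mutex, so pending_list manipulation for
 * one file is serialized.  The helper name below is hypothetical.
 */
#if 0
static void example_lock_pending_list(struct inode *inode)
{
	struct mutex *m = uprobes_mmap_hash(inode);	/* inode % 13 bucket */

	mutex_lock(m);
	/* ... walk or modify uprobe->pending_list entries for this inode ... */
	mutex_unlock(m);
}
#endif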
502bf1acc2SOleg Nesterov DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem);
5132cdba1eSOleg Nesterov 
52cb9a19feSOleg Nesterov /* Have a copy of the original instruction */
5371434f2fSOleg Nesterov #define UPROBE_COPY_INSN	0
54cb9a19feSOleg Nesterov 
553ff54efdSSrikar Dronamraju struct uprobe {
563ff54efdSSrikar Dronamraju 	struct rb_node		rb_node;	/* node in the rb tree */
57ce59b8e9SElena Reshetova 	refcount_t		ref;
58e591c8d7SOleg Nesterov 	struct rw_semaphore	register_rwsem;
593ff54efdSSrikar Dronamraju 	struct rw_semaphore	consumer_rwsem;
603ff54efdSSrikar Dronamraju 	struct list_head	pending_list;
613ff54efdSSrikar Dronamraju 	struct uprobe_consumer	*consumers;
623ff54efdSSrikar Dronamraju 	struct inode		*inode;		/* Also hold a ref to inode */
633ff54efdSSrikar Dronamraju 	loff_t			offset;
641cc33161SRavi Bangoria 	loff_t			ref_ctr_offset;
6571434f2fSOleg Nesterov 	unsigned long		flags;
66ad439356SOleg Nesterov 
67ad439356SOleg Nesterov 	/*
68ad439356SOleg Nesterov 	 * The generic code assumes that it has two members of unknown type
69ad439356SOleg Nesterov 	 * owned by the arch-specific code:
70ad439356SOleg Nesterov 	 *
71ad439356SOleg Nesterov 	 * 	insn -	copy_insn() saves the original instruction here for
72ad439356SOleg Nesterov 	 *		arch_uprobe_analyze_insn().
73ad439356SOleg Nesterov 	 *
74ad439356SOleg Nesterov 	 *	ixol -	potentially modified instruction to execute out of
75ad439356SOleg Nesterov 	 *		line, copied to xol_area by xol_get_insn_slot().
76ad439356SOleg Nesterov 	 */
773ff54efdSSrikar Dronamraju 	struct arch_uprobe	arch;
783ff54efdSSrikar Dronamraju };
793ff54efdSSrikar Dronamraju 
801cc33161SRavi Bangoria struct delayed_uprobe {
811cc33161SRavi Bangoria 	struct list_head list;
821cc33161SRavi Bangoria 	struct uprobe *uprobe;
831cc33161SRavi Bangoria 	struct mm_struct *mm;
841cc33161SRavi Bangoria };
851cc33161SRavi Bangoria 
861cc33161SRavi Bangoria static DEFINE_MUTEX(delayed_uprobe_lock);
871cc33161SRavi Bangoria static LIST_HEAD(delayed_uprobe_list);
881cc33161SRavi Bangoria 
89a5f4374aSIngo Molnar /*
90ad439356SOleg Nesterov  * Execute out of line area: anonymous executable mapping installed
91ad439356SOleg Nesterov  * by the probed task to execute the copy of the original instruction
92ad439356SOleg Nesterov  * mangled by set_swbp().
93ad439356SOleg Nesterov  *
94c912dae6SOleg Nesterov  * On a breakpoint hit, the thread contests for a slot and frees the
95c912dae6SOleg Nesterov  * slot after singlestep. Currently a fixed number of slots is
96c912dae6SOleg Nesterov  * allocated.
97c912dae6SOleg Nesterov  */
98c912dae6SOleg Nesterov struct xol_area {
99c912dae6SOleg Nesterov 	wait_queue_head_t 		wq;		/* if all slots are busy */
100c912dae6SOleg Nesterov 	atomic_t 			slot_count;	/* number of in-use slots */
101c912dae6SOleg Nesterov 	unsigned long 			*bitmap;	/* 0 = free slot */
102c912dae6SOleg Nesterov 
103704bde3cSOleg Nesterov 	struct vm_special_mapping	xol_mapping;
104704bde3cSOleg Nesterov 	struct page 			*pages[2];
105c912dae6SOleg Nesterov 	/*
106c912dae6SOleg Nesterov 	 * We keep the vma's vm_start rather than a pointer to the vma
107c912dae6SOleg Nesterov 	 * itself.  The probed process or a naughty kernel module could make
108c912dae6SOleg Nesterov 	 * the vma go away, and we must handle that reasonably gracefully.
109c912dae6SOleg Nesterov 	 */
110c912dae6SOleg Nesterov 	unsigned long 			vaddr;		/* Page(s) of instruction slots */
111c912dae6SOleg Nesterov };
112c912dae6SOleg Nesterov 
113c912dae6SOleg Nesterov /*
114a5f4374aSIngo Molnar  * valid_vma: Verify if the specified vma is an executable vma.
115a5f4374aSIngo Molnar  * Relax restrictions while unregistering: vm_flags might have
116a5f4374aSIngo Molnar  * changed after the breakpoint was inserted.
117a5f4374aSIngo Molnar  *	- is_register: indicates if we are in register context.
118a5f4374aSIngo Molnar  *	- Return true if the specified vma is an executable vma.
120a5f4374aSIngo Molnar  */
121a5f4374aSIngo Molnar static bool valid_vma(struct vm_area_struct *vma, bool is_register)
122a5f4374aSIngo Molnar {
12313f59c5eSOleg Nesterov 	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;
124a5f4374aSIngo Molnar 
125e40cfce6SOleg Nesterov 	if (is_register)
126e40cfce6SOleg Nesterov 		flags |= VM_WRITE;
127a5f4374aSIngo Molnar 
128e40cfce6SOleg Nesterov 	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
129a5f4374aSIngo Molnar }
130a5f4374aSIngo Molnar 
13157683f72SOleg Nesterov static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
132a5f4374aSIngo Molnar {
13357683f72SOleg Nesterov 	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
134a5f4374aSIngo Molnar }
135a5f4374aSIngo Molnar 
136cb113b47SOleg Nesterov static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
137cb113b47SOleg Nesterov {
138cb113b47SOleg Nesterov 	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
139cb113b47SOleg Nesterov }
140cb113b47SOleg Nesterov 
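/*
 * Illustrative sketch (not part of the original file): the two helpers
 * above are arithmetic inverses.  The helper below, whose name is
 * hypothetical, demonstrates the round trip.
 */
#if 0
static bool example_offset_roundtrip(struct vm_area_struct *vma,
				     unsigned long vaddr)
{
	loff_t off = vaddr_to_offset(vma, vaddr);

	/* offset_to_vaddr(vma, vaddr_to_offset(vma, v)) == v */
	return offset_to_vaddr(vma, off) == vaddr;
}
#endif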
141a5f4374aSIngo Molnar /**
142a5f4374aSIngo Molnar  * __replace_page - replace page in vma by new page.
143a5f4374aSIngo Molnar  * based on replace_page in mm/ksm.c
144a5f4374aSIngo Molnar  *
145a5f4374aSIngo Molnar  * @vma:      vma that holds the pte pointing to page
146c517ee74SOleg Nesterov  * @addr:     address @old_page is mapped at
147fb4fb04fSSong Liu  * @old_page: the page we are replacing
148fb4fb04fSSong Liu  * @new_page: the modified page that replaces @old_page
149a5f4374aSIngo Molnar  *
150fb4fb04fSSong Liu  * If @new_page is NULL, only unmap @old_page.
151fb4fb04fSSong Liu  *
152fb4fb04fSSong Liu  * Returns 0 on success, negative error code otherwise.
153a5f4374aSIngo Molnar  */
154c517ee74SOleg Nesterov static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
155bdfaa2eeSOleg Nesterov 				struct page *old_page, struct page *new_page)
156a5f4374aSIngo Molnar {
157a5f4374aSIngo Molnar 	struct mm_struct *mm = vma->vm_mm;
15814fa2daaSKirill A. Shutemov 	struct page_vma_mapped_walk pvmw = {
1595a52c9dfSSong Liu 		.page = compound_head(old_page),
16014fa2daaSKirill A. Shutemov 		.vma = vma,
16114fa2daaSKirill A. Shutemov 		.address = addr,
16214fa2daaSKirill A. Shutemov 	};
1639f92448cSOleg Nesterov 	int err;
164ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
16500501b53SJohannes Weiner 
1667269f999SJérôme Glisse 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
1676f4f13e8SJérôme Glisse 				addr + PAGE_SIZE);
168ac46d4f3SJérôme Glisse 
169fb4fb04fSSong Liu 	if (new_page) {
170d9eb1ea2SJohannes Weiner 		err = mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL);
17100501b53SJohannes Weiner 		if (err)
17200501b53SJohannes Weiner 			return err;
173fb4fb04fSSong Liu 	}
174a5f4374aSIngo Molnar 
175194f8dcbSOleg Nesterov 	/* For try_to_free_swap() and munlock_vma_page() below */
176bdfaa2eeSOleg Nesterov 	lock_page(old_page);
1779f92448cSOleg Nesterov 
178ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_start(&range);
1799f92448cSOleg Nesterov 	err = -EAGAIN;
1809d82c694SJohannes Weiner 	if (!page_vma_mapped_walk(&pvmw))
1819f92448cSOleg Nesterov 		goto unlock;
18214fa2daaSKirill A. Shutemov 	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
183a5f4374aSIngo Molnar 
184fb4fb04fSSong Liu 	if (new_page) {
185bdfaa2eeSOleg Nesterov 		get_page(new_page);
186be5d0a74SJohannes Weiner 		page_add_new_anon_rmap(new_page, vma, addr, false);
187bdfaa2eeSOleg Nesterov 		lru_cache_add_active_or_unevictable(new_page, vma);
188fb4fb04fSSong Liu 	} else
189fb4fb04fSSong Liu 		/* no new page, just dec_mm_counter for old_page */
190fb4fb04fSSong Liu 		dec_mm_counter(mm, MM_ANONPAGES);
191a5f4374aSIngo Molnar 
192bdfaa2eeSOleg Nesterov 	if (!PageAnon(old_page)) {
193bdfaa2eeSOleg Nesterov 		dec_mm_counter(mm, mm_counter_file(old_page));
1947396fa81SSrikar Dronamraju 		inc_mm_counter(mm, MM_ANONPAGES);
1957396fa81SSrikar Dronamraju 	}
1967396fa81SSrikar Dronamraju 
19714fa2daaSKirill A. Shutemov 	flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
19814fa2daaSKirill A. Shutemov 	ptep_clear_flush_notify(vma, addr, pvmw.pte);
199fb4fb04fSSong Liu 	if (new_page)
20014fa2daaSKirill A. Shutemov 		set_pte_at_notify(mm, addr, pvmw.pte,
20114fa2daaSKirill A. Shutemov 				  mk_pte(new_page, vma->vm_page_prot));
202a5f4374aSIngo Molnar 
203bdfaa2eeSOleg Nesterov 	page_remove_rmap(old_page, false);
204bdfaa2eeSOleg Nesterov 	if (!page_mapped(old_page))
205bdfaa2eeSOleg Nesterov 		try_to_free_swap(old_page);
20614fa2daaSKirill A. Shutemov 	page_vma_mapped_walk_done(&pvmw);
207a5f4374aSIngo Molnar 
208194f8dcbSOleg Nesterov 	if (vma->vm_flags & VM_LOCKED)
209bdfaa2eeSOleg Nesterov 		munlock_vma_page(old_page);
210bdfaa2eeSOleg Nesterov 	put_page(old_page);
211194f8dcbSOleg Nesterov 
2129f92448cSOleg Nesterov 	err = 0;
2139f92448cSOleg Nesterov  unlock:
214ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_end(&range);
215bdfaa2eeSOleg Nesterov 	unlock_page(old_page);
2169f92448cSOleg Nesterov 	return err;
217a5f4374aSIngo Molnar }
218a5f4374aSIngo Molnar 
219a5f4374aSIngo Molnar /**
2205cb4ac3aSSrikar Dronamraju  * is_swbp_insn - check if the instruction is a breakpoint instruction.
221a5f4374aSIngo Molnar  * @insn: instruction to be checked.
2225cb4ac3aSSrikar Dronamraju  * Default implementation of is_swbp_insn.
223a5f4374aSIngo Molnar  * Returns true if @insn is a breakpoint instruction.
224a5f4374aSIngo Molnar  */
2255cb4ac3aSSrikar Dronamraju bool __weak is_swbp_insn(uprobe_opcode_t *insn)
226a5f4374aSIngo Molnar {
2275cb4ac3aSSrikar Dronamraju 	return *insn == UPROBE_SWBP_INSN;
228a5f4374aSIngo Molnar }
229a5f4374aSIngo Molnar 
2300908ad6eSAnanth N Mavinakayanahalli /**
2310908ad6eSAnanth N Mavinakayanahalli  * is_trap_insn - check if the instruction is a breakpoint instruction.
2320908ad6eSAnanth N Mavinakayanahalli  * @insn: instruction to be checked.
2330908ad6eSAnanth N Mavinakayanahalli  * Default implementation of is_trap_insn.
2340908ad6eSAnanth N Mavinakayanahalli  * Returns true if @insn is a breakpoint instruction.
2350908ad6eSAnanth N Mavinakayanahalli  *
2360908ad6eSAnanth N Mavinakayanahalli  * This function is needed for the case where an architecture has multiple
2370908ad6eSAnanth N Mavinakayanahalli  * trap instructions (like powerpc).
2380908ad6eSAnanth N Mavinakayanahalli  */
2390908ad6eSAnanth N Mavinakayanahalli bool __weak is_trap_insn(uprobe_opcode_t *insn)
2400908ad6eSAnanth N Mavinakayanahalli {
2410908ad6eSAnanth N Mavinakayanahalli 	return is_swbp_insn(insn);
2420908ad6eSAnanth N Mavinakayanahalli }
2430908ad6eSAnanth N Mavinakayanahalli 
244ab0d805cSOleg Nesterov static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
245cceb55aaSOleg Nesterov {
246cceb55aaSOleg Nesterov 	void *kaddr = kmap_atomic(page);
247ab0d805cSOleg Nesterov 	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
248cceb55aaSOleg Nesterov 	kunmap_atomic(kaddr);
249cceb55aaSOleg Nesterov }
250cceb55aaSOleg Nesterov 
2515669cceeSOleg Nesterov static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
2525669cceeSOleg Nesterov {
2535669cceeSOleg Nesterov 	void *kaddr = kmap_atomic(page);
2545669cceeSOleg Nesterov 	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
2555669cceeSOleg Nesterov 	kunmap_atomic(kaddr);
2565669cceeSOleg Nesterov }
2575669cceeSOleg Nesterov 
258ed6f6a50SOleg Nesterov static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
259ed6f6a50SOleg Nesterov {
260ed6f6a50SOleg Nesterov 	uprobe_opcode_t old_opcode;
261ed6f6a50SOleg Nesterov 	bool is_swbp;
262ed6f6a50SOleg Nesterov 
2630908ad6eSAnanth N Mavinakayanahalli 	/*
2640908ad6eSAnanth N Mavinakayanahalli 	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
2650908ad6eSAnanth N Mavinakayanahalli 	 * We do not check if it is any other 'trap variant' which could
2660908ad6eSAnanth N Mavinakayanahalli 	 * be conditional trap instruction such as the one powerpc supports.
2670908ad6eSAnanth N Mavinakayanahalli 	 *
2680908ad6eSAnanth N Mavinakayanahalli 	 * The logic is that we do not care if the underlying instruction
2690908ad6eSAnanth N Mavinakayanahalli 	 * is a trap variant; a uprobe always wins over any other (gdb)
2700908ad6eSAnanth N Mavinakayanahalli 	 * breakpoint.
2710908ad6eSAnanth N Mavinakayanahalli 	 */
272ab0d805cSOleg Nesterov 	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
273ed6f6a50SOleg Nesterov 	is_swbp = is_swbp_insn(&old_opcode);
274ed6f6a50SOleg Nesterov 
275ed6f6a50SOleg Nesterov 	if (is_swbp_insn(new_opcode)) {
276ed6f6a50SOleg Nesterov 		if (is_swbp)		/* register: already installed? */
277ed6f6a50SOleg Nesterov 			return 0;
278ed6f6a50SOleg Nesterov 	} else {
279ed6f6a50SOleg Nesterov 		if (!is_swbp)		/* unregister: was it changed by us? */
280076a365bSOleg Nesterov 			return 0;
281ed6f6a50SOleg Nesterov 	}
282ed6f6a50SOleg Nesterov 
283ed6f6a50SOleg Nesterov 	return 1;
284ed6f6a50SOleg Nesterov }
285ed6f6a50SOleg Nesterov 
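/*
 * Illustrative sketch (not part of the original file): how the caller,
 * uprobe_write_opcode(), consumes verify_opcode()'s result.  The helper
 * and do_the_replacement() below are hypothetical.
 */
#if 0
static int example_verify_then_write(struct page *page, unsigned long vaddr,
				     uprobe_opcode_t *opcode)
{
	int ret = verify_opcode(page, vaddr, opcode);

	if (ret <= 0)	/* 0: page is already in the desired state, skip */
		return ret;

	return do_the_replacement();	/* ret == 1: actually poke the page */
}
#endif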
2861cc33161SRavi Bangoria static struct delayed_uprobe *
2871cc33161SRavi Bangoria delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
2881cc33161SRavi Bangoria {
2891cc33161SRavi Bangoria 	struct delayed_uprobe *du;
2901cc33161SRavi Bangoria 
2911cc33161SRavi Bangoria 	list_for_each_entry(du, &delayed_uprobe_list, list)
2921cc33161SRavi Bangoria 		if (du->uprobe == uprobe && du->mm == mm)
2931cc33161SRavi Bangoria 			return du;
2941cc33161SRavi Bangoria 	return NULL;
2951cc33161SRavi Bangoria }
2961cc33161SRavi Bangoria 
2971cc33161SRavi Bangoria static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
2981cc33161SRavi Bangoria {
2991cc33161SRavi Bangoria 	struct delayed_uprobe *du;
3001cc33161SRavi Bangoria 
3011cc33161SRavi Bangoria 	if (delayed_uprobe_check(uprobe, mm))
3021cc33161SRavi Bangoria 		return 0;
3031cc33161SRavi Bangoria 
3041cc33161SRavi Bangoria 	du  = kzalloc(sizeof(*du), GFP_KERNEL);
3051cc33161SRavi Bangoria 	if (!du)
3061cc33161SRavi Bangoria 		return -ENOMEM;
3071cc33161SRavi Bangoria 
3081cc33161SRavi Bangoria 	du->uprobe = uprobe;
3091cc33161SRavi Bangoria 	du->mm = mm;
3101cc33161SRavi Bangoria 	list_add(&du->list, &delayed_uprobe_list);
3111cc33161SRavi Bangoria 	return 0;
3121cc33161SRavi Bangoria }
3131cc33161SRavi Bangoria 
3141cc33161SRavi Bangoria static void delayed_uprobe_delete(struct delayed_uprobe *du)
3151cc33161SRavi Bangoria {
3161cc33161SRavi Bangoria 	if (WARN_ON(!du))
3171cc33161SRavi Bangoria 		return;
3181cc33161SRavi Bangoria 	list_del(&du->list);
3191cc33161SRavi Bangoria 	kfree(du);
3201cc33161SRavi Bangoria }
3211cc33161SRavi Bangoria 
3221cc33161SRavi Bangoria static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
3231cc33161SRavi Bangoria {
3241cc33161SRavi Bangoria 	struct list_head *pos, *q;
3251cc33161SRavi Bangoria 	struct delayed_uprobe *du;
3261cc33161SRavi Bangoria 
3271cc33161SRavi Bangoria 	if (!uprobe && !mm)
3281cc33161SRavi Bangoria 		return;
3291cc33161SRavi Bangoria 
3301cc33161SRavi Bangoria 	list_for_each_safe(pos, q, &delayed_uprobe_list) {
3311cc33161SRavi Bangoria 		du = list_entry(pos, struct delayed_uprobe, list);
3321cc33161SRavi Bangoria 
3331cc33161SRavi Bangoria 		if (uprobe && du->uprobe != uprobe)
3341cc33161SRavi Bangoria 			continue;
3351cc33161SRavi Bangoria 		if (mm && du->mm != mm)
3361cc33161SRavi Bangoria 			continue;
3371cc33161SRavi Bangoria 
3381cc33161SRavi Bangoria 		delayed_uprobe_delete(du);
3391cc33161SRavi Bangoria 	}
3401cc33161SRavi Bangoria }
3411cc33161SRavi Bangoria 
3421cc33161SRavi Bangoria static bool valid_ref_ctr_vma(struct uprobe *uprobe,
3431cc33161SRavi Bangoria 			      struct vm_area_struct *vma)
3441cc33161SRavi Bangoria {
3451cc33161SRavi Bangoria 	unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset);
3461cc33161SRavi Bangoria 
3471cc33161SRavi Bangoria 	return uprobe->ref_ctr_offset &&
3481cc33161SRavi Bangoria 		vma->vm_file &&
3491cc33161SRavi Bangoria 		file_inode(vma->vm_file) == uprobe->inode &&
3501cc33161SRavi Bangoria 		(vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
3511cc33161SRavi Bangoria 		vma->vm_start <= vaddr &&
3521cc33161SRavi Bangoria 		vma->vm_end > vaddr;
3531cc33161SRavi Bangoria }
3541cc33161SRavi Bangoria 
3551cc33161SRavi Bangoria static struct vm_area_struct *
3561cc33161SRavi Bangoria find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
3571cc33161SRavi Bangoria {
3581cc33161SRavi Bangoria 	struct vm_area_struct *tmp;
3591cc33161SRavi Bangoria 
3601cc33161SRavi Bangoria 	for (tmp = mm->mmap; tmp; tmp = tmp->vm_next)
3611cc33161SRavi Bangoria 		if (valid_ref_ctr_vma(uprobe, tmp))
3621cc33161SRavi Bangoria 			return tmp;
3631cc33161SRavi Bangoria 
3641cc33161SRavi Bangoria 	return NULL;
3651cc33161SRavi Bangoria }
3661cc33161SRavi Bangoria 
3671cc33161SRavi Bangoria static int
3681cc33161SRavi Bangoria __update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
3691cc33161SRavi Bangoria {
3701cc33161SRavi Bangoria 	void *kaddr;
3711cc33161SRavi Bangoria 	struct page *page;
3721cc33161SRavi Bangoria 	struct vm_area_struct *vma;
3731cc33161SRavi Bangoria 	int ret;
3741cc33161SRavi Bangoria 	short *ptr;
3751cc33161SRavi Bangoria 
3761cc33161SRavi Bangoria 	if (!vaddr || !d)
3771cc33161SRavi Bangoria 		return -EINVAL;
3781cc33161SRavi Bangoria 
3791cc33161SRavi Bangoria 	ret = get_user_pages_remote(NULL, mm, vaddr, 1,
3801cc33161SRavi Bangoria 			FOLL_WRITE, &page, &vma, NULL);
3811cc33161SRavi Bangoria 	if (unlikely(ret <= 0)) {
3821cc33161SRavi Bangoria 		/*
3831cc33161SRavi Bangoria 		 * We are asking for 1 page. If get_user_pages_remote() fails,
3841cc33161SRavi Bangoria 		 * it may return 0; in that case we have to return an error.
3851cc33161SRavi Bangoria 		 */
3861cc33161SRavi Bangoria 		return ret == 0 ? -EBUSY : ret;
3871cc33161SRavi Bangoria 	}
3881cc33161SRavi Bangoria 
3891cc33161SRavi Bangoria 	kaddr = kmap_atomic(page);
3901cc33161SRavi Bangoria 	ptr = kaddr + (vaddr & ~PAGE_MASK);
3911cc33161SRavi Bangoria 
3921cc33161SRavi Bangoria 	if (unlikely(*ptr + d < 0)) {
3931cc33161SRavi Bangoria 		pr_warn("ref_ctr going negative. vaddr: 0x%lx, "
3941cc33161SRavi Bangoria 			"curr val: %d, delta: %d\n", vaddr, *ptr, d);
3951cc33161SRavi Bangoria 		ret = -EINVAL;
3961cc33161SRavi Bangoria 		goto out;
3971cc33161SRavi Bangoria 	}
3981cc33161SRavi Bangoria 
3991cc33161SRavi Bangoria 	*ptr += d;
4001cc33161SRavi Bangoria 	ret = 0;
4011cc33161SRavi Bangoria out:
4021cc33161SRavi Bangoria 	kunmap_atomic(kaddr);
4031cc33161SRavi Bangoria 	put_page(page);
4041cc33161SRavi Bangoria 	return ret;
4051cc33161SRavi Bangoria }
4061cc33161SRavi Bangoria 
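/*
 * Illustrative sketch (not part of the original file): the short counter
 * updated above acts as an SDT-style semaphore in the probed process.
 * Userspace tests it before doing any expensive argument setup.  The
 * symbol and function names below are hypothetical, userspace-side code.
 */
#if 0
extern unsigned short my_probe_semaphore;	/* in a writable section */

void my_traced_function(void)
{
	if (my_probe_semaphore > 0)	/* > 0 while a probe is attached */
		prepare_and_fire_tracepoint_args();
}
#endif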
4071cc33161SRavi Bangoria static void update_ref_ctr_warn(struct uprobe *uprobe,
4081cc33161SRavi Bangoria 				struct mm_struct *mm, short d)
4091cc33161SRavi Bangoria {
4101cc33161SRavi Bangoria 	pr_warn("ref_ctr %s failed for inode: 0x%lx offset: "
4111cc33161SRavi Bangoria 		"0x%llx ref_ctr_offset: 0x%llx of mm: 0x%pK\n",
4121cc33161SRavi Bangoria 		d > 0 ? "increment" : "decrement", uprobe->inode->i_ino,
4131cc33161SRavi Bangoria 		(unsigned long long) uprobe->offset,
4141cc33161SRavi Bangoria 		(unsigned long long) uprobe->ref_ctr_offset, mm);
4151cc33161SRavi Bangoria }
4161cc33161SRavi Bangoria 
4171cc33161SRavi Bangoria static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
4181cc33161SRavi Bangoria 			  short d)
4191cc33161SRavi Bangoria {
4201cc33161SRavi Bangoria 	struct vm_area_struct *rc_vma;
4211cc33161SRavi Bangoria 	unsigned long rc_vaddr;
4221cc33161SRavi Bangoria 	int ret = 0;
4231cc33161SRavi Bangoria 
4241cc33161SRavi Bangoria 	rc_vma = find_ref_ctr_vma(uprobe, mm);
4251cc33161SRavi Bangoria 
4261cc33161SRavi Bangoria 	if (rc_vma) {
4271cc33161SRavi Bangoria 		rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset);
4281cc33161SRavi Bangoria 		ret = __update_ref_ctr(mm, rc_vaddr, d);
4291cc33161SRavi Bangoria 		if (ret)
4301cc33161SRavi Bangoria 			update_ref_ctr_warn(uprobe, mm, d);
4311cc33161SRavi Bangoria 
4321cc33161SRavi Bangoria 		if (d > 0)
4331cc33161SRavi Bangoria 			return ret;
4341cc33161SRavi Bangoria 	}
4351cc33161SRavi Bangoria 
4361cc33161SRavi Bangoria 	mutex_lock(&delayed_uprobe_lock);
4371cc33161SRavi Bangoria 	if (d > 0)
4381cc33161SRavi Bangoria 		ret = delayed_uprobe_add(uprobe, mm);
4391cc33161SRavi Bangoria 	else
4401cc33161SRavi Bangoria 		delayed_uprobe_remove(uprobe, mm);
4411cc33161SRavi Bangoria 	mutex_unlock(&delayed_uprobe_lock);
4421cc33161SRavi Bangoria 
4431cc33161SRavi Bangoria 	return ret;
4441cc33161SRavi Bangoria }
4451cc33161SRavi Bangoria 
446a5f4374aSIngo Molnar /*
447a5f4374aSIngo Molnar  * NOTE:
448a5f4374aSIngo Molnar  * Expect the breakpoint instruction to be the smallest-size instruction for
449a5f4374aSIngo Molnar  * the architecture. If an arch has variable-length instructions and the
450a5f4374aSIngo Molnar  * breakpoint instruction is not the smallest instruction supported by
4510908ad6eSAnanth N Mavinakayanahalli  * that architecture, then we need to modify is_trap_at_addr() and
452f72d41faSOleg Nesterov  * uprobe_write_opcode() accordingly. This would never be a problem for archs
453f72d41faSOleg Nesterov  * that have fixed-length instructions.
45429dedee0SOleg Nesterov  *
455f72d41faSOleg Nesterov  * uprobe_write_opcode - write the opcode at a given virtual address.
456a5f4374aSIngo Molnar  * @mm: the probed process address space.
457a5f4374aSIngo Molnar  * @vaddr: the virtual address to store the opcode.
458a5f4374aSIngo Molnar  * @opcode: opcode to be written at @vaddr.
459a5f4374aSIngo Molnar  *
46029dedee0SOleg Nesterov  * Called with mm->mmap_sem held for write.
461a5f4374aSIngo Molnar  * Return 0 (success) or a negative errno.
462a5f4374aSIngo Molnar  */
4636d43743eSRavi Bangoria int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
4646d43743eSRavi Bangoria 			unsigned long vaddr, uprobe_opcode_t opcode)
465a5f4374aSIngo Molnar {
4661cc33161SRavi Bangoria 	struct uprobe *uprobe;
467a5f4374aSIngo Molnar 	struct page *old_page, *new_page;
468a5f4374aSIngo Molnar 	struct vm_area_struct *vma;
4691cc33161SRavi Bangoria 	int ret, is_register, ref_ctr_updated = 0;
470f385cb85SSong Liu 	bool orig_page_huge = false;
471aa5de305SSong Liu 	unsigned int gup_flags = FOLL_FORCE;
4721cc33161SRavi Bangoria 
4731cc33161SRavi Bangoria 	is_register = is_swbp_insn(&opcode);
4741cc33161SRavi Bangoria 	uprobe = container_of(auprobe, struct uprobe, arch);
475f403072cSOleg Nesterov 
4765323ce71SOleg Nesterov retry:
477aa5de305SSong Liu 	if (is_register)
478aa5de305SSong Liu 		gup_flags |= FOLL_SPLIT_PMD;
479a5f4374aSIngo Molnar 	/* Read the page with vaddr into memory */
480aa5de305SSong Liu 	ret = get_user_pages_remote(NULL, mm, vaddr, 1, gup_flags,
481aa5de305SSong Liu 				    &old_page, &vma, NULL);
482a5f4374aSIngo Molnar 	if (ret <= 0)
483a5f4374aSIngo Molnar 		return ret;
484a5f4374aSIngo Molnar 
485ed6f6a50SOleg Nesterov 	ret = verify_opcode(old_page, vaddr, &opcode);
486ed6f6a50SOleg Nesterov 	if (ret <= 0)
487ed6f6a50SOleg Nesterov 		goto put_old;
488ed6f6a50SOleg Nesterov 
489aa5de305SSong Liu 	if (WARN(!is_register && PageCompound(old_page),
490aa5de305SSong Liu 		 "uprobe unregister should never work on compound page\n")) {
491aa5de305SSong Liu 		ret = -EINVAL;
492aa5de305SSong Liu 		goto put_old;
493aa5de305SSong Liu 	}
494aa5de305SSong Liu 
4951cc33161SRavi Bangoria 	/* We are going to replace the instruction; update ref_ctr. */
4961cc33161SRavi Bangoria 	if (!ref_ctr_updated && uprobe->ref_ctr_offset) {
4971cc33161SRavi Bangoria 		ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
4981cc33161SRavi Bangoria 		if (ret)
4991cc33161SRavi Bangoria 			goto put_old;
5001cc33161SRavi Bangoria 
5011cc33161SRavi Bangoria 		ref_ctr_updated = 1;
5021cc33161SRavi Bangoria 	}
5031cc33161SRavi Bangoria 
504fb4fb04fSSong Liu 	ret = 0;
505fb4fb04fSSong Liu 	if (!is_register && !PageAnon(old_page))
506fb4fb04fSSong Liu 		goto put_old;
507fb4fb04fSSong Liu 
50829dedee0SOleg Nesterov 	ret = anon_vma_prepare(vma);
50929dedee0SOleg Nesterov 	if (ret)
51029dedee0SOleg Nesterov 		goto put_old;
51129dedee0SOleg Nesterov 
512a5f4374aSIngo Molnar 	ret = -ENOMEM;
513a5f4374aSIngo Molnar 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
514a5f4374aSIngo Molnar 	if (!new_page)
5159f92448cSOleg Nesterov 		goto put_old;
516a5f4374aSIngo Molnar 
51729dedee0SOleg Nesterov 	__SetPageUptodate(new_page);
5183f47107cSOleg Nesterov 	copy_highpage(new_page, old_page);
5193f47107cSOleg Nesterov 	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
520a5f4374aSIngo Molnar 
521fb4fb04fSSong Liu 	if (!is_register) {
522fb4fb04fSSong Liu 		struct page *orig_page;
523fb4fb04fSSong Liu 		pgoff_t index;
524fb4fb04fSSong Liu 
525fb4fb04fSSong Liu 		VM_BUG_ON_PAGE(!PageAnon(old_page), old_page);
526fb4fb04fSSong Liu 
527fb4fb04fSSong Liu 		index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
528fb4fb04fSSong Liu 		orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
529fb4fb04fSSong Liu 					  index);
530fb4fb04fSSong Liu 
531fb4fb04fSSong Liu 		if (orig_page) {
532fb4fb04fSSong Liu 			if (PageUptodate(orig_page) &&
533fb4fb04fSSong Liu 			    pages_identical(new_page, orig_page)) {
534fb4fb04fSSong Liu 				/* let go of new_page */
535fb4fb04fSSong Liu 				put_page(new_page);
536fb4fb04fSSong Liu 				new_page = NULL;
537f385cb85SSong Liu 
538f385cb85SSong Liu 				if (PageCompound(orig_page))
539f385cb85SSong Liu 					orig_page_huge = true;
540fb4fb04fSSong Liu 			}
541fb4fb04fSSong Liu 			put_page(orig_page);
542fb4fb04fSSong Liu 		}
543fb4fb04fSSong Liu 	}
544fb4fb04fSSong Liu 
545c517ee74SOleg Nesterov 	ret = __replace_page(vma, vaddr, old_page, new_page);
546fb4fb04fSSong Liu 	if (new_page)
54709cbfeafSKirill A. Shutemov 		put_page(new_page);
5489f92448cSOleg Nesterov put_old:
549a5f4374aSIngo Molnar 	put_page(old_page);
550a5f4374aSIngo Molnar 
5515323ce71SOleg Nesterov 	if (unlikely(ret == -EAGAIN))
5525323ce71SOleg Nesterov 		goto retry;
5531cc33161SRavi Bangoria 
5541cc33161SRavi Bangoria 	/* Revert the reference counter if the instruction update failed. */
5551cc33161SRavi Bangoria 	if (ret && is_register && ref_ctr_updated)
5561cc33161SRavi Bangoria 		update_ref_ctr(uprobe, mm, -1);
5571cc33161SRavi Bangoria 
558f385cb85SSong Liu 	/* try to collapse pmd for compound page */
559f385cb85SSong Liu 	if (!ret && orig_page_huge)
560f385cb85SSong Liu 		collapse_pte_mapped_thp(mm, vaddr);
561f385cb85SSong Liu 
562a5f4374aSIngo Molnar 	return ret;
563a5f4374aSIngo Molnar }
564a5f4374aSIngo Molnar 
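/*
 * Illustrative sketch (not part of the original file): a minimal caller
 * honoring uprobe_write_opcode()'s locking contract, assuming a reference
 * on @mm is already held.  The helper name is hypothetical.
 */
#if 0
static int example_poke_breakpoint(struct arch_uprobe *auprobe,
				   struct mm_struct *mm, unsigned long vaddr)
{
	int err;

	down_write(&mm->mmap_sem);	/* required: mmap_sem held for write */
	err = uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
	up_write(&mm->mmap_sem);

	return err;
}
#endif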
565a5f4374aSIngo Molnar /**
5665cb4ac3aSSrikar Dronamraju  * set_swbp - store breakpoint at a given address.
567e3343e6aSSrikar Dronamraju  * @auprobe: arch specific probepoint information.
568a5f4374aSIngo Molnar  * @mm: the probed process address space.
569a5f4374aSIngo Molnar  * @vaddr: the virtual address to insert the opcode.
570a5f4374aSIngo Molnar  *
571a5f4374aSIngo Molnar  * For mm @mm, store the breakpoint instruction at @vaddr.
572a5f4374aSIngo Molnar  * Return 0 (success) or a negative errno.
573a5f4374aSIngo Molnar  */
5745cb4ac3aSSrikar Dronamraju int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
575a5f4374aSIngo Molnar {
5766d43743eSRavi Bangoria 	return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
577a5f4374aSIngo Molnar }
578a5f4374aSIngo Molnar 
579a5f4374aSIngo Molnar /**
580a5f4374aSIngo Molnar  * set_orig_insn - Restore the original instruction.
581a5f4374aSIngo Molnar  * @mm: the probed process address space.
582e3343e6aSSrikar Dronamraju  * @auprobe: arch specific probepoint information.
583a5f4374aSIngo Molnar  * @vaddr: the virtual address to insert the opcode.
584a5f4374aSIngo Molnar  *
585a5f4374aSIngo Molnar  * For mm @mm, restore the original opcode (opcode) at @vaddr.
586a5f4374aSIngo Molnar  * Return 0 (success) or a negative errno.
587a5f4374aSIngo Molnar  */
588a5f4374aSIngo Molnar int __weak
589ded86e7cSOleg Nesterov set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
590a5f4374aSIngo Molnar {
5916d43743eSRavi Bangoria 	return uprobe_write_opcode(auprobe, mm, vaddr,
5926d43743eSRavi Bangoria 			*(uprobe_opcode_t *)&auprobe->insn);
593a5f4374aSIngo Molnar }
594a5f4374aSIngo Molnar 
595f231722aSOleg Nesterov static struct uprobe *get_uprobe(struct uprobe *uprobe)
596f231722aSOleg Nesterov {
597ce59b8e9SElena Reshetova 	refcount_inc(&uprobe->ref);
598f231722aSOleg Nesterov 	return uprobe;
599f231722aSOleg Nesterov }
600f231722aSOleg Nesterov 
601f231722aSOleg Nesterov static void put_uprobe(struct uprobe *uprobe)
602f231722aSOleg Nesterov {
603ce59b8e9SElena Reshetova 	if (refcount_dec_and_test(&uprobe->ref)) {
6041cc33161SRavi Bangoria 		/*
6051cc33161SRavi Bangoria 		 * If the application munmap()s exec_vma before uprobe_unregister()
6061cc33161SRavi Bangoria 		 * gets called, we don't get a chance to remove the uprobe from
6071cc33161SRavi Bangoria 		 * delayed_uprobe_list in remove_breakpoint(). Do it here.
6081cc33161SRavi Bangoria 		 */
6091aed58e6SRavi Bangoria 		mutex_lock(&delayed_uprobe_lock);
6101cc33161SRavi Bangoria 		delayed_uprobe_remove(uprobe, NULL);
6111aed58e6SRavi Bangoria 		mutex_unlock(&delayed_uprobe_lock);
612f231722aSOleg Nesterov 		kfree(uprobe);
613f231722aSOleg Nesterov 	}
6141cc33161SRavi Bangoria }
615f231722aSOleg Nesterov 
616a5f4374aSIngo Molnar static int match_uprobe(struct uprobe *l, struct uprobe *r)
617a5f4374aSIngo Molnar {
618a5f4374aSIngo Molnar 	if (l->inode < r->inode)
619a5f4374aSIngo Molnar 		return -1;
620a5f4374aSIngo Molnar 
621a5f4374aSIngo Molnar 	if (l->inode > r->inode)
622a5f4374aSIngo Molnar 		return 1;
623a5f4374aSIngo Molnar 
624a5f4374aSIngo Molnar 	if (l->offset < r->offset)
625a5f4374aSIngo Molnar 		return -1;
626a5f4374aSIngo Molnar 
627a5f4374aSIngo Molnar 	if (l->offset > r->offset)
628a5f4374aSIngo Molnar 		return 1;
629a5f4374aSIngo Molnar 
630a5f4374aSIngo Molnar 	return 0;
631a5f4374aSIngo Molnar }
632a5f4374aSIngo Molnar 
633a5f4374aSIngo Molnar static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
634a5f4374aSIngo Molnar {
635a5f4374aSIngo Molnar 	struct uprobe u = { .inode = inode, .offset = offset };
636a5f4374aSIngo Molnar 	struct rb_node *n = uprobes_tree.rb_node;
637a5f4374aSIngo Molnar 	struct uprobe *uprobe;
638a5f4374aSIngo Molnar 	int match;
639a5f4374aSIngo Molnar 
640a5f4374aSIngo Molnar 	while (n) {
641a5f4374aSIngo Molnar 		uprobe = rb_entry(n, struct uprobe, rb_node);
642a5f4374aSIngo Molnar 		match = match_uprobe(&u, uprobe);
643f231722aSOleg Nesterov 		if (!match)
644f231722aSOleg Nesterov 			return get_uprobe(uprobe);
645a5f4374aSIngo Molnar 
646a5f4374aSIngo Molnar 		if (match < 0)
647a5f4374aSIngo Molnar 			n = n->rb_left;
648a5f4374aSIngo Molnar 		else
649a5f4374aSIngo Molnar 			n = n->rb_right;
650a5f4374aSIngo Molnar 	}
651a5f4374aSIngo Molnar 	return NULL;
652a5f4374aSIngo Molnar }
653a5f4374aSIngo Molnar 
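/*
 * Illustrative sketch (not part of the original file): match_uprobe()
 * defines a total order on (inode, offset), i.e. the rbtree is keyed as
 * if by the hypothetical tuple comparison below.
 */
#if 0
static int example_cmp_key(struct inode *li, loff_t lo,
			   struct inode *ri, loff_t ro)
{
	if (li != ri)
		return li < ri ? -1 : 1;	/* inode pointers first */
	if (lo != ro)
		return lo < ro ? -1 : 1;	/* then file offsets */
	return 0;
}
#endif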
654a5f4374aSIngo Molnar /*
655a5f4374aSIngo Molnar  * Find a uprobe corresponding to a given inode:offset
656a5f4374aSIngo Molnar  * Acquires uprobes_treelock
657a5f4374aSIngo Molnar  */
658a5f4374aSIngo Molnar static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
659a5f4374aSIngo Molnar {
660a5f4374aSIngo Molnar 	struct uprobe *uprobe;
661a5f4374aSIngo Molnar 
6626f47caa0SOleg Nesterov 	spin_lock(&uprobes_treelock);
663a5f4374aSIngo Molnar 	uprobe = __find_uprobe(inode, offset);
6646f47caa0SOleg Nesterov 	spin_unlock(&uprobes_treelock);
665a5f4374aSIngo Molnar 
666a5f4374aSIngo Molnar 	return uprobe;
667a5f4374aSIngo Molnar }
668a5f4374aSIngo Molnar 
669a5f4374aSIngo Molnar static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
670a5f4374aSIngo Molnar {
671a5f4374aSIngo Molnar 	struct rb_node **p = &uprobes_tree.rb_node;
672a5f4374aSIngo Molnar 	struct rb_node *parent = NULL;
673a5f4374aSIngo Molnar 	struct uprobe *u;
674a5f4374aSIngo Molnar 	int match;
675a5f4374aSIngo Molnar 
676a5f4374aSIngo Molnar 	while (*p) {
677a5f4374aSIngo Molnar 		parent = *p;
678a5f4374aSIngo Molnar 		u = rb_entry(parent, struct uprobe, rb_node);
679a5f4374aSIngo Molnar 		match = match_uprobe(uprobe, u);
680f231722aSOleg Nesterov 		if (!match)
681f231722aSOleg Nesterov 			return get_uprobe(u);
682a5f4374aSIngo Molnar 
683a5f4374aSIngo Molnar 		if (match < 0)
684a5f4374aSIngo Molnar 			p = &parent->rb_left;
685a5f4374aSIngo Molnar 		else
686a5f4374aSIngo Molnar 			p = &parent->rb_right;
687a5f4374aSIngo Molnar 
688a5f4374aSIngo Molnar 	}
689a5f4374aSIngo Molnar 
690a5f4374aSIngo Molnar 	u = NULL;
691a5f4374aSIngo Molnar 	rb_link_node(&uprobe->rb_node, parent, p);
692a5f4374aSIngo Molnar 	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
693a5f4374aSIngo Molnar 	/* get access + creation ref */
694ce59b8e9SElena Reshetova 	refcount_set(&uprobe->ref, 2);
695a5f4374aSIngo Molnar 
696a5f4374aSIngo Molnar 	return u;
697a5f4374aSIngo Molnar }
698a5f4374aSIngo Molnar 
699a5f4374aSIngo Molnar /*
700a5f4374aSIngo Molnar  * Acquire uprobes_treelock.
701a5f4374aSIngo Molnar  * If a matching uprobe already exists in the rbtree,
702a5f4374aSIngo Molnar  *	increment its refcount (access) and return the matching uprobe.
703a5f4374aSIngo Molnar  *
704a5f4374aSIngo Molnar  * If there is no matching uprobe, insert @uprobe into the rbtree,
705a5f4374aSIngo Molnar  *	take a double refcount (access + creation) and return NULL.
706a5f4374aSIngo Molnar  */
707a5f4374aSIngo Molnar static struct uprobe *insert_uprobe(struct uprobe *uprobe)
708a5f4374aSIngo Molnar {
709a5f4374aSIngo Molnar 	struct uprobe *u;
710a5f4374aSIngo Molnar 
7116f47caa0SOleg Nesterov 	spin_lock(&uprobes_treelock);
712a5f4374aSIngo Molnar 	u = __insert_uprobe(uprobe);
7136f47caa0SOleg Nesterov 	spin_unlock(&uprobes_treelock);
714a5f4374aSIngo Molnar 
715a5f4374aSIngo Molnar 	return u;
716a5f4374aSIngo Molnar }
717a5f4374aSIngo Molnar 
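/*
 * Illustrative sketch (not part of the original file): the refcount
 * convention used above.  A freshly inserted uprobe starts at 2: a
 * "creation" reference owned by the rbtree until delete_uprobe(), plus
 * an "access" reference owned by the caller.  The helper is hypothetical.
 */
#if 0
static void example_refcount_lifecycle(struct uprobe *uprobe)
{
	/* insert_uprobe() returned NULL: refcount_read(&uprobe->ref) == 2 */
	put_uprobe(uprobe);	/* drop the access ref; creation ref remains */
	/* later, delete_uprobe() drops the creation ref and frees it */
}
#endif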
71822bad382SRavi Bangoria static void
71922bad382SRavi Bangoria ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe)
72022bad382SRavi Bangoria {
72122bad382SRavi Bangoria 	pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx "
72222bad382SRavi Bangoria 		"ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n",
72322bad382SRavi Bangoria 		uprobe->inode->i_ino, (unsigned long long) uprobe->offset,
72422bad382SRavi Bangoria 		(unsigned long long) cur_uprobe->ref_ctr_offset,
72522bad382SRavi Bangoria 		(unsigned long long) uprobe->ref_ctr_offset);
72622bad382SRavi Bangoria }
72722bad382SRavi Bangoria 
7281cc33161SRavi Bangoria static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
7291cc33161SRavi Bangoria 				   loff_t ref_ctr_offset)
730a5f4374aSIngo Molnar {
731a5f4374aSIngo Molnar 	struct uprobe *uprobe, *cur_uprobe;
732a5f4374aSIngo Molnar 
733a5f4374aSIngo Molnar 	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
734a5f4374aSIngo Molnar 	if (!uprobe)
735a5f4374aSIngo Molnar 		return NULL;
736a5f4374aSIngo Molnar 
73761f94203SSong Liu 	uprobe->inode = inode;
738a5f4374aSIngo Molnar 	uprobe->offset = offset;
7391cc33161SRavi Bangoria 	uprobe->ref_ctr_offset = ref_ctr_offset;
740e591c8d7SOleg Nesterov 	init_rwsem(&uprobe->register_rwsem);
741a5f4374aSIngo Molnar 	init_rwsem(&uprobe->consumer_rwsem);
742a5f4374aSIngo Molnar 
743a5f4374aSIngo Molnar 	/* add to uprobes_tree, sorted on inode:offset */
744a5f4374aSIngo Molnar 	cur_uprobe = insert_uprobe(uprobe);
745a5f4374aSIngo Molnar 	/* a uprobe exists for this inode:offset combination */
746a5f4374aSIngo Molnar 	if (cur_uprobe) {
74722bad382SRavi Bangoria 		if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) {
74822bad382SRavi Bangoria 			ref_ctr_mismatch_warn(cur_uprobe, uprobe);
74922bad382SRavi Bangoria 			put_uprobe(cur_uprobe);
75022bad382SRavi Bangoria 			kfree(uprobe);
75122bad382SRavi Bangoria 			return ERR_PTR(-EINVAL);
75222bad382SRavi Bangoria 		}
753a5f4374aSIngo Molnar 		kfree(uprobe);
754a5f4374aSIngo Molnar 		uprobe = cur_uprobe;
755a5f4374aSIngo Molnar 	}
756a5f4374aSIngo Molnar 
757a5f4374aSIngo Molnar 	return uprobe;
758a5f4374aSIngo Molnar }
759a5f4374aSIngo Molnar 
7609a98e03cSOleg Nesterov static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
761a5f4374aSIngo Molnar {
762a5f4374aSIngo Molnar 	down_write(&uprobe->consumer_rwsem);
763e3343e6aSSrikar Dronamraju 	uc->next = uprobe->consumers;
764e3343e6aSSrikar Dronamraju 	uprobe->consumers = uc;
765a5f4374aSIngo Molnar 	up_write(&uprobe->consumer_rwsem);
766a5f4374aSIngo Molnar }
767a5f4374aSIngo Molnar 
768a5f4374aSIngo Molnar /*
769e3343e6aSSrikar Dronamraju  * For uprobe @uprobe, delete the consumer @uc.
770e3343e6aSSrikar Dronamraju  * Return true if @uc was deleted successfully,
771a5f4374aSIngo Molnar  * false otherwise.
772a5f4374aSIngo Molnar  */
773e3343e6aSSrikar Dronamraju static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
774a5f4374aSIngo Molnar {
775a5f4374aSIngo Molnar 	struct uprobe_consumer **con;
776a5f4374aSIngo Molnar 	bool ret = false;
777a5f4374aSIngo Molnar 
778a5f4374aSIngo Molnar 	down_write(&uprobe->consumer_rwsem);
779a5f4374aSIngo Molnar 	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
780e3343e6aSSrikar Dronamraju 		if (*con == uc) {
781e3343e6aSSrikar Dronamraju 			*con = uc->next;
782a5f4374aSIngo Molnar 			ret = true;
783a5f4374aSIngo Molnar 			break;
784a5f4374aSIngo Molnar 		}
785a5f4374aSIngo Molnar 	}
786a5f4374aSIngo Molnar 	up_write(&uprobe->consumer_rwsem);
787a5f4374aSIngo Molnar 
788a5f4374aSIngo Molnar 	return ret;
789a5f4374aSIngo Molnar }
790a5f4374aSIngo Molnar 
7912ded0980SOleg Nesterov static int __copy_insn(struct address_space *mapping, struct file *filp,
7922ded0980SOleg Nesterov 			void *insn, int nbytes, loff_t offset)
793a5f4374aSIngo Molnar {
794a5f4374aSIngo Molnar 	struct page *page;
795a5f4374aSIngo Molnar 	/*
79640814f68SOleg Nesterov 	 * Ensure that the page that has the original instruction is populated
79740814f68SOleg Nesterov 	 * and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
79840814f68SOleg Nesterov 	 * see uprobe_register().
799a5f4374aSIngo Molnar 	 */
80040814f68SOleg Nesterov 	if (mapping->a_ops->readpage)
80109cbfeafSKirill A. Shutemov 		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
80240814f68SOleg Nesterov 	else
80309cbfeafSKirill A. Shutemov 		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
804a5f4374aSIngo Molnar 	if (IS_ERR(page))
805a5f4374aSIngo Molnar 		return PTR_ERR(page);
806a5f4374aSIngo Molnar 
8072edb7b55SOleg Nesterov 	copy_from_page(page, offset, insn, nbytes);
80809cbfeafSKirill A. Shutemov 	put_page(page);
809a5f4374aSIngo Molnar 
810a5f4374aSIngo Molnar 	return 0;
811a5f4374aSIngo Molnar }
812a5f4374aSIngo Molnar 
813d436615eSOleg Nesterov static int copy_insn(struct uprobe *uprobe, struct file *filp)
814a5f4374aSIngo Molnar {
8152ded0980SOleg Nesterov 	struct address_space *mapping = uprobe->inode->i_mapping;
8162ded0980SOleg Nesterov 	loff_t offs = uprobe->offset;
817803200e2SOleg Nesterov 	void *insn = &uprobe->arch.insn;
818803200e2SOleg Nesterov 	int size = sizeof(uprobe->arch.insn);
8192ded0980SOleg Nesterov 	int len, err = -EIO;
820a5f4374aSIngo Molnar 
8212ded0980SOleg Nesterov 	/* Copy only available bytes, -EIO if nothing was read */
8222ded0980SOleg Nesterov 	do {
8232ded0980SOleg Nesterov 		if (offs >= i_size_read(uprobe->inode))
8242ded0980SOleg Nesterov 			break;
825a5f4374aSIngo Molnar 
8262ded0980SOleg Nesterov 		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
8272ded0980SOleg Nesterov 		err = __copy_insn(mapping, filp, insn, len, offs);
828fc36f595SOleg Nesterov 		if (err)
8292ded0980SOleg Nesterov 			break;
8302ded0980SOleg Nesterov 
8312ded0980SOleg Nesterov 		insn += len;
8322ded0980SOleg Nesterov 		offs += len;
8332ded0980SOleg Nesterov 		size -= len;
8342ded0980SOleg Nesterov 	} while (size);
8352ded0980SOleg Nesterov 
836fc36f595SOleg Nesterov 	return err;
837a5f4374aSIngo Molnar }
838a5f4374aSIngo Molnar 
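/*
 * Illustrative sketch (not part of the original file): a worked instance
 * of the chunking loop above, assuming PAGE_SIZE == 0x1000.  An insn
 * buffer starting at file offset 0xffe with size 16 spans a page
 * boundary and is copied in two passes.
 */
#if 0
static void example_chunking(void)
{
	/* first pass: offs = 0xffe, size = 16 */
	int len = min_t(int, 16, PAGE_SIZE - (0xffe & ~PAGE_MASK));	/* == 2 */

	/* second pass: offs = 0x1000, size = 14 -> len == 14, done */
}
#endif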
839cb9a19feSOleg Nesterov static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
840cb9a19feSOleg Nesterov 				struct mm_struct *mm, unsigned long vaddr)
841cb9a19feSOleg Nesterov {
842cb9a19feSOleg Nesterov 	int ret = 0;
843cb9a19feSOleg Nesterov 
84471434f2fSOleg Nesterov 	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
845cb9a19feSOleg Nesterov 		return ret;
846cb9a19feSOleg Nesterov 
847d4d3ccc6SOleg Nesterov 	/* TODO: move this into _register, until then we abuse this sem. */
848d4d3ccc6SOleg Nesterov 	down_write(&uprobe->consumer_rwsem);
84971434f2fSOleg Nesterov 	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
8504710f05fSOleg Nesterov 		goto out;
8514710f05fSOleg Nesterov 
852cb9a19feSOleg Nesterov 	ret = copy_insn(uprobe, file);
853cb9a19feSOleg Nesterov 	if (ret)
854cb9a19feSOleg Nesterov 		goto out;
855cb9a19feSOleg Nesterov 
856cb9a19feSOleg Nesterov 	ret = -ENOTSUPP;
857803200e2SOleg Nesterov 	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
858cb9a19feSOleg Nesterov 		goto out;
859cb9a19feSOleg Nesterov 
860cb9a19feSOleg Nesterov 	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
861cb9a19feSOleg Nesterov 	if (ret)
862cb9a19feSOleg Nesterov 		goto out;
863cb9a19feSOleg Nesterov 
864f72d41faSOleg Nesterov 	/* uprobe_write_opcode() assumes we don't cross page boundary */
865cb9a19feSOleg Nesterov 	BUG_ON((uprobe->offset & ~PAGE_MASK) +
866cb9a19feSOleg Nesterov 			UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
867cb9a19feSOleg Nesterov 
86809d3f015SAndrea Parri 	smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
86971434f2fSOleg Nesterov 	set_bit(UPROBE_COPY_INSN, &uprobe->flags);
870cb9a19feSOleg Nesterov 
871cb9a19feSOleg Nesterov  out:
872d4d3ccc6SOleg Nesterov 	up_write(&uprobe->consumer_rwsem);
8734710f05fSOleg Nesterov 
874cb9a19feSOleg Nesterov 	return ret;
875cb9a19feSOleg Nesterov }
876cb9a19feSOleg Nesterov 
8778a7f2fa0SOleg Nesterov static inline bool consumer_filter(struct uprobe_consumer *uc,
8788a7f2fa0SOleg Nesterov 				   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
879806a98bdSOleg Nesterov {
8808a7f2fa0SOleg Nesterov 	return !uc->filter || uc->filter(uc, ctx, mm);
881806a98bdSOleg Nesterov }
882806a98bdSOleg Nesterov 
8838a7f2fa0SOleg Nesterov static bool filter_chain(struct uprobe *uprobe,
8848a7f2fa0SOleg Nesterov 			 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
88563633cbfSOleg Nesterov {
8861ff6fee5SOleg Nesterov 	struct uprobe_consumer *uc;
8871ff6fee5SOleg Nesterov 	bool ret = false;
8881ff6fee5SOleg Nesterov 
8891ff6fee5SOleg Nesterov 	down_read(&uprobe->consumer_rwsem);
8901ff6fee5SOleg Nesterov 	for (uc = uprobe->consumers; uc; uc = uc->next) {
8918a7f2fa0SOleg Nesterov 		ret = consumer_filter(uc, ctx, mm);
8921ff6fee5SOleg Nesterov 		if (ret)
8931ff6fee5SOleg Nesterov 			break;
8941ff6fee5SOleg Nesterov 	}
8951ff6fee5SOleg Nesterov 	up_read(&uprobe->consumer_rwsem);
8961ff6fee5SOleg Nesterov 
8971ff6fee5SOleg Nesterov 	return ret;
89863633cbfSOleg Nesterov }
89963633cbfSOleg Nesterov 
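/*
 * Illustrative sketch (not part of the original file): a consumer whose
 * ->filter restricts insertion to a single target mm.  All example_*
 * names are hypothetical; the uprobe_consumer callbacks are real.
 */
#if 0
static struct mm_struct *example_target_mm;

static bool example_filter(struct uprobe_consumer *self,
			   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	return mm == example_target_mm;	/* probe only one process */
}

static int example_handler(struct uprobe_consumer *self, struct pt_regs *regs)
{
	return 0;	/* keep the probe installed */
}

static struct uprobe_consumer example_consumer = {
	.handler = example_handler,
	.filter  = example_filter,
};
#endif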
900e3343e6aSSrikar Dronamraju static int
901e3343e6aSSrikar Dronamraju install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
902816c03fbSOleg Nesterov 			struct vm_area_struct *vma, unsigned long vaddr)
903a5f4374aSIngo Molnar {
904f8ac4ec9SOleg Nesterov 	bool first_uprobe;
905a5f4374aSIngo Molnar 	int ret;
906a5f4374aSIngo Molnar 
907cb9a19feSOleg Nesterov 	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
908a5f4374aSIngo Molnar 	if (ret)
909a5f4374aSIngo Molnar 		return ret;
910a5f4374aSIngo Molnar 
911f8ac4ec9SOleg Nesterov 	/*
912f8ac4ec9SOleg Nesterov 	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
913f8ac4ec9SOleg Nesterov 	 * the task can hit this breakpoint right after __replace_page().
914f8ac4ec9SOleg Nesterov 	 */
915f8ac4ec9SOleg Nesterov 	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
916f8ac4ec9SOleg Nesterov 	if (first_uprobe)
917f8ac4ec9SOleg Nesterov 		set_bit(MMF_HAS_UPROBES, &mm->flags);
918f8ac4ec9SOleg Nesterov 
919816c03fbSOleg Nesterov 	ret = set_swbp(&uprobe->arch, mm, vaddr);
9209f68f672SOleg Nesterov 	if (!ret)
9219f68f672SOleg Nesterov 		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
9229f68f672SOleg Nesterov 	else if (first_uprobe)
923f8ac4ec9SOleg Nesterov 		clear_bit(MMF_HAS_UPROBES, &mm->flags);
924a5f4374aSIngo Molnar 
925a5f4374aSIngo Molnar 	return ret;
926a5f4374aSIngo Molnar }
927a5f4374aSIngo Molnar 
928076a365bSOleg Nesterov static int
929816c03fbSOleg Nesterov remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
930a5f4374aSIngo Molnar {
9319f68f672SOleg Nesterov 	set_bit(MMF_RECALC_UPROBES, &mm->flags);
932076a365bSOleg Nesterov 	return set_orig_insn(&uprobe->arch, mm, vaddr);
933a5f4374aSIngo Molnar }
934a5f4374aSIngo Molnar 
93506b7bcd8SOleg Nesterov static inline bool uprobe_is_active(struct uprobe *uprobe)
93606b7bcd8SOleg Nesterov {
93706b7bcd8SOleg Nesterov 	return !RB_EMPTY_NODE(&uprobe->rb_node);
93806b7bcd8SOleg Nesterov }
9390326f5a9SSrikar Dronamraju /*
940778b032dSOleg Nesterov  * There could be threads that have already hit the breakpoint. They
941778b032dSOleg Nesterov  * will recheck the current insn and restart if find_uprobe() fails.
942778b032dSOleg Nesterov  * See find_active_uprobe().
9430326f5a9SSrikar Dronamraju  */
944a5f4374aSIngo Molnar static void delete_uprobe(struct uprobe *uprobe)
945a5f4374aSIngo Molnar {
94606b7bcd8SOleg Nesterov 	if (WARN_ON(!uprobe_is_active(uprobe)))
94706b7bcd8SOleg Nesterov 		return;
94806b7bcd8SOleg Nesterov 
9496f47caa0SOleg Nesterov 	spin_lock(&uprobes_treelock);
950a5f4374aSIngo Molnar 	rb_erase(&uprobe->rb_node, &uprobes_tree);
9516f47caa0SOleg Nesterov 	spin_unlock(&uprobes_treelock);
95206b7bcd8SOleg Nesterov 	RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
953a5f4374aSIngo Molnar 	put_uprobe(uprobe);
954a5f4374aSIngo Molnar }
955a5f4374aSIngo Molnar 
95626872090SOleg Nesterov struct map_info {
95726872090SOleg Nesterov 	struct map_info *next;
95826872090SOleg Nesterov 	struct mm_struct *mm;
959816c03fbSOleg Nesterov 	unsigned long vaddr;
96026872090SOleg Nesterov };
96126872090SOleg Nesterov 
96226872090SOleg Nesterov static inline struct map_info *free_map_info(struct map_info *info)
963a5f4374aSIngo Molnar {
96426872090SOleg Nesterov 	struct map_info *next = info->next;
96526872090SOleg Nesterov 	kfree(info);
96626872090SOleg Nesterov 	return next;
96726872090SOleg Nesterov }
96826872090SOleg Nesterov 
96926872090SOleg Nesterov static struct map_info *
97026872090SOleg Nesterov build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
97126872090SOleg Nesterov {
97226872090SOleg Nesterov 	unsigned long pgoff = offset >> PAGE_SHIFT;
973a5f4374aSIngo Molnar 	struct vm_area_struct *vma;
97426872090SOleg Nesterov 	struct map_info *curr = NULL;
97526872090SOleg Nesterov 	struct map_info *prev = NULL;
97626872090SOleg Nesterov 	struct map_info *info;
97726872090SOleg Nesterov 	int more = 0;
978a5f4374aSIngo Molnar 
97926872090SOleg Nesterov  again:
9804a23717aSDavidlohr Bueso 	i_mmap_lock_read(mapping);
9816b2dbba8SMichel Lespinasse 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
982a5f4374aSIngo Molnar 		if (!valid_vma(vma, is_register))
983a5f4374aSIngo Molnar 			continue;
984a5f4374aSIngo Molnar 
9857a5bfb66SOleg Nesterov 		if (!prev && !more) {
9867a5bfb66SOleg Nesterov 			/*
987c8c06efaSDavidlohr Bueso 			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
9887a5bfb66SOleg Nesterov 			 * reclaim. This is optimistic, no harm done if it fails.
9897a5bfb66SOleg Nesterov 			 */
9907a5bfb66SOleg Nesterov 			prev = kmalloc(sizeof(struct map_info),
9917a5bfb66SOleg Nesterov 					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
9927a5bfb66SOleg Nesterov 			if (prev)
9937a5bfb66SOleg Nesterov 				prev->next = NULL;
9947a5bfb66SOleg Nesterov 		}
99526872090SOleg Nesterov 		if (!prev) {
99626872090SOleg Nesterov 			more++;
99726872090SOleg Nesterov 			continue;
998a5f4374aSIngo Molnar 		}
999a5f4374aSIngo Molnar 
1000388f7934SVegard Nossum 		if (!mmget_not_zero(vma->vm_mm))
100126872090SOleg Nesterov 			continue;
1002a5f4374aSIngo Molnar 
100326872090SOleg Nesterov 		info = prev;
100426872090SOleg Nesterov 		prev = prev->next;
100526872090SOleg Nesterov 		info->next = curr;
100626872090SOleg Nesterov 		curr = info;
100726872090SOleg Nesterov 
100826872090SOleg Nesterov 		info->mm = vma->vm_mm;
100957683f72SOleg Nesterov 		info->vaddr = offset_to_vaddr(vma, offset);
1010a5f4374aSIngo Molnar 	}
10114a23717aSDavidlohr Bueso 	i_mmap_unlock_read(mapping);
1012a5f4374aSIngo Molnar 
101326872090SOleg Nesterov 	if (!more)
101426872090SOleg Nesterov 		goto out;
1015a5f4374aSIngo Molnar 
101626872090SOleg Nesterov 	prev = curr;
101726872090SOleg Nesterov 	while (curr) {
101826872090SOleg Nesterov 		mmput(curr->mm);
101926872090SOleg Nesterov 		curr = curr->next;
102026872090SOleg Nesterov 	}
102126872090SOleg Nesterov 
102226872090SOleg Nesterov 	do {
102326872090SOleg Nesterov 		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
102426872090SOleg Nesterov 		if (!info) {
102526872090SOleg Nesterov 			curr = ERR_PTR(-ENOMEM);
102626872090SOleg Nesterov 			goto out;
102726872090SOleg Nesterov 		}
102826872090SOleg Nesterov 		info->next = prev;
102926872090SOleg Nesterov 		prev = info;
103026872090SOleg Nesterov 	} while (--more);
103126872090SOleg Nesterov 
103226872090SOleg Nesterov 	goto again;
103326872090SOleg Nesterov  out:
103426872090SOleg Nesterov 	while (prev)
103526872090SOleg Nesterov 		prev = free_map_info(prev);
103626872090SOleg Nesterov 	return curr;
1037a5f4374aSIngo Molnar }
1038a5f4374aSIngo Molnar 
1039bdf8647cSOleg Nesterov static int
1040bdf8647cSOleg Nesterov register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
1041a5f4374aSIngo Molnar {
1042bdf8647cSOleg Nesterov 	bool is_register = !!new;
104326872090SOleg Nesterov 	struct map_info *info;
104426872090SOleg Nesterov 	int err = 0;
104526872090SOleg Nesterov 
104632cdba1eSOleg Nesterov 	percpu_down_write(&dup_mmap_sem);
104726872090SOleg Nesterov 	info = build_map_info(uprobe->inode->i_mapping,
104826872090SOleg Nesterov 					uprobe->offset, is_register);
104932cdba1eSOleg Nesterov 	if (IS_ERR(info)) {
105032cdba1eSOleg Nesterov 		err = PTR_ERR(info);
105132cdba1eSOleg Nesterov 		goto out;
105232cdba1eSOleg Nesterov 	}
105326872090SOleg Nesterov 
105426872090SOleg Nesterov 	while (info) {
105526872090SOleg Nesterov 		struct mm_struct *mm = info->mm;
1056a5f4374aSIngo Molnar 		struct vm_area_struct *vma;
1057a5f4374aSIngo Molnar 
1058076a365bSOleg Nesterov 		if (err && is_register)
105926872090SOleg Nesterov 			goto free;
1060a5f4374aSIngo Molnar 
106177fc4af1SOleg Nesterov 		down_write(&mm->mmap_sem);
1062f4d6dfe5SOleg Nesterov 		vma = find_vma(mm, info->vaddr);
1063f4d6dfe5SOleg Nesterov 		if (!vma || !valid_vma(vma, is_register) ||
1064f281769eSOleg Nesterov 		    file_inode(vma->vm_file) != uprobe->inode)
106526872090SOleg Nesterov 			goto unlock;
106626872090SOleg Nesterov 
1067f4d6dfe5SOleg Nesterov 		if (vma->vm_start > info->vaddr ||
1068f4d6dfe5SOleg Nesterov 		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
106926872090SOleg Nesterov 			goto unlock;
1070a5f4374aSIngo Molnar 
1071806a98bdSOleg Nesterov 		if (is_register) {
1072806a98bdSOleg Nesterov 			/* consult only the "caller", new consumer. */
1073bdf8647cSOleg Nesterov 			if (consumer_filter(new,
10748a7f2fa0SOleg Nesterov 					UPROBE_FILTER_REGISTER, mm))
107526872090SOleg Nesterov 				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
1076806a98bdSOleg Nesterov 		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
10778a7f2fa0SOleg Nesterov 			if (!filter_chain(uprobe,
10788a7f2fa0SOleg Nesterov 					UPROBE_FILTER_UNREGISTER, mm))
1079076a365bSOleg Nesterov 				err |= remove_breakpoint(uprobe, mm, info->vaddr);
1080806a98bdSOleg Nesterov 		}
108178f74116SOleg Nesterov 
108226872090SOleg Nesterov  unlock:
108326872090SOleg Nesterov 		up_write(&mm->mmap_sem);
108426872090SOleg Nesterov  free:
108526872090SOleg Nesterov 		mmput(mm);
108626872090SOleg Nesterov 		info = free_map_info(info);
1087a5f4374aSIngo Molnar 	}
108832cdba1eSOleg Nesterov  out:
108932cdba1eSOleg Nesterov 	percpu_up_write(&dup_mmap_sem);
109026872090SOleg Nesterov 	return err;
1091a5f4374aSIngo Molnar }
1092a5f4374aSIngo Molnar 
109338e967aeSRavi Bangoria static void
109438e967aeSRavi Bangoria __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
1095a5f4374aSIngo Molnar {
109604aab9b2SOleg Nesterov 	int err;
1097a5f4374aSIngo Molnar 
109806d07139SOleg Nesterov 	if (WARN_ON(!consumer_del(uprobe, uc)))
109904aab9b2SOleg Nesterov 		return;
110004aab9b2SOleg Nesterov 
1101bdf8647cSOleg Nesterov 	err = register_for_each_vma(uprobe, NULL);
1102a5f4374aSIngo Molnar 	/* TODO: can't unregister? schedule a worker thread */
1103bb929284SOleg Nesterov 	if (!uprobe->consumers && !err)
110404aab9b2SOleg Nesterov 		delete_uprobe(uprobe);
110504aab9b2SOleg Nesterov }
1106a5f4374aSIngo Molnar 
1107a5f4374aSIngo Molnar /*
11087140ad38SLinus Torvalds  * uprobe_unregister - unregister an already registered probe.
110938e967aeSRavi Bangoria  * @inode: the file in which the probe has to be removed.
111038e967aeSRavi Bangoria  * @offset: offset from the start of the file.
111138e967aeSRavi Bangoria  * @uc: identify which probe if multiple probes are colocated.
111238e967aeSRavi Bangoria  */
111338e967aeSRavi Bangoria void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
111438e967aeSRavi Bangoria {
111538e967aeSRavi Bangoria 	struct uprobe *uprobe;
111638e967aeSRavi Bangoria 
111738e967aeSRavi Bangoria 	uprobe = find_uprobe(inode, offset);
111838e967aeSRavi Bangoria 	if (WARN_ON(!uprobe))
111938e967aeSRavi Bangoria 		return;
112038e967aeSRavi Bangoria 
112138e967aeSRavi Bangoria 	down_write(&uprobe->register_rwsem);
112238e967aeSRavi Bangoria 	__uprobe_unregister(uprobe, uc);
112338e967aeSRavi Bangoria 	up_write(&uprobe->register_rwsem);
112438e967aeSRavi Bangoria 	put_uprobe(uprobe);
112538e967aeSRavi Bangoria }
112638e967aeSRavi Bangoria EXPORT_SYMBOL_GPL(uprobe_unregister);
112738e967aeSRavi Bangoria 
112838e967aeSRavi Bangoria /*
112938e967aeSRavi Bangoria  * __uprobe_register - register a probe
1130a5f4374aSIngo Molnar  * @inode: the file in which the probe has to be placed.
1131a5f4374aSIngo Molnar  * @offset: offset from the start of the file.
1132e3343e6aSSrikar Dronamraju  * @uc: information on how to handle the probe.
1133a5f4374aSIngo Molnar  *
113438e967aeSRavi Bangoria  * Apart from the access refcount, __uprobe_register() takes a creation
1135a5f4374aSIngo Molnar  * refcount (through alloc_uprobe) if and only if this @uprobe is getting
1136a5f4374aSIngo Molnar  * inserted into the rbtree (i.e. first consumer for a @inode:@offset
1137a5f4374aSIngo Molnar  * tuple).  Creation refcount stops uprobe_unregister from freeing the
1138a5f4374aSIngo Molnar  * @uprobe even before the register operation is complete. Creation
1139e3343e6aSSrikar Dronamraju  * refcount is released when the last @uc for the @uprobe
114038e967aeSRavi Bangoria  * unregisters. Caller of __uprobe_register() is required to keep @inode
114161f94203SSong Liu  * (and the containing mount) referenced.
1142a5f4374aSIngo Molnar  *
1143a5f4374aSIngo Molnar  * Return errno if it cannot successfully install probes,
1144a5f4374aSIngo Molnar  * else return 0 (success)
1145a5f4374aSIngo Molnar  */
114638e967aeSRavi Bangoria static int __uprobe_register(struct inode *inode, loff_t offset,
11471cc33161SRavi Bangoria 			     loff_t ref_ctr_offset, struct uprobe_consumer *uc)
1148a5f4374aSIngo Molnar {
1149a5f4374aSIngo Molnar 	struct uprobe *uprobe;
1150a5f4374aSIngo Molnar 	int ret;
1151a5f4374aSIngo Molnar 
1152ea024870SAnton Arapov 	/* Uprobe must have at least one set consumer */
1153ea024870SAnton Arapov 	if (!uc->handler && !uc->ret_handler)
1154ea024870SAnton Arapov 		return -EINVAL;
1155ea024870SAnton Arapov 
115640814f68SOleg Nesterov 	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
115740814f68SOleg Nesterov 	if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping))
115841ccba02SOleg Nesterov 		return -EIO;
1159f0744af7SOleg Nesterov 	/* Racy, just to catch the obvious mistakes */
1160a5f4374aSIngo Molnar 	if (offset > i_size_read(inode))
1161a5f4374aSIngo Molnar 		return -EINVAL;
1162a5f4374aSIngo Molnar 
116366d06dffSOleg Nesterov  retry:
11641cc33161SRavi Bangoria 	uprobe = alloc_uprobe(inode, offset, ref_ctr_offset);
116566d06dffSOleg Nesterov 	if (!uprobe)
116666d06dffSOleg Nesterov 		return -ENOMEM;
116722bad382SRavi Bangoria 	if (IS_ERR(uprobe))
116822bad382SRavi Bangoria 		return PTR_ERR(uprobe);
116922bad382SRavi Bangoria 
117066d06dffSOleg Nesterov 	/*
117166d06dffSOleg Nesterov 	 * We can race with uprobe_unregister()->delete_uprobe().
117266d06dffSOleg Nesterov 	 * Check uprobe_is_active() and retry if it is false.
117366d06dffSOleg Nesterov 	 */
1174e591c8d7SOleg Nesterov 	down_write(&uprobe->register_rwsem);
117566d06dffSOleg Nesterov 	ret = -EAGAIN;
117666d06dffSOleg Nesterov 	if (likely(uprobe_is_active(uprobe))) {
117738e967aeSRavi Bangoria 		consumer_add(uprobe, uc);
117838e967aeSRavi Bangoria 		ret = register_for_each_vma(uprobe, uc);
11799a98e03cSOleg Nesterov 		if (ret)
118004aab9b2SOleg Nesterov 			__uprobe_unregister(uprobe, uc);
1181a5f4374aSIngo Molnar 	}
118266d06dffSOleg Nesterov 	up_write(&uprobe->register_rwsem);
1183a5f4374aSIngo Molnar 	put_uprobe(uprobe);
1184a5f4374aSIngo Molnar 
118566d06dffSOleg Nesterov 	if (unlikely(ret == -EAGAIN))
118666d06dffSOleg Nesterov 		goto retry;
1187a5f4374aSIngo Molnar 	return ret;
1188a5f4374aSIngo Molnar }
118938e967aeSRavi Bangoria 
119038e967aeSRavi Bangoria int uprobe_register(struct inode *inode, loff_t offset,
119138e967aeSRavi Bangoria 		    struct uprobe_consumer *uc)
119238e967aeSRavi Bangoria {
11931cc33161SRavi Bangoria 	return __uprobe_register(inode, offset, 0, uc);
119438e967aeSRavi Bangoria }
1195e8440c14SJosh Stone EXPORT_SYMBOL_GPL(uprobe_register);
1196a5f4374aSIngo Molnar 
11971cc33161SRavi Bangoria int uprobe_register_refctr(struct inode *inode, loff_t offset,
11981cc33161SRavi Bangoria 			   loff_t ref_ctr_offset, struct uprobe_consumer *uc)
11991cc33161SRavi Bangoria {
12001cc33161SRavi Bangoria 	return __uprobe_register(inode, offset, ref_ctr_offset, uc);
12011cc33161SRavi Bangoria }
12021cc33161SRavi Bangoria EXPORT_SYMBOL_GPL(uprobe_register_refctr);
12031cc33161SRavi Bangoria 
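/*
 * A minimal usage sketch of the consumer API exported above, as a client
 * module might spell it. The handler, consumer and attach/detach helpers
 * are illustrative assumptions, not part of this file, so the sketch is
 * kept under #if 0.
 */
#if 0
static int example_handler(struct uprobe_consumer *self, struct pt_regs *regs)
{
	pr_info("uprobe hit, ip=%lx\n", instruction_pointer(regs));
	return 0;	/* UPROBE_HANDLER_REMOVE would ask to drop the bp */
}

static struct uprobe_consumer example_consumer = {
	.handler	= example_handler,
	/* setting ->ret_handler as well would arm a uretprobe */
};

static int example_attach(struct inode *inode, loff_t offset)
{
	/* pins inode:offset and breakpoints every mm that maps it */
	return uprobe_register(inode, offset, &example_consumer);
}

static void example_detach(struct inode *inode, loff_t offset)
{
	/* must pair with a successful example_attach() */
	uprobe_unregister(inode, offset, &example_consumer);
}
#endif
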
1204a5f4374aSIngo Molnar /*
1205788faab7STobias Tefke  * uprobe_apply - add or remove the breakpoints of an already registered probe.
1206bdf8647cSOleg Nesterov  * @inode: the file in which the probe resides.
1207bdf8647cSOleg Nesterov  * @offset: offset from the start of the file.
1208bdf8647cSOleg Nesterov  * @uc: consumer which wants to add more or remove some breakpoints
1209bdf8647cSOleg Nesterov  * @add: add or remove the breakpoints
1210bdf8647cSOleg Nesterov  */
1211bdf8647cSOleg Nesterov int uprobe_apply(struct inode *inode, loff_t offset,
1212bdf8647cSOleg Nesterov 			struct uprobe_consumer *uc, bool add)
1213bdf8647cSOleg Nesterov {
1214bdf8647cSOleg Nesterov 	struct uprobe *uprobe;
1215bdf8647cSOleg Nesterov 	struct uprobe_consumer *con;
1216bdf8647cSOleg Nesterov 	int ret = -ENOENT;
1217bdf8647cSOleg Nesterov 
1218bdf8647cSOleg Nesterov 	uprobe = find_uprobe(inode, offset);
121906d07139SOleg Nesterov 	if (WARN_ON(!uprobe))
1220bdf8647cSOleg Nesterov 		return ret;
1221bdf8647cSOleg Nesterov 
1222bdf8647cSOleg Nesterov 	down_write(&uprobe->register_rwsem);
1223bdf8647cSOleg Nesterov 	for (con = uprobe->consumers; con && con != uc ; con = con->next)
1224bdf8647cSOleg Nesterov 		;
1225bdf8647cSOleg Nesterov 	if (con)
1226bdf8647cSOleg Nesterov 		ret = register_for_each_vma(uprobe, add ? uc : NULL);
1227bdf8647cSOleg Nesterov 	up_write(&uprobe->register_rwsem);
1228bdf8647cSOleg Nesterov 	put_uprobe(uprobe);
1229bdf8647cSOleg Nesterov 
1230bdf8647cSOleg Nesterov 	return ret;
1231bdf8647cSOleg Nesterov }
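
/*
 * For example, a tracer that wants to drop the breakpoints of a still
 * registered consumer and re-arm them later could do (illustrative):
 *
 *	uprobe_apply(inode, offset, &example_consumer, false);
 *	...
 *	uprobe_apply(inode, offset, &example_consumer, true);
 *
 * Only a consumer already on ->consumers is accepted, hence the lookup
 * loop above; otherwise -ENOENT is returned.
 */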
1232bdf8647cSOleg Nesterov 
1233da1816b1SOleg Nesterov static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
1234da1816b1SOleg Nesterov {
1235da1816b1SOleg Nesterov 	struct vm_area_struct *vma;
1236da1816b1SOleg Nesterov 	int err = 0;
1237da1816b1SOleg Nesterov 
1238da1816b1SOleg Nesterov 	down_read(&mm->mmap_sem);
1239da1816b1SOleg Nesterov 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
1240da1816b1SOleg Nesterov 		unsigned long vaddr;
1241da1816b1SOleg Nesterov 		loff_t offset;
1242da1816b1SOleg Nesterov 
1243da1816b1SOleg Nesterov 		if (!valid_vma(vma, false) ||
1244f281769eSOleg Nesterov 		    file_inode(vma->vm_file) != uprobe->inode)
1245da1816b1SOleg Nesterov 			continue;
1246da1816b1SOleg Nesterov 
1247da1816b1SOleg Nesterov 		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
1248da1816b1SOleg Nesterov 		if (uprobe->offset <  offset ||
1249da1816b1SOleg Nesterov 		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
1250da1816b1SOleg Nesterov 			continue;
1251da1816b1SOleg Nesterov 
1252da1816b1SOleg Nesterov 		vaddr = offset_to_vaddr(vma, uprobe->offset);
1253da1816b1SOleg Nesterov 		err |= remove_breakpoint(uprobe, mm, vaddr);
1254da1816b1SOleg Nesterov 	}
1255da1816b1SOleg Nesterov 	up_read(&mm->mmap_sem);
1256da1816b1SOleg Nesterov 
1257da1816b1SOleg Nesterov 	return err;
1258da1816b1SOleg Nesterov }
1259da1816b1SOleg Nesterov 
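/*
 * Probes in uprobes_tree are ordered by inode first, offset second; this
 * returns some node of @inode whose offset lies in [min, max], or NULL.
 * Callers walk rb_prev()/rb_next() from the returned node to cover the
 * whole range.
 */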
1260891c3970SOleg Nesterov static struct rb_node *
1261891c3970SOleg Nesterov find_node_in_range(struct inode *inode, loff_t min, loff_t max)
1262a5f4374aSIngo Molnar {
1263a5f4374aSIngo Molnar 	struct rb_node *n = uprobes_tree.rb_node;
1264a5f4374aSIngo Molnar 
1265a5f4374aSIngo Molnar 	while (n) {
1266891c3970SOleg Nesterov 		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);
1267a5f4374aSIngo Molnar 
1268891c3970SOleg Nesterov 		if (inode < u->inode) {
1269a5f4374aSIngo Molnar 			n = n->rb_left;
1270891c3970SOleg Nesterov 		} else if (inode > u->inode) {
1271a5f4374aSIngo Molnar 			n = n->rb_right;
1272891c3970SOleg Nesterov 		} else {
1273891c3970SOleg Nesterov 			if (max < u->offset)
1274891c3970SOleg Nesterov 				n = n->rb_left;
1275891c3970SOleg Nesterov 			else if (min > u->offset)
1276891c3970SOleg Nesterov 				n = n->rb_right;
1277891c3970SOleg Nesterov 			else
1278891c3970SOleg Nesterov 				break;
1279891c3970SOleg Nesterov 		}
1280a5f4374aSIngo Molnar 	}
1281a5f4374aSIngo Molnar 
1282891c3970SOleg Nesterov 	return n;
1283a5f4374aSIngo Molnar }
1284a5f4374aSIngo Molnar 
1285a5f4374aSIngo Molnar /*
1286891c3970SOleg Nesterov  * For a given range in vma, build a list of probes that need to be inserted.
1287a5f4374aSIngo Molnar  */
1288891c3970SOleg Nesterov static void build_probe_list(struct inode *inode,
1289891c3970SOleg Nesterov 				struct vm_area_struct *vma,
1290891c3970SOleg Nesterov 				unsigned long start, unsigned long end,
1291891c3970SOleg Nesterov 				struct list_head *head)
1292a5f4374aSIngo Molnar {
1293891c3970SOleg Nesterov 	loff_t min, max;
1294891c3970SOleg Nesterov 	struct rb_node *n, *t;
1295891c3970SOleg Nesterov 	struct uprobe *u;
1296891c3970SOleg Nesterov 
1297891c3970SOleg Nesterov 	INIT_LIST_HEAD(head);
1298cb113b47SOleg Nesterov 	min = vaddr_to_offset(vma, start);
1299891c3970SOleg Nesterov 	max = min + (end - start) - 1;
1300a5f4374aSIngo Molnar 
13016f47caa0SOleg Nesterov 	spin_lock(&uprobes_treelock);
1302891c3970SOleg Nesterov 	n = find_node_in_range(inode, min, max);
1303891c3970SOleg Nesterov 	if (n) {
1304891c3970SOleg Nesterov 		for (t = n; t; t = rb_prev(t)) {
1305891c3970SOleg Nesterov 			u = rb_entry(t, struct uprobe, rb_node);
1306891c3970SOleg Nesterov 			if (u->inode != inode || u->offset < min)
1307a5f4374aSIngo Molnar 				break;
1308891c3970SOleg Nesterov 			list_add(&u->pending_list, head);
1309f231722aSOleg Nesterov 			get_uprobe(u);
1310a5f4374aSIngo Molnar 		}
1311891c3970SOleg Nesterov 		for (t = n; (t = rb_next(t)); ) {
1312891c3970SOleg Nesterov 			u = rb_entry(t, struct uprobe, rb_node);
1313891c3970SOleg Nesterov 			if (u->inode != inode || u->offset > max)
1314891c3970SOleg Nesterov 				break;
1315891c3970SOleg Nesterov 			list_add(&u->pending_list, head);
1316f231722aSOleg Nesterov 			get_uprobe(u);
1317891c3970SOleg Nesterov 		}
1318891c3970SOleg Nesterov 	}
13196f47caa0SOleg Nesterov 	spin_unlock(&uprobes_treelock);
1320a5f4374aSIngo Molnar }
1321a5f4374aSIngo Molnar 
13221cc33161SRavi Bangoria /* @vma contains reference counter, not the probed instruction. */
13231cc33161SRavi Bangoria static int delayed_ref_ctr_inc(struct vm_area_struct *vma)
13241cc33161SRavi Bangoria {
13251cc33161SRavi Bangoria 	struct list_head *pos, *q;
13261cc33161SRavi Bangoria 	struct delayed_uprobe *du;
13271cc33161SRavi Bangoria 	unsigned long vaddr;
13281cc33161SRavi Bangoria 	int ret = 0, err = 0;
13291cc33161SRavi Bangoria 
13301cc33161SRavi Bangoria 	mutex_lock(&delayed_uprobe_lock);
13311cc33161SRavi Bangoria 	list_for_each_safe(pos, q, &delayed_uprobe_list) {
13321cc33161SRavi Bangoria 		du = list_entry(pos, struct delayed_uprobe, list);
13331cc33161SRavi Bangoria 
13341cc33161SRavi Bangoria 		if (du->mm != vma->vm_mm ||
13351cc33161SRavi Bangoria 		    !valid_ref_ctr_vma(du->uprobe, vma))
13361cc33161SRavi Bangoria 			continue;
13371cc33161SRavi Bangoria 
13381cc33161SRavi Bangoria 		vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset);
13391cc33161SRavi Bangoria 		ret = __update_ref_ctr(vma->vm_mm, vaddr, 1);
13401cc33161SRavi Bangoria 		if (ret) {
13411cc33161SRavi Bangoria 			update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1);
13421cc33161SRavi Bangoria 			if (!err)
13431cc33161SRavi Bangoria 				err = ret;
13441cc33161SRavi Bangoria 		}
13451cc33161SRavi Bangoria 		delayed_uprobe_delete(du);
13461cc33161SRavi Bangoria 	}
13471cc33161SRavi Bangoria 	mutex_unlock(&delayed_uprobe_lock);
13481cc33161SRavi Bangoria 	return err;
13491cc33161SRavi Bangoria }
13501cc33161SRavi Bangoria 
1351a5f4374aSIngo Molnar /*
13525e5be71aSOleg Nesterov  * Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
1353a5f4374aSIngo Molnar  *
13545e5be71aSOleg Nesterov  * Currently we ignore all errors and always return 0; the callers
13555e5be71aSOleg Nesterov  * can't handle the failure anyway.
1356a5f4374aSIngo Molnar  */
1357a5f4374aSIngo Molnar int uprobe_mmap(struct vm_area_struct *vma)
1358a5f4374aSIngo Molnar {
1359a5f4374aSIngo Molnar 	struct list_head tmp_list;
1360665605a2SOleg Nesterov 	struct uprobe *uprobe, *u;
1361a5f4374aSIngo Molnar 	struct inode *inode;
1362a5f4374aSIngo Molnar 
13631cc33161SRavi Bangoria 	if (no_uprobe_events())
13641cc33161SRavi Bangoria 		return 0;
13651cc33161SRavi Bangoria 
13661cc33161SRavi Bangoria 	if (vma->vm_file &&
13671cc33161SRavi Bangoria 	    (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
13681cc33161SRavi Bangoria 	    test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags))
13691cc33161SRavi Bangoria 		delayed_ref_ctr_inc(vma);
13701cc33161SRavi Bangoria 
13711cc33161SRavi Bangoria 	if (!valid_vma(vma, true))
1372a5f4374aSIngo Molnar 		return 0;
1373a5f4374aSIngo Molnar 
1374f281769eSOleg Nesterov 	inode = file_inode(vma->vm_file);
1375a5f4374aSIngo Molnar 	if (!inode)
1376a5f4374aSIngo Molnar 		return 0;
1377a5f4374aSIngo Molnar 
1378a5f4374aSIngo Molnar 	mutex_lock(uprobes_mmap_hash(inode));
1379891c3970SOleg Nesterov 	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
1380806a98bdSOleg Nesterov 	/*
1381806a98bdSOleg Nesterov 	 * We can race with uprobe_unregister(), so this uprobe can already
1382806a98bdSOleg Nesterov 	 * be removed. But in this case filter_chain() must return false: all
1383806a98bdSOleg Nesterov 	 * consumers have gone away.
1384806a98bdSOleg Nesterov 	 */
1385665605a2SOleg Nesterov 	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
1386806a98bdSOleg Nesterov 		if (!fatal_signal_pending(current) &&
13878a7f2fa0SOleg Nesterov 		    filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
138857683f72SOleg Nesterov 			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
13895e5be71aSOleg Nesterov 			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
1390a5f4374aSIngo Molnar 		}
1391a5f4374aSIngo Molnar 		put_uprobe(uprobe);
1392a5f4374aSIngo Molnar 	}
1393a5f4374aSIngo Molnar 	mutex_unlock(uprobes_mmap_hash(inode));
1394a5f4374aSIngo Molnar 
13955e5be71aSOleg Nesterov 	return 0;
1396a5f4374aSIngo Molnar }
1397a5f4374aSIngo Molnar 
13989f68f672SOleg Nesterov static bool
13999f68f672SOleg Nesterov vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
14009f68f672SOleg Nesterov {
14019f68f672SOleg Nesterov 	loff_t min, max;
14029f68f672SOleg Nesterov 	struct inode *inode;
14039f68f672SOleg Nesterov 	struct rb_node *n;
14049f68f672SOleg Nesterov 
1405f281769eSOleg Nesterov 	inode = file_inode(vma->vm_file);
14069f68f672SOleg Nesterov 
14079f68f672SOleg Nesterov 	min = vaddr_to_offset(vma, start);
14089f68f672SOleg Nesterov 	max = min + (end - start) - 1;
14099f68f672SOleg Nesterov 
14109f68f672SOleg Nesterov 	spin_lock(&uprobes_treelock);
14119f68f672SOleg Nesterov 	n = find_node_in_range(inode, min, max);
14129f68f672SOleg Nesterov 	spin_unlock(&uprobes_treelock);
14139f68f672SOleg Nesterov 
14149f68f672SOleg Nesterov 	return !!n;
14159f68f672SOleg Nesterov }
14169f68f672SOleg Nesterov 
1417682968e0SSrikar Dronamraju /*
1418682968e0SSrikar Dronamraju  * Called in context of a munmap of a vma.
1419682968e0SSrikar Dronamraju  */
1420cbc91f71SSrikar Dronamraju void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1421682968e0SSrikar Dronamraju {
1422441f1eb7SOleg Nesterov 	if (no_uprobe_events() || !valid_vma(vma, false))
1423682968e0SSrikar Dronamraju 		return;
1424682968e0SSrikar Dronamraju 
14252fd611a9SOleg Nesterov 	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
14262fd611a9SOleg Nesterov 		return;
14272fd611a9SOleg Nesterov 
14289f68f672SOleg Nesterov 	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
14299f68f672SOleg Nesterov 	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
1430f8ac4ec9SOleg Nesterov 		return;
1431f8ac4ec9SOleg Nesterov 
14329f68f672SOleg Nesterov 	if (vma_has_uprobes(vma, start, end))
14339f68f672SOleg Nesterov 		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
1434682968e0SSrikar Dronamraju }
1435682968e0SSrikar Dronamraju 
1436d4b3b638SSrikar Dronamraju /* Slot allocation for XOL */
14376441ec8bSOleg Nesterov static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
1438d4b3b638SSrikar Dronamraju {
1439704bde3cSOleg Nesterov 	struct vm_area_struct *vma;
1440704bde3cSOleg Nesterov 	int ret;
1441d4b3b638SSrikar Dronamraju 
1442598fdc1dSMichal Hocko 	if (down_write_killable(&mm->mmap_sem))
1443598fdc1dSMichal Hocko 		return -EINTR;
1444598fdc1dSMichal Hocko 
1445704bde3cSOleg Nesterov 	if (mm->uprobes_state.xol_area) {
1446704bde3cSOleg Nesterov 		ret = -EALREADY;
1447d4b3b638SSrikar Dronamraju 		goto fail;
1448704bde3cSOleg Nesterov 	}
1449d4b3b638SSrikar Dronamraju 
1450af0d95afSOleg Nesterov 	if (!area->vaddr) {
1451d4b3b638SSrikar Dronamraju 		/* Try to map as high as possible; this is only a hint. */
1452af0d95afSOleg Nesterov 		area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
1453af0d95afSOleg Nesterov 						PAGE_SIZE, 0, 0);
1454ff68dac6SGaowei Pu 		if (IS_ERR_VALUE(area->vaddr)) {
1455d4b3b638SSrikar Dronamraju 			ret = area->vaddr;
1456d4b3b638SSrikar Dronamraju 			goto fail;
1457d4b3b638SSrikar Dronamraju 		}
1458af0d95afSOleg Nesterov 	}
1459d4b3b638SSrikar Dronamraju 
1460704bde3cSOleg Nesterov 	vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
1461704bde3cSOleg Nesterov 				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
1462704bde3cSOleg Nesterov 				&area->xol_mapping);
1463704bde3cSOleg Nesterov 	if (IS_ERR(vma)) {
1464704bde3cSOleg Nesterov 		ret = PTR_ERR(vma);
1465d4b3b638SSrikar Dronamraju 		goto fail;
1466704bde3cSOleg Nesterov 	}
1467d4b3b638SSrikar Dronamraju 
1468704bde3cSOleg Nesterov 	ret = 0;
14695c6338b4SPaul E. McKenney 	/* pairs with get_xol_area() */
14705c6338b4SPaul E. McKenney 	smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
1471d4b3b638SSrikar Dronamraju  fail:
1472d4b3b638SSrikar Dronamraju 	up_write(&mm->mmap_sem);
1473d4b3b638SSrikar Dronamraju 
1474d4b3b638SSrikar Dronamraju 	return ret;
1475d4b3b638SSrikar Dronamraju }
1476d4b3b638SSrikar Dronamraju 
1477af0d95afSOleg Nesterov static struct xol_area *__create_xol_area(unsigned long vaddr)
1478d4b3b638SSrikar Dronamraju {
14799b545df8SOleg Nesterov 	struct mm_struct *mm = current->mm;
1480e78aebfdSAnton Arapov 	uprobe_opcode_t insn = UPROBE_SWBP_INSN;
14816441ec8bSOleg Nesterov 	struct xol_area *area;
14829b545df8SOleg Nesterov 
1483af0d95afSOleg Nesterov 	area = kmalloc(sizeof(*area), GFP_KERNEL);
1484d4b3b638SSrikar Dronamraju 	if (unlikely(!area))
1485c8a82538SOleg Nesterov 		goto out;
1486d4b3b638SSrikar Dronamraju 
14876396bb22SKees Cook 	area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long),
14886396bb22SKees Cook 			       GFP_KERNEL);
1489d4b3b638SSrikar Dronamraju 	if (!area->bitmap)
1490c8a82538SOleg Nesterov 		goto free_area;
1491c8a82538SOleg Nesterov 
1492704bde3cSOleg Nesterov 	area->xol_mapping.name = "[uprobes]";
1493869ae761SOleg Nesterov 	area->xol_mapping.fault = NULL;
1494704bde3cSOleg Nesterov 	area->xol_mapping.pages = area->pages;
1495f58bea2fSOleg Nesterov 	area->pages[0] = alloc_page(GFP_HIGHUSER);
1496f58bea2fSOleg Nesterov 	if (!area->pages[0])
1497c8a82538SOleg Nesterov 		goto free_bitmap;
1498f58bea2fSOleg Nesterov 	area->pages[1] = NULL;
1499d4b3b638SSrikar Dronamraju 
1500af0d95afSOleg Nesterov 	area->vaddr = vaddr;
1501d4b3b638SSrikar Dronamraju 	init_waitqueue_head(&area->wq);
15026441ec8bSOleg Nesterov 	/* Reserve the 1st slot for get_trampoline_vaddr() */
15036441ec8bSOleg Nesterov 	set_bit(0, area->bitmap);
15046441ec8bSOleg Nesterov 	atomic_set(&area->slot_count, 1);
1505297e765eSMarcin Nowakowski 	arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);
1506e78aebfdSAnton Arapov 
15076441ec8bSOleg Nesterov 	if (!xol_add_vma(mm, area))
1508d4b3b638SSrikar Dronamraju 		return area;
1509d4b3b638SSrikar Dronamraju 
1510f58bea2fSOleg Nesterov 	__free_page(area->pages[0]);
1511c8a82538SOleg Nesterov  free_bitmap:
1512d4b3b638SSrikar Dronamraju 	kfree(area->bitmap);
1513c8a82538SOleg Nesterov  free_area:
1514d4b3b638SSrikar Dronamraju 	kfree(area);
1515c8a82538SOleg Nesterov  out:
15166441ec8bSOleg Nesterov 	return NULL;
15176441ec8bSOleg Nesterov }
15186441ec8bSOleg Nesterov 
15196441ec8bSOleg Nesterov /*
15206441ec8bSOleg Nesterov  * get_xol_area - Allocate process's xol_area if necessary.
15216441ec8bSOleg Nesterov  * This area will be used for storing instructions for execution out of line.
15226441ec8bSOleg Nesterov  *
15236441ec8bSOleg Nesterov  * Returns the allocated area or NULL.
15246441ec8bSOleg Nesterov  */
15256441ec8bSOleg Nesterov static struct xol_area *get_xol_area(void)
15266441ec8bSOleg Nesterov {
15276441ec8bSOleg Nesterov 	struct mm_struct *mm = current->mm;
15286441ec8bSOleg Nesterov 	struct xol_area *area;
15296441ec8bSOleg Nesterov 
15306441ec8bSOleg Nesterov 	if (!mm->uprobes_state.xol_area)
1531af0d95afSOleg Nesterov 		__create_xol_area(0);
15326441ec8bSOleg Nesterov 
15335c6338b4SPaul E. McKenney 	/* Pairs with xol_add_vma() smp_store_release() */
15345c6338b4SPaul E. McKenney 	area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */
15359b545df8SOleg Nesterov 	return area;
1536d4b3b638SSrikar Dronamraju }
1537d4b3b638SSrikar Dronamraju 
1538d4b3b638SSrikar Dronamraju /*
1539d4b3b638SSrikar Dronamraju  * uprobe_clear_state - Free the area allocated for slots.
1540d4b3b638SSrikar Dronamraju  */
1541d4b3b638SSrikar Dronamraju void uprobe_clear_state(struct mm_struct *mm)
1542d4b3b638SSrikar Dronamraju {
1543d4b3b638SSrikar Dronamraju 	struct xol_area *area = mm->uprobes_state.xol_area;
1544d4b3b638SSrikar Dronamraju 
15451cc33161SRavi Bangoria 	mutex_lock(&delayed_uprobe_lock);
15461cc33161SRavi Bangoria 	delayed_uprobe_remove(NULL, mm);
15471cc33161SRavi Bangoria 	mutex_unlock(&delayed_uprobe_lock);
15481cc33161SRavi Bangoria 
1549d4b3b638SSrikar Dronamraju 	if (!area)
1550d4b3b638SSrikar Dronamraju 		return;
1551d4b3b638SSrikar Dronamraju 
1552f58bea2fSOleg Nesterov 	put_page(area->pages[0]);
1553d4b3b638SSrikar Dronamraju 	kfree(area->bitmap);
1554d4b3b638SSrikar Dronamraju 	kfree(area);
1555d4b3b638SSrikar Dronamraju }
1556d4b3b638SSrikar Dronamraju 
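/*
 * fork() brackets dup_mmap() with the two helpers below, so that
 * register_for_each_vma(), which takes dup_mmap_sem for write, cannot
 * observe a half-duplicated mm.
 */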
155732cdba1eSOleg Nesterov void uprobe_start_dup_mmap(void)
155832cdba1eSOleg Nesterov {
155932cdba1eSOleg Nesterov 	percpu_down_read(&dup_mmap_sem);
156032cdba1eSOleg Nesterov }
156132cdba1eSOleg Nesterov 
156232cdba1eSOleg Nesterov void uprobe_end_dup_mmap(void)
156332cdba1eSOleg Nesterov {
156432cdba1eSOleg Nesterov 	percpu_up_read(&dup_mmap_sem);
156532cdba1eSOleg Nesterov }
156632cdba1eSOleg Nesterov 
1567f8ac4ec9SOleg Nesterov void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
1568f8ac4ec9SOleg Nesterov {
15699f68f672SOleg Nesterov 	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
1570f8ac4ec9SOleg Nesterov 		set_bit(MMF_HAS_UPROBES, &newmm->flags);
15719f68f672SOleg Nesterov 		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
15729f68f672SOleg Nesterov 		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
15739f68f672SOleg Nesterov 	}
1574f8ac4ec9SOleg Nesterov }
1575f8ac4ec9SOleg Nesterov 
1576d4b3b638SSrikar Dronamraju /*
1577d4b3b638SSrikar Dronamraju  * Search for a free slot.
1578d4b3b638SSrikar Dronamraju  */
1579d4b3b638SSrikar Dronamraju static unsigned long xol_take_insn_slot(struct xol_area *area)
1580d4b3b638SSrikar Dronamraju {
1581d4b3b638SSrikar Dronamraju 	unsigned long slot_addr;
1582d4b3b638SSrikar Dronamraju 	int slot_nr;
1583d4b3b638SSrikar Dronamraju 
1584d4b3b638SSrikar Dronamraju 	do {
1585d4b3b638SSrikar Dronamraju 		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
1586d4b3b638SSrikar Dronamraju 		if (slot_nr < UINSNS_PER_PAGE) {
1587d4b3b638SSrikar Dronamraju 			if (!test_and_set_bit(slot_nr, area->bitmap))
1588d4b3b638SSrikar Dronamraju 				break;
1589d4b3b638SSrikar Dronamraju 
1590d4b3b638SSrikar Dronamraju 			slot_nr = UINSNS_PER_PAGE;
1591d4b3b638SSrikar Dronamraju 			continue;
1592d4b3b638SSrikar Dronamraju 		}
1593d4b3b638SSrikar Dronamraju 		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
1594d4b3b638SSrikar Dronamraju 	} while (slot_nr >= UINSNS_PER_PAGE);
1595d4b3b638SSrikar Dronamraju 
1596d4b3b638SSrikar Dronamraju 	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
1597d4b3b638SSrikar Dronamraju 	atomic_inc(&area->slot_count);
1598d4b3b638SSrikar Dronamraju 
1599d4b3b638SSrikar Dronamraju 	return slot_addr;
1600d4b3b638SSrikar Dronamraju }
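
/*
 * Worked example of the slot arithmetic above, assuming x86-like sizes
 * (PAGE_SIZE 4096, UPROBE_XOL_SLOT_BYTES 128): UINSNS_PER_PAGE is then
 * 32 and slot_nr 2 maps to area->vaddr + 256.
 */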
1601d4b3b638SSrikar Dronamraju 
1602d4b3b638SSrikar Dronamraju /*
1603a6cb3f6dSOleg Nesterov  * xol_get_insn_slot - allocate a slot for xol.
1604d4b3b638SSrikar Dronamraju  * Returns the allocated slot address or 0.
1605d4b3b638SSrikar Dronamraju  */
1606a6cb3f6dSOleg Nesterov static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
1607d4b3b638SSrikar Dronamraju {
1608d4b3b638SSrikar Dronamraju 	struct xol_area *area;
1609a6cb3f6dSOleg Nesterov 	unsigned long xol_vaddr;
1610d4b3b638SSrikar Dronamraju 
16119b545df8SOleg Nesterov 	area = get_xol_area();
1612d4b3b638SSrikar Dronamraju 	if (!area)
1613d4b3b638SSrikar Dronamraju 		return 0;
1614d4b3b638SSrikar Dronamraju 
1615a6cb3f6dSOleg Nesterov 	xol_vaddr = xol_take_insn_slot(area);
1616a6cb3f6dSOleg Nesterov 	if (unlikely(!xol_vaddr))
1617d4b3b638SSrikar Dronamraju 		return 0;
1618d4b3b638SSrikar Dronamraju 
1619f58bea2fSOleg Nesterov 	arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
1620803200e2SOleg Nesterov 			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
1621d4b3b638SSrikar Dronamraju 
1622a6cb3f6dSOleg Nesterov 	return xol_vaddr;
1623d4b3b638SSrikar Dronamraju }
1624d4b3b638SSrikar Dronamraju 
1625d4b3b638SSrikar Dronamraju /*
1626d4b3b638SSrikar Dronamraju  * xol_free_insn_slot - If the slot was earlier allocated by
1627d4b3b638SSrikar Dronamraju  * @xol_get_insn_slot(), make the slot available for
1628d4b3b638SSrikar Dronamraju  * subsequent requests.
1629d4b3b638SSrikar Dronamraju  */
1630d4b3b638SSrikar Dronamraju static void xol_free_insn_slot(struct task_struct *tsk)
1631d4b3b638SSrikar Dronamraju {
1632d4b3b638SSrikar Dronamraju 	struct xol_area *area;
1633d4b3b638SSrikar Dronamraju 	unsigned long vma_end;
1634d4b3b638SSrikar Dronamraju 	unsigned long slot_addr;
1635d4b3b638SSrikar Dronamraju 
1636d4b3b638SSrikar Dronamraju 	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
1637d4b3b638SSrikar Dronamraju 		return;
1638d4b3b638SSrikar Dronamraju 
1639d4b3b638SSrikar Dronamraju 	slot_addr = tsk->utask->xol_vaddr;
1640af4355e9SOleg Nesterov 	if (unlikely(!slot_addr))
1641d4b3b638SSrikar Dronamraju 		return;
1642d4b3b638SSrikar Dronamraju 
1643d4b3b638SSrikar Dronamraju 	area = tsk->mm->uprobes_state.xol_area;
1644d4b3b638SSrikar Dronamraju 	vma_end = area->vaddr + PAGE_SIZE;
1645d4b3b638SSrikar Dronamraju 	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
1646d4b3b638SSrikar Dronamraju 		unsigned long offset;
1647d4b3b638SSrikar Dronamraju 		int slot_nr;
1648d4b3b638SSrikar Dronamraju 
1649d4b3b638SSrikar Dronamraju 		offset = slot_addr - area->vaddr;
1650d4b3b638SSrikar Dronamraju 		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
1651d4b3b638SSrikar Dronamraju 		if (slot_nr >= UINSNS_PER_PAGE)
1652d4b3b638SSrikar Dronamraju 			return;
1653d4b3b638SSrikar Dronamraju 
1654d4b3b638SSrikar Dronamraju 		clear_bit(slot_nr, area->bitmap);
1655d4b3b638SSrikar Dronamraju 		atomic_dec(&area->slot_count);
16562a742cedSOleg Nesterov 		smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
1657d4b3b638SSrikar Dronamraju 		if (waitqueue_active(&area->wq))
1658d4b3b638SSrikar Dronamraju 			wake_up(&area->wq);
1659d4b3b638SSrikar Dronamraju 
1660d4b3b638SSrikar Dronamraju 		tsk->utask->xol_vaddr = 0;
1661d4b3b638SSrikar Dronamraju 	}
1662d4b3b638SSrikar Dronamraju }
1663d4b3b638SSrikar Dronamraju 
166472e6ae28SVictor Kamensky void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
166572e6ae28SVictor Kamensky 				  void *src, unsigned long len)
166672e6ae28SVictor Kamensky {
166772e6ae28SVictor Kamensky 	/* Initialize the slot */
166872e6ae28SVictor Kamensky 	copy_to_page(page, vaddr, src, len);
166972e6ae28SVictor Kamensky 
167072e6ae28SVictor Kamensky 	/*
167172e6ae28SVictor Kamensky 	 * We probably need flush_icache_user_range() but it needs a vma.
167272e6ae28SVictor Kamensky 	 * This should work on most architectures by default. If an
167372e6ae28SVictor Kamensky 	 * architecture needs to do something different, it can define
167472e6ae28SVictor Kamensky 	 * its own version of the function.
167572e6ae28SVictor Kamensky 	 */
167672e6ae28SVictor Kamensky 	flush_dcache_page(page);
167772e6ae28SVictor Kamensky }
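
/*
 * A rough sketch of the shape an architecture override can take; the
 * cache-maintenance step is only a stand-in comment, not any particular
 * port:
 */
#if 0
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
			   void *src, unsigned long len)
{
	void *kaddr = kmap_atomic(page);
	void *dst = kaddr + (vaddr & ~PAGE_MASK);

	memcpy(dst, src, len);
	/* an arch would sync its I/D caches for [dst, dst + len) here */
	kunmap_atomic(kaddr);
}
#endif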
167872e6ae28SVictor Kamensky 
16790326f5a9SSrikar Dronamraju /**
16800326f5a9SSrikar Dronamraju  * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
16810326f5a9SSrikar Dronamraju  * @regs: Reflects the saved state of the task after it has hit a breakpoint
16820326f5a9SSrikar Dronamraju  * instruction.
16830326f5a9SSrikar Dronamraju  * Return the address of the breakpoint instruction.
16840326f5a9SSrikar Dronamraju  */
16850326f5a9SSrikar Dronamraju unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
16860326f5a9SSrikar Dronamraju {
16870326f5a9SSrikar Dronamraju 	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
16880326f5a9SSrikar Dronamraju }
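
/*
 * On x86, for instance, the breakpoint is the one-byte int3 and the trap
 * leaves regs->ip just past it, so the subtraction above recovers the
 * address of the breakpoint itself; other architectures can override this.
 */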
16890326f5a9SSrikar Dronamraju 
1690b02ef20aSOleg Nesterov unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
1691b02ef20aSOleg Nesterov {
1692b02ef20aSOleg Nesterov 	struct uprobe_task *utask = current->utask;
1693b02ef20aSOleg Nesterov 
1694b02ef20aSOleg Nesterov 	if (unlikely(utask && utask->active_uprobe))
1695b02ef20aSOleg Nesterov 		return utask->vaddr;
1696b02ef20aSOleg Nesterov 
1697b02ef20aSOleg Nesterov 	return instruction_pointer(regs);
1698b02ef20aSOleg Nesterov }
1699b02ef20aSOleg Nesterov 
17002bb5e840SOleg Nesterov static struct return_instance *free_ret_instance(struct return_instance *ri)
17012bb5e840SOleg Nesterov {
17022bb5e840SOleg Nesterov 	struct return_instance *next = ri->next;
17032bb5e840SOleg Nesterov 	put_uprobe(ri->uprobe);
17042bb5e840SOleg Nesterov 	kfree(ri);
17052bb5e840SOleg Nesterov 	return next;
17062bb5e840SOleg Nesterov }
17072bb5e840SOleg Nesterov 
17080326f5a9SSrikar Dronamraju /*
17090326f5a9SSrikar Dronamraju  * Called with no locks held.
1710788faab7STobias Tefke  * Called in context of an exiting or an exec-ing thread.
17110326f5a9SSrikar Dronamraju  */
17120326f5a9SSrikar Dronamraju void uprobe_free_utask(struct task_struct *t)
17130326f5a9SSrikar Dronamraju {
17140326f5a9SSrikar Dronamraju 	struct uprobe_task *utask = t->utask;
17152bb5e840SOleg Nesterov 	struct return_instance *ri;
17160326f5a9SSrikar Dronamraju 
17170326f5a9SSrikar Dronamraju 	if (!utask)
17180326f5a9SSrikar Dronamraju 		return;
17190326f5a9SSrikar Dronamraju 
17200326f5a9SSrikar Dronamraju 	if (utask->active_uprobe)
17210326f5a9SSrikar Dronamraju 		put_uprobe(utask->active_uprobe);
17220326f5a9SSrikar Dronamraju 
17230dfd0eb8SAnton Arapov 	ri = utask->return_instances;
17242bb5e840SOleg Nesterov 	while (ri)
17252bb5e840SOleg Nesterov 		ri = free_ret_instance(ri);
17260dfd0eb8SAnton Arapov 
1727d4b3b638SSrikar Dronamraju 	xol_free_insn_slot(t);
17280326f5a9SSrikar Dronamraju 	kfree(utask);
17290326f5a9SSrikar Dronamraju 	t->utask = NULL;
17300326f5a9SSrikar Dronamraju }
17310326f5a9SSrikar Dronamraju 
17320326f5a9SSrikar Dronamraju /*
17335a2df662SOleg Nesterov  * Allocate a uprobe_task object for the task if necessary.
17345a2df662SOleg Nesterov  * Called when the thread hits a breakpoint.
17350326f5a9SSrikar Dronamraju  *
17360326f5a9SSrikar Dronamraju  * Returns:
17370326f5a9SSrikar Dronamraju  * - pointer to new uprobe_task on success
17380326f5a9SSrikar Dronamraju  * - NULL otherwise
17390326f5a9SSrikar Dronamraju  */
17405a2df662SOleg Nesterov static struct uprobe_task *get_utask(void)
17410326f5a9SSrikar Dronamraju {
17425a2df662SOleg Nesterov 	if (!current->utask)
17435a2df662SOleg Nesterov 		current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
17445a2df662SOleg Nesterov 	return current->utask;
17450326f5a9SSrikar Dronamraju }
17460326f5a9SSrikar Dronamraju 
1747248d3a7bSOleg Nesterov static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
1748248d3a7bSOleg Nesterov {
1749248d3a7bSOleg Nesterov 	struct uprobe_task *n_utask;
1750248d3a7bSOleg Nesterov 	struct return_instance **p, *o, *n;
1751248d3a7bSOleg Nesterov 
1752248d3a7bSOleg Nesterov 	n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1753248d3a7bSOleg Nesterov 	if (!n_utask)
1754248d3a7bSOleg Nesterov 		return -ENOMEM;
1755248d3a7bSOleg Nesterov 	t->utask = n_utask;
1756248d3a7bSOleg Nesterov 
1757248d3a7bSOleg Nesterov 	p = &n_utask->return_instances;
1758248d3a7bSOleg Nesterov 	for (o = o_utask->return_instances; o; o = o->next) {
1759248d3a7bSOleg Nesterov 		n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
1760248d3a7bSOleg Nesterov 		if (!n)
1761248d3a7bSOleg Nesterov 			return -ENOMEM;
1762248d3a7bSOleg Nesterov 
1763248d3a7bSOleg Nesterov 		*n = *o;
1764f231722aSOleg Nesterov 		get_uprobe(n->uprobe);
1765248d3a7bSOleg Nesterov 		n->next = NULL;
1766248d3a7bSOleg Nesterov 
1767248d3a7bSOleg Nesterov 		*p = n;
1768248d3a7bSOleg Nesterov 		p = &n->next;
1769248d3a7bSOleg Nesterov 		n_utask->depth++;
1770248d3a7bSOleg Nesterov 	}
1771248d3a7bSOleg Nesterov 
1772248d3a7bSOleg Nesterov 	return 0;
1773248d3a7bSOleg Nesterov }
1774248d3a7bSOleg Nesterov 
1775248d3a7bSOleg Nesterov static void uprobe_warn(struct task_struct *t, const char *msg)
1776248d3a7bSOleg Nesterov {
1777248d3a7bSOleg Nesterov 	pr_warn("uprobe: %s:%d failed to %s\n",
1778248d3a7bSOleg Nesterov 			current->comm, current->pid, msg);
1779248d3a7bSOleg Nesterov }
1780248d3a7bSOleg Nesterov 
1781aa59c53fSOleg Nesterov static void dup_xol_work(struct callback_head *work)
1782aa59c53fSOleg Nesterov {
1783aa59c53fSOleg Nesterov 	if (current->flags & PF_EXITING)
1784aa59c53fSOleg Nesterov 		return;
1785aa59c53fSOleg Nesterov 
1786598fdc1dSMichal Hocko 	if (!__create_xol_area(current->utask->dup_xol_addr) &&
1787598fdc1dSMichal Hocko 			!fatal_signal_pending(current))
1788aa59c53fSOleg Nesterov 		uprobe_warn(current, "dup xol area");
1789aa59c53fSOleg Nesterov }
1790aa59c53fSOleg Nesterov 
1791e78aebfdSAnton Arapov /*
1792b68e0749SOleg Nesterov  * Called in context of a new clone/fork from copy_process.
1793b68e0749SOleg Nesterov  */
17943ab67966SOleg Nesterov void uprobe_copy_process(struct task_struct *t, unsigned long flags)
1795b68e0749SOleg Nesterov {
1796248d3a7bSOleg Nesterov 	struct uprobe_task *utask = current->utask;
1797248d3a7bSOleg Nesterov 	struct mm_struct *mm = current->mm;
1798aa59c53fSOleg Nesterov 	struct xol_area *area;
1799248d3a7bSOleg Nesterov 
1800b68e0749SOleg Nesterov 	t->utask = NULL;
1801248d3a7bSOleg Nesterov 
18023ab67966SOleg Nesterov 	if (!utask || !utask->return_instances)
18033ab67966SOleg Nesterov 		return;
18043ab67966SOleg Nesterov 
18053ab67966SOleg Nesterov 	if (mm == t->mm && !(flags & CLONE_VFORK))
1806248d3a7bSOleg Nesterov 		return;
1807248d3a7bSOleg Nesterov 
1808248d3a7bSOleg Nesterov 	if (dup_utask(t, utask))
1809248d3a7bSOleg Nesterov 		return uprobe_warn(t, "dup ret instances");
1810aa59c53fSOleg Nesterov 
1811aa59c53fSOleg Nesterov 	/* The task can fork() after dup_xol_work() fails */
1812aa59c53fSOleg Nesterov 	area = mm->uprobes_state.xol_area;
1813aa59c53fSOleg Nesterov 	if (!area)
1814aa59c53fSOleg Nesterov 		return uprobe_warn(t, "dup xol area");
1815aa59c53fSOleg Nesterov 
18163ab67966SOleg Nesterov 	if (mm == t->mm)
18173ab67966SOleg Nesterov 		return;
18183ab67966SOleg Nesterov 
181932473431SOleg Nesterov 	t->utask->dup_xol_addr = area->vaddr;
182032473431SOleg Nesterov 	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
182132473431SOleg Nesterov 	task_work_add(t, &t->utask->dup_xol_work, true);
1822b68e0749SOleg Nesterov }
1823b68e0749SOleg Nesterov 
1824b68e0749SOleg Nesterov /*
1825e78aebfdSAnton Arapov  * The current area->vaddr notion assumes the trampoline address is always
1826e78aebfdSAnton Arapov  * equal to area->vaddr.
1827e78aebfdSAnton Arapov  *
1828e78aebfdSAnton Arapov  * Returns -1 in case the xol_area is not allocated.
1829e78aebfdSAnton Arapov  */
1830e78aebfdSAnton Arapov static unsigned long get_trampoline_vaddr(void)
1831e78aebfdSAnton Arapov {
1832e78aebfdSAnton Arapov 	struct xol_area *area;
1833e78aebfdSAnton Arapov 	unsigned long trampoline_vaddr = -1;
1834e78aebfdSAnton Arapov 
18355c6338b4SPaul E. McKenney 	/* Pairs with xol_add_vma() smp_store_release() */
18365c6338b4SPaul E. McKenney 	area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */
1837e78aebfdSAnton Arapov 	if (area)
1838e78aebfdSAnton Arapov 		trampoline_vaddr = area->vaddr;
1839e78aebfdSAnton Arapov 
1840e78aebfdSAnton Arapov 	return trampoline_vaddr;
1841e78aebfdSAnton Arapov }
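
/*
 * handle_swbp() compares the reported breakpoint address against this
 * value to detect returns through the trampoline, see handle_trampoline().
 */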
1842e78aebfdSAnton Arapov 
1843db087ef6SOleg Nesterov static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
1844db087ef6SOleg Nesterov 					struct pt_regs *regs)
1845a5b7e1a8SOleg Nesterov {
1846a5b7e1a8SOleg Nesterov 	struct return_instance *ri = utask->return_instances;
1847db087ef6SOleg Nesterov 	enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;
184886dcb702SOleg Nesterov 
184986dcb702SOleg Nesterov 	while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
1850a5b7e1a8SOleg Nesterov 		ri = free_ret_instance(ri);
1851a5b7e1a8SOleg Nesterov 		utask->depth--;
1852a5b7e1a8SOleg Nesterov 	}
1853a5b7e1a8SOleg Nesterov 	utask->return_instances = ri;
1854a5b7e1a8SOleg Nesterov }
1855a5b7e1a8SOleg Nesterov 
18560dfd0eb8SAnton Arapov static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
18570dfd0eb8SAnton Arapov {
18580dfd0eb8SAnton Arapov 	struct return_instance *ri;
18590dfd0eb8SAnton Arapov 	struct uprobe_task *utask;
18600dfd0eb8SAnton Arapov 	unsigned long orig_ret_vaddr, trampoline_vaddr;
1861db087ef6SOleg Nesterov 	bool chained;
18620dfd0eb8SAnton Arapov 
18630dfd0eb8SAnton Arapov 	if (!get_xol_area())
18640dfd0eb8SAnton Arapov 		return;
18650dfd0eb8SAnton Arapov 
18660dfd0eb8SAnton Arapov 	utask = get_utask();
18670dfd0eb8SAnton Arapov 	if (!utask)
18680dfd0eb8SAnton Arapov 		return;
18690dfd0eb8SAnton Arapov 
1870ded49c55SAnton Arapov 	if (utask->depth >= MAX_URETPROBE_DEPTH) {
1871ded49c55SAnton Arapov 		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
1872ded49c55SAnton Arapov 				" nestedness limit pid/tgid=%d/%d\n",
1873ded49c55SAnton Arapov 				current->pid, current->tgid);
1874ded49c55SAnton Arapov 		return;
1875ded49c55SAnton Arapov 	}
1876ded49c55SAnton Arapov 
18776c58d0e4SOleg Nesterov 	ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
18780dfd0eb8SAnton Arapov 	if (!ri)
18796c58d0e4SOleg Nesterov 		return;
18800dfd0eb8SAnton Arapov 
18810dfd0eb8SAnton Arapov 	trampoline_vaddr = get_trampoline_vaddr();
18820dfd0eb8SAnton Arapov 	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
18830dfd0eb8SAnton Arapov 	if (orig_ret_vaddr == -1)
18840dfd0eb8SAnton Arapov 		goto fail;
18850dfd0eb8SAnton Arapov 
1886a5b7e1a8SOleg Nesterov 	/* drop the entries invalidated by longjmp() */
1887db087ef6SOleg Nesterov 	chained = (orig_ret_vaddr == trampoline_vaddr);
1888db087ef6SOleg Nesterov 	cleanup_return_instances(utask, chained, regs);
1889a5b7e1a8SOleg Nesterov 
18900dfd0eb8SAnton Arapov 	/*
18910dfd0eb8SAnton Arapov 	 * We don't want to keep the trampoline address on the stack, rather we
18920dfd0eb8SAnton Arapov 	 * keep the original return address of the first caller through all the
18930dfd0eb8SAnton Arapov 	 * subsequent instances. This also makes breakpoint unwinding easier.
18940dfd0eb8SAnton Arapov 	 */
1895db087ef6SOleg Nesterov 	if (chained) {
18960dfd0eb8SAnton Arapov 		if (!utask->return_instances) {
18970dfd0eb8SAnton Arapov 			/*
18980dfd0eb8SAnton Arapov 			 * This situation is not possible. Likely we have an
18990dfd0eb8SAnton Arapov 			 * attack from user-space.
19000dfd0eb8SAnton Arapov 			 */
19016c58d0e4SOleg Nesterov 			uprobe_warn(current, "handle tail call");
19020dfd0eb8SAnton Arapov 			goto fail;
19030dfd0eb8SAnton Arapov 		}
19040dfd0eb8SAnton Arapov 		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
19050dfd0eb8SAnton Arapov 	}
19060dfd0eb8SAnton Arapov 
1907f231722aSOleg Nesterov 	ri->uprobe = get_uprobe(uprobe);
19080dfd0eb8SAnton Arapov 	ri->func = instruction_pointer(regs);
19097b868e48SOleg Nesterov 	ri->stack = user_stack_pointer(regs);
19100dfd0eb8SAnton Arapov 	ri->orig_ret_vaddr = orig_ret_vaddr;
19110dfd0eb8SAnton Arapov 	ri->chained = chained;
19120dfd0eb8SAnton Arapov 
1913ded49c55SAnton Arapov 	utask->depth++;
19140dfd0eb8SAnton Arapov 	ri->next = utask->return_instances;
19150dfd0eb8SAnton Arapov 	utask->return_instances = ri;
19160dfd0eb8SAnton Arapov 
19170dfd0eb8SAnton Arapov 	return;
19180dfd0eb8SAnton Arapov  fail:
19190dfd0eb8SAnton Arapov 	kfree(ri);
19200dfd0eb8SAnton Arapov }
19210dfd0eb8SAnton Arapov 
19220326f5a9SSrikar Dronamraju /* Prepare to single-step probed instruction out of line. */
19230326f5a9SSrikar Dronamraju static int
1924a6cb3f6dSOleg Nesterov pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
19250326f5a9SSrikar Dronamraju {
1926a6cb3f6dSOleg Nesterov 	struct uprobe_task *utask;
1927a6cb3f6dSOleg Nesterov 	unsigned long xol_vaddr;
1928aba51024SOleg Nesterov 	int err;
1929d4b3b638SSrikar Dronamraju 
1930608e7427SOleg Nesterov 	utask = get_utask();
1931608e7427SOleg Nesterov 	if (!utask)
1932608e7427SOleg Nesterov 		return -ENOMEM;
1933a6cb3f6dSOleg Nesterov 
1934a6cb3f6dSOleg Nesterov 	xol_vaddr = xol_get_insn_slot(uprobe);
1935a6cb3f6dSOleg Nesterov 	if (!xol_vaddr)
1936a6cb3f6dSOleg Nesterov 		return -ENOMEM;
1937a6cb3f6dSOleg Nesterov 
1938a6cb3f6dSOleg Nesterov 	utask->xol_vaddr = xol_vaddr;
1939a6cb3f6dSOleg Nesterov 	utask->vaddr = bp_vaddr;
1940a6cb3f6dSOleg Nesterov 
1941aba51024SOleg Nesterov 	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
1942aba51024SOleg Nesterov 	if (unlikely(err)) {
1943aba51024SOleg Nesterov 		xol_free_insn_slot(current);
1944aba51024SOleg Nesterov 		return err;
1945aba51024SOleg Nesterov 	}
1946aba51024SOleg Nesterov 
1947608e7427SOleg Nesterov 	utask->active_uprobe = uprobe;
1948608e7427SOleg Nesterov 	utask->state = UTASK_SSTEP;
1949aba51024SOleg Nesterov 	return 0;
19500326f5a9SSrikar Dronamraju }
19510326f5a9SSrikar Dronamraju 
19520326f5a9SSrikar Dronamraju /*
19530326f5a9SSrikar Dronamraju  * If we are singlestepping, then ensure this thread is not connected to
19540326f5a9SSrikar Dronamraju  * non-fatal signals until completion of singlestep.  When xol insn itself
19550326f5a9SSrikar Dronamraju  * triggers the signal,  restart the original insn even if the task is
19560326f5a9SSrikar Dronamraju  * already SIGKILL'ed (since coredump should report the correct ip).  This
19570326f5a9SSrikar Dronamraju  * is even more important if the task has a handler for SIGSEGV/etc: the
19580326f5a9SSrikar Dronamraju  * _same_ instruction should be repeated again after return from the signal
19590326f5a9SSrikar Dronamraju  * handler, and SSTEP can never finish in this case.
19600326f5a9SSrikar Dronamraju  */
19610326f5a9SSrikar Dronamraju bool uprobe_deny_signal(void)
19620326f5a9SSrikar Dronamraju {
19630326f5a9SSrikar Dronamraju 	struct task_struct *t = current;
19640326f5a9SSrikar Dronamraju 	struct uprobe_task *utask = t->utask;
19650326f5a9SSrikar Dronamraju 
19660326f5a9SSrikar Dronamraju 	if (likely(!utask || !utask->active_uprobe))
19670326f5a9SSrikar Dronamraju 		return false;
19680326f5a9SSrikar Dronamraju 
19690326f5a9SSrikar Dronamraju 	WARN_ON_ONCE(utask->state != UTASK_SSTEP);
19700326f5a9SSrikar Dronamraju 
19710326f5a9SSrikar Dronamraju 	if (signal_pending(t)) {
19720326f5a9SSrikar Dronamraju 		spin_lock_irq(&t->sighand->siglock);
19730326f5a9SSrikar Dronamraju 		clear_tsk_thread_flag(t, TIF_SIGPENDING);
19740326f5a9SSrikar Dronamraju 		spin_unlock_irq(&t->sighand->siglock);
19750326f5a9SSrikar Dronamraju 
19760326f5a9SSrikar Dronamraju 		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
19770326f5a9SSrikar Dronamraju 			utask->state = UTASK_SSTEP_TRAPPED;
19780326f5a9SSrikar Dronamraju 			set_tsk_thread_flag(t, TIF_UPROBE);
19790326f5a9SSrikar Dronamraju 		}
19800326f5a9SSrikar Dronamraju 	}
19810326f5a9SSrikar Dronamraju 
19820326f5a9SSrikar Dronamraju 	return true;
19830326f5a9SSrikar Dronamraju }
19840326f5a9SSrikar Dronamraju 
1985499a4f3eSOleg Nesterov static void mmf_recalc_uprobes(struct mm_struct *mm)
1986499a4f3eSOleg Nesterov {
1987499a4f3eSOleg Nesterov 	struct vm_area_struct *vma;
1988499a4f3eSOleg Nesterov 
1989499a4f3eSOleg Nesterov 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
1990499a4f3eSOleg Nesterov 		if (!valid_vma(vma, false))
1991499a4f3eSOleg Nesterov 			continue;
1992499a4f3eSOleg Nesterov 		/*
1993499a4f3eSOleg Nesterov 		 * This is not strictly accurate; we can race with
1994499a4f3eSOleg Nesterov 		 * uprobe_unregister() and see the already removed
1995499a4f3eSOleg Nesterov 		 * uprobe if delete_uprobe() was not yet called.
199663633cbfSOleg Nesterov 		 * Or this uprobe can be filtered out.
1997499a4f3eSOleg Nesterov 		 */
1998499a4f3eSOleg Nesterov 		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
1999499a4f3eSOleg Nesterov 			return;
2000499a4f3eSOleg Nesterov 	}
2001499a4f3eSOleg Nesterov 
2002499a4f3eSOleg Nesterov 	clear_bit(MMF_HAS_UPROBES, &mm->flags);
2003499a4f3eSOleg Nesterov }
2004499a4f3eSOleg Nesterov 
20050908ad6eSAnanth N Mavinakayanahalli static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
2006ec75fba9SOleg Nesterov {
2007ec75fba9SOleg Nesterov 	struct page *page;
2008ec75fba9SOleg Nesterov 	uprobe_opcode_t opcode;
2009ec75fba9SOleg Nesterov 	int result;
2010ec75fba9SOleg Nesterov 
2011ec75fba9SOleg Nesterov 	pagefault_disable();
2012bd28b145SLinus Torvalds 	result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
2013ec75fba9SOleg Nesterov 	pagefault_enable();
2014ec75fba9SOleg Nesterov 
2015ec75fba9SOleg Nesterov 	if (likely(result == 0))
2016ec75fba9SOleg Nesterov 		goto out;
2017ec75fba9SOleg Nesterov 
20181e987790SDave Hansen 	/*
20191e987790SDave Hansen 	 * The NULL 'tsk' here ensures that any faults that occur here
20201e987790SDave Hansen 	 * will not be accounted to the task.  'mm' *is* current->mm,
20211e987790SDave Hansen 	 * but we treat this as a 'remote' access since it is
20221e987790SDave Hansen 	 * essentially a kernel access to the memory.
20231e987790SDave Hansen 	 */
20249beae1eaSLorenzo Stoakes 	result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page,
20255b56d49fSLorenzo Stoakes 			NULL, NULL);
2026ec75fba9SOleg Nesterov 	if (result < 0)
2027ec75fba9SOleg Nesterov 		return result;
2028ec75fba9SOleg Nesterov 
2029ab0d805cSOleg Nesterov 	copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
2030ec75fba9SOleg Nesterov 	put_page(page);
2031ec75fba9SOleg Nesterov  out:
20320908ad6eSAnanth N Mavinakayanahalli 	/* This needs to return true for any variant of the trap insn */
20330908ad6eSAnanth N Mavinakayanahalli 	return is_trap_insn(&opcode);
2034ec75fba9SOleg Nesterov }
2035ec75fba9SOleg Nesterov 
2036d790d346SOleg Nesterov static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
20370326f5a9SSrikar Dronamraju {
20383a9ea052SOleg Nesterov 	struct mm_struct *mm = current->mm;
20393a9ea052SOleg Nesterov 	struct uprobe *uprobe = NULL;
20400326f5a9SSrikar Dronamraju 	struct vm_area_struct *vma;
20410326f5a9SSrikar Dronamraju 
20420326f5a9SSrikar Dronamraju 	down_read(&mm->mmap_sem);
20430326f5a9SSrikar Dronamraju 	vma = find_vma(mm, bp_vaddr);
20443a9ea052SOleg Nesterov 	if (vma && vma->vm_start <= bp_vaddr) {
20453a9ea052SOleg Nesterov 		if (valid_vma(vma, false)) {
2046f281769eSOleg Nesterov 			struct inode *inode = file_inode(vma->vm_file);
2047cb113b47SOleg Nesterov 			loff_t offset = vaddr_to_offset(vma, bp_vaddr);
20480326f5a9SSrikar Dronamraju 
20490326f5a9SSrikar Dronamraju 			uprobe = find_uprobe(inode, offset);
20500326f5a9SSrikar Dronamraju 		}
2051d790d346SOleg Nesterov 
2052d790d346SOleg Nesterov 		if (!uprobe)
20530908ad6eSAnanth N Mavinakayanahalli 			*is_swbp = is_trap_at_addr(mm, bp_vaddr);
2054d790d346SOleg Nesterov 	} else {
2055d790d346SOleg Nesterov 		*is_swbp = -EFAULT;
20563a9ea052SOleg Nesterov 	}
2057499a4f3eSOleg Nesterov 
2058499a4f3eSOleg Nesterov 	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
2059499a4f3eSOleg Nesterov 		mmf_recalc_uprobes(mm);
20600326f5a9SSrikar Dronamraju 	up_read(&mm->mmap_sem);
20610326f5a9SSrikar Dronamraju 
20623a9ea052SOleg Nesterov 	return uprobe;
20633a9ea052SOleg Nesterov }
20643a9ea052SOleg Nesterov 
2065da1816b1SOleg Nesterov static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
2066da1816b1SOleg Nesterov {
2067da1816b1SOleg Nesterov 	struct uprobe_consumer *uc;
2068da1816b1SOleg Nesterov 	int remove = UPROBE_HANDLER_REMOVE;
20690dfd0eb8SAnton Arapov 	bool need_prep = false; /* prepare return uprobe, when needed */
2070da1816b1SOleg Nesterov 
2071da1816b1SOleg Nesterov 	down_read(&uprobe->register_rwsem);
2072da1816b1SOleg Nesterov 	for (uc = uprobe->consumers; uc; uc = uc->next) {
2073ea024870SAnton Arapov 		int rc = 0;
2074da1816b1SOleg Nesterov 
2075ea024870SAnton Arapov 		if (uc->handler) {
2076ea024870SAnton Arapov 			rc = uc->handler(uc, regs);
2077da1816b1SOleg Nesterov 			WARN(rc & ~UPROBE_HANDLER_MASK,
2078d75f773cSSakari Ailus 				"bad rc=0x%x from %ps()\n", rc, uc->handler);
2079ea024870SAnton Arapov 		}
20800dfd0eb8SAnton Arapov 
20810dfd0eb8SAnton Arapov 		if (uc->ret_handler)
20820dfd0eb8SAnton Arapov 			need_prep = true;
20830dfd0eb8SAnton Arapov 
2084da1816b1SOleg Nesterov 		remove &= rc;
2085da1816b1SOleg Nesterov 	}
2086da1816b1SOleg Nesterov 
20870dfd0eb8SAnton Arapov 	if (need_prep && !remove)
20880dfd0eb8SAnton Arapov 		prepare_uretprobe(uprobe, regs); /* put bp at return */
20890dfd0eb8SAnton Arapov 
2090da1816b1SOleg Nesterov 	if (remove && uprobe->consumers) {
2091da1816b1SOleg Nesterov 		WARN_ON(!uprobe_is_active(uprobe));
2092da1816b1SOleg Nesterov 		unapply_uprobe(uprobe, current->mm);
2093da1816b1SOleg Nesterov 	}
2094da1816b1SOleg Nesterov 	up_read(&uprobe->register_rwsem);
2095da1816b1SOleg Nesterov }
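
/*
 * Thus breakpoints survive only while some consumer wants them: if every
 * ->handler returns UPROBE_HANDLER_REMOVE, unapply_uprobe() strips the
 * breakpoints from current->mm while the registration itself remains.
 */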
2096da1816b1SOleg Nesterov 
2097fec8898dSAnton Arapov static void
2098fec8898dSAnton Arapov handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
2099fec8898dSAnton Arapov {
2100fec8898dSAnton Arapov 	struct uprobe *uprobe = ri->uprobe;
2101fec8898dSAnton Arapov 	struct uprobe_consumer *uc;
2102fec8898dSAnton Arapov 
2103fec8898dSAnton Arapov 	down_read(&uprobe->register_rwsem);
2104fec8898dSAnton Arapov 	for (uc = uprobe->consumers; uc; uc = uc->next) {
2105fec8898dSAnton Arapov 		if (uc->ret_handler)
2106fec8898dSAnton Arapov 			uc->ret_handler(uc, ri->func, regs);
2107fec8898dSAnton Arapov 	}
2108fec8898dSAnton Arapov 	up_read(&uprobe->register_rwsem);
2109fec8898dSAnton Arapov }
2110fec8898dSAnton Arapov 
2111a83cfeb9SOleg Nesterov static struct return_instance *find_next_ret_chain(struct return_instance *ri)
2112a83cfeb9SOleg Nesterov {
2113a83cfeb9SOleg Nesterov 	bool chained;
2114a83cfeb9SOleg Nesterov 
2115a83cfeb9SOleg Nesterov 	do {
2116a83cfeb9SOleg Nesterov 		chained = ri->chained;
2117a83cfeb9SOleg Nesterov 		ri = ri->next;	/* can't be NULL if chained */
2118a83cfeb9SOleg Nesterov 	} while (chained);
2119a83cfeb9SOleg Nesterov 
2120a83cfeb9SOleg Nesterov 	return ri;
2121a83cfeb9SOleg Nesterov }
2122a83cfeb9SOleg Nesterov 
21230b5256c7SOleg Nesterov static void handle_trampoline(struct pt_regs *regs)
2124fec8898dSAnton Arapov {
2125fec8898dSAnton Arapov 	struct uprobe_task *utask;
2126a83cfeb9SOleg Nesterov 	struct return_instance *ri, *next;
21275eeb50deSOleg Nesterov 	bool valid;
2128fec8898dSAnton Arapov 
2129fec8898dSAnton Arapov 	utask = current->utask;
2130fec8898dSAnton Arapov 	if (!utask)
21310b5256c7SOleg Nesterov 		goto sigill;
2132fec8898dSAnton Arapov 
2133fec8898dSAnton Arapov 	ri = utask->return_instances;
2134fec8898dSAnton Arapov 	if (!ri)
21350b5256c7SOleg Nesterov 		goto sigill;
2136fec8898dSAnton Arapov 
21375eeb50deSOleg Nesterov 	do {
2138fec8898dSAnton Arapov 		/*
21395eeb50deSOleg Nesterov 		 * We should throw out the frames invalidated by longjmp().
21405eeb50deSOleg Nesterov 		 * If this chain is valid, then the next one should be alive
21415eeb50deSOleg Nesterov 		 * or NULL; the latter case means that nobody but ri->func
21425eeb50deSOleg Nesterov 		 * could hit this trampoline on return. TODO: sigaltstack().
2143fec8898dSAnton Arapov 		 */
21445eeb50deSOleg Nesterov 		next = find_next_ret_chain(ri);
214586dcb702SOleg Nesterov 		valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);
21465eeb50deSOleg Nesterov 
2147fec8898dSAnton Arapov 		instruction_pointer_set(regs, ri->orig_ret_vaddr);
2148a83cfeb9SOleg Nesterov 		do {
21495eeb50deSOleg Nesterov 			if (valid)
2150fec8898dSAnton Arapov 				handle_uretprobe_chain(ri, regs);
21512bb5e840SOleg Nesterov 			ri = free_ret_instance(ri);
2152878b5a6eSOleg Nesterov 			utask->depth--;
2153a83cfeb9SOleg Nesterov 		} while (ri != next);
21545eeb50deSOleg Nesterov 	} while (!valid);
2155fec8898dSAnton Arapov 
2156fec8898dSAnton Arapov 	utask->return_instances = ri;
21570b5256c7SOleg Nesterov 	return;
2158fec8898dSAnton Arapov 
21590b5256c7SOleg Nesterov  sigill:
21600b5256c7SOleg Nesterov 	uprobe_warn(current, "handle uretprobe, sending SIGILL.");
21613cf5d076SEric W. Biederman 	force_sig(SIGILL);
21620b5256c7SOleg Nesterov 
2163fec8898dSAnton Arapov }
2164fec8898dSAnton Arapov 
21656fe50a28SDavid A. Long bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
21666fe50a28SDavid A. Long {
21676fe50a28SDavid A. Long 	return false;
21686fe50a28SDavid A. Long }
21696fe50a28SDavid A. Long 
217086dcb702SOleg Nesterov bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
217186dcb702SOleg Nesterov 					struct pt_regs *regs)
217297da8976SOleg Nesterov {
217397da8976SOleg Nesterov 	return true;
217497da8976SOleg Nesterov }
217597da8976SOleg Nesterov 
21763a9ea052SOleg Nesterov /*
21773a9ea052SOleg Nesterov  * Run handler and ask thread to singlestep.
21783a9ea052SOleg Nesterov  * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
21793a9ea052SOleg Nesterov  */
21803a9ea052SOleg Nesterov static void handle_swbp(struct pt_regs *regs)
21813a9ea052SOleg Nesterov {
21823a9ea052SOleg Nesterov 	struct uprobe *uprobe;
21833a9ea052SOleg Nesterov 	unsigned long bp_vaddr;
218456bb4cf6SOleg Nesterov 	int uninitialized_var(is_swbp);
21853a9ea052SOleg Nesterov 
21863a9ea052SOleg Nesterov 	bp_vaddr = uprobe_get_swbp_addr(regs);
21870b5256c7SOleg Nesterov 	if (bp_vaddr == get_trampoline_vaddr())
21880b5256c7SOleg Nesterov 		return handle_trampoline(regs);
2189fec8898dSAnton Arapov 
2190fec8898dSAnton Arapov 	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
21910326f5a9SSrikar Dronamraju 	if (!uprobe) {
219256bb4cf6SOleg Nesterov 		if (is_swbp > 0) {
21930326f5a9SSrikar Dronamraju 			/* No matching uprobe; signal SIGTRAP. */
21940326f5a9SSrikar Dronamraju 			send_sig(SIGTRAP, current, 0);
219556bb4cf6SOleg Nesterov 		} else {
219656bb4cf6SOleg Nesterov 			/*
219756bb4cf6SOleg Nesterov 			 * Either we raced with uprobe_unregister() or we can't
219856bb4cf6SOleg Nesterov 			 * access this memory. The latter is only possible if
219956bb4cf6SOleg Nesterov 			 * another thread plays with our ->mm. In both cases
220056bb4cf6SOleg Nesterov 			 * we can simply restart. If this vma was unmapped we
220156bb4cf6SOleg Nesterov 			 * can pretend this insn was not executed yet and get
220256bb4cf6SOleg Nesterov 			 * the (correct) SIGSEGV after restart.
220356bb4cf6SOleg Nesterov 			 */
220456bb4cf6SOleg Nesterov 			instruction_pointer_set(regs, bp_vaddr);
220556bb4cf6SOleg Nesterov 		}
22060326f5a9SSrikar Dronamraju 		return;
22070326f5a9SSrikar Dronamraju 	}
220874e59dfcSOleg Nesterov 
220974e59dfcSOleg Nesterov 	/* change it in advance for ->handler() and restart */
221074e59dfcSOleg Nesterov 	instruction_pointer_set(regs, bp_vaddr);
221174e59dfcSOleg Nesterov 
2212142b18ddSOleg Nesterov 	/*
2213142b18ddSOleg Nesterov 	 * TODO: move copy_insn/etc into _register and remove this hack.
2214142b18ddSOleg Nesterov 	 * After we hit the bp, _unregister + _register can install the
2215142b18ddSOleg Nesterov 	 * new and not-yet-analyzed uprobe at the same address, restart.
2216142b18ddSOleg Nesterov 	 */
221771434f2fSOleg Nesterov 	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
221874e59dfcSOleg Nesterov 		goto out;
22190326f5a9SSrikar Dronamraju 
222009d3f015SAndrea Parri 	/*
222109d3f015SAndrea Parri 	 * Pairs with the smp_wmb() in prepare_uprobe().
222209d3f015SAndrea Parri 	 *
222309d3f015SAndrea Parri 	 * Guarantees that if we see the UPROBE_COPY_INSN bit set, then
222409d3f015SAndrea Parri 	 * we must also see the stores to &uprobe->arch performed by the
222509d3f015SAndrea Parri 	 * prepare_uprobe() call.
222609d3f015SAndrea Parri 	 */
222709d3f015SAndrea Parri 	smp_rmb();
222809d3f015SAndrea Parri 
222972fd293aSOleg Nesterov 	/* Tracing handlers use ->utask to communicate with fetch methods */
223072fd293aSOleg Nesterov 	if (!get_utask())
223172fd293aSOleg Nesterov 		goto out;
223272fd293aSOleg Nesterov 
22336fe50a28SDavid A. Long 	if (arch_uprobe_ignore(&uprobe->arch, regs))
22346fe50a28SDavid A. Long 		goto out;
22356fe50a28SDavid A. Long 
22360326f5a9SSrikar Dronamraju 	handler_chain(uprobe, regs);
22376fe50a28SDavid A. Long 
22388a6b1732SOleg Nesterov 	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
22390578a970SOleg Nesterov 		goto out;
22400326f5a9SSrikar Dronamraju 
2241608e7427SOleg Nesterov 	if (!pre_ssout(uprobe, regs, bp_vaddr))
22420326f5a9SSrikar Dronamraju 		return;
22430326f5a9SSrikar Dronamraju 
22448a6b1732SOleg Nesterov 	/* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */
22450578a970SOleg Nesterov out:
22460326f5a9SSrikar Dronamraju 	put_uprobe(uprobe);
22470326f5a9SSrikar Dronamraju }
22480326f5a9SSrikar Dronamraju 
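/*
 * Editor's note: a minimal, illustrative in-kernel consumer, sketched
 * here to show what the handler_chain() call above ends up invoking.
 * The probed_inode/probed_offset pair is a placeholder for a real
 * file:offset location; this block is not part of the original file.
 */
#if 0	/* illustration only, not part of this file */
static int my_handler(struct uprobe_consumer *self, struct pt_regs *regs)
{
	pr_info("uprobe hit, ip=%lx\n", instruction_pointer(regs));
	return 0;	/* UPROBE_HANDLER_REMOVE would drop this consumer */
}

static struct uprobe_consumer my_consumer = {
	.handler	= my_handler,
};

static struct inode *probed_inode;	/* placeholder */
static loff_t probed_offset;		/* placeholder */

static int __init my_probe_init(void)
{
	return uprobe_register(probed_inode, probed_offset, &my_consumer);
}

static void __exit my_probe_exit(void)
{
	uprobe_unregister(probed_inode, probed_offset, &my_consumer);
}
#endif
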
22490326f5a9SSrikar Dronamraju /*
22500326f5a9SSrikar Dronamraju  * Perform the required fix-ups and disable singlestep.
22510326f5a9SSrikar Dronamraju  * Allow pending signals to take effect.
22520326f5a9SSrikar Dronamraju  */
22530326f5a9SSrikar Dronamraju static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
22540326f5a9SSrikar Dronamraju {
22550326f5a9SSrikar Dronamraju 	struct uprobe *uprobe;
2256014940baSOleg Nesterov 	int err = 0;
22570326f5a9SSrikar Dronamraju 
22580326f5a9SSrikar Dronamraju 	uprobe = utask->active_uprobe;
22590326f5a9SSrikar Dronamraju 	if (utask->state == UTASK_SSTEP_ACK)
2260014940baSOleg Nesterov 		err = arch_uprobe_post_xol(&uprobe->arch, regs);
22610326f5a9SSrikar Dronamraju 	else if (utask->state == UTASK_SSTEP_TRAPPED)
22620326f5a9SSrikar Dronamraju 		arch_uprobe_abort_xol(&uprobe->arch, regs);
22630326f5a9SSrikar Dronamraju 	else
22640326f5a9SSrikar Dronamraju 		WARN_ON_ONCE(1);
22650326f5a9SSrikar Dronamraju 
22660326f5a9SSrikar Dronamraju 	put_uprobe(uprobe);
22670326f5a9SSrikar Dronamraju 	utask->active_uprobe = NULL;
22680326f5a9SSrikar Dronamraju 	utask->state = UTASK_RUNNING;
2269d4b3b638SSrikar Dronamraju 	xol_free_insn_slot(current);
22700326f5a9SSrikar Dronamraju 
22710326f5a9SSrikar Dronamraju 	spin_lock_irq(&current->sighand->siglock);
22720326f5a9SSrikar Dronamraju 	recalc_sigpending(); /* see uprobe_deny_signal() */
22730326f5a9SSrikar Dronamraju 	spin_unlock_irq(&current->sighand->siglock);
2274014940baSOleg Nesterov 
2275014940baSOleg Nesterov 	if (unlikely(err)) {
2276014940baSOleg Nesterov 		uprobe_warn(current, "execute the probed insn, sending SIGILL.");
22773cf5d076SEric W. Biederman 		force_sig(SIGILL);
2278014940baSOleg Nesterov 	}
22790326f5a9SSrikar Dronamraju }
22800326f5a9SSrikar Dronamraju 
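/*
 * Editor's note: a simplified sketch of the UTASK_SSTEP_TRAPPED path
 * above.  Real ports (x86, for one) also restore arch-private state
 * such as saved flags and trap numbers; this minimal version only
 * rewinds the instruction pointer so the probed insn appears to have
 * never run.  Illustration only.
 */
#if 0	/* illustration only, not part of this file */
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/* Re-execute from the original breakpoint address. */
	instruction_pointer_set(regs, utask->vaddr);
}
#endif
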
22810326f5a9SSrikar Dronamraju /*
22821b08e907SOleg Nesterov  * On a breakpoint hit, the breakpoint notifier sets the TIF_UPROBE flag
22831b08e907SOleg Nesterov  * and allows the thread to return from the interrupt. After that,
22841b08e907SOleg Nesterov  * handle_swbp() sets utask->active_uprobe.
22850326f5a9SSrikar Dronamraju  *
22861b08e907SOleg Nesterov  * On a singlestep exception, the singlestep notifier sets the TIF_UPROBE
22871b08e907SOleg Nesterov  * flag and allows the thread to return from the interrupt.
22880326f5a9SSrikar Dronamraju  *
22890326f5a9SSrikar Dronamraju  * While returning to userspace, thread notices the TIF_UPROBE flag and calls
22900326f5a9SSrikar Dronamraju  * uprobe_notify_resume().
22910326f5a9SSrikar Dronamraju  */
22920326f5a9SSrikar Dronamraju void uprobe_notify_resume(struct pt_regs *regs)
22930326f5a9SSrikar Dronamraju {
22940326f5a9SSrikar Dronamraju 	struct uprobe_task *utask;
22950326f5a9SSrikar Dronamraju 
2296db023ea5SOleg Nesterov 	clear_thread_flag(TIF_UPROBE);
2297db023ea5SOleg Nesterov 
22980326f5a9SSrikar Dronamraju 	utask = current->utask;
22991b08e907SOleg Nesterov 	if (utask && utask->active_uprobe)
23000326f5a9SSrikar Dronamraju 		handle_singlestep(utask, regs);
23011b08e907SOleg Nesterov 	else
23021b08e907SOleg Nesterov 		handle_swbp(regs);
23030326f5a9SSrikar Dronamraju }
23040326f5a9SSrikar Dronamraju 
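/*
 * Editor's note: for illustration, an architecture's exit-to-user path
 * invokes the function above roughly like this (the arm64 port's
 * do_notify_resume() follows this pattern):
 */
#if 0	/* illustration only, not part of this file */
	if (thread_flags & _TIF_UPROBE)
		uprobe_notify_resume(regs);
#endif
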
23050326f5a9SSrikar Dronamraju /*
23060326f5a9SSrikar Dronamraju  * uprobe_pre_sstep_notifier gets called from interrupt context as part of
23070326f5a9SSrikar Dronamraju  * the notifier mechanism. Set the TIF_UPROBE flag and indicate a breakpoint hit.
23080326f5a9SSrikar Dronamraju  */
23090326f5a9SSrikar Dronamraju int uprobe_pre_sstep_notifier(struct pt_regs *regs)
23100326f5a9SSrikar Dronamraju {
23110dfd0eb8SAnton Arapov 	if (!current->mm)
23120dfd0eb8SAnton Arapov 		return 0;
23130dfd0eb8SAnton Arapov 
23140dfd0eb8SAnton Arapov 	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
23150dfd0eb8SAnton Arapov 	    (!current->utask || !current->utask->return_instances))
23160326f5a9SSrikar Dronamraju 		return 0;
23170326f5a9SSrikar Dronamraju 
23180326f5a9SSrikar Dronamraju 	set_thread_flag(TIF_UPROBE);
23190326f5a9SSrikar Dronamraju 	return 1;
23200326f5a9SSrikar Dronamraju }
23210326f5a9SSrikar Dronamraju 
23220326f5a9SSrikar Dronamraju /*
23230326f5a9SSrikar Dronamraju  * uprobe_post_sstep_notifier gets called in interrupt context as part of the
23240326f5a9SSrikar Dronamraju  * notifier mechanism. Set the TIF_UPROBE flag and indicate completion of singlestep.
23250326f5a9SSrikar Dronamraju  */
23260326f5a9SSrikar Dronamraju int uprobe_post_sstep_notifier(struct pt_regs *regs)
23270326f5a9SSrikar Dronamraju {
23280326f5a9SSrikar Dronamraju 	struct uprobe_task *utask = current->utask;
23290326f5a9SSrikar Dronamraju 
23300326f5a9SSrikar Dronamraju 	if (!current->mm || !utask || !utask->active_uprobe)
23310326f5a9SSrikar Dronamraju 		/* task is currently not uprobed */
23320326f5a9SSrikar Dronamraju 		return 0;
23330326f5a9SSrikar Dronamraju 
23340326f5a9SSrikar Dronamraju 	utask->state = UTASK_SSTEP_ACK;
23350326f5a9SSrikar Dronamraju 	set_thread_flag(TIF_UPROBE);
23360326f5a9SSrikar Dronamraju 	return 1;
23370326f5a9SSrikar Dronamraju }
23380326f5a9SSrikar Dronamraju 
23390326f5a9SSrikar Dronamraju static struct notifier_block uprobe_exception_nb = {
23400326f5a9SSrikar Dronamraju 	.notifier_call		= arch_uprobe_exception_notify,
23410326f5a9SSrikar Dronamraju 	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
23420326f5a9SSrikar Dronamraju };
23430326f5a9SSrikar Dronamraju 
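/*
 * Editor's note: for illustration, this is roughly how the x86 port
 * implements the arch_uprobe_exception_notify() callback registered
 * above, routing int3 traps to uprobe_pre_sstep_notifier() and debug
 * (single-step) traps to uprobe_post_sstep_notifier().
 */
#if 0	/* illustration only, not part of this file */
int arch_uprobe_exception_notify(struct notifier_block *self,
				 unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	/* We are only interested in userspace traps */
	if (regs && !user_mode(regs))
		return NOTIFY_DONE;

	switch (val) {
	case DIE_INT3:
		if (uprobe_pre_sstep_notifier(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (uprobe_post_sstep_notifier(regs))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}
#endif
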
2344aad42dd4SNadav Amit void __init uprobes_init(void)
2345a5f4374aSIngo Molnar {
2346a5f4374aSIngo Molnar 	int i;
2347a5f4374aSIngo Molnar 
234866d06dffSOleg Nesterov 	for (i = 0; i < UPROBES_HASH_SZ; i++)
2349a5f4374aSIngo Molnar 		mutex_init(&uprobes_mmap_mutex[i]);
23500326f5a9SSrikar Dronamraju 
2351aad42dd4SNadav Amit 	BUG_ON(register_die_notifier(&uprobe_exception_nb));
2352a5f4374aSIngo Molnar }
2353
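/*
 * Editor's note: a hedged user-space usage sketch appended for
 * illustration.  It arms a uprobe through the uprobe_events tracefs
 * file (see Documentation/trace/uprobetracer.rst); the binary path and
 * the 0x4710 offset are placeholders that must be replaced with a real
 * file and instruction offset.
 */
#if 0	/* illustration only, not part of this file */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/tracing/uprobe_events", "w");

	if (!f) {
		perror("uprobe_events");
		return 1;
	}
	/* p:<group>/<event> <path>:<offset> -- path and offset are placeholders */
	fprintf(f, "p:uprobes/myprobe /bin/bash:0x4710\n");
	fclose(f);
	return 0;
}
#endif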