xref: /openbmc/linux/kernel/events/uprobes.c (revision c8394812)
/*
 * User-space Probes (UProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
#include <linux/swap.h>		/* try_to_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include "../../mm/internal.h"	/* munlock_vma_page */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE

static struct rb_root uprobes_tree = RB_ROOT;
/*
 * Allows us to skip uprobe_mmap() if there are no uprobe events active
 * at this time.  A fine-grained per-inode count would probably be better.
 */
#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */

#define UPROBES_HASH_SZ	13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

static struct percpu_rw_semaphore dup_mmap_sem;

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN	0

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	atomic_t		ref;
	struct rw_semaphore	register_rwsem;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct uprobe_consumer	*consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	loff_t			offset;
	unsigned long		flags;

	/*
	 * The generic code assumes that it has two members of unknown type
	 * owned by the arch-specific code:
	 *
	 *	insn -	copy_insn() saves the original instruction here for
	 *		arch_uprobe_analyze_insn().
	 *
	 *	ixol -	potentially modified instruction to execute out of
	 *		line, copied to xol_area by xol_get_insn_slot().
	 */
	struct arch_uprobe	arch;
};

/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, a thread contends for a slot and frees it
 * after singlestep.  Currently a fixed number of slots is allocated.
 */
struct xol_area {
	wait_queue_head_t		wq;		/* if all slots are busy */
	atomic_t			slot_count;	/* number of in-use slots */
	unsigned long			*bitmap;	/* 0 = free slot */

	struct vm_special_mapping	xol_mapping;
	struct page			*pages[2];
	/*
	 * We keep the vma's vm_start rather than a pointer to the vma
	 * itself.  The probed process or a naughty kernel module could make
	 * the vma go away, and we must handle that reasonably gracefully.
	 */
	unsigned long			vaddr;		/* Page(s) of instruction slots */
};

/*
 * valid_vma: Verify if the specified vma is an executable vma.
 * Relax the restrictions while unregistering: vm_flags might have
 * changed after the breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return true if the specified vma is an executable vma
 *	  suitable for probing.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

	if (is_register)
		flags |= VM_WRITE;

	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}
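
/*
 * Example (illustrative, derived from the check above): because @flags
 * ORs in every bit we refuse, the single compare accepts a vma only when
 * VM_MAYEXEC is set and VM_HUGETLB, VM_MAYSHARE (and VM_WRITE while
 * registering) are all clear.  For a typical r-xp file mapping:
 *
 *	vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
 *	(vm_flags & (VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE | VM_WRITE))
 *			== VM_MAYEXEC		-> true, vma is acceptable
 */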

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
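
/*
 * Worked example (illustrative, made-up numbers): for a vma with
 * vm_start == 0x400000 that maps file page 0x10 (vm_pgoff), a probe at
 * file offset 0x10f00 translates to:
 *
 *	vaddr = 0x400000 + 0x10f00 - (0x10 << PAGE_SHIFT)
 *	      = 0x400f00			(with 4K pages)
 *
 * vaddr_to_offset() is the exact inverse.
 */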

/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to @old_page
 * @addr:     address the @old_page is mapped at
 * @old_page: the cowed page we are replacing by @new_page
 * @new_page: the modified page we replace it by
 *
 * Returns 0 on success, a negative errno on failure.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *old_page, struct page *new_page)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pte_t *ptep;
	int err;
	/* For mmu_notifiers */
	const unsigned long mmun_start = addr;
	const unsigned long mmun_end   = addr + PAGE_SIZE;
	struct mem_cgroup *memcg;

	err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
			false);
	if (err)
		return err;

	/* For try_to_free_swap() and munlock_vma_page() below */
	lock_page(old_page);

	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	err = -EAGAIN;
	ptep = page_check_address(old_page, mm, addr, &ptl, 0);
	if (!ptep) {
		mem_cgroup_cancel_charge(new_page, memcg, false);
		goto unlock;
	}

	get_page(new_page);
	page_add_new_anon_rmap(new_page, vma, addr, false);
	mem_cgroup_commit_charge(new_page, memcg, false, false);
	lru_cache_add_active_or_unevictable(new_page, vma);

	if (!PageAnon(old_page)) {
		dec_mm_counter(mm, mm_counter_file(old_page));
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	ptep_clear_flush_notify(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, mk_pte(new_page, vma->vm_page_prot));

	page_remove_rmap(old_page, false);
	if (!page_mapped(old_page))
		try_to_free_swap(old_page);
	pte_unmap_unlock(ptep, ptl);

	if (vma->vm_flags & VM_LOCKED)
		munlock_vma_page(old_page);
	put_page(old_page);

	err = 0;
 unlock:
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	unlock_page(old_page);
	return err;
}

/**
 * is_swbp_insn - check if the instruction is a breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn.
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}

/**
 * is_trap_insn - check if the instruction is a breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_trap_insn.
 * Returns true if @insn is a breakpoint instruction.
 *
 * This function is needed for the case where an architecture has multiple
 * trap instructions (like powerpc).
 */
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
	return is_swbp_insn(insn);
}
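
/*
 * Sketch (hypothetical, not part of this file): an architecture with
 * several trap encodings could override the weak hook above, roughly:
 *
 *	bool is_trap_insn(uprobe_opcode_t *insn)
 *	{
 *		return arch_insn_is_trap(*insn);
 *	}
 *
 * where arch_insn_is_trap() stands in for an arch-specific helper.  This
 * lets prepare_uprobe() refuse to probe an existing trap whatever its
 * encoding, while verify_opcode() still checks UPROBE_SWBP_INSN only.
 */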

static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
	kunmap_atomic(kaddr);
}

static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
	kunmap_atomic(kaddr);
}
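
/*
 * Note (added for clarity): both helpers map a single page with
 * kmap_atomic(), so the copy must not cross a page boundary --
 * (vaddr & ~PAGE_MASK) is only the offset within that one page.
 * Callers honour this, e.g. uprobe_write_opcode() does:
 *
 *	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
 *
 * and prepare_uprobe() BUG()s on any probe whose opcode would straddle
 * a page boundary.
 */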

static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
	uprobe_opcode_t old_opcode;
	bool is_swbp;

	/*
	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
	 * We do not check if it is any other 'trap variant' which could
	 * be a conditional trap instruction such as the one powerpc supports.
	 *
	 * The logic is that we do not care if the underlying instruction
	 * is a trap variant; uprobes always win over any other (gdb)
	 * breakpoint.
	 */
	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
	is_swbp = is_swbp_insn(&old_opcode);

	if (is_swbp_insn(new_opcode)) {
		if (is_swbp)		/* register: already installed? */
			return 0;
	} else {
		if (!is_swbp)		/* unregister: was it changed by us? */
			return 0;
	}

	return 1;
}
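
/*
 * Summary of the cases above (illustrative):
 *
 *	new_opcode	old insn	return	meaning
 *	is_swbp		is_swbp		0	register: already installed
 *	is_swbp		!is_swbp	1	register: write the swbp
 *	!is_swbp	!is_swbp	0	unregister: not ours, skip
 *	!is_swbp	is_swbp		1	unregister: restore original
 */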

/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable length instructions and the
 * breakpoint instruction is not of the smallest length supported by that
 * architecture, then we need to modify is_trap_at_addr() and
 * uprobe_write_opcode() accordingly. This would never be a problem for archs
 * that have fixed length instructions.
 *
 * uprobe_write_opcode - write the opcode at a given virtual address.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_sem held for write.
 * Return 0 (success) or a negative errno.
 */
int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
			uprobe_opcode_t opcode)
{
	struct page *old_page, *new_page;
	struct vm_area_struct *vma;
	int ret;

retry:
	/* Read the page with vaddr into memory */
	ret = get_user_pages_remote(NULL, mm, vaddr, 1,
			FOLL_FORCE | FOLL_SPLIT, &old_page, &vma, NULL);
	if (ret <= 0)
		return ret;

	ret = verify_opcode(old_page, vaddr, &opcode);
	if (ret <= 0)
		goto put_old;

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_old;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);
	copy_highpage(new_page, old_page);
	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

	ret = __replace_page(vma, vaddr, old_page, new_page);
	put_page(new_page);
put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)&auprobe->insn);
}

static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
	atomic_inc(&uprobe->ref);
	return uprobe;
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (atomic_dec_and_test(&uprobe->ref))
		kfree(uprobe);
}

static int match_uprobe(struct uprobe *l, struct uprobe *r)
{
	if (l->inode < r->inode)
		return -1;

	if (l->inode > r->inode)
		return 1;

	if (l->offset < r->offset)
		return -1;

	if (l->offset > r->offset)
		return 1;

	return 0;
}

static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe u = { .inode = inode, .offset = offset };
	struct rb_node *n = uprobes_tree.rb_node;
	struct uprobe *uprobe;
	int match;

	while (n) {
		uprobe = rb_entry(n, struct uprobe, rb_node);
		match = match_uprobe(&u, uprobe);
		if (!match)
			return get_uprobe(uprobe);

		if (match < 0)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	return NULL;
}

/*
 * Find a uprobe corresponding to a given inode:offset
 * Acquires uprobes_treelock
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe;

	spin_lock(&uprobes_treelock);
	uprobe = __find_uprobe(inode, offset);
	spin_unlock(&uprobes_treelock);

	return uprobe;
}

static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node **p = &uprobes_tree.rb_node;
	struct rb_node *parent = NULL;
	struct uprobe *u;
	int match;

	while (*p) {
		parent = *p;
		u = rb_entry(parent, struct uprobe, rb_node);
		match = match_uprobe(uprobe, u);
		if (!match)
			return get_uprobe(u);

		if (match < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	u = NULL;
	rb_link_node(&uprobe->rb_node, parent, p);
	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
	/* get access + creation ref */
	atomic_set(&uprobe->ref, 2);

	return u;
}

/*
 * Acquire uprobes_treelock:
 * If a matching uprobe already exists in the rbtree,
 *	increment its access refcount and return the matching uprobe.
 *
 * If there is no matching uprobe, insert @uprobe in the rbtree,
 *	take a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	spin_lock(&uprobes_treelock);
	u = __insert_uprobe(uprobe);
	spin_unlock(&uprobes_treelock);

	return u;
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return NULL;

	uprobe->inode = igrab(inode);
	uprobe->offset = offset;
	init_rwsem(&uprobe->register_rwsem);
	init_rwsem(&uprobe->consumer_rwsem);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);
	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe) {
		kfree(uprobe);
		uprobe = cur_uprobe;
		iput(inode);
	}

	return uprobe;
}

static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	uc->next = uprobe->consumers;
	uprobe->consumers = uc;
	up_write(&uprobe->consumer_rwsem);
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if @uc was deleted successfully,
 * false otherwise.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	struct uprobe_consumer **con;
	bool ret = false;

	down_write(&uprobe->consumer_rwsem);
	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
		if (*con == uc) {
			*con = uc->next;
			ret = true;
			break;
		}
	}
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static int __copy_insn(struct address_space *mapping, struct file *filp,
			void *insn, int nbytes, loff_t offset)
{
	struct page *page;
	/*
	 * Ensure that the page that has the original instruction is populated
	 * and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
	 * see uprobe_register().
	 */
	if (mapping->a_ops->readpage)
		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
	else
		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
	if (IS_ERR(page))
		return PTR_ERR(page);

	copy_from_page(page, offset, insn, nbytes);
	put_page(page);

	return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping = uprobe->inode->i_mapping;
	loff_t offs = uprobe->offset;
	void *insn = &uprobe->arch.insn;
	int size = sizeof(uprobe->arch.insn);
	int len, err = -EIO;

	/* Copy only available bytes, -EIO if nothing was read */
	do {
		if (offs >= i_size_read(uprobe->inode))
			break;

		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
		err = __copy_insn(mapping, filp, insn, len, offs);
		if (err)
			break;

		insn += len;
		offs += len;
		size -= len;
	} while (size);

	return err;
}

static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
				struct mm_struct *mm, unsigned long vaddr)
{
	int ret = 0;

	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		return ret;

	/* TODO: move this into _register, until then we abuse this sem. */
	down_write(&uprobe->consumer_rwsem);
	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		goto out;

	ret = copy_insn(uprobe, file);
	if (ret)
		goto out;

	ret = -ENOTSUPP;
	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
		goto out;

	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
	if (ret)
		goto out;

	/* uprobe_write_opcode() assumes we don't cross page boundary */
	BUG_ON((uprobe->offset & ~PAGE_MASK) +
			UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);

	smp_wmb(); /* pairs with rmb() in find_active_uprobe() */
	set_bit(UPROBE_COPY_INSN, &uprobe->flags);

 out:
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static inline bool consumer_filter(struct uprobe_consumer *uc,
				   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	return !uc->filter || uc->filter(uc, ctx, mm);
}

static bool filter_chain(struct uprobe *uprobe,
			 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct uprobe_consumer *uc;
	bool ret = false;

	down_read(&uprobe->consumer_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		ret = consumer_filter(uc, ctx, mm);
		if (ret)
			break;
	}
	up_read(&uprobe->consumer_rwsem);

	return ret;
}
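
/*
 * Sketch of a consumer-side filter (hypothetical, not part of this file):
 * a consumer interested in a single process could supply
 *
 *	static bool my_filter(struct uprobe_consumer *self,
 *			      enum uprobe_filter_ctx ctx, struct mm_struct *mm)
 *	{
 *		return mm == my_traced_mm;
 *	}
 *
 * with my_traced_mm saved elsewhere.  filter_chain() then reports whether
 * any registered consumer still wants the breakpoint in a given mm.
 */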

static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	bool first_uprobe;
	int ret;

	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
	if (ret)
		return ret;

	/*
	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
	 * the task can hit this breakpoint right after __replace_page().
	 */
	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
	if (first_uprobe)
		set_bit(MMF_HAS_UPROBES, &mm->flags);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		clear_bit(MMF_HAS_UPROBES, &mm->flags);

	return ret;
}

static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
	set_bit(MMF_RECALC_UPROBES, &mm->flags);
	return set_orig_insn(&uprobe->arch, mm, vaddr);
}

static inline bool uprobe_is_active(struct uprobe *uprobe)
{
	return !RB_EMPTY_NODE(&uprobe->rb_node);
}
/*
 * There could be threads that have already hit the breakpoint. They
 * will recheck the current insn and restart if find_uprobe() fails.
 * See find_active_uprobe().
 */
static void delete_uprobe(struct uprobe *uprobe)
{
	if (WARN_ON(!uprobe_is_active(uprobe)))
		return;

	spin_lock(&uprobes_treelock);
	rb_erase(&uprobe->rb_node, &uprobes_tree);
	spin_unlock(&uprobes_treelock);
	RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
	iput(uprobe->inode);
	put_uprobe(uprobe);
}

struct map_info {
	struct map_info *next;
	struct mm_struct *mm;
	unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
	struct map_info *next = info->next;
	kfree(info);
	return next;
}

static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct map_info *curr = NULL;
	struct map_info *prev = NULL;
	struct map_info *info;
	int more = 0;

 again:
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		if (!prev && !more) {
			/*
			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
			 * reclaim. This is optimistic, no harm done if it fails.
			 */
			prev = kmalloc(sizeof(struct map_info),
					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (prev)
				prev->next = NULL;
		}
		if (!prev) {
			more++;
			continue;
		}

		if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
			continue;

		info = prev;
		prev = prev->next;
		info->next = curr;
		curr = info;

		info->mm = vma->vm_mm;
		info->vaddr = offset_to_vaddr(vma, offset);
	}
	i_mmap_unlock_read(mapping);

	if (!more)
		goto out;

	prev = curr;
	while (curr) {
		mmput(curr->mm);
		curr = curr->next;
	}

	do {
		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
		if (!info) {
			curr = ERR_PTR(-ENOMEM);
			goto out;
		}
		info->next = prev;
		prev = info;
	} while (--more);

	goto again;
 out:
	while (prev)
		prev = free_map_info(prev);
	return curr;
}

static int
register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
{
	bool is_register = !!new;
	struct map_info *info;
	int err = 0;

	percpu_down_write(&dup_mmap_sem);
	info = build_map_info(uprobe->inode->i_mapping,
					uprobe->offset, is_register);
	if (IS_ERR(info)) {
		err = PTR_ERR(info);
		goto out;
	}

	while (info) {
		struct mm_struct *mm = info->mm;
		struct vm_area_struct *vma;

		if (err && is_register)
			goto free;

		down_write(&mm->mmap_sem);
		vma = find_vma(mm, info->vaddr);
		if (!vma || !valid_vma(vma, is_register) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			goto unlock;

		if (vma->vm_start > info->vaddr ||
		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
			goto unlock;

		if (is_register) {
			/* consult only the "caller", new consumer. */
			if (consumer_filter(new,
					UPROBE_FILTER_REGISTER, mm))
				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
			if (!filter_chain(uprobe,
					UPROBE_FILTER_UNREGISTER, mm))
				err |= remove_breakpoint(uprobe, mm, info->vaddr);
		}

 unlock:
		up_write(&mm->mmap_sem);
 free:
		mmput(mm);
		info = free_map_info(info);
	}
 out:
	percpu_up_write(&dup_mmap_sem);
	return err;
}

static int __uprobe_register(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	consumer_add(uprobe, uc);
	return register_for_each_vma(uprobe, uc);
}

static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	int err;

	if (WARN_ON(!consumer_del(uprobe, uc)))
		return;

	err = register_for_each_vma(uprobe, NULL);
	/* TODO: can't unregister? schedule a worker thread */
	if (!uprobe->consumers && !err)
		delete_uprobe(uprobe);
}

/*
 * uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e. the first consumer for an @inode:@offset
 * tuple).  The creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. The creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters.
 *
 * Return errno if it cannot successfully install probes
 * else return 0 (success)
 */
int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	/* Uprobe must have at least one set consumer */
	if (!uc->handler && !uc->ret_handler)
		return -EINVAL;

	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
	if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping))
		return -EIO;
	/* Racy, just to catch the obvious mistakes */
	if (offset > i_size_read(inode))
		return -EINVAL;

 retry:
	uprobe = alloc_uprobe(inode, offset);
	if (!uprobe)
		return -ENOMEM;
	/*
	 * We can race with uprobe_unregister()->delete_uprobe().
	 * Check uprobe_is_active() and retry if it is false.
	 */
	down_write(&uprobe->register_rwsem);
	ret = -EAGAIN;
	if (likely(uprobe_is_active(uprobe))) {
		ret = __uprobe_register(uprobe, uc);
		if (ret)
			__uprobe_unregister(uprobe, uc);
	}
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}
EXPORT_SYMBOL_GPL(uprobe_register);
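
/*
 * Minimal usage sketch (hypothetical names, not part of this file):
 *
 *	static int my_handler(struct uprobe_consumer *self, struct pt_regs *regs)
 *	{
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer my_uc = { .handler = my_handler };
 *
 *	err = uprobe_register(inode, offset, &my_uc);
 *	...
 *	uprobe_unregister(inode, offset, &my_uc);
 *
 * Returning 0 from the handler keeps the probe installed; the same
 * inode:offset pair must later be passed to uprobe_unregister().
 */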

/*
 * uprobe_apply - add or remove the breakpoints for an already
 * registered probe, according to @add.
 * @inode: the file in which the probe resides.
 * @offset: offset from the start of the file.
 * @uc: consumer which wants to add more or remove some breakpoints
 * @add: add or remove the breakpoints
 */
int uprobe_apply(struct inode *inode, loff_t offset,
			struct uprobe_consumer *uc, bool add)
{
	struct uprobe *uprobe;
	struct uprobe_consumer *con;
	int ret = -ENOENT;

	uprobe = find_uprobe(inode, offset);
	if (WARN_ON(!uprobe))
		return ret;

	down_write(&uprobe->register_rwsem);
	for (con = uprobe->consumers; con && con != uc; con = con->next)
		;
	if (con)
		ret = register_for_each_vma(uprobe, add ? uc : NULL);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);

	return ret;
}
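
/*
 * Illustrative use (assumption, hypothetical @my_uc): a consumer with a
 * ->filter can toggle its breakpoints without a full unregister:
 *
 *	uprobe_apply(inode, offset, &my_uc, true);	re-apply the filter
 *	uprobe_apply(inode, offset, &my_uc, false);	pull the breakpoints
 *
 * @my_uc must already be registered, otherwise -ENOENT is returned.
 */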

/*
 * uprobe_unregister - unregister an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;

	uprobe = find_uprobe(inode, offset);
	if (WARN_ON(!uprobe))
		return;

	down_write(&uprobe->register_rwsem);
	__uprobe_unregister(uprobe, uc);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister);

static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	int err = 0;

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long vaddr;
		loff_t offset;

		if (!valid_vma(vma, false) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			continue;

		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
		if (uprobe->offset < offset ||
		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
			continue;

		vaddr = offset_to_vaddr(vma, uprobe->offset);
		err |= remove_breakpoint(uprobe, mm, vaddr);
	}
	up_read(&mm->mmap_sem);

	return err;
}

static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
	struct rb_node *n = uprobes_tree.rb_node;

	while (n) {
		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);

		if (inode < u->inode) {
			n = n->rb_left;
		} else if (inode > u->inode) {
			n = n->rb_right;
		} else {
			if (max < u->offset)
				n = n->rb_left;
			else if (min > u->offset)
				n = n->rb_right;
			else
				break;
		}
	}

	return n;
}

/*
 * For a given range in vma, build a list of probes that need to be inserted.
 */
static void build_probe_list(struct inode *inode,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct list_head *head)
{
	loff_t min, max;
	struct rb_node *n, *t;
	struct uprobe *u;

	INIT_LIST_HEAD(head);
	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	if (n) {
		for (t = n; t; t = rb_prev(t)) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset < min)
				break;
			list_add(&u->pending_list, head);
			get_uprobe(u);
		}
		for (t = n; (t = rb_next(t)); ) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset > max)
				break;
			list_add(&u->pending_list, head);
			get_uprobe(u);
		}
	}
	spin_unlock(&uprobes_treelock);
}

/*
 * Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
 *
 * Currently we ignore all errors and always return 0; the callers
 * can't handle the failure anyway.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;

	if (no_uprobe_events() || !valid_vma(vma, true))
		return 0;

	inode = file_inode(vma->vm_file);
	if (!inode)
		return 0;

	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
	/*
	 * We can race with uprobe_unregister(), this uprobe can be already
	 * removed. But in this case filter_chain() must return false, all
	 * consumers have gone away.
	 */
	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		if (!fatal_signal_pending(current) &&
		    filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
		}
		put_uprobe(uprobe);
	}
	mutex_unlock(uprobes_mmap_hash(inode));

	return 0;
}

static bool
vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	loff_t min, max;
	struct inode *inode;
	struct rb_node *n;

	inode = file_inode(vma->vm_file);

	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	spin_unlock(&uprobes_treelock);

	return !!n;
}

1110682968e0SSrikar Dronamraju /*
1111682968e0SSrikar Dronamraju  * Called in context of a munmap of a vma.
1112682968e0SSrikar Dronamraju  */
1113cbc91f71SSrikar Dronamraju void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1114682968e0SSrikar Dronamraju {
1115441f1eb7SOleg Nesterov 	if (no_uprobe_events() || !valid_vma(vma, false))
1116682968e0SSrikar Dronamraju 		return;
1117682968e0SSrikar Dronamraju 
11182fd611a9SOleg Nesterov 	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
11192fd611a9SOleg Nesterov 		return;
11202fd611a9SOleg Nesterov 
11219f68f672SOleg Nesterov 	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
11229f68f672SOleg Nesterov 	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
1123f8ac4ec9SOleg Nesterov 		return;
1124f8ac4ec9SOleg Nesterov 
11259f68f672SOleg Nesterov 	if (vma_has_uprobes(vma, start, end))
11269f68f672SOleg Nesterov 		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
1127682968e0SSrikar Dronamraju }
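
/*
 * A sketch of the lazy recalculation this enables: uprobe_munmap() only
 * marks the mm with MMF_RECALC_UPROBES; the expensive re-scan is deferred
 * until find_active_uprobe() sees the bit on the next breakpoint hit and
 * calls mmf_recalc_uprobes(), which clears MMF_HAS_UPROBES only if no
 * remaining vma still contains a probe.
 */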
1128682968e0SSrikar Dronamraju 
1129d4b3b638SSrikar Dronamraju /* Slot allocation for XOL */
11306441ec8bSOleg Nesterov static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
1131d4b3b638SSrikar Dronamraju {
1132704bde3cSOleg Nesterov 	struct vm_area_struct *vma;
1133704bde3cSOleg Nesterov 	int ret;
1134d4b3b638SSrikar Dronamraju 
1135598fdc1dSMichal Hocko 	if (down_write_killable(&mm->mmap_sem))
1136598fdc1dSMichal Hocko 		return -EINTR;
1137598fdc1dSMichal Hocko 
1138704bde3cSOleg Nesterov 	if (mm->uprobes_state.xol_area) {
1139704bde3cSOleg Nesterov 		ret = -EALREADY;
1140d4b3b638SSrikar Dronamraju 		goto fail;
1141704bde3cSOleg Nesterov 	}
1142d4b3b638SSrikar Dronamraju 
1143af0d95afSOleg Nesterov 	if (!area->vaddr) {
1144d4b3b638SSrikar Dronamraju 		/* Try to map as high as possible; this is only a hint. */
1145af0d95afSOleg Nesterov 		area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
1146af0d95afSOleg Nesterov 						PAGE_SIZE, 0, 0);
1147d4b3b638SSrikar Dronamraju 		if (area->vaddr & ~PAGE_MASK) {
1148d4b3b638SSrikar Dronamraju 			ret = area->vaddr;
1149d4b3b638SSrikar Dronamraju 			goto fail;
1150d4b3b638SSrikar Dronamraju 		}
1151af0d95afSOleg Nesterov 	}
1152d4b3b638SSrikar Dronamraju 
1153704bde3cSOleg Nesterov 	vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
1154704bde3cSOleg Nesterov 				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
1155704bde3cSOleg Nesterov 				&area->xol_mapping);
1156704bde3cSOleg Nesterov 	if (IS_ERR(vma)) {
1157704bde3cSOleg Nesterov 		ret = PTR_ERR(vma);
1158d4b3b638SSrikar Dronamraju 		goto fail;
1159704bde3cSOleg Nesterov 	}
1160d4b3b638SSrikar Dronamraju 
1161704bde3cSOleg Nesterov 	ret = 0;
1162d4b3b638SSrikar Dronamraju 	smp_wmb();	/* pairs with get_xol_area() */
1163d4b3b638SSrikar Dronamraju 	mm->uprobes_state.xol_area = area;
1164d4b3b638SSrikar Dronamraju  fail:
1165d4b3b638SSrikar Dronamraju 	up_write(&mm->mmap_sem);
1166d4b3b638SSrikar Dronamraju 
1167d4b3b638SSrikar Dronamraju 	return ret;
1168d4b3b638SSrikar Dronamraju }
1169d4b3b638SSrikar Dronamraju 
1170af0d95afSOleg Nesterov static struct xol_area *__create_xol_area(unsigned long vaddr)
1171d4b3b638SSrikar Dronamraju {
11729b545df8SOleg Nesterov 	struct mm_struct *mm = current->mm;
1173e78aebfdSAnton Arapov 	uprobe_opcode_t insn = UPROBE_SWBP_INSN;
11746441ec8bSOleg Nesterov 	struct xol_area *area;
11759b545df8SOleg Nesterov 
1176af0d95afSOleg Nesterov 	area = kmalloc(sizeof(*area), GFP_KERNEL);
1177d4b3b638SSrikar Dronamraju 	if (unlikely(!area))
1178c8a82538SOleg Nesterov 		goto out;
1179d4b3b638SSrikar Dronamraju 
1180d4b3b638SSrikar Dronamraju 	area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
1181d4b3b638SSrikar Dronamraju 	if (!area->bitmap)
1182c8a82538SOleg Nesterov 		goto free_area;
1183c8a82538SOleg Nesterov 
1184704bde3cSOleg Nesterov 	area->xol_mapping.name = "[uprobes]";
1185869ae761SOleg Nesterov 	area->xol_mapping.fault = NULL;
1186704bde3cSOleg Nesterov 	area->xol_mapping.pages = area->pages;
1187f58bea2fSOleg Nesterov 	area->pages[0] = alloc_page(GFP_HIGHUSER);
1188f58bea2fSOleg Nesterov 	if (!area->pages[0])
1189c8a82538SOleg Nesterov 		goto free_bitmap;
1190f58bea2fSOleg Nesterov 	area->pages[1] = NULL;
1191d4b3b638SSrikar Dronamraju 
1192af0d95afSOleg Nesterov 	area->vaddr = vaddr;
1193d4b3b638SSrikar Dronamraju 	init_waitqueue_head(&area->wq);
11946441ec8bSOleg Nesterov 	/* Reserve the 1st slot for get_trampoline_vaddr() */
11956441ec8bSOleg Nesterov 	set_bit(0, area->bitmap);
11966441ec8bSOleg Nesterov 	atomic_set(&area->slot_count, 1);
1197297e765eSMarcin Nowakowski 	arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);
1198e78aebfdSAnton Arapov 
11996441ec8bSOleg Nesterov 	if (!xol_add_vma(mm, area))
1200d4b3b638SSrikar Dronamraju 		return area;
1201d4b3b638SSrikar Dronamraju 
1202f58bea2fSOleg Nesterov 	__free_page(area->pages[0]);
1203c8a82538SOleg Nesterov  free_bitmap:
1204d4b3b638SSrikar Dronamraju 	kfree(area->bitmap);
1205c8a82538SOleg Nesterov  free_area:
1206d4b3b638SSrikar Dronamraju 	kfree(area);
1207c8a82538SOleg Nesterov  out:
12086441ec8bSOleg Nesterov 	return NULL;
12096441ec8bSOleg Nesterov }
12106441ec8bSOleg Nesterov 
12116441ec8bSOleg Nesterov /*
12126441ec8bSOleg Nesterov  * get_xol_area - Allocate process's xol_area if necessary.
12136441ec8bSOleg Nesterov  * This area will be used for storing instructions for execution out of line.
12146441ec8bSOleg Nesterov  *
12156441ec8bSOleg Nesterov  * Returns the allocated area or NULL.
12166441ec8bSOleg Nesterov  */
12176441ec8bSOleg Nesterov static struct xol_area *get_xol_area(void)
12186441ec8bSOleg Nesterov {
12196441ec8bSOleg Nesterov 	struct mm_struct *mm = current->mm;
12206441ec8bSOleg Nesterov 	struct xol_area *area;
12216441ec8bSOleg Nesterov 
12226441ec8bSOleg Nesterov 	if (!mm->uprobes_state.xol_area)
1223af0d95afSOleg Nesterov 		__create_xol_area(0);
12246441ec8bSOleg Nesterov 
12259b545df8SOleg Nesterov 	area = mm->uprobes_state.xol_area;
12269b545df8SOleg Nesterov 	smp_read_barrier_depends();	/* pairs with wmb in xol_add_vma() */
12279b545df8SOleg Nesterov 	return area;
1228d4b3b638SSrikar Dronamraju }
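
/*
 * Publication note (a reading of the pairing above): xol_add_vma() fully
 * initializes the area, issues smp_wmb(), and only then publishes
 * mm->uprobes_state.xol_area. Readers like get_xol_area() and
 * get_trampoline_vaddr() load the pointer and rely on
 * smp_read_barrier_depends() (a no-op everywhere except Alpha) so that
 * subsequent dereferences of area->vaddr etc. see initialized values.
 */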
1229d4b3b638SSrikar Dronamraju 
1230d4b3b638SSrikar Dronamraju /*
1231d4b3b638SSrikar Dronamraju  * uprobe_clear_state - Free the area allocated for slots.
1232d4b3b638SSrikar Dronamraju  */
1233d4b3b638SSrikar Dronamraju void uprobe_clear_state(struct mm_struct *mm)
1234d4b3b638SSrikar Dronamraju {
1235d4b3b638SSrikar Dronamraju 	struct xol_area *area = mm->uprobes_state.xol_area;
1236d4b3b638SSrikar Dronamraju 
1237d4b3b638SSrikar Dronamraju 	if (!area)
1238d4b3b638SSrikar Dronamraju 		return;
1239d4b3b638SSrikar Dronamraju 
1240f58bea2fSOleg Nesterov 	put_page(area->pages[0]);
1241d4b3b638SSrikar Dronamraju 	kfree(area->bitmap);
1242d4b3b638SSrikar Dronamraju 	kfree(area);
1243d4b3b638SSrikar Dronamraju }
1244d4b3b638SSrikar Dronamraju 
124532cdba1eSOleg Nesterov void uprobe_start_dup_mmap(void)
124632cdba1eSOleg Nesterov {
124732cdba1eSOleg Nesterov 	percpu_down_read(&dup_mmap_sem);
124832cdba1eSOleg Nesterov }
124932cdba1eSOleg Nesterov 
125032cdba1eSOleg Nesterov void uprobe_end_dup_mmap(void)
125132cdba1eSOleg Nesterov {
125232cdba1eSOleg Nesterov 	percpu_up_read(&dup_mmap_sem);
125332cdba1eSOleg Nesterov }
125432cdba1eSOleg Nesterov 
1255f8ac4ec9SOleg Nesterov void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
1256f8ac4ec9SOleg Nesterov {
125761559a81SOleg Nesterov 	newmm->uprobes_state.xol_area = NULL;
125861559a81SOleg Nesterov 
12599f68f672SOleg Nesterov 	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
1260f8ac4ec9SOleg Nesterov 		set_bit(MMF_HAS_UPROBES, &newmm->flags);
12619f68f672SOleg Nesterov 		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
12629f68f672SOleg Nesterov 		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
12639f68f672SOleg Nesterov 	}
1264f8ac4ec9SOleg Nesterov }
1265f8ac4ec9SOleg Nesterov 
1266d4b3b638SSrikar Dronamraju /*
1267d4b3b638SSrikar Dronamraju  * xol_take_insn_slot - search for a free slot.
1268d4b3b638SSrikar Dronamraju  */
1269d4b3b638SSrikar Dronamraju static unsigned long xol_take_insn_slot(struct xol_area *area)
1270d4b3b638SSrikar Dronamraju {
1271d4b3b638SSrikar Dronamraju 	unsigned long slot_addr;
1272d4b3b638SSrikar Dronamraju 	int slot_nr;
1273d4b3b638SSrikar Dronamraju 
1274d4b3b638SSrikar Dronamraju 	do {
1275d4b3b638SSrikar Dronamraju 		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
1276d4b3b638SSrikar Dronamraju 		if (slot_nr < UINSNS_PER_PAGE) {
1277d4b3b638SSrikar Dronamraju 			if (!test_and_set_bit(slot_nr, area->bitmap))
1278d4b3b638SSrikar Dronamraju 				break;
1279d4b3b638SSrikar Dronamraju 
1280d4b3b638SSrikar Dronamraju 			slot_nr = UINSNS_PER_PAGE;
1281d4b3b638SSrikar Dronamraju 			continue;
1282d4b3b638SSrikar Dronamraju 		}
1283d4b3b638SSrikar Dronamraju 		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
1284d4b3b638SSrikar Dronamraju 	} while (slot_nr >= UINSNS_PER_PAGE);
1285d4b3b638SSrikar Dronamraju 
1286d4b3b638SSrikar Dronamraju 	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
1287d4b3b638SSrikar Dronamraju 	atomic_inc(&area->slot_count);
1288d4b3b638SSrikar Dronamraju 
1289d4b3b638SSrikar Dronamraju 	return slot_addr;
1290d4b3b638SSrikar Dronamraju }
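
/*
 * Worked example (assuming a 4K PAGE_SIZE and an arch-defined
 * UPROBE_XOL_SLOT_BYTES of 128, as on x86): UINSNS_PER_PAGE is
 * 4096/128 == 32, slot_nr ranges over 0..31, and slot 5 maps to
 * area->vaddr + 5 * 128. Slot 0 is never handed out here because
 * __create_xol_area() reserved it for the return trampoline.
 */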
1291d4b3b638SSrikar Dronamraju 
1292d4b3b638SSrikar Dronamraju /*
1293a6cb3f6dSOleg Nesterov  * xol_get_insn_slot - allocate a slot for xol.
1294d4b3b638SSrikar Dronamraju  * Returns the allocated slot address or 0.
1295d4b3b638SSrikar Dronamraju  */
1296a6cb3f6dSOleg Nesterov static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
1297d4b3b638SSrikar Dronamraju {
1298d4b3b638SSrikar Dronamraju 	struct xol_area *area;
1299a6cb3f6dSOleg Nesterov 	unsigned long xol_vaddr;
1300d4b3b638SSrikar Dronamraju 
13019b545df8SOleg Nesterov 	area = get_xol_area();
1302d4b3b638SSrikar Dronamraju 	if (!area)
1303d4b3b638SSrikar Dronamraju 		return 0;
1304d4b3b638SSrikar Dronamraju 
1305a6cb3f6dSOleg Nesterov 	xol_vaddr = xol_take_insn_slot(area);
1306a6cb3f6dSOleg Nesterov 	if (unlikely(!xol_vaddr))
1307d4b3b638SSrikar Dronamraju 		return 0;
1308d4b3b638SSrikar Dronamraju 
1309f58bea2fSOleg Nesterov 	arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
1310803200e2SOleg Nesterov 			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
1311d4b3b638SSrikar Dronamraju 
1312a6cb3f6dSOleg Nesterov 	return xol_vaddr;
1313d4b3b638SSrikar Dronamraju }
1314d4b3b638SSrikar Dronamraju 
1315d4b3b638SSrikar Dronamraju /*
1316d4b3b638SSrikar Dronamraju  * xol_free_insn_slot - if the slot was earlier allocated by
1317d4b3b638SSrikar Dronamraju  * @xol_get_insn_slot(), make the slot available for
1318d4b3b638SSrikar Dronamraju  * subsequent requests.
1319d4b3b638SSrikar Dronamraju  */
1320d4b3b638SSrikar Dronamraju static void xol_free_insn_slot(struct task_struct *tsk)
1321d4b3b638SSrikar Dronamraju {
1322d4b3b638SSrikar Dronamraju 	struct xol_area *area;
1323d4b3b638SSrikar Dronamraju 	unsigned long vma_end;
1324d4b3b638SSrikar Dronamraju 	unsigned long slot_addr;
1325d4b3b638SSrikar Dronamraju 
1326d4b3b638SSrikar Dronamraju 	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
1327d4b3b638SSrikar Dronamraju 		return;
1328d4b3b638SSrikar Dronamraju 
1329d4b3b638SSrikar Dronamraju 	slot_addr = tsk->utask->xol_vaddr;
1330af4355e9SOleg Nesterov 	if (unlikely(!slot_addr))
1331d4b3b638SSrikar Dronamraju 		return;
1332d4b3b638SSrikar Dronamraju 
1333d4b3b638SSrikar Dronamraju 	area = tsk->mm->uprobes_state.xol_area;
1334d4b3b638SSrikar Dronamraju 	vma_end = area->vaddr + PAGE_SIZE;
1335d4b3b638SSrikar Dronamraju 	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
1336d4b3b638SSrikar Dronamraju 		unsigned long offset;
1337d4b3b638SSrikar Dronamraju 		int slot_nr;
1338d4b3b638SSrikar Dronamraju 
1339d4b3b638SSrikar Dronamraju 		offset = slot_addr - area->vaddr;
1340d4b3b638SSrikar Dronamraju 		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
1341d4b3b638SSrikar Dronamraju 		if (slot_nr >= UINSNS_PER_PAGE)
1342d4b3b638SSrikar Dronamraju 			return;
1343d4b3b638SSrikar Dronamraju 
1344d4b3b638SSrikar Dronamraju 		clear_bit(slot_nr, area->bitmap);
1345d4b3b638SSrikar Dronamraju 		atomic_dec(&area->slot_count);
13462a742cedSOleg Nesterov 		smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
1347d4b3b638SSrikar Dronamraju 		if (waitqueue_active(&area->wq))
1348d4b3b638SSrikar Dronamraju 			wake_up(&area->wq);
1349d4b3b638SSrikar Dronamraju 
1350d4b3b638SSrikar Dronamraju 		tsk->utask->xol_vaddr = 0;
1351d4b3b638SSrikar Dronamraju 	}
1352d4b3b638SSrikar Dronamraju }
1353d4b3b638SSrikar Dronamraju 
135472e6ae28SVictor Kamensky void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
135572e6ae28SVictor Kamensky 				  void *src, unsigned long len)
135672e6ae28SVictor Kamensky {
135772e6ae28SVictor Kamensky 	/* Initialize the slot */
135872e6ae28SVictor Kamensky 	copy_to_page(page, vaddr, src, len);
135972e6ae28SVictor Kamensky 
136072e6ae28SVictor Kamensky 	/*
136172e6ae28SVictor Kamensky 	 * We probably need flush_icache_user_range() but it needs a vma.
136272e6ae28SVictor Kamensky 	 * This should work on most architectures by default. If an
136372e6ae28SVictor Kamensky 	 * architecture needs to do something different it can define
136472e6ae28SVictor Kamensky 	 * its own version of the function.
136572e6ae28SVictor Kamensky 	 */
136672e6ae28SVictor Kamensky 	flush_dcache_page(page);
136772e6ae28SVictor Kamensky }
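
/*
 * Architectures whose instruction cache is not coherent with the data
 * cache (arm, for instance) are expected to override this weak default
 * with a variant that also flushes the icache range of the xol slot.
 */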
136872e6ae28SVictor Kamensky 
13690326f5a9SSrikar Dronamraju /**
13700326f5a9SSrikar Dronamraju  * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
13710326f5a9SSrikar Dronamraju  * @regs: Reflects the saved state of the task after it has hit a breakpoint
13720326f5a9SSrikar Dronamraju  * instruction.
13730326f5a9SSrikar Dronamraju  * Return the address of the breakpoint instruction.
13740326f5a9SSrikar Dronamraju  */
13750326f5a9SSrikar Dronamraju unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
13760326f5a9SSrikar Dronamraju {
13770326f5a9SSrikar Dronamraju 	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
13780326f5a9SSrikar Dronamraju }
13790326f5a9SSrikar Dronamraju 
1380b02ef20aSOleg Nesterov unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
1381b02ef20aSOleg Nesterov {
1382b02ef20aSOleg Nesterov 	struct uprobe_task *utask = current->utask;
1383b02ef20aSOleg Nesterov 
1384b02ef20aSOleg Nesterov 	if (unlikely(utask && utask->active_uprobe))
1385b02ef20aSOleg Nesterov 		return utask->vaddr;
1386b02ef20aSOleg Nesterov 
1387b02ef20aSOleg Nesterov 	return instruction_pointer(regs);
1388b02ef20aSOleg Nesterov }
1389b02ef20aSOleg Nesterov 
13902bb5e840SOleg Nesterov static struct return_instance *free_ret_instance(struct return_instance *ri)
13912bb5e840SOleg Nesterov {
13922bb5e840SOleg Nesterov 	struct return_instance *next = ri->next;
13932bb5e840SOleg Nesterov 	put_uprobe(ri->uprobe);
13942bb5e840SOleg Nesterov 	kfree(ri);
13952bb5e840SOleg Nesterov 	return next;
13962bb5e840SOleg Nesterov }
13972bb5e840SOleg Nesterov 
13980326f5a9SSrikar Dronamraju /*
13990326f5a9SSrikar Dronamraju  * Called with no locks held.
14000326f5a9SSrikar Dronamraju  * Called in the context of an exiting or an exec-ing thread.
14010326f5a9SSrikar Dronamraju  */
14020326f5a9SSrikar Dronamraju void uprobe_free_utask(struct task_struct *t)
14030326f5a9SSrikar Dronamraju {
14040326f5a9SSrikar Dronamraju 	struct uprobe_task *utask = t->utask;
14052bb5e840SOleg Nesterov 	struct return_instance *ri;
14060326f5a9SSrikar Dronamraju 
14070326f5a9SSrikar Dronamraju 	if (!utask)
14080326f5a9SSrikar Dronamraju 		return;
14090326f5a9SSrikar Dronamraju 
14100326f5a9SSrikar Dronamraju 	if (utask->active_uprobe)
14110326f5a9SSrikar Dronamraju 		put_uprobe(utask->active_uprobe);
14120326f5a9SSrikar Dronamraju 
14130dfd0eb8SAnton Arapov 	ri = utask->return_instances;
14142bb5e840SOleg Nesterov 	while (ri)
14152bb5e840SOleg Nesterov 		ri = free_ret_instance(ri);
14160dfd0eb8SAnton Arapov 
1417d4b3b638SSrikar Dronamraju 	xol_free_insn_slot(t);
14180326f5a9SSrikar Dronamraju 	kfree(utask);
14190326f5a9SSrikar Dronamraju 	t->utask = NULL;
14200326f5a9SSrikar Dronamraju }
14210326f5a9SSrikar Dronamraju 
14220326f5a9SSrikar Dronamraju /*
14235a2df662SOleg Nesterov  * Allocate a uprobe_task object for the task if necessary.
14245a2df662SOleg Nesterov  * Called when the thread hits a breakpoint.
14250326f5a9SSrikar Dronamraju  *
14260326f5a9SSrikar Dronamraju  * Returns:
14270326f5a9SSrikar Dronamraju  * - pointer to new uprobe_task on success
14280326f5a9SSrikar Dronamraju  * - NULL otherwise
14290326f5a9SSrikar Dronamraju  */
14305a2df662SOleg Nesterov static struct uprobe_task *get_utask(void)
14310326f5a9SSrikar Dronamraju {
14325a2df662SOleg Nesterov 	if (!current->utask)
14335a2df662SOleg Nesterov 		current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
14345a2df662SOleg Nesterov 	return current->utask;
14350326f5a9SSrikar Dronamraju }
14360326f5a9SSrikar Dronamraju 
1437248d3a7bSOleg Nesterov static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
1438248d3a7bSOleg Nesterov {
1439248d3a7bSOleg Nesterov 	struct uprobe_task *n_utask;
1440248d3a7bSOleg Nesterov 	struct return_instance **p, *o, *n;
1441248d3a7bSOleg Nesterov 
1442248d3a7bSOleg Nesterov 	n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1443248d3a7bSOleg Nesterov 	if (!n_utask)
1444248d3a7bSOleg Nesterov 		return -ENOMEM;
1445248d3a7bSOleg Nesterov 	t->utask = n_utask;
1446248d3a7bSOleg Nesterov 
1447248d3a7bSOleg Nesterov 	p = &n_utask->return_instances;
1448248d3a7bSOleg Nesterov 	for (o = o_utask->return_instances; o; o = o->next) {
1449248d3a7bSOleg Nesterov 		n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
1450248d3a7bSOleg Nesterov 		if (!n)
1451248d3a7bSOleg Nesterov 			return -ENOMEM;
1452248d3a7bSOleg Nesterov 
1453248d3a7bSOleg Nesterov 		*n = *o;
1454f231722aSOleg Nesterov 		get_uprobe(n->uprobe);
1455248d3a7bSOleg Nesterov 		n->next = NULL;
1456248d3a7bSOleg Nesterov 
1457248d3a7bSOleg Nesterov 		*p = n;
1458248d3a7bSOleg Nesterov 		p = &n->next;
1459248d3a7bSOleg Nesterov 		n_utask->depth++;
1460248d3a7bSOleg Nesterov 	}
1461248d3a7bSOleg Nesterov 
1462248d3a7bSOleg Nesterov 	return 0;
1463248d3a7bSOleg Nesterov }
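
/*
 * Error-path note: if kmalloc() fails mid-copy, dup_utask() returns
 * -ENOMEM with a partially duplicated chain already hung off t->utask.
 * That partial chain is not leaked; uprobe_free_utask() releases it when
 * @t exits.
 */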
1464248d3a7bSOleg Nesterov 
1465248d3a7bSOleg Nesterov static void uprobe_warn(struct task_struct *t, const char *msg)
1466248d3a7bSOleg Nesterov {
1467248d3a7bSOleg Nesterov 	pr_warn("uprobe: %s:%d failed to %s\n",
1468248d3a7bSOleg Nesterov 			current->comm, current->pid, msg);
1469248d3a7bSOleg Nesterov }
1470248d3a7bSOleg Nesterov 
1471aa59c53fSOleg Nesterov static void dup_xol_work(struct callback_head *work)
1472aa59c53fSOleg Nesterov {
1473aa59c53fSOleg Nesterov 	if (current->flags & PF_EXITING)
1474aa59c53fSOleg Nesterov 		return;
1475aa59c53fSOleg Nesterov 
1476598fdc1dSMichal Hocko 	if (!__create_xol_area(current->utask->dup_xol_addr) &&
1477598fdc1dSMichal Hocko 			!fatal_signal_pending(current))
1478aa59c53fSOleg Nesterov 		uprobe_warn(current, "dup xol area");
1479aa59c53fSOleg Nesterov }
1480aa59c53fSOleg Nesterov 
1481e78aebfdSAnton Arapov /*
1482b68e0749SOleg Nesterov  * Called in context of a new clone/fork from copy_process.
1483b68e0749SOleg Nesterov  */
14843ab67966SOleg Nesterov void uprobe_copy_process(struct task_struct *t, unsigned long flags)
1485b68e0749SOleg Nesterov {
1486248d3a7bSOleg Nesterov 	struct uprobe_task *utask = current->utask;
1487248d3a7bSOleg Nesterov 	struct mm_struct *mm = current->mm;
1488aa59c53fSOleg Nesterov 	struct xol_area *area;
1489248d3a7bSOleg Nesterov 
1490b68e0749SOleg Nesterov 	t->utask = NULL;
1491248d3a7bSOleg Nesterov 
14923ab67966SOleg Nesterov 	if (!utask || !utask->return_instances)
14933ab67966SOleg Nesterov 		return;
14943ab67966SOleg Nesterov 
14953ab67966SOleg Nesterov 	if (mm == t->mm && !(flags & CLONE_VFORK))
1496248d3a7bSOleg Nesterov 		return;
1497248d3a7bSOleg Nesterov 
1498248d3a7bSOleg Nesterov 	if (dup_utask(t, utask))
1499248d3a7bSOleg Nesterov 		return uprobe_warn(t, "dup ret instances");
1500aa59c53fSOleg Nesterov 
1501aa59c53fSOleg Nesterov 	/* The task can fork() after dup_xol_work() fails */
1502aa59c53fSOleg Nesterov 	area = mm->uprobes_state.xol_area;
1503aa59c53fSOleg Nesterov 	if (!area)
1504aa59c53fSOleg Nesterov 		return uprobe_warn(t, "dup xol area");
1505aa59c53fSOleg Nesterov 
15063ab67966SOleg Nesterov 	if (mm == t->mm)
15073ab67966SOleg Nesterov 		return;
15083ab67966SOleg Nesterov 
150932473431SOleg Nesterov 	t->utask->dup_xol_addr = area->vaddr;
151032473431SOleg Nesterov 	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
151132473431SOleg Nesterov 	task_work_add(t, &t->utask->dup_xol_work, true);
1512b68e0749SOleg Nesterov }
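
/*
 * Why task_work here (a reading of the code above): a fork()ed child gets
 * a fresh mm whose "[uprobes]" vma was skipped by dup_mmap() (VM_DONTCOPY),
 * yet the duplicated return_instances imply hijacked return addresses that
 * point at the parent's trampoline address. dup_xol_work() therefore
 * re-creates the xol area at the same vaddr (utask->dup_xol_addr) once the
 * child runs in its own context.
 */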
1513b68e0749SOleg Nesterov 
1514b68e0749SOleg Nesterov /*
1515e78aebfdSAnton Arapov  * The current area->vaddr notion assumes the trampoline address is always
1516e78aebfdSAnton Arapov  * equal to area->vaddr.
1517e78aebfdSAnton Arapov  *
1518e78aebfdSAnton Arapov  * Returns -1 in case the xol_area is not allocated.
1519e78aebfdSAnton Arapov  */
1520e78aebfdSAnton Arapov static unsigned long get_trampoline_vaddr(void)
1521e78aebfdSAnton Arapov {
1522e78aebfdSAnton Arapov 	struct xol_area *area;
1523e78aebfdSAnton Arapov 	unsigned long trampoline_vaddr = -1;
1524e78aebfdSAnton Arapov 
1525e78aebfdSAnton Arapov 	area = current->mm->uprobes_state.xol_area;
1526e78aebfdSAnton Arapov 	smp_read_barrier_depends();
1527e78aebfdSAnton Arapov 	if (area)
1528e78aebfdSAnton Arapov 		trampoline_vaddr = area->vaddr;
1529e78aebfdSAnton Arapov 
1530e78aebfdSAnton Arapov 	return trampoline_vaddr;
1531e78aebfdSAnton Arapov }
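
/*
 * How the trampoline fires (a summary of this file's code): slot 0 of the
 * xol area holds UPROBE_SWBP_INSN, copied there by __create_xol_area().
 * A uretprobed function "returns" to area->vaddr, traps on that
 * breakpoint, and handle_swbp() recognizes the address via
 * get_trampoline_vaddr() and diverts to handle_trampoline().
 */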
1532e78aebfdSAnton Arapov 
1533db087ef6SOleg Nesterov static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
1534db087ef6SOleg Nesterov 					struct pt_regs *regs)
1535a5b7e1a8SOleg Nesterov {
1536a5b7e1a8SOleg Nesterov 	struct return_instance *ri = utask->return_instances;
1537db087ef6SOleg Nesterov 	enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;
153886dcb702SOleg Nesterov 
153986dcb702SOleg Nesterov 	while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
1540a5b7e1a8SOleg Nesterov 		ri = free_ret_instance(ri);
1541a5b7e1a8SOleg Nesterov 		utask->depth--;
1542a5b7e1a8SOleg Nesterov 	}
1543a5b7e1a8SOleg Nesterov 	utask->return_instances = ri;
1544a5b7e1a8SOleg Nesterov }
1545a5b7e1a8SOleg Nesterov 
15460dfd0eb8SAnton Arapov static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
15470dfd0eb8SAnton Arapov {
15480dfd0eb8SAnton Arapov 	struct return_instance *ri;
15490dfd0eb8SAnton Arapov 	struct uprobe_task *utask;
15500dfd0eb8SAnton Arapov 	unsigned long orig_ret_vaddr, trampoline_vaddr;
1551db087ef6SOleg Nesterov 	bool chained;
15520dfd0eb8SAnton Arapov 
15530dfd0eb8SAnton Arapov 	if (!get_xol_area())
15540dfd0eb8SAnton Arapov 		return;
15550dfd0eb8SAnton Arapov 
15560dfd0eb8SAnton Arapov 	utask = get_utask();
15570dfd0eb8SAnton Arapov 	if (!utask)
15580dfd0eb8SAnton Arapov 		return;
15590dfd0eb8SAnton Arapov 
1560ded49c55SAnton Arapov 	if (utask->depth >= MAX_URETPROBE_DEPTH) {
1561ded49c55SAnton Arapov 		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
1562ded49c55SAnton Arapov 				" nestedness limit pid/tgid=%d/%d\n",
1563ded49c55SAnton Arapov 				current->pid, current->tgid);
1564ded49c55SAnton Arapov 		return;
1565ded49c55SAnton Arapov 	}
1566ded49c55SAnton Arapov 
15676c58d0e4SOleg Nesterov 	ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
15680dfd0eb8SAnton Arapov 	if (!ri)
15696c58d0e4SOleg Nesterov 		return;
15700dfd0eb8SAnton Arapov 
15710dfd0eb8SAnton Arapov 	trampoline_vaddr = get_trampoline_vaddr();
15720dfd0eb8SAnton Arapov 	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
15730dfd0eb8SAnton Arapov 	if (orig_ret_vaddr == -1)
15740dfd0eb8SAnton Arapov 		goto fail;
15750dfd0eb8SAnton Arapov 
1576a5b7e1a8SOleg Nesterov 	/* drop the entries invalidated by longjmp() */
1577db087ef6SOleg Nesterov 	chained = (orig_ret_vaddr == trampoline_vaddr);
1578db087ef6SOleg Nesterov 	cleanup_return_instances(utask, chained, regs);
1579a5b7e1a8SOleg Nesterov 
15800dfd0eb8SAnton Arapov 	/*
15810dfd0eb8SAnton Arapov 	 * We don't want to keep the trampoline address on the stack; rather,
15820dfd0eb8SAnton Arapov 	 * keep the original return address of the first caller through all
15830dfd0eb8SAnton Arapov 	 * the subsequent instances. This also makes breakpoint unwrapping easier.
15840dfd0eb8SAnton Arapov 	 */
1585db087ef6SOleg Nesterov 	if (chained) {
15860dfd0eb8SAnton Arapov 		if (!utask->return_instances) {
15870dfd0eb8SAnton Arapov 			/*
15880dfd0eb8SAnton Arapov 			 * This situation is not possible. Likely we have an
15890dfd0eb8SAnton Arapov 			 * attack from user-space.
15900dfd0eb8SAnton Arapov 			 */
15916c58d0e4SOleg Nesterov 			uprobe_warn(current, "handle tail call");
15920dfd0eb8SAnton Arapov 			goto fail;
15930dfd0eb8SAnton Arapov 		}
15940dfd0eb8SAnton Arapov 		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
15950dfd0eb8SAnton Arapov 	}
15960dfd0eb8SAnton Arapov 
1597f231722aSOleg Nesterov 	ri->uprobe = get_uprobe(uprobe);
15980dfd0eb8SAnton Arapov 	ri->func = instruction_pointer(regs);
15997b868e48SOleg Nesterov 	ri->stack = user_stack_pointer(regs);
16000dfd0eb8SAnton Arapov 	ri->orig_ret_vaddr = orig_ret_vaddr;
16010dfd0eb8SAnton Arapov 	ri->chained = chained;
16020dfd0eb8SAnton Arapov 
1603ded49c55SAnton Arapov 	utask->depth++;
16040dfd0eb8SAnton Arapov 	ri->next = utask->return_instances;
16050dfd0eb8SAnton Arapov 	utask->return_instances = ri;
16060dfd0eb8SAnton Arapov 
16070dfd0eb8SAnton Arapov 	return;
16080dfd0eb8SAnton Arapov  fail:
16090dfd0eb8SAnton Arapov 	kfree(ri);
16100dfd0eb8SAnton Arapov }
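
/*
 * Chaining example (a sketch): suppose f1() and f2() both have
 * ret_handlers and f1() ends with a tail call to f2(). When f2() is
 * entered, the return slot on the stack already holds the trampoline
 * installed for f1(), so orig_ret_vaddr == trampoline_vaddr; the new
 * instance is marked chained and inherits the real return address
 * recorded by f1()'s instance instead of storing the trampoline again.
 */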
16110dfd0eb8SAnton Arapov 
16120326f5a9SSrikar Dronamraju /* Prepare to single-step probed instruction out of line. */
16130326f5a9SSrikar Dronamraju static int
1614a6cb3f6dSOleg Nesterov pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
16150326f5a9SSrikar Dronamraju {
1616a6cb3f6dSOleg Nesterov 	struct uprobe_task *utask;
1617a6cb3f6dSOleg Nesterov 	unsigned long xol_vaddr;
1618aba51024SOleg Nesterov 	int err;
1619d4b3b638SSrikar Dronamraju 
1620608e7427SOleg Nesterov 	utask = get_utask();
1621608e7427SOleg Nesterov 	if (!utask)
1622608e7427SOleg Nesterov 		return -ENOMEM;
1623a6cb3f6dSOleg Nesterov 
1624a6cb3f6dSOleg Nesterov 	xol_vaddr = xol_get_insn_slot(uprobe);
1625a6cb3f6dSOleg Nesterov 	if (!xol_vaddr)
1626a6cb3f6dSOleg Nesterov 		return -ENOMEM;
1627a6cb3f6dSOleg Nesterov 
1628a6cb3f6dSOleg Nesterov 	utask->xol_vaddr = xol_vaddr;
1629a6cb3f6dSOleg Nesterov 	utask->vaddr = bp_vaddr;
1630a6cb3f6dSOleg Nesterov 
1631aba51024SOleg Nesterov 	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
1632aba51024SOleg Nesterov 	if (unlikely(err)) {
1633aba51024SOleg Nesterov 		xol_free_insn_slot(current);
1634aba51024SOleg Nesterov 		return err;
1635aba51024SOleg Nesterov 	}
1636aba51024SOleg Nesterov 
1637608e7427SOleg Nesterov 	utask->active_uprobe = uprobe;
1638608e7427SOleg Nesterov 	utask->state = UTASK_SSTEP;
1639aba51024SOleg Nesterov 	return 0;
16400326f5a9SSrikar Dronamraju }
16410326f5a9SSrikar Dronamraju 
16420326f5a9SSrikar Dronamraju /*
16430326f5a9SSrikar Dronamraju  * If we are singlestepping, then ensure this thread is not connected to
16440326f5a9SSrikar Dronamraju  * non-fatal signals until completion of singlestep.  When xol insn itself
16450326f5a9SSrikar Dronamraju  * triggers the signal,  restart the original insn even if the task is
16460326f5a9SSrikar Dronamraju  * triggers the signal, restart the original insn even if the task is
16470326f5a9SSrikar Dronamraju  * already SIGKILL'ed (since coredump should report the correct ip).  This
16480326f5a9SSrikar Dronamraju  * is even more important if the task has a handler for SIGSEGV/etc., the
16490326f5a9SSrikar Dronamraju  * handler, and SSTEP can never finish in this case.
16500326f5a9SSrikar Dronamraju  */
16510326f5a9SSrikar Dronamraju bool uprobe_deny_signal(void)
16520326f5a9SSrikar Dronamraju {
16530326f5a9SSrikar Dronamraju 	struct task_struct *t = current;
16540326f5a9SSrikar Dronamraju 	struct uprobe_task *utask = t->utask;
16550326f5a9SSrikar Dronamraju 
16560326f5a9SSrikar Dronamraju 	if (likely(!utask || !utask->active_uprobe))
16570326f5a9SSrikar Dronamraju 		return false;
16580326f5a9SSrikar Dronamraju 
16590326f5a9SSrikar Dronamraju 	WARN_ON_ONCE(utask->state != UTASK_SSTEP);
16600326f5a9SSrikar Dronamraju 
16610326f5a9SSrikar Dronamraju 	if (signal_pending(t)) {
16620326f5a9SSrikar Dronamraju 		spin_lock_irq(&t->sighand->siglock);
16630326f5a9SSrikar Dronamraju 		clear_tsk_thread_flag(t, TIF_SIGPENDING);
16640326f5a9SSrikar Dronamraju 		spin_unlock_irq(&t->sighand->siglock);
16650326f5a9SSrikar Dronamraju 
16660326f5a9SSrikar Dronamraju 		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
16670326f5a9SSrikar Dronamraju 			utask->state = UTASK_SSTEP_TRAPPED;
16680326f5a9SSrikar Dronamraju 			set_tsk_thread_flag(t, TIF_UPROBE);
16690326f5a9SSrikar Dronamraju 		}
16700326f5a9SSrikar Dronamraju 	}
16710326f5a9SSrikar Dronamraju 
16720326f5a9SSrikar Dronamraju 	return true;
16730326f5a9SSrikar Dronamraju }
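
/*
 * Pairing note: the TIF_SIGPENDING flag cleared here is recomputed by the
 * recalc_sigpending() call in handle_singlestep(), so non-fatal signals
 * are merely deferred for the duration of the single-step, not lost.
 */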
16740326f5a9SSrikar Dronamraju 
1675499a4f3eSOleg Nesterov static void mmf_recalc_uprobes(struct mm_struct *mm)
1676499a4f3eSOleg Nesterov {
1677499a4f3eSOleg Nesterov 	struct vm_area_struct *vma;
1678499a4f3eSOleg Nesterov 
1679499a4f3eSOleg Nesterov 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
1680499a4f3eSOleg Nesterov 		if (!valid_vma(vma, false))
1681499a4f3eSOleg Nesterov 			continue;
1682499a4f3eSOleg Nesterov 		/*
1683499a4f3eSOleg Nesterov 		 * This is not strictly accurate, we can race with
1684499a4f3eSOleg Nesterov 		 * uprobe_unregister() and see the already removed
1685499a4f3eSOleg Nesterov 		 * uprobe if delete_uprobe() was not yet called.
168663633cbfSOleg Nesterov 		 * Or this uprobe can be filtered out.
1687499a4f3eSOleg Nesterov 		 */
1688499a4f3eSOleg Nesterov 		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
1689499a4f3eSOleg Nesterov 			return;
1690499a4f3eSOleg Nesterov 	}
1691499a4f3eSOleg Nesterov 
1692499a4f3eSOleg Nesterov 	clear_bit(MMF_HAS_UPROBES, &mm->flags);
1693499a4f3eSOleg Nesterov }
1694499a4f3eSOleg Nesterov 
16950908ad6eSAnanth N Mavinakayanahalli static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
1696ec75fba9SOleg Nesterov {
1697ec75fba9SOleg Nesterov 	struct page *page;
1698ec75fba9SOleg Nesterov 	uprobe_opcode_t opcode;
1699ec75fba9SOleg Nesterov 	int result;
1700ec75fba9SOleg Nesterov 
1701ec75fba9SOleg Nesterov 	pagefault_disable();
1702bd28b145SLinus Torvalds 	result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
1703ec75fba9SOleg Nesterov 	pagefault_enable();
1704ec75fba9SOleg Nesterov 
1705ec75fba9SOleg Nesterov 	if (likely(result == 0))
1706ec75fba9SOleg Nesterov 		goto out;
1707ec75fba9SOleg Nesterov 
17081e987790SDave Hansen 	/*
17091e987790SDave Hansen 	 * The NULL 'tsk' here ensures that any faults that occur here
17101e987790SDave Hansen 	 * will not be accounted to the task.  'mm' *is* current->mm,
17111e987790SDave Hansen 	 * but we treat this as a 'remote' access since it is
17121e987790SDave Hansen 	 * essentially a kernel access to the memory.
17131e987790SDave Hansen 	 */
17149beae1eaSLorenzo Stoakes 	result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page,
17155b56d49fSLorenzo Stoakes 			NULL, NULL);
1716ec75fba9SOleg Nesterov 	if (result < 0)
1717ec75fba9SOleg Nesterov 		return result;
1718ec75fba9SOleg Nesterov 
1719ab0d805cSOleg Nesterov 	copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
1720ec75fba9SOleg Nesterov 	put_page(page);
1721ec75fba9SOleg Nesterov  out:
17220908ad6eSAnanth N Mavinakayanahalli 	/* This needs to return true for any variant of the trap insn */
17230908ad6eSAnanth N Mavinakayanahalli 	return is_trap_insn(&opcode);
1724ec75fba9SOleg Nesterov }
1725ec75fba9SOleg Nesterov 
1726d790d346SOleg Nesterov static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
17270326f5a9SSrikar Dronamraju {
17283a9ea052SOleg Nesterov 	struct mm_struct *mm = current->mm;
17293a9ea052SOleg Nesterov 	struct uprobe *uprobe = NULL;
17300326f5a9SSrikar Dronamraju 	struct vm_area_struct *vma;
17310326f5a9SSrikar Dronamraju 
17320326f5a9SSrikar Dronamraju 	down_read(&mm->mmap_sem);
17330326f5a9SSrikar Dronamraju 	vma = find_vma(mm, bp_vaddr);
17343a9ea052SOleg Nesterov 	if (vma && vma->vm_start <= bp_vaddr) {
17353a9ea052SOleg Nesterov 		if (valid_vma(vma, false)) {
1736f281769eSOleg Nesterov 			struct inode *inode = file_inode(vma->vm_file);
1737cb113b47SOleg Nesterov 			loff_t offset = vaddr_to_offset(vma, bp_vaddr);
17380326f5a9SSrikar Dronamraju 
17390326f5a9SSrikar Dronamraju 			uprobe = find_uprobe(inode, offset);
17400326f5a9SSrikar Dronamraju 		}
1741d790d346SOleg Nesterov 
1742d790d346SOleg Nesterov 		if (!uprobe)
17430908ad6eSAnanth N Mavinakayanahalli 			*is_swbp = is_trap_at_addr(mm, bp_vaddr);
1744d790d346SOleg Nesterov 	} else {
1745d790d346SOleg Nesterov 		*is_swbp = -EFAULT;
17463a9ea052SOleg Nesterov 	}
1747499a4f3eSOleg Nesterov 
1748499a4f3eSOleg Nesterov 	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
1749499a4f3eSOleg Nesterov 		mmf_recalc_uprobes(mm);
17500326f5a9SSrikar Dronamraju 	up_read(&mm->mmap_sem);
17510326f5a9SSrikar Dronamraju 
17523a9ea052SOleg Nesterov 	return uprobe;
17533a9ea052SOleg Nesterov }
17543a9ea052SOleg Nesterov 
1755da1816b1SOleg Nesterov static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
1756da1816b1SOleg Nesterov {
1757da1816b1SOleg Nesterov 	struct uprobe_consumer *uc;
1758da1816b1SOleg Nesterov 	int remove = UPROBE_HANDLER_REMOVE;
17590dfd0eb8SAnton Arapov 	bool need_prep = false; /* prepare return uprobe, when needed */
1760da1816b1SOleg Nesterov 
1761da1816b1SOleg Nesterov 	down_read(&uprobe->register_rwsem);
1762da1816b1SOleg Nesterov 	for (uc = uprobe->consumers; uc; uc = uc->next) {
1763ea024870SAnton Arapov 		int rc = 0;
1764da1816b1SOleg Nesterov 
1765ea024870SAnton Arapov 		if (uc->handler) {
1766ea024870SAnton Arapov 			rc = uc->handler(uc, regs);
1767da1816b1SOleg Nesterov 			WARN(rc & ~UPROBE_HANDLER_MASK,
1768da1816b1SOleg Nesterov 				"bad rc=0x%x from %pf()\n", rc, uc->handler);
1769ea024870SAnton Arapov 		}
17700dfd0eb8SAnton Arapov 
17710dfd0eb8SAnton Arapov 		if (uc->ret_handler)
17720dfd0eb8SAnton Arapov 			need_prep = true;
17730dfd0eb8SAnton Arapov 
1774da1816b1SOleg Nesterov 		remove &= rc;
1775da1816b1SOleg Nesterov 	}
1776da1816b1SOleg Nesterov 
17770dfd0eb8SAnton Arapov 	if (need_prep && !remove)
17780dfd0eb8SAnton Arapov 		prepare_uretprobe(uprobe, regs); /* put bp at return */
17790dfd0eb8SAnton Arapov 
1780da1816b1SOleg Nesterov 	if (remove && uprobe->consumers) {
1781da1816b1SOleg Nesterov 		WARN_ON(!uprobe_is_active(uprobe));
1782da1816b1SOleg Nesterov 		unapply_uprobe(uprobe, current->mm);
1783da1816b1SOleg Nesterov 	}
1784da1816b1SOleg Nesterov 	up_read(&uprobe->register_rwsem);
1785da1816b1SOleg Nesterov }
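
/*
 * Filtering example (a sketch): @remove starts as UPROBE_HANDLER_REMOVE
 * and is AND-ed with every handler's return value, so the breakpoints are
 * stripped from current->mm via unapply_uprobe() only if *all* consumers
 * vote to remove; a single handler returning 0 vetoes the removal.
 */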
1786da1816b1SOleg Nesterov 
1787fec8898dSAnton Arapov static void
1788fec8898dSAnton Arapov handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
1789fec8898dSAnton Arapov {
1790fec8898dSAnton Arapov 	struct uprobe *uprobe = ri->uprobe;
1791fec8898dSAnton Arapov 	struct uprobe_consumer *uc;
1792fec8898dSAnton Arapov 
1793fec8898dSAnton Arapov 	down_read(&uprobe->register_rwsem);
1794fec8898dSAnton Arapov 	for (uc = uprobe->consumers; uc; uc = uc->next) {
1795fec8898dSAnton Arapov 		if (uc->ret_handler)
1796fec8898dSAnton Arapov 			uc->ret_handler(uc, ri->func, regs);
1797fec8898dSAnton Arapov 	}
1798fec8898dSAnton Arapov 	up_read(&uprobe->register_rwsem);
1799fec8898dSAnton Arapov }
1800fec8898dSAnton Arapov 
1801a83cfeb9SOleg Nesterov static struct return_instance *find_next_ret_chain(struct return_instance *ri)
1802a83cfeb9SOleg Nesterov {
1803a83cfeb9SOleg Nesterov 	bool chained;
1804a83cfeb9SOleg Nesterov 
1805a83cfeb9SOleg Nesterov 	do {
1806a83cfeb9SOleg Nesterov 		chained = ri->chained;
1807a83cfeb9SOleg Nesterov 		ri = ri->next;	/* can't be NULL if chained */
1808a83cfeb9SOleg Nesterov 	} while (chained);
1809a83cfeb9SOleg Nesterov 
1810a83cfeb9SOleg Nesterov 	return ri;
1811a83cfeb9SOleg Nesterov }
1812a83cfeb9SOleg Nesterov 
18130b5256c7SOleg Nesterov static void handle_trampoline(struct pt_regs *regs)
1814fec8898dSAnton Arapov {
1815fec8898dSAnton Arapov 	struct uprobe_task *utask;
1816a83cfeb9SOleg Nesterov 	struct return_instance *ri, *next;
18175eeb50deSOleg Nesterov 	bool valid;
1818fec8898dSAnton Arapov 
1819fec8898dSAnton Arapov 	utask = current->utask;
1820fec8898dSAnton Arapov 	if (!utask)
18210b5256c7SOleg Nesterov 		goto sigill;
1822fec8898dSAnton Arapov 
1823fec8898dSAnton Arapov 	ri = utask->return_instances;
1824fec8898dSAnton Arapov 	if (!ri)
18250b5256c7SOleg Nesterov 		goto sigill;
1826fec8898dSAnton Arapov 
18275eeb50deSOleg Nesterov 	do {
1828fec8898dSAnton Arapov 		/*
18295eeb50deSOleg Nesterov 		 * We should throw out the frames invalidated by longjmp().
18305eeb50deSOleg Nesterov 		 * If this chain is valid, then the next one should be alive
18315eeb50deSOleg Nesterov 		 * or NULL; the latter case means that nobody but ri->func
18325eeb50deSOleg Nesterov 		 * could hit this trampoline on return. TODO: sigaltstack().
1833fec8898dSAnton Arapov 		 */
18345eeb50deSOleg Nesterov 		next = find_next_ret_chain(ri);
183586dcb702SOleg Nesterov 		valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);
18365eeb50deSOleg Nesterov 
1837fec8898dSAnton Arapov 		instruction_pointer_set(regs, ri->orig_ret_vaddr);
1838a83cfeb9SOleg Nesterov 		do {
18395eeb50deSOleg Nesterov 			if (valid)
1840fec8898dSAnton Arapov 				handle_uretprobe_chain(ri, regs);
18412bb5e840SOleg Nesterov 			ri = free_ret_instance(ri);
1842878b5a6eSOleg Nesterov 			utask->depth--;
1843a83cfeb9SOleg Nesterov 		} while (ri != next);
18445eeb50deSOleg Nesterov 	} while (!valid);
1845fec8898dSAnton Arapov 
1846fec8898dSAnton Arapov 	utask->return_instances = ri;
18470b5256c7SOleg Nesterov 	return;
1848fec8898dSAnton Arapov 
18490b5256c7SOleg Nesterov  sigill:
18500b5256c7SOleg Nesterov 	uprobe_warn(current, "handle uretprobe, sending SIGILL.");
18510b5256c7SOleg Nesterov 	force_sig_info(SIGILL, SEND_SIG_FORCED, current);
18520b5256c7SOleg Nesterov 
1853fec8898dSAnton Arapov }
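
/*
 * Invalidation example (a sketch): if the probed code longjmp()s over
 * several uretprobed frames, their return_instances are stale. The loop
 * above discards whole chains until arch_uretprobe_is_alive() reports a
 * live one; the weak default below always says "alive", while an arch
 * implementation can compare the recorded ri->stack with the current
 * stack pointer.
 */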
1854fec8898dSAnton Arapov 
18556fe50a28SDavid A. Long bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
18566fe50a28SDavid A. Long {
18576fe50a28SDavid A. Long 	return false;
18586fe50a28SDavid A. Long }
18596fe50a28SDavid A. Long 
186086dcb702SOleg Nesterov bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
186186dcb702SOleg Nesterov 					struct pt_regs *regs)
186297da8976SOleg Nesterov {
186397da8976SOleg Nesterov 	return true;
186497da8976SOleg Nesterov }
186597da8976SOleg Nesterov 
18663a9ea052SOleg Nesterov /*
18673a9ea052SOleg Nesterov  * Run handler and ask thread to singlestep.
18683a9ea052SOleg Nesterov  * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
18693a9ea052SOleg Nesterov  */
18703a9ea052SOleg Nesterov static void handle_swbp(struct pt_regs *regs)
18713a9ea052SOleg Nesterov {
18723a9ea052SOleg Nesterov 	struct uprobe *uprobe;
18733a9ea052SOleg Nesterov 	unsigned long bp_vaddr;
187456bb4cf6SOleg Nesterov 	int uninitialized_var(is_swbp);
18753a9ea052SOleg Nesterov 
18763a9ea052SOleg Nesterov 	bp_vaddr = uprobe_get_swbp_addr(regs);
18770b5256c7SOleg Nesterov 	if (bp_vaddr == get_trampoline_vaddr())
18780b5256c7SOleg Nesterov 		return handle_trampoline(regs);
1879fec8898dSAnton Arapov 
1880fec8898dSAnton Arapov 	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
18810326f5a9SSrikar Dronamraju 	if (!uprobe) {
188256bb4cf6SOleg Nesterov 		if (is_swbp > 0) {
18830326f5a9SSrikar Dronamraju 			/* No matching uprobe; signal SIGTRAP. */
18840326f5a9SSrikar Dronamraju 			send_sig(SIGTRAP, current, 0);
188556bb4cf6SOleg Nesterov 		} else {
188656bb4cf6SOleg Nesterov 			/*
188756bb4cf6SOleg Nesterov 			 * Either we raced with uprobe_unregister() or we can't
188856bb4cf6SOleg Nesterov 			 * access this memory. The latter is only possible if
188956bb4cf6SOleg Nesterov 			 * another thread plays with our ->mm. In both cases
189056bb4cf6SOleg Nesterov 			 * we can simply restart. If this vma was unmapped we
189156bb4cf6SOleg Nesterov 			 * can pretend this insn was not executed yet and get
189256bb4cf6SOleg Nesterov 			 * the (correct) SIGSEGV after restart.
189356bb4cf6SOleg Nesterov 			 */
189456bb4cf6SOleg Nesterov 			instruction_pointer_set(regs, bp_vaddr);
189556bb4cf6SOleg Nesterov 		}
18960326f5a9SSrikar Dronamraju 		return;
18970326f5a9SSrikar Dronamraju 	}
189874e59dfcSOleg Nesterov 
189974e59dfcSOleg Nesterov 	/* change it in advance for ->handler() and restart */
190074e59dfcSOleg Nesterov 	instruction_pointer_set(regs, bp_vaddr);
190174e59dfcSOleg Nesterov 
1902142b18ddSOleg Nesterov 	/*
1903142b18ddSOleg Nesterov 	 * TODO: move copy_insn/etc into _register and remove this hack.
1904142b18ddSOleg Nesterov 	 * After we hit the bp, _unregister + _register can install the
1905142b18ddSOleg Nesterov 	 * new and not-yet-analyzed uprobe at the same address, restart.
1906142b18ddSOleg Nesterov 	 */
1907142b18ddSOleg Nesterov 	smp_rmb(); /* pairs with wmb() in install_breakpoint() */
190871434f2fSOleg Nesterov 	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
190974e59dfcSOleg Nesterov 		goto out;
19100326f5a9SSrikar Dronamraju 
191172fd293aSOleg Nesterov 	/* Tracing handlers use ->utask to communicate with fetch methods */
191272fd293aSOleg Nesterov 	if (!get_utask())
191372fd293aSOleg Nesterov 		goto out;
191472fd293aSOleg Nesterov 
19156fe50a28SDavid A. Long 	if (arch_uprobe_ignore(&uprobe->arch, regs))
19166fe50a28SDavid A. Long 		goto out;
19176fe50a28SDavid A. Long 
19180326f5a9SSrikar Dronamraju 	handler_chain(uprobe, regs);
19196fe50a28SDavid A. Long 
19208a6b1732SOleg Nesterov 	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
19210578a970SOleg Nesterov 		goto out;
19220326f5a9SSrikar Dronamraju 
1923608e7427SOleg Nesterov 	if (!pre_ssout(uprobe, regs, bp_vaddr))
19240326f5a9SSrikar Dronamraju 		return;
19250326f5a9SSrikar Dronamraju 
19268a6b1732SOleg Nesterov 	/* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */
19270578a970SOleg Nesterov out:
19280326f5a9SSrikar Dronamraju 	put_uprobe(uprobe);
19290326f5a9SSrikar Dronamraju }
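
/*
 * End-to-end flow for one probe hit, as implemented above: the task traps
 * on the breakpoint insn, uprobe_pre_sstep_notifier() sets TIF_UPROBE,
 * uprobe_notify_resume() calls handle_swbp(), which runs the consumers via
 * handler_chain() and then either skips the insn via
 * arch_uprobe_skip_sstep() or single-steps a copy out of line via
 * pre_ssout(); handle_singlestep() later restores normal execution.
 */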
19300326f5a9SSrikar Dronamraju 
19310326f5a9SSrikar Dronamraju /*
19320326f5a9SSrikar Dronamraju  * Perform required fix-ups and disable singlestep.
19330326f5a9SSrikar Dronamraju  * Allow pending signals to take effect.
19340326f5a9SSrikar Dronamraju  */
19350326f5a9SSrikar Dronamraju static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
19360326f5a9SSrikar Dronamraju {
19370326f5a9SSrikar Dronamraju 	struct uprobe *uprobe;
1938014940baSOleg Nesterov 	int err = 0;
19390326f5a9SSrikar Dronamraju 
19400326f5a9SSrikar Dronamraju 	uprobe = utask->active_uprobe;
19410326f5a9SSrikar Dronamraju 	if (utask->state == UTASK_SSTEP_ACK)
1942014940baSOleg Nesterov 		err = arch_uprobe_post_xol(&uprobe->arch, regs);
19430326f5a9SSrikar Dronamraju 	else if (utask->state == UTASK_SSTEP_TRAPPED)
19440326f5a9SSrikar Dronamraju 		arch_uprobe_abort_xol(&uprobe->arch, regs);
19450326f5a9SSrikar Dronamraju 	else
19460326f5a9SSrikar Dronamraju 		WARN_ON_ONCE(1);
19470326f5a9SSrikar Dronamraju 
19480326f5a9SSrikar Dronamraju 	put_uprobe(uprobe);
19490326f5a9SSrikar Dronamraju 	utask->active_uprobe = NULL;
19500326f5a9SSrikar Dronamraju 	utask->state = UTASK_RUNNING;
1951d4b3b638SSrikar Dronamraju 	xol_free_insn_slot(current);
19520326f5a9SSrikar Dronamraju 
19530326f5a9SSrikar Dronamraju 	spin_lock_irq(&current->sighand->siglock);
19540326f5a9SSrikar Dronamraju 	recalc_sigpending(); /* see uprobe_deny_signal() */
19550326f5a9SSrikar Dronamraju 	spin_unlock_irq(&current->sighand->siglock);
1956014940baSOleg Nesterov 
1957014940baSOleg Nesterov 	if (unlikely(err)) {
1958014940baSOleg Nesterov 		uprobe_warn(current, "execute the probed insn, sending SIGILL.");
1959014940baSOleg Nesterov 		force_sig_info(SIGILL, SEND_SIG_FORCED, current);
1960014940baSOleg Nesterov 	}
19610326f5a9SSrikar Dronamraju }
19620326f5a9SSrikar Dronamraju 
19630326f5a9SSrikar Dronamraju /*
19641b08e907SOleg Nesterov  * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
19651b08e907SOleg Nesterov  * allows the thread to return from interrupt. After that handle_swbp()
19661b08e907SOleg Nesterov  * sets utask->active_uprobe.
19670326f5a9SSrikar Dronamraju  *
19681b08e907SOleg Nesterov  * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
19691b08e907SOleg Nesterov  * and allows the thread to return from interrupt.
19700326f5a9SSrikar Dronamraju  *
19710326f5a9SSrikar Dronamraju  * While returning to userspace, thread notices the TIF_UPROBE flag and calls
19720326f5a9SSrikar Dronamraju  * uprobe_notify_resume().
19730326f5a9SSrikar Dronamraju  */
19740326f5a9SSrikar Dronamraju void uprobe_notify_resume(struct pt_regs *regs)
19750326f5a9SSrikar Dronamraju {
19760326f5a9SSrikar Dronamraju 	struct uprobe_task *utask;
19770326f5a9SSrikar Dronamraju 
1978db023ea5SOleg Nesterov 	clear_thread_flag(TIF_UPROBE);
1979db023ea5SOleg Nesterov 
19800326f5a9SSrikar Dronamraju 	utask = current->utask;
19811b08e907SOleg Nesterov 	if (utask && utask->active_uprobe)
19820326f5a9SSrikar Dronamraju 		handle_singlestep(utask, regs);
19831b08e907SOleg Nesterov 	else
19841b08e907SOleg Nesterov 		handle_swbp(regs);
19850326f5a9SSrikar Dronamraju }
19860326f5a9SSrikar Dronamraju 
19870326f5a9SSrikar Dronamraju /*
19880326f5a9SSrikar Dronamraju  * uprobe_pre_sstep_notifier gets called from interrupt context as part of
19890326f5a9SSrikar Dronamraju  * the notifier mechanism. Set the TIF_UPROBE flag and indicate a breakpoint hit.
19900326f5a9SSrikar Dronamraju  */
19910326f5a9SSrikar Dronamraju int uprobe_pre_sstep_notifier(struct pt_regs *regs)
19920326f5a9SSrikar Dronamraju {
19930dfd0eb8SAnton Arapov 	if (!current->mm)
19940dfd0eb8SAnton Arapov 		return 0;
19950dfd0eb8SAnton Arapov 
19960dfd0eb8SAnton Arapov 	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
19970dfd0eb8SAnton Arapov 	    (!current->utask || !current->utask->return_instances))
19980326f5a9SSrikar Dronamraju 		return 0;
19990326f5a9SSrikar Dronamraju 
20000326f5a9SSrikar Dronamraju 	set_thread_flag(TIF_UPROBE);
20010326f5a9SSrikar Dronamraju 	return 1;
20020326f5a9SSrikar Dronamraju }
20030326f5a9SSrikar Dronamraju 
20040326f5a9SSrikar Dronamraju /*
20050326f5a9SSrikar Dronamraju  * uprobe_post_sstep_notifier gets called in interrupt context as part of the
20060326f5a9SSrikar Dronamraju  * notifier mechanism. Set the TIF_UPROBE flag and indicate completion of singlestep.
20070326f5a9SSrikar Dronamraju  */
20080326f5a9SSrikar Dronamraju int uprobe_post_sstep_notifier(struct pt_regs *regs)
20090326f5a9SSrikar Dronamraju {
20100326f5a9SSrikar Dronamraju 	struct uprobe_task *utask = current->utask;
20110326f5a9SSrikar Dronamraju 
20120326f5a9SSrikar Dronamraju 	if (!current->mm || !utask || !utask->active_uprobe)
20130326f5a9SSrikar Dronamraju 		/* task is currently not uprobed */
20140326f5a9SSrikar Dronamraju 		return 0;
20150326f5a9SSrikar Dronamraju 
20160326f5a9SSrikar Dronamraju 	utask->state = UTASK_SSTEP_ACK;
20170326f5a9SSrikar Dronamraju 	set_thread_flag(TIF_UPROBE);
20180326f5a9SSrikar Dronamraju 	return 1;
20190326f5a9SSrikar Dronamraju }
20200326f5a9SSrikar Dronamraju 
20210326f5a9SSrikar Dronamraju static struct notifier_block uprobe_exception_nb = {
20220326f5a9SSrikar Dronamraju 	.notifier_call		= arch_uprobe_exception_notify,
20230326f5a9SSrikar Dronamraju 	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
20240326f5a9SSrikar Dronamraju };
20250326f5a9SSrikar Dronamraju 
2026a5f4374aSIngo Molnar static int __init init_uprobes(void)
2027a5f4374aSIngo Molnar {
2028a5f4374aSIngo Molnar 	int i;
2029a5f4374aSIngo Molnar 
203066d06dffSOleg Nesterov 	for (i = 0; i < UPROBES_HASH_SZ; i++)
2031a5f4374aSIngo Molnar 		mutex_init(&uprobes_mmap_mutex[i]);
20320326f5a9SSrikar Dronamraju 
203332cdba1eSOleg Nesterov 	if (percpu_init_rwsem(&dup_mmap_sem))
203432cdba1eSOleg Nesterov 		return -ENOMEM;
203532cdba1eSOleg Nesterov 
20360326f5a9SSrikar Dronamraju 	return register_die_notifier(&uprobe_exception_nb);
2037a5f4374aSIngo Molnar }
2038736e89d9SOleg Nesterov __initcall(init_uprobes);
2039