xref: /openbmc/linux/kernel/events/uprobes.c (revision 1b08e907)
1a5f4374aSIngo Molnar /*
2a5f4374aSIngo Molnar  * User-space Probes (UProbes)
3a5f4374aSIngo Molnar  *
4a5f4374aSIngo Molnar  * This program is free software; you can redistribute it and/or modify
5a5f4374aSIngo Molnar  * it under the terms of the GNU General Public License as published by
6a5f4374aSIngo Molnar  * the Free Software Foundation; either version 2 of the License, or
7a5f4374aSIngo Molnar  * (at your option) any later version.
8a5f4374aSIngo Molnar  *
9a5f4374aSIngo Molnar  * This program is distributed in the hope that it will be useful,
10a5f4374aSIngo Molnar  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11a5f4374aSIngo Molnar  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12a5f4374aSIngo Molnar  * GNU General Public License for more details.
13a5f4374aSIngo Molnar  *
14a5f4374aSIngo Molnar  * You should have received a copy of the GNU General Public License
15a5f4374aSIngo Molnar  * along with this program; if not, write to the Free Software
16a5f4374aSIngo Molnar  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17a5f4374aSIngo Molnar  *
1835aa621bSIngo Molnar  * Copyright (C) IBM Corporation, 2008-2012
19a5f4374aSIngo Molnar  * Authors:
20a5f4374aSIngo Molnar  *	Srikar Dronamraju
21a5f4374aSIngo Molnar  *	Jim Keniston
2235aa621bSIngo Molnar  * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
23a5f4374aSIngo Molnar  */
24a5f4374aSIngo Molnar 
25a5f4374aSIngo Molnar #include <linux/kernel.h>
26a5f4374aSIngo Molnar #include <linux/highmem.h>
27a5f4374aSIngo Molnar #include <linux/pagemap.h>	/* read_mapping_page */
28a5f4374aSIngo Molnar #include <linux/slab.h>
29a5f4374aSIngo Molnar #include <linux/sched.h>
30a5f4374aSIngo Molnar #include <linux/rmap.h>		/* anon_vma_prepare */
31a5f4374aSIngo Molnar #include <linux/mmu_notifier.h>	/* set_pte_at_notify */
32a5f4374aSIngo Molnar #include <linux/swap.h>		/* try_to_free_swap */
330326f5a9SSrikar Dronamraju #include <linux/ptrace.h>	/* user_enable_single_step */
340326f5a9SSrikar Dronamraju #include <linux/kdebug.h>	/* notifier mechanism */
35194f8dcbSOleg Nesterov #include "../../mm/internal.h"	/* munlock_vma_page */
36a5f4374aSIngo Molnar 
37a5f4374aSIngo Molnar #include <linux/uprobes.h>
38a5f4374aSIngo Molnar 
39d4b3b638SSrikar Dronamraju #define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
40d4b3b638SSrikar Dronamraju #define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE
41d4b3b638SSrikar Dronamraju 
42a5f4374aSIngo Molnar static struct rb_root uprobes_tree = RB_ROOT;
43a5f4374aSIngo Molnar 
44a5f4374aSIngo Molnar static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */
45a5f4374aSIngo Molnar 
46a5f4374aSIngo Molnar #define UPROBES_HASH_SZ	13
47a5f4374aSIngo Molnar 
48c5784de2SPeter Zijlstra /*
49c5784de2SPeter Zijlstra  * We need separate register/unregister and mmap/munmap lock hashes because
50c5784de2SPeter Zijlstra  * of mmap_sem nesting.
51c5784de2SPeter Zijlstra  *
52c5784de2SPeter Zijlstra  * uprobe_register() needs to install probes on (potentially) all processes
53c5784de2SPeter Zijlstra  * and thus needs to acquire multiple mmap_sems (consecutively, not
54c5784de2SPeter Zijlstra  * concurrently), whereas uprobe_mmap() is called while holding mmap_sem
55c5784de2SPeter Zijlstra  * for the particular process doing the mmap.
56c5784de2SPeter Zijlstra  *
57c5784de2SPeter Zijlstra  * uprobe_register()->register_for_each_vma() needs to drop/acquire mmap_sem
58c5784de2SPeter Zijlstra  * because of lock order against i_mmap_mutex. This means there's a hole in
59c5784de2SPeter Zijlstra  * the register vma iteration where a mmap() can happen.
60c5784de2SPeter Zijlstra  *
61c5784de2SPeter Zijlstra  * Thus uprobe_register() can race with uprobe_mmap() and we can try to
62c5784de2SPeter Zijlstra  * install a probe where one is already installed.
63c5784de2SPeter Zijlstra  */
64c5784de2SPeter Zijlstra 
65a5f4374aSIngo Molnar /* serialize (un)register */
66a5f4374aSIngo Molnar static struct mutex uprobes_mutex[UPROBES_HASH_SZ];
67a5f4374aSIngo Molnar 
68a5f4374aSIngo Molnar #define uprobes_hash(v)		(&uprobes_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
69a5f4374aSIngo Molnar 
70a5f4374aSIngo Molnar /* serialize uprobe->pending_list */
71a5f4374aSIngo Molnar static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
72a5f4374aSIngo Molnar #define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
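
/*
 * For example (this is how uprobe_register() below uses it): probes on
 * the same inode always hash to the same mutex, so register and
 * unregister for a given inode:offset serialize against each other:
 *
 *	mutex_lock(uprobes_hash(inode));
 *	uprobe = alloc_uprobe(inode, offset);
 *	...
 *	mutex_unlock(uprobes_hash(inode));
 */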
73a5f4374aSIngo Molnar 
74a5f4374aSIngo Molnar /*
75a5f4374aSIngo Molnar  * uprobe_events allows us to skip uprobe_mmap() if there are no uprobe
76a5f4374aSIngo Molnar  * events active at this time.  A fine-grained per-inode count would
77a5f4374aSIngo Molnar  * probably be better.
78a5f4374aSIngo Molnar  */
79a5f4374aSIngo Molnar static atomic_t uprobe_events = ATOMIC_INIT(0);
80a5f4374aSIngo Molnar 
813ff54efdSSrikar Dronamraju struct uprobe {
823ff54efdSSrikar Dronamraju 	struct rb_node		rb_node;	/* node in the rb tree */
833ff54efdSSrikar Dronamraju 	atomic_t		ref;
843ff54efdSSrikar Dronamraju 	struct rw_semaphore	consumer_rwsem;
853ff54efdSSrikar Dronamraju 	struct list_head	pending_list;
863ff54efdSSrikar Dronamraju 	struct uprobe_consumer	*consumers;
873ff54efdSSrikar Dronamraju 	struct inode		*inode;		/* Also hold a ref to inode */
883ff54efdSSrikar Dronamraju 	loff_t			offset;
893ff54efdSSrikar Dronamraju 	int			flags;
903ff54efdSSrikar Dronamraju 	struct arch_uprobe	arch;
913ff54efdSSrikar Dronamraju };
923ff54efdSSrikar Dronamraju 
93a5f4374aSIngo Molnar /*
94a5f4374aSIngo Molnar  * valid_vma: Verify if the specified vma is an executable vma.
95a5f4374aSIngo Molnar  * Relax restrictions while unregistering: vm_flags might have
96a5f4374aSIngo Molnar  * changed after the breakpoint was inserted.
97a5f4374aSIngo Molnar  *	- is_register: indicates if we are in register context.
98a5f4374aSIngo Molnar  *	- Return true if the specified virtual address is in an
99a5f4374aSIngo Molnar  *	  executable vma.
100a5f4374aSIngo Molnar  */
101a5f4374aSIngo Molnar static bool valid_vma(struct vm_area_struct *vma, bool is_register)
102a5f4374aSIngo Molnar {
103a5f4374aSIngo Molnar 	if (!vma->vm_file)
104a5f4374aSIngo Molnar 		return false;
105a5f4374aSIngo Molnar 
106a5f4374aSIngo Molnar 	if (!is_register)
107a5f4374aSIngo Molnar 		return true;
108a5f4374aSIngo Molnar 
109ea131377SOleg Nesterov 	if ((vma->vm_flags & (VM_HUGETLB|VM_READ|VM_WRITE|VM_EXEC|VM_SHARED))
110ea131377SOleg Nesterov 				== (VM_READ|VM_EXEC))
111a5f4374aSIngo Molnar 		return true;
112a5f4374aSIngo Molnar 
113a5f4374aSIngo Molnar 	return false;
114a5f4374aSIngo Molnar }
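
/*
 * In /proc/<pid>/maps terms: a private r-xp file mapping is valid for
 * registration, while writable, shared or hugetlb mappings are not.
 * For unregistration any file-backed vma is accepted.
 */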
115a5f4374aSIngo Molnar 
11657683f72SOleg Nesterov static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
117a5f4374aSIngo Molnar {
11857683f72SOleg Nesterov 	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
119a5f4374aSIngo Molnar }
120a5f4374aSIngo Molnar 
121cb113b47SOleg Nesterov static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
122cb113b47SOleg Nesterov {
123cb113b47SOleg Nesterov 	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
124cb113b47SOleg Nesterov }
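
/*
 * The two helpers above are inverses of each other. Worked example
 * (hypothetical values, 4K pages): for a vma with vm_start == 0x400000
 * and vm_pgoff == 2 (i.e. the mapping starts at file offset 0x2000),
 * offset_to_vaddr(vma, 0x2010) == 0x400010 and
 * vaddr_to_offset(vma, 0x400010) == 0x2010.
 */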
125cb113b47SOleg Nesterov 
126a5f4374aSIngo Molnar /**
127a5f4374aSIngo Molnar  * __replace_page - replace page in vma by new page.
128a5f4374aSIngo Molnar  * based on replace_page in mm/ksm.c
129a5f4374aSIngo Molnar  *
130a5f4374aSIngo Molnar  * @vma:      vma that holds the pte pointing to page
131c517ee74SOleg Nesterov  * @addr:     address the old @page is mapped at
132a5f4374aSIngo Molnar  * @page:     the COWed page we are replacing with kpage
133a5f4374aSIngo Molnar  * @kpage:    the modified page with which we replace @page
134a5f4374aSIngo Molnar  *
135a5f4374aSIngo Molnar  * Returns 0 on success, -EFAULT on failure.
136a5f4374aSIngo Molnar  */
137c517ee74SOleg Nesterov static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
138c517ee74SOleg Nesterov 				struct page *page, struct page *kpage)
139a5f4374aSIngo Molnar {
140a5f4374aSIngo Molnar 	struct mm_struct *mm = vma->vm_mm;
1415323ce71SOleg Nesterov 	spinlock_t *ptl;
1425323ce71SOleg Nesterov 	pte_t *ptep;
1439f92448cSOleg Nesterov 	int err;
144a5f4374aSIngo Molnar 
145194f8dcbSOleg Nesterov 	/* For try_to_free_swap() and munlock_vma_page() below */
1469f92448cSOleg Nesterov 	lock_page(page);
1479f92448cSOleg Nesterov 
1489f92448cSOleg Nesterov 	err = -EAGAIN;
1495323ce71SOleg Nesterov 	ptep = page_check_address(page, mm, addr, &ptl, 0);
150a5f4374aSIngo Molnar 	if (!ptep)
1519f92448cSOleg Nesterov 		goto unlock;
152a5f4374aSIngo Molnar 
153a5f4374aSIngo Molnar 	get_page(kpage);
154a5f4374aSIngo Molnar 	page_add_new_anon_rmap(kpage, vma, addr);
155a5f4374aSIngo Molnar 
1567396fa81SSrikar Dronamraju 	if (!PageAnon(page)) {
1577396fa81SSrikar Dronamraju 		dec_mm_counter(mm, MM_FILEPAGES);
1587396fa81SSrikar Dronamraju 		inc_mm_counter(mm, MM_ANONPAGES);
1597396fa81SSrikar Dronamraju 	}
1607396fa81SSrikar Dronamraju 
161a5f4374aSIngo Molnar 	flush_cache_page(vma, addr, pte_pfn(*ptep));
162a5f4374aSIngo Molnar 	ptep_clear_flush(vma, addr, ptep);
163a5f4374aSIngo Molnar 	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
164a5f4374aSIngo Molnar 
165a5f4374aSIngo Molnar 	page_remove_rmap(page);
166a5f4374aSIngo Molnar 	if (!page_mapped(page))
167a5f4374aSIngo Molnar 		try_to_free_swap(page);
168a5f4374aSIngo Molnar 	pte_unmap_unlock(ptep, ptl);
169a5f4374aSIngo Molnar 
170194f8dcbSOleg Nesterov 	if (vma->vm_flags & VM_LOCKED)
171194f8dcbSOleg Nesterov 		munlock_vma_page(page);
172194f8dcbSOleg Nesterov 	put_page(page);
173194f8dcbSOleg Nesterov 
1749f92448cSOleg Nesterov 	err = 0;
1759f92448cSOleg Nesterov  unlock:
1769f92448cSOleg Nesterov 	unlock_page(page);
1779f92448cSOleg Nesterov 	return err;
178a5f4374aSIngo Molnar }
179a5f4374aSIngo Molnar 
180a5f4374aSIngo Molnar /**
1815cb4ac3aSSrikar Dronamraju  * is_swbp_insn - check if instruction is breakpoint instruction.
182a5f4374aSIngo Molnar  * @insn: instruction to be checked.
1835cb4ac3aSSrikar Dronamraju  * Default implementation of is_swbp_insn
184a5f4374aSIngo Molnar  * Returns true if @insn is a breakpoint instruction.
185a5f4374aSIngo Molnar  */
1865cb4ac3aSSrikar Dronamraju bool __weak is_swbp_insn(uprobe_opcode_t *insn)
187a5f4374aSIngo Molnar {
1885cb4ac3aSSrikar Dronamraju 	return *insn == UPROBE_SWBP_INSN;
189a5f4374aSIngo Molnar }
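
/*
 * On x86, for instance, UPROBE_SWBP_INSN is the single-byte int3
 * opcode (0xcc); an architecture with a different breakpoint encoding
 * can override this weak default.
 */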
190a5f4374aSIngo Molnar 
191a5f4374aSIngo Molnar /*
192a5f4374aSIngo Molnar  * NOTE:
193a5f4374aSIngo Molnar  * Expect the breakpoint instruction to be the smallest instruction size
194a5f4374aSIngo Molnar  * for the architecture. If an arch has variable-length instructions and
195a5f4374aSIngo Molnar  * the breakpoint instruction is not the smallest instruction supported
196a5f4374aSIngo Molnar  * by that architecture, then read_opcode / write_opcode need to be
197a5f4374aSIngo Molnar  * modified accordingly. This is never a problem for archs that
198a5f4374aSIngo Molnar  * have fixed-length instructions.
199a5f4374aSIngo Molnar  */
200a5f4374aSIngo Molnar 
201a5f4374aSIngo Molnar /*
202a5f4374aSIngo Molnar  * write_opcode - write the opcode at a given virtual address.
203e3343e6aSSrikar Dronamraju  * @auprobe: arch breakpointing information.
204a5f4374aSIngo Molnar  * @mm: the probed process address space.
205a5f4374aSIngo Molnar  * @vaddr: the virtual address to store the opcode.
206a5f4374aSIngo Molnar  * @opcode: opcode to be written at @vaddr.
207a5f4374aSIngo Molnar  *
208a5f4374aSIngo Molnar  * Called with mm->mmap_sem held (for read and with a reference to
209a5f4374aSIngo Molnar  * mm).
210a5f4374aSIngo Molnar  *
211a5f4374aSIngo Molnar  * For mm @mm, write the opcode at @vaddr.
212a5f4374aSIngo Molnar  * Return 0 (success) or a negative errno.
213a5f4374aSIngo Molnar  */
214e3343e6aSSrikar Dronamraju static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
215a5f4374aSIngo Molnar 			unsigned long vaddr, uprobe_opcode_t opcode)
216a5f4374aSIngo Molnar {
217a5f4374aSIngo Molnar 	struct page *old_page, *new_page;
218a5f4374aSIngo Molnar 	void *vaddr_old, *vaddr_new;
219a5f4374aSIngo Molnar 	struct vm_area_struct *vma;
220a5f4374aSIngo Molnar 	int ret;
221f403072cSOleg Nesterov 
2225323ce71SOleg Nesterov retry:
223a5f4374aSIngo Molnar 	/* Read the page with vaddr into memory */
224a5f4374aSIngo Molnar 	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma);
225a5f4374aSIngo Molnar 	if (ret <= 0)
226a5f4374aSIngo Molnar 		return ret;
227a5f4374aSIngo Molnar 
228a5f4374aSIngo Molnar 	ret = -ENOMEM;
229a5f4374aSIngo Molnar 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
230a5f4374aSIngo Molnar 	if (!new_page)
2319f92448cSOleg Nesterov 		goto put_old;
232a5f4374aSIngo Molnar 
233a5f4374aSIngo Molnar 	__SetPageUptodate(new_page);
234a5f4374aSIngo Molnar 
235a5f4374aSIngo Molnar 	/* copy the page now that we've got it stable */
236a5f4374aSIngo Molnar 	vaddr_old = kmap_atomic(old_page);
237a5f4374aSIngo Molnar 	vaddr_new = kmap_atomic(new_page);
238a5f4374aSIngo Molnar 
239a5f4374aSIngo Molnar 	memcpy(vaddr_new, vaddr_old, PAGE_SIZE);
240d9c4a30eSOleg Nesterov 	memcpy(vaddr_new + (vaddr & ~PAGE_MASK), &opcode, UPROBE_SWBP_INSN_SIZE);
241a5f4374aSIngo Molnar 
242a5f4374aSIngo Molnar 	kunmap_atomic(vaddr_new);
243a5f4374aSIngo Molnar 	kunmap_atomic(vaddr_old);
244a5f4374aSIngo Molnar 
245a5f4374aSIngo Molnar 	ret = anon_vma_prepare(vma);
246a5f4374aSIngo Molnar 	if (ret)
2479f92448cSOleg Nesterov 		goto put_new;
248a5f4374aSIngo Molnar 
249c517ee74SOleg Nesterov 	ret = __replace_page(vma, vaddr, old_page, new_page);
250a5f4374aSIngo Molnar 
2519f92448cSOleg Nesterov put_new:
252a5f4374aSIngo Molnar 	page_cache_release(new_page);
2539f92448cSOleg Nesterov put_old:
254a5f4374aSIngo Molnar 	put_page(old_page);
255a5f4374aSIngo Molnar 
2565323ce71SOleg Nesterov 	if (unlikely(ret == -EAGAIN))
2575323ce71SOleg Nesterov 		goto retry;
258a5f4374aSIngo Molnar 	return ret;
259a5f4374aSIngo Molnar }
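
/*
 * Note: __replace_page() fails with -EAGAIN when the pte backing @vaddr
 * changed under us (page_check_address() found nothing), in which case
 * the retry above restarts the whole get_user_pages()/copy/replace
 * cycle against the current page.
 */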
260a5f4374aSIngo Molnar 
261a5f4374aSIngo Molnar /**
262a5f4374aSIngo Molnar  * read_opcode - read the opcode at a given virtual address.
263a5f4374aSIngo Molnar  * @mm: the probed process address space.
264a5f4374aSIngo Molnar  * @vaddr: the virtual address to read the opcode.
265a5f4374aSIngo Molnar  * @opcode: location to store the read opcode.
266a5f4374aSIngo Molnar  *
267a5f4374aSIngo Molnar  * Called with mm->mmap_sem held (for read and with a reference to
268a5f4374aSIngo Molnar  * mm).
269a5f4374aSIngo Molnar  *
270a5f4374aSIngo Molnar  * For mm @mm, read the opcode at @vaddr and store it in @opcode.
271a5f4374aSIngo Molnar  * Return 0 (success) or a negative errno.
272a5f4374aSIngo Molnar  */
273a5f4374aSIngo Molnar static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t *opcode)
274a5f4374aSIngo Molnar {
275a5f4374aSIngo Molnar 	struct page *page;
276a5f4374aSIngo Molnar 	void *vaddr_new;
277a5f4374aSIngo Molnar 	int ret;
278a5f4374aSIngo Molnar 
279a3d7bb47SOleg Nesterov 	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
280a5f4374aSIngo Molnar 	if (ret <= 0)
281a5f4374aSIngo Molnar 		return ret;
282a5f4374aSIngo Molnar 
283a5f4374aSIngo Molnar 	vaddr_new = kmap_atomic(page);
284a5f4374aSIngo Molnar 	vaddr &= ~PAGE_MASK;
2855cb4ac3aSSrikar Dronamraju 	memcpy(opcode, vaddr_new + vaddr, UPROBE_SWBP_INSN_SIZE);
286a5f4374aSIngo Molnar 	kunmap_atomic(vaddr_new);
287a5f4374aSIngo Molnar 
288a5f4374aSIngo Molnar 	put_page(page);
289a5f4374aSIngo Molnar 
290a5f4374aSIngo Molnar 	return 0;
291a5f4374aSIngo Molnar }
292a5f4374aSIngo Molnar 
2935cb4ac3aSSrikar Dronamraju static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
294a5f4374aSIngo Molnar {
295a5f4374aSIngo Molnar 	uprobe_opcode_t opcode;
296a5f4374aSIngo Molnar 	int result;
297a5f4374aSIngo Molnar 
298c00b2750SOleg Nesterov 	if (current->mm == mm) {
299c00b2750SOleg Nesterov 		pagefault_disable();
300c00b2750SOleg Nesterov 		result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
301c00b2750SOleg Nesterov 								sizeof(opcode));
302c00b2750SOleg Nesterov 		pagefault_enable();
303c00b2750SOleg Nesterov 
304c00b2750SOleg Nesterov 		if (likely(result == 0))
305c00b2750SOleg Nesterov 			goto out;
306c00b2750SOleg Nesterov 	}
307c00b2750SOleg Nesterov 
308a5f4374aSIngo Molnar 	result = read_opcode(mm, vaddr, &opcode);
309a5f4374aSIngo Molnar 	if (result)
310a5f4374aSIngo Molnar 		return result;
311c00b2750SOleg Nesterov out:
3125cb4ac3aSSrikar Dronamraju 	if (is_swbp_insn(&opcode))
313a5f4374aSIngo Molnar 		return 1;
314a5f4374aSIngo Molnar 
315a5f4374aSIngo Molnar 	return 0;
316a5f4374aSIngo Molnar }
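
/*
 * is_swbp_at_addr() thus has a three-way return convention: 1 if a
 * breakpoint instruction is present at @vaddr, 0 if not, and a negative
 * errno if the opcode could not be read at all. set_swbp() and
 * set_orig_insn() below depend on distinguishing all three cases.
 */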
317a5f4374aSIngo Molnar 
318a5f4374aSIngo Molnar /**
3195cb4ac3aSSrikar Dronamraju  * set_swbp - store breakpoint at a given address.
320e3343e6aSSrikar Dronamraju  * @auprobe: arch specific probepoint information.
321a5f4374aSIngo Molnar  * @mm: the probed process address space.
322a5f4374aSIngo Molnar  * @vaddr: the virtual address to insert the opcode.
323a5f4374aSIngo Molnar  *
324a5f4374aSIngo Molnar  * For mm @mm, store the breakpoint instruction at @vaddr.
325a5f4374aSIngo Molnar  * Return 0 (success) or a negative errno.
326a5f4374aSIngo Molnar  */
3275cb4ac3aSSrikar Dronamraju int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
328a5f4374aSIngo Molnar {
329a5f4374aSIngo Molnar 	int result;
330c5784de2SPeter Zijlstra 	/*
331c5784de2SPeter Zijlstra 	 * See the comment near uprobes_hash().
332c5784de2SPeter Zijlstra 	 */
3335cb4ac3aSSrikar Dronamraju 	result = is_swbp_at_addr(mm, vaddr);
334a5f4374aSIngo Molnar 	if (result == 1)
33578f74116SOleg Nesterov 		return 0;
336a5f4374aSIngo Molnar 
337a5f4374aSIngo Molnar 	if (result)
338a5f4374aSIngo Molnar 		return result;
339a5f4374aSIngo Molnar 
3405cb4ac3aSSrikar Dronamraju 	return write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
341a5f4374aSIngo Molnar }
342a5f4374aSIngo Molnar 
343a5f4374aSIngo Molnar /**
344a5f4374aSIngo Molnar  * set_orig_insn - Restore the original instruction.
345a5f4374aSIngo Molnar  * @mm: the probed process address space.
346e3343e6aSSrikar Dronamraju  * @auprobe: arch specific probepoint information.
347a5f4374aSIngo Molnar  * @vaddr: the virtual address to insert the opcode.
348a5f4374aSIngo Molnar  *
349a5f4374aSIngo Molnar  * For mm @mm, restore the original opcode (opcode) at @vaddr.
350a5f4374aSIngo Molnar  * Return 0 (success) or a negative errno.
351a5f4374aSIngo Molnar  */
352a5f4374aSIngo Molnar int __weak
353ded86e7cSOleg Nesterov set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
354a5f4374aSIngo Molnar {
355a5f4374aSIngo Molnar 	int result;
356a5f4374aSIngo Molnar 
3575cb4ac3aSSrikar Dronamraju 	result = is_swbp_at_addr(mm, vaddr);
358a5f4374aSIngo Molnar 	if (!result)
359a5f4374aSIngo Molnar 		return -EINVAL;
360a5f4374aSIngo Molnar 
361a5f4374aSIngo Molnar 	if (result != 1)
362a5f4374aSIngo Molnar 		return result;
363ded86e7cSOleg Nesterov 
364e3343e6aSSrikar Dronamraju 	return write_opcode(auprobe, mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
365a5f4374aSIngo Molnar }
366a5f4374aSIngo Molnar 
367a5f4374aSIngo Molnar static int match_uprobe(struct uprobe *l, struct uprobe *r)
368a5f4374aSIngo Molnar {
369a5f4374aSIngo Molnar 	if (l->inode < r->inode)
370a5f4374aSIngo Molnar 		return -1;
371a5f4374aSIngo Molnar 
372a5f4374aSIngo Molnar 	if (l->inode > r->inode)
373a5f4374aSIngo Molnar 		return 1;
374a5f4374aSIngo Molnar 
375a5f4374aSIngo Molnar 	if (l->offset < r->offset)
376a5f4374aSIngo Molnar 		return -1;
377a5f4374aSIngo Molnar 
378a5f4374aSIngo Molnar 	if (l->offset > r->offset)
379a5f4374aSIngo Molnar 		return 1;
380a5f4374aSIngo Molnar 
381a5f4374aSIngo Molnar 	return 0;
382a5f4374aSIngo Molnar }
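
/*
 * match_uprobe() defines a total order on (inode, offset) keys: the
 * rbtree walks below go left on a negative result and right on a
 * positive one, so uprobes_tree is effectively sorted by inode first
 * and offset second.
 */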
383a5f4374aSIngo Molnar 
384a5f4374aSIngo Molnar static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
385a5f4374aSIngo Molnar {
386a5f4374aSIngo Molnar 	struct uprobe u = { .inode = inode, .offset = offset };
387a5f4374aSIngo Molnar 	struct rb_node *n = uprobes_tree.rb_node;
388a5f4374aSIngo Molnar 	struct uprobe *uprobe;
389a5f4374aSIngo Molnar 	int match;
390a5f4374aSIngo Molnar 
391a5f4374aSIngo Molnar 	while (n) {
392a5f4374aSIngo Molnar 		uprobe = rb_entry(n, struct uprobe, rb_node);
393a5f4374aSIngo Molnar 		match = match_uprobe(&u, uprobe);
394a5f4374aSIngo Molnar 		if (!match) {
395a5f4374aSIngo Molnar 			atomic_inc(&uprobe->ref);
396a5f4374aSIngo Molnar 			return uprobe;
397a5f4374aSIngo Molnar 		}
398a5f4374aSIngo Molnar 
399a5f4374aSIngo Molnar 		if (match < 0)
400a5f4374aSIngo Molnar 			n = n->rb_left;
401a5f4374aSIngo Molnar 		else
402a5f4374aSIngo Molnar 			n = n->rb_right;
403a5f4374aSIngo Molnar 	}
404a5f4374aSIngo Molnar 	return NULL;
405a5f4374aSIngo Molnar }
406a5f4374aSIngo Molnar 
407a5f4374aSIngo Molnar /*
408a5f4374aSIngo Molnar  * Find a uprobe corresponding to a given inode:offset
409a5f4374aSIngo Molnar  * Acquires uprobes_treelock
410a5f4374aSIngo Molnar  */
411a5f4374aSIngo Molnar static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
412a5f4374aSIngo Molnar {
413a5f4374aSIngo Molnar 	struct uprobe *uprobe;
414a5f4374aSIngo Molnar 
4156f47caa0SOleg Nesterov 	spin_lock(&uprobes_treelock);
416a5f4374aSIngo Molnar 	uprobe = __find_uprobe(inode, offset);
4176f47caa0SOleg Nesterov 	spin_unlock(&uprobes_treelock);
418a5f4374aSIngo Molnar 
419a5f4374aSIngo Molnar 	return uprobe;
420a5f4374aSIngo Molnar }
421a5f4374aSIngo Molnar 
422a5f4374aSIngo Molnar static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
423a5f4374aSIngo Molnar {
424a5f4374aSIngo Molnar 	struct rb_node **p = &uprobes_tree.rb_node;
425a5f4374aSIngo Molnar 	struct rb_node *parent = NULL;
426a5f4374aSIngo Molnar 	struct uprobe *u;
427a5f4374aSIngo Molnar 	int match;
428a5f4374aSIngo Molnar 
429a5f4374aSIngo Molnar 	while (*p) {
430a5f4374aSIngo Molnar 		parent = *p;
431a5f4374aSIngo Molnar 		u = rb_entry(parent, struct uprobe, rb_node);
432a5f4374aSIngo Molnar 		match = match_uprobe(uprobe, u);
433a5f4374aSIngo Molnar 		if (!match) {
434a5f4374aSIngo Molnar 			atomic_inc(&u->ref);
435a5f4374aSIngo Molnar 			return u;
436a5f4374aSIngo Molnar 		}
437a5f4374aSIngo Molnar 
438a5f4374aSIngo Molnar 		if (match < 0)
439a5f4374aSIngo Molnar 			p = &parent->rb_left;
440a5f4374aSIngo Molnar 		else
441a5f4374aSIngo Molnar 			p = &parent->rb_right;
442a5f4374aSIngo Molnar 
443a5f4374aSIngo Molnar 	}
444a5f4374aSIngo Molnar 
445a5f4374aSIngo Molnar 	u = NULL;
446a5f4374aSIngo Molnar 	rb_link_node(&uprobe->rb_node, parent, p);
447a5f4374aSIngo Molnar 	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
448a5f4374aSIngo Molnar 	/* get access + creation ref */
449a5f4374aSIngo Molnar 	atomic_set(&uprobe->ref, 2);
450a5f4374aSIngo Molnar 
451a5f4374aSIngo Molnar 	return u;
452a5f4374aSIngo Molnar }
453a5f4374aSIngo Molnar 
454a5f4374aSIngo Molnar /*
455a5f4374aSIngo Molnar  * Acquire uprobes_treelock.
456a5f4374aSIngo Molnar  * If a matching uprobe already exists in the rbtree,
457a5f4374aSIngo Molnar  *	increment its (access) refcount and return the matching uprobe.
458a5f4374aSIngo Molnar  *
459a5f4374aSIngo Molnar  * If there is no matching uprobe, insert the uprobe in the rb_tree;
460a5f4374aSIngo Molnar  *	get a double refcount (access + creation) and return NULL.
461a5f4374aSIngo Molnar  */
462a5f4374aSIngo Molnar static struct uprobe *insert_uprobe(struct uprobe *uprobe)
463a5f4374aSIngo Molnar {
464a5f4374aSIngo Molnar 	struct uprobe *u;
465a5f4374aSIngo Molnar 
4666f47caa0SOleg Nesterov 	spin_lock(&uprobes_treelock);
467a5f4374aSIngo Molnar 	u = __insert_uprobe(uprobe);
4686f47caa0SOleg Nesterov 	spin_unlock(&uprobes_treelock);
469a5f4374aSIngo Molnar 
4700326f5a9SSrikar Dronamraju 	/* For now assume that the instruction need not be single-stepped */
4710326f5a9SSrikar Dronamraju 	uprobe->flags |= UPROBE_SKIP_SSTEP;
4720326f5a9SSrikar Dronamraju 
473a5f4374aSIngo Molnar 	return u;
474a5f4374aSIngo Molnar }
475a5f4374aSIngo Molnar 
476a5f4374aSIngo Molnar static void put_uprobe(struct uprobe *uprobe)
477a5f4374aSIngo Molnar {
478a5f4374aSIngo Molnar 	if (atomic_dec_and_test(&uprobe->ref))
479a5f4374aSIngo Molnar 		kfree(uprobe);
480a5f4374aSIngo Molnar }
481a5f4374aSIngo Molnar 
482a5f4374aSIngo Molnar static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
483a5f4374aSIngo Molnar {
484a5f4374aSIngo Molnar 	struct uprobe *uprobe, *cur_uprobe;
485a5f4374aSIngo Molnar 
486a5f4374aSIngo Molnar 	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
487a5f4374aSIngo Molnar 	if (!uprobe)
488a5f4374aSIngo Molnar 		return NULL;
489a5f4374aSIngo Molnar 
490a5f4374aSIngo Molnar 	uprobe->inode = igrab(inode);
491a5f4374aSIngo Molnar 	uprobe->offset = offset;
492a5f4374aSIngo Molnar 	init_rwsem(&uprobe->consumer_rwsem);
493a5f4374aSIngo Molnar 
494a5f4374aSIngo Molnar 	/* add to uprobes_tree, sorted on inode:offset */
495a5f4374aSIngo Molnar 	cur_uprobe = insert_uprobe(uprobe);
496a5f4374aSIngo Molnar 
497a5f4374aSIngo Molnar 	/* a uprobe exists for this inode:offset combination */
498a5f4374aSIngo Molnar 	if (cur_uprobe) {
499a5f4374aSIngo Molnar 		kfree(uprobe);
500a5f4374aSIngo Molnar 		uprobe = cur_uprobe;
501a5f4374aSIngo Molnar 		iput(inode);
502a5f4374aSIngo Molnar 	} else {
503a5f4374aSIngo Molnar 		atomic_inc(&uprobe_events);
504a5f4374aSIngo Molnar 	}
505a5f4374aSIngo Molnar 
506a5f4374aSIngo Molnar 	return uprobe;
507a5f4374aSIngo Molnar }
508a5f4374aSIngo Molnar 
5090326f5a9SSrikar Dronamraju static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
5100326f5a9SSrikar Dronamraju {
5110326f5a9SSrikar Dronamraju 	struct uprobe_consumer *uc;
5120326f5a9SSrikar Dronamraju 
5130326f5a9SSrikar Dronamraju 	if (!(uprobe->flags & UPROBE_RUN_HANDLER))
5140326f5a9SSrikar Dronamraju 		return;
5150326f5a9SSrikar Dronamraju 
5160326f5a9SSrikar Dronamraju 	down_read(&uprobe->consumer_rwsem);
5170326f5a9SSrikar Dronamraju 	for (uc = uprobe->consumers; uc; uc = uc->next) {
5180326f5a9SSrikar Dronamraju 		if (!uc->filter || uc->filter(uc, current))
5190326f5a9SSrikar Dronamraju 			uc->handler(uc, regs);
5200326f5a9SSrikar Dronamraju 	}
5210326f5a9SSrikar Dronamraju 	up_read(&uprobe->consumer_rwsem);
5220326f5a9SSrikar Dronamraju }
5230326f5a9SSrikar Dronamraju 
524a5f4374aSIngo Molnar /* Returns the previous consumer */
525a5f4374aSIngo Molnar static struct uprobe_consumer *
526e3343e6aSSrikar Dronamraju consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
527a5f4374aSIngo Molnar {
528a5f4374aSIngo Molnar 	down_write(&uprobe->consumer_rwsem);
529e3343e6aSSrikar Dronamraju 	uc->next = uprobe->consumers;
530e3343e6aSSrikar Dronamraju 	uprobe->consumers = uc;
531a5f4374aSIngo Molnar 	up_write(&uprobe->consumer_rwsem);
532a5f4374aSIngo Molnar 
533e3343e6aSSrikar Dronamraju 	return uc->next;
534a5f4374aSIngo Molnar }
535a5f4374aSIngo Molnar 
536a5f4374aSIngo Molnar /*
537e3343e6aSSrikar Dronamraju  * For uprobe @uprobe, delete the consumer @uc.
538e3343e6aSSrikar Dronamraju  * Return true if the @uc is deleted successfully
539a5f4374aSIngo Molnar  * or return false.
540a5f4374aSIngo Molnar  */
541e3343e6aSSrikar Dronamraju static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
542a5f4374aSIngo Molnar {
543a5f4374aSIngo Molnar 	struct uprobe_consumer **con;
544a5f4374aSIngo Molnar 	bool ret = false;
545a5f4374aSIngo Molnar 
546a5f4374aSIngo Molnar 	down_write(&uprobe->consumer_rwsem);
547a5f4374aSIngo Molnar 	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
548e3343e6aSSrikar Dronamraju 		if (*con == uc) {
549e3343e6aSSrikar Dronamraju 			*con = uc->next;
550a5f4374aSIngo Molnar 			ret = true;
551a5f4374aSIngo Molnar 			break;
552a5f4374aSIngo Molnar 		}
553a5f4374aSIngo Molnar 	}
554a5f4374aSIngo Molnar 	up_write(&uprobe->consumer_rwsem);
555a5f4374aSIngo Molnar 
556a5f4374aSIngo Molnar 	return ret;
557a5f4374aSIngo Molnar }
558a5f4374aSIngo Molnar 
559e3343e6aSSrikar Dronamraju static int
560d436615eSOleg Nesterov __copy_insn(struct address_space *mapping, struct file *filp, char *insn,
561593609a5SOleg Nesterov 			unsigned long nbytes, loff_t offset)
562a5f4374aSIngo Molnar {
563a5f4374aSIngo Molnar 	struct page *page;
564a5f4374aSIngo Molnar 	void *vaddr;
565593609a5SOleg Nesterov 	unsigned long off;
566593609a5SOleg Nesterov 	pgoff_t idx;
567a5f4374aSIngo Molnar 
568a5f4374aSIngo Molnar 	if (!filp)
569a5f4374aSIngo Molnar 		return -EINVAL;
570a5f4374aSIngo Molnar 
571cc359d18SOleg Nesterov 	if (!mapping->a_ops->readpage)
572cc359d18SOleg Nesterov 		return -EIO;
573cc359d18SOleg Nesterov 
574593609a5SOleg Nesterov 	idx = offset >> PAGE_CACHE_SHIFT;
575593609a5SOleg Nesterov 	off = offset & ~PAGE_MASK;
576a5f4374aSIngo Molnar 
577a5f4374aSIngo Molnar 	/*
578a5f4374aSIngo Molnar 	 * Ensure that the page that has the original instruction is
579a5f4374aSIngo Molnar 	 * populated and in page-cache.
580a5f4374aSIngo Molnar 	 */
581a5f4374aSIngo Molnar 	page = read_mapping_page(mapping, idx, filp);
582a5f4374aSIngo Molnar 	if (IS_ERR(page))
583a5f4374aSIngo Molnar 		return PTR_ERR(page);
584a5f4374aSIngo Molnar 
585a5f4374aSIngo Molnar 	vaddr = kmap_atomic(page);
586593609a5SOleg Nesterov 	memcpy(insn, vaddr + off, nbytes);
587a5f4374aSIngo Molnar 	kunmap_atomic(vaddr);
588a5f4374aSIngo Molnar 	page_cache_release(page);
589a5f4374aSIngo Molnar 
590a5f4374aSIngo Molnar 	return 0;
591a5f4374aSIngo Molnar }
592a5f4374aSIngo Molnar 
593d436615eSOleg Nesterov static int copy_insn(struct uprobe *uprobe, struct file *filp)
594a5f4374aSIngo Molnar {
595a5f4374aSIngo Molnar 	struct address_space *mapping;
596a5f4374aSIngo Molnar 	unsigned long nbytes;
597a5f4374aSIngo Molnar 	int bytes;
598a5f4374aSIngo Molnar 
599d436615eSOleg Nesterov 	nbytes = PAGE_SIZE - (uprobe->offset & ~PAGE_MASK);
600a5f4374aSIngo Molnar 	mapping = uprobe->inode->i_mapping;
601a5f4374aSIngo Molnar 
602a5f4374aSIngo Molnar 	/* Instruction at end of binary; copy only available bytes */
603a5f4374aSIngo Molnar 	if (uprobe->offset + MAX_UINSN_BYTES > uprobe->inode->i_size)
604a5f4374aSIngo Molnar 		bytes = uprobe->inode->i_size - uprobe->offset;
605a5f4374aSIngo Molnar 	else
606a5f4374aSIngo Molnar 		bytes = MAX_UINSN_BYTES;
607a5f4374aSIngo Molnar 
608a5f4374aSIngo Molnar 	/* Instruction at the page boundary; copy the bytes in the second page */
609a5f4374aSIngo Molnar 	if (nbytes < bytes) {
610fc36f595SOleg Nesterov 		int err = __copy_insn(mapping, filp, uprobe->arch.insn + nbytes,
611fc36f595SOleg Nesterov 				bytes - nbytes, uprobe->offset + nbytes);
612fc36f595SOleg Nesterov 		if (err)
613fc36f595SOleg Nesterov 			return err;
614a5f4374aSIngo Molnar 		bytes = nbytes;
615a5f4374aSIngo Molnar 	}
616d436615eSOleg Nesterov 	return __copy_insn(mapping, filp, uprobe->arch.insn, bytes, uprobe->offset);
617a5f4374aSIngo Molnar }
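
/*
 * Worked example (hypothetical numbers, 4K pages): for an instruction
 * at file offset 0x1ffc, nbytes = PAGE_SIZE - 0xffc = 4. With
 * MAX_UINSN_BYTES == 16 the trailing 12 bytes are copied from the
 * second page first, then bytes is clamped to 4 and the leading bytes
 * are copied from the first page.
 */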
618a5f4374aSIngo Molnar 
619682968e0SSrikar Dronamraju /*
620682968e0SSrikar Dronamraju  * How mm->uprobes_state.count gets updated
621682968e0SSrikar Dronamraju  * uprobe_mmap() increments the count if
622682968e0SSrikar Dronamraju  * 	- it successfully adds a breakpoint.
623682968e0SSrikar Dronamraju  * 	- it cannot add a breakpoint, but sees that there is an underlying
624682968e0SSrikar Dronamraju  * 	  breakpoint (via is_swbp_at_addr()).
625682968e0SSrikar Dronamraju  *
626682968e0SSrikar Dronamraju  * uprobe_munmap() decrements the count if
627682968e0SSrikar Dronamraju  * 	- it sees an underlying breakpoint (via is_swbp_at_addr).
628682968e0SSrikar Dronamraju  * 	  (A subsequent uprobe_unregister wouldn't find the breakpoint
629682968e0SSrikar Dronamraju  * 	  unless a uprobe_mmap kicks in, since the old vma would be
630682968e0SSrikar Dronamraju  * 	  dropped just after uprobe_munmap.)
631682968e0SSrikar Dronamraju  *
632682968e0SSrikar Dronamraju  * uprobe_register increments the count if:
633682968e0SSrikar Dronamraju  * 	- it successfully adds a breakpoint.
634682968e0SSrikar Dronamraju  *
635682968e0SSrikar Dronamraju  * uprobe_unregister decrements the count if:
636682968e0SSrikar Dronamraju  * 	- it sees an underlying breakpoint and removes it successfully
637682968e0SSrikar Dronamraju  * 	  (via is_swbp_at_addr).
638682968e0SSrikar Dronamraju  * 	  (A subsequent uprobe_munmap wouldn't find the breakpoint
639682968e0SSrikar Dronamraju  * 	  since there is no underlying breakpoint after the
640682968e0SSrikar Dronamraju  * 	  breakpoint removal.)
641682968e0SSrikar Dronamraju  */
642e3343e6aSSrikar Dronamraju static int
643e3343e6aSSrikar Dronamraju install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
644816c03fbSOleg Nesterov 			struct vm_area_struct *vma, unsigned long vaddr)
645a5f4374aSIngo Molnar {
646f8ac4ec9SOleg Nesterov 	bool first_uprobe;
647a5f4374aSIngo Molnar 	int ret;
648a5f4374aSIngo Molnar 
649a5f4374aSIngo Molnar 	/*
650a5f4374aSIngo Molnar 	 * If the probe is being deleted, the unregistering thread may already
651a5f4374aSIngo Molnar 	 * be done with its vma-rmap walk. Adding a probe now can be fatal since
652a5f4374aSIngo Molnar 	 * nobody would be able to clean it up. Also, we could be on the fork or
653a5f4374aSIngo Molnar 	 * mremap path, where the probe might have already been inserted.
654a5f4374aSIngo Molnar 	 * Hence behave as if the probe already existed.
655a5f4374aSIngo Molnar 	 */
656a5f4374aSIngo Molnar 	if (!uprobe->consumers)
65778f74116SOleg Nesterov 		return 0;
658a5f4374aSIngo Molnar 
659900771a4SSrikar Dronamraju 	if (!(uprobe->flags & UPROBE_COPY_INSN)) {
660d436615eSOleg Nesterov 		ret = copy_insn(uprobe, vma->vm_file);
661a5f4374aSIngo Molnar 		if (ret)
662a5f4374aSIngo Molnar 			return ret;
663a5f4374aSIngo Molnar 
6645cb4ac3aSSrikar Dronamraju 		if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
665c1914a09SOleg Nesterov 			return -ENOTSUPP;
666a5f4374aSIngo Molnar 
667816c03fbSOleg Nesterov 		ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
668a5f4374aSIngo Molnar 		if (ret)
669a5f4374aSIngo Molnar 			return ret;
670a5f4374aSIngo Molnar 
671d9c4a30eSOleg Nesterov 		/* write_opcode() assumes we don't cross page boundary */
672d9c4a30eSOleg Nesterov 		BUG_ON((uprobe->offset & ~PAGE_MASK) +
673d9c4a30eSOleg Nesterov 				UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
674d9c4a30eSOleg Nesterov 
675900771a4SSrikar Dronamraju 		uprobe->flags |= UPROBE_COPY_INSN;
676a5f4374aSIngo Molnar 	}
677682968e0SSrikar Dronamraju 
678f8ac4ec9SOleg Nesterov 	/*
679f8ac4ec9SOleg Nesterov 	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
680f8ac4ec9SOleg Nesterov 	 * the task can hit this breakpoint right after __replace_page().
681f8ac4ec9SOleg Nesterov 	 */
682f8ac4ec9SOleg Nesterov 	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
683f8ac4ec9SOleg Nesterov 	if (first_uprobe)
684f8ac4ec9SOleg Nesterov 		set_bit(MMF_HAS_UPROBES, &mm->flags);
685f8ac4ec9SOleg Nesterov 
686816c03fbSOleg Nesterov 	ret = set_swbp(&uprobe->arch, mm, vaddr);
6879f68f672SOleg Nesterov 	if (!ret)
6889f68f672SOleg Nesterov 		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
6899f68f672SOleg Nesterov 	else if (first_uprobe)
690f8ac4ec9SOleg Nesterov 		clear_bit(MMF_HAS_UPROBES, &mm->flags);
691a5f4374aSIngo Molnar 
692a5f4374aSIngo Molnar 	return ret;
693a5f4374aSIngo Molnar }
694a5f4374aSIngo Molnar 
695e3343e6aSSrikar Dronamraju static void
696816c03fbSOleg Nesterov remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
697a5f4374aSIngo Molnar {
6989f68f672SOleg Nesterov 	/* can happen if uprobe_register() fails */
6999f68f672SOleg Nesterov 	if (!test_bit(MMF_HAS_UPROBES, &mm->flags))
7009f68f672SOleg Nesterov 		return;
7019f68f672SOleg Nesterov 
7029f68f672SOleg Nesterov 	set_bit(MMF_RECALC_UPROBES, &mm->flags);
703ded86e7cSOleg Nesterov 	set_orig_insn(&uprobe->arch, mm, vaddr);
704a5f4374aSIngo Molnar }
705a5f4374aSIngo Molnar 
7060326f5a9SSrikar Dronamraju /*
707778b032dSOleg Nesterov  * There could be threads that have already hit the breakpoint. They
708778b032dSOleg Nesterov  * will recheck the current insn and restart if find_uprobe() fails.
709778b032dSOleg Nesterov  * See find_active_uprobe().
7100326f5a9SSrikar Dronamraju  */
711a5f4374aSIngo Molnar static void delete_uprobe(struct uprobe *uprobe)
712a5f4374aSIngo Molnar {
7136f47caa0SOleg Nesterov 	spin_lock(&uprobes_treelock);
714a5f4374aSIngo Molnar 	rb_erase(&uprobe->rb_node, &uprobes_tree);
7156f47caa0SOleg Nesterov 	spin_unlock(&uprobes_treelock);
716a5f4374aSIngo Molnar 	iput(uprobe->inode);
717a5f4374aSIngo Molnar 	put_uprobe(uprobe);
718a5f4374aSIngo Molnar 	atomic_dec(&uprobe_events);
719a5f4374aSIngo Molnar }
720a5f4374aSIngo Molnar 
72126872090SOleg Nesterov struct map_info {
72226872090SOleg Nesterov 	struct map_info *next;
72326872090SOleg Nesterov 	struct mm_struct *mm;
724816c03fbSOleg Nesterov 	unsigned long vaddr;
72526872090SOleg Nesterov };
72626872090SOleg Nesterov 
72726872090SOleg Nesterov static inline struct map_info *free_map_info(struct map_info *info)
728a5f4374aSIngo Molnar {
72926872090SOleg Nesterov 	struct map_info *next = info->next;
73026872090SOleg Nesterov 	kfree(info);
73126872090SOleg Nesterov 	return next;
73226872090SOleg Nesterov }
73326872090SOleg Nesterov 
73426872090SOleg Nesterov static struct map_info *
73526872090SOleg Nesterov build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
73626872090SOleg Nesterov {
73726872090SOleg Nesterov 	unsigned long pgoff = offset >> PAGE_SHIFT;
738a5f4374aSIngo Molnar 	struct prio_tree_iter iter;
739a5f4374aSIngo Molnar 	struct vm_area_struct *vma;
74026872090SOleg Nesterov 	struct map_info *curr = NULL;
74126872090SOleg Nesterov 	struct map_info *prev = NULL;
74226872090SOleg Nesterov 	struct map_info *info;
74326872090SOleg Nesterov 	int more = 0;
744a5f4374aSIngo Molnar 
74526872090SOleg Nesterov  again:
74626872090SOleg Nesterov 	mutex_lock(&mapping->i_mmap_mutex);
747a5f4374aSIngo Molnar 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
748a5f4374aSIngo Molnar 		if (!valid_vma(vma, is_register))
749a5f4374aSIngo Molnar 			continue;
750a5f4374aSIngo Molnar 
7517a5bfb66SOleg Nesterov 		if (!prev && !more) {
7527a5bfb66SOleg Nesterov 			/*
7537a5bfb66SOleg Nesterov 			 * Needs GFP_NOWAIT to avoid i_mmap_mutex recursion through
7547a5bfb66SOleg Nesterov 			 * reclaim. This is optimistic, no harm done if it fails.
7557a5bfb66SOleg Nesterov 			 */
7567a5bfb66SOleg Nesterov 			prev = kmalloc(sizeof(struct map_info),
7577a5bfb66SOleg Nesterov 					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
7587a5bfb66SOleg Nesterov 			if (prev)
7597a5bfb66SOleg Nesterov 				prev->next = NULL;
7607a5bfb66SOleg Nesterov 		}
76126872090SOleg Nesterov 		if (!prev) {
76226872090SOleg Nesterov 			more++;
76326872090SOleg Nesterov 			continue;
764a5f4374aSIngo Molnar 		}
765a5f4374aSIngo Molnar 
76626872090SOleg Nesterov 		if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
76726872090SOleg Nesterov 			continue;
768a5f4374aSIngo Molnar 
76926872090SOleg Nesterov 		info = prev;
77026872090SOleg Nesterov 		prev = prev->next;
77126872090SOleg Nesterov 		info->next = curr;
77226872090SOleg Nesterov 		curr = info;
77326872090SOleg Nesterov 
77426872090SOleg Nesterov 		info->mm = vma->vm_mm;
77557683f72SOleg Nesterov 		info->vaddr = offset_to_vaddr(vma, offset);
776a5f4374aSIngo Molnar 	}
777a5f4374aSIngo Molnar 	mutex_unlock(&mapping->i_mmap_mutex);
778a5f4374aSIngo Molnar 
77926872090SOleg Nesterov 	if (!more)
78026872090SOleg Nesterov 		goto out;
781a5f4374aSIngo Molnar 
78226872090SOleg Nesterov 	prev = curr;
78326872090SOleg Nesterov 	while (curr) {
78426872090SOleg Nesterov 		mmput(curr->mm);
78526872090SOleg Nesterov 		curr = curr->next;
78626872090SOleg Nesterov 	}
78726872090SOleg Nesterov 
78826872090SOleg Nesterov 	do {
78926872090SOleg Nesterov 		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
79026872090SOleg Nesterov 		if (!info) {
79126872090SOleg Nesterov 			curr = ERR_PTR(-ENOMEM);
79226872090SOleg Nesterov 			goto out;
79326872090SOleg Nesterov 		}
79426872090SOleg Nesterov 		info->next = prev;
79526872090SOleg Nesterov 		prev = info;
79626872090SOleg Nesterov 	} while (--more);
79726872090SOleg Nesterov 
79826872090SOleg Nesterov 	goto again;
79926872090SOleg Nesterov  out:
80026872090SOleg Nesterov 	while (prev)
80126872090SOleg Nesterov 		prev = free_map_info(prev);
80226872090SOleg Nesterov 	return curr;
803a5f4374aSIngo Molnar }
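
/*
 * In short, build_map_info() makes two kinds of passes: while holding
 * i_mmap_mutex it only attempts GFP_NOWAIT allocations and counts every
 * vma it could not cover in 'more'; it then drops the lock, allocates
 * the missing map_info entries with GFP_KERNEL, and restarts the walk
 * from 'again'.
 */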
804a5f4374aSIngo Molnar 
805a5f4374aSIngo Molnar static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
806a5f4374aSIngo Molnar {
80726872090SOleg Nesterov 	struct map_info *info;
80826872090SOleg Nesterov 	int err = 0;
80926872090SOleg Nesterov 
81026872090SOleg Nesterov 	info = build_map_info(uprobe->inode->i_mapping,
81126872090SOleg Nesterov 					uprobe->offset, is_register);
81226872090SOleg Nesterov 	if (IS_ERR(info))
81326872090SOleg Nesterov 		return PTR_ERR(info);
81426872090SOleg Nesterov 
81526872090SOleg Nesterov 	while (info) {
81626872090SOleg Nesterov 		struct mm_struct *mm = info->mm;
817a5f4374aSIngo Molnar 		struct vm_area_struct *vma;
818a5f4374aSIngo Molnar 
81926872090SOleg Nesterov 		if (err)
82026872090SOleg Nesterov 			goto free;
821a5f4374aSIngo Molnar 
82277fc4af1SOleg Nesterov 		down_write(&mm->mmap_sem);
823f4d6dfe5SOleg Nesterov 		vma = find_vma(mm, info->vaddr);
824f4d6dfe5SOleg Nesterov 		if (!vma || !valid_vma(vma, is_register) ||
825f4d6dfe5SOleg Nesterov 		    vma->vm_file->f_mapping->host != uprobe->inode)
82626872090SOleg Nesterov 			goto unlock;
82726872090SOleg Nesterov 
828f4d6dfe5SOleg Nesterov 		if (vma->vm_start > info->vaddr ||
829f4d6dfe5SOleg Nesterov 		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
83026872090SOleg Nesterov 			goto unlock;
831a5f4374aSIngo Molnar 
83278f74116SOleg Nesterov 		if (is_register)
83326872090SOleg Nesterov 			err = install_breakpoint(uprobe, mm, vma, info->vaddr);
83478f74116SOleg Nesterov 		else
83526872090SOleg Nesterov 			remove_breakpoint(uprobe, mm, info->vaddr);
83678f74116SOleg Nesterov 
83726872090SOleg Nesterov  unlock:
83826872090SOleg Nesterov 		up_write(&mm->mmap_sem);
83926872090SOleg Nesterov  free:
84026872090SOleg Nesterov 		mmput(mm);
84126872090SOleg Nesterov 		info = free_map_info(info);
842a5f4374aSIngo Molnar 	}
843a5f4374aSIngo Molnar 
84426872090SOleg Nesterov 	return err;
845a5f4374aSIngo Molnar }
846a5f4374aSIngo Molnar 
847a5f4374aSIngo Molnar static int __uprobe_register(struct uprobe *uprobe)
848a5f4374aSIngo Molnar {
849a5f4374aSIngo Molnar 	return register_for_each_vma(uprobe, true);
850a5f4374aSIngo Molnar }
851a5f4374aSIngo Molnar 
852a5f4374aSIngo Molnar static void __uprobe_unregister(struct uprobe *uprobe)
853a5f4374aSIngo Molnar {
854a5f4374aSIngo Molnar 	if (!register_for_each_vma(uprobe, false))
855a5f4374aSIngo Molnar 		delete_uprobe(uprobe);
856a5f4374aSIngo Molnar 
857a5f4374aSIngo Molnar 	/* TODO: can't unregister? schedule a worker thread */
858a5f4374aSIngo Molnar }
859a5f4374aSIngo Molnar 
860a5f4374aSIngo Molnar /*
861a5f4374aSIngo Molnar  * uprobe_register - register a probe
862a5f4374aSIngo Molnar  * @inode: the file in which the probe has to be placed.
863a5f4374aSIngo Molnar  * @offset: offset from the start of the file.
864e3343e6aSSrikar Dronamraju  * @uc: information on how to handle the probe.
865a5f4374aSIngo Molnar  *
866a5f4374aSIngo Molnar  * Apart from the access refcount, uprobe_register() takes a creation
867a5f4374aSIngo Molnar  * refcount (through alloc_uprobe) if and only if this @uprobe is getting
868a5f4374aSIngo Molnar  * inserted into the rbtree (i.e. the first consumer for a @inode:@offset
869a5f4374aSIngo Molnar  * tuple).  Creation refcount stops uprobe_unregister from freeing the
870a5f4374aSIngo Molnar  * @uprobe even before the register operation is complete. Creation
871e3343e6aSSrikar Dronamraju  * refcount is released when the last @uc for the @uprobe
872a5f4374aSIngo Molnar  * unregisters.
873a5f4374aSIngo Molnar  *
874a5f4374aSIngo Molnar  * Return errno if it cannot successfully install probes,
875a5f4374aSIngo Molnar  * else return 0 (success).
876a5f4374aSIngo Molnar  */
877e3343e6aSSrikar Dronamraju int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
878a5f4374aSIngo Molnar {
879a5f4374aSIngo Molnar 	struct uprobe *uprobe;
880a5f4374aSIngo Molnar 	int ret;
881a5f4374aSIngo Molnar 
882e3343e6aSSrikar Dronamraju 	if (!inode || !uc || uc->next)
883a5f4374aSIngo Molnar 		return -EINVAL;
884a5f4374aSIngo Molnar 
885a5f4374aSIngo Molnar 	if (offset > i_size_read(inode))
886a5f4374aSIngo Molnar 		return -EINVAL;
887a5f4374aSIngo Molnar 
888a5f4374aSIngo Molnar 	ret = 0;
889a5f4374aSIngo Molnar 	mutex_lock(uprobes_hash(inode));
890a5f4374aSIngo Molnar 	uprobe = alloc_uprobe(inode, offset);
891a5f4374aSIngo Molnar 
892e3343e6aSSrikar Dronamraju 	if (uprobe && !consumer_add(uprobe, uc)) {
893a5f4374aSIngo Molnar 		ret = __uprobe_register(uprobe);
894a5f4374aSIngo Molnar 		if (ret) {
895a5f4374aSIngo Molnar 			uprobe->consumers = NULL;
896a5f4374aSIngo Molnar 			__uprobe_unregister(uprobe);
897a5f4374aSIngo Molnar 		} else {
898900771a4SSrikar Dronamraju 			uprobe->flags |= UPROBE_RUN_HANDLER;
899a5f4374aSIngo Molnar 		}
900a5f4374aSIngo Molnar 	}
901a5f4374aSIngo Molnar 
902a5f4374aSIngo Molnar 	mutex_unlock(uprobes_hash(inode));
9036d1d8dfaSSebastian Andrzej Siewior 	if (uprobe)
904a5f4374aSIngo Molnar 		put_uprobe(uprobe);
905a5f4374aSIngo Molnar 
906a5f4374aSIngo Molnar 	return ret;
907a5f4374aSIngo Molnar }
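
/*
 * Illustrative usage sketch (the consumer and handler names are made
 * up; the handler signature matches how handler_chain() above invokes
 * consumers):
 *
 *	static int my_handler(struct uprobe_consumer *self,
 *			      struct pt_regs *regs)
 *	{
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer my_consumer = {
 *		.handler	= my_handler,
 *	};
 *
 *	err = uprobe_register(inode, offset, &my_consumer);
 *	...
 *	uprobe_unregister(inode, offset, &my_consumer);
 */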
908a5f4374aSIngo Molnar 
909a5f4374aSIngo Molnar /*
910a5f4374aSIngo Molnar  * uprobe_unregister - unregister an already registered probe.
911a5f4374aSIngo Molnar  * @inode: the file in which the probe has to be removed.
912a5f4374aSIngo Molnar  * @offset: offset from the start of the file.
913e3343e6aSSrikar Dronamraju  * @uc: identifies which probe to remove if multiple probes are colocated.
914a5f4374aSIngo Molnar  */
915e3343e6aSSrikar Dronamraju void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
916a5f4374aSIngo Molnar {
917a5f4374aSIngo Molnar 	struct uprobe *uprobe;
918a5f4374aSIngo Molnar 
919e3343e6aSSrikar Dronamraju 	if (!inode || !uc)
920a5f4374aSIngo Molnar 		return;
921a5f4374aSIngo Molnar 
922a5f4374aSIngo Molnar 	uprobe = find_uprobe(inode, offset);
923a5f4374aSIngo Molnar 	if (!uprobe)
924a5f4374aSIngo Molnar 		return;
925a5f4374aSIngo Molnar 
926a5f4374aSIngo Molnar 	mutex_lock(uprobes_hash(inode));
927a5f4374aSIngo Molnar 
928e3343e6aSSrikar Dronamraju 	if (consumer_del(uprobe, uc)) {
929a5f4374aSIngo Molnar 		if (!uprobe->consumers) {
930a5f4374aSIngo Molnar 			__uprobe_unregister(uprobe);
931900771a4SSrikar Dronamraju 			uprobe->flags &= ~UPROBE_RUN_HANDLER;
932a5f4374aSIngo Molnar 		}
933a5f4374aSIngo Molnar 	}
934a5f4374aSIngo Molnar 
935a5f4374aSIngo Molnar 	mutex_unlock(uprobes_hash(inode));
936a5f4374aSIngo Molnar 	if (uprobe)
937a5f4374aSIngo Molnar 		put_uprobe(uprobe);
938a5f4374aSIngo Molnar }
939a5f4374aSIngo Molnar 
940891c3970SOleg Nesterov static struct rb_node *
941891c3970SOleg Nesterov find_node_in_range(struct inode *inode, loff_t min, loff_t max)
942a5f4374aSIngo Molnar {
943a5f4374aSIngo Molnar 	struct rb_node *n = uprobes_tree.rb_node;
944a5f4374aSIngo Molnar 
945a5f4374aSIngo Molnar 	while (n) {
946891c3970SOleg Nesterov 		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);
947a5f4374aSIngo Molnar 
948891c3970SOleg Nesterov 		if (inode < u->inode) {
949a5f4374aSIngo Molnar 			n = n->rb_left;
950891c3970SOleg Nesterov 		} else if (inode > u->inode) {
951a5f4374aSIngo Molnar 			n = n->rb_right;
952891c3970SOleg Nesterov 		} else {
953891c3970SOleg Nesterov 			if (max < u->offset)
954891c3970SOleg Nesterov 				n = n->rb_left;
955891c3970SOleg Nesterov 			else if (min > u->offset)
956891c3970SOleg Nesterov 				n = n->rb_right;
957891c3970SOleg Nesterov 			else
958891c3970SOleg Nesterov 				break;
959891c3970SOleg Nesterov 		}
960a5f4374aSIngo Molnar 	}
961a5f4374aSIngo Molnar 
962891c3970SOleg Nesterov 	return n;
963a5f4374aSIngo Molnar }
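
/*
 * Note that this returns *some* node whose offset lies in [min, max]
 * for @inode, not necessarily the leftmost one; build_probe_list()
 * below therefore expands from it in both directions with rb_prev()
 * and rb_next().
 */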
964a5f4374aSIngo Molnar 
965a5f4374aSIngo Molnar /*
966891c3970SOleg Nesterov  * For a given range in vma, build a list of probes that need to be inserted.
967a5f4374aSIngo Molnar  */
968891c3970SOleg Nesterov static void build_probe_list(struct inode *inode,
969891c3970SOleg Nesterov 				struct vm_area_struct *vma,
970891c3970SOleg Nesterov 				unsigned long start, unsigned long end,
971891c3970SOleg Nesterov 				struct list_head *head)
972a5f4374aSIngo Molnar {
973891c3970SOleg Nesterov 	loff_t min, max;
974891c3970SOleg Nesterov 	struct rb_node *n, *t;
975891c3970SOleg Nesterov 	struct uprobe *u;
976891c3970SOleg Nesterov 
977891c3970SOleg Nesterov 	INIT_LIST_HEAD(head);
978cb113b47SOleg Nesterov 	min = vaddr_to_offset(vma, start);
979891c3970SOleg Nesterov 	max = min + (end - start) - 1;
980a5f4374aSIngo Molnar 
9816f47caa0SOleg Nesterov 	spin_lock(&uprobes_treelock);
982891c3970SOleg Nesterov 	n = find_node_in_range(inode, min, max);
983891c3970SOleg Nesterov 	if (n) {
984891c3970SOleg Nesterov 		for (t = n; t; t = rb_prev(t)) {
985891c3970SOleg Nesterov 			u = rb_entry(t, struct uprobe, rb_node);
986891c3970SOleg Nesterov 			if (u->inode != inode || u->offset < min)
987a5f4374aSIngo Molnar 				break;
988891c3970SOleg Nesterov 			list_add(&u->pending_list, head);
989891c3970SOleg Nesterov 			atomic_inc(&u->ref);
990a5f4374aSIngo Molnar 		}
991891c3970SOleg Nesterov 		for (t = n; (t = rb_next(t)); ) {
992891c3970SOleg Nesterov 			u = rb_entry(t, struct uprobe, rb_node);
993891c3970SOleg Nesterov 			if (u->inode != inode || u->offset > max)
994891c3970SOleg Nesterov 				break;
995891c3970SOleg Nesterov 			list_add(&u->pending_list, head);
996891c3970SOleg Nesterov 			atomic_inc(&u->ref);
997891c3970SOleg Nesterov 		}
998891c3970SOleg Nesterov 	}
9996f47caa0SOleg Nesterov 	spin_unlock(&uprobes_treelock);
1000a5f4374aSIngo Molnar }
1001a5f4374aSIngo Molnar 
1002a5f4374aSIngo Molnar /*
10035e5be71aSOleg Nesterov  * Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
1004a5f4374aSIngo Molnar  *
10055e5be71aSOleg Nesterov  * Currently we ignore all errors and always return 0; the callers
10065e5be71aSOleg Nesterov  * can't handle the failure anyway.
1007a5f4374aSIngo Molnar  */
1008a5f4374aSIngo Molnar int uprobe_mmap(struct vm_area_struct *vma)
1009a5f4374aSIngo Molnar {
1010a5f4374aSIngo Molnar 	struct list_head tmp_list;
1011665605a2SOleg Nesterov 	struct uprobe *uprobe, *u;
1012a5f4374aSIngo Molnar 	struct inode *inode;
1013a5f4374aSIngo Molnar 
1014a5f4374aSIngo Molnar 	if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
1015a5f4374aSIngo Molnar 		return 0;
1016a5f4374aSIngo Molnar 
1017a5f4374aSIngo Molnar 	inode = vma->vm_file->f_mapping->host;
1018a5f4374aSIngo Molnar 	if (!inode)
1019a5f4374aSIngo Molnar 		return 0;
1020a5f4374aSIngo Molnar 
1021a5f4374aSIngo Molnar 	mutex_lock(uprobes_mmap_hash(inode));
1022891c3970SOleg Nesterov 	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
1023a5f4374aSIngo Molnar 
1024665605a2SOleg Nesterov 	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
10255e5be71aSOleg Nesterov 		if (!fatal_signal_pending(current)) {
102657683f72SOleg Nesterov 			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
10275e5be71aSOleg Nesterov 			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
1028a5f4374aSIngo Molnar 		}
1029a5f4374aSIngo Molnar 		put_uprobe(uprobe);
1030a5f4374aSIngo Molnar 	}
1031a5f4374aSIngo Molnar 	mutex_unlock(uprobes_mmap_hash(inode));
1032a5f4374aSIngo Molnar 
10335e5be71aSOleg Nesterov 	return 0;
1034a5f4374aSIngo Molnar }
1035a5f4374aSIngo Molnar 
10369f68f672SOleg Nesterov static bool
10379f68f672SOleg Nesterov vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
10389f68f672SOleg Nesterov {
10399f68f672SOleg Nesterov 	loff_t min, max;
10409f68f672SOleg Nesterov 	struct inode *inode;
10419f68f672SOleg Nesterov 	struct rb_node *n;
10429f68f672SOleg Nesterov 
10439f68f672SOleg Nesterov 	inode = vma->vm_file->f_mapping->host;
10449f68f672SOleg Nesterov 
10459f68f672SOleg Nesterov 	min = vaddr_to_offset(vma, start);
10469f68f672SOleg Nesterov 	max = min + (end - start) - 1;
10479f68f672SOleg Nesterov 
10489f68f672SOleg Nesterov 	spin_lock(&uprobes_treelock);
10499f68f672SOleg Nesterov 	n = find_node_in_range(inode, min, max);
10509f68f672SOleg Nesterov 	spin_unlock(&uprobes_treelock);
10519f68f672SOleg Nesterov 
10529f68f672SOleg Nesterov 	return !!n;
10539f68f672SOleg Nesterov }
10549f68f672SOleg Nesterov 
1055682968e0SSrikar Dronamraju /*
1056682968e0SSrikar Dronamraju  * Called in context of a munmap of a vma.
1057682968e0SSrikar Dronamraju  */
1058cbc91f71SSrikar Dronamraju void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1059682968e0SSrikar Dronamraju {
1060682968e0SSrikar Dronamraju 	if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
1061682968e0SSrikar Dronamraju 		return;
1062682968e0SSrikar Dronamraju 
10632fd611a9SOleg Nesterov 	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
10642fd611a9SOleg Nesterov 		return;
10652fd611a9SOleg Nesterov 
10669f68f672SOleg Nesterov 	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
10679f68f672SOleg Nesterov 	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
1068f8ac4ec9SOleg Nesterov 		return;
1069f8ac4ec9SOleg Nesterov 
10709f68f672SOleg Nesterov 	if (vma_has_uprobes(vma, start, end))
10719f68f672SOleg Nesterov 		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
1072682968e0SSrikar Dronamraju }
1073682968e0SSrikar Dronamraju 
1074d4b3b638SSrikar Dronamraju /* Slot allocation for XOL */
1075d4b3b638SSrikar Dronamraju static int xol_add_vma(struct xol_area *area)
1076d4b3b638SSrikar Dronamraju {
1077d4b3b638SSrikar Dronamraju 	struct mm_struct *mm;
1078d4b3b638SSrikar Dronamraju 	int ret;
1079d4b3b638SSrikar Dronamraju 
1080d4b3b638SSrikar Dronamraju 	area->page = alloc_page(GFP_HIGHUSER);
1081d4b3b638SSrikar Dronamraju 	if (!area->page)
1082d4b3b638SSrikar Dronamraju 		return -ENOMEM;
1083d4b3b638SSrikar Dronamraju 
1084d4b3b638SSrikar Dronamraju 	ret = -EALREADY;
1085d4b3b638SSrikar Dronamraju 	mm = current->mm;
1086d4b3b638SSrikar Dronamraju 
1087d4b3b638SSrikar Dronamraju 	down_write(&mm->mmap_sem);
1088d4b3b638SSrikar Dronamraju 	if (mm->uprobes_state.xol_area)
1089d4b3b638SSrikar Dronamraju 		goto fail;
1090d4b3b638SSrikar Dronamraju 
1091d4b3b638SSrikar Dronamraju 	ret = -ENOMEM;
1092d4b3b638SSrikar Dronamraju 
1093d4b3b638SSrikar Dronamraju 	/* Try to map as high as possible; this is only a hint. */
1094d4b3b638SSrikar Dronamraju 	area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
1095d4b3b638SSrikar Dronamraju 	if (area->vaddr & ~PAGE_MASK) {
1096d4b3b638SSrikar Dronamraju 		ret = area->vaddr;
1097d4b3b638SSrikar Dronamraju 		goto fail;
1098d4b3b638SSrikar Dronamraju 	}
1099d4b3b638SSrikar Dronamraju 
1100d4b3b638SSrikar Dronamraju 	ret = install_special_mapping(mm, area->vaddr, PAGE_SIZE,
1101d4b3b638SSrikar Dronamraju 				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, &area->page);
1102d4b3b638SSrikar Dronamraju 	if (ret)
1103d4b3b638SSrikar Dronamraju 		goto fail;
1104d4b3b638SSrikar Dronamraju 
1105d4b3b638SSrikar Dronamraju 	smp_wmb();	/* pairs with get_xol_area() */
1106d4b3b638SSrikar Dronamraju 	mm->uprobes_state.xol_area = area;
1107d4b3b638SSrikar Dronamraju 	ret = 0;
1108d4b3b638SSrikar Dronamraju 
1109d4b3b638SSrikar Dronamraju fail:
1110d4b3b638SSrikar Dronamraju 	up_write(&mm->mmap_sem);
1111d4b3b638SSrikar Dronamraju 	if (ret)
1112d4b3b638SSrikar Dronamraju 		__free_page(area->page);
1113d4b3b638SSrikar Dronamraju 
1114d4b3b638SSrikar Dronamraju 	return ret;
1115d4b3b638SSrikar Dronamraju }
1116d4b3b638SSrikar Dronamraju 
1117d4b3b638SSrikar Dronamraju static struct xol_area *get_xol_area(struct mm_struct *mm)
1118d4b3b638SSrikar Dronamraju {
1119d4b3b638SSrikar Dronamraju 	struct xol_area *area;
1120d4b3b638SSrikar Dronamraju 
1121d4b3b638SSrikar Dronamraju 	area = mm->uprobes_state.xol_area;
1122d4b3b638SSrikar Dronamraju 	smp_read_barrier_depends();	/* pairs with wmb in xol_add_vma() */
1123d4b3b638SSrikar Dronamraju 
1124d4b3b638SSrikar Dronamraju 	return area;
1125d4b3b638SSrikar Dronamraju }
1126d4b3b638SSrikar Dronamraju 
1127d4b3b638SSrikar Dronamraju /*
1128d4b3b638SSrikar Dronamraju  * xol_alloc_area - Allocate process's xol_area.
1129d4b3b638SSrikar Dronamraju  * This area will be used for storing instructions for execution out of
1130d4b3b638SSrikar Dronamraju  * line.
1131d4b3b638SSrikar Dronamraju  *
1132d4b3b638SSrikar Dronamraju  * Returns the allocated area or NULL.
1133d4b3b638SSrikar Dronamraju  */
1134d4b3b638SSrikar Dronamraju static struct xol_area *xol_alloc_area(void)
1135d4b3b638SSrikar Dronamraju {
1136d4b3b638SSrikar Dronamraju 	struct xol_area *area;
1137d4b3b638SSrikar Dronamraju 
1138d4b3b638SSrikar Dronamraju 	area = kzalloc(sizeof(*area), GFP_KERNEL);
1139d4b3b638SSrikar Dronamraju 	if (unlikely(!area))
1140d4b3b638SSrikar Dronamraju 		return NULL;
1141d4b3b638SSrikar Dronamraju 
1142d4b3b638SSrikar Dronamraju 	area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
1143d4b3b638SSrikar Dronamraju 
1144d4b3b638SSrikar Dronamraju 	if (!area->bitmap)
1145d4b3b638SSrikar Dronamraju 		goto fail;
1146d4b3b638SSrikar Dronamraju 
1147d4b3b638SSrikar Dronamraju 	init_waitqueue_head(&area->wq);
1148d4b3b638SSrikar Dronamraju 	if (!xol_add_vma(area))
1149d4b3b638SSrikar Dronamraju 		return area;
1150d4b3b638SSrikar Dronamraju 
1151d4b3b638SSrikar Dronamraju fail:
1152d4b3b638SSrikar Dronamraju 	kfree(area->bitmap);
1153d4b3b638SSrikar Dronamraju 	kfree(area);
1154d4b3b638SSrikar Dronamraju 
1155d4b3b638SSrikar Dronamraju 	return get_xol_area(current->mm);
1156d4b3b638SSrikar Dronamraju }
1157d4b3b638SSrikar Dronamraju 
1158d4b3b638SSrikar Dronamraju /*
1159d4b3b638SSrikar Dronamraju  * uprobe_clear_state - Free the area allocated for slots.
1160d4b3b638SSrikar Dronamraju  */
1161d4b3b638SSrikar Dronamraju void uprobe_clear_state(struct mm_struct *mm)
1162d4b3b638SSrikar Dronamraju {
1163d4b3b638SSrikar Dronamraju 	struct xol_area *area = mm->uprobes_state.xol_area;
1164d4b3b638SSrikar Dronamraju 
1165d4b3b638SSrikar Dronamraju 	if (!area)
1166d4b3b638SSrikar Dronamraju 		return;
1167d4b3b638SSrikar Dronamraju 
1168d4b3b638SSrikar Dronamraju 	put_page(area->page);
1169d4b3b638SSrikar Dronamraju 	kfree(area->bitmap);
1170d4b3b638SSrikar Dronamraju 	kfree(area);
1171d4b3b638SSrikar Dronamraju }
1172d4b3b638SSrikar Dronamraju 
1173f8ac4ec9SOleg Nesterov void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
1174f8ac4ec9SOleg Nesterov {
117561559a81SOleg Nesterov 	newmm->uprobes_state.xol_area = NULL;
117661559a81SOleg Nesterov 
11779f68f672SOleg Nesterov 	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
1178f8ac4ec9SOleg Nesterov 		set_bit(MMF_HAS_UPROBES, &newmm->flags);
11799f68f672SOleg Nesterov 		/* set unconditionally: dup_mmap() skips VM_DONTCOPY vmas */
11809f68f672SOleg Nesterov 		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
11819f68f672SOleg Nesterov 	}
1182f8ac4ec9SOleg Nesterov }
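
/*
 * A sketch of the expected fork-time ordering, assuming dup_mmap() in
 * kernel/fork.c invokes this hook for the new mm (an illustration, not
 * the exact copy_process() code):
 *
 *	static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 *	{
 *		uprobe_dup_mmap(oldmm, mm);
 *		// ... copy vmas; VM_DONTCOPY ones (such as the XOL vma)
 *		// are skipped, which is why MMF_RECALC_UPROBES is set
 *		// unconditionally above ...
 *	}
 *
 * The child therefore starts with xol_area == NULL and re-evaluates its
 * uprobes lazily on the first breakpoint hit.
 */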
1183f8ac4ec9SOleg Nesterov 
1184d4b3b638SSrikar Dronamraju /*
1185d4b3b638SSrikar Dronamraju  * xol_take_insn_slot - search for a free slot; sleep until one frees up.
1186d4b3b638SSrikar Dronamraju  */
1187d4b3b638SSrikar Dronamraju static unsigned long xol_take_insn_slot(struct xol_area *area)
1188d4b3b638SSrikar Dronamraju {
1189d4b3b638SSrikar Dronamraju 	unsigned long slot_addr;
1190d4b3b638SSrikar Dronamraju 	int slot_nr;
1191d4b3b638SSrikar Dronamraju 
1192d4b3b638SSrikar Dronamraju 	do {
1193d4b3b638SSrikar Dronamraju 		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
1194d4b3b638SSrikar Dronamraju 		if (slot_nr < UINSNS_PER_PAGE) {
1195d4b3b638SSrikar Dronamraju 			if (!test_and_set_bit(slot_nr, area->bitmap))
1196d4b3b638SSrikar Dronamraju 				break;
1197d4b3b638SSrikar Dronamraju 
1198d4b3b638SSrikar Dronamraju 			slot_nr = UINSNS_PER_PAGE;
1199d4b3b638SSrikar Dronamraju 			continue;
1200d4b3b638SSrikar Dronamraju 		}
1201d4b3b638SSrikar Dronamraju 		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
1202d4b3b638SSrikar Dronamraju 	} while (slot_nr >= UINSNS_PER_PAGE);
1203d4b3b638SSrikar Dronamraju 
1204d4b3b638SSrikar Dronamraju 	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
1205d4b3b638SSrikar Dronamraju 	atomic_inc(&area->slot_count);
1206d4b3b638SSrikar Dronamraju 
1207d4b3b638SSrikar Dronamraju 	return slot_addr;
1208d4b3b638SSrikar Dronamraju }
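
/*
 * The loop above is a small lock-free bitmap allocator:
 * find_first_zero_bit() is only a hint, so the atomic test_and_set_bit()
 * re-validates it and the loop retries when another task wins the race.
 * The same pattern in isolation, over a hypothetical NR_SLOTS-bit map
 * (names are illustrative only):
 *
 *	#define NR_SLOTS	32
 *	static unsigned long slot_map[BITS_TO_LONGS(NR_SLOTS)];
 *
 *	static int take_slot(void)
 *	{
 *		int nr;
 *
 *		for (;;) {
 *			nr = find_first_zero_bit(slot_map, NR_SLOTS);
 *			if (nr >= NR_SLOTS)
 *				return -1;	// full; caller may sleep
 *			if (!test_and_set_bit(nr, slot_map))
 *				return nr;	// won the race
 *			// lost the race: another task set the bit between
 *			// the scan and the atomic op; rescan
 *		}
 *	}
 */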
1209d4b3b638SSrikar Dronamraju 
1210d4b3b638SSrikar Dronamraju /*
1211d4b3b638SSrikar Dronamraju  * xol_get_insn_slot - allocate an instruction slot for the current task
1212d4b3b638SSrikar Dronamraju  * if it does not have one yet, and copy the probed instruction into it.
1213d4b3b638SSrikar Dronamraju  * Returns the allocated slot address or 0.
1214d4b3b638SSrikar Dronamraju  */
1215d4b3b638SSrikar Dronamraju static unsigned long xol_get_insn_slot(struct uprobe *uprobe, unsigned long slot_addr)
1216d4b3b638SSrikar Dronamraju {
1217d4b3b638SSrikar Dronamraju 	struct xol_area *area;
1218d4b3b638SSrikar Dronamraju 	unsigned long offset;
1219d4b3b638SSrikar Dronamraju 	void *vaddr;
1220d4b3b638SSrikar Dronamraju 
1221d4b3b638SSrikar Dronamraju 	area = get_xol_area(current->mm);
1222d4b3b638SSrikar Dronamraju 	if (!area) {
1223d4b3b638SSrikar Dronamraju 		area = xol_alloc_area();
1224d4b3b638SSrikar Dronamraju 		if (!area)
1225d4b3b638SSrikar Dronamraju 			return 0;
1226d4b3b638SSrikar Dronamraju 	}
1227d4b3b638SSrikar Dronamraju 	current->utask->xol_vaddr = xol_take_insn_slot(area);
1228d4b3b638SSrikar Dronamraju 
1229d4b3b638SSrikar Dronamraju 	/*
1230d4b3b638SSrikar Dronamraju 	 * Initialize the slot if xol_vaddr points to a valid
1231d4b3b638SSrikar Dronamraju 	 * instruction slot.
1232d4b3b638SSrikar Dronamraju 	 */
1233d4b3b638SSrikar Dronamraju 	if (unlikely(!current->utask->xol_vaddr))
1234d4b3b638SSrikar Dronamraju 		return 0;
1235d4b3b638SSrikar Dronamraju 
1236d4b3b638SSrikar Dronamraju 	current->utask->vaddr = slot_addr;
1237d4b3b638SSrikar Dronamraju 	offset = current->utask->xol_vaddr & ~PAGE_MASK;
1238d4b3b638SSrikar Dronamraju 	vaddr = kmap_atomic(area->page);
1239d4b3b638SSrikar Dronamraju 	memcpy(vaddr + offset, uprobe->arch.insn, MAX_UINSN_BYTES);
1240d4b3b638SSrikar Dronamraju 	kunmap_atomic(vaddr);
1241d4b3b638SSrikar Dronamraju 
1242d4b3b638SSrikar Dronamraju 	return current->utask->xol_vaddr;
1243d4b3b638SSrikar Dronamraju }
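
/*
 * The copy above uses the standard highmem access sequence: map the page
 * into a temporary kernel virtual address, memcpy(), unmap. The same
 * sequence as a stand-alone helper (a sketch; this helper does not exist
 * in this file):
 *
 *	static void copy_to_page_at(struct page *page, unsigned long offset,
 *				    const void *src, size_t len)
 *	{
 *		void *kaddr = kmap_atomic(page);	// disables preemption
 *
 *		memcpy(kaddr + offset, src, len);
 *		kunmap_atomic(kaddr);
 *	}
 */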
1244d4b3b638SSrikar Dronamraju 
1245d4b3b638SSrikar Dronamraju /*
1246d4b3b638SSrikar Dronamraju  * xol_free_insn_slot - If a slot was earlier allocated by
1247d4b3b638SSrikar Dronamraju  * xol_get_insn_slot(), make the slot available for
1248d4b3b638SSrikar Dronamraju  * subsequent requests.
1249d4b3b638SSrikar Dronamraju  */
1250d4b3b638SSrikar Dronamraju static void xol_free_insn_slot(struct task_struct *tsk)
1251d4b3b638SSrikar Dronamraju {
1252d4b3b638SSrikar Dronamraju 	struct xol_area *area;
1253d4b3b638SSrikar Dronamraju 	unsigned long vma_end;
1254d4b3b638SSrikar Dronamraju 	unsigned long slot_addr;
1255d4b3b638SSrikar Dronamraju 
1256d4b3b638SSrikar Dronamraju 	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
1257d4b3b638SSrikar Dronamraju 		return;
1258d4b3b638SSrikar Dronamraju 
1259d4b3b638SSrikar Dronamraju 	slot_addr = tsk->utask->xol_vaddr;
1260d4b3b638SSrikar Dronamraju 
1261d4b3b638SSrikar Dronamraju 	if (unlikely(!slot_addr || IS_ERR_VALUE(slot_addr)))
1262d4b3b638SSrikar Dronamraju 		return;
1263d4b3b638SSrikar Dronamraju 
1264d4b3b638SSrikar Dronamraju 	area = tsk->mm->uprobes_state.xol_area;
1265d4b3b638SSrikar Dronamraju 	vma_end = area->vaddr + PAGE_SIZE;
1266d4b3b638SSrikar Dronamraju 	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
1267d4b3b638SSrikar Dronamraju 		unsigned long offset;
1268d4b3b638SSrikar Dronamraju 		int slot_nr;
1269d4b3b638SSrikar Dronamraju 
1270d4b3b638SSrikar Dronamraju 		offset = slot_addr - area->vaddr;
1271d4b3b638SSrikar Dronamraju 		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
1272d4b3b638SSrikar Dronamraju 		if (slot_nr >= UINSNS_PER_PAGE)
1273d4b3b638SSrikar Dronamraju 			return;
1274d4b3b638SSrikar Dronamraju 
1275d4b3b638SSrikar Dronamraju 		clear_bit(slot_nr, area->bitmap);
1276d4b3b638SSrikar Dronamraju 		atomic_dec(&area->slot_count);
1277d4b3b638SSrikar Dronamraju 		if (waitqueue_active(&area->wq))
1278d4b3b638SSrikar Dronamraju 			wake_up(&area->wq);
1279d4b3b638SSrikar Dronamraju 
1280d4b3b638SSrikar Dronamraju 		tsk->utask->xol_vaddr = 0;
1281d4b3b638SSrikar Dronamraju 	}
1282d4b3b638SSrikar Dronamraju }
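
/*
 * Worked example of the slot arithmetic above, assuming PAGE_SIZE is 4096
 * and UPROBE_XOL_SLOT_BYTES is 128 (so UINSNS_PER_PAGE == 32; the numbers
 * are illustrative): with area->vaddr == 0x7fff000 and
 * slot_addr == 0x7fff280, offset == 0x280 == 640 and
 * slot_nr == 640 / 128 == 5, so bit 5 is cleared and one waiter, if any,
 * is woken.
 */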
1283d4b3b638SSrikar Dronamraju 
12840326f5a9SSrikar Dronamraju /**
12850326f5a9SSrikar Dronamraju  * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
12860326f5a9SSrikar Dronamraju  * @regs: Reflects the saved state of the task after it has hit a
12870326f5a9SSrikar Dronamraju  *	  breakpoint instruction.
12880326f5a9SSrikar Dronamraju  * Returns the address of the breakpoint instruction.
12890326f5a9SSrikar Dronamraju  */
12900326f5a9SSrikar Dronamraju unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
12910326f5a9SSrikar Dronamraju {
12920326f5a9SSrikar Dronamraju 	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
12930326f5a9SSrikar Dronamraju }
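
/*
 * The __weak default above assumes the breakpoint trap leaves the
 * instruction pointer just past the breakpoint insn, as on x86 where int3
 * is a single byte (UPROBE_SWBP_INSN_SIZE == 1). An architecture whose
 * trap reports the address of the trapping instruction itself could
 * override it like this (a sketch, not any particular arch's actual
 * code):
 *
 *	unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
 *	{
 *		return instruction_pointer(regs);
 *	}
 */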
12940326f5a9SSrikar Dronamraju 
12950326f5a9SSrikar Dronamraju /*
12960326f5a9SSrikar Dronamraju  * Called with no locks held.
12970326f5a9SSrikar Dronamraju  * Called in the context of an exiting or an exec-ing thread.
12980326f5a9SSrikar Dronamraju  */
12990326f5a9SSrikar Dronamraju void uprobe_free_utask(struct task_struct *t)
13000326f5a9SSrikar Dronamraju {
13010326f5a9SSrikar Dronamraju 	struct uprobe_task *utask = t->utask;
13020326f5a9SSrikar Dronamraju 
13030326f5a9SSrikar Dronamraju 	if (!utask)
13040326f5a9SSrikar Dronamraju 		return;
13050326f5a9SSrikar Dronamraju 
13060326f5a9SSrikar Dronamraju 	if (utask->active_uprobe)
13070326f5a9SSrikar Dronamraju 		put_uprobe(utask->active_uprobe);
13080326f5a9SSrikar Dronamraju 
1309d4b3b638SSrikar Dronamraju 	xol_free_insn_slot(t);
13100326f5a9SSrikar Dronamraju 	kfree(utask);
13110326f5a9SSrikar Dronamraju 	t->utask = NULL;
13120326f5a9SSrikar Dronamraju }
13130326f5a9SSrikar Dronamraju 
13140326f5a9SSrikar Dronamraju /*
13150326f5a9SSrikar Dronamraju  * Called in the context of a new clone/fork from copy_process().
13160326f5a9SSrikar Dronamraju  */
13170326f5a9SSrikar Dronamraju void uprobe_copy_process(struct task_struct *t)
13180326f5a9SSrikar Dronamraju {
13190326f5a9SSrikar Dronamraju 	t->utask = NULL;
13200326f5a9SSrikar Dronamraju }
13210326f5a9SSrikar Dronamraju 
13220326f5a9SSrikar Dronamraju /*
13230326f5a9SSrikar Dronamraju  * Allocate a uprobe_task object for the task.
13240326f5a9SSrikar Dronamraju  * Called when the thread hits a breakpoint for the first time.
13250326f5a9SSrikar Dronamraju  *
13260326f5a9SSrikar Dronamraju  * Returns:
13270326f5a9SSrikar Dronamraju  * - pointer to new uprobe_task on success
13280326f5a9SSrikar Dronamraju  * - NULL otherwise
13290326f5a9SSrikar Dronamraju  */
13300326f5a9SSrikar Dronamraju static struct uprobe_task *add_utask(void)
13310326f5a9SSrikar Dronamraju {
13320326f5a9SSrikar Dronamraju 	struct uprobe_task *utask;
13330326f5a9SSrikar Dronamraju 
13340326f5a9SSrikar Dronamraju 	utask = kzalloc(sizeof(*utask), GFP_KERNEL);
13350326f5a9SSrikar Dronamraju 	if (unlikely(!utask))
13360326f5a9SSrikar Dronamraju 		return NULL;
13370326f5a9SSrikar Dronamraju 
13380326f5a9SSrikar Dronamraju 	current->utask = utask;
13390326f5a9SSrikar Dronamraju 	return utask;
13400326f5a9SSrikar Dronamraju }
13410326f5a9SSrikar Dronamraju 
13420326f5a9SSrikar Dronamraju /* Prepare to single-step probed instruction out of line. */
13430326f5a9SSrikar Dronamraju static int
13440326f5a9SSrikar Dronamraju pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long vaddr)
13450326f5a9SSrikar Dronamraju {
1346d4b3b638SSrikar Dronamraju 	if (xol_get_insn_slot(uprobe, vaddr) && !arch_uprobe_pre_xol(&uprobe->arch, regs))
1347d4b3b638SSrikar Dronamraju 		return 0;
1348d4b3b638SSrikar Dronamraju 
13490326f5a9SSrikar Dronamraju 	return -EFAULT;
13500326f5a9SSrikar Dronamraju }
13510326f5a9SSrikar Dronamraju 
13520326f5a9SSrikar Dronamraju /*
13530326f5a9SSrikar Dronamraju  * If we are singlestepping, then ensure this thread is not connected to
13540326f5a9SSrikar Dronamraju  * non-fatal signals until completion of singlestep.  When xol insn itself
13550326f5a9SSrikar Dronamraju  * triggers the signal, restart the original insn even if the task is
13560326f5a9SSrikar Dronamraju  * already SIGKILL'ed (since coredump should report the correct ip).  This
13570326f5a9SSrikar Dronamraju  * is even more important if the task has a handler for SIGSEGV/etc.: the
13580326f5a9SSrikar Dronamraju  * _same_ instruction should be repeated after return from the signal
13590326f5a9SSrikar Dronamraju  * handler, and SSTEP can never finish in this case.
13600326f5a9SSrikar Dronamraju  */
13610326f5a9SSrikar Dronamraju bool uprobe_deny_signal(void)
13620326f5a9SSrikar Dronamraju {
13630326f5a9SSrikar Dronamraju 	struct task_struct *t = current;
13640326f5a9SSrikar Dronamraju 	struct uprobe_task *utask = t->utask;
13650326f5a9SSrikar Dronamraju 
13660326f5a9SSrikar Dronamraju 	if (likely(!utask || !utask->active_uprobe))
13670326f5a9SSrikar Dronamraju 		return false;
13680326f5a9SSrikar Dronamraju 
13690326f5a9SSrikar Dronamraju 	WARN_ON_ONCE(utask->state != UTASK_SSTEP);
13700326f5a9SSrikar Dronamraju 
13710326f5a9SSrikar Dronamraju 	if (signal_pending(t)) {
13720326f5a9SSrikar Dronamraju 		spin_lock_irq(&t->sighand->siglock);
13730326f5a9SSrikar Dronamraju 		clear_tsk_thread_flag(t, TIF_SIGPENDING);
13740326f5a9SSrikar Dronamraju 		spin_unlock_irq(&t->sighand->siglock);
13750326f5a9SSrikar Dronamraju 
13760326f5a9SSrikar Dronamraju 		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
13770326f5a9SSrikar Dronamraju 			utask->state = UTASK_SSTEP_TRAPPED;
13780326f5a9SSrikar Dronamraju 			set_tsk_thread_flag(t, TIF_UPROBE);
13790326f5a9SSrikar Dronamraju 			set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
13800326f5a9SSrikar Dronamraju 		}
13810326f5a9SSrikar Dronamraju 	}
13820326f5a9SSrikar Dronamraju 
13830326f5a9SSrikar Dronamraju 	return true;
13840326f5a9SSrikar Dronamraju }
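
/*
 * The signal-delivery slow path is expected to call this helper before
 * dequeueing a signal and to bail out when it returns true, so that the
 * single-step over the XOL slot completes first. A sketch of such a call
 * site (not the exact kernel/signal.c code):
 *
 *	if (unlikely(uprobe_deny_signal()))
 *		return 0;	// no signal now; retry after the sstep
 */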
13850326f5a9SSrikar Dronamraju 
13860326f5a9SSrikar Dronamraju /*
13870326f5a9SSrikar Dronamraju  * Avoid singlestepping the original instruction if it is a NOP or can
13880326f5a9SSrikar Dronamraju  * be emulated.
13890326f5a9SSrikar Dronamraju  */
13900326f5a9SSrikar Dronamraju static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs)
13910326f5a9SSrikar Dronamraju {
13920578a970SOleg Nesterov 	if (uprobe->flags & UPROBE_SKIP_SSTEP) {
13930326f5a9SSrikar Dronamraju 		if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
13940326f5a9SSrikar Dronamraju 			return true;
13950326f5a9SSrikar Dronamraju 		uprobe->flags &= ~UPROBE_SKIP_SSTEP;
13960578a970SOleg Nesterov 	}
13970326f5a9SSrikar Dronamraju 	return false;
13980326f5a9SSrikar Dronamraju }
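
/*
 * arch_uprobe_skip_sstep() gives the architecture a chance to emulate a
 * trivial instruction instead of single-stepping it. A hypothetical
 * implementation that only handles a one-byte x86 NOP (0x90) might look
 * like the sketch below; it assumes regs still points at the probed
 * instruction and that arch_uprobe keeps an insn[] copy, neither of which
 * is guaranteed by this file:
 *
 *	bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe,
 *				    struct pt_regs *regs)
 *	{
 *		if (auprobe->insn[0] == 0x90) {	// NOP: nothing to execute
 *			instruction_pointer_set(regs,
 *				instruction_pointer(regs) + 1);
 *			return true;
 *		}
 *		return false;
 *	}
 */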
13990326f5a9SSrikar Dronamraju 
1400499a4f3eSOleg Nesterov static void mmf_recalc_uprobes(struct mm_struct *mm)
1401499a4f3eSOleg Nesterov {
1402499a4f3eSOleg Nesterov 	struct vm_area_struct *vma;
1403499a4f3eSOleg Nesterov 
1404499a4f3eSOleg Nesterov 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
1405499a4f3eSOleg Nesterov 		if (!valid_vma(vma, false))
1406499a4f3eSOleg Nesterov 			continue;
1407499a4f3eSOleg Nesterov 		/*
1408499a4f3eSOleg Nesterov 		 * This is not strictly accurate, we can race with
1409499a4f3eSOleg Nesterov 		 * uprobe_unregister() and see the already removed
1410499a4f3eSOleg Nesterov 		 * uprobe if delete_uprobe() was not yet called.
1411499a4f3eSOleg Nesterov 		 */
1412499a4f3eSOleg Nesterov 		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
1413499a4f3eSOleg Nesterov 			return;
1414499a4f3eSOleg Nesterov 	}
1415499a4f3eSOleg Nesterov 
1416499a4f3eSOleg Nesterov 	clear_bit(MMF_HAS_UPROBES, &mm->flags);
1417499a4f3eSOleg Nesterov }
1418499a4f3eSOleg Nesterov 
1419d790d346SOleg Nesterov static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
14200326f5a9SSrikar Dronamraju {
14213a9ea052SOleg Nesterov 	struct mm_struct *mm = current->mm;
14223a9ea052SOleg Nesterov 	struct uprobe *uprobe = NULL;
14230326f5a9SSrikar Dronamraju 	struct vm_area_struct *vma;
14240326f5a9SSrikar Dronamraju 
14250326f5a9SSrikar Dronamraju 	down_read(&mm->mmap_sem);
14260326f5a9SSrikar Dronamraju 	vma = find_vma(mm, bp_vaddr);
14273a9ea052SOleg Nesterov 	if (vma && vma->vm_start <= bp_vaddr) {
14283a9ea052SOleg Nesterov 		if (valid_vma(vma, false)) {
1429cb113b47SOleg Nesterov 			struct inode *inode = vma->vm_file->f_mapping->host;
1430cb113b47SOleg Nesterov 			loff_t offset = vaddr_to_offset(vma, bp_vaddr);
14310326f5a9SSrikar Dronamraju 
14320326f5a9SSrikar Dronamraju 			uprobe = find_uprobe(inode, offset);
14330326f5a9SSrikar Dronamraju 		}
1434d790d346SOleg Nesterov 
1435d790d346SOleg Nesterov 		if (!uprobe)
1436d790d346SOleg Nesterov 			*is_swbp = is_swbp_at_addr(mm, bp_vaddr);
1437d790d346SOleg Nesterov 	} else {
1438d790d346SOleg Nesterov 		*is_swbp = -EFAULT;
14393a9ea052SOleg Nesterov 	}
1440499a4f3eSOleg Nesterov 
1441499a4f3eSOleg Nesterov 	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
1442499a4f3eSOleg Nesterov 		mmf_recalc_uprobes(mm);
14430326f5a9SSrikar Dronamraju 	up_read(&mm->mmap_sem);
14440326f5a9SSrikar Dronamraju 
14453a9ea052SOleg Nesterov 	return uprobe;
14463a9ea052SOleg Nesterov }
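
/*
 * The inode:offset pair computed above is the rbtree key. Recall that
 * vaddr_to_offset() (defined earlier in this file) is essentially:
 *
 *	offset = ((loff_t)vma->vm_pgoff << PAGE_SHIFT)
 *			+ (vaddr - vma->vm_start);
 *
 * Worked example with illustrative numbers: a vma with vm_pgoff == 2
 * (file offset 0x2000) mapped at vm_start == 0x400000, hit at
 * bp_vaddr == 0x400150, yields offset == 0x2000 + 0x150 == 0x2150, which
 * find_uprobe() then looks up in uprobes_tree.
 */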
14473a9ea052SOleg Nesterov 
14489d778782SSebastian Andrzej Siewior void __weak arch_uprobe_enable_step(struct arch_uprobe *arch)
14499d778782SSebastian Andrzej Siewior {
14509d778782SSebastian Andrzej Siewior 	user_enable_single_step(current);
14519d778782SSebastian Andrzej Siewior }
14529d778782SSebastian Andrzej Siewior 
14539d778782SSebastian Andrzej Siewior void __weak arch_uprobe_disable_step(struct arch_uprobe *arch)
14549d778782SSebastian Andrzej Siewior {
14559d778782SSebastian Andrzej Siewior 	user_disable_single_step(current);
14569d778782SSebastian Andrzej Siewior }
14579d778782SSebastian Andrzej Siewior 
14583a9ea052SOleg Nesterov /*
14593a9ea052SOleg Nesterov  * Run the handlers and ask the thread to singlestep.
14603a9ea052SOleg Nesterov  * Ensure all non-fatal signals cannot interrupt the thread while it singlesteps.
14613a9ea052SOleg Nesterov  */
14623a9ea052SOleg Nesterov static void handle_swbp(struct pt_regs *regs)
14633a9ea052SOleg Nesterov {
14643a9ea052SOleg Nesterov 	struct uprobe_task *utask;
14653a9ea052SOleg Nesterov 	struct uprobe *uprobe;
14663a9ea052SOleg Nesterov 	unsigned long bp_vaddr;
146756bb4cf6SOleg Nesterov 	int uninitialized_var(is_swbp);
14683a9ea052SOleg Nesterov 
14693a9ea052SOleg Nesterov 	bp_vaddr = uprobe_get_swbp_addr(regs);
1470d790d346SOleg Nesterov 	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
14713a9ea052SOleg Nesterov 
14720326f5a9SSrikar Dronamraju 	if (!uprobe) {
147356bb4cf6SOleg Nesterov 		if (is_swbp > 0) {
14740326f5a9SSrikar Dronamraju 			/* No matching uprobe; signal SIGTRAP. */
14750326f5a9SSrikar Dronamraju 			send_sig(SIGTRAP, current, 0);
147656bb4cf6SOleg Nesterov 		} else {
147756bb4cf6SOleg Nesterov 			/*
147856bb4cf6SOleg Nesterov 			 * Either we raced with uprobe_unregister() or we can't
147956bb4cf6SOleg Nesterov 			 * access this memory. The latter is only possible if
148056bb4cf6SOleg Nesterov 			 * another thread plays with our ->mm. In both cases
148156bb4cf6SOleg Nesterov 			 * we can simply restart. If this vma was unmapped we
148256bb4cf6SOleg Nesterov 			 * can pretend this insn was not executed yet and get
148356bb4cf6SOleg Nesterov 			 * the (correct) SIGSEGV after restart.
148456bb4cf6SOleg Nesterov 			 */
148556bb4cf6SOleg Nesterov 			instruction_pointer_set(regs, bp_vaddr);
148656bb4cf6SOleg Nesterov 		}
14870326f5a9SSrikar Dronamraju 		return;
14880326f5a9SSrikar Dronamraju 	}
14890326f5a9SSrikar Dronamraju 
14901b08e907SOleg Nesterov 	utask = current->utask;
14910326f5a9SSrikar Dronamraju 	if (!utask) {
14920326f5a9SSrikar Dronamraju 		utask = add_utask();
14930326f5a9SSrikar Dronamraju 		/* Cannot allocate; re-execute the instruction. */
14940326f5a9SSrikar Dronamraju 		if (!utask)
14950578a970SOleg Nesterov 			goto restart;
14960326f5a9SSrikar Dronamraju 	}
1497746a9e6bSOleg Nesterov 
14980326f5a9SSrikar Dronamraju 	handler_chain(uprobe, regs);
14990578a970SOleg Nesterov 	if (can_skip_sstep(uprobe, regs))
15000578a970SOleg Nesterov 		goto out;
15010326f5a9SSrikar Dronamraju 
15020326f5a9SSrikar Dronamraju 	if (!pre_ssout(uprobe, regs, bp_vaddr)) {
15039d778782SSebastian Andrzej Siewior 		arch_uprobe_enable_step(&uprobe->arch);
1504746a9e6bSOleg Nesterov 		utask->active_uprobe = uprobe;
1505746a9e6bSOleg Nesterov 		utask->state = UTASK_SSTEP;
15060326f5a9SSrikar Dronamraju 		return;
15070326f5a9SSrikar Dronamraju 	}
15080326f5a9SSrikar Dronamraju 
15090578a970SOleg Nesterov restart:
15100326f5a9SSrikar Dronamraju 	/*
15110326f5a9SSrikar Dronamraju 	 * cannot singlestep; cannot skip instruction;
15120326f5a9SSrikar Dronamraju 	 * re-execute the instruction.
15130326f5a9SSrikar Dronamraju 	 */
15140326f5a9SSrikar Dronamraju 	instruction_pointer_set(regs, bp_vaddr);
15150578a970SOleg Nesterov out:
15160326f5a9SSrikar Dronamraju 	put_uprobe(uprobe);
15170326f5a9SSrikar Dronamraju }
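
/*
 * For context, the handlers that handler_chain() runs above are
 * registered with uprobe_register(). A minimal consumer might look like
 * the sketch below (names and the printk are illustrative; inode/offset
 * must identify the probed instruction in the mapped file):
 *
 *	static int my_handler(struct uprobe_consumer *self,
 *			      struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "probe hit, ip=%lx\n",
 *		       instruction_pointer(regs));
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer my_consumer = {
 *		.handler	= my_handler,
 *	};
 *
 *	// ... err = uprobe_register(inode, offset, &my_consumer);
 */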
15180326f5a9SSrikar Dronamraju 
15190326f5a9SSrikar Dronamraju /*
15200326f5a9SSrikar Dronamraju  * Perform required fix-ups and disable singlestep.
15210326f5a9SSrikar Dronamraju  * Allow pending signals to take effect.
15220326f5a9SSrikar Dronamraju  */
15230326f5a9SSrikar Dronamraju static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
15240326f5a9SSrikar Dronamraju {
15250326f5a9SSrikar Dronamraju 	struct uprobe *uprobe;
15260326f5a9SSrikar Dronamraju 
15270326f5a9SSrikar Dronamraju 	uprobe = utask->active_uprobe;
15280326f5a9SSrikar Dronamraju 	if (utask->state == UTASK_SSTEP_ACK)
15290326f5a9SSrikar Dronamraju 		arch_uprobe_post_xol(&uprobe->arch, regs);
15300326f5a9SSrikar Dronamraju 	else if (utask->state == UTASK_SSTEP_TRAPPED)
15310326f5a9SSrikar Dronamraju 		arch_uprobe_abort_xol(&uprobe->arch, regs);
15320326f5a9SSrikar Dronamraju 	else
15330326f5a9SSrikar Dronamraju 		WARN_ON_ONCE(1);
15340326f5a9SSrikar Dronamraju 
15359d778782SSebastian Andrzej Siewior 	arch_uprobe_disable_step(&uprobe->arch);
15360326f5a9SSrikar Dronamraju 	put_uprobe(uprobe);
15370326f5a9SSrikar Dronamraju 	utask->active_uprobe = NULL;
15380326f5a9SSrikar Dronamraju 	utask->state = UTASK_RUNNING;
1539d4b3b638SSrikar Dronamraju 	xol_free_insn_slot(current);
15400326f5a9SSrikar Dronamraju 
15410326f5a9SSrikar Dronamraju 	spin_lock_irq(&current->sighand->siglock);
15420326f5a9SSrikar Dronamraju 	recalc_sigpending(); /* see uprobe_deny_signal() */
15430326f5a9SSrikar Dronamraju 	spin_unlock_irq(&current->sighand->siglock);
15440326f5a9SSrikar Dronamraju }
15450326f5a9SSrikar Dronamraju 
15460326f5a9SSrikar Dronamraju /*
15471b08e907SOleg Nesterov  * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
15481b08e907SOleg Nesterov  * allows the thread to return from interrupt. After that handle_swbp()
15491b08e907SOleg Nesterov  * sets utask->active_uprobe.
15500326f5a9SSrikar Dronamraju  *
15511b08e907SOleg Nesterov  * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
15521b08e907SOleg Nesterov  * and allows the thread to return from interrupt.
15530326f5a9SSrikar Dronamraju  *
15540326f5a9SSrikar Dronamraju  * While returning to userspace, thread notices the TIF_UPROBE flag and calls
15550326f5a9SSrikar Dronamraju  * uprobe_notify_resume().
15560326f5a9SSrikar Dronamraju  */
15570326f5a9SSrikar Dronamraju void uprobe_notify_resume(struct pt_regs *regs)
15580326f5a9SSrikar Dronamraju {
15590326f5a9SSrikar Dronamraju 	struct uprobe_task *utask;
15600326f5a9SSrikar Dronamraju 
15610326f5a9SSrikar Dronamraju 	utask = current->utask;
15621b08e907SOleg Nesterov 	if (utask && utask->active_uprobe)
15630326f5a9SSrikar Dronamraju 		handle_singlestep(utask, regs);
15641b08e907SOleg Nesterov 	else
15651b08e907SOleg Nesterov 		handle_swbp(regs);
15660326f5a9SSrikar Dronamraju }
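
/*
 * The expected caller is the architecture's return-to-user path: when
 * TIF_UPROBE is set it should invoke uprobe_notify_resume() before
 * returning to userspace. A sketch of such a hook (not any arch's exact
 * code):
 *
 *	void do_notify_resume(struct pt_regs *regs, u32 thread_flags)
 *	{
 *		if (thread_flags & _TIF_UPROBE)
 *			uprobe_notify_resume(regs);
 *		// ... handle signals, TIF_NOTIFY_RESUME, etc. ...
 *	}
 */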
15670326f5a9SSrikar Dronamraju 
15680326f5a9SSrikar Dronamraju /*
15690326f5a9SSrikar Dronamraju  * uprobe_pre_sstep_notifier gets called from interrupt context as part of
15700326f5a9SSrikar Dronamraju  * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
15710326f5a9SSrikar Dronamraju  * the notifier mechanism. Sets the TIF_UPROBE flag and indicates a breakpoint hit.
15720326f5a9SSrikar Dronamraju int uprobe_pre_sstep_notifier(struct pt_regs *regs)
15730326f5a9SSrikar Dronamraju {
1574f8ac4ec9SOleg Nesterov 	if (!current->mm || !test_bit(MMF_HAS_UPROBES, &current->mm->flags))
15750326f5a9SSrikar Dronamraju 		return 0;
15760326f5a9SSrikar Dronamraju 
15770326f5a9SSrikar Dronamraju 	set_thread_flag(TIF_UPROBE);
15780326f5a9SSrikar Dronamraju 	return 1;
15790326f5a9SSrikar Dronamraju }
15800326f5a9SSrikar Dronamraju 
15810326f5a9SSrikar Dronamraju /*
15820326f5a9SSrikar Dronamraju  * uprobe_post_sstep_notifier gets called in interrupt context as part of the
15830326f5a9SSrikar Dronamraju  * notifier mechanism. Sets the TIF_UPROBE flag and indicates completion of singlestep.
15840326f5a9SSrikar Dronamraju  */
15850326f5a9SSrikar Dronamraju int uprobe_post_sstep_notifier(struct pt_regs *regs)
15860326f5a9SSrikar Dronamraju {
15870326f5a9SSrikar Dronamraju 	struct uprobe_task *utask = current->utask;
15880326f5a9SSrikar Dronamraju 
15890326f5a9SSrikar Dronamraju 	if (!current->mm || !utask || !utask->active_uprobe)
15900326f5a9SSrikar Dronamraju 		/* task is currently not uprobed */
15910326f5a9SSrikar Dronamraju 		return 0;
15920326f5a9SSrikar Dronamraju 
15930326f5a9SSrikar Dronamraju 	utask->state = UTASK_SSTEP_ACK;
15940326f5a9SSrikar Dronamraju 	set_thread_flag(TIF_UPROBE);
15950326f5a9SSrikar Dronamraju 	return 1;
15960326f5a9SSrikar Dronamraju }
15970326f5a9SSrikar Dronamraju 
15980326f5a9SSrikar Dronamraju static struct notifier_block uprobe_exception_nb = {
15990326f5a9SSrikar Dronamraju 	.notifier_call		= arch_uprobe_exception_notify,
16000326f5a9SSrikar Dronamraju 	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
16010326f5a9SSrikar Dronamraju };
16020326f5a9SSrikar Dronamraju 
1603a5f4374aSIngo Molnar static int __init init_uprobes(void)
1604a5f4374aSIngo Molnar {
1605a5f4374aSIngo Molnar 	int i;
1606a5f4374aSIngo Molnar 
1607a5f4374aSIngo Molnar 	for (i = 0; i < UPROBES_HASH_SZ; i++) {
1608a5f4374aSIngo Molnar 		mutex_init(&uprobes_mutex[i]);
1609a5f4374aSIngo Molnar 		mutex_init(&uprobes_mmap_mutex[i]);
1610a5f4374aSIngo Molnar 	}
16110326f5a9SSrikar Dronamraju 
16120326f5a9SSrikar Dronamraju 	return register_die_notifier(&uprobe_exception_nb);
1613a5f4374aSIngo Molnar }
16140326f5a9SSrikar Dronamraju module_init(init_uprobes);
1615a5f4374aSIngo Molnar 
1616a5f4374aSIngo Molnar static void __exit exit_uprobes(void)
1617a5f4374aSIngo Molnar {
1618a5f4374aSIngo Molnar }
1619a5f4374aSIngo Molnar module_exit(exit_uprobes);
1620