xref: /openbmc/linux/kernel/events/uprobes.c (revision 7bcae826)
1 /*
2  * User-space Probes (UProbes)
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17  *
18  * Copyright (C) IBM Corporation, 2008-2012
19  * Authors:
20  *	Srikar Dronamraju
21  *	Jim Keniston
22  * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
23  */
24 
25 #include <linux/kernel.h>
26 #include <linux/highmem.h>
27 #include <linux/pagemap.h>	/* read_mapping_page */
28 #include <linux/slab.h>
29 #include <linux/sched.h>
30 #include <linux/export.h>
31 #include <linux/rmap.h>		/* anon_vma_prepare */
32 #include <linux/mmu_notifier.h>	/* set_pte_at_notify */
33 #include <linux/swap.h>		/* try_to_free_swap */
34 #include <linux/ptrace.h>	/* user_enable_single_step */
35 #include <linux/kdebug.h>	/* notifier mechanism */
36 #include "../../mm/internal.h"	/* munlock_vma_page */
37 #include <linux/percpu-rwsem.h>
38 #include <linux/task_work.h>
39 #include <linux/shmem_fs.h>
40 
41 #include <linux/uprobes.h>
42 
43 #define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
44 #define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE
45 
46 static struct rb_root uprobes_tree = RB_ROOT;
47 /*
48  * Allows us to skip uprobe_mmap() if there are no uprobe events active
49  * at this time.  A fine-grained per-inode count would probably be better?
50  */
51 #define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)
52 
53 static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */
54 
55 #define UPROBES_HASH_SZ	13
56 /* serialize uprobe->pending_list */
57 static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
58 #define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
59 
60 static struct percpu_rw_semaphore dup_mmap_sem;
61 
62 /* Have a copy of original instruction */
63 #define UPROBE_COPY_INSN	0
64 
65 struct uprobe {
66 	struct rb_node		rb_node;	/* node in the rb tree */
67 	atomic_t		ref;
68 	struct rw_semaphore	register_rwsem;
69 	struct rw_semaphore	consumer_rwsem;
70 	struct list_head	pending_list;
71 	struct uprobe_consumer	*consumers;
72 	struct inode		*inode;		/* Also hold a ref to inode */
73 	loff_t			offset;
74 	unsigned long		flags;
75 
76 	/*
77 	 * The generic code assumes that it has two members of unknown type
78 	 * owned by the arch-specific code:
79 	 *
80 	 * 	insn -	copy_insn() saves the original instruction here for
81 	 *		arch_uprobe_analyze_insn().
82 	 *
83 	 *	ixol -	potentially modified instruction to execute out of
84 	 *		line, copied to xol_area by xol_get_insn_slot().
85 	 */
86 	struct arch_uprobe	arch;
87 };
88 
89 /*
90  * Execute out of line area: anonymous executable mapping installed
91  * by the probed task to execute the copy of the original instruction
92  * mangled by set_swbp().
93  *
94  * On a breakpoint hit, the thread contends for a slot.  It frees the
95  * slot after singlestep. Currently a fixed number of slots are
96  * allocated.
97  */
98 struct xol_area {
99 	wait_queue_head_t 		wq;		/* if all slots are busy */
100 	atomic_t 			slot_count;	/* number of in-use slots */
101 	unsigned long 			*bitmap;	/* 0 = free slot */
102 
103 	struct vm_special_mapping	xol_mapping;
104 	struct page 			*pages[2];
105 	/*
106 	 * We keep the vma's vm_start rather than a pointer to the vma
107 	 * itself.  The probed process or a naughty kernel module could make
108 	 * the vma go away, and we must handle that reasonably gracefully.
109 	 */
110 	unsigned long 			vaddr;		/* Page(s) of instruction slots */
111 };
112 
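/*
 * Slot layout, summarizing the code below: the area holds UINSNS_PER_PAGE
 * slots of UPROBE_XOL_SLOT_BYTES each, addressed as
 * area->vaddr + slot_nr * UPROBE_XOL_SLOT_BYTES.  Slot 0 is reserved for
 * the uretprobe trampoline, see get_trampoline_vaddr().
 */
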
113 /*
114  * valid_vma: Verify if the specified vma is an executable vma
115  * Relax restrictions while unregistering: vm_flags might have
116  * changed after breakpoint was inserted.
117  *	- is_register: indicates if we are in register context.
118  *	- Return true if the specified vma is an executable,
119  *	  file-backed vma.
120  */
121 static bool valid_vma(struct vm_area_struct *vma, bool is_register)
122 {
123 	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;
124 
125 	if (is_register)
126 		flags |= VM_WRITE;
127 
128 	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
129 }
130 
131 static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
132 {
133 	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
134 }
135 
136 static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
137 {
138 	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
139 }
140 
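/*
 * Example of the mapping above: with vm_start == 0x400000, vm_pgoff == 0
 * and offset == 0x1234, offset_to_vaddr() yields 0x401234 and
 * vaddr_to_offset() maps 0x401234 back to 0x1234.
 */
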
141 /**
142  * __replace_page - replace page in vma by new page.
143  * Based on replace_page() in mm/ksm.c.
144  *
145  * @vma:      vma that holds the pte pointing to old_page
146  * @addr:     address the old page is mapped at
147  * @old_page: the page we are replacing by new_page
148  * @new_page: the modified page we replace old_page by
149  *
150  * Returns 0 on success, -EFAULT on failure.
151  */
152 static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
153 				struct page *old_page, struct page *new_page)
154 {
155 	struct mm_struct *mm = vma->vm_mm;
156 	struct page_vma_mapped_walk pvmw = {
157 		.page = old_page,
158 		.vma = vma,
159 		.address = addr,
160 	};
161 	int err;
162 	/* For mmu_notifiers */
163 	const unsigned long mmun_start = addr;
164 	const unsigned long mmun_end   = addr + PAGE_SIZE;
165 	struct mem_cgroup *memcg;
166 
167 	VM_BUG_ON_PAGE(PageTransHuge(old_page), old_page);
168 
169 	err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
170 			false);
171 	if (err)
172 		return err;
173 
174 	/* For try_to_free_swap() and munlock_vma_page() below */
175 	lock_page(old_page);
176 
177 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
178 	err = -EAGAIN;
179 	if (!page_vma_mapped_walk(&pvmw)) {
180 		mem_cgroup_cancel_charge(new_page, memcg, false);
181 		goto unlock;
182 	}
183 	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
184 
185 	get_page(new_page);
186 	page_add_new_anon_rmap(new_page, vma, addr, false);
187 	mem_cgroup_commit_charge(new_page, memcg, false, false);
188 	lru_cache_add_active_or_unevictable(new_page, vma);
189 
190 	if (!PageAnon(old_page)) {
191 		dec_mm_counter(mm, mm_counter_file(old_page));
192 		inc_mm_counter(mm, MM_ANONPAGES);
193 	}
194 
195 	flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
196 	ptep_clear_flush_notify(vma, addr, pvmw.pte);
197 	set_pte_at_notify(mm, addr, pvmw.pte,
198 			mk_pte(new_page, vma->vm_page_prot));
199 
200 	page_remove_rmap(old_page, false);
201 	if (!page_mapped(old_page))
202 		try_to_free_swap(old_page);
203 	page_vma_mapped_walk_done(&pvmw);
204 
205 	if (vma->vm_flags & VM_LOCKED)
206 		munlock_vma_page(old_page);
207 	put_page(old_page);
208 
209 	err = 0;
210  unlock:
211 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
212 	unlock_page(old_page);
213 	return err;
214 }
215 
216 /**
217  * is_swbp_insn - check if instruction is breakpoint instruction.
218  * @insn: instruction to be checked.
219  * Default implementation of is_swbp_insn
220  * Returns true if @insn is a breakpoint instruction.
221  */
222 bool __weak is_swbp_insn(uprobe_opcode_t *insn)
223 {
224 	return *insn == UPROBE_SWBP_INSN;
225 }
226 
227 /**
228  * is_trap_insn - check if instruction is a trap instruction.
229  * @insn: instruction to be checked.
230  * Default implementation of is_trap_insn
231  * Returns true if @insn is a breakpoint instruction.
232  *
233  * This function is needed for the case where an architecture has multiple
234  * trap instructions (like powerpc).
235  */
236 bool __weak is_trap_insn(uprobe_opcode_t *insn)
237 {
238 	return is_swbp_insn(insn);
239 }
240 
241 static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
242 {
243 	void *kaddr = kmap_atomic(page);
244 	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
245 	kunmap_atomic(kaddr);
246 }
247 
248 static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
249 {
250 	void *kaddr = kmap_atomic(page);
251 	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
252 	kunmap_atomic(kaddr);
253 }
254 
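/*
 * verify_opcode - decide whether uprobe_write_opcode() has work to do.
 * Returns 1 if @new_opcode should be written at @vaddr, 0 if the page
 * already holds the desired state (breakpoint already present on register,
 * already restored on unregister).
 */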
255 static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
256 {
257 	uprobe_opcode_t old_opcode;
258 	bool is_swbp;
259 
260 	/*
261 	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
262 	 * We do not check if it is any other 'trap variant' which could
263 	 * be conditional trap instruction such as the one powerpc supports.
264 	 *
265 	 * The logic is that we do not care if the underlying instruction
266 	 * is a trap variant; uprobes always wins over any other (gdb)
267 	 * breakpoint.
268 	 */
269 	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
270 	is_swbp = is_swbp_insn(&old_opcode);
271 
272 	if (is_swbp_insn(new_opcode)) {
273 		if (is_swbp)		/* register: already installed? */
274 			return 0;
275 	} else {
276 		if (!is_swbp)		/* unregister: was it changed by us? */
277 			return 0;
278 	}
279 
280 	return 1;
281 }
282 
283 /*
284  * NOTE:
285  * Expect the breakpoint instruction to be the smallest size instruction for
286  * the architecture. If an arch has variable-length instructions and the
287  * breakpoint instruction is not the smallest-length instruction
288  * supported by that architecture, then we need to modify is_trap_at_addr and
289  * uprobe_write_opcode accordingly. This would never be a problem for archs
290  * that have fixed length instructions.
291  *
292  * uprobe_write_opcode - write the opcode at a given virtual address.
293  * @mm: the probed process address space.
294  * @vaddr: the virtual address to store the opcode.
295  * @opcode: opcode to be written at @vaddr.
296  *
297  * Called with mm->mmap_sem held for write.
298  * Return 0 (success) or a negative errno.
299  */
300 int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
301 			uprobe_opcode_t opcode)
302 {
303 	struct page *old_page, *new_page;
304 	struct vm_area_struct *vma;
305 	int ret;
306 
307 retry:
308 	/* Read the page with vaddr into memory */
309 	ret = get_user_pages_remote(NULL, mm, vaddr, 1,
310 			FOLL_FORCE | FOLL_SPLIT, &old_page, &vma, NULL);
311 	if (ret <= 0)
312 		return ret;
313 
314 	ret = verify_opcode(old_page, vaddr, &opcode);
315 	if (ret <= 0)
316 		goto put_old;
317 
318 	ret = anon_vma_prepare(vma);
319 	if (ret)
320 		goto put_old;
321 
322 	ret = -ENOMEM;
323 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
324 	if (!new_page)
325 		goto put_old;
326 
327 	__SetPageUptodate(new_page);
328 	copy_highpage(new_page, old_page);
329 	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
330 
331 	ret = __replace_page(vma, vaddr, old_page, new_page);
332 	put_page(new_page);
333 put_old:
334 	put_page(old_page);
335 
336 	if (unlikely(ret == -EAGAIN))
337 		goto retry;
338 	return ret;
339 }
340 
341 /**
342  * set_swbp - store breakpoint at a given address.
343  * @auprobe: arch specific probepoint information.
344  * @mm: the probed process address space.
345  * @vaddr: the virtual address to insert the opcode.
346  *
347  * For mm @mm, store the breakpoint instruction at @vaddr.
348  * Return 0 (success) or a negative errno.
349  */
350 int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
351 {
352 	return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
353 }
354 
355 /**
356  * set_orig_insn - Restore the original instruction.
357  * @mm: the probed process address space.
358  * @auprobe: arch specific probepoint information.
359  * @vaddr: the virtual address to insert the opcode.
360  *
361  * For mm @mm, restore the original instruction at @vaddr.
362  * Return 0 (success) or a negative errno.
363  */
364 int __weak
365 set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
366 {
367 	return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)&auprobe->insn);
368 }
369 
370 static struct uprobe *get_uprobe(struct uprobe *uprobe)
371 {
372 	atomic_inc(&uprobe->ref);
373 	return uprobe;
374 }
375 
376 static void put_uprobe(struct uprobe *uprobe)
377 {
378 	if (atomic_dec_and_test(&uprobe->ref))
379 		kfree(uprobe);
380 }
381 
382 static int match_uprobe(struct uprobe *l, struct uprobe *r)
383 {
384 	if (l->inode < r->inode)
385 		return -1;
386 
387 	if (l->inode > r->inode)
388 		return 1;
389 
390 	if (l->offset < r->offset)
391 		return -1;
392 
393 	if (l->offset > r->offset)
394 		return 1;
395 
396 	return 0;
397 }
398 
399 static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
400 {
401 	struct uprobe u = { .inode = inode, .offset = offset };
402 	struct rb_node *n = uprobes_tree.rb_node;
403 	struct uprobe *uprobe;
404 	int match;
405 
406 	while (n) {
407 		uprobe = rb_entry(n, struct uprobe, rb_node);
408 		match = match_uprobe(&u, uprobe);
409 		if (!match)
410 			return get_uprobe(uprobe);
411 
412 		if (match < 0)
413 			n = n->rb_left;
414 		else
415 			n = n->rb_right;
416 	}
417 	return NULL;
418 }
419 
420 /*
421  * Find a uprobe corresponding to a given inode:offset
422  * Acquires uprobes_treelock
423  */
424 static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
425 {
426 	struct uprobe *uprobe;
427 
428 	spin_lock(&uprobes_treelock);
429 	uprobe = __find_uprobe(inode, offset);
430 	spin_unlock(&uprobes_treelock);
431 
432 	return uprobe;
433 }
434 
435 static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
436 {
437 	struct rb_node **p = &uprobes_tree.rb_node;
438 	struct rb_node *parent = NULL;
439 	struct uprobe *u;
440 	int match;
441 
442 	while (*p) {
443 		parent = *p;
444 		u = rb_entry(parent, struct uprobe, rb_node);
445 		match = match_uprobe(uprobe, u);
446 		if (!match)
447 			return get_uprobe(u);
448 
449 		if (match < 0)
450 			p = &parent->rb_left;
451 		else
452 			p = &parent->rb_right;
453 
454 	}
455 
456 	u = NULL;
457 	rb_link_node(&uprobe->rb_node, parent, p);
458 	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
459 	/* get access + creation ref */
460 	atomic_set(&uprobe->ref, 2);
461 
462 	return u;
463 }
464 
465 /*
466  * Acquire uprobes_treelock.
467  * If a matching uprobe already exists in the rbtree,
468  *	increment its access refcount and return the matching uprobe.
469  *
470  * If there is no matching uprobe, insert the uprobe into the rbtree,
471  *	take a double refcount (access + creation) and return NULL.
472  */
473 static struct uprobe *insert_uprobe(struct uprobe *uprobe)
474 {
475 	struct uprobe *u;
476 
477 	spin_lock(&uprobes_treelock);
478 	u = __insert_uprobe(uprobe);
479 	spin_unlock(&uprobes_treelock);
480 
481 	return u;
482 }
483 
484 static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
485 {
486 	struct uprobe *uprobe, *cur_uprobe;
487 
488 	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
489 	if (!uprobe)
490 		return NULL;
491 
492 	uprobe->inode = igrab(inode);
493 	uprobe->offset = offset;
494 	init_rwsem(&uprobe->register_rwsem);
495 	init_rwsem(&uprobe->consumer_rwsem);
496 
497 	/* add to uprobes_tree, sorted on inode:offset */
498 	cur_uprobe = insert_uprobe(uprobe);
499 	/* a uprobe exists for this inode:offset combination */
500 	if (cur_uprobe) {
501 		kfree(uprobe);
502 		uprobe = cur_uprobe;
503 		iput(inode);
504 	}
505 
506 	return uprobe;
507 }
508 
509 static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
510 {
511 	down_write(&uprobe->consumer_rwsem);
512 	uc->next = uprobe->consumers;
513 	uprobe->consumers = uc;
514 	up_write(&uprobe->consumer_rwsem);
515 }
516 
517 /*
518  * For uprobe @uprobe, delete the consumer @uc.
519  * Return true if @uc was deleted successfully,
520  * false otherwise.
521  */
522 static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
523 {
524 	struct uprobe_consumer **con;
525 	bool ret = false;
526 
527 	down_write(&uprobe->consumer_rwsem);
528 	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
529 		if (*con == uc) {
530 			*con = uc->next;
531 			ret = true;
532 			break;
533 		}
534 	}
535 	up_write(&uprobe->consumer_rwsem);
536 
537 	return ret;
538 }
539 
540 static int __copy_insn(struct address_space *mapping, struct file *filp,
541 			void *insn, int nbytes, loff_t offset)
542 {
543 	struct page *page;
544 	/*
545 	 * Ensure that the page that has the original instruction is populated
546 	 * and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
547 	 * see uprobe_register().
548 	 */
549 	if (mapping->a_ops->readpage)
550 		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
551 	else
552 		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
553 	if (IS_ERR(page))
554 		return PTR_ERR(page);
555 
556 	copy_from_page(page, offset, insn, nbytes);
557 	put_page(page);
558 
559 	return 0;
560 }
561 
562 static int copy_insn(struct uprobe *uprobe, struct file *filp)
563 {
564 	struct address_space *mapping = uprobe->inode->i_mapping;
565 	loff_t offs = uprobe->offset;
566 	void *insn = &uprobe->arch.insn;
567 	int size = sizeof(uprobe->arch.insn);
568 	int len, err = -EIO;
569 
570 	/* Copy only available bytes, -EIO if nothing was read */
571 	do {
572 		if (offs >= i_size_read(uprobe->inode))
573 			break;
574 
575 		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
576 		err = __copy_insn(mapping, filp, insn, len, offs);
577 		if (err)
578 			break;
579 
580 		insn += len;
581 		offs += len;
582 		size -= len;
583 	} while (size);
584 
585 	return err;
586 }
587 
588 static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
589 				struct mm_struct *mm, unsigned long vaddr)
590 {
591 	int ret = 0;
592 
593 	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
594 		return ret;
595 
596 	/* TODO: move this into _register, until then we abuse this sem. */
597 	down_write(&uprobe->consumer_rwsem);
598 	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
599 		goto out;
600 
601 	ret = copy_insn(uprobe, file);
602 	if (ret)
603 		goto out;
604 
605 	ret = -ENOTSUPP;
606 	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
607 		goto out;
608 
609 	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
610 	if (ret)
611 		goto out;
612 
613 	/* uprobe_write_opcode() assumes we don't cross page boundary */
614 	BUG_ON((uprobe->offset & ~PAGE_MASK) +
615 			UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
616 
617 	smp_wmb(); /* pairs with rmb() in find_active_uprobe() */
618 	set_bit(UPROBE_COPY_INSN, &uprobe->flags);
619 
620  out:
621 	up_write(&uprobe->consumer_rwsem);
622 
623 	return ret;
624 }
625 
626 static inline bool consumer_filter(struct uprobe_consumer *uc,
627 				   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
628 {
629 	return !uc->filter || uc->filter(uc, ctx, mm);
630 }
631 
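/*
 * filter_chain - return true as soon as any registered consumer wants a
 * breakpoint in @mm for @ctx.  A consumer without ->filter always matches,
 * see consumer_filter() above.
 */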
632 static bool filter_chain(struct uprobe *uprobe,
633 			 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
634 {
635 	struct uprobe_consumer *uc;
636 	bool ret = false;
637 
638 	down_read(&uprobe->consumer_rwsem);
639 	for (uc = uprobe->consumers; uc; uc = uc->next) {
640 		ret = consumer_filter(uc, ctx, mm);
641 		if (ret)
642 			break;
643 	}
644 	up_read(&uprobe->consumer_rwsem);
645 
646 	return ret;
647 }
648 
649 static int
650 install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
651 			struct vm_area_struct *vma, unsigned long vaddr)
652 {
653 	bool first_uprobe;
654 	int ret;
655 
656 	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
657 	if (ret)
658 		return ret;
659 
660 	/*
661 	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
662 	 * the task can hit this breakpoint right after __replace_page().
663 	 */
664 	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
665 	if (first_uprobe)
666 		set_bit(MMF_HAS_UPROBES, &mm->flags);
667 
668 	ret = set_swbp(&uprobe->arch, mm, vaddr);
669 	if (!ret)
670 		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
671 	else if (first_uprobe)
672 		clear_bit(MMF_HAS_UPROBES, &mm->flags);
673 
674 	return ret;
675 }
676 
677 static int
678 remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
679 {
680 	set_bit(MMF_RECALC_UPROBES, &mm->flags);
681 	return set_orig_insn(&uprobe->arch, mm, vaddr);
682 }
683 
684 static inline bool uprobe_is_active(struct uprobe *uprobe)
685 {
686 	return !RB_EMPTY_NODE(&uprobe->rb_node);
687 }
688 /*
689  * There could be threads that have already hit the breakpoint. They
690  * will recheck the current insn and restart if find_uprobe() fails.
691  * See find_active_uprobe().
692  */
693 static void delete_uprobe(struct uprobe *uprobe)
694 {
695 	if (WARN_ON(!uprobe_is_active(uprobe)))
696 		return;
697 
698 	spin_lock(&uprobes_treelock);
699 	rb_erase(&uprobe->rb_node, &uprobes_tree);
700 	spin_unlock(&uprobes_treelock);
701 	RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
702 	iput(uprobe->inode);
703 	put_uprobe(uprobe);
704 }
705 
706 struct map_info {
707 	struct map_info *next;
708 	struct mm_struct *mm;
709 	unsigned long vaddr;
710 };
711 
712 static inline struct map_info *free_map_info(struct map_info *info)
713 {
714 	struct map_info *next = info->next;
715 	kfree(info);
716 	return next;
717 }
718 
719 static struct map_info *
720 build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
721 {
722 	unsigned long pgoff = offset >> PAGE_SHIFT;
723 	struct vm_area_struct *vma;
724 	struct map_info *curr = NULL;
725 	struct map_info *prev = NULL;
726 	struct map_info *info;
727 	int more = 0;
728 
729  again:
730 	i_mmap_lock_read(mapping);
731 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
732 		if (!valid_vma(vma, is_register))
733 			continue;
734 
735 		if (!prev && !more) {
736 			/*
737 			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
738 			 * reclaim. This is optimistic, no harm done if it fails.
739 			 */
740 			prev = kmalloc(sizeof(struct map_info),
741 					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
742 			if (prev)
743 				prev->next = NULL;
744 		}
745 		if (!prev) {
746 			more++;
747 			continue;
748 		}
749 
750 		if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
751 			continue;
752 
753 		info = prev;
754 		prev = prev->next;
755 		info->next = curr;
756 		curr = info;
757 
758 		info->mm = vma->vm_mm;
759 		info->vaddr = offset_to_vaddr(vma, offset);
760 	}
761 	i_mmap_unlock_read(mapping);
762 
763 	if (!more)
764 		goto out;
765 
766 	prev = curr;
767 	while (curr) {
768 		mmput(curr->mm);
769 		curr = curr->next;
770 	}
771 
772 	do {
773 		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
774 		if (!info) {
775 			curr = ERR_PTR(-ENOMEM);
776 			goto out;
777 		}
778 		info->next = prev;
779 		prev = info;
780 	} while (--more);
781 
782 	goto again;
783  out:
784 	while (prev)
785 		prev = free_map_info(prev);
786 	return curr;
787 }
788 
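/*
 * register_for_each_vma - for every mm that currently maps
 * uprobe->inode:offset, install the breakpoint (@new != NULL) or remove it
 * (@new == NULL).  dup_mmap_sem is taken for writing to exclude
 * uprobe_start_dup_mmap()/uprobe_end_dup_mmap() sections.
 */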
789 static int
790 register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
791 {
792 	bool is_register = !!new;
793 	struct map_info *info;
794 	int err = 0;
795 
796 	percpu_down_write(&dup_mmap_sem);
797 	info = build_map_info(uprobe->inode->i_mapping,
798 					uprobe->offset, is_register);
799 	if (IS_ERR(info)) {
800 		err = PTR_ERR(info);
801 		goto out;
802 	}
803 
804 	while (info) {
805 		struct mm_struct *mm = info->mm;
806 		struct vm_area_struct *vma;
807 
808 		if (err && is_register)
809 			goto free;
810 
811 		down_write(&mm->mmap_sem);
812 		vma = find_vma(mm, info->vaddr);
813 		if (!vma || !valid_vma(vma, is_register) ||
814 		    file_inode(vma->vm_file) != uprobe->inode)
815 			goto unlock;
816 
817 		if (vma->vm_start > info->vaddr ||
818 		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
819 			goto unlock;
820 
821 		if (is_register) {
822 			/* consult only the "caller", new consumer. */
823 			if (consumer_filter(new,
824 					UPROBE_FILTER_REGISTER, mm))
825 				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
826 		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
827 			if (!filter_chain(uprobe,
828 					UPROBE_FILTER_UNREGISTER, mm))
829 				err |= remove_breakpoint(uprobe, mm, info->vaddr);
830 		}
831 
832  unlock:
833 		up_write(&mm->mmap_sem);
834  free:
835 		mmput(mm);
836 		info = free_map_info(info);
837 	}
838  out:
839 	percpu_up_write(&dup_mmap_sem);
840 	return err;
841 }
842 
843 static int __uprobe_register(struct uprobe *uprobe, struct uprobe_consumer *uc)
844 {
845 	consumer_add(uprobe, uc);
846 	return register_for_each_vma(uprobe, uc);
847 }
848 
849 static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
850 {
851 	int err;
852 
853 	if (WARN_ON(!consumer_del(uprobe, uc)))
854 		return;
855 
856 	err = register_for_each_vma(uprobe, NULL);
857 	/* TODO: can't unregister? schedule a worker thread */
858 	if (!uprobe->consumers && !err)
859 		delete_uprobe(uprobe);
860 }
861 
862 /*
863  * uprobe_register - register a probe
864  * @inode: the file in which the probe has to be placed.
865  * @offset: offset from the start of the file.
866  * @uc: information on how to handle the probe.
867  *
868  * Apart from the access refcount, uprobe_register() takes a creation
869  * refcount (through alloc_uprobe) if and only if this @uprobe is getting
870  * inserted into the rbtree (i.e. the first consumer for a @inode:@offset
871  * tuple).  Creation refcount stops uprobe_unregister from freeing the
872  * @uprobe even before the register operation is complete. Creation
873  * refcount is released when the last @uc for the @uprobe
874  * unregisters.
875  *
876  * Return errno if it cannot successfully install probes
877  * else return 0 (success)
878  */
879 int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
880 {
881 	struct uprobe *uprobe;
882 	int ret;
883 
884 	/* Uprobe must have at least one set consumer */
885 	if (!uc->handler && !uc->ret_handler)
886 		return -EINVAL;
887 
888 	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
889 	if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping))
890 		return -EIO;
891 	/* Racy, just to catch the obvious mistakes */
892 	if (offset > i_size_read(inode))
893 		return -EINVAL;
894 
895  retry:
896 	uprobe = alloc_uprobe(inode, offset);
897 	if (!uprobe)
898 		return -ENOMEM;
899 	/*
900 	 * We can race with uprobe_unregister()->delete_uprobe().
901 	 * Check uprobe_is_active() and retry if it is false.
902 	 */
903 	down_write(&uprobe->register_rwsem);
904 	ret = -EAGAIN;
905 	if (likely(uprobe_is_active(uprobe))) {
906 		ret = __uprobe_register(uprobe, uc);
907 		if (ret)
908 			__uprobe_unregister(uprobe, uc);
909 	}
910 	up_write(&uprobe->register_rwsem);
911 	put_uprobe(uprobe);
912 
913 	if (unlikely(ret == -EAGAIN))
914 		goto retry;
915 	return ret;
916 }
917 EXPORT_SYMBOL_GPL(uprobe_register);
918 
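/*
 * Minimal usage sketch (illustrative only, my_handler/my_consumer are not
 * part of this file).  A handler returning 0 keeps the breakpoint; see
 * handler_chain() for UPROBE_HANDLER_REMOVE handling:
 *
 *	static int my_handler(struct uprobe_consumer *uc, struct pt_regs *regs)
 *	{
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer my_consumer = { .handler = my_handler };
 *
 *	err = uprobe_register(inode, offset, &my_consumer);
 *	...
 *	uprobe_unregister(inode, offset, &my_consumer);
 */
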
919 /*
920  * uprobe_apply - add or remove the breakpoints for a registered probe.
921  * @inode: the file in which the probe resides.
922  * @offset: offset from the start of the file.
923  * @uc: consumer which wants to add more or remove some breakpoints
924  * @add: add or remove the breakpoints
925  */
926 int uprobe_apply(struct inode *inode, loff_t offset,
927 			struct uprobe_consumer *uc, bool add)
928 {
929 	struct uprobe *uprobe;
930 	struct uprobe_consumer *con;
931 	int ret = -ENOENT;
932 
933 	uprobe = find_uprobe(inode, offset);
934 	if (WARN_ON(!uprobe))
935 		return ret;
936 
937 	down_write(&uprobe->register_rwsem);
938 	for (con = uprobe->consumers; con && con != uc ; con = con->next)
939 		;
940 	if (con)
941 		ret = register_for_each_vma(uprobe, add ? uc : NULL);
942 	up_write(&uprobe->register_rwsem);
943 	put_uprobe(uprobe);
944 
945 	return ret;
946 }
947 
948 /*
949  * uprobe_unregister - unregister an already registered probe.
950  * @inode: the file in which the probe has to be removed.
951  * @offset: offset from the start of the file.
952  * @uc: identify which probe if multiple probes are colocated.
953  */
954 void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
955 {
956 	struct uprobe *uprobe;
957 
958 	uprobe = find_uprobe(inode, offset);
959 	if (WARN_ON(!uprobe))
960 		return;
961 
962 	down_write(&uprobe->register_rwsem);
963 	__uprobe_unregister(uprobe, uc);
964 	up_write(&uprobe->register_rwsem);
965 	put_uprobe(uprobe);
966 }
967 EXPORT_SYMBOL_GPL(uprobe_unregister);
968 
969 static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
970 {
971 	struct vm_area_struct *vma;
972 	int err = 0;
973 
974 	down_read(&mm->mmap_sem);
975 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
976 		unsigned long vaddr;
977 		loff_t offset;
978 
979 		if (!valid_vma(vma, false) ||
980 		    file_inode(vma->vm_file) != uprobe->inode)
981 			continue;
982 
983 		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
984 		if (uprobe->offset <  offset ||
985 		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
986 			continue;
987 
988 		vaddr = offset_to_vaddr(vma, uprobe->offset);
989 		err |= remove_breakpoint(uprobe, mm, vaddr);
990 	}
991 	up_read(&mm->mmap_sem);
992 
993 	return err;
994 }
995 
996 static struct rb_node *
997 find_node_in_range(struct inode *inode, loff_t min, loff_t max)
998 {
999 	struct rb_node *n = uprobes_tree.rb_node;
1000 
1001 	while (n) {
1002 		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);
1003 
1004 		if (inode < u->inode) {
1005 			n = n->rb_left;
1006 		} else if (inode > u->inode) {
1007 			n = n->rb_right;
1008 		} else {
1009 			if (max < u->offset)
1010 				n = n->rb_left;
1011 			else if (min > u->offset)
1012 				n = n->rb_right;
1013 			else
1014 				break;
1015 		}
1016 	}
1017 
1018 	return n;
1019 }
1020 
1021 /*
1022  * For a given range in vma, build a list of probes that need to be inserted.
1023  */
1024 static void build_probe_list(struct inode *inode,
1025 				struct vm_area_struct *vma,
1026 				unsigned long start, unsigned long end,
1027 				struct list_head *head)
1028 {
1029 	loff_t min, max;
1030 	struct rb_node *n, *t;
1031 	struct uprobe *u;
1032 
1033 	INIT_LIST_HEAD(head);
1034 	min = vaddr_to_offset(vma, start);
1035 	max = min + (end - start) - 1;
1036 
1037 	spin_lock(&uprobes_treelock);
1038 	n = find_node_in_range(inode, min, max);
1039 	if (n) {
1040 		for (t = n; t; t = rb_prev(t)) {
1041 			u = rb_entry(t, struct uprobe, rb_node);
1042 			if (u->inode != inode || u->offset < min)
1043 				break;
1044 			list_add(&u->pending_list, head);
1045 			get_uprobe(u);
1046 		}
1047 		for (t = n; (t = rb_next(t)); ) {
1048 			u = rb_entry(t, struct uprobe, rb_node);
1049 			if (u->inode != inode || u->offset > max)
1050 				break;
1051 			list_add(&u->pending_list, head);
1052 			get_uprobe(u);
1053 		}
1054 	}
1055 	spin_unlock(&uprobes_treelock);
1056 }
1057 
1058 /*
1059  * Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
1060  *
1061  * Currently we ignore all errors and always return 0, the callers
1062  * can't handle the failure anyway.
1063  */
1064 int uprobe_mmap(struct vm_area_struct *vma)
1065 {
1066 	struct list_head tmp_list;
1067 	struct uprobe *uprobe, *u;
1068 	struct inode *inode;
1069 
1070 	if (no_uprobe_events() || !valid_vma(vma, true))
1071 		return 0;
1072 
1073 	inode = file_inode(vma->vm_file);
1074 	if (!inode)
1075 		return 0;
1076 
1077 	mutex_lock(uprobes_mmap_hash(inode));
1078 	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
1079 	/*
1080 	 * We can race with uprobe_unregister(); this uprobe can already be
1081 	 * removed. But in this case filter_chain() must return false, as all
1082 	 * consumers have gone away.
1083 	 */
1084 	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
1085 		if (!fatal_signal_pending(current) &&
1086 		    filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
1087 			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
1088 			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
1089 		}
1090 		put_uprobe(uprobe);
1091 	}
1092 	mutex_unlock(uprobes_mmap_hash(inode));
1093 
1094 	return 0;
1095 }
1096 
1097 static bool
1098 vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1099 {
1100 	loff_t min, max;
1101 	struct inode *inode;
1102 	struct rb_node *n;
1103 
1104 	inode = file_inode(vma->vm_file);
1105 
1106 	min = vaddr_to_offset(vma, start);
1107 	max = min + (end - start) - 1;
1108 
1109 	spin_lock(&uprobes_treelock);
1110 	n = find_node_in_range(inode, min, max);
1111 	spin_unlock(&uprobes_treelock);
1112 
1113 	return !!n;
1114 }
1115 
1116 /*
1117  * Called in context of a munmap of a vma.
1118  */
1119 void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1120 {
1121 	if (no_uprobe_events() || !valid_vma(vma, false))
1122 		return;
1123 
1124 	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
1125 		return;
1126 
1127 	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
1128 	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
1129 		return;
1130 
1131 	if (vma_has_uprobes(vma, start, end))
1132 		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
1133 }
1134 
1135 /* Slot allocation for XOL */
1136 static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
1137 {
1138 	struct vm_area_struct *vma;
1139 	int ret;
1140 
1141 	if (down_write_killable(&mm->mmap_sem))
1142 		return -EINTR;
1143 
1144 	if (mm->uprobes_state.xol_area) {
1145 		ret = -EALREADY;
1146 		goto fail;
1147 	}
1148 
1149 	if (!area->vaddr) {
1150 		/* Try to map as high as possible, this is only a hint. */
1151 		area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
1152 						PAGE_SIZE, 0, 0);
1153 		if (area->vaddr & ~PAGE_MASK) {
1154 			ret = area->vaddr;
1155 			goto fail;
1156 		}
1157 	}
1158 
1159 	vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
1160 				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
1161 				&area->xol_mapping);
1162 	if (IS_ERR(vma)) {
1163 		ret = PTR_ERR(vma);
1164 		goto fail;
1165 	}
1166 
1167 	ret = 0;
1168 	smp_wmb();	/* pairs with get_xol_area() */
1169 	mm->uprobes_state.xol_area = area;
1170  fail:
1171 	up_write(&mm->mmap_sem);
1172 
1173 	return ret;
1174 }
1175 
1176 static struct xol_area *__create_xol_area(unsigned long vaddr)
1177 {
1178 	struct mm_struct *mm = current->mm;
1179 	uprobe_opcode_t insn = UPROBE_SWBP_INSN;
1180 	struct xol_area *area;
1181 
1182 	area = kmalloc(sizeof(*area), GFP_KERNEL);
1183 	if (unlikely(!area))
1184 		goto out;
1185 
1186 	area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
1187 	if (!area->bitmap)
1188 		goto free_area;
1189 
1190 	area->xol_mapping.name = "[uprobes]";
1191 	area->xol_mapping.fault = NULL;
1192 	area->xol_mapping.pages = area->pages;
1193 	area->pages[0] = alloc_page(GFP_HIGHUSER);
1194 	if (!area->pages[0])
1195 		goto free_bitmap;
1196 	area->pages[1] = NULL;
1197 
1198 	area->vaddr = vaddr;
1199 	init_waitqueue_head(&area->wq);
1200 	/* Reserve the 1st slot for get_trampoline_vaddr() */
1201 	set_bit(0, area->bitmap);
1202 	atomic_set(&area->slot_count, 1);
1203 	arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);
1204 
1205 	if (!xol_add_vma(mm, area))
1206 		return area;
1207 
1208 	__free_page(area->pages[0]);
1209  free_bitmap:
1210 	kfree(area->bitmap);
1211  free_area:
1212 	kfree(area);
1213  out:
1214 	return NULL;
1215 }
1216 
1217 /*
1218  * get_xol_area - Allocate process's xol_area if necessary.
1219  * This area will be used for storing instructions for execution out of line.
1220  *
1221  * Returns the allocated area or NULL.
1222  */
1223 static struct xol_area *get_xol_area(void)
1224 {
1225 	struct mm_struct *mm = current->mm;
1226 	struct xol_area *area;
1227 
1228 	if (!mm->uprobes_state.xol_area)
1229 		__create_xol_area(0);
1230 
1231 	area = mm->uprobes_state.xol_area;
1232 	smp_read_barrier_depends();	/* pairs with wmb in xol_add_vma() */
1233 	return area;
1234 }
1235 
1236 /*
1237  * uprobe_clear_state - Free the area allocated for slots.
1238  */
1239 void uprobe_clear_state(struct mm_struct *mm)
1240 {
1241 	struct xol_area *area = mm->uprobes_state.xol_area;
1242 
1243 	if (!area)
1244 		return;
1245 
1246 	put_page(area->pages[0]);
1247 	kfree(area->bitmap);
1248 	kfree(area);
1249 }
1250 
1251 void uprobe_start_dup_mmap(void)
1252 {
1253 	percpu_down_read(&dup_mmap_sem);
1254 }
1255 
1256 void uprobe_end_dup_mmap(void)
1257 {
1258 	percpu_up_read(&dup_mmap_sem);
1259 }
1260 
1261 void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
1262 {
1263 	newmm->uprobes_state.xol_area = NULL;
1264 
1265 	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
1266 		set_bit(MMF_HAS_UPROBES, &newmm->flags);
1267 		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
1268 		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
1269 	}
1270 }
1271 
1272 /*
1273  * xol_take_insn_slot - search for a free slot and claim it, sleeping if all are busy.
1274  */
1275 static unsigned long xol_take_insn_slot(struct xol_area *area)
1276 {
1277 	unsigned long slot_addr;
1278 	int slot_nr;
1279 
1280 	do {
1281 		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
1282 		if (slot_nr < UINSNS_PER_PAGE) {
1283 			if (!test_and_set_bit(slot_nr, area->bitmap))
1284 				break;
1285 
1286 			slot_nr = UINSNS_PER_PAGE;
1287 			continue;
1288 		}
1289 		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
1290 	} while (slot_nr >= UINSNS_PER_PAGE);
1291 
1292 	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
1293 	atomic_inc(&area->slot_count);
1294 
1295 	return slot_addr;
1296 }
1297 
1298 /*
1299  * xol_get_insn_slot - allocate a slot for xol.
1300  * Returns the allocated slot address or 0.
1301  */
1302 static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
1303 {
1304 	struct xol_area *area;
1305 	unsigned long xol_vaddr;
1306 
1307 	area = get_xol_area();
1308 	if (!area)
1309 		return 0;
1310 
1311 	xol_vaddr = xol_take_insn_slot(area);
1312 	if (unlikely(!xol_vaddr))
1313 		return 0;
1314 
1315 	arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
1316 			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
1317 
1318 	return xol_vaddr;
1319 }
1320 
1321 /*
1322  * xol_free_insn_slot - If slot was earlier allocated by
1323  * @xol_get_insn_slot(), make the slot available for
1324  * subsequent requests.
1325  */
1326 static void xol_free_insn_slot(struct task_struct *tsk)
1327 {
1328 	struct xol_area *area;
1329 	unsigned long vma_end;
1330 	unsigned long slot_addr;
1331 
1332 	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
1333 		return;
1334 
1335 	slot_addr = tsk->utask->xol_vaddr;
1336 	if (unlikely(!slot_addr))
1337 		return;
1338 
1339 	area = tsk->mm->uprobes_state.xol_area;
1340 	vma_end = area->vaddr + PAGE_SIZE;
1341 	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
1342 		unsigned long offset;
1343 		int slot_nr;
1344 
1345 		offset = slot_addr - area->vaddr;
1346 		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
1347 		if (slot_nr >= UINSNS_PER_PAGE)
1348 			return;
1349 
1350 		clear_bit(slot_nr, area->bitmap);
1351 		atomic_dec(&area->slot_count);
1352 		smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
1353 		if (waitqueue_active(&area->wq))
1354 			wake_up(&area->wq);
1355 
1356 		tsk->utask->xol_vaddr = 0;
1357 	}
1358 }
1359 
1360 void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
1361 				  void *src, unsigned long len)
1362 {
1363 	/* Initialize the slot */
1364 	copy_to_page(page, vaddr, src, len);
1365 
1366 	/*
1367 	 * We probably need flush_icache_user_range() but it needs vma.
1368 	 * This should work on most of architectures by default. If
1369 	 * architecture needs to do something different it can define
1370 	 * its own version of the function.
1371 	 */
1372 	flush_dcache_page(page);
1373 }
1374 
1375 /**
1376  * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
1377  * @regs: Reflects the saved state of the task after it has hit a breakpoint
1378  * instruction.
1379  * Return the address of the breakpoint instruction.
1380  */
1381 unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
1382 {
1383 	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
1384 }
1385 
1386 unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
1387 {
1388 	struct uprobe_task *utask = current->utask;
1389 
1390 	if (unlikely(utask && utask->active_uprobe))
1391 		return utask->vaddr;
1392 
1393 	return instruction_pointer(regs);
1394 }
1395 
1396 static struct return_instance *free_ret_instance(struct return_instance *ri)
1397 {
1398 	struct return_instance *next = ri->next;
1399 	put_uprobe(ri->uprobe);
1400 	kfree(ri);
1401 	return next;
1402 }
1403 
1404 /*
1405  * Called with no locks held.
1406  * Called in context of an exiting or an exec-ing thread.
1407  */
1408 void uprobe_free_utask(struct task_struct *t)
1409 {
1410 	struct uprobe_task *utask = t->utask;
1411 	struct return_instance *ri;
1412 
1413 	if (!utask)
1414 		return;
1415 
1416 	if (utask->active_uprobe)
1417 		put_uprobe(utask->active_uprobe);
1418 
1419 	ri = utask->return_instances;
1420 	while (ri)
1421 		ri = free_ret_instance(ri);
1422 
1423 	xol_free_insn_slot(t);
1424 	kfree(utask);
1425 	t->utask = NULL;
1426 }
1427 
1428 /*
1429  * Allocate a uprobe_task object for the task if necessary.
1430  * Called when the thread hits a breakpoint.
1431  *
1432  * Returns:
1433  * - pointer to new uprobe_task on success
1434  * - NULL otherwise
1435  */
1436 static struct uprobe_task *get_utask(void)
1437 {
1438 	if (!current->utask)
1439 		current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1440 	return current->utask;
1441 }
1442 
1443 static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
1444 {
1445 	struct uprobe_task *n_utask;
1446 	struct return_instance **p, *o, *n;
1447 
1448 	n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1449 	if (!n_utask)
1450 		return -ENOMEM;
1451 	t->utask = n_utask;
1452 
1453 	p = &n_utask->return_instances;
1454 	for (o = o_utask->return_instances; o; o = o->next) {
1455 		n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
1456 		if (!n)
1457 			return -ENOMEM;
1458 
1459 		*n = *o;
1460 		get_uprobe(n->uprobe);
1461 		n->next = NULL;
1462 
1463 		*p = n;
1464 		p = &n->next;
1465 		n_utask->depth++;
1466 	}
1467 
1468 	return 0;
1469 }
1470 
1471 static void uprobe_warn(struct task_struct *t, const char *msg)
1472 {
1473 	pr_warn("uprobe: %s:%d failed to %s\n",
1474 			current->comm, current->pid, msg);
1475 }
1476 
1477 static void dup_xol_work(struct callback_head *work)
1478 {
1479 	if (current->flags & PF_EXITING)
1480 		return;
1481 
1482 	if (!__create_xol_area(current->utask->dup_xol_addr) &&
1483 			!fatal_signal_pending(current))
1484 		uprobe_warn(current, "dup xol area");
1485 }
1486 
1487 /*
1488  * Called in context of a new clone/fork from copy_process.
1489  */
1490 void uprobe_copy_process(struct task_struct *t, unsigned long flags)
1491 {
1492 	struct uprobe_task *utask = current->utask;
1493 	struct mm_struct *mm = current->mm;
1494 	struct xol_area *area;
1495 
1496 	t->utask = NULL;
1497 
1498 	if (!utask || !utask->return_instances)
1499 		return;
1500 
1501 	if (mm == t->mm && !(flags & CLONE_VFORK))
1502 		return;
1503 
1504 	if (dup_utask(t, utask))
1505 		return uprobe_warn(t, "dup ret instances");
1506 
1507 	/* The task can fork() after dup_xol_work() fails */
1508 	area = mm->uprobes_state.xol_area;
1509 	if (!area)
1510 		return uprobe_warn(t, "dup xol area");
1511 
1512 	if (mm == t->mm)
1513 		return;
1514 
1515 	t->utask->dup_xol_addr = area->vaddr;
1516 	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
1517 	task_work_add(t, &t->utask->dup_xol_work, true);
1518 }
1519 
1520 /*
1521  * The current area->vaddr notion assumes the trampoline address is
1522  * always equal to area->vaddr.
1523  *
1524  * Returns -1 in case the xol_area is not allocated.
1525  */
1526 static unsigned long get_trampoline_vaddr(void)
1527 {
1528 	struct xol_area *area;
1529 	unsigned long trampoline_vaddr = -1;
1530 
1531 	area = current->mm->uprobes_state.xol_area;
1532 	smp_read_barrier_depends();
1533 	if (area)
1534 		trampoline_vaddr = area->vaddr;
1535 
1536 	return trampoline_vaddr;
1537 }
1538 
1539 static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
1540 					struct pt_regs *regs)
1541 {
1542 	struct return_instance *ri = utask->return_instances;
1543 	enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;
1544 
1545 	while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
1546 		ri = free_ret_instance(ri);
1547 		utask->depth--;
1548 	}
1549 	utask->return_instances = ri;
1550 }
1551 
1552 static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
1553 {
1554 	struct return_instance *ri;
1555 	struct uprobe_task *utask;
1556 	unsigned long orig_ret_vaddr, trampoline_vaddr;
1557 	bool chained;
1558 
1559 	if (!get_xol_area())
1560 		return;
1561 
1562 	utask = get_utask();
1563 	if (!utask)
1564 		return;
1565 
1566 	if (utask->depth >= MAX_URETPROBE_DEPTH) {
1567 		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
1568 				" nestedness limit pid/tgid=%d/%d\n",
1569 				current->pid, current->tgid);
1570 		return;
1571 	}
1572 
1573 	ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
1574 	if (!ri)
1575 		return;
1576 
1577 	trampoline_vaddr = get_trampoline_vaddr();
1578 	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
1579 	if (orig_ret_vaddr == -1)
1580 		goto fail;
1581 
1582 	/* drop the entries invalidated by longjmp() */
1583 	chained = (orig_ret_vaddr == trampoline_vaddr);
1584 	cleanup_return_instances(utask, chained, regs);
1585 
1586 	/*
1587 	 * We don't want to keep the trampoline address on the stack; rather,
1588 	 * keep the original return address of the first caller through all the
1589 	 * consequent instances. This also makes breakpoint unwinding easier.
1590 	 */
1591 	if (chained) {
1592 		if (!utask->return_instances) {
1593 			/*
1594 			 * This situation is not possible. Likely we have an
1595 			 * attack from user-space.
1596 			 */
1597 			uprobe_warn(current, "handle tail call");
1598 			goto fail;
1599 		}
1600 		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
1601 	}
1602 
1603 	ri->uprobe = get_uprobe(uprobe);
1604 	ri->func = instruction_pointer(regs);
1605 	ri->stack = user_stack_pointer(regs);
1606 	ri->orig_ret_vaddr = orig_ret_vaddr;
1607 	ri->chained = chained;
1608 
1609 	utask->depth++;
1610 	ri->next = utask->return_instances;
1611 	utask->return_instances = ri;
1612 
1613 	return;
1614  fail:
1615 	kfree(ri);
1616 }
1617 
1618 /* Prepare to single-step probed instruction out of line. */
1619 static int
1620 pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
1621 {
1622 	struct uprobe_task *utask;
1623 	unsigned long xol_vaddr;
1624 	int err;
1625 
1626 	utask = get_utask();
1627 	if (!utask)
1628 		return -ENOMEM;
1629 
1630 	xol_vaddr = xol_get_insn_slot(uprobe);
1631 	if (!xol_vaddr)
1632 		return -ENOMEM;
1633 
1634 	utask->xol_vaddr = xol_vaddr;
1635 	utask->vaddr = bp_vaddr;
1636 
1637 	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
1638 	if (unlikely(err)) {
1639 		xol_free_insn_slot(current);
1640 		return err;
1641 	}
1642 
1643 	utask->active_uprobe = uprobe;
1644 	utask->state = UTASK_SSTEP;
1645 	return 0;
1646 }
1647 
1648 /*
1649  * If we are singlestepping, then ensure this thread is not connected to
1650  * non-fatal signals until completion of singlestep.  When xol insn itself
1651  * triggers the signal,  restart the original insn even if the task is
1652  * already SIGKILL'ed (since coredump should report the correct ip).  This
1653  * is even more important if the task has a handler for SIGSEGV/etc: the
1654  * _same_ instruction should be repeated again after return from the signal
1655  * handler, and SSTEP can never finish in this case.
1656  */
1657 bool uprobe_deny_signal(void)
1658 {
1659 	struct task_struct *t = current;
1660 	struct uprobe_task *utask = t->utask;
1661 
1662 	if (likely(!utask || !utask->active_uprobe))
1663 		return false;
1664 
1665 	WARN_ON_ONCE(utask->state != UTASK_SSTEP);
1666 
1667 	if (signal_pending(t)) {
1668 		spin_lock_irq(&t->sighand->siglock);
1669 		clear_tsk_thread_flag(t, TIF_SIGPENDING);
1670 		spin_unlock_irq(&t->sighand->siglock);
1671 
1672 		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
1673 			utask->state = UTASK_SSTEP_TRAPPED;
1674 			set_tsk_thread_flag(t, TIF_UPROBE);
1675 		}
1676 	}
1677 
1678 	return true;
1679 }
1680 
1681 static void mmf_recalc_uprobes(struct mm_struct *mm)
1682 {
1683 	struct vm_area_struct *vma;
1684 
1685 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
1686 		if (!valid_vma(vma, false))
1687 			continue;
1688 		/*
1689 		 * This is not strictly accurate, we can race with
1690 		 * uprobe_unregister() and see the already removed
1691 		 * uprobe if delete_uprobe() was not yet called.
1692 		 * Or this uprobe can be filtered out.
1693 		 */
1694 		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
1695 			return;
1696 	}
1697 
1698 	clear_bit(MMF_HAS_UPROBES, &mm->flags);
1699 }
1700 
1701 static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
1702 {
1703 	struct page *page;
1704 	uprobe_opcode_t opcode;
1705 	int result;
1706 
1707 	pagefault_disable();
1708 	result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
1709 	pagefault_enable();
1710 
1711 	if (likely(result == 0))
1712 		goto out;
1713 
1714 	/*
1715 	 * The NULL 'tsk' here ensures that any faults that occur here
1716 	 * will not be accounted to the task.  'mm' *is* current->mm,
1717 	 * but we treat this as a 'remote' access since it is
1718 	 * essentially a kernel access to the memory.
1719 	 */
1720 	result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page,
1721 			NULL, NULL);
1722 	if (result < 0)
1723 		return result;
1724 
1725 	copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
1726 	put_page(page);
1727  out:
1728 	/* This needs to return true for any variant of the trap insn */
1729 	return is_trap_insn(&opcode);
1730 }
1731 
1732 static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
1733 {
1734 	struct mm_struct *mm = current->mm;
1735 	struct uprobe *uprobe = NULL;
1736 	struct vm_area_struct *vma;
1737 
1738 	down_read(&mm->mmap_sem);
1739 	vma = find_vma(mm, bp_vaddr);
1740 	if (vma && vma->vm_start <= bp_vaddr) {
1741 		if (valid_vma(vma, false)) {
1742 			struct inode *inode = file_inode(vma->vm_file);
1743 			loff_t offset = vaddr_to_offset(vma, bp_vaddr);
1744 
1745 			uprobe = find_uprobe(inode, offset);
1746 		}
1747 
1748 		if (!uprobe)
1749 			*is_swbp = is_trap_at_addr(mm, bp_vaddr);
1750 	} else {
1751 		*is_swbp = -EFAULT;
1752 	}
1753 
1754 	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
1755 		mmf_recalc_uprobes(mm);
1756 	up_read(&mm->mmap_sem);
1757 
1758 	return uprobe;
1759 }
1760 
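/*
 * handler_chain - run every consumer's ->handler() for this hit.  If any
 * consumer has a ->ret_handler, arm a uretprobe via prepare_uretprobe().
 * If all handlers asked for UPROBE_HANDLER_REMOVE, unapply the uprobe from
 * current->mm.
 */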
1761 static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
1762 {
1763 	struct uprobe_consumer *uc;
1764 	int remove = UPROBE_HANDLER_REMOVE;
1765 	bool need_prep = false; /* prepare return uprobe, when needed */
1766 
1767 	down_read(&uprobe->register_rwsem);
1768 	for (uc = uprobe->consumers; uc; uc = uc->next) {
1769 		int rc = 0;
1770 
1771 		if (uc->handler) {
1772 			rc = uc->handler(uc, regs);
1773 			WARN(rc & ~UPROBE_HANDLER_MASK,
1774 				"bad rc=0x%x from %pf()\n", rc, uc->handler);
1775 		}
1776 
1777 		if (uc->ret_handler)
1778 			need_prep = true;
1779 
1780 		remove &= rc;
1781 	}
1782 
1783 	if (need_prep && !remove)
1784 		prepare_uretprobe(uprobe, regs); /* put bp at return */
1785 
1786 	if (remove && uprobe->consumers) {
1787 		WARN_ON(!uprobe_is_active(uprobe));
1788 		unapply_uprobe(uprobe, current->mm);
1789 	}
1790 	up_read(&uprobe->register_rwsem);
1791 }
1792 
1793 static void
1794 handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
1795 {
1796 	struct uprobe *uprobe = ri->uprobe;
1797 	struct uprobe_consumer *uc;
1798 
1799 	down_read(&uprobe->register_rwsem);
1800 	for (uc = uprobe->consumers; uc; uc = uc->next) {
1801 		if (uc->ret_handler)
1802 			uc->ret_handler(uc, ri->func, regs);
1803 	}
1804 	up_read(&uprobe->register_rwsem);
1805 }
1806 
1807 static struct return_instance *find_next_ret_chain(struct return_instance *ri)
1808 {
1809 	bool chained;
1810 
1811 	do {
1812 		chained = ri->chained;
1813 		ri = ri->next;	/* can't be NULL if chained */
1814 	} while (chained);
1815 
1816 	return ri;
1817 }
1818 
1819 static void handle_trampoline(struct pt_regs *regs)
1820 {
1821 	struct uprobe_task *utask;
1822 	struct return_instance *ri, *next;
1823 	bool valid;
1824 
1825 	utask = current->utask;
1826 	if (!utask)
1827 		goto sigill;
1828 
1829 	ri = utask->return_instances;
1830 	if (!ri)
1831 		goto sigill;
1832 
1833 	do {
1834 		/*
1835 		 * We should throw out the frames invalidated by longjmp().
1836 		 * If this chain is valid, then the next one should be alive
1837 		 * or NULL; the latter case means that nobody but ri->func
1838 		 * could hit this trampoline on return. TODO: sigaltstack().
1839 		 */
1840 		next = find_next_ret_chain(ri);
1841 		valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);
1842 
1843 		instruction_pointer_set(regs, ri->orig_ret_vaddr);
1844 		do {
1845 			if (valid)
1846 				handle_uretprobe_chain(ri, regs);
1847 			ri = free_ret_instance(ri);
1848 			utask->depth--;
1849 		} while (ri != next);
1850 	} while (!valid);
1851 
1852 	utask->return_instances = ri;
1853 	return;
1854 
1855  sigill:
1856 	uprobe_warn(current, "handle uretprobe, sending SIGILL.");
1857 	force_sig_info(SIGILL, SEND_SIG_FORCED, current);
1858 
1859 }
1860 
1861 bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
1862 {
1863 	return false;
1864 }
1865 
1866 bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
1867 					struct pt_regs *regs)
1868 {
1869 	return true;
1870 }
1871 
1872 /*
1873  * Run handler and ask thread to singlestep.
1874  * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
1875  */
1876 static void handle_swbp(struct pt_regs *regs)
1877 {
1878 	struct uprobe *uprobe;
1879 	unsigned long bp_vaddr;
1880 	int uninitialized_var(is_swbp);
1881 
1882 	bp_vaddr = uprobe_get_swbp_addr(regs);
1883 	if (bp_vaddr == get_trampoline_vaddr())
1884 		return handle_trampoline(regs);
1885 
1886 	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
1887 	if (!uprobe) {
1888 		if (is_swbp > 0) {
1889 			/* No matching uprobe; signal SIGTRAP. */
1890 			send_sig(SIGTRAP, current, 0);
1891 		} else {
1892 			/*
1893 			 * Either we raced with uprobe_unregister() or we can't
1894 			 * access this memory. The latter is only possible if
1895 			 * another thread plays with our ->mm. In both cases
1896 			 * we can simply restart. If this vma was unmapped we
1897 			 * can pretend this insn was not executed yet and get
1898 			 * the (correct) SIGSEGV after restart.
1899 			 */
1900 			instruction_pointer_set(regs, bp_vaddr);
1901 		}
1902 		return;
1903 	}
1904 
1905 	/* change it in advance for ->handler() and restart */
1906 	instruction_pointer_set(regs, bp_vaddr);
1907 
1908 	/*
1909 	 * TODO: move copy_insn/etc into _register and remove this hack.
1910 	 * After we hit the bp, _unregister + _register can install the
1911 	 * new and not-yet-analyzed uprobe at the same address, restart.
1912 	 */
1913 	smp_rmb(); /* pairs with wmb() in install_breakpoint() */
1914 	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
1915 		goto out;
1916 
1917 	/* Tracing handlers use ->utask to communicate with fetch methods */
1918 	if (!get_utask())
1919 		goto out;
1920 
1921 	if (arch_uprobe_ignore(&uprobe->arch, regs))
1922 		goto out;
1923 
1924 	handler_chain(uprobe, regs);
1925 
1926 	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
1927 		goto out;
1928 
1929 	if (!pre_ssout(uprobe, regs, bp_vaddr))
1930 		return;
1931 
1932 	/* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */
1933 out:
1934 	put_uprobe(uprobe);
1935 }
1936 
1937 /*
1938  * Perform required fix-ups and disable singlestep.
1939  * Allow pending signals to take effect.
1940  */
1941 static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
1942 {
1943 	struct uprobe *uprobe;
1944 	int err = 0;
1945 
1946 	uprobe = utask->active_uprobe;
1947 	if (utask->state == UTASK_SSTEP_ACK)
1948 		err = arch_uprobe_post_xol(&uprobe->arch, regs);
1949 	else if (utask->state == UTASK_SSTEP_TRAPPED)
1950 		arch_uprobe_abort_xol(&uprobe->arch, regs);
1951 	else
1952 		WARN_ON_ONCE(1);
1953 
1954 	put_uprobe(uprobe);
1955 	utask->active_uprobe = NULL;
1956 	utask->state = UTASK_RUNNING;
1957 	xol_free_insn_slot(current);
1958 
1959 	spin_lock_irq(&current->sighand->siglock);
1960 	recalc_sigpending(); /* see uprobe_deny_signal() */
1961 	spin_unlock_irq(&current->sighand->siglock);
1962 
1963 	if (unlikely(err)) {
1964 		uprobe_warn(current, "execute the probed insn, sending SIGILL.");
1965 		force_sig_info(SIGILL, SEND_SIG_FORCED, current);
1966 	}
1967 }
1968 
1969 /*
1970  * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
1971  * allows the thread to return from interrupt. After that handle_swbp()
1972  * sets utask->active_uprobe.
1973  *
1974  * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
1975  * and allows the thread to return from interrupt.
1976  *
1977  * While returning to userspace, thread notices the TIF_UPROBE flag and calls
1978  * uprobe_notify_resume().
1979  */
1980 void uprobe_notify_resume(struct pt_regs *regs)
1981 {
1982 	struct uprobe_task *utask;
1983 
1984 	clear_thread_flag(TIF_UPROBE);
1985 
1986 	utask = current->utask;
1987 	if (utask && utask->active_uprobe)
1988 		handle_singlestep(utask, regs);
1989 	else
1990 		handle_swbp(regs);
1991 }
1992 
1993 /*
1994  * uprobe_pre_sstep_notifier gets called from interrupt context as part of
1995  * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
1996  */
1997 int uprobe_pre_sstep_notifier(struct pt_regs *regs)
1998 {
1999 	if (!current->mm)
2000 		return 0;
2001 
2002 	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
2003 	    (!current->utask || !current->utask->return_instances))
2004 		return 0;
2005 
2006 	set_thread_flag(TIF_UPROBE);
2007 	return 1;
2008 }
2009 
2010 /*
2011  * uprobe_post_sstep_notifier gets called in interrupt context as part of notifier
2012  * mechanism. Set TIF_UPROBE flag and indicate completion of singlestep.
2013  */
2014 int uprobe_post_sstep_notifier(struct pt_regs *regs)
2015 {
2016 	struct uprobe_task *utask = current->utask;
2017 
2018 	if (!current->mm || !utask || !utask->active_uprobe)
2019 		/* task is currently not uprobed */
2020 		return 0;
2021 
2022 	utask->state = UTASK_SSTEP_ACK;
2023 	set_thread_flag(TIF_UPROBE);
2024 	return 1;
2025 }
2026 
2027 static struct notifier_block uprobe_exception_nb = {
2028 	.notifier_call		= arch_uprobe_exception_notify,
2029 	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
2030 };
2031 
2032 static int __init init_uprobes(void)
2033 {
2034 	int i;
2035 
2036 	for (i = 0; i < UPROBES_HASH_SZ; i++)
2037 		mutex_init(&uprobes_mmap_mutex[i]);
2038 
2039 	if (percpu_init_rwsem(&dup_mmap_sem))
2040 		return -ENOMEM;
2041 
2042 	return register_die_notifier(&uprobe_exception_nb);
2043 }
2044 __initcall(init_uprobes);
2045