1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * User-space Probes (UProbes)
4  *
5  * Copyright (C) IBM Corporation, 2008-2012
6  * Authors:
7  *	Srikar Dronamraju
8  *	Jim Keniston
9  * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
10  */
11 
12 #include <linux/kernel.h>
13 #include <linux/highmem.h>
14 #include <linux/pagemap.h>	/* read_mapping_page */
15 #include <linux/slab.h>
16 #include <linux/sched.h>
17 #include <linux/sched/mm.h>
18 #include <linux/sched/coredump.h>
19 #include <linux/export.h>
20 #include <linux/rmap.h>		/* anon_vma_prepare */
21 #include <linux/mmu_notifier.h>	/* set_pte_at_notify */
22 #include <linux/swap.h>		/* try_to_free_swap */
23 #include <linux/ptrace.h>	/* user_enable_single_step */
24 #include <linux/kdebug.h>	/* notifier mechanism */
25 #include "../../mm/internal.h"	/* munlock_vma_page */
26 #include <linux/percpu-rwsem.h>
27 #include <linux/task_work.h>
28 #include <linux/shmem_fs.h>
29 #include <linux/khugepaged.h>
30 
31 #include <linux/uprobes.h>
32 
33 #define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
34 #define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE
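/*
 * Example: with PAGE_SIZE = 4096 and UPROBE_XOL_SLOT_BYTES = 128 (the
 * value used on x86), a single XOL page holds 32 instruction slots.
 */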
35 
36 static struct rb_root uprobes_tree = RB_ROOT;
37 /*
38  * Allows us to skip uprobe_mmap() if there are no uprobe events active
39  * at this time.  Probably a fine-grained per-inode count would be better?
40  */
41 #define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)
42 
43 static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */
44 
45 #define UPROBES_HASH_SZ	13
46 /* serialize uprobe->pending_list */
47 static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
48 #define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
49 
50 DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem);
51 
52 /* Have a copy of original instruction */
53 #define UPROBE_COPY_INSN	0
54 
55 struct uprobe {
56 	struct rb_node		rb_node;	/* node in the rb tree */
57 	refcount_t		ref;
58 	struct rw_semaphore	register_rwsem;
59 	struct rw_semaphore	consumer_rwsem;
60 	struct list_head	pending_list;
61 	struct uprobe_consumer	*consumers;
62 	struct inode		*inode;		/* Also hold a ref to inode */
63 	loff_t			offset;
64 	loff_t			ref_ctr_offset;
65 	unsigned long		flags;
66 
67 	/*
68 	 * The generic code assumes that it has two members of unknown type
69 	 * owned by the arch-specific code:
70 	 *
71 	 * 	insn -	copy_insn() saves the original instruction here for
72 	 *		arch_uprobe_analyze_insn().
73 	 *
74 	 *	ixol -	potentially modified instruction to execute out of
75 	 *		line, copied to xol_area by xol_get_insn_slot().
76 	 */
77 	struct arch_uprobe	arch;
78 };
79 
80 struct delayed_uprobe {
81 	struct list_head list;
82 	struct uprobe *uprobe;
83 	struct mm_struct *mm;
84 };
85 
86 static DEFINE_MUTEX(delayed_uprobe_lock);
87 static LIST_HEAD(delayed_uprobe_list);
88 
89 /*
90  * Execute-out-of-line (XOL) area: an anonymous executable mapping
91  * installed in the probed task to execute the copy of the original
92  * instruction mangled by set_swbp().
93  *
94  * On a breakpoint hit, the thread contends for a slot.  It frees the
95  * slot after single-stepping. Currently a fixed number of slots are
96  * allocated.
97  */
98 struct xol_area {
99 	wait_queue_head_t 		wq;		/* if all slots are busy */
100 	atomic_t 			slot_count;	/* number of in-use slots */
101 	unsigned long 			*bitmap;	/* 0 = free slot */
102 
103 	struct vm_special_mapping	xol_mapping;
104 	struct page 			*pages[2];
105 	/*
106 	 * We keep the vma's vm_start rather than a pointer to the vma
107 	 * itself.  The probed process or a naughty kernel module could make
108 	 * the vma go away, and we must handle that reasonably gracefully.
109 	 */
110 	unsigned long 			vaddr;		/* Page(s) of instruction slots */
111 };
112 
113 /*
114  * valid_vma: Verify if the specified vma is an executable vma.
115  * Relax restrictions while unregistering: vm_flags might have
116  * changed after the breakpoint was inserted.
117  *	- is_register: indicates if we are in register context.
118  *	- Return true if @vma is file-backed, may be executed, is neither
119  *	  hugetlb nor shareable, and (when registering) is not writable.
120  */
121 static bool valid_vma(struct vm_area_struct *vma, bool is_register)
122 {
123 	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;
124 
125 	if (is_register)
126 		flags |= VM_WRITE;
127 
128 	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
129 }
130 
131 static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
132 {
133 	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
134 }
135 
136 static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
137 {
138 	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
139 }
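/*
 * Worked example for the two helpers above: for a vma with
 * vm_start == 0x400000 and vm_pgoff == 2 (i.e. the mapping starts at
 * file offset 0x2000), file offset 0x2345 maps to virtual address
 * 0x400000 + 0x2345 - 0x2000 == 0x400345, and vaddr_to_offset()
 * performs the inverse computation.
 */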
140 
141 /**
142  * __replace_page - replace a page in a vma with a new page.
143  * Based on replace_page() in mm/ksm.c.
144  *
145  * @vma:      vma that holds the pte pointing to the old page
146  * @addr:     address the @old_page is mapped at
147  * @old_page: the page to be replaced
148  * @new_page: the modified page that replaces @old_page
149  *
150  * If @new_page is NULL, only unmap @old_page.
151  *
152  * Returns 0 on success, negative error code otherwise.
153  */
154 static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
155 				struct page *old_page, struct page *new_page)
156 {
157 	struct mm_struct *mm = vma->vm_mm;
158 	struct page_vma_mapped_walk pvmw = {
159 		.page = compound_head(old_page),
160 		.vma = vma,
161 		.address = addr,
162 	};
163 	int err;
164 	struct mmu_notifier_range range;
165 
166 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
167 				addr + PAGE_SIZE);
168 
169 	if (new_page) {
170 		err = mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL);
171 		if (err)
172 			return err;
173 	}
174 
175 	/* For try_to_free_swap() and munlock_vma_page() below */
176 	lock_page(old_page);
177 
178 	mmu_notifier_invalidate_range_start(&range);
179 	err = -EAGAIN;
180 	if (!page_vma_mapped_walk(&pvmw))
181 		goto unlock;
182 	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
183 
184 	if (new_page) {
185 		get_page(new_page);
186 		page_add_new_anon_rmap(new_page, vma, addr, false);
187 		lru_cache_add_active_or_unevictable(new_page, vma);
188 	} else
189 		/* no new page, just dec_mm_counter for old_page */
190 		dec_mm_counter(mm, MM_ANONPAGES);
191 
192 	if (!PageAnon(old_page)) {
193 		dec_mm_counter(mm, mm_counter_file(old_page));
194 		inc_mm_counter(mm, MM_ANONPAGES);
195 	}
196 
197 	flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
198 	ptep_clear_flush_notify(vma, addr, pvmw.pte);
199 	if (new_page)
200 		set_pte_at_notify(mm, addr, pvmw.pte,
201 				  mk_pte(new_page, vma->vm_page_prot));
202 
203 	page_remove_rmap(old_page, false);
204 	if (!page_mapped(old_page))
205 		try_to_free_swap(old_page);
206 	page_vma_mapped_walk_done(&pvmw);
207 
208 	if (vma->vm_flags & VM_LOCKED)
209 		munlock_vma_page(old_page);
210 	put_page(old_page);
211 
212 	err = 0;
213  unlock:
214 	mmu_notifier_invalidate_range_end(&range);
215 	unlock_page(old_page);
216 	return err;
217 }
218 
219 /**
220  * is_swbp_insn - check if the instruction is a breakpoint instruction.
221  * @insn: instruction to be checked.
222  * Default implementation of is_swbp_insn().
223  * Returns true if @insn is a breakpoint instruction.
224  */
225 bool __weak is_swbp_insn(uprobe_opcode_t *insn)
226 {
227 	return *insn == UPROBE_SWBP_INSN;
228 }
229 
230 /**
231  * is_trap_insn - check if the instruction is a trap instruction.
232  * @insn: instruction to be checked.
233  * Default implementation of is_trap_insn().
234  * Returns true if @insn is a trap instruction.
235  *
236  * This function is needed for the case where an architecture has multiple
237  * trap instructions (like powerpc).
238  */
239 bool __weak is_trap_insn(uprobe_opcode_t *insn)
240 {
241 	return is_swbp_insn(insn);
242 }
243 
244 static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
245 {
246 	void *kaddr = kmap_atomic(page);
247 	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
248 	kunmap_atomic(kaddr);
249 }
250 
251 static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
252 {
253 	void *kaddr = kmap_atomic(page);
254 	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
255 	kunmap_atomic(kaddr);
256 }
257 
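/*
 * verify_opcode - decide whether writing @new_opcode at @vaddr is still
 * required.  Returns 1 if the write should proceed, 0 if the page is
 * already in the desired state (breakpoint already installed when
 * registering, or already removed when unregistering).
 */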
258 static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
259 {
260 	uprobe_opcode_t old_opcode;
261 	bool is_swbp;
262 
263 	/*
264 	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
265 	 * We do not check if it is any other 'trap variant', which could
266 	 * be a conditional trap instruction such as the one powerpc supports.
267 	 *
268 	 * The logic is that we do not care if the underlying instruction
269 	 * is a trap variant; a uprobe always wins over any other (gdb)
270 	 * breakpoint.
271 	 */
272 	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
273 	is_swbp = is_swbp_insn(&old_opcode);
274 
275 	if (is_swbp_insn(new_opcode)) {
276 		if (is_swbp)		/* register: already installed? */
277 			return 0;
278 	} else {
279 		if (!is_swbp)		/* unregister: was it changed by us? */
280 			return 0;
281 	}
282 
283 	return 1;
284 }
285 
286 static struct delayed_uprobe *
287 delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
288 {
289 	struct delayed_uprobe *du;
290 
291 	list_for_each_entry(du, &delayed_uprobe_list, list)
292 		if (du->uprobe == uprobe && du->mm == mm)
293 			return du;
294 	return NULL;
295 }
296 
297 static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
298 {
299 	struct delayed_uprobe *du;
300 
301 	if (delayed_uprobe_check(uprobe, mm))
302 		return 0;
303 
304 	du  = kzalloc(sizeof(*du), GFP_KERNEL);
305 	if (!du)
306 		return -ENOMEM;
307 
308 	du->uprobe = uprobe;
309 	du->mm = mm;
310 	list_add(&du->list, &delayed_uprobe_list);
311 	return 0;
312 }
313 
314 static void delayed_uprobe_delete(struct delayed_uprobe *du)
315 {
316 	if (WARN_ON(!du))
317 		return;
318 	list_del(&du->list);
319 	kfree(du);
320 }
321 
322 static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
323 {
324 	struct list_head *pos, *q;
325 	struct delayed_uprobe *du;
326 
327 	if (!uprobe && !mm)
328 		return;
329 
330 	list_for_each_safe(pos, q, &delayed_uprobe_list) {
331 		du = list_entry(pos, struct delayed_uprobe, list);
332 
333 		if (uprobe && du->uprobe != uprobe)
334 			continue;
335 		if (mm && du->mm != mm)
336 			continue;
337 
338 		delayed_uprobe_delete(du);
339 	}
340 }
341 
342 static bool valid_ref_ctr_vma(struct uprobe *uprobe,
343 			      struct vm_area_struct *vma)
344 {
345 	unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset);
346 
347 	return uprobe->ref_ctr_offset &&
348 		vma->vm_file &&
349 		file_inode(vma->vm_file) == uprobe->inode &&
350 		(vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
351 		vma->vm_start <= vaddr &&
352 		vma->vm_end > vaddr;
353 }
354 
355 static struct vm_area_struct *
356 find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
357 {
358 	struct vm_area_struct *tmp;
359 
360 	for (tmp = mm->mmap; tmp; tmp = tmp->vm_next)
361 		if (valid_ref_ctr_vma(uprobe, tmp))
362 			return tmp;
363 
364 	return NULL;
365 }
366 
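/*
 * __update_ref_ctr - add @d (+1 or -1) to the 16-bit reference counter
 * (the SDT semaphore) that lives at @vaddr in the target mm.  This lets
 * instrumented user code cheaply test whether anyone is tracing it.
 */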
367 static int
368 __update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
369 {
370 	void *kaddr;
371 	struct page *page;
372 	struct vm_area_struct *vma;
373 	int ret;
374 	short *ptr;
375 
376 	if (!vaddr || !d)
377 		return -EINVAL;
378 
379 	ret = get_user_pages_remote(NULL, mm, vaddr, 1,
380 			FOLL_WRITE, &page, &vma, NULL);
381 	if (unlikely(ret <= 0)) {
382 		/*
383 		 * We are asking for 1 page. If get_user_pages_remote() fails,
384 		 * it may return 0; in that case we have to return an error.
385 		 */
386 		return ret == 0 ? -EBUSY : ret;
387 	}
388 
389 	kaddr = kmap_atomic(page);
390 	ptr = kaddr + (vaddr & ~PAGE_MASK);
391 
392 	if (unlikely(*ptr + d < 0)) {
393 		pr_warn("ref_ctr going negative. vaddr: 0x%lx, "
394 			"curr val: %d, delta: %d\n", vaddr, *ptr, d);
395 		ret = -EINVAL;
396 		goto out;
397 	}
398 
399 	*ptr += d;
400 	ret = 0;
401 out:
402 	kunmap_atomic(kaddr);
403 	put_page(page);
404 	return ret;
405 }
406 
407 static void update_ref_ctr_warn(struct uprobe *uprobe,
408 				struct mm_struct *mm, short d)
409 {
410 	pr_warn("ref_ctr %s failed for inode: 0x%lx offset: "
411 		"0x%llx ref_ctr_offset: 0x%llx of mm: 0x%pK\n",
412 		d > 0 ? "increment" : "decrement", uprobe->inode->i_ino,
413 		(unsigned long long) uprobe->offset,
414 		(unsigned long long) uprobe->ref_ctr_offset, mm);
415 }
416 
417 static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
418 			  short d)
419 {
420 	struct vm_area_struct *rc_vma;
421 	unsigned long rc_vaddr;
422 	int ret = 0;
423 
424 	rc_vma = find_ref_ctr_vma(uprobe, mm);
425 
426 	if (rc_vma) {
427 		rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset);
428 		ret = __update_ref_ctr(mm, rc_vaddr, d);
429 		if (ret)
430 			update_ref_ctr_warn(uprobe, mm, d);
431 
432 		if (d > 0)
433 			return ret;
434 	}
435 
436 	mutex_lock(&delayed_uprobe_lock);
437 	if (d > 0)
438 		ret = delayed_uprobe_add(uprobe, mm);
439 	else
440 		delayed_uprobe_remove(uprobe, mm);
441 	mutex_unlock(&delayed_uprobe_lock);
442 
443 	return ret;
444 }
445 
446 /*
447  * NOTE:
448  * The breakpoint instruction is expected to be the smallest-size instruction
449  * for the architecture. If an arch has variable-length instructions and the
450  * breakpoint instruction is not the smallest-length instruction
451  * supported by that architecture, then we need to modify is_trap_at_addr()
452  * and uprobe_write_opcode() accordingly. This is never a problem for archs
453  * that have fixed-length instructions.
454  *
455  * uprobe_write_opcode - write the opcode at a given virtual address.
456  * @auprobe: arch specific probepoint information.
457  * @mm: the probed process address space.
458  * @vaddr: the virtual address to store the opcode.
459  * @opcode: opcode to be written at @vaddr.
460  * Called with mm->mmap_sem held for write.
461  * Return 0 (success) or a negative errno.
462  */
463 int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
464 			unsigned long vaddr, uprobe_opcode_t opcode)
465 {
466 	struct uprobe *uprobe;
467 	struct page *old_page, *new_page;
468 	struct vm_area_struct *vma;
469 	int ret, is_register, ref_ctr_updated = 0;
470 	bool orig_page_huge = false;
471 	unsigned int gup_flags = FOLL_FORCE;
472 
473 	is_register = is_swbp_insn(&opcode);
474 	uprobe = container_of(auprobe, struct uprobe, arch);
475 
476 retry:
477 	if (is_register)
478 		gup_flags |= FOLL_SPLIT_PMD;
479 	/* Read the page with vaddr into memory */
480 	ret = get_user_pages_remote(NULL, mm, vaddr, 1, gup_flags,
481 				    &old_page, &vma, NULL);
482 	if (ret <= 0)
483 		return ret;
484 
485 	ret = verify_opcode(old_page, vaddr, &opcode);
486 	if (ret <= 0)
487 		goto put_old;
488 
489 	if (WARN(!is_register && PageCompound(old_page),
490 		 "uprobe unregister should never work on compound page\n")) {
491 		ret = -EINVAL;
492 		goto put_old;
493 	}
494 
495 	/* We are going to replace the instruction, so update the ref_ctr. */
496 	if (!ref_ctr_updated && uprobe->ref_ctr_offset) {
497 		ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
498 		if (ret)
499 			goto put_old;
500 
501 		ref_ctr_updated = 1;
502 	}
503 
504 	ret = 0;
505 	if (!is_register && !PageAnon(old_page))
506 		goto put_old;
507 
508 	ret = anon_vma_prepare(vma);
509 	if (ret)
510 		goto put_old;
511 
512 	ret = -ENOMEM;
513 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
514 	if (!new_page)
515 		goto put_old;
516 
517 	__SetPageUptodate(new_page);
518 	copy_highpage(new_page, old_page);
519 	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
520 
521 	if (!is_register) {
522 		struct page *orig_page;
523 		pgoff_t index;
524 
525 		VM_BUG_ON_PAGE(!PageAnon(old_page), old_page);
526 
527 		index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
528 		orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
529 					  index);
530 
531 		if (orig_page) {
532 			if (PageUptodate(orig_page) &&
533 			    pages_identical(new_page, orig_page)) {
534 				/* let go of new_page */
535 				put_page(new_page);
536 				new_page = NULL;
537 
538 				if (PageCompound(orig_page))
539 					orig_page_huge = true;
540 			}
541 			put_page(orig_page);
542 		}
543 	}
544 
545 	ret = __replace_page(vma, vaddr, old_page, new_page);
546 	if (new_page)
547 		put_page(new_page);
548 put_old:
549 	put_page(old_page);
550 
551 	if (unlikely(ret == -EAGAIN))
552 		goto retry;
553 
554 	/* Revert the reference counter if the instruction update failed. */
555 	if (ret && is_register && ref_ctr_updated)
556 		update_ref_ctr(uprobe, mm, -1);
557 
558 	/* try collapse pmd for compound page */
559 	if (!ret && orig_page_huge)
560 		collapse_pte_mapped_thp(mm, vaddr);
561 
562 	return ret;
563 }
564 
565 /**
566  * set_swbp - store breakpoint at a given address.
567  * @auprobe: arch specific probepoint information.
568  * @mm: the probed process address space.
569  * @vaddr: the virtual address to insert the opcode.
570  *
571  * For mm @mm, store the breakpoint instruction at @vaddr.
572  * Return 0 (success) or a negative errno.
573  */
574 int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
575 {
576 	return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
577 }
578 
579 /**
580  * set_orig_insn - Restore the original instruction.
581  * @auprobe: arch specific probepoint information.
582  * @mm: the probed process address space.
583  * @vaddr: the virtual address to restore the instruction at.
584  *
585  * For mm @mm, restore the original instruction at @vaddr.
586  * Return 0 (success) or a negative errno.
587  */
588 int __weak
589 set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
590 {
591 	return uprobe_write_opcode(auprobe, mm, vaddr,
592 			*(uprobe_opcode_t *)&auprobe->insn);
593 }
594 
595 static struct uprobe *get_uprobe(struct uprobe *uprobe)
596 {
597 	refcount_inc(&uprobe->ref);
598 	return uprobe;
599 }
600 
601 static void put_uprobe(struct uprobe *uprobe)
602 {
603 	if (refcount_dec_and_test(&uprobe->ref)) {
604 		/*
605 		 * If the application munmaps the executable vma before
606 		 * uprobe_unregister() is called, we get no chance to remove the
607 		 * uprobe from delayed_uprobe_list in remove_breakpoint(). Do it here.
608 		 */
609 		mutex_lock(&delayed_uprobe_lock);
610 		delayed_uprobe_remove(uprobe, NULL);
611 		mutex_unlock(&delayed_uprobe_lock);
612 		kfree(uprobe);
613 	}
614 }
615 
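/*
 * Total order on uprobes: compare by inode first, then by offset;
 * this is the key used for the uprobes_tree rbtree.
 */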
616 static int match_uprobe(struct uprobe *l, struct uprobe *r)
617 {
618 	if (l->inode < r->inode)
619 		return -1;
620 
621 	if (l->inode > r->inode)
622 		return 1;
623 
624 	if (l->offset < r->offset)
625 		return -1;
626 
627 	if (l->offset > r->offset)
628 		return 1;
629 
630 	return 0;
631 }
632 
633 static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
634 {
635 	struct uprobe u = { .inode = inode, .offset = offset };
636 	struct rb_node *n = uprobes_tree.rb_node;
637 	struct uprobe *uprobe;
638 	int match;
639 
640 	while (n) {
641 		uprobe = rb_entry(n, struct uprobe, rb_node);
642 		match = match_uprobe(&u, uprobe);
643 		if (!match)
644 			return get_uprobe(uprobe);
645 
646 		if (match < 0)
647 			n = n->rb_left;
648 		else
649 			n = n->rb_right;
650 	}
651 	return NULL;
652 }
653 
654 /*
655  * Find a uprobe corresponding to a given inode:offset
656  * Acquires uprobes_treelock
657  */
658 static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
659 {
660 	struct uprobe *uprobe;
661 
662 	spin_lock(&uprobes_treelock);
663 	uprobe = __find_uprobe(inode, offset);
664 	spin_unlock(&uprobes_treelock);
665 
666 	return uprobe;
667 }
668 
669 static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
670 {
671 	struct rb_node **p = &uprobes_tree.rb_node;
672 	struct rb_node *parent = NULL;
673 	struct uprobe *u;
674 	int match;
675 
676 	while (*p) {
677 		parent = *p;
678 		u = rb_entry(parent, struct uprobe, rb_node);
679 		match = match_uprobe(uprobe, u);
680 		if (!match)
681 			return get_uprobe(u);
682 
683 		if (match < 0)
684 			p = &parent->rb_left;
685 		else
686 			p = &parent->rb_right;
687 
688 	}
689 
690 	u = NULL;
691 	rb_link_node(&uprobe->rb_node, parent, p);
692 	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
693 	/* get access + creation ref */
694 	refcount_set(&uprobe->ref, 2);
695 
696 	return u;
697 }
698 
699 /*
700  * Acquire uprobes_treelock.
701  * If a matching uprobe already exists in the rbtree,
702  *	increment its refcount (access ref) and return the matching uprobe.
703  *
704  * If there is no matching uprobe, insert the uprobe into the rbtree,
705  *	take a double refcount (access + creation) and return NULL.
706  */
707 static struct uprobe *insert_uprobe(struct uprobe *uprobe)
708 {
709 	struct uprobe *u;
710 
711 	spin_lock(&uprobes_treelock);
712 	u = __insert_uprobe(uprobe);
713 	spin_unlock(&uprobes_treelock);
714 
715 	return u;
716 }
717 
718 static void
719 ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe)
720 {
721 	pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx "
722 		"ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n",
723 		uprobe->inode->i_ino, (unsigned long long) uprobe->offset,
724 		(unsigned long long) cur_uprobe->ref_ctr_offset,
725 		(unsigned long long) uprobe->ref_ctr_offset);
726 }
727 
728 static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
729 				   loff_t ref_ctr_offset)
730 {
731 	struct uprobe *uprobe, *cur_uprobe;
732 
733 	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
734 	if (!uprobe)
735 		return NULL;
736 
737 	uprobe->inode = inode;
738 	uprobe->offset = offset;
739 	uprobe->ref_ctr_offset = ref_ctr_offset;
740 	init_rwsem(&uprobe->register_rwsem);
741 	init_rwsem(&uprobe->consumer_rwsem);
742 
743 	/* add to uprobes_tree, sorted on inode:offset */
744 	cur_uprobe = insert_uprobe(uprobe);
745 	/* a uprobe exists for this inode:offset combination */
746 	if (cur_uprobe) {
747 		if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) {
748 			ref_ctr_mismatch_warn(cur_uprobe, uprobe);
749 			put_uprobe(cur_uprobe);
750 			kfree(uprobe);
751 			return ERR_PTR(-EINVAL);
752 		}
753 		kfree(uprobe);
754 		uprobe = cur_uprobe;
755 	}
756 
757 	return uprobe;
758 }
759 
760 static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
761 {
762 	down_write(&uprobe->consumer_rwsem);
763 	uc->next = uprobe->consumers;
764 	uprobe->consumers = uc;
765 	up_write(&uprobe->consumer_rwsem);
766 }
767 
768 /*
769  * For uprobe @uprobe, delete the consumer @uc.
770  * Return true if @uc was found and deleted,
771  * false otherwise.
772  */
773 static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
774 {
775 	struct uprobe_consumer **con;
776 	bool ret = false;
777 
778 	down_write(&uprobe->consumer_rwsem);
779 	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
780 		if (*con == uc) {
781 			*con = uc->next;
782 			ret = true;
783 			break;
784 		}
785 	}
786 	up_write(&uprobe->consumer_rwsem);
787 
788 	return ret;
789 }
790 
791 static int __copy_insn(struct address_space *mapping, struct file *filp,
792 			void *insn, int nbytes, loff_t offset)
793 {
794 	struct page *page;
795 	/*
796 	 * Ensure that the page that has the original instruction is populated
797 	 * and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
798 	 * see uprobe_register().
799 	 */
800 	if (mapping->a_ops->readpage)
801 		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
802 	else
803 		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
804 	if (IS_ERR(page))
805 		return PTR_ERR(page);
806 
807 	copy_from_page(page, offset, insn, nbytes);
808 	put_page(page);
809 
810 	return 0;
811 }
812 
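/*
 * copy_insn - fetch the original instruction bytes from the file's page
 * cache into uprobe->arch.insn.  The loop below handles instructions
 * that cross a page boundary.
 */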
813 static int copy_insn(struct uprobe *uprobe, struct file *filp)
814 {
815 	struct address_space *mapping = uprobe->inode->i_mapping;
816 	loff_t offs = uprobe->offset;
817 	void *insn = &uprobe->arch.insn;
818 	int size = sizeof(uprobe->arch.insn);
819 	int len, err = -EIO;
820 
821 	/* Copy only available bytes, -EIO if nothing was read */
822 	do {
823 		if (offs >= i_size_read(uprobe->inode))
824 			break;
825 
826 		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
827 		err = __copy_insn(mapping, filp, insn, len, offs);
828 		if (err)
829 			break;
830 
831 		insn += len;
832 		offs += len;
833 		size -= len;
834 	} while (size);
835 
836 	return err;
837 }
838 
839 static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
840 				struct mm_struct *mm, unsigned long vaddr)
841 {
842 	int ret = 0;
843 
844 	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
845 		return ret;
846 
847 	/* TODO: move this into _register; until then we abuse this sem. */
848 	down_write(&uprobe->consumer_rwsem);
849 	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
850 		goto out;
851 
852 	ret = copy_insn(uprobe, file);
853 	if (ret)
854 		goto out;
855 
856 	ret = -ENOTSUPP;
857 	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
858 		goto out;
859 
860 	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
861 	if (ret)
862 		goto out;
863 
864 	/* uprobe_write_opcode() assumes we don't cross page boundary */
865 	BUG_ON((uprobe->offset & ~PAGE_MASK) +
866 			UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
867 
868 	smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
869 	set_bit(UPROBE_COPY_INSN, &uprobe->flags);
870 
871  out:
872 	up_write(&uprobe->consumer_rwsem);
873 
874 	return ret;
875 }
876 
877 static inline bool consumer_filter(struct uprobe_consumer *uc,
878 				   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
879 {
880 	return !uc->filter || uc->filter(uc, ctx, mm);
881 }
882 
883 static bool filter_chain(struct uprobe *uprobe,
884 			 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
885 {
886 	struct uprobe_consumer *uc;
887 	bool ret = false;
888 
889 	down_read(&uprobe->consumer_rwsem);
890 	for (uc = uprobe->consumers; uc; uc = uc->next) {
891 		ret = consumer_filter(uc, ctx, mm);
892 		if (ret)
893 			break;
894 	}
895 	up_read(&uprobe->consumer_rwsem);
896 
897 	return ret;
898 }
899 
900 static int
901 install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
902 			struct vm_area_struct *vma, unsigned long vaddr)
903 {
904 	bool first_uprobe;
905 	int ret;
906 
907 	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
908 	if (ret)
909 		return ret;
910 
911 	/*
912 	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
913 	 * the task can hit this breakpoint right after __replace_page().
914 	 */
915 	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
916 	if (first_uprobe)
917 		set_bit(MMF_HAS_UPROBES, &mm->flags);
918 
919 	ret = set_swbp(&uprobe->arch, mm, vaddr);
920 	if (!ret)
921 		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
922 	else if (first_uprobe)
923 		clear_bit(MMF_HAS_UPROBES, &mm->flags);
924 
925 	return ret;
926 }
927 
928 static int
929 remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
930 {
931 	set_bit(MMF_RECALC_UPROBES, &mm->flags);
932 	return set_orig_insn(&uprobe->arch, mm, vaddr);
933 }
934 
935 static inline bool uprobe_is_active(struct uprobe *uprobe)
936 {
937 	return !RB_EMPTY_NODE(&uprobe->rb_node);
938 }
939 /*
940  * There could be threads that have already hit the breakpoint. They
941  * will recheck the current insn and restart if find_uprobe() fails.
942  * See find_active_uprobe().
943  */
944 static void delete_uprobe(struct uprobe *uprobe)
945 {
946 	if (WARN_ON(!uprobe_is_active(uprobe)))
947 		return;
948 
949 	spin_lock(&uprobes_treelock);
950 	rb_erase(&uprobe->rb_node, &uprobes_tree);
951 	spin_unlock(&uprobes_treelock);
952 	RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
953 	put_uprobe(uprobe);
954 }
955 
956 struct map_info {
957 	struct map_info *next;
958 	struct mm_struct *mm;
959 	unsigned long vaddr;
960 };
961 
962 static inline struct map_info *free_map_info(struct map_info *info)
963 {
964 	struct map_info *next = info->next;
965 	kfree(info);
966 	return next;
967 }
968 
969 static struct map_info *
970 build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
971 {
972 	unsigned long pgoff = offset >> PAGE_SHIFT;
973 	struct vm_area_struct *vma;
974 	struct map_info *curr = NULL;
975 	struct map_info *prev = NULL;
976 	struct map_info *info;
977 	int more = 0;
978 
979  again:
980 	i_mmap_lock_read(mapping);
981 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
982 		if (!valid_vma(vma, is_register))
983 			continue;
984 
985 		if (!prev && !more) {
986 			/*
987 			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
988 			 * reclaim. This is optimistic, no harm done if it fails.
989 			 */
990 			prev = kmalloc(sizeof(struct map_info),
991 					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
992 			if (prev)
993 				prev->next = NULL;
994 		}
995 		if (!prev) {
996 			more++;
997 			continue;
998 		}
999 
1000 		if (!mmget_not_zero(vma->vm_mm))
1001 			continue;
1002 
1003 		info = prev;
1004 		prev = prev->next;
1005 		info->next = curr;
1006 		curr = info;
1007 
1008 		info->mm = vma->vm_mm;
1009 		info->vaddr = offset_to_vaddr(vma, offset);
1010 	}
1011 	i_mmap_unlock_read(mapping);
1012 
1013 	if (!more)
1014 		goto out;
1015 
1016 	prev = curr;
1017 	while (curr) {
1018 		mmput(curr->mm);
1019 		curr = curr->next;
1020 	}
1021 
1022 	do {
1023 		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
1024 		if (!info) {
1025 			curr = ERR_PTR(-ENOMEM);
1026 			goto out;
1027 		}
1028 		info->next = prev;
1029 		prev = info;
1030 	} while (--more);
1031 
1032 	goto again;
1033  out:
1034 	while (prev)
1035 		prev = free_map_info(prev);
1036 	return curr;
1037 }
1038 
1039 static int
1040 register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
1041 {
1042 	bool is_register = !!new;
1043 	struct map_info *info;
1044 	int err = 0;
1045 
1046 	percpu_down_write(&dup_mmap_sem);
1047 	info = build_map_info(uprobe->inode->i_mapping,
1048 					uprobe->offset, is_register);
1049 	if (IS_ERR(info)) {
1050 		err = PTR_ERR(info);
1051 		goto out;
1052 	}
1053 
1054 	while (info) {
1055 		struct mm_struct *mm = info->mm;
1056 		struct vm_area_struct *vma;
1057 
1058 		if (err && is_register)
1059 			goto free;
1060 
1061 		down_write(&mm->mmap_sem);
1062 		vma = find_vma(mm, info->vaddr);
1063 		if (!vma || !valid_vma(vma, is_register) ||
1064 		    file_inode(vma->vm_file) != uprobe->inode)
1065 			goto unlock;
1066 
1067 		if (vma->vm_start > info->vaddr ||
1068 		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
1069 			goto unlock;
1070 
1071 		if (is_register) {
1072 			/* consult only the "caller", new consumer. */
1073 			if (consumer_filter(new,
1074 					UPROBE_FILTER_REGISTER, mm))
1075 				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
1076 		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
1077 			if (!filter_chain(uprobe,
1078 					UPROBE_FILTER_UNREGISTER, mm))
1079 				err |= remove_breakpoint(uprobe, mm, info->vaddr);
1080 		}
1081 
1082  unlock:
1083 		up_write(&mm->mmap_sem);
1084  free:
1085 		mmput(mm);
1086 		info = free_map_info(info);
1087 	}
1088  out:
1089 	percpu_up_write(&dup_mmap_sem);
1090 	return err;
1091 }
1092 
1093 static void
1094 __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
1095 {
1096 	int err;
1097 
1098 	if (WARN_ON(!consumer_del(uprobe, uc)))
1099 		return;
1100 
1101 	err = register_for_each_vma(uprobe, NULL);
1102 	/* TODO: can't unregister? schedule a worker thread */
1103 	if (!uprobe->consumers && !err)
1104 		delete_uprobe(uprobe);
1105 }
1106 
1107 /*
1108  * uprobe_unregister - unregister an already registered probe.
1109  * @inode: the file from which the probe has to be removed.
1110  * @offset: offset from the start of the file.
1111  * @uc: identifies which consumer when multiple probes are colocated.
1112  */
1113 void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
1114 {
1115 	struct uprobe *uprobe;
1116 
1117 	uprobe = find_uprobe(inode, offset);
1118 	if (WARN_ON(!uprobe))
1119 		return;
1120 
1121 	down_write(&uprobe->register_rwsem);
1122 	__uprobe_unregister(uprobe, uc);
1123 	up_write(&uprobe->register_rwsem);
1124 	put_uprobe(uprobe);
1125 }
1126 EXPORT_SYMBOL_GPL(uprobe_unregister);
1127 
1128 /*
1129  * __uprobe_register - register a probe
1130  * @inode: the file in which the probe has to be placed.
1131  * @offset: offset from the start of the file.
1132  * @uc: information on how to handle the probe.
1133  *
1134  * Apart from the access refcount, __uprobe_register() takes a creation
1135  * refcount (through alloc_uprobe()) if and only if this @uprobe is getting
1136  * inserted into the rbtree (i.e. the first consumer for an @inode:@offset
1137  * tuple).  The creation refcount stops uprobe_unregister() from freeing the
1138  * @uprobe even before the register operation is complete. The creation
1139  * refcount is released when the last @uc for the @uprobe
1140  * unregisters. The caller of __uprobe_register() is required to keep @inode
1141  * (and the containing mount) referenced.
1142  *
1143  * Return an errno if it cannot successfully install probes,
1144  * else return 0 (success).
1145  */
1146 static int __uprobe_register(struct inode *inode, loff_t offset,
1147 			     loff_t ref_ctr_offset, struct uprobe_consumer *uc)
1148 {
1149 	struct uprobe *uprobe;
1150 	int ret;
1151 
1152 	/* Uprobe must have at least one set consumer */
1153 	if (!uc->handler && !uc->ret_handler)
1154 		return -EINVAL;
1155 
1156 	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
1157 	if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping))
1158 		return -EIO;
1159 	/* Racy, just to catch the obvious mistakes */
1160 	if (offset > i_size_read(inode))
1161 		return -EINVAL;
1162 
1163  retry:
1164 	uprobe = alloc_uprobe(inode, offset, ref_ctr_offset);
1165 	if (!uprobe)
1166 		return -ENOMEM;
1167 	if (IS_ERR(uprobe))
1168 		return PTR_ERR(uprobe);
1169 
1170 	/*
1171 	 * We can race with uprobe_unregister()->delete_uprobe().
1172 	 * Check uprobe_is_active() and retry if it is false.
1173 	 */
1174 	down_write(&uprobe->register_rwsem);
1175 	ret = -EAGAIN;
1176 	if (likely(uprobe_is_active(uprobe))) {
1177 		consumer_add(uprobe, uc);
1178 		ret = register_for_each_vma(uprobe, uc);
1179 		if (ret)
1180 			__uprobe_unregister(uprobe, uc);
1181 	}
1182 	up_write(&uprobe->register_rwsem);
1183 	put_uprobe(uprobe);
1184 
1185 	if (unlikely(ret == -EAGAIN))
1186 		goto retry;
1187 	return ret;
1188 }
1189 
1190 int uprobe_register(struct inode *inode, loff_t offset,
1191 		    struct uprobe_consumer *uc)
1192 {
1193 	return __uprobe_register(inode, offset, 0, uc);
1194 }
1195 EXPORT_SYMBOL_GPL(uprobe_register);
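/*
 * Usage sketch (illustrative only, not part of this file): a kernel
 * module could attach a consumer roughly as follows; the "my_*" names
 * are hypothetical and error handling is elided.
 *
 *	static int my_handler(struct uprobe_consumer *uc, struct pt_regs *regs)
 *	{
 *		pr_info("uprobe hit, ip=%lx\n", instruction_pointer(regs));
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer my_uc = { .handler = my_handler };
 *
 *	err = uprobe_register(inode, offset, &my_uc);
 *	...
 *	uprobe_unregister(inode, offset, &my_uc);
 *
 * The caller must keep the inode (and its mount) referenced for the
 * whole registration, see __uprobe_register() above.
 */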
1196 
1197 int uprobe_register_refctr(struct inode *inode, loff_t offset,
1198 			   loff_t ref_ctr_offset, struct uprobe_consumer *uc)
1199 {
1200 	return __uprobe_register(inode, offset, ref_ctr_offset, uc);
1201 }
1202 EXPORT_SYMBOL_GPL(uprobe_register_refctr);
1203 
1204 /*
1205  * uprobe_apply - add or remove breakpoints for an already registered probe.
1206  * @inode: the file in which the probe resides.
1207  * @offset: offset from the start of the file.
1208  * @uc: consumer which wants to add more or remove some breakpoints
1209  * @add: add or remove the breakpoints
1210  */
1211 int uprobe_apply(struct inode *inode, loff_t offset,
1212 			struct uprobe_consumer *uc, bool add)
1213 {
1214 	struct uprobe *uprobe;
1215 	struct uprobe_consumer *con;
1216 	int ret = -ENOENT;
1217 
1218 	uprobe = find_uprobe(inode, offset);
1219 	if (WARN_ON(!uprobe))
1220 		return ret;
1221 
1222 	down_write(&uprobe->register_rwsem);
1223 	for (con = uprobe->consumers; con && con != uc ; con = con->next)
1224 		;
1225 	if (con)
1226 		ret = register_for_each_vma(uprobe, add ? uc : NULL);
1227 	up_write(&uprobe->register_rwsem);
1228 	put_uprobe(uprobe);
1229 
1230 	return ret;
1231 }
1232 
1233 static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
1234 {
1235 	struct vm_area_struct *vma;
1236 	int err = 0;
1237 
1238 	down_read(&mm->mmap_sem);
1239 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
1240 		unsigned long vaddr;
1241 		loff_t offset;
1242 
1243 		if (!valid_vma(vma, false) ||
1244 		    file_inode(vma->vm_file) != uprobe->inode)
1245 			continue;
1246 
1247 		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
1248 		if (uprobe->offset <  offset ||
1249 		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
1250 			continue;
1251 
1252 		vaddr = offset_to_vaddr(vma, uprobe->offset);
1253 		err |= remove_breakpoint(uprobe, mm, vaddr);
1254 	}
1255 	up_read(&mm->mmap_sem);
1256 
1257 	return err;
1258 }
1259 
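/*
 * Find any rbtree node for @inode whose offset lies in [min, max];
 * callers walk rb_prev()/rb_next() from it to collect the full range,
 * see build_probe_list().
 */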
1260 static struct rb_node *
1261 find_node_in_range(struct inode *inode, loff_t min, loff_t max)
1262 {
1263 	struct rb_node *n = uprobes_tree.rb_node;
1264 
1265 	while (n) {
1266 		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);
1267 
1268 		if (inode < u->inode) {
1269 			n = n->rb_left;
1270 		} else if (inode > u->inode) {
1271 			n = n->rb_right;
1272 		} else {
1273 			if (max < u->offset)
1274 				n = n->rb_left;
1275 			else if (min > u->offset)
1276 				n = n->rb_right;
1277 			else
1278 				break;
1279 		}
1280 	}
1281 
1282 	return n;
1283 }
1284 
1285 /*
1286  * For a given range in vma, build a list of probes that need to be inserted.
1287  */
1288 static void build_probe_list(struct inode *inode,
1289 				struct vm_area_struct *vma,
1290 				unsigned long start, unsigned long end,
1291 				struct list_head *head)
1292 {
1293 	loff_t min, max;
1294 	struct rb_node *n, *t;
1295 	struct uprobe *u;
1296 
1297 	INIT_LIST_HEAD(head);
1298 	min = vaddr_to_offset(vma, start);
1299 	max = min + (end - start) - 1;
1300 
1301 	spin_lock(&uprobes_treelock);
1302 	n = find_node_in_range(inode, min, max);
1303 	if (n) {
1304 		for (t = n; t; t = rb_prev(t)) {
1305 			u = rb_entry(t, struct uprobe, rb_node);
1306 			if (u->inode != inode || u->offset < min)
1307 				break;
1308 			list_add(&u->pending_list, head);
1309 			get_uprobe(u);
1310 		}
1311 		for (t = n; (t = rb_next(t)); ) {
1312 			u = rb_entry(t, struct uprobe, rb_node);
1313 			if (u->inode != inode || u->offset > max)
1314 				break;
1315 			list_add(&u->pending_list, head);
1316 			get_uprobe(u);
1317 		}
1318 	}
1319 	spin_unlock(&uprobes_treelock);
1320 }
1321 
1322 /* @vma contains the reference counter, not the probed instruction. */
1323 static int delayed_ref_ctr_inc(struct vm_area_struct *vma)
1324 {
1325 	struct list_head *pos, *q;
1326 	struct delayed_uprobe *du;
1327 	unsigned long vaddr;
1328 	int ret = 0, err = 0;
1329 
1330 	mutex_lock(&delayed_uprobe_lock);
1331 	list_for_each_safe(pos, q, &delayed_uprobe_list) {
1332 		du = list_entry(pos, struct delayed_uprobe, list);
1333 
1334 		if (du->mm != vma->vm_mm ||
1335 		    !valid_ref_ctr_vma(du->uprobe, vma))
1336 			continue;
1337 
1338 		vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset);
1339 		ret = __update_ref_ctr(vma->vm_mm, vaddr, 1);
1340 		if (ret) {
1341 			update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1);
1342 			if (!err)
1343 				err = ret;
1344 		}
1345 		delayed_uprobe_delete(du);
1346 	}
1347 	mutex_unlock(&delayed_uprobe_lock);
1348 	return err;
1349 }
1350 
1351 /*
1352  * Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
1353  *
1354  * Currently we ignore all errors and always return 0; the callers
1355  * can't handle the failure anyway.
1356  */
1357 int uprobe_mmap(struct vm_area_struct *vma)
1358 {
1359 	struct list_head tmp_list;
1360 	struct uprobe *uprobe, *u;
1361 	struct inode *inode;
1362 
1363 	if (no_uprobe_events())
1364 		return 0;
1365 
1366 	if (vma->vm_file &&
1367 	    (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
1368 	    test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags))
1369 		delayed_ref_ctr_inc(vma);
1370 
1371 	if (!valid_vma(vma, true))
1372 		return 0;
1373 
1374 	inode = file_inode(vma->vm_file);
1375 	if (!inode)
1376 		return 0;
1377 
1378 	mutex_lock(uprobes_mmap_hash(inode));
1379 	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
1380 	/*
1381 	 * We can race with uprobe_unregister(); this uprobe may already be
1382 	 * removed. But in that case filter_chain() must return false, as all
1383 	 * consumers have gone away.
1384 	 */
1385 	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
1386 		if (!fatal_signal_pending(current) &&
1387 		    filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
1388 			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
1389 			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
1390 		}
1391 		put_uprobe(uprobe);
1392 	}
1393 	mutex_unlock(uprobes_mmap_hash(inode));
1394 
1395 	return 0;
1396 }
1397 
1398 static bool
1399 vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1400 {
1401 	loff_t min, max;
1402 	struct inode *inode;
1403 	struct rb_node *n;
1404 
1405 	inode = file_inode(vma->vm_file);
1406 
1407 	min = vaddr_to_offset(vma, start);
1408 	max = min + (end - start) - 1;
1409 
1410 	spin_lock(&uprobes_treelock);
1411 	n = find_node_in_range(inode, min, max);
1412 	spin_unlock(&uprobes_treelock);
1413 
1414 	return !!n;
1415 }
1416 
1417 /*
1418  * Called in context of a munmap of a vma.
1419  */
1420 void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1421 {
1422 	if (no_uprobe_events() || !valid_vma(vma, false))
1423 		return;
1424 
1425 	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
1426 		return;
1427 
1428 	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
1429 	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
1430 		return;
1431 
1432 	if (vma_has_uprobes(vma, start, end))
1433 		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
1434 }
1435 
1436 /* Slot allocation for XOL */
1437 static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
1438 {
1439 	struct vm_area_struct *vma;
1440 	int ret;
1441 
1442 	if (down_write_killable(&mm->mmap_sem))
1443 		return -EINTR;
1444 
1445 	if (mm->uprobes_state.xol_area) {
1446 		ret = -EALREADY;
1447 		goto fail;
1448 	}
1449 
1450 	if (!area->vaddr) {
1451 		/* Try to map as high as possible; this is only a hint. */
1452 		area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
1453 						PAGE_SIZE, 0, 0);
1454 		if (IS_ERR_VALUE(area->vaddr)) {
1455 			ret = area->vaddr;
1456 			goto fail;
1457 		}
1458 	}
1459 
1460 	vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
1461 				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
1462 				&area->xol_mapping);
1463 	if (IS_ERR(vma)) {
1464 		ret = PTR_ERR(vma);
1465 		goto fail;
1466 	}
1467 
1468 	ret = 0;
1469 	/* pairs with get_xol_area() */
1470 	smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
1471  fail:
1472 	up_write(&mm->mmap_sem);
1473 
1474 	return ret;
1475 }
1476 
1477 static struct xol_area *__create_xol_area(unsigned long vaddr)
1478 {
1479 	struct mm_struct *mm = current->mm;
1480 	uprobe_opcode_t insn = UPROBE_SWBP_INSN;
1481 	struct xol_area *area;
1482 
1483 	area = kmalloc(sizeof(*area), GFP_KERNEL);
1484 	if (unlikely(!area))
1485 		goto out;
1486 
1487 	area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long),
1488 			       GFP_KERNEL);
1489 	if (!area->bitmap)
1490 		goto free_area;
1491 
1492 	area->xol_mapping.name = "[uprobes]";
1493 	area->xol_mapping.fault = NULL;
1494 	area->xol_mapping.pages = area->pages;
1495 	area->pages[0] = alloc_page(GFP_HIGHUSER);
1496 	if (!area->pages[0])
1497 		goto free_bitmap;
1498 	area->pages[1] = NULL;
1499 
1500 	area->vaddr = vaddr;
1501 	init_waitqueue_head(&area->wq);
1502 	/* Reserve the 1st slot for get_trampoline_vaddr() */
1503 	set_bit(0, area->bitmap);
1504 	atomic_set(&area->slot_count, 1);
1505 	arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);
1506 
1507 	if (!xol_add_vma(mm, area))
1508 		return area;
1509 
1510 	__free_page(area->pages[0]);
1511  free_bitmap:
1512 	kfree(area->bitmap);
1513  free_area:
1514 	kfree(area);
1515  out:
1516 	return NULL;
1517 }
1518 
1519 /*
1520  * get_xol_area - Allocate process's xol_area if necessary.
1521  * This area will be used for storing instructions for execution out of line.
1522  *
1523  * Returns the allocated area or NULL.
1524  */
1525 static struct xol_area *get_xol_area(void)
1526 {
1527 	struct mm_struct *mm = current->mm;
1528 	struct xol_area *area;
1529 
1530 	if (!mm->uprobes_state.xol_area)
1531 		__create_xol_area(0);
1532 
1533 	/* Pairs with xol_add_vma() smp_store_release() */
1534 	area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */
1535 	return area;
1536 }
1537 
1538 /*
1539  * uprobe_clear_state - Free the area allocated for slots.
1540  */
1541 void uprobe_clear_state(struct mm_struct *mm)
1542 {
1543 	struct xol_area *area = mm->uprobes_state.xol_area;
1544 
1545 	mutex_lock(&delayed_uprobe_lock);
1546 	delayed_uprobe_remove(NULL, mm);
1547 	mutex_unlock(&delayed_uprobe_lock);
1548 
1549 	if (!area)
1550 		return;
1551 
1552 	put_page(area->pages[0]);
1553 	kfree(area->bitmap);
1554 	kfree(area);
1555 }
1556 
1557 void uprobe_start_dup_mmap(void)
1558 {
1559 	percpu_down_read(&dup_mmap_sem);
1560 }
1561 
1562 void uprobe_end_dup_mmap(void)
1563 {
1564 	percpu_up_read(&dup_mmap_sem);
1565 }
1566 
1567 void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
1568 {
1569 	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
1570 		set_bit(MMF_HAS_UPROBES, &newmm->flags);
1571 		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
1572 		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
1573 	}
1574 }
1575 
1576 /*
1577  * Search for a free slot, waiting if all slots are busy.
1578  */
1579 static unsigned long xol_take_insn_slot(struct xol_area *area)
1580 {
1581 	unsigned long slot_addr;
1582 	int slot_nr;
1583 
1584 	do {
1585 		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
1586 		if (slot_nr < UINSNS_PER_PAGE) {
1587 			if (!test_and_set_bit(slot_nr, area->bitmap))
1588 				break;
1589 
1590 			slot_nr = UINSNS_PER_PAGE;
1591 			continue;
1592 		}
1593 		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
1594 	} while (slot_nr >= UINSNS_PER_PAGE);
1595 
1596 	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
1597 	atomic_inc(&area->slot_count);
1598 
1599 	return slot_addr;
1600 }
1601 
1602 /*
1603  * xol_get_insn_slot - allocate a slot for xol.
1604  * Returns the allocated slot address or 0.
1605  */
1606 static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
1607 {
1608 	struct xol_area *area;
1609 	unsigned long xol_vaddr;
1610 
1611 	area = get_xol_area();
1612 	if (!area)
1613 		return 0;
1614 
1615 	xol_vaddr = xol_take_insn_slot(area);
1616 	if (unlikely(!xol_vaddr))
1617 		return 0;
1618 
1619 	arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
1620 			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
1621 
1622 	return xol_vaddr;
1623 }
1624 
1625 /*
1626  * xol_free_insn_slot - If the slot was earlier allocated by
1627  * xol_get_insn_slot(), make the slot available for
1628  * subsequent requests.
1629  */
1630 static void xol_free_insn_slot(struct task_struct *tsk)
1631 {
1632 	struct xol_area *area;
1633 	unsigned long vma_end;
1634 	unsigned long slot_addr;
1635 
1636 	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
1637 		return;
1638 
1639 	slot_addr = tsk->utask->xol_vaddr;
1640 	if (unlikely(!slot_addr))
1641 		return;
1642 
1643 	area = tsk->mm->uprobes_state.xol_area;
1644 	vma_end = area->vaddr + PAGE_SIZE;
1645 	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
1646 		unsigned long offset;
1647 		int slot_nr;
1648 
1649 		offset = slot_addr - area->vaddr;
1650 		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
1651 		if (slot_nr >= UINSNS_PER_PAGE)
1652 			return;
1653 
1654 		clear_bit(slot_nr, area->bitmap);
1655 		atomic_dec(&area->slot_count);
1656 		smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
1657 		if (waitqueue_active(&area->wq))
1658 			wake_up(&area->wq);
1659 
1660 		tsk->utask->xol_vaddr = 0;
1661 	}
1662 }
1663 
1664 void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
1665 				  void *src, unsigned long len)
1666 {
1667 	/* Initialize the slot */
1668 	copy_to_page(page, vaddr, src, len);
1669 
1670 	/*
1671 	 * We probably need flush_icache_user_range() but it needs vma.
1672 	 * This should work on most architectures by default. If an
1673 	 * architecture needs to do something different it can define
1674 	 * its own version of the function.
1675 	 */
1676 	flush_dcache_page(page);
1677 }
1678 
1679 /**
1680  * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
1681  * @regs: Reflects the saved state of the task after it has hit a breakpoint
1682  * instruction.
1683  * Return the address of the breakpoint instruction.
1684  */
1685 unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
1686 {
1687 	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
1688 }
1689 
1690 unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
1691 {
1692 	struct uprobe_task *utask = current->utask;
1693 
1694 	if (unlikely(utask && utask->active_uprobe))
1695 		return utask->vaddr;
1696 
1697 	return instruction_pointer(regs);
1698 }
1699 
1700 static struct return_instance *free_ret_instance(struct return_instance *ri)
1701 {
1702 	struct return_instance *next = ri->next;
1703 	put_uprobe(ri->uprobe);
1704 	kfree(ri);
1705 	return next;
1706 }
1707 
1708 /*
1709  * Called with no locks held.
1710  * Called in context of an exiting or an exec-ing thread.
1711  */
1712 void uprobe_free_utask(struct task_struct *t)
1713 {
1714 	struct uprobe_task *utask = t->utask;
1715 	struct return_instance *ri;
1716 
1717 	if (!utask)
1718 		return;
1719 
1720 	if (utask->active_uprobe)
1721 		put_uprobe(utask->active_uprobe);
1722 
1723 	ri = utask->return_instances;
1724 	while (ri)
1725 		ri = free_ret_instance(ri);
1726 
1727 	xol_free_insn_slot(t);
1728 	kfree(utask);
1729 	t->utask = NULL;
1730 }
1731 
1732 /*
1733  * Allocate a uprobe_task object for the task if necessary.
1734  * Called when the thread hits a breakpoint.
1735  *
1736  * Returns:
1737  * - pointer to new uprobe_task on success
1738  * - NULL otherwise
1739  */
1740 static struct uprobe_task *get_utask(void)
1741 {
1742 	if (!current->utask)
1743 		current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1744 	return current->utask;
1745 }
1746 
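/*
 * dup_utask - deep-copy the parent's return_instance chain into a new
 * task's utask so uretprobes keep working across fork()/clone().
 */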
1747 static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
1748 {
1749 	struct uprobe_task *n_utask;
1750 	struct return_instance **p, *o, *n;
1751 
1752 	n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1753 	if (!n_utask)
1754 		return -ENOMEM;
1755 	t->utask = n_utask;
1756 
1757 	p = &n_utask->return_instances;
1758 	for (o = o_utask->return_instances; o; o = o->next) {
1759 		n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
1760 		if (!n)
1761 			return -ENOMEM;
1762 
1763 		*n = *o;
1764 		get_uprobe(n->uprobe);
1765 		n->next = NULL;
1766 
1767 		*p = n;
1768 		p = &n->next;
1769 		n_utask->depth++;
1770 	}
1771 
1772 	return 0;
1773 }
1774 
1775 static void uprobe_warn(struct task_struct *t, const char *msg)
1776 {
1777 	pr_warn("uprobe: %s:%d failed to %s\n",
1778 			current->comm, current->pid, msg);
1779 }
1780 
1781 static void dup_xol_work(struct callback_head *work)
1782 {
1783 	if (current->flags & PF_EXITING)
1784 		return;
1785 
1786 	if (!__create_xol_area(current->utask->dup_xol_addr) &&
1787 			!fatal_signal_pending(current))
1788 		uprobe_warn(current, "dup xol area");
1789 }
1790 
1791 /*
1792  * Called in context of a new clone/fork from copy_process.
1793  */
1794 void uprobe_copy_process(struct task_struct *t, unsigned long flags)
1795 {
1796 	struct uprobe_task *utask = current->utask;
1797 	struct mm_struct *mm = current->mm;
1798 	struct xol_area *area;
1799 
1800 	t->utask = NULL;
1801 
1802 	if (!utask || !utask->return_instances)
1803 		return;
1804 
1805 	if (mm == t->mm && !(flags & CLONE_VFORK))
1806 		return;
1807 
1808 	if (dup_utask(t, utask))
1809 		return uprobe_warn(t, "dup ret instances");
1810 
1811 	/* The task can fork() after dup_xol_work() fails */
1812 	area = mm->uprobes_state.xol_area;
1813 	if (!area)
1814 		return uprobe_warn(t, "dup xol area");
1815 
1816 	if (mm == t->mm)
1817 		return;
1818 
1819 	t->utask->dup_xol_addr = area->vaddr;
1820 	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
1821 	task_work_add(t, &t->utask->dup_xol_work, true);
1822 }
1823 
1824 /*
1825  * The current area->vaddr notion assumes the trampoline address is always
1826  * equal to area->vaddr.
1827  *
1828  * Returns -1 in case the xol_area is not allocated.
1829  */
1830 static unsigned long get_trampoline_vaddr(void)
1831 {
1832 	struct xol_area *area;
1833 	unsigned long trampoline_vaddr = -1;
1834 
1835 	/* Pairs with xol_add_vma() smp_store_release() */
1836 	area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */
1837 	if (area)
1838 		trampoline_vaddr = area->vaddr;
1839 
1840 	return trampoline_vaddr;
1841 }
1842 
1843 static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
1844 					struct pt_regs *regs)
1845 {
1846 	struct return_instance *ri = utask->return_instances;
1847 	enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;
1848 
1849 	while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
1850 		ri = free_ret_instance(ri);
1851 		utask->depth--;
1852 	}
1853 	utask->return_instances = ri;
1854 }
1855 
1856 static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
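/*
 * prepare_uretprobe - record a return_instance for a uretprobe hit.
 * The original return address is replaced with the XOL trampoline
 * address; when the probed function returns, the trap on the
 * trampoline is handled and the saved orig_ret_vaddr is restored.
 */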
1857 {
1858 	struct return_instance *ri;
1859 	struct uprobe_task *utask;
1860 	unsigned long orig_ret_vaddr, trampoline_vaddr;
1861 	bool chained;
1862 
1863 	if (!get_xol_area())
1864 		return;
1865 
1866 	utask = get_utask();
1867 	if (!utask)
1868 		return;
1869 
1870 	if (utask->depth >= MAX_URETPROBE_DEPTH) {
1871 		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
1872 				" nestedness limit pid/tgid=%d/%d\n",
1873 				current->pid, current->tgid);
1874 		return;
1875 	}
1876 
1877 	ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
1878 	if (!ri)
1879 		return;
1880 
1881 	trampoline_vaddr = get_trampoline_vaddr();
1882 	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
1883 	if (orig_ret_vaddr == -1)
1884 		goto fail;
1885 
1886 	/* drop the entries invalidated by longjmp() */
1887 	chained = (orig_ret_vaddr == trampoline_vaddr);
1888 	cleanup_return_instances(utask, chained, regs);
1889 
1890 	/*
1891 	 * We don't want to keep the trampoline address on the stack; rather,
1892 	 * keep the original return address of the first caller through all the
1893 	 * subsequent instances. This also makes breakpoint unwinding easier.
1894 	 */
1895 	if (chained) {
1896 		if (!utask->return_instances) {
1897 			/*
1898 			 * This should not be possible; most likely we have an
1899 			 * attack from user-space.
1900 			 */
1901 			uprobe_warn(current, "handle tail call");
1902 			goto fail;
1903 		}
1904 		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
1905 	}
1906 
1907 	ri->uprobe = get_uprobe(uprobe);
1908 	ri->func = instruction_pointer(regs);
1909 	ri->stack = user_stack_pointer(regs);
1910 	ri->orig_ret_vaddr = orig_ret_vaddr;
1911 	ri->chained = chained;
1912 
1913 	utask->depth++;
1914 	ri->next = utask->return_instances;
1915 	utask->return_instances = ri;
1916 
1917 	return;
1918  fail:
1919 	kfree(ri);
1920 }
1921 
1922 /* Prepare to single-step probed instruction out of line. */
1923 static int
1924 pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
1925 {
1926 	struct uprobe_task *utask;
1927 	unsigned long xol_vaddr;
1928 	int err;
1929 
1930 	utask = get_utask();
1931 	if (!utask)
1932 		return -ENOMEM;
1933 
1934 	xol_vaddr = xol_get_insn_slot(uprobe);
1935 	if (!xol_vaddr)
1936 		return -ENOMEM;
1937 
1938 	utask->xol_vaddr = xol_vaddr;
1939 	utask->vaddr = bp_vaddr;
1940 
1941 	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
1942 	if (unlikely(err)) {
1943 		xol_free_insn_slot(current);
1944 		return err;
1945 	}
1946 
1947 	utask->active_uprobe = uprobe;
1948 	utask->state = UTASK_SSTEP;
1949 	return 0;
1950 }
1951 
1952 /*
1953  * If we are single-stepping, then ensure this thread is not connected to
1954  * non-fatal signals until completion of the single-step.  When the xol insn
1955  * itself triggers the signal, restart the original insn even if the task is
1956  * already SIGKILL'ed (since the coredump should report the correct ip).  This
1957  * is even more important if the task has a handler for SIGSEGV/etc: the
1958  * _same_ instruction would have to be repeated after return from the signal
1959  * handler, and SSTEP could never finish in that case.
1960  */
1961 bool uprobe_deny_signal(void)
1962 {
1963 	struct task_struct *t = current;
1964 	struct uprobe_task *utask = t->utask;
1965 
1966 	if (likely(!utask || !utask->active_uprobe))
1967 		return false;
1968 
1969 	WARN_ON_ONCE(utask->state != UTASK_SSTEP);
1970 
1971 	if (signal_pending(t)) {
1972 		spin_lock_irq(&t->sighand->siglock);
1973 		clear_tsk_thread_flag(t, TIF_SIGPENDING);
1974 		spin_unlock_irq(&t->sighand->siglock);
1975 
1976 		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
1977 			utask->state = UTASK_SSTEP_TRAPPED;
1978 			set_tsk_thread_flag(t, TIF_UPROBE);
1979 		}
1980 	}
1981 
1982 	return true;
1983 }
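
/*
 * Editor's note: the caller is get_signal() in kernel/signal.c; roughly:
 *
 *	if (unlikely(uprobe_deny_signal()))
 *		return false;
 *
 * i.e. non-fatal signal delivery is simply postponed while the thread
 * single-steps; handle_singlestep() below re-runs recalc_sigpending()
 * once the step is done.
 */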
1984 
1985 static void mmf_recalc_uprobes(struct mm_struct *mm)
1986 {
1987 	struct vm_area_struct *vma;
1988 
1989 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
1990 		if (!valid_vma(vma, false))
1991 			continue;
1992 		/*
1993 		 * This is not strictly accurate, we can race with
1994 		 * uprobe_unregister() and see the already removed
1995 		 * uprobe if delete_uprobe() was not yet called.
1996 		 * Or this uprobe can be filtered out.
1997 		 */
1998 		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
1999 			return;
2000 	}
2001 
2002 	clear_bit(MMF_HAS_UPROBES, &mm->flags);
2003 }
2004 
2005 static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
2006 {
2007 	struct page *page;
2008 	uprobe_opcode_t opcode;
2009 	int result;
2010 
2011 	pagefault_disable();
2012 	result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
2013 	pagefault_enable();
2014 
2015 	if (likely(result == 0))
2016 		goto out;
2017 
2018 	/*
2019 	 * The NULL 'tsk' here ensures that any faults that occur here
2020 	 * will not be accounted to the task.  'mm' *is* current->mm,
2021 	 * but we treat this as a 'remote' access since it is
2022 	 * essentially a kernel access to the memory.
2023 	 */
2024 	result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page,
2025 			NULL, NULL);
2026 	if (result < 0)
2027 		return result;
2028 
2029 	copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
2030 	put_page(page);
2031  out:
2032 	/* This needs to return true for any variant of the trap insn */
2033 	return is_trap_insn(&opcode);
2034 }
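
/*
 * Editor's note: the __get_user() fast path above can only succeed if
 * the page is already present, because pagefault_disable() forbids the
 * (sleeping) fault handler; on failure we fall back to a full
 * get_user_pages_remote() walk that can fault the page in.
 */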
2035 
2036 static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
2037 {
2038 	struct mm_struct *mm = current->mm;
2039 	struct uprobe *uprobe = NULL;
2040 	struct vm_area_struct *vma;
2041 
2042 	down_read(&mm->mmap_sem);
2043 	vma = find_vma(mm, bp_vaddr);
2044 	if (vma && vma->vm_start <= bp_vaddr) {
2045 		if (valid_vma(vma, false)) {
2046 			struct inode *inode = file_inode(vma->vm_file);
2047 			loff_t offset = vaddr_to_offset(vma, bp_vaddr);
2048 
2049 			uprobe = find_uprobe(inode, offset);
2050 		}
2051 
2052 		if (!uprobe)
2053 			*is_swbp = is_trap_at_addr(mm, bp_vaddr);
2054 	} else {
2055 		*is_swbp = -EFAULT;
2056 	}
2057 
2058 	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
2059 		mmf_recalc_uprobes(mm);
2060 	up_read(&mm->mmap_sem);
2061 
2062 	return uprobe;
2063 }
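
/*
 * Editor's note on the *is_swbp out-parameter, as consumed by
 * handle_swbp() below:  > 0 - a trap insn that is not ours (e.g. a
 * debugger's), so SIGTRAP is sent;  0 - no trap insn, we raced with
 * uprobe_unregister(), just restart;  < 0 - the memory is unmapped
 * (-EFAULT), so restarting raises the real fault.
 */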
2064 
2065 static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
2066 {
2067 	struct uprobe_consumer *uc;
2068 	int remove = UPROBE_HANDLER_REMOVE;
2069 	bool need_prep = false; /* prepare return uprobe, when needed */
2070 
2071 	down_read(&uprobe->register_rwsem);
2072 	for (uc = uprobe->consumers; uc; uc = uc->next) {
2073 		int rc = 0;
2074 
2075 		if (uc->handler) {
2076 			rc = uc->handler(uc, regs);
2077 			WARN(rc & ~UPROBE_HANDLER_MASK,
2078 				"bad rc=0x%x from %ps()\n", rc, uc->handler);
2079 		}
2080 
2081 		if (uc->ret_handler)
2082 			need_prep = true;
2083 
2084 		remove &= rc;
2085 	}
2086 
2087 	if (need_prep && !remove)
2088 		prepare_uretprobe(uprobe, regs); /* put bp at return */
2089 
2090 	if (remove && uprobe->consumers) {
2091 		WARN_ON(!uprobe_is_active(uprobe));
2092 		unapply_uprobe(uprobe, current->mm);
2093 	}
2094 	up_read(&uprobe->register_rwsem);
2095 }
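
/*
 * Editor's example (a sketch; the names are invented): handler_chain()
 * above keeps the breakpoint only while some consumer wants it, so a
 * one-shot consumer can request removal via UPROBE_HANDLER_REMOVE:
 *
 *	static int once_handler(struct uprobe_consumer *self,
 *				struct pt_regs *regs)
 *	{
 *		pr_info("hit at ip=%lx\n", instruction_pointer(regs));
 *		return UPROBE_HANDLER_REMOVE;	// unapply from this mm
 *	}
 *
 *	static struct uprobe_consumer once_uc = { .handler = once_handler };
 *	// registered earlier via uprobe_register(inode, offset, &once_uc)
 *
 * Note the "remove &= rc" above: the probe is unapplied only if every
 * consumer agreed (a consumer with a NULL ->handler implicitly vetoes,
 * since its rc stays 0).
 */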
2096 
2097 static void
2098 handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
2099 {
2100 	struct uprobe *uprobe = ri->uprobe;
2101 	struct uprobe_consumer *uc;
2102 
2103 	down_read(&uprobe->register_rwsem);
2104 	for (uc = uprobe->consumers; uc; uc = uc->next) {
2105 		if (uc->ret_handler)
2106 			uc->ret_handler(uc, ri->func, regs);
2107 	}
2108 	up_read(&uprobe->register_rwsem);
2109 }
2110 
2111 static struct return_instance *find_next_ret_chain(struct return_instance *ri)
2112 {
2113 	bool chained;
2114 
2115 	do {
2116 		chained = ri->chained;
2117 		ri = ri->next;	/* can't be NULL if chained */
2118 	} while (chained);
2119 
2120 	return ri;
2121 }
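
/*
 * Editor's illustration: assume probed f tail-calls probed g, so g's
 * hijack finds the trampoline already in place (chained).  The list
 * then looks like
 *
 *	utask->return_instances: [g: chained] -> [f: !chained] -> <older>
 *
 * and find_next_ret_chain([g]) returns <older>, the first instance
 * belonging to a different real stack frame.
 */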
2122 
2123 static void handle_trampoline(struct pt_regs *regs)
2124 {
2125 	struct uprobe_task *utask;
2126 	struct return_instance *ri, *next;
2127 	bool valid;
2128 
2129 	utask = current->utask;
2130 	if (!utask)
2131 		goto sigill;
2132 
2133 	ri = utask->return_instances;
2134 	if (!ri)
2135 		goto sigill;
2136 
2137 	do {
2138 		/*
2139 		 * We should throw out the frames invalidated by longjmp().
2140 		 * If this chain is valid, then the next one should be alive
2141 		 * or NULL; the latter case means that nobody but ri->func
2142 		 * could hit this trampoline on return. TODO: sigaltstack().
2143 		 */
2144 		next = find_next_ret_chain(ri);
2145 		valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);
2146 
2147 		instruction_pointer_set(regs, ri->orig_ret_vaddr);
2148 		do {
2149 			if (valid)
2150 				handle_uretprobe_chain(ri, regs);
2151 			ri = free_ret_instance(ri);
2152 			utask->depth--;
2153 		} while (ri != next);
2154 	} while (!valid);
2155 
2156 	utask->return_instances = ri;
2157 	return;
2158 
2159  sigill:
2160 	uprobe_warn(current, "handle uretprobe, sending SIGILL.");
2161 	force_sig(SIGILL);
2162 
2163 }
2164 
2165 bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
2166 {
2167 	return false;
2168 }
2169 
2170 bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
2171 					struct pt_regs *regs)
2172 {
2173 	return true;
2174 }
2175 
2176 /*
2177  * Run handler and ask thread to singlestep.
2178  * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
2179  */
2180 static void handle_swbp(struct pt_regs *regs)
2181 {
2182 	struct uprobe *uprobe;
2183 	unsigned long bp_vaddr;
2184 	int uninitialized_var(is_swbp);
2185 
2186 	bp_vaddr = uprobe_get_swbp_addr(regs);
2187 	if (bp_vaddr == get_trampoline_vaddr())
2188 		return handle_trampoline(regs);
2189 
2190 	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
2191 	if (!uprobe) {
2192 		if (is_swbp > 0) {
2193 			/* No matching uprobe; signal SIGTRAP. */
2194 			send_sig(SIGTRAP, current, 0);
2195 		} else {
2196 			/*
2197 			 * Either we raced with uprobe_unregister() or we can't
2198 			 * access this memory. The latter is only possible if
2199 			 * another thread plays with our ->mm. In both cases
2200 			 * we can simply restart. If this vma was unmapped we
2201 			 * can pretend this insn was not executed yet and get
2202 			 * the (correct) SIGSEGV after restart.
2203 			 */
2204 			instruction_pointer_set(regs, bp_vaddr);
2205 		}
2206 		return;
2207 	}
2208 
2209 	/* change the ip in advance, both for ->handler() and for restart */
2210 	instruction_pointer_set(regs, bp_vaddr);
2211 
2212 	/*
2213 	 * TODO: move copy_insn/etc into _register and remove this hack.
2214 	 * After we hit the bp, _unregister + _register can install the
2215 	 * new and not-yet-analyzed uprobe at the same address, restart.
2216 	 */
2217 	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
2218 		goto out;
2219 
2220 	/*
2221 	 * Pairs with the smp_wmb() in prepare_uprobe().
2222 	 *
2223 	 * Guarantees that if we see the UPROBE_COPY_INSN bit set, then
2224 	 * we must also see the stores to &uprobe->arch performed by the
2225 	 * prepare_uprobe() call.
2226 	 */
2227 	smp_rmb();
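
	/*
	 * Editor's note: the write side in prepare_uprobe() is, roughly,
	 *
	 *	... copy_insn() and arch_uprobe_analyze_insn() ...
	 *	smp_wmb();
	 *	set_bit(UPROBE_COPY_INSN, &uprobe->flags);
	 *
	 * which is what the test_bit() + smp_rmb() sequence above pairs with.
	 */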
2228 
2229 	/* Tracing handlers use ->utask to communicate with fetch methods */
2230 	if (!get_utask())
2231 		goto out;
2232 
2233 	if (arch_uprobe_ignore(&uprobe->arch, regs))
2234 		goto out;
2235 
2236 	handler_chain(uprobe, regs);
2237 
2238 	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
2239 		goto out;
2240 
2241 	if (!pre_ssout(uprobe, regs, bp_vaddr))
2242 		return;
2243 
2244 	/* arch_uprobe_skip_sstep() succeeded, or pre_ssout() failed: restart */
2245 out:
2246 	put_uprobe(uprobe);
2247 }
2248 
2249 /*
2250  * Perform required fix-ups and disable singlestep.
2251  * Allow pending signals to take effect.
2252  */
2253 static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
2254 {
2255 	struct uprobe *uprobe;
2256 	int err = 0;
2257 
2258 	uprobe = utask->active_uprobe;
2259 	if (utask->state == UTASK_SSTEP_ACK)
2260 		err = arch_uprobe_post_xol(&uprobe->arch, regs);
2261 	else if (utask->state == UTASK_SSTEP_TRAPPED)
2262 		arch_uprobe_abort_xol(&uprobe->arch, regs);
2263 	else
2264 		WARN_ON_ONCE(1);
2265 
2266 	put_uprobe(uprobe);
2267 	utask->active_uprobe = NULL;
2268 	utask->state = UTASK_RUNNING;
2269 	xol_free_insn_slot(current);
2270 
2271 	spin_lock_irq(&current->sighand->siglock);
2272 	recalc_sigpending(); /* see uprobe_deny_signal() */
2273 	spin_unlock_irq(&current->sighand->siglock);
2274 
2275 	if (unlikely(err)) {
2276 		uprobe_warn(current, "execute the probed insn, sending SIGILL.");
2277 		force_sig(SIGILL);
2278 	}
2279 }
2280 
2281 /*
2282  * On a breakpoint hit, the breakpoint notifier sets the TIF_UPROBE flag and
2283  * allows the thread to return from the interrupt. After that, handle_swbp()
2284  * sets utask->active_uprobe.
2285  *
2286  * On a singlestep exception, the singlestep notifier sets the TIF_UPROBE flag
2287  * and allows the thread to return from the interrupt.
2288  *
2289  * While returning to userspace, the thread notices the TIF_UPROBE flag and
2290  * calls uprobe_notify_resume().
2291  */
2292 void uprobe_notify_resume(struct pt_regs *regs)
2293 {
2294 	struct uprobe_task *utask;
2295 
2296 	clear_thread_flag(TIF_UPROBE);
2297 
2298 	utask = current->utask;
2299 	if (utask && utask->active_uprobe)
2300 		handle_singlestep(utask, regs);
2301 	else
2302 		handle_swbp(regs);
2303 }
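
/*
 * Editor's walk-through of one complete probe hit (a sketch of the
 * functions above):
 *
 *   1. swbp trap   -> uprobe_pre_sstep_notifier() sets TIF_UPROBE
 *   2. ret-to-user -> uprobe_notify_resume() -> handle_swbp(): runs
 *	handler_chain(), then pre_ssout() arms UTASK_SSTEP and points
 *	the ip at the xol slot
 *   3. sstep trap  -> uprobe_post_sstep_notifier() sets UTASK_SSTEP_ACK
 *   4. ret-to-user -> uprobe_notify_resume() -> handle_singlestep():
 *	arch_uprobe_post_xol() fixes things up, back to UTASK_RUNNING
 */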
2304 
2305 /*
2306  * uprobe_pre_sstep_notifier gets called from interrupt context as part of
2307  * the notifier mechanism. Sets the TIF_UPROBE flag and indicates a breakpoint hit.
2308  */
2309 int uprobe_pre_sstep_notifier(struct pt_regs *regs)
2310 {
2311 	if (!current->mm)
2312 		return 0;
2313 
2314 	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
2315 	    (!current->utask || !current->utask->return_instances))
2316 		return 0;
2317 
2318 	set_thread_flag(TIF_UPROBE);
2319 	return 1;
2320 }
2321 
2322 /*
2323  * uprobe_post_sstep_notifier gets called in interrupt context as part of the
2324  * notifier mechanism. Sets the TIF_UPROBE flag and indicates singlestep completion.
2325  */
2326 int uprobe_post_sstep_notifier(struct pt_regs *regs)
2327 {
2328 	struct uprobe_task *utask = current->utask;
2329 
2330 	if (!current->mm || !utask || !utask->active_uprobe)
2331 		/* task is currently not uprobed */
2332 		return 0;
2333 
2334 	utask->state = UTASK_SSTEP_ACK;
2335 	set_thread_flag(TIF_UPROBE);
2336 	return 1;
2337 }
2338 
2339 static struct notifier_block uprobe_exception_nb = {
2340 	.notifier_call		= arch_uprobe_exception_notify,
2341 	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
2342 };
2343 
2344 void __init uprobes_init(void)
2345 {
2346 	int i;
2347 
2348 	for (i = 0; i < UPROBES_HASH_SZ; i++)
2349 		mutex_init(&uprobes_mmap_mutex[i]);
2350 
2351 	BUG_ON(register_die_notifier(&uprobe_exception_nb));
2352 }
2353