xref: /openbmc/linux/kernel/events/uprobes.c (revision 84d517f3)
1 /*
2  * User-space Probes (UProbes)
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17  *
18  * Copyright (C) IBM Corporation, 2008-2012
19  * Authors:
20  *	Srikar Dronamraju
21  *	Jim Keniston
22  * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
23  */
24 
25 #include <linux/kernel.h>
26 #include <linux/highmem.h>
27 #include <linux/pagemap.h>	/* read_mapping_page */
28 #include <linux/slab.h>
29 #include <linux/sched.h>
30 #include <linux/export.h>
31 #include <linux/rmap.h>		/* anon_vma_prepare */
32 #include <linux/mmu_notifier.h>	/* set_pte_at_notify */
33 #include <linux/swap.h>		/* try_to_free_swap */
34 #include <linux/ptrace.h>	/* user_enable_single_step */
35 #include <linux/kdebug.h>	/* notifier mechanism */
36 #include "../../mm/internal.h"	/* munlock_vma_page */
37 #include <linux/percpu-rwsem.h>
38 #include <linux/task_work.h>
39 
40 #include <linux/uprobes.h>
41 
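/*
 * Each probed mm gets a single XOL page that is carved up into fixed-size
 * instruction slots.  For example, assuming 4 KiB pages and a slot size
 * (UPROBE_XOL_SLOT_BYTES) of 128 bytes, UINSNS_PER_PAGE works out to 32.
 */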
42 #define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
43 #define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE
44 
45 static struct rb_root uprobes_tree = RB_ROOT;
46 /*
47  * Allows us to skip uprobe_mmap() if there are no uprobe events active
48  * at this time.  A fine-grained per-inode count would probably be better?
49  */
50 #define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)
51 
52 static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */
53 
54 #define UPROBES_HASH_SZ	13
55 /* serialize uprobe->pending_list */
56 static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
57 #define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
58 
59 static struct percpu_rw_semaphore dup_mmap_sem;
60 
61 /* Have a copy of the original instruction */
62 #define UPROBE_COPY_INSN	0
63 
64 struct uprobe {
65 	struct rb_node		rb_node;	/* node in the rb tree */
66 	atomic_t		ref;
67 	struct rw_semaphore	register_rwsem;
68 	struct rw_semaphore	consumer_rwsem;
69 	struct list_head	pending_list;
70 	struct uprobe_consumer	*consumers;
71 	struct inode		*inode;		/* Also hold a ref to inode */
72 	loff_t			offset;
73 	unsigned long		flags;
74 
75 	/*
76 	 * The generic code assumes that it has two members of unknown type
77 	 * owned by the arch-specific code:
78 	 *
79 	 * 	insn -	copy_insn() saves the original instruction here for
80 	 *		arch_uprobe_analyze_insn().
81 	 *
82 	 *	ixol -	potentially modified instruction to execute out of
83 	 *		line, copied to xol_area by xol_get_insn_slot().
84 	 */
85 	struct arch_uprobe	arch;
86 };
87 
88 struct return_instance {
89 	struct uprobe		*uprobe;
90 	unsigned long		func;
91 	unsigned long		orig_ret_vaddr; /* original return address */
92 	bool			chained;	/* true, if instance is nested */
93 
94 	struct return_instance	*next;		/* keep as stack */
95 };
96 
97 /*
98  * Execute-out-of-line (XOL) area: an anonymous, executable mapping installed
99  * by the probed task to execute the copy of the original instruction
100  * mangled by set_swbp().
101  *
102  * On a breakpoint hit, the thread contends for a slot.  It frees the
103  * slot after single-stepping. Currently a fixed number of slots are
104  * allocated.
105  */
106 struct xol_area {
107 	wait_queue_head_t 	wq;		/* if all slots are busy */
108 	atomic_t 		slot_count;	/* number of in-use slots */
109 	unsigned long 		*bitmap;	/* 0 = free slot */
110 	struct page 		*page;
111 
112 	/*
113 	 * We keep the vma's vm_start rather than a pointer to the vma
114 	 * itself.  The probed process or a naughty kernel module could make
115 	 * the vma go away, and we must handle that reasonably gracefully.
116 	 */
117 	unsigned long 		vaddr;		/* Page(s) of instruction slots */
118 };
119 
120 /*
121  * valid_vma: Verify that the specified vma is an executable vma.
122  * Relax restrictions while unregistering: vm_flags might have
123  * changed after the breakpoint was inserted.
124  *	- is_register: indicates if we are in register context.
125  *	- Return true if @vma is a file-backed executable vma
126  *	  (see the flag check below).
127  */
128 static bool valid_vma(struct vm_area_struct *vma, bool is_register)
129 {
130 	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_SHARED;
131 
132 	if (is_register)
133 		flags |= VM_WRITE;
134 
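	/*
	 * The vma must be file-backed and VM_MAYEXEC, and must not be hugetlb
	 * or shared (nor writable when registering): masking vm_flags with
	 * the bits above must leave exactly VM_MAYEXEC set.
	 */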
135 	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
136 }
137 
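/*
 * Translate between a file offset and the user virtual address at which
 * that offset is mapped by @vma.  A purely illustrative example: with
 * vm_start == 0x400000, vm_pgoff == 0 and offset == 0x1234,
 * offset_to_vaddr() yields 0x401234 and vaddr_to_offset() maps that
 * address back to 0x1234.
 */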
138 static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
139 {
140 	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
141 }
142 
143 static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
144 {
145 	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
146 }
147 
148 /**
149  * __replace_page - replace page in vma by new page.
150  * based on replace_page in mm/ksm.c
151  *
152  * @vma:      vma that holds the pte pointing to page
153  * @addr:     address the old @page is mapped at
154  * @page:     the old page we are replacing by @kpage
155  * @kpage:    the modified page that replaces @page
156  *
157  * Returns 0 on success, -EAGAIN if @page is no longer mapped at @addr.
158  */
159 static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
160 				struct page *page, struct page *kpage)
161 {
162 	struct mm_struct *mm = vma->vm_mm;
163 	spinlock_t *ptl;
164 	pte_t *ptep;
165 	int err;
166 	/* For mmu_notifiers */
167 	const unsigned long mmun_start = addr;
168 	const unsigned long mmun_end   = addr + PAGE_SIZE;
169 
170 	/* For try_to_free_swap() and munlock_vma_page() below */
171 	lock_page(page);
172 
173 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
174 	err = -EAGAIN;
175 	ptep = page_check_address(page, mm, addr, &ptl, 0);
176 	if (!ptep)
177 		goto unlock;
178 
179 	get_page(kpage);
180 	page_add_new_anon_rmap(kpage, vma, addr);
181 
182 	if (!PageAnon(page)) {
183 		dec_mm_counter(mm, MM_FILEPAGES);
184 		inc_mm_counter(mm, MM_ANONPAGES);
185 	}
186 
187 	flush_cache_page(vma, addr, pte_pfn(*ptep));
188 	ptep_clear_flush(vma, addr, ptep);
189 	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
190 
191 	page_remove_rmap(page);
192 	if (!page_mapped(page))
193 		try_to_free_swap(page);
194 	pte_unmap_unlock(ptep, ptl);
195 
196 	if (vma->vm_flags & VM_LOCKED)
197 		munlock_vma_page(page);
198 	put_page(page);
199 
200 	err = 0;
201  unlock:
202 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
203 	unlock_page(page);
204 	return err;
205 }
206 
207 /**
208  * is_swbp_insn - check if instruction is breakpoint instruction.
209  * @insn: instruction to be checked.
210  * Default implementation of is_swbp_insn
211  * Returns true if @insn is a breakpoint instruction.
212  */
213 bool __weak is_swbp_insn(uprobe_opcode_t *insn)
214 {
215 	return *insn == UPROBE_SWBP_INSN;
216 }
217 
218 /**
219  * is_trap_insn - check if instruction is a trap instruction.
220  * @insn: instruction to be checked.
221  * Default implementation of is_trap_insn
222  * Returns true if @insn is a trap instruction.
223  *
224  * This function is needed for the case where an architecture has multiple
225  * trap instructions (like powerpc).
226  */
227 bool __weak is_trap_insn(uprobe_opcode_t *insn)
228 {
229 	return is_swbp_insn(insn);
230 }
231 
232 static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
233 {
234 	void *kaddr = kmap_atomic(page);
235 	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
236 	kunmap_atomic(kaddr);
237 }
238 
239 static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
240 {
241 	void *kaddr = kmap_atomic(page);
242 	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
243 	kunmap_atomic(kaddr);
244 }
245 
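/*
 * Returns 1 if the caller should go ahead and write *new_opcode at @vaddr,
 * 0 if there is nothing to do: the breakpoint is already installed when
 * registering, or already gone when unregistering.
 */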
246 static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
247 {
248 	uprobe_opcode_t old_opcode;
249 	bool is_swbp;
250 
251 	/*
252 	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
253 	 * We do not check if it is any other 'trap variant' which could
254 	 * be a conditional trap instruction such as the one powerpc supports.
255 	 *
256 	 * The logic is that we do not care if the underlying instruction
257 	 * is a trap variant; a uprobe always wins over any other (gdb)
258 	 * breakpoint.
259 	 */
260 	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
261 	is_swbp = is_swbp_insn(&old_opcode);
262 
263 	if (is_swbp_insn(new_opcode)) {
264 		if (is_swbp)		/* register: already installed? */
265 			return 0;
266 	} else {
267 		if (!is_swbp)		/* unregister: was it changed by us? */
268 			return 0;
269 	}
270 
271 	return 1;
272 }
273 
274 /*
275  * NOTE:
276  * Expect the breakpoint instruction to be the smallest instruction for
277  * the architecture. If an arch has variable-length instructions and the
278  * breakpoint instruction is not the smallest instruction
279  * supported by that architecture, then we need to modify is_trap_at_addr and
280  * uprobe_write_opcode accordingly. This would never be a problem for archs
281  * that have fixed-length instructions.
282  */
283 
284 /*
285  * uprobe_write_opcode - write the opcode at a given virtual address.
286  * @mm: the probed process address space.
287  * @vaddr: the virtual address to store the opcode.
288  * @opcode: opcode to be written at @vaddr.
289  *
290  * Called with mm->mmap_sem held (for read and with a reference to
291  * mm).
292  *
293  * For mm @mm, write the opcode at @vaddr.
294  * Return 0 (success) or a negative errno.
295  */
296 int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
297 			uprobe_opcode_t opcode)
298 {
299 	struct page *old_page, *new_page;
300 	struct vm_area_struct *vma;
301 	int ret;
302 
303 retry:
304 	/* Read the page with vaddr into memory */
305 	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma);
306 	if (ret <= 0)
307 		return ret;
308 
309 	ret = verify_opcode(old_page, vaddr, &opcode);
310 	if (ret <= 0)
311 		goto put_old;
312 
313 	ret = -ENOMEM;
314 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
315 	if (!new_page)
316 		goto put_old;
317 
318 	__SetPageUptodate(new_page);
319 
320 	copy_highpage(new_page, old_page);
321 	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
322 
323 	ret = anon_vma_prepare(vma);
324 	if (ret)
325 		goto put_new;
326 
327 	ret = __replace_page(vma, vaddr, old_page, new_page);
328 
329 put_new:
330 	page_cache_release(new_page);
331 put_old:
332 	put_page(old_page);
333 
334 	if (unlikely(ret == -EAGAIN))
335 		goto retry;
336 	return ret;
337 }
338 
339 /**
340  * set_swbp - store breakpoint at a given address.
341  * @auprobe: arch specific probepoint information.
342  * @mm: the probed process address space.
343  * @vaddr: the virtual address to insert the opcode.
344  *
345  * For mm @mm, store the breakpoint instruction at @vaddr.
346  * Return 0 (success) or a negative errno.
347  */
348 int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
349 {
350 	return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
351 }
352 
353 /**
354  * set_orig_insn - Restore the original instruction.
355  * @mm: the probed process address space.
356  * @auprobe: arch specific probepoint information.
357  * @vaddr: the virtual address to insert the opcode.
358  *
359  * For mm @mm, restore the original opcode (opcode) at @vaddr.
360  * Return 0 (success) or a negative errno.
361  */
362 int __weak
363 set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
364 {
365 	return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)&auprobe->insn);
366 }
367 
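/*
 * Uprobes live in an rbtree keyed by (inode, offset); match_uprobe()
 * defines that total order: compare inodes (by pointer value) first,
 * then offsets.
 */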
368 static int match_uprobe(struct uprobe *l, struct uprobe *r)
369 {
370 	if (l->inode < r->inode)
371 		return -1;
372 
373 	if (l->inode > r->inode)
374 		return 1;
375 
376 	if (l->offset < r->offset)
377 		return -1;
378 
379 	if (l->offset > r->offset)
380 		return 1;
381 
382 	return 0;
383 }
384 
385 static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
386 {
387 	struct uprobe u = { .inode = inode, .offset = offset };
388 	struct rb_node *n = uprobes_tree.rb_node;
389 	struct uprobe *uprobe;
390 	int match;
391 
392 	while (n) {
393 		uprobe = rb_entry(n, struct uprobe, rb_node);
394 		match = match_uprobe(&u, uprobe);
395 		if (!match) {
396 			atomic_inc(&uprobe->ref);
397 			return uprobe;
398 		}
399 
400 		if (match < 0)
401 			n = n->rb_left;
402 		else
403 			n = n->rb_right;
404 	}
405 	return NULL;
406 }
407 
408 /*
409  * Find a uprobe corresponding to a given inode:offset
410  * Acquires uprobes_treelock
411  */
412 static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
413 {
414 	struct uprobe *uprobe;
415 
416 	spin_lock(&uprobes_treelock);
417 	uprobe = __find_uprobe(inode, offset);
418 	spin_unlock(&uprobes_treelock);
419 
420 	return uprobe;
421 }
422 
423 static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
424 {
425 	struct rb_node **p = &uprobes_tree.rb_node;
426 	struct rb_node *parent = NULL;
427 	struct uprobe *u;
428 	int match;
429 
430 	while (*p) {
431 		parent = *p;
432 		u = rb_entry(parent, struct uprobe, rb_node);
433 		match = match_uprobe(uprobe, u);
434 		if (!match) {
435 			atomic_inc(&u->ref);
436 			return u;
437 		}
438 
439 		if (match < 0)
440 			p = &parent->rb_left;
441 		else
442 			p = &parent->rb_right;
443 
444 	}
445 
446 	u = NULL;
447 	rb_link_node(&uprobe->rb_node, parent, p);
448 	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
449 	/* get access + creation ref */
450 	atomic_set(&uprobe->ref, 2);
451 
452 	return u;
453 }
454 
455 /*
456  * Acquire uprobes_treelock.
457  * Matching uprobe already exists in rbtree;
458  *	increment (access refcount) and return the matching uprobe.
459  *
460  * No matching uprobe; insert the uprobe in rb_tree;
461  *	get a double refcount (access + creation) and return NULL.
462  */
463 static struct uprobe *insert_uprobe(struct uprobe *uprobe)
464 {
465 	struct uprobe *u;
466 
467 	spin_lock(&uprobes_treelock);
468 	u = __insert_uprobe(uprobe);
469 	spin_unlock(&uprobes_treelock);
470 
471 	return u;
472 }
473 
474 static void put_uprobe(struct uprobe *uprobe)
475 {
476 	if (atomic_dec_and_test(&uprobe->ref))
477 		kfree(uprobe);
478 }
479 
480 static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
481 {
482 	struct uprobe *uprobe, *cur_uprobe;
483 
484 	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
485 	if (!uprobe)
486 		return NULL;
487 
488 	uprobe->inode = igrab(inode);
489 	uprobe->offset = offset;
490 	init_rwsem(&uprobe->register_rwsem);
491 	init_rwsem(&uprobe->consumer_rwsem);
492 
493 	/* add to uprobes_tree, sorted on inode:offset */
494 	cur_uprobe = insert_uprobe(uprobe);
495 	/* a uprobe exists for this inode:offset combination */
496 	if (cur_uprobe) {
497 		kfree(uprobe);
498 		uprobe = cur_uprobe;
499 		iput(inode);
500 	}
501 
502 	return uprobe;
503 }
504 
505 static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
506 {
507 	down_write(&uprobe->consumer_rwsem);
508 	uc->next = uprobe->consumers;
509 	uprobe->consumers = uc;
510 	up_write(&uprobe->consumer_rwsem);
511 }
512 
513 /*
514  * For uprobe @uprobe, delete the consumer @uc.
515  * Return true if @uc was found and deleted successfully,
516  * false otherwise.
517  */
518 static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
519 {
520 	struct uprobe_consumer **con;
521 	bool ret = false;
522 
523 	down_write(&uprobe->consumer_rwsem);
524 	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
525 		if (*con == uc) {
526 			*con = uc->next;
527 			ret = true;
528 			break;
529 		}
530 	}
531 	up_write(&uprobe->consumer_rwsem);
532 
533 	return ret;
534 }
535 
536 static int __copy_insn(struct address_space *mapping, struct file *filp,
537 			void *insn, int nbytes, loff_t offset)
538 {
539 	struct page *page;
540 
541 	if (!mapping->a_ops->readpage)
542 		return -EIO;
543 	/*
544 	 * Ensure that the page that has the original instruction is
545 	 * populated and in page-cache.
546 	 */
547 	page = read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT, filp);
548 	if (IS_ERR(page))
549 		return PTR_ERR(page);
550 
551 	copy_from_page(page, offset, insn, nbytes);
552 	page_cache_release(page);
553 
554 	return 0;
555 }
556 
557 static int copy_insn(struct uprobe *uprobe, struct file *filp)
558 {
559 	struct address_space *mapping = uprobe->inode->i_mapping;
560 	loff_t offs = uprobe->offset;
561 	void *insn = &uprobe->arch.insn;
562 	int size = sizeof(uprobe->arch.insn);
563 	int len, err = -EIO;
564 
565 	/* Copy only available bytes, -EIO if nothing was read */
566 	do {
567 		if (offs >= i_size_read(uprobe->inode))
568 			break;
569 
570 		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
571 		err = __copy_insn(mapping, filp, insn, len, offs);
572 		if (err)
573 			break;
574 
575 		insn += len;
576 		offs += len;
577 		size -= len;
578 	} while (size);
579 
580 	return err;
581 }
582 
583 static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
584 				struct mm_struct *mm, unsigned long vaddr)
585 {
586 	int ret = 0;
587 
588 	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
589 		return ret;
590 
591 	/* TODO: move this into _register, until then we abuse this sem. */
592 	down_write(&uprobe->consumer_rwsem);
593 	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
594 		goto out;
595 
596 	ret = copy_insn(uprobe, file);
597 	if (ret)
598 		goto out;
599 
600 	ret = -ENOTSUPP;
601 	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
602 		goto out;
603 
604 	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
605 	if (ret)
606 		goto out;
607 
608 	/* uprobe_write_opcode() assumes we don't cross page boundary */
609 	BUG_ON((uprobe->offset & ~PAGE_MASK) +
610 			UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
611 
612 	smp_wmb(); /* pairs with rmb() in find_active_uprobe() */
613 	set_bit(UPROBE_COPY_INSN, &uprobe->flags);
614 
615  out:
616 	up_write(&uprobe->consumer_rwsem);
617 
618 	return ret;
619 }
620 
621 static inline bool consumer_filter(struct uprobe_consumer *uc,
622 				   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
623 {
624 	return !uc->filter || uc->filter(uc, ctx, mm);
625 }
626 
627 static bool filter_chain(struct uprobe *uprobe,
628 			 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
629 {
630 	struct uprobe_consumer *uc;
631 	bool ret = false;
632 
633 	down_read(&uprobe->consumer_rwsem);
634 	for (uc = uprobe->consumers; uc; uc = uc->next) {
635 		ret = consumer_filter(uc, ctx, mm);
636 		if (ret)
637 			break;
638 	}
639 	up_read(&uprobe->consumer_rwsem);
640 
641 	return ret;
642 }
643 
644 static int
645 install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
646 			struct vm_area_struct *vma, unsigned long vaddr)
647 {
648 	bool first_uprobe;
649 	int ret;
650 
651 	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
652 	if (ret)
653 		return ret;
654 
655 	/*
656 	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
657 	 * the task can hit this breakpoint right after __replace_page().
658 	 */
659 	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
660 	if (first_uprobe)
661 		set_bit(MMF_HAS_UPROBES, &mm->flags);
662 
663 	ret = set_swbp(&uprobe->arch, mm, vaddr);
664 	if (!ret)
665 		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
666 	else if (first_uprobe)
667 		clear_bit(MMF_HAS_UPROBES, &mm->flags);
668 
669 	return ret;
670 }
671 
672 static int
673 remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
674 {
675 	set_bit(MMF_RECALC_UPROBES, &mm->flags);
676 	return set_orig_insn(&uprobe->arch, mm, vaddr);
677 }
678 
679 static inline bool uprobe_is_active(struct uprobe *uprobe)
680 {
681 	return !RB_EMPTY_NODE(&uprobe->rb_node);
682 }
683 /*
684  * There could be threads that have already hit the breakpoint. They
685  * will recheck the current insn and restart if find_uprobe() fails.
686  * See find_active_uprobe().
687  */
688 static void delete_uprobe(struct uprobe *uprobe)
689 {
690 	if (WARN_ON(!uprobe_is_active(uprobe)))
691 		return;
692 
693 	spin_lock(&uprobes_treelock);
694 	rb_erase(&uprobe->rb_node, &uprobes_tree);
695 	spin_unlock(&uprobes_treelock);
696 	RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
697 	iput(uprobe->inode);
698 	put_uprobe(uprobe);
699 }
700 
701 struct map_info {
702 	struct map_info *next;
703 	struct mm_struct *mm;
704 	unsigned long vaddr;
705 };
706 
707 static inline struct map_info *free_map_info(struct map_info *info)
708 {
709 	struct map_info *next = info->next;
710 	kfree(info);
711 	return next;
712 }
713 
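/*
 * Walk the vmas mapping @mapping around @offset and collect (mm, vaddr)
 * pairs.  Allocations made under i_mmap_mutex use GFP_NOWAIT; if some of
 * them fail we only count the misses, drop the lock, allocate the missing
 * entries with GFP_KERNEL and retry the walk.
 */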
714 static struct map_info *
715 build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
716 {
717 	unsigned long pgoff = offset >> PAGE_SHIFT;
718 	struct vm_area_struct *vma;
719 	struct map_info *curr = NULL;
720 	struct map_info *prev = NULL;
721 	struct map_info *info;
722 	int more = 0;
723 
724  again:
725 	mutex_lock(&mapping->i_mmap_mutex);
726 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
727 		if (!valid_vma(vma, is_register))
728 			continue;
729 
730 		if (!prev && !more) {
731 			/*
732 			 * Needs GFP_NOWAIT to avoid i_mmap_mutex recursion through
733 			 * reclaim. This is optimistic, no harm done if it fails.
734 			 */
735 			prev = kmalloc(sizeof(struct map_info),
736 					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
737 			if (prev)
738 				prev->next = NULL;
739 		}
740 		if (!prev) {
741 			more++;
742 			continue;
743 		}
744 
745 		if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
746 			continue;
747 
748 		info = prev;
749 		prev = prev->next;
750 		info->next = curr;
751 		curr = info;
752 
753 		info->mm = vma->vm_mm;
754 		info->vaddr = offset_to_vaddr(vma, offset);
755 	}
756 	mutex_unlock(&mapping->i_mmap_mutex);
757 
758 	if (!more)
759 		goto out;
760 
761 	prev = curr;
762 	while (curr) {
763 		mmput(curr->mm);
764 		curr = curr->next;
765 	}
766 
767 	do {
768 		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
769 		if (!info) {
770 			curr = ERR_PTR(-ENOMEM);
771 			goto out;
772 		}
773 		info->next = prev;
774 		prev = info;
775 	} while (--more);
776 
777 	goto again;
778  out:
779 	while (prev)
780 		prev = free_map_info(prev);
781 	return curr;
782 }
783 
784 static int
785 register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
786 {
787 	bool is_register = !!new;
788 	struct map_info *info;
789 	int err = 0;
790 
791 	percpu_down_write(&dup_mmap_sem);
792 	info = build_map_info(uprobe->inode->i_mapping,
793 					uprobe->offset, is_register);
794 	if (IS_ERR(info)) {
795 		err = PTR_ERR(info);
796 		goto out;
797 	}
798 
799 	while (info) {
800 		struct mm_struct *mm = info->mm;
801 		struct vm_area_struct *vma;
802 
803 		if (err && is_register)
804 			goto free;
805 
806 		down_write(&mm->mmap_sem);
807 		vma = find_vma(mm, info->vaddr);
808 		if (!vma || !valid_vma(vma, is_register) ||
809 		    file_inode(vma->vm_file) != uprobe->inode)
810 			goto unlock;
811 
812 		if (vma->vm_start > info->vaddr ||
813 		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
814 			goto unlock;
815 
816 		if (is_register) {
817 			/* consult only the "caller", new consumer. */
818 			if (consumer_filter(new,
819 					UPROBE_FILTER_REGISTER, mm))
820 				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
821 		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
822 			if (!filter_chain(uprobe,
823 					UPROBE_FILTER_UNREGISTER, mm))
824 				err |= remove_breakpoint(uprobe, mm, info->vaddr);
825 		}
826 
827  unlock:
828 		up_write(&mm->mmap_sem);
829  free:
830 		mmput(mm);
831 		info = free_map_info(info);
832 	}
833  out:
834 	percpu_up_write(&dup_mmap_sem);
835 	return err;
836 }
837 
838 static int __uprobe_register(struct uprobe *uprobe, struct uprobe_consumer *uc)
839 {
840 	consumer_add(uprobe, uc);
841 	return register_for_each_vma(uprobe, uc);
842 }
843 
844 static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
845 {
846 	int err;
847 
848 	if (!consumer_del(uprobe, uc))	/* WARN? */
849 		return;
850 
851 	err = register_for_each_vma(uprobe, NULL);
852 	/* TODO : can't unregister? schedule a worker thread */
853 	if (!uprobe->consumers && !err)
854 		delete_uprobe(uprobe);
855 }
856 
857 /*
858  * uprobe_register - register a probe
859  * @inode: the file in which the probe has to be placed.
860  * @offset: offset from the start of the file.
861  * @uc: information on how to handle the probe.
862  *
863  * Apart from the access refcount, uprobe_register() takes a creation
864  * refcount (through alloc_uprobe) if and only if this @uprobe is getting
865  * inserted into the rbtree (i.e. the first consumer for an @inode:@offset
866  * tuple).  The creation refcount stops uprobe_unregister from freeing the
867  * @uprobe even before the register operation is complete. The creation
868  * refcount is released when the last @uc for the @uprobe
869  * unregisters.
870  *
871  * Return a negative errno if it cannot successfully install probes,
872  * else return 0 (success).
873  */
874 int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
875 {
876 	struct uprobe *uprobe;
877 	int ret;
878 
879 	/* Uprobe must have at least one set consumer */
880 	if (!uc->handler && !uc->ret_handler)
881 		return -EINVAL;
882 
883 	/* Racy, just to catch the obvious mistakes */
884 	if (offset > i_size_read(inode))
885 		return -EINVAL;
886 
887  retry:
888 	uprobe = alloc_uprobe(inode, offset);
889 	if (!uprobe)
890 		return -ENOMEM;
891 	/*
892 	 * We can race with uprobe_unregister()->delete_uprobe().
893 	 * Check uprobe_is_active() and retry if it is false.
894 	 */
895 	down_write(&uprobe->register_rwsem);
896 	ret = -EAGAIN;
897 	if (likely(uprobe_is_active(uprobe))) {
898 		ret = __uprobe_register(uprobe, uc);
899 		if (ret)
900 			__uprobe_unregister(uprobe, uc);
901 	}
902 	up_write(&uprobe->register_rwsem);
903 	put_uprobe(uprobe);
904 
905 	if (unlikely(ret == -EAGAIN))
906 		goto retry;
907 	return ret;
908 }
909 EXPORT_SYMBOL_GPL(uprobe_register);
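/*
 * A minimal usage sketch (illustrative only; the handler name and the
 * file offset below are made up and not part of this file):
 *
 *	static int my_handler(struct uprobe_consumer *uc, struct pt_regs *regs)
 *	{
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer my_uc = { .handler = my_handler };
 *
 *	int err = uprobe_register(inode, 0x4a0, &my_uc);
 *	...
 *	uprobe_unregister(inode, 0x4a0, &my_uc);
 */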
910 
911 /*
912  * uprobe_apply - add or remove breakpoints for an already registered probe.
913  * @inode: the file in which the probe is placed.
914  * @offset: offset from the start of the file.
915  * @uc: consumer which wants to add more or remove some breakpoints
916  * @add: add or remove the breakpoints
917  */
918 int uprobe_apply(struct inode *inode, loff_t offset,
919 			struct uprobe_consumer *uc, bool add)
920 {
921 	struct uprobe *uprobe;
922 	struct uprobe_consumer *con;
923 	int ret = -ENOENT;
924 
925 	uprobe = find_uprobe(inode, offset);
926 	if (!uprobe)
927 		return ret;
928 
929 	down_write(&uprobe->register_rwsem);
930 	for (con = uprobe->consumers; con && con != uc ; con = con->next)
931 		;
932 	if (con)
933 		ret = register_for_each_vma(uprobe, add ? uc : NULL);
934 	up_write(&uprobe->register_rwsem);
935 	put_uprobe(uprobe);
936 
937 	return ret;
938 }
939 
940 /*
941  * uprobe_unregister - unregister an already registered probe.
942  * @inode: the file from which the probe has to be removed.
943  * @offset: offset from the start of the file.
944  * @uc: identifies which probe if multiple probes are colocated.
945  */
946 void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
947 {
948 	struct uprobe *uprobe;
949 
950 	uprobe = find_uprobe(inode, offset);
951 	if (!uprobe)
952 		return;
953 
954 	down_write(&uprobe->register_rwsem);
955 	__uprobe_unregister(uprobe, uc);
956 	up_write(&uprobe->register_rwsem);
957 	put_uprobe(uprobe);
958 }
959 EXPORT_SYMBOL_GPL(uprobe_unregister);
960 
961 static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
962 {
963 	struct vm_area_struct *vma;
964 	int err = 0;
965 
966 	down_read(&mm->mmap_sem);
967 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
968 		unsigned long vaddr;
969 		loff_t offset;
970 
971 		if (!valid_vma(vma, false) ||
972 		    file_inode(vma->vm_file) != uprobe->inode)
973 			continue;
974 
975 		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
976 		if (uprobe->offset <  offset ||
977 		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
978 			continue;
979 
980 		vaddr = offset_to_vaddr(vma, uprobe->offset);
981 		err |= remove_breakpoint(uprobe, mm, vaddr);
982 	}
983 	up_read(&mm->mmap_sem);
984 
985 	return err;
986 }
987 
988 static struct rb_node *
989 find_node_in_range(struct inode *inode, loff_t min, loff_t max)
990 {
991 	struct rb_node *n = uprobes_tree.rb_node;
992 
993 	while (n) {
994 		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);
995 
996 		if (inode < u->inode) {
997 			n = n->rb_left;
998 		} else if (inode > u->inode) {
999 			n = n->rb_right;
1000 		} else {
1001 			if (max < u->offset)
1002 				n = n->rb_left;
1003 			else if (min > u->offset)
1004 				n = n->rb_right;
1005 			else
1006 				break;
1007 		}
1008 	}
1009 
1010 	return n;
1011 }
1012 
1013 /*
1014  * For a given range in vma, build a list of probes that need to be inserted.
1015  */
1016 static void build_probe_list(struct inode *inode,
1017 				struct vm_area_struct *vma,
1018 				unsigned long start, unsigned long end,
1019 				struct list_head *head)
1020 {
1021 	loff_t min, max;
1022 	struct rb_node *n, *t;
1023 	struct uprobe *u;
1024 
1025 	INIT_LIST_HEAD(head);
1026 	min = vaddr_to_offset(vma, start);
1027 	max = min + (end - start) - 1;
1028 
1029 	spin_lock(&uprobes_treelock);
1030 	n = find_node_in_range(inode, min, max);
1031 	if (n) {
1032 		for (t = n; t; t = rb_prev(t)) {
1033 			u = rb_entry(t, struct uprobe, rb_node);
1034 			if (u->inode != inode || u->offset < min)
1035 				break;
1036 			list_add(&u->pending_list, head);
1037 			atomic_inc(&u->ref);
1038 		}
1039 		for (t = n; (t = rb_next(t)); ) {
1040 			u = rb_entry(t, struct uprobe, rb_node);
1041 			if (u->inode != inode || u->offset > max)
1042 				break;
1043 			list_add(&u->pending_list, head);
1044 			atomic_inc(&u->ref);
1045 		}
1046 	}
1047 	spin_unlock(&uprobes_treelock);
1048 }
1049 
1050 /*
1051  * Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
1052  *
1053  * Currently we ignore all errors and always return 0; the callers
1054  * can't handle the failure anyway.
1055  */
1056 int uprobe_mmap(struct vm_area_struct *vma)
1057 {
1058 	struct list_head tmp_list;
1059 	struct uprobe *uprobe, *u;
1060 	struct inode *inode;
1061 
1062 	if (no_uprobe_events() || !valid_vma(vma, true))
1063 		return 0;
1064 
1065 	inode = file_inode(vma->vm_file);
1066 	if (!inode)
1067 		return 0;
1068 
1069 	mutex_lock(uprobes_mmap_hash(inode));
1070 	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
1071 	/*
1072 	 * We can race with uprobe_unregister(); this uprobe can already be
1073 	 * removed. But in this case filter_chain() must return false, since all
1074 	 * consumers have gone away.
1075 	 */
1076 	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
1077 		if (!fatal_signal_pending(current) &&
1078 		    filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
1079 			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
1080 			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
1081 		}
1082 		put_uprobe(uprobe);
1083 	}
1084 	mutex_unlock(uprobes_mmap_hash(inode));
1085 
1086 	return 0;
1087 }
1088 
1089 static bool
1090 vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1091 {
1092 	loff_t min, max;
1093 	struct inode *inode;
1094 	struct rb_node *n;
1095 
1096 	inode = file_inode(vma->vm_file);
1097 
1098 	min = vaddr_to_offset(vma, start);
1099 	max = min + (end - start) - 1;
1100 
1101 	spin_lock(&uprobes_treelock);
1102 	n = find_node_in_range(inode, min, max);
1103 	spin_unlock(&uprobes_treelock);
1104 
1105 	return !!n;
1106 }
1107 
1108 /*
1109  * Called in context of a munmap of a vma.
1110  */
1111 void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1112 {
1113 	if (no_uprobe_events() || !valid_vma(vma, false))
1114 		return;
1115 
1116 	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
1117 		return;
1118 
1119 	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
1120 	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
1121 		return;
1122 
1123 	if (vma_has_uprobes(vma, start, end))
1124 		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
1125 }
1126 
1127 /* Slot allocation for XOL */
1128 static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
1129 {
1130 	int ret = -EALREADY;
1131 
1132 	down_write(&mm->mmap_sem);
1133 	if (mm->uprobes_state.xol_area)
1134 		goto fail;
1135 
1136 	if (!area->vaddr) {
1137 		/* Try to map as high as possible, this is only a hint. */
1138 		area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
1139 						PAGE_SIZE, 0, 0);
1140 		if (area->vaddr & ~PAGE_MASK) {
1141 			ret = area->vaddr;
1142 			goto fail;
1143 		}
1144 	}
1145 
1146 	ret = install_special_mapping(mm, area->vaddr, PAGE_SIZE,
1147 				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, &area->page);
1148 	if (ret)
1149 		goto fail;
1150 
1151 	smp_wmb();	/* pairs with get_xol_area() */
1152 	mm->uprobes_state.xol_area = area;
1153  fail:
1154 	up_write(&mm->mmap_sem);
1155 
1156 	return ret;
1157 }
1158 
1159 static struct xol_area *__create_xol_area(unsigned long vaddr)
1160 {
1161 	struct mm_struct *mm = current->mm;
1162 	uprobe_opcode_t insn = UPROBE_SWBP_INSN;
1163 	struct xol_area *area;
1164 
1165 	area = kmalloc(sizeof(*area), GFP_KERNEL);
1166 	if (unlikely(!area))
1167 		goto out;
1168 
1169 	area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
1170 	if (!area->bitmap)
1171 		goto free_area;
1172 
1173 	area->page = alloc_page(GFP_HIGHUSER);
1174 	if (!area->page)
1175 		goto free_bitmap;
1176 
1177 	area->vaddr = vaddr;
1178 	init_waitqueue_head(&area->wq);
1179 	/* Reserve the 1st slot for get_trampoline_vaddr() */
1180 	set_bit(0, area->bitmap);
1181 	atomic_set(&area->slot_count, 1);
1182 	copy_to_page(area->page, 0, &insn, UPROBE_SWBP_INSN_SIZE);
1183 
1184 	if (!xol_add_vma(mm, area))
1185 		return area;
1186 
1187 	__free_page(area->page);
1188  free_bitmap:
1189 	kfree(area->bitmap);
1190  free_area:
1191 	kfree(area);
1192  out:
1193 	return NULL;
1194 }
1195 
1196 /*
1197  * get_xol_area - Allocate process's xol_area if necessary.
1198  * This area will be used for storing instructions for execution out of line.
1199  *
1200  * Returns the allocated area or NULL.
1201  */
1202 static struct xol_area *get_xol_area(void)
1203 {
1204 	struct mm_struct *mm = current->mm;
1205 	struct xol_area *area;
1206 
1207 	if (!mm->uprobes_state.xol_area)
1208 		__create_xol_area(0);
1209 
1210 	area = mm->uprobes_state.xol_area;
1211 	smp_read_barrier_depends();	/* pairs with wmb in xol_add_vma() */
1212 	return area;
1213 }
1214 
1215 /*
1216  * uprobe_clear_state - Free the area allocated for slots.
1217  */
1218 void uprobe_clear_state(struct mm_struct *mm)
1219 {
1220 	struct xol_area *area = mm->uprobes_state.xol_area;
1221 
1222 	if (!area)
1223 		return;
1224 
1225 	put_page(area->page);
1226 	kfree(area->bitmap);
1227 	kfree(area);
1228 }
1229 
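/*
 * dup_mmap_sem serializes breakpoint insertion/removal against fork():
 * register_for_each_vma() takes it for writing, while dup_mmap() brackets
 * the copying of the parent's vmas with uprobe_start_dup_mmap() and
 * uprobe_end_dup_mmap(), so a child's mm cannot miss a breakpoint that is
 * being installed while it forks.
 */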
1230 void uprobe_start_dup_mmap(void)
1231 {
1232 	percpu_down_read(&dup_mmap_sem);
1233 }
1234 
1235 void uprobe_end_dup_mmap(void)
1236 {
1237 	percpu_up_read(&dup_mmap_sem);
1238 }
1239 
1240 void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
1241 {
1242 	newmm->uprobes_state.xol_area = NULL;
1243 
1244 	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
1245 		set_bit(MMF_HAS_UPROBES, &newmm->flags);
1246 		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
1247 		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
1248 	}
1249 }
1250 
1251 /*
1252  * Search for a free XOL slot; if all slots are busy, wait for one to be freed.
1253  */
1254 static unsigned long xol_take_insn_slot(struct xol_area *area)
1255 {
1256 	unsigned long slot_addr;
1257 	int slot_nr;
1258 
1259 	do {
1260 		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
1261 		if (slot_nr < UINSNS_PER_PAGE) {
1262 			if (!test_and_set_bit(slot_nr, area->bitmap))
1263 				break;
1264 
1265 			slot_nr = UINSNS_PER_PAGE;
1266 			continue;
1267 		}
1268 		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
1269 	} while (slot_nr >= UINSNS_PER_PAGE);
1270 
1271 	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
1272 	atomic_inc(&area->slot_count);
1273 
1274 	return slot_addr;
1275 }
1276 
1277 /*
1278  * xol_get_insn_slot - allocate a slot for xol.
1279  * Returns the allocated slot address or 0.
1280  */
1281 static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
1282 {
1283 	struct xol_area *area;
1284 	unsigned long xol_vaddr;
1285 
1286 	area = get_xol_area();
1287 	if (!area)
1288 		return 0;
1289 
1290 	xol_vaddr = xol_take_insn_slot(area);
1291 	if (unlikely(!xol_vaddr))
1292 		return 0;
1293 
1294 	/* Initialize the slot */
1295 	copy_to_page(area->page, xol_vaddr,
1296 			&uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
1297 	/*
1298 	 * We probably need flush_icache_user_range() but it needs vma.
1299 	 * This should work on supported architectures too.
1300 	 */
1301 	flush_dcache_page(area->page);
1302 
1303 	return xol_vaddr;
1304 }
1305 
1306 /*
1307  * xol_free_insn_slot - If slot was earlier allocated by
1308  * @xol_get_insn_slot(), make the slot available for
1309  * subsequent requests.
1310  */
1311 static void xol_free_insn_slot(struct task_struct *tsk)
1312 {
1313 	struct xol_area *area;
1314 	unsigned long vma_end;
1315 	unsigned long slot_addr;
1316 
1317 	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
1318 		return;
1319 
1320 	slot_addr = tsk->utask->xol_vaddr;
1321 	if (unlikely(!slot_addr))
1322 		return;
1323 
1324 	area = tsk->mm->uprobes_state.xol_area;
1325 	vma_end = area->vaddr + PAGE_SIZE;
1326 	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
1327 		unsigned long offset;
1328 		int slot_nr;
1329 
1330 		offset = slot_addr - area->vaddr;
1331 		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
1332 		if (slot_nr >= UINSNS_PER_PAGE)
1333 			return;
1334 
1335 		clear_bit(slot_nr, area->bitmap);
1336 		atomic_dec(&area->slot_count);
1337 		if (waitqueue_active(&area->wq))
1338 			wake_up(&area->wq);
1339 
1340 		tsk->utask->xol_vaddr = 0;
1341 	}
1342 }
1343 
1344 /**
1345  * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
1346  * @regs: Reflects the saved state of the task after it has hit a breakpoint
1347  * instruction.
1348  * Return the address of the breakpoint instruction.
1349  */
1350 unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
1351 {
1352 	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
1353 }
1354 
1355 /*
1356  * Called with no locks held.
1357  * Called in the context of an exiting or an exec-ing thread.
1358  */
1359 void uprobe_free_utask(struct task_struct *t)
1360 {
1361 	struct uprobe_task *utask = t->utask;
1362 	struct return_instance *ri, *tmp;
1363 
1364 	if (!utask)
1365 		return;
1366 
1367 	if (utask->active_uprobe)
1368 		put_uprobe(utask->active_uprobe);
1369 
1370 	ri = utask->return_instances;
1371 	while (ri) {
1372 		tmp = ri;
1373 		ri = ri->next;
1374 
1375 		put_uprobe(tmp->uprobe);
1376 		kfree(tmp);
1377 	}
1378 
1379 	xol_free_insn_slot(t);
1380 	kfree(utask);
1381 	t->utask = NULL;
1382 }
1383 
1384 /*
1385  * Allocate a uprobe_task object for the task if necessary.
1386  * Called when the thread hits a breakpoint.
1387  *
1388  * Returns:
1389  * - pointer to new uprobe_task on success
1390  * - NULL otherwise
1391  */
1392 static struct uprobe_task *get_utask(void)
1393 {
1394 	if (!current->utask)
1395 		current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1396 	return current->utask;
1397 }
1398 
1399 static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
1400 {
1401 	struct uprobe_task *n_utask;
1402 	struct return_instance **p, *o, *n;
1403 
1404 	n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1405 	if (!n_utask)
1406 		return -ENOMEM;
1407 	t->utask = n_utask;
1408 
1409 	p = &n_utask->return_instances;
1410 	for (o = o_utask->return_instances; o; o = o->next) {
1411 		n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
1412 		if (!n)
1413 			return -ENOMEM;
1414 
1415 		*n = *o;
1416 		atomic_inc(&n->uprobe->ref);
1417 		n->next = NULL;
1418 
1419 		*p = n;
1420 		p = &n->next;
1421 		n_utask->depth++;
1422 	}
1423 
1424 	return 0;
1425 }
1426 
1427 static void uprobe_warn(struct task_struct *t, const char *msg)
1428 {
1429 	pr_warn("uprobe: %s:%d failed to %s\n",
1430 			current->comm, current->pid, msg);
1431 }
1432 
1433 static void dup_xol_work(struct callback_head *work)
1434 {
1435 	if (current->flags & PF_EXITING)
1436 		return;
1437 
1438 	if (!__create_xol_area(current->utask->dup_xol_addr))
1439 		uprobe_warn(current, "dup xol area");
1440 }
1441 
1442 /*
1443  * Called in context of a new clone/fork from copy_process.
1444  */
1445 void uprobe_copy_process(struct task_struct *t, unsigned long flags)
1446 {
1447 	struct uprobe_task *utask = current->utask;
1448 	struct mm_struct *mm = current->mm;
1449 	struct xol_area *area;
1450 
1451 	t->utask = NULL;
1452 
1453 	if (!utask || !utask->return_instances)
1454 		return;
1455 
1456 	if (mm == t->mm && !(flags & CLONE_VFORK))
1457 		return;
1458 
1459 	if (dup_utask(t, utask))
1460 		return uprobe_warn(t, "dup ret instances");
1461 
1462 	/* The task can fork() after dup_xol_work() fails */
1463 	area = mm->uprobes_state.xol_area;
1464 	if (!area)
1465 		return uprobe_warn(t, "dup xol area");
1466 
1467 	if (mm == t->mm)
1468 		return;
1469 
1470 	t->utask->dup_xol_addr = area->vaddr;
1471 	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
1472 	task_work_add(t, &t->utask->dup_xol_work, true);
1473 }
1474 
1475 /*
1476  * The current area->vaddr notion assumes the trampoline address is always
1477  * equal to area->vaddr.
1478  *
1479  * Returns -1 in case the xol_area is not allocated.
1480  */
1481 static unsigned long get_trampoline_vaddr(void)
1482 {
1483 	struct xol_area *area;
1484 	unsigned long trampoline_vaddr = -1;
1485 
1486 	area = current->mm->uprobes_state.xol_area;
1487 	smp_read_barrier_depends();
1488 	if (area)
1489 		trampoline_vaddr = area->vaddr;
1490 
1491 	return trampoline_vaddr;
1492 }
1493 
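/*
 * On entry to a uretprobed function, hijack its return address: remember
 * the original return address in a new return_instance pushed onto
 * utask->return_instances and make the function return to the XOL
 * trampoline instead, so that handle_trampoline() can invoke the
 * ->ret_handler consumers on return.  If the address being hijacked is
 * already the trampoline, the new instance is marked chained and reuses
 * the original return address of the instance below it on the stack.
 */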
1494 static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
1495 {
1496 	struct return_instance *ri;
1497 	struct uprobe_task *utask;
1498 	unsigned long orig_ret_vaddr, trampoline_vaddr;
1499 	bool chained = false;
1500 
1501 	if (!get_xol_area())
1502 		return;
1503 
1504 	utask = get_utask();
1505 	if (!utask)
1506 		return;
1507 
1508 	if (utask->depth >= MAX_URETPROBE_DEPTH) {
1509 		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
1510 				" nestedness limit pid/tgid=%d/%d\n",
1511 				current->pid, current->tgid);
1512 		return;
1513 	}
1514 
1515 	ri = kzalloc(sizeof(struct return_instance), GFP_KERNEL);
1516 	if (!ri)
1517 		goto fail;
1518 
1519 	trampoline_vaddr = get_trampoline_vaddr();
1520 	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
1521 	if (orig_ret_vaddr == -1)
1522 		goto fail;
1523 
1524 	/*
1525 	 * We don't want to keep the trampoline address on the stack, rather keep
1526 	 * the original return address of the first caller through all the
1527 	 * subsequent instances. This also makes breakpoint unwinding easier.
1528 	 */
1529 	if (orig_ret_vaddr == trampoline_vaddr) {
1530 		if (!utask->return_instances) {
1531 			/*
1532 			 * This situation is not possible. Likely we have an
1533 			 * attack from user-space.
1534 			 */
1535 			pr_warn("uprobe: unable to set uretprobe pid/tgid=%d/%d\n",
1536 						current->pid, current->tgid);
1537 			goto fail;
1538 		}
1539 
1540 		chained = true;
1541 		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
1542 	}
1543 
1544 	atomic_inc(&uprobe->ref);
1545 	ri->uprobe = uprobe;
1546 	ri->func = instruction_pointer(regs);
1547 	ri->orig_ret_vaddr = orig_ret_vaddr;
1548 	ri->chained = chained;
1549 
1550 	utask->depth++;
1551 
1552 	/* add instance to the stack */
1553 	ri->next = utask->return_instances;
1554 	utask->return_instances = ri;
1555 
1556 	return;
1557 
1558  fail:
1559 	kfree(ri);
1560 }
1561 
1562 /* Prepare to single-step probed instruction out of line. */
1563 static int
1564 pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
1565 {
1566 	struct uprobe_task *utask;
1567 	unsigned long xol_vaddr;
1568 	int err;
1569 
1570 	utask = get_utask();
1571 	if (!utask)
1572 		return -ENOMEM;
1573 
1574 	xol_vaddr = xol_get_insn_slot(uprobe);
1575 	if (!xol_vaddr)
1576 		return -ENOMEM;
1577 
1578 	utask->xol_vaddr = xol_vaddr;
1579 	utask->vaddr = bp_vaddr;
1580 
1581 	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
1582 	if (unlikely(err)) {
1583 		xol_free_insn_slot(current);
1584 		return err;
1585 	}
1586 
1587 	utask->active_uprobe = uprobe;
1588 	utask->state = UTASK_SSTEP;
1589 	return 0;
1590 }
1591 
1592 /*
1593  * If we are single-stepping, then ensure that this thread does not receive
1594  * non-fatal signals until the single-step completes.  When the xol insn itself
1595  * triggers the signal, restart the original insn even if the task is
1596  * already SIGKILL'ed (since the coredump should report the correct ip).  This
1597  * is even more important if the task has a handler for SIGSEGV/etc: the
1598  * _same_ instruction should be repeated again after return from the signal
1599  * handler, and SSTEP can never finish in this case.
1600  */
1601 bool uprobe_deny_signal(void)
1602 {
1603 	struct task_struct *t = current;
1604 	struct uprobe_task *utask = t->utask;
1605 
1606 	if (likely(!utask || !utask->active_uprobe))
1607 		return false;
1608 
1609 	WARN_ON_ONCE(utask->state != UTASK_SSTEP);
1610 
1611 	if (signal_pending(t)) {
1612 		spin_lock_irq(&t->sighand->siglock);
1613 		clear_tsk_thread_flag(t, TIF_SIGPENDING);
1614 		spin_unlock_irq(&t->sighand->siglock);
1615 
1616 		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
1617 			utask->state = UTASK_SSTEP_TRAPPED;
1618 			set_tsk_thread_flag(t, TIF_UPROBE);
1619 			set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
1620 		}
1621 	}
1622 
1623 	return true;
1624 }
1625 
1626 static void mmf_recalc_uprobes(struct mm_struct *mm)
1627 {
1628 	struct vm_area_struct *vma;
1629 
1630 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
1631 		if (!valid_vma(vma, false))
1632 			continue;
1633 		/*
1634 		 * This is not strictly accurate; we can race with
1635 		 * uprobe_unregister() and see the already removed
1636 		 * uprobe if delete_uprobe() was not yet called.
1637 		 * Or this uprobe can be filtered out.
1638 		 */
1639 		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
1640 			return;
1641 	}
1642 
1643 	clear_bit(MMF_HAS_UPROBES, &mm->flags);
1644 }
1645 
1646 static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
1647 {
1648 	struct page *page;
1649 	uprobe_opcode_t opcode;
1650 	int result;
1651 
1652 	pagefault_disable();
1653 	result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
1654 							sizeof(opcode));
1655 	pagefault_enable();
1656 
1657 	if (likely(result == 0))
1658 		goto out;
1659 
1660 	result = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
1661 	if (result < 0)
1662 		return result;
1663 
1664 	copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
1665 	put_page(page);
1666  out:
1667 	/* This needs to return true for any variant of the trap insn */
1668 	return is_trap_insn(&opcode);
1669 }
1670 
1671 static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
1672 {
1673 	struct mm_struct *mm = current->mm;
1674 	struct uprobe *uprobe = NULL;
1675 	struct vm_area_struct *vma;
1676 
1677 	down_read(&mm->mmap_sem);
1678 	vma = find_vma(mm, bp_vaddr);
1679 	if (vma && vma->vm_start <= bp_vaddr) {
1680 		if (valid_vma(vma, false)) {
1681 			struct inode *inode = file_inode(vma->vm_file);
1682 			loff_t offset = vaddr_to_offset(vma, bp_vaddr);
1683 
1684 			uprobe = find_uprobe(inode, offset);
1685 		}
1686 
1687 		if (!uprobe)
1688 			*is_swbp = is_trap_at_addr(mm, bp_vaddr);
1689 	} else {
1690 		*is_swbp = -EFAULT;
1691 	}
1692 
1693 	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
1694 		mmf_recalc_uprobes(mm);
1695 	up_read(&mm->mmap_sem);
1696 
1697 	return uprobe;
1698 }
1699 
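/*
 * Invoke every consumer's ->handler for this uprobe.  The breakpoint is
 * removed from current->mm (via unapply_uprobe()) only if all consumers
 * requested UPROBE_HANDLER_REMOVE; a uretprobe is prepared if at least
 * one consumer has a ->ret_handler and no removal is pending.
 */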
1700 static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
1701 {
1702 	struct uprobe_consumer *uc;
1703 	int remove = UPROBE_HANDLER_REMOVE;
1704 	bool need_prep = false; /* prepare return uprobe, when needed */
1705 
1706 	down_read(&uprobe->register_rwsem);
1707 	for (uc = uprobe->consumers; uc; uc = uc->next) {
1708 		int rc = 0;
1709 
1710 		if (uc->handler) {
1711 			rc = uc->handler(uc, regs);
1712 			WARN(rc & ~UPROBE_HANDLER_MASK,
1713 				"bad rc=0x%x from %pf()\n", rc, uc->handler);
1714 		}
1715 
1716 		if (uc->ret_handler)
1717 			need_prep = true;
1718 
1719 		remove &= rc;
1720 	}
1721 
1722 	if (need_prep && !remove)
1723 		prepare_uretprobe(uprobe, regs); /* put bp at return */
1724 
1725 	if (remove && uprobe->consumers) {
1726 		WARN_ON(!uprobe_is_active(uprobe));
1727 		unapply_uprobe(uprobe, current->mm);
1728 	}
1729 	up_read(&uprobe->register_rwsem);
1730 }
1731 
1732 static void
1733 handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
1734 {
1735 	struct uprobe *uprobe = ri->uprobe;
1736 	struct uprobe_consumer *uc;
1737 
1738 	down_read(&uprobe->register_rwsem);
1739 	for (uc = uprobe->consumers; uc; uc = uc->next) {
1740 		if (uc->ret_handler)
1741 			uc->ret_handler(uc, ri->func, regs);
1742 	}
1743 	up_read(&uprobe->register_rwsem);
1744 }
1745 
1746 static bool handle_trampoline(struct pt_regs *regs)
1747 {
1748 	struct uprobe_task *utask;
1749 	struct return_instance *ri, *tmp;
1750 	bool chained;
1751 
1752 	utask = current->utask;
1753 	if (!utask)
1754 		return false;
1755 
1756 	ri = utask->return_instances;
1757 	if (!ri)
1758 		return false;
1759 
1760 	/*
1761 	 * TODO: we should throw out the return_instances invalidated by
1762 	 * longjmp(); currently we assume that the probed function always
1763 	 * returns.
1764 	 */
1765 	instruction_pointer_set(regs, ri->orig_ret_vaddr);
1766 
1767 	for (;;) {
1768 		handle_uretprobe_chain(ri, regs);
1769 
1770 		chained = ri->chained;
1771 		put_uprobe(ri->uprobe);
1772 
1773 		tmp = ri;
1774 		ri = ri->next;
1775 		kfree(tmp);
1776 		utask->depth--;
1777 
1778 		if (!chained)
1779 			break;
1780 		BUG_ON(!ri);
1781 	}
1782 
1783 	utask->return_instances = ri;
1784 
1785 	return true;
1786 }
1787 
1788 bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
1789 {
1790 	return false;
1791 }
1792 
1793 /*
1794  * Run handler and ask thread to singlestep.
1795  * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
1796  */
1797 static void handle_swbp(struct pt_regs *regs)
1798 {
1799 	struct uprobe *uprobe;
1800 	unsigned long bp_vaddr;
1801 	int uninitialized_var(is_swbp);
1802 
1803 	bp_vaddr = uprobe_get_swbp_addr(regs);
1804 	if (bp_vaddr == get_trampoline_vaddr()) {
1805 		if (handle_trampoline(regs))
1806 			return;
1807 
1808 		pr_warn("uprobe: unable to handle uretprobe pid/tgid=%d/%d\n",
1809 						current->pid, current->tgid);
1810 	}
1811 
1812 	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
1813 	if (!uprobe) {
1814 		if (is_swbp > 0) {
1815 			/* No matching uprobe; signal SIGTRAP. */
1816 			send_sig(SIGTRAP, current, 0);
1817 		} else {
1818 			/*
1819 			 * Either we raced with uprobe_unregister() or we can't
1820 			 * access this memory. The latter is only possible if
1821 			 * another thread plays with our ->mm. In both cases
1822 			 * we can simply restart. If this vma was unmapped we
1823 			 * can pretend this insn was not executed yet and get
1824 			 * the (correct) SIGSEGV after restart.
1825 			 */
1826 			instruction_pointer_set(regs, bp_vaddr);
1827 		}
1828 		return;
1829 	}
1830 
1831 	/* change it in advance for ->handler() and restart */
1832 	instruction_pointer_set(regs, bp_vaddr);
1833 
1834 	/*
1835 	 * TODO: move copy_insn/etc into _register and remove this hack.
1836 	 * After we hit the bp, _unregister + _register can install the
1837 	 * new and not-yet-analyzed uprobe at the same address, restart.
1838 	 */
1839 	smp_rmb(); /* pairs with wmb() in install_breakpoint() */
1840 	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
1841 		goto out;
1842 
1843 	/* Tracing handlers use ->utask to communicate with fetch methods */
1844 	if (!get_utask())
1845 		goto out;
1846 
1847 	if (arch_uprobe_ignore(&uprobe->arch, regs))
1848 		goto out;
1849 
1850 	handler_chain(uprobe, regs);
1851 
1852 	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
1853 		goto out;
1854 
1855 	if (!pre_ssout(uprobe, regs, bp_vaddr))
1856 		return;
1857 
1858 	/* arch_uprobe_skip_sstep() succeeded, or restart if we can't single-step */
1859 out:
1860 	put_uprobe(uprobe);
1861 }
1862 
1863 /*
1864  * Perform required fix-ups and disable singlestep.
1865  * Allow pending signals to take effect.
1866  */
1867 static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
1868 {
1869 	struct uprobe *uprobe;
1870 	int err = 0;
1871 
1872 	uprobe = utask->active_uprobe;
1873 	if (utask->state == UTASK_SSTEP_ACK)
1874 		err = arch_uprobe_post_xol(&uprobe->arch, regs);
1875 	else if (utask->state == UTASK_SSTEP_TRAPPED)
1876 		arch_uprobe_abort_xol(&uprobe->arch, regs);
1877 	else
1878 		WARN_ON_ONCE(1);
1879 
1880 	put_uprobe(uprobe);
1881 	utask->active_uprobe = NULL;
1882 	utask->state = UTASK_RUNNING;
1883 	xol_free_insn_slot(current);
1884 
1885 	spin_lock_irq(&current->sighand->siglock);
1886 	recalc_sigpending(); /* see uprobe_deny_signal() */
1887 	spin_unlock_irq(&current->sighand->siglock);
1888 
1889 	if (unlikely(err)) {
1890 		uprobe_warn(current, "execute the probed insn, sending SIGILL.");
1891 		force_sig_info(SIGILL, SEND_SIG_FORCED, current);
1892 	}
1893 }
1894 
1895 /*
1896  * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
1897  * allows the thread to return from interrupt. After that handle_swbp()
1898  * sets utask->active_uprobe.
1899  *
1900  * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
1901  * and allows the thread to return from interrupt.
1902  *
1903  * While returning to userspace, thread notices the TIF_UPROBE flag and calls
1904  * uprobe_notify_resume().
1905  */
1906 void uprobe_notify_resume(struct pt_regs *regs)
1907 {
1908 	struct uprobe_task *utask;
1909 
1910 	clear_thread_flag(TIF_UPROBE);
1911 
1912 	utask = current->utask;
1913 	if (utask && utask->active_uprobe)
1914 		handle_singlestep(utask, regs);
1915 	else
1916 		handle_swbp(regs);
1917 }
1918 
1919 /*
1920  * uprobe_pre_sstep_notifier gets called from interrupt context as part of
1921  * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
1922  */
1923 int uprobe_pre_sstep_notifier(struct pt_regs *regs)
1924 {
1925 	if (!current->mm)
1926 		return 0;
1927 
1928 	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
1929 	    (!current->utask || !current->utask->return_instances))
1930 		return 0;
1931 
1932 	set_thread_flag(TIF_UPROBE);
1933 	return 1;
1934 }
1935 
1936 /*
1937  * uprobe_post_sstep_notifier gets called in interrupt context as part of notifier
1938  * mechanism. Set TIF_UPROBE flag and indicate completion of singlestep.
1939  */
1940 int uprobe_post_sstep_notifier(struct pt_regs *regs)
1941 {
1942 	struct uprobe_task *utask = current->utask;
1943 
1944 	if (!current->mm || !utask || !utask->active_uprobe)
1945 		/* task is currently not uprobed */
1946 		return 0;
1947 
1948 	utask->state = UTASK_SSTEP_ACK;
1949 	set_thread_flag(TIF_UPROBE);
1950 	return 1;
1951 }
1952 
1953 static struct notifier_block uprobe_exception_nb = {
1954 	.notifier_call		= arch_uprobe_exception_notify,
1955 	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
1956 };
1957 
1958 static int __init init_uprobes(void)
1959 {
1960 	int i;
1961 
1962 	for (i = 0; i < UPROBES_HASH_SZ; i++)
1963 		mutex_init(&uprobes_mmap_mutex[i]);
1964 
1965 	if (percpu_init_rwsem(&dup_mmap_sem))
1966 		return -ENOMEM;
1967 
1968 	return register_die_notifier(&uprobe_exception_nb);
1969 }
1970 __initcall(init_uprobes);
1971