/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/kdebug.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

/*
 * Some oddball architectures like 64-bit powerpc have function descriptors,
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
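
/*
 * Hedged sketch (not code from this tree) of how an architecture with
 * function descriptors might override the lookup; func_descr_t and its
 * ->entry field are this example's assumptions:
 *
 *	#define kprobe_lookup_name(name, addr)				\
 *	do {								\
 *		addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);	\
 *		if (addr)						\
 *			addr = (kprobe_opcode_t *)			\
 *				((func_descr_t *)addr)->entry;		\
 *	} while (0)
 */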

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
static atomic_t kprobe_count;

DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

static struct notifier_block kprobe_page_fault_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}

/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

retry:
	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page. Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}

/* Return 1 if the slot's page became empty and was collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		hlist_del(&kip->hlist);
		if (hlist_empty(&kprobe_insn_pages)) {
			INIT_HLIST_NODE(&kip->hlist);
			hlist_add_head(&kip->hlist,
				       &kprobe_insn_pages);
		} else {
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos, *next;

	/* Ensure no one is preempted while on a garbage slot */
	if (check_safety() != 0)
		return -EAGAIN;

	hlist_for_each_safe(pos, next, &kprobe_insn_pages) {
		int i;
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else {
				collect_one_slot(kip, i);
			}
			break;
		}
	}
	if (dirty && (++kprobe_garbage_slots > INSNS_PER_PAGE))
		collect_garbage_slots();
}
#endif
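
/*
 * Illustrative sketch of how arch code typically consumes the slot API
 * above (hypothetical arch_prepare_kprobe() fragment, not code from this
 * file): the probed instruction is copied into an executable slot so it
 * can later be single-stepped out of line.
 *
 *	p->ainsn.insn = get_insn_slot();
 *	if (!p->ainsn.insn)
 *		return -ENOMEM;
 *	memcpy(p->ainsn.insn, p->addr,
 *	       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 *
 * On teardown the slot is handed back; passing dirty = 1 defers reuse
 * until collect_garbage_slots() has run:
 *
 *	free_insn_slot(p->ainsn.insn, 1);
 */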

/* We have preemption disabled, so it is safe to use the __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}
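
/*
 * Typical caller sketch (hedged, not code from this file): an arch
 * breakpoint handler runs with preemption disabled and asks whether the
 * trapping address belongs to a registered probe; "probe_addr" stands in
 * for the arch-specific fault address:
 *
 *	struct kprobe *p;
 *
 *	preempt_disable();
 *	p = get_kprobe((void *)probe_addr);
 *	if (!p) {
 *		preempt_enable();
 *		return 0;	// not ours, let the kernel handle the trap
 *	}
 */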

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
	return;
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * If we faulted "during" the execution of a user-specified
	 * probe handler, invoke just that probe's fault handler.
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments the nmissed count for the multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;

	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

/* Called with kretprobe_lock held */
struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;

	hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
		return ri;
	return NULL;
}

/* Called with kretprobe_lock held */
static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
							      *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;

	hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
		return ri;
	return NULL;
}

/* Called with kretprobe_lock held */
void __kprobes add_rp_inst(struct kretprobe_instance *ri)
{
	/*
	 * Remove rp inst off the free list -
	 * add it back when the probed function returns
	 */
	hlist_del(&ri->uflist);

	/* Add rp inst onto table */
	INIT_HLIST_NODE(&ri->hlist);
	hlist_add_head(&ri->hlist,
			&kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]);

	/* Also add this rp inst to the used list. */
	INIT_HLIST_NODE(&ri->uflist);
	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}

/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
				struct hlist_head *head)
{
	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from finish_task_switch() when task tk becomes
 * dead, so that we can recycle any function-return probe instances
 * associated with this task. These left-over instances represent probed
 * functions that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(tk);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;

	while ((ri = get_free_rp_inst(rp)) != NULL) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		if (old_p->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &old_p->list);
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_handler;
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	if (p->post_handler)
		ap->post_handler = aggr_post_handler;
	if (p->break_handler)
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	if (addr >= (unsigned long)__kprobes_text_start
		&& addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	return 0;
}

static int __kprobes __register_kprobe(struct kprobe *p,
	unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;

	/*
	 * If we have a symbol_name argument, look it up and add the
	 * offset to the address.  That way the addr field can either
	 * be global or relative to a symbol.
	 */
	if (p->symbol_name) {
		if (p->addr)
			return -EINVAL;
		kprobe_lookup_name(p->symbol_name, p->addr);
	}

	if (!p->addr)
		return -EINVAL;
	p->addr = (kprobe_opcode_t *)((char *)p->addr + p->offset);

	if ((!kernel_text_address((unsigned long) p->addr)) ||
		in_kprobes_functions((unsigned long) p->addr))
		return -EINVAL;

	p->mod_refcounted = 0;
	/* Check if we are probing a module */
	if ((probed_mod = module_text_address((unsigned long) p->addr))) {
		struct module *calling_mod = module_text_address(called_from);
		/* We must allow modules to probe themselves and
		 * in this case avoid incrementing the module refcount,
		 * so as to allow unloading of self-probing modules.
		 */
		if (calling_mod && (calling_mod != probed_mod)) {
			if (unlikely(!try_module_get(probed_mod)))
				return -EINVAL;
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}

	p->nmissed = 0;
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		if (!ret)
			atomic_inc(&kprobe_count);
		goto out;
	}

	if ((ret = arch_prepare_kprobe(p)) != 0)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (atomic_add_return(1, &kprobe_count) ==
				(ARCH_INACTIVE_KPROBE_COUNT + 1))
		register_page_fault_notifier(&kprobe_page_fault_nb);

	arch_arm_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);
	return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobe(p,
		(unsigned long)__builtin_return_address(0));
}
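
/*
 * Minimal usage sketch, in the style of Documentation/kprobes.txt
 * (hypothetical module code; "do_fork" and handler_pre are this
 * example's assumptions, not symbols defined here):
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk("pre_handler: p->addr = %p\n", p->addr);
 *		return 0;	// 0 = go on and single-step the probed insn
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name = "do_fork",
 *		.pre_handler = handler_pre,
 *	};
 *
 *	ret = register_kprobe(&kp);	// pair with unregister_kprobe(&kp)
 */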

void __kprobes unregister_kprobe(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p, *list_p;
	int cleanup_p;

	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p)) {
		mutex_unlock(&kprobe_mutex);
		return;
	}
	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid_p;
		mutex_unlock(&kprobe_mutex);
		return;
	}
valid_p:
	if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
		(p->list.next == &old_p->list) &&
		(p->list.prev == &old_p->list))) {
		/* Only probe on the hash list */
		arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
		cleanup_p = 1;
	} else {
		list_del_rcu(&p->list);
		cleanup_p = 0;
	}

	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	if (p->mod_refcounted &&
	    (mod = module_text_address((unsigned long)p->addr)))
		module_put(mod);

	if (cleanup_p) {
		if (p != old_p) {
			list_del_rcu(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	} else {
		mutex_lock(&kprobe_mutex);
		if (p->break_handler)
			old_p->break_handler = NULL;
		if (p->post_handler) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if (list_p->post_handler) {
					cleanup_p = 2;
					break;
				}
			}
			if (cleanup_p == 0)
				old_p->post_handler = NULL;
		}
		mutex_unlock(&kprobe_mutex);
	}

	/*
	 * Call unregister_page_fault_notifier()
	 * if no probes are active
	 */
	mutex_lock(&kprobe_mutex);
	if (atomic_add_return(-1, &kprobe_count) ==
				ARCH_INACTIVE_KPROBE_COUNT)
		unregister_page_fault_notifier(&kprobe_page_fault_nb);
	mutex_unlock(&kprobe_mutex);
	return;
}

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

int __kprobes register_jprobe(struct jprobe *jp)
{
	/* TODO: Verify probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return __register_kprobe(&jp->kp,
		(unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}
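
/*
 * Hedged jprobe sketch (hypothetical module code; do_fork's prototype
 * here is this example's assumption): the handler mirrors the probed
 * function's signature, so its arguments are live, and it must finish
 * with jprobe_return():
 *
 *	static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
 *			     struct pt_regs *regs, unsigned long stack_size,
 *			     int __user *parent_tidptr, int __user *child_tidptr)
 *	{
 *		printk("jprobe: clone_flags = 0x%lx\n", clone_flags);
 *		jprobe_return();	// never falls through to the return below
 *		return 0;
 *	}
 *
 *	static struct jprobe my_jprobe = {
 *		.entry = JPROBE_ENTRY(jdo_fork),
 *		.kp.symbol_name = "do_fork",
 *	};
 *
 *	ret = register_jprobe(&my_jprobe);
 */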

#ifdef ARCH_SUPPORTS_KRETPROBES

/*
 * This kprobe pre_handler is registered with every kretprobe. When a
 * probe hits, it sets up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long flags = 0;

	/* TODO: consider swapping the RA only after the last pre_handler has fired */
	spin_lock_irqsave(&kretprobe_lock, flags);
	arch_prepare_kretprobe(rp, regs);
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = __register_kprobe(&rp->kp,
		(unsigned long)__builtin_return_address(0))) != 0)
		free_rp_inst(rp);
	return ret;
}
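
/*
 * Hedged usage sketch (hypothetical module code): the handler runs when
 * the probed function returns, and maxactive bounds how many entries may
 * be outstanding at once (see the default computed above):
 *
 *	static int ret_handler(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		printk("return probe at %p hit\n", ri->rp->kp.addr);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler	= ret_handler,
 *		.maxactive	= 20,
 *		.kp.symbol_name	= "do_fork",
 *	};
 *
 *	ret = register_kretprobe(&my_kretprobe);
 */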

#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;

	unregister_kprobe(&rp->kp);
	/* No race here */
	spin_lock_irqsave(&kretprobe_lock, flags);
	while ((ri = get_used_rp_inst(rp)) != NULL) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	free_rp_inst(rp);
}

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}
	atomic_set(&kprobe_count, 0);

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
				   const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s\n", p->addr, kprobe_type,
			sym, offset, (modname ? modname : " "));
	else
		seq_printf(pi, "%p  %s  %p\n", p->addr, kprobe_type, p->addr);
}
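
/*
 * Given the format strings above, each line of the debugfs kprobes/list
 * file looks roughly like (illustrative output, address made up):
 *
 *	c0123456  k  do_fork+0x0  [modname or blank]
 *
 * where the second column is k (kprobe), r (kretprobe) or j (jprobe).
 */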

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long size, offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, &size,
					&offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
	.open           = kprobes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);