/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

/*
 * Some oddball architectures like 64-bit powerpc have function descriptors,
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
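
#if 0
/*
 * Illustrative sketch only (not compiled): how a caller resolves a symbol
 * name to a probeable address via the macro above.  "do_fork" is just an
 * example symbol; any kallsyms-visible function would do.  On most
 * architectures this is a plain kallsyms lookup; 64-bit powerpc overrides
 * the macro to chase the function descriptor instead.
 */
static kprobe_opcode_t *example_lookup(void)
{
	kprobe_opcode_t *addr;

	kprobe_lookup_name("do_fork", addr);
	return addr;	/* NULL if the symbol was not found */
}
#endif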

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
static atomic_t kprobe_count;

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobe_enabled;

DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

static struct notifier_block kprobe_page_fault_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}

/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

 retry:
	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page. Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}
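
#if 0
/*
 * Illustrative sketch only (not compiled): how architecture code typically
 * pairs get_insn_slot()/free_insn_slot().  The real implementations live in
 * arch/<arch>/kernel/kprobes.c and differ per architecture; the copy size
 * and locking below are assumptions for the sketch.
 */
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* Copy the probed instruction into an executable slot. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	p->opcode = *p->addr;
	return 0;
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	/* Return the slot; dirty == 0 marks it clean for immediate reuse. */
	mutex_lock(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn, 0);
	mutex_unlock(&kprobe_mutex);
}
#endif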

/*
 * Return 1 if the whole page is collected (freed or recycled),
 * otherwise 0.
 */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		hlist_del(&kip->hlist);
		if (hlist_empty(&kprobe_insn_pages)) {
			INIT_HLIST_NODE(&kip->hlist);
			hlist_add_head(&kip->hlist,
				       &kprobe_insn_pages);
		} else {
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos, *next;

	/* Ensure no one is preempted while we work on the garbage slots */
	if (check_safety() != 0)
		return -EAGAIN;

	hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else {
				collect_one_slot(kip, i);
			}
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();
}
#endif

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * If we faulted "during" the execution of a user-specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
				struct hlist_head *head)
{
	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left-over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(tk);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, uflist) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		if (old_p->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &old_p->list);
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_handler;
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	if (p->post_handler)
		ap->post_handler = aggr_post_handler;
	if (p->break_handler)
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}
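
#if 0
/*
 * Illustrative sketch only (not compiled): two independent kprobes on the
 * same address.  The first registration installs kp1 directly; the second
 * lands in register_aggr_kprobe(), which transparently inserts a "manager"
 * kprobe whose aggr_* handlers fan out to both.  The handler bodies and
 * the "do_fork" symbol are assumptions for the example.
 */
static int h1(struct kprobe *p, struct pt_regs *regs) { return 0; }
static void h2(struct kprobe *p, struct pt_regs *regs, unsigned long flags) { }

static struct kprobe kp1 = { .symbol_name = "do_fork", .pre_handler = h1 };
static struct kprobe kp2 = { .symbol_name = "do_fork", .post_handler = h2 };

static int __init aggr_example_init(void)
{
	int ret = register_kprobe(&kp1);	/* plain kprobe */
	if (ret)
		return ret;
	ret = register_kprobe(&kp2);	/* aggregated behind the same breakpoint */
	if (ret)
		unregister_kprobe(&kp1);
	return ret;
}
#endif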

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	return 0;
}

static int __kprobes __register_kprobe(struct kprobe *p,
	unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;

	/*
	 * If we have a symbol_name argument, look it up
	 * and add it to the address.  That way the addr
	 * field can either be global or relative to a symbol.
	 */
	if (p->symbol_name) {
		if (p->addr)
			return -EINVAL;
		kprobe_lookup_name(p->symbol_name, p->addr);
	}

	if (!p->addr)
		return -EINVAL;
	p->addr = (kprobe_opcode_t *)(((char *)p->addr) + p->offset);

	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr))
		return -EINVAL;

	p->mod_refcounted = 0;

	/*
	 * Check if we are probing a module.
	 */
	probed_mod = module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		struct module *calling_mod = module_text_address(called_from);
		/*
		 * We must allow modules to probe themselves and in this case
		 * avoid incrementing the module refcount, so as to allow
		 * unloading of self-probing modules.
		 */
		if (calling_mod && calling_mod != probed_mod) {
			if (unlikely(!try_module_get(probed_mod)))
				return -EINVAL;
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}

	p->nmissed = 0;
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		if (!ret)
			atomic_inc(&kprobe_count);
		goto out;
	}

	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (kprobe_enabled) {
		if (atomic_add_return(1, &kprobe_count) ==
				(ARCH_INACTIVE_KPROBE_COUNT + 1))
			register_page_fault_notifier(&kprobe_page_fault_nb);

		arch_arm_kprobe(p);
	}
out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);
	return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobe(p, (unsigned long)__builtin_return_address(0));
}
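
#if 0
/*
 * Illustrative sketch only (not compiled): a minimal external-module user
 * of register_kprobe(), in the style of Documentation/kprobes.txt.  The
 * handler body and the "do_fork" symbol are assumptions for the example.
 */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit at %p\n", p->addr);
	return 0;	/* 0: single-step the original instruction as usual */
}

static struct kprobe example_kp = {
	.symbol_name	= "do_fork",	/* resolved via kprobe_lookup_name() */
	.pre_handler	= handler_pre,
};

static int __init kprobe_example_init(void)
{
	return register_kprobe(&example_kp);
}

static void __exit kprobe_example_exit(void)
{
	unregister_kprobe(&example_kp);
}
#endif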

void __kprobes unregister_kprobe(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p, *list_p;
	int cleanup_p;

	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p)) {
		mutex_unlock(&kprobe_mutex);
		return;
	}
	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid_p;
		mutex_unlock(&kprobe_mutex);
		return;
	}
valid_p:
	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     p->list.next == &old_p->list && p->list.prev == &old_p->list)) {
		/*
		 * Only probe on the hash list. Disarm only if kprobes are
		 * enabled - otherwise, the breakpoint would already have
		 * been removed. We save on flushing icache.
		 */
		if (kprobe_enabled)
			arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
		cleanup_p = 1;
	} else {
		list_del_rcu(&p->list);
		cleanup_p = 0;
	}

	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	if (p->mod_refcounted) {
		mod = module_text_address((unsigned long)p->addr);
		if (mod)
			module_put(mod);
	}

	if (cleanup_p) {
		if (p != old_p) {
			list_del_rcu(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	} else {
		mutex_lock(&kprobe_mutex);
		if (p->break_handler)
			old_p->break_handler = NULL;
		if (p->post_handler) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if (list_p->post_handler) {
					cleanup_p = 2;
					break;
				}
			}
			if (cleanup_p == 0)
				old_p->post_handler = NULL;
		}
		mutex_unlock(&kprobe_mutex);
	}

	/*
	 * Call unregister_page_fault_notifier() if no probes are active.
	 */
	mutex_lock(&kprobe_mutex);
	if (atomic_add_return(-1, &kprobe_count) ==
				ARCH_INACTIVE_KPROBE_COUNT)
		unregister_page_fault_notifier(&kprobe_page_fault_nb);
	mutex_unlock(&kprobe_mutex);
	return;
}

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

int __kprobes register_jprobe(struct jprobe *jp)
{
	/* TODO: verify that the probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return __register_kprobe(&jp->kp,
		(unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}
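
#if 0
/*
 * Illustrative sketch only (not compiled): a jprobe handler must mirror the
 * probed function's signature so it can read the arguments, and must end in
 * jprobe_return().  The do_fork() prototype below is an assumption for the
 * example and varies across kernel versions.
 */
static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
		     struct pt_regs *regs, unsigned long stack_size,
		     int __user *parent_tidptr, int __user *child_tidptr)
{
	printk(KERN_INFO "jprobe: clone_flags = 0x%lx\n", clone_flags);
	jprobe_return();	/* mandatory: never return normally */
	return 0;		/* not reached */
}

static struct jprobe example_jp = {
	.entry = JPROBE_ENTRY(jdo_fork),
	.kp.symbol_name = "do_fork",
};

/* register_jprobe(&example_jp) in module init; unregister_jprobe() on exit */
#endif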

#ifdef ARCH_SUPPORTS_KRETPROBES

/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long flags = 0;

	/* TODO: consider swapping the RA only after the last pre_handler fired */
	spin_lock_irqsave(&kretprobe_lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		struct kretprobe_instance *ri;

		ri = hlist_entry(rp->free_instances.first,
				 struct kretprobe_instance, uflist);
		ri->rp = rp;
		ri->task = current;
		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		hlist_del(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->used_instances);
		hlist_add_head(&ri->hlist, kretprobe_inst_table_head(ri->task));
	} else
		rp->nmissed++;
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = __register_kprobe(&rp->kp,
		(unsigned long)__builtin_return_address(0))) != 0)
		free_rp_inst(rp);
	return ret;
}

#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	unregister_kprobe(&rp->kp);

	/* No race here */
	spin_lock_irqsave(&kretprobe_lock, flags);
	hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	free_rp_inst(rp);
}
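
#if 0
/*
 * Illustrative sketch only (not compiled): a minimal kretprobe user.  The
 * return handler fires when the probed function returns; reading the return
 * value is architecture-specific (it lives in the arch's pt_regs), so that
 * part is left as a comment.  The "do_fork" symbol and maxactive value are
 * assumptions for the example.
 */
static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* ri->task is the caller's task; the return value is in the
	 * arch-specific return register within *regs. */
	return 0;
}

static struct kretprobe example_rp = {
	.handler	= ret_handler,
	.maxactive	= 20,	/* concurrent instances to pre-allocate */
};

static int __init kretprobe_example_init(void)
{
	example_rp.kp.symbol_name = "do_fork";
	return register_kretprobe(&example_rp);
}
#endif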

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}
	atomic_set(&kprobe_count, 0);

	/* By default, kprobes are enabled */
	kprobe_enabled = true;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s\n", p->addr, kprobe_type,
			sym, offset, (modname ? modname : " "));
	else
		seq_printf(pi, "%p  %s  %p\n", p->addr, kprobe_type, p->addr);
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
	.open           = kprobes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

static void __kprobes enable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already enabled, just return */
	if (kprobe_enabled)
		goto already_enabled;

	/*
	 * Re-register the page fault notifier only if there are any
	 * active probes at the time of enabling kprobes globally
	 */
	if (atomic_read(&kprobe_count) > ARCH_INACTIVE_KPROBE_COUNT)
		register_page_fault_notifier(&kprobe_page_fault_nb);

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			arch_arm_kprobe(p);
	}

	kprobe_enabled = true;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disabled, just return */
	if (!kprobe_enabled)
		goto already_disabled;

	kprobe_enabled = false;
	printk(KERN_INFO "Kprobes globally disabled\n");
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();

	mutex_lock(&kprobe_mutex);
	/* Unconditionally unregister the page_fault notifier */
	unregister_page_fault_notifier(&kprobe_page_fault_nb);

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (kprobe_enabled)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		enable_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disable_all_kprobes();
		break;
	}

	return count;
}

static struct file_operations fops_kp = {
	.read =         read_enabled_file_bool,
	.write =        write_enabled_file_bool,
};
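
/*
 * Usage sketch for the files created below (assuming debugfs is mounted
 * at /sys/kernel/debug):
 *
 *   cat /sys/kernel/debug/kprobes/list          # dump registered probes
 *   echo 0 > /sys/kernel/debug/kprobes/enabled  # disarm all probes
 *   echo 1 > /sys/kernel/debug/kprobes/enabled  # re-arm them
 */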

static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);