/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-Jul	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/kdebug.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

unsigned int kprobe_cpu = NR_CPUS;
static DEFINE_SPINLOCK(kprobe_lock);
static struct kprobe *curr_kprobe;

/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
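
/*
 * For a concrete feel for the slot count (values assumed from the
 * x86_64 port of this era, where kprobe_opcode_t is a u8 and
 * MAX_INSN_SIZE is 15):
 *
 *	INSNS_PER_PAGE = PAGE_SIZE / (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))
 *	               = 4096 / (15 * 1) = 273 slots per page
 */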

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
};

static struct hlist_head kprobe_insn_pages;

/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (!kip->slot_used[i]) {
					kip->slot_used[i] = 1;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* All out of space.  Need to allocate a new page.  Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, 0, INSNS_PER_PAGE);
	kip->slot_used[0] = 1;
	kip->nused = 1;
	return kip->insns;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			kip->slot_used[i] = 0;
			kip->nused--;
			if (kip->nused == 0) {
				/*
				 * Page is no longer in use.  Free it unless
				 * it's the last one.  We keep the last one
				 * so as not to have to set it up again the
				 * next time somebody inserts a probe.
				 */
				hlist_del(&kip->hlist);
				if (hlist_empty(&kprobe_insn_pages)) {
					INIT_HLIST_NODE(&kip->hlist);
					hlist_add_head(&kip->hlist,
						&kprobe_insn_pages);
				} else {
					module_free(NULL, kip->insns);
					kfree(kip);
				}
			}
			return;
		}
	}
}
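
/*
 * A minimal sketch of how an arch port is expected to use the slot
 * allocator above (loosely modelled on the x86_64 port; the real arch
 * code does more and this is illustration only):
 *
 *	int arch_prepare_kprobe(struct kprobe *p)
 *	{
 *		p->ainsn.insn = get_insn_slot();
 *		if (!p->ainsn.insn)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 *	void arch_copy_kprobe(struct kprobe *p)
 *	{
 *		memcpy(p->ainsn.insn, p->addr,
 *		       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 *	}
 *
 *	void arch_remove_kprobe(struct kprobe *p)
 *	{
 *		free_insn_slot(p->ainsn.insn);
 *	}
 *
 * The copied instruction lives on a module_alloc()ed page, so it is
 * executable and within relative-branch range of kernel text.
 */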

/* Locks kprobes: irqs must be disabled */
void __kprobes lock_kprobes(void)
{
	unsigned long flags = 0;

	/*
	 * Keep local interrupts from firing after we take kprobe_lock and
	 * before we get a chance to update kprobe_cpu; otherwise a kprobe
	 * on an ISR routine could deadlock against a kprobe on a task
	 * routine.
	 */
	local_irq_save(flags);

	spin_lock(&kprobe_lock);
	kprobe_cpu = smp_processor_id();

	local_irq_restore(flags);
}

void __kprobes unlock_kprobes(void)
{
	unsigned long flags = 0;

	/*
	 * Keep local interrupts from firing after we update kprobe_cpu and
	 * before we get a chance to release kprobe_lock; otherwise a kprobe
	 * on an ISR routine could deadlock against a kprobe on a task
	 * routine.
	 */
	local_irq_save(flags);

	kprobe_cpu = NR_CPUS;
	spin_unlock(&kprobe_lock);

	local_irq_restore(flags);
}

/* You have to be holding the kprobe_lock */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each(node, head) {
		struct kprobe *p = hlist_entry(node, struct kprobe, hlist);
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry(kp, &p->list, list) {
		if (kp->pre_handler) {
			curr_kprobe = kp;
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		curr_kprobe = NULL;
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry(kp, &p->list, list) {
		if (kp->post_handler) {
			curr_kprobe = kp;
			kp->post_handler(kp, regs, flags);
			curr_kprobe = NULL;
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	/*
	 * If we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (curr_kprobe && curr_kprobe->fault_handler) {
		if (curr_kprobe->fault_handler(curr_kprobe, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp = curr_kprobe;

	if (kp && kp->break_handler) {
		if (kp->break_handler(kp, regs)) {
			curr_kprobe = NULL;
			return 1;
		}
	}
	curr_kprobe = NULL;
	return 0;
}
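
/*
 * Illustration (hypothetical addresses and handlers): registering two
 * kprobes at the same address makes register_aggr_kprobe() below swap
 * the original probe for a "manager" probe that uses the aggregate
 * handlers above:
 *
 *	static struct kprobe kp1 = { .pre_handler = h1 };
 *	static struct kprobe kp2 = { .pre_handler = h2 };
 *
 *	kp1.addr = kp2.addr = (kprobe_opcode_t *) some_text_addr;
 *	register_kprobe(&kp1);
 *	register_kprobe(&kp2);
 *
 * After the second call, a breakpoint hit runs aggr_pre_handler(),
 * which invokes h1 and h2 in turn from the manager probe's ->list.
 */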

struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;

	hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
		return ri;
	return NULL;
}

static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;

	hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
		return ri;
	return NULL;
}

void __kprobes add_rp_inst(struct kretprobe_instance *ri)
{
	/*
	 * Remove rp inst off the free list -
	 * Add it back when probed function returns
	 */
	hlist_del(&ri->uflist);

	/* Add rp inst onto table */
	INIT_HLIST_NODE(&ri->hlist);
	hlist_add_head(&ri->hlist,
			&kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]);

	/* Also add this rp inst to the used list. */
	INIT_HLIST_NODE(&ri->uflist);
	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}

void __kprobes recycle_rp_inst(struct kretprobe_instance *ri)
{
	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		kfree(ri);
}

struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from exit_thread or flush_thread when task tk's
 * stack is being recycled so that we can recycle any function-return probe
 * instances associated with this task. These left-over instances represent
 * probed functions that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	spin_lock_irqsave(&kprobe_lock, flags);
	head = kretprobe_inst_table_head(tk);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri);
	}
	spin_unlock_irqrestore(&kprobe_lock, flags);
}
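
/*
 * Sketch of the expected call site (arch process-management code, not
 * part of this file; the exact hook point is an assumption here):
 *
 *	void exit_thread(void)
 *	{
 *		kprobe_flush_task(current);
 *		...
 *	}
 */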

/*
 * This kprobe pre_handler is registered with every kretprobe. When a
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);

	/* TODO: consider swapping the RA only after the last pre_handler fires */
	arch_prepare_kretprobe(rp, regs);
	return 0;
}
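
/*
 * What arch_prepare_kretprobe() is expected to do, sketched along the
 * lines of the i386 port (an assumption; the real code lives in the
 * arch tree): grab a free instance, record the real return address,
 * and point the on-stack return address at a trampoline:
 *
 *	void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
 *	{
 *		unsigned long *sara = (unsigned long *) &regs->esp;
 *		struct kretprobe_instance *ri;
 *
 *		if ((ri = get_free_rp_inst(rp)) != NULL) {
 *			ri->rp = rp;
 *			ri->task = current;
 *			ri->ret_addr = (kprobe_opcode_t *) *sara;
 *			*sara = (unsigned long) &kretprobe_trampoline;
 *			add_rp_inst(ri);
 *		} else
 *			rp->nmissed++;
 *	}
 *
 * When the probed function returns, it lands in the trampoline, whose
 * handler looks the instance up via ri->task, runs rp->handler, and
 * restores the original return address.
 */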

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;

	while ((ri = get_free_rp_inst(rp)) != NULL) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	struct kprobe *kp;

	if (p->break_handler) {
		list_for_each_entry(kp, &old_p->list, list) {
			if (kp->break_handler)
				return -EEXIST;
		}
		list_add_tail(&p->list, &old_p->list);
	} else
		list_add(&p->list, &old_p->list);
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->post_handler = aggr_post_handler;
	ap->fault_handler = aggr_fault_handler;
	ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add(&p->list, &ap->list);

	INIT_HLIST_NODE(&ap->hlist);
	hlist_del(&p->hlist);
	hlist_add_head(&ap->hlist,
		&kprobe_table[hash_ptr(ap->addr, KPROBE_HASH_BITS)]);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 * TODO: Move kcalloc outside the spinlock
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}

/* kprobe removal house-keeping routines */
static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
{
	arch_disarm_kprobe(p);
	hlist_del(&p->hlist);
	spin_unlock_irqrestore(&kprobe_lock, flags);
	arch_remove_kprobe(p);
}

static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
		struct kprobe *p, unsigned long flags)
{
	list_del(&p->list);
	if (list_empty(&old_p->list)) {
		cleanup_kprobe(old_p, flags);
		kfree(old_p);
	} else
		spin_unlock_irqrestore(&kprobe_lock, flags);
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	if (addr >= (unsigned long)__kprobes_text_start
		&& addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	return 0;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	int ret = 0;
	unsigned long flags = 0;
	struct kprobe *old_p;

	if ((ret = in_kprobes_functions((unsigned long) p->addr)) != 0)
		return ret;
	if ((ret = arch_prepare_kprobe(p)) != 0)
		goto rm_kprobe;

	spin_lock_irqsave(&kprobe_lock, flags);
	old_p = get_kprobe(p->addr);
	p->nmissed = 0;
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	arch_copy_kprobe(p);
	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	arch_arm_kprobe(p);

out:
	spin_unlock_irqrestore(&kprobe_lock, flags);
rm_kprobe:
	if (ret == -EEXIST)
		arch_remove_kprobe(p);
	return ret;
}

void __kprobes unregister_kprobe(struct kprobe *p)
{
	unsigned long flags;
	struct kprobe *old_p;

	spin_lock_irqsave(&kprobe_lock, flags);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		if (old_p->pre_handler == aggr_pre_handler)
			cleanup_aggr_kprobe(old_p, p, flags);
		else
			cleanup_kprobe(p, flags);
	} else
		spin_unlock_irqrestore(&kprobe_lock, flags);
}
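
/*
 * Minimal usage sketch (a hypothetical client module, not part of this
 * file; the probed address is an assumption for illustration):
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk("kprobe hit at %p\n", p->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.pre_handler = my_pre,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		my_kp.addr = (kprobe_opcode_t *) address_of_probed_function;
 *		return register_kprobe(&my_kp);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		unregister_kprobe(&my_kp);
 *	}
 *
 * Returning 0 from the pre_handler lets the original instruction be
 * single-stepped and execution continue normally.
 */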

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

int __kprobes register_jprobe(struct jprobe *jp)
{
	/* Todo: Verify probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return register_kprobe(&jp->kp);
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}
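
/*
 * Jprobe usage sketch (hypothetical module code). The handler must have
 * the same signature as the probed function and must finish with
 * jprobe_return(); the do_fork() prototype below is taken from this
 * kernel's fork code:
 *
 *	static long my_do_fork(unsigned long clone_flags,
 *			       unsigned long stack_start,
 *			       struct pt_regs *regs,
 *			       unsigned long stack_size,
 *			       int __user *parent_tidptr,
 *			       int __user *child_tidptr)
 *	{
 *		printk("do_fork: clone_flags=0x%lx\n", clone_flags);
 *		jprobe_return();
 *		return 0;
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry = JPROBE_ENTRY(my_do_fork),
 *	};
 *
 * Set my_jp.kp.addr to do_fork's address before register_jprobe(&my_jp).
 * jprobe_return() never falls through, so the trailing return is only
 * there to satisfy the compiler.
 */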

#ifdef ARCH_SUPPORTS_KRETPROBES

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = register_kprobe(&rp->kp)) != 0)
		free_rp_inst(rp);
	return ret;
}

#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;

	unregister_kprobe(&rp->kp);
	/* No race here */
	spin_lock_irqsave(&kprobe_lock, flags);
	free_rp_inst(rp);
	while ((ri = get_used_rp_inst(rp)) != NULL) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kprobe_lock, flags);
}
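
/*
 * Kretprobe usage sketch (hypothetical module code; reading the return
 * value from regs->eax assumes i386):
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		printk("probed function returned %ld\n", (long) regs->eax);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler = my_ret_handler,
 *		.maxactive = 20,
 *	};
 *
 * Set my_rp.kp.addr to the probed function's address, then call
 * register_kretprobe(&my_rp); pair it with unregister_kretprobe() on
 * module exit. maxactive bounds how many activations of the function
 * may be probed concurrently; overflows are counted in my_rp.nmissed.
 */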

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME: allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	return err;
}

__initcall(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);