1 /*
2  *  Kernel Probes (KProbes)
3  *  kernel/kprobes.c
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18  *
19  * Copyright (C) IBM Corporation, 2002, 2004
20  *
21  * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
22  *		Probes initial implementation (includes suggestions from
23  *		Rusty Russell).
24  * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
25  *		hlists and exceptions notifier as suggested by Andi Kleen.
26  * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
27  *		interface to access function arguments.
28  * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
29  *		exceptions notifier to be first on the priority list.
30  * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
31  *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
32  *		<prasanna@in.ibm.com> added function-return probes.
33  */
34 #include <linux/kprobes.h>
35 #include <linux/hash.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/stddef.h>
39 #include <linux/export.h>
40 #include <linux/moduleloader.h>
41 #include <linux/kallsyms.h>
42 #include <linux/freezer.h>
43 #include <linux/seq_file.h>
44 #include <linux/debugfs.h>
45 #include <linux/sysctl.h>
46 #include <linux/kdebug.h>
47 #include <linux/memory.h>
48 #include <linux/ftrace.h>
49 #include <linux/cpu.h>
50 #include <linux/jump_label.h>
51 
52 #include <asm-generic/sections.h>
53 #include <asm/cacheflush.h>
54 #include <asm/errno.h>
55 #include <asm/uaccess.h>
56 
57 #define KPROBE_HASH_BITS 6
58 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
59 
60 
61 /*
62  * Some oddball architectures like 64-bit powerpc have function descriptors,
63  * so this must be overridable.
64  */
65 #ifndef kprobe_lookup_name
66 #define kprobe_lookup_name(name, addr) \
67 	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
68 #endif
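/*
 * For illustration only: on an ABI that uses function descriptors, the
 * entry point must be loaded out of the descriptor that kallsyms returns.
 * A simplified, hypothetical sketch of such an override follows (the real
 * powerpc version in its arch headers handles more cases):
 */
#if 0
#define kprobe_lookup_name(name, addr)					\
do {									\
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);		\
	if (addr)	/* first word of the descriptor is the entry */	\
		addr = *(kprobe_opcode_t **)addr;			\
} while (0)
#endif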
69 
70 static int kprobes_initialized;
71 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
72 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
73 
74 /* NOTE: change this value only with kprobe_mutex held */
75 static bool kprobes_all_disarmed;
76 
77 /* This protects kprobe_table and optimizing_list */
78 static DEFINE_MUTEX(kprobe_mutex);
79 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
80 static struct {
81 	raw_spinlock_t lock ____cacheline_aligned_in_smp;
82 } kretprobe_table_locks[KPROBE_TABLE_SIZE];
83 
84 static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
85 {
86 	return &(kretprobe_table_locks[hash].lock);
87 }
88 
89 /*
90  * Normally, functions that we'd want to prohibit kprobes in are marked
91  * __kprobes. But there are cases where such functions already belong to
92  * a different section (__sched for preempt_schedule).
93  *
94  * For such cases, we now have a blacklist
95  */
96 static struct kprobe_blackpoint kprobe_blacklist[] = {
97 	{"preempt_schedule",},
98 	{"native_get_debugreg",},
99 	{"irq_entries_start",},
100 	{"common_interrupt",},
101 	{"mcount",},	/* mcount can be called from everywhere */
102 	{NULL}    /* Terminator */
103 };
104 
105 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
106 /*
107  * kprobe->ainsn.insn points to the copy of the instruction to be
108  * single-stepped. x86_64, POWER4 and above have no-exec support and
109  * stepping on the instruction on a vmalloced/kmalloced/data page
110  * is a recipe for disaster
111  */
112 struct kprobe_insn_page {
113 	struct list_head list;
114 	kprobe_opcode_t *insns;		/* Page of instruction slots */
115 	int nused;
116 	int ngarbage;
117 	char slot_used[];
118 };
119 
120 #define KPROBE_INSN_PAGE_SIZE(slots)			\
121 	(offsetof(struct kprobe_insn_page, slot_used) +	\
122 	 (sizeof(char) * (slots)))
123 
124 struct kprobe_insn_cache {
125 	struct list_head pages;	/* list of kprobe_insn_page */
126 	size_t insn_size;	/* size of instruction slot */
127 	int nr_garbage;
128 };
129 
130 static int slots_per_page(struct kprobe_insn_cache *c)
131 {
132 	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
133 }
134 
135 enum kprobe_slot_state {
136 	SLOT_CLEAN = 0,
137 	SLOT_DIRTY = 1,
138 	SLOT_USED = 2,
139 };
140 
141 static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_slots */
142 static struct kprobe_insn_cache kprobe_insn_slots = {
143 	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
144 	.insn_size = MAX_INSN_SIZE,
145 	.nr_garbage = 0,
146 };
147 static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);
148 
149 /**
150  * __get_insn_slot() - Find a slot on an executable page for an instruction.
151  * We allocate an executable page if there's no room on existing ones.
152  */
153 static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
154 {
155 	struct kprobe_insn_page *kip;
156 
157  retry:
158 	list_for_each_entry(kip, &c->pages, list) {
159 		if (kip->nused < slots_per_page(c)) {
160 			int i;
161 			for (i = 0; i < slots_per_page(c); i++) {
162 				if (kip->slot_used[i] == SLOT_CLEAN) {
163 					kip->slot_used[i] = SLOT_USED;
164 					kip->nused++;
165 					return kip->insns + (i * c->insn_size);
166 				}
167 			}
168 			/* kip->nused is broken. Fix it. */
169 			kip->nused = slots_per_page(c);
170 			WARN_ON(1);
171 		}
172 	}
173 
174 	/* If there are any garbage slots, collect them and try again. */
175 	if (c->nr_garbage && collect_garbage_slots(c) == 0)
176 		goto retry;
177 
178 	/* All out of space.  Need to allocate a new page. */
179 	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
180 	if (!kip)
181 		return NULL;
182 
183 	/*
184 	 * Use module_alloc so this page is within +/- 2GB of where the
185 	 * kernel image and loaded module images reside. This is required
186 	 * so x86_64 can correctly handle the %rip-relative fixups.
187 	 */
188 	kip->insns = module_alloc(PAGE_SIZE);
189 	if (!kip->insns) {
190 		kfree(kip);
191 		return NULL;
192 	}
193 	INIT_LIST_HEAD(&kip->list);
194 	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
195 	kip->slot_used[0] = SLOT_USED;
196 	kip->nused = 1;
197 	kip->ngarbage = 0;
198 	list_add(&kip->list, &c->pages);
199 	return kip->insns;
200 }
201 
202 
203 kprobe_opcode_t __kprobes *get_insn_slot(void)
204 {
205 	kprobe_opcode_t *ret = NULL;
206 
207 	mutex_lock(&kprobe_insn_mutex);
208 	ret = __get_insn_slot(&kprobe_insn_slots);
209 	mutex_unlock(&kprobe_insn_mutex);
210 
211 	return ret;
212 }
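/*
 * For illustration: arch code pairs get_insn_slot() with free_insn_slot()
 * around the lifetime of a probe's instruction copy. A simplified,
 * hypothetical sketch of that pairing (real arch_prepare_kprobe()
 * implementations also decode and fix up the copied instruction):
 */
#if 0
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	p->ainsn.insn = get_insn_slot();	/* executable slot */
	if (!p->ainsn.insn)
		return -ENOMEM;
	/* copy the probed instruction for later single-stepping */
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	p->opcode = *p->addr;			/* save the original opcode */
	return 0;
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		/* dirty=1: the slot is garbage-collected later */
		free_insn_slot(p->ainsn.insn, 1);
		p->ainsn.insn = NULL;
	}
}
#endif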
213 
214 /* Return 1 if the page containing this slot is no longer in use, otherwise 0. */
215 static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
216 {
217 	kip->slot_used[idx] = SLOT_CLEAN;
218 	kip->nused--;
219 	if (kip->nused == 0) {
220 		/*
221 		 * Page is no longer in use.  Free it unless
222 		 * it's the last one.  We keep the last one
223 		 * so as not to have to set it up again the
224 		 * next time somebody inserts a probe.
225 		 */
226 		if (!list_is_singular(&kip->list)) {
227 			list_del(&kip->list);
228 			module_free(NULL, kip->insns);
229 			kfree(kip);
230 		}
231 		return 1;
232 	}
233 	return 0;
234 }
235 
236 static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
237 {
238 	struct kprobe_insn_page *kip, *next;
239 
240 	/* Ensure no one is still executing in the garbage slots */
241 	synchronize_sched();
242 
243 	list_for_each_entry_safe(kip, next, &c->pages, list) {
244 		int i;
245 		if (kip->ngarbage == 0)
246 			continue;
247 		kip->ngarbage = 0;	/* we will collect all garbage slots */
248 		for (i = 0; i < slots_per_page(c); i++) {
249 			if (kip->slot_used[i] == SLOT_DIRTY &&
250 			    collect_one_slot(kip, i))
251 				break;
252 		}
253 	}
254 	c->nr_garbage = 0;
255 	return 0;
256 }
257 
258 static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
259 				       kprobe_opcode_t *slot, int dirty)
260 {
261 	struct kprobe_insn_page *kip;
262 
263 	list_for_each_entry(kip, &c->pages, list) {
264 		long idx = ((long)slot - (long)kip->insns) /
265 				(c->insn_size * sizeof(kprobe_opcode_t));
266 		if (idx >= 0 && idx < slots_per_page(c)) {
267 			WARN_ON(kip->slot_used[idx] != SLOT_USED);
268 			if (dirty) {
269 				kip->slot_used[idx] = SLOT_DIRTY;
270 				kip->ngarbage++;
271 				if (++c->nr_garbage > slots_per_page(c))
272 					collect_garbage_slots(c);
273 			} else
274 				collect_one_slot(kip, idx);
275 			return;
276 		}
277 	}
278 	/* Could not free this slot. */
279 	WARN_ON(1);
280 }
281 
282 void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
283 {
284 	mutex_lock(&kprobe_insn_mutex);
285 	__free_insn_slot(&kprobe_insn_slots, slot, dirty);
286 	mutex_unlock(&kprobe_insn_mutex);
287 }
288 #ifdef CONFIG_OPTPROBES
289 /* For optimized_kprobe buffer */
290 static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */
291 static struct kprobe_insn_cache kprobe_optinsn_slots = {
292 	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
293 	/* .insn_size is initialized later */
294 	.nr_garbage = 0,
295 };
296 /* Get a slot for optimized_kprobe buffer */
297 kprobe_opcode_t __kprobes *get_optinsn_slot(void)
298 {
299 	kprobe_opcode_t *ret = NULL;
300 
301 	mutex_lock(&kprobe_optinsn_mutex);
302 	ret = __get_insn_slot(&kprobe_optinsn_slots);
303 	mutex_unlock(&kprobe_optinsn_mutex);
304 
305 	return ret;
306 }
307 
308 void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
309 {
310 	mutex_lock(&kprobe_optinsn_mutex);
311 	__free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
312 	mutex_unlock(&kprobe_optinsn_mutex);
313 }
314 #endif
315 #endif
316 
317 /* We have preemption disabled, so it is safe to use the __ versions */
318 static inline void set_kprobe_instance(struct kprobe *kp)
319 {
320 	__this_cpu_write(kprobe_instance, kp);
321 }
322 
323 static inline void reset_kprobe_instance(void)
324 {
325 	__this_cpu_write(kprobe_instance, NULL);
326 }
327 
328 /*
329  * This routine is called either:
330  * 	- under the kprobe_mutex - during kprobe_[un]register()
331  * 				OR
332  * 	- with preemption disabled - from arch/xxx/kernel/kprobes.c
333  */
334 struct kprobe __kprobes *get_kprobe(void *addr)
335 {
336 	struct hlist_head *head;
337 	struct hlist_node *node;
338 	struct kprobe *p;
339 
340 	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
341 	hlist_for_each_entry_rcu(p, node, head, hlist) {
342 		if (p->addr == addr)
343 			return p;
344 	}
345 
346 	return NULL;
347 }
348 
349 static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
350 
351 /* Return true if the kprobe is an aggregator */
352 static inline int kprobe_aggrprobe(struct kprobe *p)
353 {
354 	return p->pre_handler == aggr_pre_handler;
355 }
356 
357 /* Return true(!0) if the kprobe is unused */
358 static inline int kprobe_unused(struct kprobe *p)
359 {
360 	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
361 	       list_empty(&p->list);
362 }
363 
364 /*
365  * Keep all fields in the kprobe consistent
366  */
367 static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
368 {
369 	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
370 	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
371 }
372 
373 #ifdef CONFIG_OPTPROBES
374 /* NOTE: change this value only with kprobe_mutex held */
375 static bool kprobes_allow_optimization;
376 
377 /*
378  * Call all pre_handlers on the list, but ignore their return values.
379  * This must be called from the arch-dependent optimized caller.
380  */
381 void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
382 {
383 	struct kprobe *kp;
384 
385 	list_for_each_entry_rcu(kp, &p->list, list) {
386 		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
387 			set_kprobe_instance(kp);
388 			kp->pre_handler(kp, regs);
389 		}
390 		reset_kprobe_instance();
391 	}
392 }
393 
394 /* Free optimized instructions and optimized_kprobe */
395 static __kprobes void free_aggr_kprobe(struct kprobe *p)
396 {
397 	struct optimized_kprobe *op;
398 
399 	op = container_of(p, struct optimized_kprobe, kp);
400 	arch_remove_optimized_kprobe(op);
401 	arch_remove_kprobe(p);
402 	kfree(op);
403 }
404 
405 /* Return true(!0) if the kprobe is ready for optimization. */
406 static inline int kprobe_optready(struct kprobe *p)
407 {
408 	struct optimized_kprobe *op;
409 
410 	if (kprobe_aggrprobe(p)) {
411 		op = container_of(p, struct optimized_kprobe, kp);
412 		return arch_prepared_optinsn(&op->optinsn);
413 	}
414 
415 	return 0;
416 }
417 
418 /* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
419 static inline int kprobe_disarmed(struct kprobe *p)
420 {
421 	struct optimized_kprobe *op;
422 
423 	/* If the kprobe is not an aggr/opt probe, just return whether it is disabled */
424 	if (!kprobe_aggrprobe(p))
425 		return kprobe_disabled(p);
426 
427 	op = container_of(p, struct optimized_kprobe, kp);
428 
429 	return kprobe_disabled(p) && list_empty(&op->list);
430 }
431 
432 /* Return true(!0) if the probe is queued on (un)optimizing lists */
433 static int __kprobes kprobe_queued(struct kprobe *p)
434 {
435 	struct optimized_kprobe *op;
436 
437 	if (kprobe_aggrprobe(p)) {
438 		op = container_of(p, struct optimized_kprobe, kp);
439 		if (!list_empty(&op->list))
440 			return 1;
441 	}
442 	return 0;
443 }
444 
445 /*
446  * Return an optimized kprobe whose optimizing code replaces
447  * instructions including addr (excluding the breakpoint itself).
448  */
449 static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
450 {
451 	int i;
452 	struct kprobe *p = NULL;
453 	struct optimized_kprobe *op;
454 
455 	/* Don't check i == 0, since that is a breakpoint case. */
456 	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
457 		p = get_kprobe((void *)(addr - i));
458 
459 	if (p && kprobe_optready(p)) {
460 		op = container_of(p, struct optimized_kprobe, kp);
461 		if (arch_within_optimized_kprobe(op, addr))
462 			return p;
463 	}
464 
465 	return NULL;
466 }
467 
468 /* Optimization staging list, protected by kprobe_mutex */
469 static LIST_HEAD(optimizing_list);
470 static LIST_HEAD(unoptimizing_list);
471 
472 static void kprobe_optimizer(struct work_struct *work);
473 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
474 #define OPTIMIZE_DELAY 5
475 
476 /*
477  * Optimize (replace a breakpoint with a jump) kprobes listed on
478  * optimizing_list.
479  */
480 static __kprobes void do_optimize_kprobes(void)
481 {
482 	/* Optimization is never done while all probes are disarmed */
483 	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
484 	    list_empty(&optimizing_list))
485 		return;
486 
487 	/*
488 	 * Optimization/unoptimization refers to online_cpus via
489 	 * stop_machine(), while cpu-hotplug modifies online_cpus.
490 	 * At the same time, text_mutex is held both in cpu-hotplug and here.
491 	 * This combination can cause a deadlock (cpu-hotplug tries to lock
492 	 * text_mutex, but stop_machine() cannot finish because online_cpus
493 	 * has changed in the meantime).
494 	 * To avoid this deadlock, we call get_online_cpus() to prevent
495 	 * cpu-hotplug before taking text_mutex.
496 	 */
497 	get_online_cpus();
498 	mutex_lock(&text_mutex);
499 	arch_optimize_kprobes(&optimizing_list);
500 	mutex_unlock(&text_mutex);
501 	put_online_cpus();
502 }
503 
504 /*
505  * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
506  * if needed) kprobes listed on unoptimizing_list.
507  */
508 static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
509 {
510 	struct optimized_kprobe *op, *tmp;
511 
512 	/* Unlike optimization, unoptimization must be done even when disarmed */
513 	if (list_empty(&unoptimizing_list))
514 		return;
515 
516 	/* Ditto for do_optimize_kprobes() */
517 	get_online_cpus();
518 	mutex_lock(&text_mutex);
519 	arch_unoptimize_kprobes(&unoptimizing_list, free_list);
520 	/* Loop free_list for disarming */
521 	list_for_each_entry_safe(op, tmp, free_list, list) {
522 		/* Disarm probes if marked disabled */
523 		if (kprobe_disabled(&op->kp))
524 			arch_disarm_kprobe(&op->kp);
525 		if (kprobe_unused(&op->kp)) {
526 			/*
527 			 * Remove unused probes from hash list. After waiting
528 			 * for synchronization, these probes are reclaimed.
529 			 * (reclaiming is done by do_free_cleaned_kprobes.)
530 			 */
531 			hlist_del_rcu(&op->kp.hlist);
532 		} else
533 			list_del_init(&op->list);
534 	}
535 	mutex_unlock(&text_mutex);
536 	put_online_cpus();
537 }
538 
539 /* Reclaim all kprobes on the free_list */
540 static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list)
541 {
542 	struct optimized_kprobe *op, *tmp;
543 
544 	list_for_each_entry_safe(op, tmp, free_list, list) {
545 		BUG_ON(!kprobe_unused(&op->kp));
546 		list_del_init(&op->list);
547 		free_aggr_kprobe(&op->kp);
548 	}
549 }
550 
551 /* Start the optimizer after OPTIMIZE_DELAY has passed */
552 static __kprobes void kick_kprobe_optimizer(void)
553 {
554 	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
555 }
556 
557 /* Kprobe jump optimizer */
558 static __kprobes void kprobe_optimizer(struct work_struct *work)
559 {
560 	LIST_HEAD(free_list);
561 
562 	mutex_lock(&kprobe_mutex);
563 	/* Lock modules while optimizing kprobes */
564 	mutex_lock(&module_mutex);
565 
566 	/*
567 	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
568 	 * kprobes before waiting for the quiescence period.
569 	 */
570 	do_unoptimize_kprobes(&free_list);
571 
572 	/*
573 	 * Step 2: Wait for a quiescence period to ensure all running
574 	 * interrupts are done. Because an optprobe may modify multiple
575 	 * instructions, there is a chance that the Nth instruction is
576 	 * interrupted. In that case, the interrupted context could return
577 	 * into the 2nd-Nth byte of the jump instruction. This wait avoids that.
578 	 */
579 	synchronize_sched();
580 
581 	/* Step 3: Optimize kprobes after the quiescence period */
582 	do_optimize_kprobes();
583 
584 	/* Step 4: Free cleaned kprobes after the quiescence period */
585 	do_free_cleaned_kprobes(&free_list);
586 
587 	mutex_unlock(&module_mutex);
588 	mutex_unlock(&kprobe_mutex);
589 
590 	/* Step 5: Kick optimizer again if needed */
591 	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
592 		kick_kprobe_optimizer();
593 }
594 
595 /* Wait for optimization and unoptimization to complete */
596 static __kprobes void wait_for_kprobe_optimizer(void)
597 {
598 	mutex_lock(&kprobe_mutex);
599 
600 	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
601 		mutex_unlock(&kprobe_mutex);
602 
603 		/* this will also make optimizing_work execute immediately */
604 		flush_delayed_work(&optimizing_work);
605 		/* @optimizing_work might not have been queued yet, relax */
606 		cpu_relax();
607 
608 		mutex_lock(&kprobe_mutex);
609 	}
610 
611 	mutex_unlock(&kprobe_mutex);
612 }
613 
614 /* Optimize kprobe if p is ready to be optimized */
615 static __kprobes void optimize_kprobe(struct kprobe *p)
616 {
617 	struct optimized_kprobe *op;
618 
619 	/* Check if the kprobe is disabled or not ready for optimization. */
620 	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
621 	    (kprobe_disabled(p) || kprobes_all_disarmed))
622 		return;
623 
624 	/* Neither break_handler nor post_handler is supported. */
625 	if (p->break_handler || p->post_handler)
626 		return;
627 
628 	op = container_of(p, struct optimized_kprobe, kp);
629 
630 	/* Check that there are no other kprobes within the optimized instructions */
631 	if (arch_check_optimized_kprobe(op) < 0)
632 		return;
633 
634 	/* Check if it is already optimized. */
635 	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
636 		return;
637 	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
638 
639 	if (!list_empty(&op->list))
640 		/* This probe is being unoptimized. Just dequeue it */
641 		list_del_init(&op->list);
642 	else {
643 		list_add(&op->list, &optimizing_list);
644 		kick_kprobe_optimizer();
645 	}
646 }
647 
648 /* Shortcut for direct (immediate) unoptimizing */
649 static __kprobes void force_unoptimize_kprobe(struct optimized_kprobe *op)
650 {
651 	get_online_cpus();
652 	arch_unoptimize_kprobe(op);
653 	put_online_cpus();
654 	if (kprobe_disabled(&op->kp))
655 		arch_disarm_kprobe(&op->kp);
656 }
657 
658 /* Unoptimize a kprobe if p is optimized */
659 static __kprobes void unoptimize_kprobe(struct kprobe *p, bool force)
660 {
661 	struct optimized_kprobe *op;
662 
663 	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
664 		return; /* This is neither an optprobe nor optimized */
665 
666 	op = container_of(p, struct optimized_kprobe, kp);
667 	if (!kprobe_optimized(p)) {
668 		/* Unoptimized or unoptimizing case */
669 		if (force && !list_empty(&op->list)) {
670 			/*
671 			 * Only if this kprobe is being unoptimized and force
672 			 * is set, forcibly unoptimize it. (No need to unoptimize
673 			 * an already-unoptimized kprobe again :)
674 			 */
675 			list_del_init(&op->list);
676 			force_unoptimize_kprobe(op);
677 		}
678 		return;
679 	}
680 
681 	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
682 	if (!list_empty(&op->list)) {
683 		/* Dequeue from the optimization queue */
684 		list_del_init(&op->list);
685 		return;
686 	}
687 	/* Optimized kprobe case */
688 	if (force)
689 		/* Forcibly update the code: this is a special case */
690 		force_unoptimize_kprobe(op);
691 	else {
692 		list_add(&op->list, &unoptimizing_list);
693 		kick_kprobe_optimizer();
694 	}
695 }
696 
697 /* Cancel unoptimizing so the probe can be reused */
698 static void reuse_unused_kprobe(struct kprobe *ap)
699 {
700 	struct optimized_kprobe *op;
701 
702 	BUG_ON(!kprobe_unused(ap));
703 	/*
704 	 * An unused kprobe MUST be in the middle of delayed unoptimizing
705 	 * (meaning the relative jump is still in place) and disabled.
706 	 */
707 	op = container_of(ap, struct optimized_kprobe, kp);
708 	if (unlikely(list_empty(&op->list)))
709 		printk(KERN_WARNING "Warning: found a stray unused "
710 			"aggrprobe@%p\n", ap->addr);
711 	/* Enable the probe again */
712 	ap->flags &= ~KPROBE_FLAG_DISABLED;
713 	/* Optimize it again (remove from op->list) */
714 	BUG_ON(!kprobe_optready(ap));
715 	optimize_kprobe(ap);
716 }
717 
718 /* Remove optimized instructions */
719 static void __kprobes kill_optimized_kprobe(struct kprobe *p)
720 {
721 	struct optimized_kprobe *op;
722 
723 	op = container_of(p, struct optimized_kprobe, kp);
724 	if (!list_empty(&op->list))
725 		/* Dequeue from the (un)optimization queue */
726 		list_del_init(&op->list);
727 
728 	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
729 	/* Don't touch the code, because it is already freed. */
730 	arch_remove_optimized_kprobe(op);
731 }
732 
733 /* Try to prepare optimized instructions */
734 static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
735 {
736 	struct optimized_kprobe *op;
737 
738 	op = container_of(p, struct optimized_kprobe, kp);
739 	arch_prepare_optimized_kprobe(op);
740 }
741 
742 /* Allocate new optimized_kprobe and try to prepare optimized instructions */
743 static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
744 {
745 	struct optimized_kprobe *op;
746 
747 	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
748 	if (!op)
749 		return NULL;
750 
751 	INIT_LIST_HEAD(&op->list);
752 	op->kp.addr = p->addr;
753 	arch_prepare_optimized_kprobe(op);
754 
755 	return &op->kp;
756 }
757 
758 static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
759 
760 /*
761  * Prepare an optimized_kprobe and optimize it
762  * NOTE: p must be a normal registered kprobe
763  */
764 static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
765 {
766 	struct kprobe *ap;
767 	struct optimized_kprobe *op;
768 
769 	/* It is impossible to optimize an ftrace-based kprobe */
770 	if (kprobe_ftrace(p))
771 		return;
772 
773 	/* For preparing optimization, jump_label_text_reserved() is called */
774 	jump_label_lock();
775 	mutex_lock(&text_mutex);
776 
777 	ap = alloc_aggr_kprobe(p);
778 	if (!ap)
779 		goto out;
780 
781 	op = container_of(ap, struct optimized_kprobe, kp);
782 	if (!arch_prepared_optinsn(&op->optinsn)) {
783 		/* If setting up the optimization failed, fall back to a plain kprobe */
784 		arch_remove_optimized_kprobe(op);
785 		kfree(op);
786 		goto out;
787 	}
788 
789 	init_aggr_kprobe(ap, p);
790 	optimize_kprobe(ap);	/* This just kicks optimizer thread */
791 
792 out:
793 	mutex_unlock(&text_mutex);
794 	jump_label_unlock();
795 }
796 
797 #ifdef CONFIG_SYSCTL
798 /* This should be called with kprobe_mutex locked */
799 static void __kprobes optimize_all_kprobes(void)
800 {
801 	struct hlist_head *head;
802 	struct hlist_node *node;
803 	struct kprobe *p;
804 	unsigned int i;
805 
806 	/* If optimization is already allowed, just return */
807 	if (kprobes_allow_optimization)
808 		return;
809 
810 	kprobes_allow_optimization = true;
811 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
812 		head = &kprobe_table[i];
813 		hlist_for_each_entry_rcu(p, node, head, hlist)
814 			if (!kprobe_disabled(p))
815 				optimize_kprobe(p);
816 	}
817 	printk(KERN_INFO "Kprobes globally optimized\n");
818 }
819 
820 /* This should be called with kprobe_mutex locked */
821 static void __kprobes unoptimize_all_kprobes(void)
822 {
823 	struct hlist_head *head;
824 	struct hlist_node *node;
825 	struct kprobe *p;
826 	unsigned int i;
827 
828 	/* If optimization is already prohibited, just return */
829 	if (!kprobes_allow_optimization)
830 		return;
831 
832 	kprobes_allow_optimization = false;
833 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
834 		head = &kprobe_table[i];
835 		hlist_for_each_entry_rcu(p, node, head, hlist) {
836 			if (!kprobe_disabled(p))
837 				unoptimize_kprobe(p, false);
838 		}
839 	}
840 	/* Wait for unoptimizing completion */
841 	wait_for_kprobe_optimizer();
842 	printk(KERN_INFO "Kprobes globally unoptimized\n");
843 }
844 
845 int sysctl_kprobes_optimization;
846 int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
847 				      void __user *buffer, size_t *length,
848 				      loff_t *ppos)
849 {
850 	int ret;
851 
852 	mutex_lock(&kprobe_mutex);
853 	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
854 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
855 
856 	if (sysctl_kprobes_optimization)
857 		optimize_all_kprobes();
858 	else
859 		unoptimize_all_kprobes();
860 	mutex_unlock(&kprobe_mutex);
861 
862 	return ret;
863 }
864 #endif /* CONFIG_SYSCTL */
865 
866 /* Put a breakpoint for a probe. Must be called with text_mutex locked */
867 static void __kprobes __arm_kprobe(struct kprobe *p)
868 {
869 	struct kprobe *_p;
870 
871 	/* Check collision with other optimized kprobes */
872 	_p = get_optimized_kprobe((unsigned long)p->addr);
873 	if (unlikely(_p))
874 		/* Fallback to unoptimized kprobe */
875 		unoptimize_kprobe(_p, true);
876 
877 	arch_arm_kprobe(p);
878 	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
879 }
880 
881 /* Remove the breakpoint of a probe. Must be called with text_mutex locked */
882 static void __kprobes __disarm_kprobe(struct kprobe *p, bool reopt)
883 {
884 	struct kprobe *_p;
885 
886 	unoptimize_kprobe(p, false);	/* Try to unoptimize */
887 
888 	if (!kprobe_queued(p)) {
889 		arch_disarm_kprobe(p);
890 		/* If another kprobe was blocked, optimize it. */
891 		_p = get_optimized_kprobe((unsigned long)p->addr);
892 		if (unlikely(_p) && reopt)
893 			optimize_kprobe(_p);
894 	}
895 	/* TODO: reoptimize others after unoptimizing this probe */
896 }
897 
898 #else /* !CONFIG_OPTPROBES */
899 
900 #define optimize_kprobe(p)			do {} while (0)
901 #define unoptimize_kprobe(p, f)			do {} while (0)
902 #define kill_optimized_kprobe(p)		do {} while (0)
903 #define prepare_optimized_kprobe(p)		do {} while (0)
904 #define try_to_optimize_kprobe(p)		do {} while (0)
905 #define __arm_kprobe(p)				arch_arm_kprobe(p)
906 #define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
907 #define kprobe_disarmed(p)			kprobe_disabled(p)
908 #define wait_for_kprobe_optimizer()		do {} while (0)
909 
910 /* Without optimization support there should be no unused kprobes to reuse */
911 static void reuse_unused_kprobe(struct kprobe *ap)
912 {
913 	printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
914 	BUG_ON(kprobe_unused(ap));
915 }
916 
917 static __kprobes void free_aggr_kprobe(struct kprobe *p)
918 {
919 	arch_remove_kprobe(p);
920 	kfree(p);
921 }
922 
923 static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
924 {
925 	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
926 }
927 #endif /* CONFIG_OPTPROBES */
928 
929 #ifdef CONFIG_KPROBES_ON_FTRACE
930 static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
931 	.func = kprobe_ftrace_handler,
932 	.flags = FTRACE_OPS_FL_SAVE_REGS,
933 };
934 static int kprobe_ftrace_enabled;
935 
936 /* The caller must ensure p->addr really is an ftrace location */
937 static int __kprobes prepare_kprobe(struct kprobe *p)
938 {
939 	if (!kprobe_ftrace(p))
940 		return arch_prepare_kprobe(p);
941 
942 	return arch_prepare_kprobe_ftrace(p);
943 }
944 
945 /* Caller must lock kprobe_mutex */
946 static void __kprobes arm_kprobe_ftrace(struct kprobe *p)
947 {
948 	int ret;
949 
950 	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
951 				   (unsigned long)p->addr, 0, 0);
952 	WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
953 	kprobe_ftrace_enabled++;
954 	if (kprobe_ftrace_enabled == 1) {
955 		ret = register_ftrace_function(&kprobe_ftrace_ops);
956 		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
957 	}
958 }
959 
960 /* Caller must lock kprobe_mutex */
961 static void __kprobes disarm_kprobe_ftrace(struct kprobe *p)
962 {
963 	int ret;
964 
965 	kprobe_ftrace_enabled--;
966 	if (kprobe_ftrace_enabled == 0) {
967 		ret = unregister_ftrace_function(&kprobe_ftrace_ops);
968 		WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret);
969 	}
970 	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
971 			   (unsigned long)p->addr, 1, 0);
972 	WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
973 }
974 #else	/* !CONFIG_KPROBES_ON_FTRACE */
975 #define prepare_kprobe(p)	arch_prepare_kprobe(p)
976 #define arm_kprobe_ftrace(p)	do {} while (0)
977 #define disarm_kprobe_ftrace(p)	do {} while (0)
978 #endif
979 
980 /* Arm a kprobe with text_mutex */
981 static void __kprobes arm_kprobe(struct kprobe *kp)
982 {
983 	if (unlikely(kprobe_ftrace(kp))) {
984 		arm_kprobe_ftrace(kp);
985 		return;
986 	}
987 	/*
988 	 * Here, since __arm_kprobe() doesn't use stop_machine(),
989 	 * this doesn't cause a deadlock on text_mutex. So, we don't
990 	 * need get_online_cpus().
991 	 */
992 	mutex_lock(&text_mutex);
993 	__arm_kprobe(kp);
994 	mutex_unlock(&text_mutex);
995 }
996 
997 /* Disarm a kprobe with text_mutex */
998 static void __kprobes disarm_kprobe(struct kprobe *kp, bool reopt)
999 {
1000 	if (unlikely(kprobe_ftrace(kp))) {
1001 		disarm_kprobe_ftrace(kp);
1002 		return;
1003 	}
1004 	/* Ditto */
1005 	mutex_lock(&text_mutex);
1006 	__disarm_kprobe(kp, reopt);
1007 	mutex_unlock(&text_mutex);
1008 }
1009 
1010 /*
1011  * Aggregate handlers for multiple kprobes support - these handlers
1012  * take care of invoking the individual kprobe handlers on p->list
1013  */
1014 static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
1015 {
1016 	struct kprobe *kp;
1017 
1018 	list_for_each_entry_rcu(kp, &p->list, list) {
1019 		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
1020 			set_kprobe_instance(kp);
1021 			if (kp->pre_handler(kp, regs))
1022 				return 1;
1023 		}
1024 		reset_kprobe_instance();
1025 	}
1026 	return 0;
1027 }
1028 
1029 static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
1030 					unsigned long flags)
1031 {
1032 	struct kprobe *kp;
1033 
1034 	list_for_each_entry_rcu(kp, &p->list, list) {
1035 		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
1036 			set_kprobe_instance(kp);
1037 			kp->post_handler(kp, regs, flags);
1038 			reset_kprobe_instance();
1039 		}
1040 	}
1041 }
1042 
1043 static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
1044 					int trapnr)
1045 {
1046 	struct kprobe *cur = __this_cpu_read(kprobe_instance);
1047 
1048 	/*
1049 	 * If we faulted "during" the execution of a user-specified
1050 	 * probe handler, invoke just that probe's fault handler.
1051 	 */
1052 	if (cur && cur->fault_handler) {
1053 		if (cur->fault_handler(cur, regs, trapnr))
1054 			return 1;
1055 	}
1056 	return 0;
1057 }
1058 
1059 static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
1060 {
1061 	struct kprobe *cur = __this_cpu_read(kprobe_instance);
1062 	int ret = 0;
1063 
1064 	if (cur && cur->break_handler) {
1065 		if (cur->break_handler(cur, regs))
1066 			ret = 1;
1067 	}
1068 	reset_kprobe_instance();
1069 	return ret;
1070 }
1071 
1072 /* Walks the list and increments the nmissed count for the multiprobe case */
1073 void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
1074 {
1075 	struct kprobe *kp;
1076 	if (!kprobe_aggrprobe(p)) {
1077 		p->nmissed++;
1078 	} else {
1079 		list_for_each_entry_rcu(kp, &p->list, list)
1080 			kp->nmissed++;
1081 	}
1082 	return;
1083 }
1084 
1085 void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
1086 				struct hlist_head *head)
1087 {
1088 	struct kretprobe *rp = ri->rp;
1089 
1090 	/* remove the rp instance from the kretprobe_inst_table */
1091 	hlist_del(&ri->hlist);
1092 	INIT_HLIST_NODE(&ri->hlist);
1093 	if (likely(rp)) {
1094 		raw_spin_lock(&rp->lock);
1095 		hlist_add_head(&ri->hlist, &rp->free_instances);
1096 		raw_spin_unlock(&rp->lock);
1097 	} else
1098 		/* Unregistering */
1099 		hlist_add_head(&ri->hlist, head);
1100 }
1101 
1102 void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
1103 			 struct hlist_head **head, unsigned long *flags)
1104 __acquires(hlist_lock)
1105 {
1106 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
1107 	raw_spinlock_t *hlist_lock;
1108 
1109 	*head = &kretprobe_inst_table[hash];
1110 	hlist_lock = kretprobe_table_lock_ptr(hash);
1111 	raw_spin_lock_irqsave(hlist_lock, *flags);
1112 }
1113 
1114 static void __kprobes kretprobe_table_lock(unsigned long hash,
1115 	unsigned long *flags)
1116 __acquires(hlist_lock)
1117 {
1118 	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
1119 	raw_spin_lock_irqsave(hlist_lock, *flags);
1120 }
1121 
1122 void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
1123 	unsigned long *flags)
1124 __releases(hlist_lock)
1125 {
1126 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
1127 	raw_spinlock_t *hlist_lock;
1128 
1129 	hlist_lock = kretprobe_table_lock_ptr(hash);
1130 	raw_spin_unlock_irqrestore(hlist_lock, *flags);
1131 }
1132 
1133 static void __kprobes kretprobe_table_unlock(unsigned long hash,
1134        unsigned long *flags)
1135 __releases(hlist_lock)
1136 {
1137 	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
1138 	raw_spin_unlock_irqrestore(hlist_lock, *flags);
1139 }
1140 
1141 /*
1142  * This function is called from finish_task_switch when task tk becomes dead,
1143  * so that we can recycle any function-return probe instances associated
1144  * with this task. These left-over instances represent probed functions
1145  * that have been called but will never return.
1146  */
1147 void __kprobes kprobe_flush_task(struct task_struct *tk)
1148 {
1149 	struct kretprobe_instance *ri;
1150 	struct hlist_head *head, empty_rp;
1151 	struct hlist_node *node, *tmp;
1152 	unsigned long hash, flags = 0;
1153 
1154 	if (unlikely(!kprobes_initialized))
1155 		/* Early boot.  kretprobe_table_locks not yet initialized. */
1156 		return;
1157 
1158 	INIT_HLIST_HEAD(&empty_rp);
1159 	hash = hash_ptr(tk, KPROBE_HASH_BITS);
1160 	head = &kretprobe_inst_table[hash];
1161 	kretprobe_table_lock(hash, &flags);
1162 	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
1163 		if (ri->task == tk)
1164 			recycle_rp_inst(ri, &empty_rp);
1165 	}
1166 	kretprobe_table_unlock(hash, &flags);
1167 	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
1168 		hlist_del(&ri->hlist);
1169 		kfree(ri);
1170 	}
1171 }
1172 
1173 static inline void free_rp_inst(struct kretprobe *rp)
1174 {
1175 	struct kretprobe_instance *ri;
1176 	struct hlist_node *pos, *next;
1177 
1178 	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
1179 		hlist_del(&ri->hlist);
1180 		kfree(ri);
1181 	}
1182 }
1183 
1184 static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
1185 {
1186 	unsigned long flags, hash;
1187 	struct kretprobe_instance *ri;
1188 	struct hlist_node *pos, *next;
1189 	struct hlist_head *head;
1190 
1191 	/* No race here */
1192 	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
1193 		kretprobe_table_lock(hash, &flags);
1194 		head = &kretprobe_inst_table[hash];
1195 		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
1196 			if (ri->rp == rp)
1197 				ri->rp = NULL;
1198 		}
1199 		kretprobe_table_unlock(hash, &flags);
1200 	}
1201 	free_rp_inst(rp);
1202 }
1203 
1204 /*
1205  * Add the new probe to ap->list. Fail if this is the
1206  * second jprobe at the address - two jprobes can't coexist.
1207  */
1208 static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
1209 {
1210 	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
1211 
1212 	if (p->break_handler || p->post_handler)
1213 		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */
1214 
1215 	if (p->break_handler) {
1216 		if (ap->break_handler)
1217 			return -EEXIST;
1218 		list_add_tail_rcu(&p->list, &ap->list);
1219 		ap->break_handler = aggr_break_handler;
1220 	} else
1221 		list_add_rcu(&p->list, &ap->list);
1222 	if (p->post_handler && !ap->post_handler)
1223 		ap->post_handler = aggr_post_handler;
1224 
1225 	return 0;
1226 }
1227 
1228 /*
1229  * Fill in the required fields of the "manager kprobe". Replace the
1230  * earlier kprobe in the hlist with the manager kprobe
1231  */
1232 static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
1233 {
1234 	/* Copy p's insn slot to ap */
1235 	copy_kprobe(p, ap);
1236 	flush_insn_slot(ap);
1237 	ap->addr = p->addr;
1238 	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
1239 	ap->pre_handler = aggr_pre_handler;
1240 	ap->fault_handler = aggr_fault_handler;
1241 	/* We don't care about a kprobe which has gone. */
1242 	if (p->post_handler && !kprobe_gone(p))
1243 		ap->post_handler = aggr_post_handler;
1244 	if (p->break_handler && !kprobe_gone(p))
1245 		ap->break_handler = aggr_break_handler;
1246 
1247 	INIT_LIST_HEAD(&ap->list);
1248 	INIT_HLIST_NODE(&ap->hlist);
1249 
1250 	list_add_rcu(&p->list, &ap->list);
1251 	hlist_replace_rcu(&p->hlist, &ap->hlist);
1252 }
1253 
1254 /*
1255  * This is the second or subsequent kprobe at the address - handle
1256  * the intricacies
1257  */
1258 static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
1259 					  struct kprobe *p)
1260 {
1261 	int ret = 0;
1262 	struct kprobe *ap = orig_p;
1263 
1264 	/* For preparing optimization, jump_label_text_reserved() is called */
1265 	jump_label_lock();
1266 	/*
1267 	 * Get online CPUs to avoid a text_mutex deadlock with stop_machine(),
1268 	 * which is invoked by unoptimize_kprobe() in add_new_kprobe().
1269 	 */
1270 	get_online_cpus();
1271 	mutex_lock(&text_mutex);
1272 
1273 	if (!kprobe_aggrprobe(orig_p)) {
1274 		/* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
1275 		ap = alloc_aggr_kprobe(orig_p);
1276 		if (!ap) {
1277 			ret = -ENOMEM;
1278 			goto out;
1279 		}
1280 		init_aggr_kprobe(ap, orig_p);
1281 	} else if (kprobe_unused(ap))
1282 		/* This probe is going to die. Rescue it */
1283 		reuse_unused_kprobe(ap);
1284 
1285 	if (kprobe_gone(ap)) {
1286 		/*
1287 		 * We are attempting to insert a new probe at the same location
1288 		 * where a probe in a module vaddr area existed and was already
1289 		 * freed. So the instruction slot has already been
1290 		 * released. We need a new slot for the new probe.
1291 		 */
1292 		ret = arch_prepare_kprobe(ap);
1293 		if (ret)
1294 			/*
1295 			 * Even if allocating a new slot fails, there is no need
1296 			 * to free the aggr_probe. It will be used next time, or
1297 			 * freed by unregister_kprobe().
1298 			 */
1299 			goto out;
1300 
1301 		/* Prepare optimized instructions if possible. */
1302 		prepare_optimized_kprobe(ap);
1303 
1304 		/*
1305 		 * Clear gone flag to prevent allocating new slot again, and
1306 		 * set disabled flag because it is not armed yet.
1307 		 */
1308 		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
1309 			    | KPROBE_FLAG_DISABLED;
1310 	}
1311 
1312 	/* Copy ap's insn slot to p */
1313 	copy_kprobe(ap, p);
1314 	ret = add_new_kprobe(ap, p);
1315 
1316 out:
1317 	mutex_unlock(&text_mutex);
1318 	put_online_cpus();
1319 	jump_label_unlock();
1320 
1321 	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
1322 		ap->flags &= ~KPROBE_FLAG_DISABLED;
1323 		if (!kprobes_all_disarmed)
1324 			/* Arm the breakpoint again. */
1325 			arm_kprobe(ap);
1326 	}
1327 	return ret;
1328 }
1329 
1330 static int __kprobes in_kprobes_functions(unsigned long addr)
1331 {
1332 	struct kprobe_blackpoint *kb;
1333 
1334 	if (addr >= (unsigned long)__kprobes_text_start &&
1335 	    addr < (unsigned long)__kprobes_text_end)
1336 		return -EINVAL;
1337 	/*
1338 	 * If there exists a kprobe_blacklist, verify and
1339 	 * fail any probe registration in the prohibited area
1340 	 */
1341 	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
1342 		if (kb->start_addr) {
1343 			if (addr >= kb->start_addr &&
1344 			    addr < (kb->start_addr + kb->range))
1345 				return -EINVAL;
1346 		}
1347 	}
1348 	return 0;
1349 }
1350 
1351 /*
1352  * If we have a symbol_name argument, look it up and add the offset field
1353  * to it. This way, we can specify a relative address to a symbol.
1354  * This returns an encoded error if it fails to look up the symbol or if
1355  * the combination of parameters is invalid.
1356  */
1357 static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
1358 {
1359 	kprobe_opcode_t *addr = p->addr;
1360 
1361 	if ((p->symbol_name && p->addr) ||
1362 	    (!p->symbol_name && !p->addr))
1363 		goto invalid;
1364 
1365 	if (p->symbol_name) {
1366 		kprobe_lookup_name(p->symbol_name, addr);
1367 		if (!addr)
1368 			return ERR_PTR(-ENOENT);
1369 	}
1370 
1371 	addr = (kprobe_opcode_t *)(((char *)addr) + p->offset);
1372 	if (addr)
1373 		return addr;
1374 
1375 invalid:
1376 	return ERR_PTR(-EINVAL);
1377 }
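/*
 * For illustration: a probe point may be given either as a raw address
 * or as symbol_name plus offset, never both. A hypothetical probe placed
 * 0x10 bytes into do_sys_open would look like this:
 */
#if 0
static struct kprobe kp = {
	.symbol_name	= "do_sys_open",	/* resolved via kprobe_lookup_name() */
	.offset		= 0x10,			/* added to the resolved address */
	/* setting .addr as well would make kprobe_addr() return -EINVAL */
};
#endif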
1378 
1379 /* Check that the passed kprobe is valid and return the kprobe from kprobe_table. */
1380 static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
1381 {
1382 	struct kprobe *ap, *list_p;
1383 
1384 	ap = get_kprobe(p->addr);
1385 	if (unlikely(!ap))
1386 		return NULL;
1387 
1388 	if (p != ap) {
1389 		list_for_each_entry_rcu(list_p, &ap->list, list)
1390 			if (list_p == p)
1391 				/* kprobe p is a valid probe */
1392 				goto valid;
1393 		return NULL;
1394 	}
1395 valid:
1396 	return ap;
1397 }
1398 
1399 /* Return error if the kprobe is being re-registered */
1400 static inline int check_kprobe_rereg(struct kprobe *p)
1401 {
1402 	int ret = 0;
1403 
1404 	mutex_lock(&kprobe_mutex);
1405 	if (__get_valid_kprobe(p))
1406 		ret = -EINVAL;
1407 	mutex_unlock(&kprobe_mutex);
1408 
1409 	return ret;
1410 }
1411 
1412 static __kprobes int check_kprobe_address_safe(struct kprobe *p,
1413 					       struct module **probed_mod)
1414 {
1415 	int ret = 0;
1416 	unsigned long ftrace_addr;
1417 
1418 	/*
1419 	 * If the address is located on an ftrace nop, set the
1420 	 * breakpoint to the following instruction.
1421 	 */
1422 	ftrace_addr = ftrace_location((unsigned long)p->addr);
1423 	if (ftrace_addr) {
1424 #ifdef CONFIG_KPROBES_ON_FTRACE
1425 		/* Given address is not on the instruction boundary */
1426 		if ((unsigned long)p->addr != ftrace_addr)
1427 			return -EILSEQ;
1428 		p->flags |= KPROBE_FLAG_FTRACE;
1429 #else	/* !CONFIG_KPROBES_ON_FTRACE */
1430 		return -EINVAL;
1431 #endif
1432 	}
1433 
1434 	jump_label_lock();
1435 	preempt_disable();
1436 
1437 	/* Ensure it is not in reserved area nor out of text */
1438 	if (!kernel_text_address((unsigned long) p->addr) ||
1439 	    in_kprobes_functions((unsigned long) p->addr) ||
1440 	    jump_label_text_reserved(p->addr, p->addr)) {
1441 		ret = -EINVAL;
1442 		goto out;
1443 	}
1444 
1445 	/* Check if we are probing a module */
1446 	*probed_mod = __module_text_address((unsigned long) p->addr);
1447 	if (*probed_mod) {
1448 		/*
1449 		 * We must hold a refcount of the probed module while updating
1450 		 * its code to prohibit unexpected unloading.
1451 		 */
1452 		if (unlikely(!try_module_get(*probed_mod))) {
1453 			ret = -ENOENT;
1454 			goto out;
1455 		}
1456 
1457 		/*
1458 		 * If the module has freed .init.text, we can't insert
1459 		 * kprobes there.
1460 		 */
1461 		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
1462 		    (*probed_mod)->state != MODULE_STATE_COMING) {
1463 			module_put(*probed_mod);
1464 			*probed_mod = NULL;
1465 			ret = -ENOENT;
1466 		}
1467 	}
1468 out:
1469 	preempt_enable();
1470 	jump_label_unlock();
1471 
1472 	return ret;
1473 }
1474 
1475 int __kprobes register_kprobe(struct kprobe *p)
1476 {
1477 	int ret;
1478 	struct kprobe *old_p;
1479 	struct module *probed_mod;
1480 	kprobe_opcode_t *addr;
1481 
1482 	/* Adjust probe address from symbol */
1483 	addr = kprobe_addr(p);
1484 	if (IS_ERR(addr))
1485 		return PTR_ERR(addr);
1486 	p->addr = addr;
1487 
1488 	ret = check_kprobe_rereg(p);
1489 	if (ret)
1490 		return ret;
1491 
1492 	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
1493 	p->flags &= KPROBE_FLAG_DISABLED;
1494 	p->nmissed = 0;
1495 	INIT_LIST_HEAD(&p->list);
1496 
1497 	ret = check_kprobe_address_safe(p, &probed_mod);
1498 	if (ret)
1499 		return ret;
1500 
1501 	mutex_lock(&kprobe_mutex);
1502 
1503 	old_p = get_kprobe(p->addr);
1504 	if (old_p) {
1505 		/* Since this may unoptimize old_p, locking text_mutex. */
1506 		ret = register_aggr_kprobe(old_p, p);
1507 		goto out;
1508 	}
1509 
1510 	mutex_lock(&text_mutex);	/* Avoiding text modification */
1511 	ret = prepare_kprobe(p);
1512 	mutex_unlock(&text_mutex);
1513 	if (ret)
1514 		goto out;
1515 
1516 	INIT_HLIST_NODE(&p->hlist);
1517 	hlist_add_head_rcu(&p->hlist,
1518 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
1519 
1520 	if (!kprobes_all_disarmed && !kprobe_disabled(p))
1521 		arm_kprobe(p);
1522 
1523 	/* Try to optimize kprobe */
1524 	try_to_optimize_kprobe(p);
1525 
1526 out:
1527 	mutex_unlock(&kprobe_mutex);
1528 
1529 	if (probed_mod)
1530 		module_put(probed_mod);
1531 
1532 	return ret;
1533 }
1534 EXPORT_SYMBOL_GPL(register_kprobe);
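/*
 * For illustration: minimal module-style usage of register_kprobe(), in
 * the spirit of samples/kprobes/kprobe_example.c (handler bodies here
 * are hypothetical):
 */
#if 0
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("pre_handler: probe hit at %p\n", p->addr);
	return 0;	/* 0 lets the normal single-step path continue */
}

static void handler_post(struct kprobe *p, struct pt_regs *regs,
			 unsigned long flags)
{
	pr_info("post_handler: probe at %p completed\n", p->addr);
}

static struct kprobe kp = {
	.symbol_name	= "do_fork",
	.pre_handler	= handler_pre,
	.post_handler	= handler_post,
};

static int __init kprobe_example_init(void)
{
	return register_kprobe(&kp);
}

static void __exit kprobe_example_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kprobe_example_init);
module_exit(kprobe_example_exit);
MODULE_LICENSE("GPL");
#endif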
1535 
1536 /* Check if all probes on the aggrprobe are disabled */
1537 static int __kprobes aggr_kprobe_disabled(struct kprobe *ap)
1538 {
1539 	struct kprobe *kp;
1540 
1541 	list_for_each_entry_rcu(kp, &ap->list, list)
1542 		if (!kprobe_disabled(kp))
1543 			/*
1544 			 * There is an active probe on the list.
1545 			 * We can't disable this ap.
1546 			 */
1547 			return 0;
1548 
1549 	return 1;
1550 }
1551 
1552 /* Disable one kprobe: must be called with kprobe_mutex held */
1553 static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p)
1554 {
1555 	struct kprobe *orig_p;
1556 
1557 	/* Get an original kprobe for return */
1558 	orig_p = __get_valid_kprobe(p);
1559 	if (unlikely(orig_p == NULL))
1560 		return NULL;
1561 
1562 	if (!kprobe_disabled(p)) {
1563 		/* Disable probe if it is a child probe */
1564 		if (p != orig_p)
1565 			p->flags |= KPROBE_FLAG_DISABLED;
1566 
1567 		/* Try to disarm and disable this/parent probe */
1568 		if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
1569 			disarm_kprobe(orig_p, true);
1570 			orig_p->flags |= KPROBE_FLAG_DISABLED;
1571 		}
1572 	}
1573 
1574 	return orig_p;
1575 }
1576 
1577 /*
1578  * Unregister a kprobe without scheduler synchronization.
1579  */
1580 static int __kprobes __unregister_kprobe_top(struct kprobe *p)
1581 {
1582 	struct kprobe *ap, *list_p;
1583 
1584 	/* Disable kprobe. This will disarm it if needed. */
1585 	ap = __disable_kprobe(p);
1586 	if (ap == NULL)
1587 		return -EINVAL;
1588 
1589 	if (ap == p)
1590 		/*
1591 		 * This probe is an independent (and non-optimized) kprobe
1592 		 * (not an aggrprobe). Remove from the hash list.
1593 		 */
1594 		goto disarmed;
1595 
1596 	/* The following process expects this probe to be an aggrprobe */
1597 	WARN_ON(!kprobe_aggrprobe(ap));
1598 
1599 	if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
1600 		/*
1601 		 * !disarmed could happen if the probe is under delayed
1602 		 * unoptimizing.
1603 		 */
1604 		goto disarmed;
1605 	else {
1606 		/* If the probe being disabled has special handlers, update the aggrprobe */
1607 		if (p->break_handler && !kprobe_gone(p))
1608 			ap->break_handler = NULL;
1609 		if (p->post_handler && !kprobe_gone(p)) {
1610 			list_for_each_entry_rcu(list_p, &ap->list, list) {
1611 				if ((list_p != p) && (list_p->post_handler))
1612 					goto noclean;
1613 			}
1614 			ap->post_handler = NULL;
1615 		}
1616 noclean:
1617 		/*
1618 		 * Remove from the aggrprobe: this path will do nothing in
1619 		 * __unregister_kprobe_bottom().
1620 		 */
1621 		list_del_rcu(&p->list);
1622 		if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
1623 			/*
1624 			 * Try to optimize this probe again, because post
1625 			 * handler may have been changed.
1626 			 */
1627 			optimize_kprobe(ap);
1628 	}
1629 	return 0;
1630 
1631 disarmed:
1632 	BUG_ON(!kprobe_disarmed(ap));
1633 	hlist_del_rcu(&ap->hlist);
1634 	return 0;
1635 }
1636 
1637 static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
1638 {
1639 	struct kprobe *ap;
1640 
1641 	if (list_empty(&p->list))
1642 		/* This is an independent kprobe */
1643 		arch_remove_kprobe(p);
1644 	else if (list_is_singular(&p->list)) {
1645 		/* This is the last child of an aggrprobe */
1646 		ap = list_entry(p->list.next, struct kprobe, list);
1647 		list_del(&p->list);
1648 		free_aggr_kprobe(ap);
1649 	}
1650 	/* Otherwise, do nothing. */
1651 }
1652 
1653 int __kprobes register_kprobes(struct kprobe **kps, int num)
1654 {
1655 	int i, ret = 0;
1656 
1657 	if (num <= 0)
1658 		return -EINVAL;
1659 	for (i = 0; i < num; i++) {
1660 		ret = register_kprobe(kps[i]);
1661 		if (ret < 0) {
1662 			if (i > 0)
1663 				unregister_kprobes(kps, i);
1664 			break;
1665 		}
1666 	}
1667 	return ret;
1668 }
1669 EXPORT_SYMBOL_GPL(register_kprobes);
1670 
1671 void __kprobes unregister_kprobe(struct kprobe *p)
1672 {
1673 	unregister_kprobes(&p, 1);
1674 }
1675 EXPORT_SYMBOL_GPL(unregister_kprobe);
1676 
1677 void __kprobes unregister_kprobes(struct kprobe **kps, int num)
1678 {
1679 	int i;
1680 
1681 	if (num <= 0)
1682 		return;
1683 	mutex_lock(&kprobe_mutex);
1684 	for (i = 0; i < num; i++)
1685 		if (__unregister_kprobe_top(kps[i]) < 0)
1686 			kps[i]->addr = NULL;
1687 	mutex_unlock(&kprobe_mutex);
1688 
1689 	synchronize_sched();
1690 	for (i = 0; i < num; i++)
1691 		if (kps[i]->addr)
1692 			__unregister_kprobe_bottom(kps[i]);
1693 }
1694 EXPORT_SYMBOL_GPL(unregister_kprobes);
1695 
1696 static struct notifier_block kprobe_exceptions_nb = {
1697 	.notifier_call = kprobe_exceptions_notify,
1698 	.priority = 0x7fffffff /* we need to be notified first */
1699 };
1700 
1701 unsigned long __weak arch_deref_entry_point(void *entry)
1702 {
1703 	return (unsigned long)entry;
1704 }
1705 
1706 int __kprobes register_jprobes(struct jprobe **jps, int num)
1707 {
1708 	struct jprobe *jp;
1709 	int ret = 0, i;
1710 
1711 	if (num <= 0)
1712 		return -EINVAL;
1713 	for (i = 0; i < num; i++) {
1714 		unsigned long addr, offset;
1715 		jp = jps[i];
1716 		addr = arch_deref_entry_point(jp->entry);
1717 
1718 		/* Verify probepoint is a function entry point */
1719 		if (kallsyms_lookup_size_offset(addr, NULL, &offset) &&
1720 		    offset == 0) {
1721 			jp->kp.pre_handler = setjmp_pre_handler;
1722 			jp->kp.break_handler = longjmp_break_handler;
1723 			ret = register_kprobe(&jp->kp);
1724 		} else
1725 			ret = -EINVAL;
1726 
1727 		if (ret < 0) {
1728 			if (i > 0)
1729 				unregister_jprobes(jps, i);
1730 			break;
1731 		}
1732 	}
1733 	return ret;
1734 }
1735 EXPORT_SYMBOL_GPL(register_jprobes);
1736 
1737 int __kprobes register_jprobe(struct jprobe *jp)
1738 {
1739 	return register_jprobes(&jp, 1);
1740 }
1741 EXPORT_SYMBOL_GPL(register_jprobe);
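/*
 * For illustration: a jprobe handler mirrors the probed function's
 * signature and must end with jprobe_return(). A condensed, hypothetical
 * sketch in the spirit of samples/kprobes/jprobe_example.c (do_fork's
 * signature as of this kernel):
 */
#if 0
static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
		     struct pt_regs *regs, unsigned long stack_size,
		     int __user *parent_tidptr, int __user *child_tidptr)
{
	pr_info("jprobe: clone_flags = 0x%lx\n", clone_flags);
	jprobe_return();	/* mandatory: never return normally */
	return 0;		/* not reached */
}

static struct jprobe my_jprobe = {
	.entry		= jdo_fork,
	.kp.symbol_name	= "do_fork",
};

/* register_jprobe(&my_jprobe) on init, unregister_jprobe(&my_jprobe) on exit */
#endif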
1742 
1743 void __kprobes unregister_jprobe(struct jprobe *jp)
1744 {
1745 	unregister_jprobes(&jp, 1);
1746 }
1747 EXPORT_SYMBOL_GPL(unregister_jprobe);
1748 
1749 void __kprobes unregister_jprobes(struct jprobe **jps, int num)
1750 {
1751 	int i;
1752 
1753 	if (num <= 0)
1754 		return;
1755 	mutex_lock(&kprobe_mutex);
1756 	for (i = 0; i < num; i++)
1757 		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
1758 			jps[i]->kp.addr = NULL;
1759 	mutex_unlock(&kprobe_mutex);
1760 
1761 	synchronize_sched();
1762 	for (i = 0; i < num; i++) {
1763 		if (jps[i]->kp.addr)
1764 			__unregister_kprobe_bottom(&jps[i]->kp);
1765 	}
1766 }
1767 EXPORT_SYMBOL_GPL(unregister_jprobes);
1768 
1769 #ifdef CONFIG_KRETPROBES
1770 /*
1771  * This kprobe pre_handler is registered with every kretprobe. When the
1772  * probe hits, it will set up the return probe.
1773  */
1774 static int __kprobes pre_handler_kretprobe(struct kprobe *p,
1775 					   struct pt_regs *regs)
1776 {
1777 	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
1778 	unsigned long hash, flags = 0;
1779 	struct kretprobe_instance *ri;
1780 
1781 	/* TODO: consider swapping the RA only after the last pre_handler has fired */
1782 	hash = hash_ptr(current, KPROBE_HASH_BITS);
1783 	raw_spin_lock_irqsave(&rp->lock, flags);
1784 	if (!hlist_empty(&rp->free_instances)) {
1785 		ri = hlist_entry(rp->free_instances.first,
1786 				struct kretprobe_instance, hlist);
1787 		hlist_del(&ri->hlist);
1788 		raw_spin_unlock_irqrestore(&rp->lock, flags);
1789 
1790 		ri->rp = rp;
1791 		ri->task = current;
1792 
1793 		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
1794 			raw_spin_lock_irqsave(&rp->lock, flags);
1795 			hlist_add_head(&ri->hlist, &rp->free_instances);
1796 			raw_spin_unlock_irqrestore(&rp->lock, flags);
1797 			return 0;
1798 		}
1799 
1800 		arch_prepare_kretprobe(ri, regs);
1801 
1802 		/* XXX(hch): why is there no hlist_move_head? */
1803 		INIT_HLIST_NODE(&ri->hlist);
1804 		kretprobe_table_lock(hash, &flags);
1805 		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
1806 		kretprobe_table_unlock(hash, &flags);
1807 	} else {
1808 		rp->nmissed++;
1809 		raw_spin_unlock_irqrestore(&rp->lock, flags);
1810 	}
1811 	return 0;
1812 }
1813 
1814 int __kprobes register_kretprobe(struct kretprobe *rp)
1815 {
1816 	int ret = 0;
1817 	struct kretprobe_instance *inst;
1818 	int i;
1819 	void *addr;
1820 
1821 	if (kretprobe_blacklist_size) {
1822 		addr = kprobe_addr(&rp->kp);
1823 		if (IS_ERR(addr))
1824 			return PTR_ERR(addr);
1825 
1826 		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
1827 			if (kretprobe_blacklist[i].addr == addr)
1828 				return -EINVAL;
1829 		}
1830 	}
1831 
1832 	rp->kp.pre_handler = pre_handler_kretprobe;
1833 	rp->kp.post_handler = NULL;
1834 	rp->kp.fault_handler = NULL;
1835 	rp->kp.break_handler = NULL;
1836 
1837 	/* Pre-allocate memory for max kretprobe instances */
1838 	if (rp->maxactive <= 0) {
1839 #ifdef CONFIG_PREEMPT
1840 		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
1841 #else
1842 		rp->maxactive = num_possible_cpus();
1843 #endif
1844 	}
1845 	raw_spin_lock_init(&rp->lock);
1846 	INIT_HLIST_HEAD(&rp->free_instances);
1847 	for (i = 0; i < rp->maxactive; i++) {
1848 		inst = kmalloc(sizeof(struct kretprobe_instance) +
1849 			       rp->data_size, GFP_KERNEL);
1850 		if (inst == NULL) {
1851 			free_rp_inst(rp);
1852 			return -ENOMEM;
1853 		}
1854 		INIT_HLIST_NODE(&inst->hlist);
1855 		hlist_add_head(&inst->hlist, &rp->free_instances);
1856 	}
1857 
1858 	rp->nmissed = 0;
1859 	/* Establish function entry probe point */
1860 	ret = register_kprobe(&rp->kp);
1861 	if (ret != 0)
1862 		free_rp_inst(rp);
1863 	return ret;
1864 }
1865 EXPORT_SYMBOL_GPL(register_kretprobe);
1866 
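/*
 * Usage sketch (editor's illustration, not part of this file): a
 * minimal kretprobe as register_kretprobe() expects it.  The probed
 * symbol "do_fork" is an assumption; regs_return_value() is the
 * arch-provided helper for the probed function's return value.
 */
#if 0
static int my_ret_handler(struct kretprobe_instance *ri,
			  struct pt_regs *regs)
{
	pr_info("%s returned %lu\n", ri->rp->kp.symbol_name,
		regs_return_value(regs));
	return 0;
}

static struct kretprobe my_kretprobe = {
	.handler	= my_ret_handler,
	.kp.symbol_name	= "do_fork",
	.maxactive	= 0,	/* 0: let the code above pick a default */
	.data_size	= 0,	/* per-instance scratch for an entry_handler */
};

/* module init:	ret = register_kretprobe(&my_kretprobe);
 * module exit:	unregister_kretprobe(&my_kretprobe);
 */
#endif
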
1867 int __kprobes register_kretprobes(struct kretprobe **rps, int num)
1868 {
1869 	int ret = 0, i;
1870 
1871 	if (num <= 0)
1872 		return -EINVAL;
1873 	for (i = 0; i < num; i++) {
1874 		ret = register_kretprobe(rps[i]);
1875 		if (ret < 0) {
1876 			if (i > 0)
1877 				unregister_kretprobes(rps, i);
1878 			break;
1879 		}
1880 	}
1881 	return ret;
1882 }
1883 EXPORT_SYMBOL_GPL(register_kretprobes);
1884 
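/*
 * Batch note (editor's sketch, reusing the hypothetical my_kretprobe
 * from the sketch above): register_kretprobes() rolls back the probes
 * that already succeeded when one fails, so callers need no partial
 * cleanup path.
 */
#if 0
static struct kretprobe *my_rps[] = { &my_kretprobe /* , ... */ };

static int __init my_probes_init(void)
{
	/* Either every probe in my_rps is registered, or none is. */
	return register_kretprobes(my_rps, ARRAY_SIZE(my_rps));
}
#endif
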
1885 void __kprobes unregister_kretprobe(struct kretprobe *rp)
1886 {
1887 	unregister_kretprobes(&rp, 1);
1888 }
1889 EXPORT_SYMBOL_GPL(unregister_kretprobe);
1890 
1891 void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
1892 {
1893 	int i;
1894 
1895 	if (num <= 0)
1896 		return;
1897 	mutex_lock(&kprobe_mutex);
1898 	for (i = 0; i < num; i++)
1899 		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
1900 			rps[i]->kp.addr = NULL;
1901 	mutex_unlock(&kprobe_mutex);
1902 
1903 	synchronize_sched();
1904 	for (i = 0; i < num; i++) {
1905 		if (rps[i]->kp.addr) {
1906 			__unregister_kprobe_bottom(&rps[i]->kp);
1907 			cleanup_rp_inst(rps[i]);
1908 		}
1909 	}
1910 }
1911 EXPORT_SYMBOL_GPL(unregister_kretprobes);
1912 
1913 #else /* CONFIG_KRETPROBES */
1914 int __kprobes register_kretprobe(struct kretprobe *rp)
1915 {
1916 	return -ENOSYS;
1917 }
1918 EXPORT_SYMBOL_GPL(register_kretprobe);
1919 
1920 int __kprobes register_kretprobes(struct kretprobe **rps, int num)
1921 {
1922 	return -ENOSYS;
1923 }
1924 EXPORT_SYMBOL_GPL(register_kretprobes);
1925 
1926 void __kprobes unregister_kretprobe(struct kretprobe *rp)
1927 {
1928 }
1929 EXPORT_SYMBOL_GPL(unregister_kretprobe);
1930 
1931 void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
1932 {
1933 }
1934 EXPORT_SYMBOL_GPL(unregister_kretprobes);
1935 
1936 static int __kprobes pre_handler_kretprobe(struct kprobe *p,
1937 					   struct pt_regs *regs)
1938 {
1939 	return 0;
1940 }
1941 
1942 #endif /* CONFIG_KRETPROBES */
1943 
1944 /* Mark the kprobe as gone and remove its instruction buffer. */
1945 static void __kprobes kill_kprobe(struct kprobe *p)
1946 {
1947 	struct kprobe *kp;
1948 
1949 	p->flags |= KPROBE_FLAG_GONE;
1950 	if (kprobe_aggrprobe(p)) {
1951 		/*
1952 		 * If this is an aggr_kprobe, we have to walk all the
1953 		 * chained probes and mark them GONE.
1954 		 */
1955 		list_for_each_entry_rcu(kp, &p->list, list)
1956 			kp->flags |= KPROBE_FLAG_GONE;
1957 		p->post_handler = NULL;
1958 		p->break_handler = NULL;
1959 		kill_optimized_kprobe(p);
1960 	}
1961 	/*
1962 	 * Here we can safely remove the insn_slot, because no thread will
1963 	 * call the original probed function (which will be freed soon) any more.
1964 	 */
1965 	arch_remove_kprobe(p);
1966 }
1967 
1968 /* Disable one kprobe */
1969 int __kprobes disable_kprobe(struct kprobe *kp)
1970 {
1971 	int ret = 0;
1972 
1973 	mutex_lock(&kprobe_mutex);
1974 
1975 	/* Disable this kprobe */
1976 	if (__disable_kprobe(kp) == NULL)
1977 		ret = -EINVAL;
1978 
1979 	mutex_unlock(&kprobe_mutex);
1980 	return ret;
1981 }
1982 EXPORT_SYMBOL_GPL(disable_kprobe);
1983 
1984 /* Enable one kprobe */
1985 int __kprobes enable_kprobe(struct kprobe *kp)
1986 {
1987 	int ret = 0;
1988 	struct kprobe *p;
1989 
1990 	mutex_lock(&kprobe_mutex);
1991 
1992 	/* Check whether specified probe is valid. */
1993 	p = __get_valid_kprobe(kp);
1994 	if (unlikely(p == NULL)) {
1995 		ret = -EINVAL;
1996 		goto out;
1997 	}
1998 
1999 	if (kprobe_gone(kp)) {
2000 		/* This kprobe is gone; we cannot enable it. */
2001 		ret = -EINVAL;
2002 		goto out;
2003 	}
2004 
2005 	if (p != kp)
2006 		kp->flags &= ~KPROBE_FLAG_DISABLED;
2007 
2008 	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
2009 		p->flags &= ~KPROBE_FLAG_DISABLED;
2010 		arm_kprobe(p);
2011 	}
2012 out:
2013 	mutex_unlock(&kprobe_mutex);
2014 	return ret;
2015 }
2016 EXPORT_SYMBOL_GPL(enable_kprobe);
2017 
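/*
 * Illustration (editor's sketch, not part of this file): muting a
 * registered kprobe with the disable/enable pair above instead of a
 * full unregister/register cycle.  The probe argument is assumed to
 * be already registered.
 */
#if 0
static void quiesce_probe(struct kprobe *kp)
{
	int ret;

	ret = disable_kprobe(kp);	/* handlers stop firing */
	if (ret < 0)
		pr_err("disable_kprobe failed: %d\n", ret);
	/* ... probe stays registered but disarmed ... */
	ret = enable_kprobe(kp);	/* re-arm the breakpoint */
	if (ret < 0)
		pr_err("enable_kprobe failed: %d\n", ret);
}
#endif
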
2018 void __kprobes dump_kprobe(struct kprobe *kp)
2019 {
2020 	printk(KERN_WARNING "Dumping kprobe:\n");
2021 	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
2022 	       kp->symbol_name, kp->addr, kp->offset);
2023 }
2024 
2025 /* Module notifier callback, checking kprobes in the module */
2026 static int __kprobes kprobes_module_callback(struct notifier_block *nb,
2027 					     unsigned long val, void *data)
2028 {
2029 	struct module *mod = data;
2030 	struct hlist_head *head;
2031 	struct hlist_node *node;
2032 	struct kprobe *p;
2033 	unsigned int i;
2034 	int checkcore = (val == MODULE_STATE_GOING);
2035 
2036 	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
2037 		return NOTIFY_DONE;
2038 
2039 	/*
2040 	 * When MODULE_STATE_GOING is notified, both the module's .text and
2041 	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
2042 	 * notified, only the .init.text section will be freed. We need to
2043 	 * disable the kprobes that have been inserted in those sections.
2044 	 */
2045 	mutex_lock(&kprobe_mutex);
2046 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2047 		head = &kprobe_table[i];
2048 		hlist_for_each_entry_rcu(p, node, head, hlist)
2049 			if (within_module_init((unsigned long)p->addr, mod) ||
2050 			    (checkcore &&
2051 			     within_module_core((unsigned long)p->addr, mod))) {
2052 				/*
2053 				 * The vaddr at which this probe is installed will soon
2054 				 * be vfreed, but not synced to disk. Hence,
2055 				 * disarming the breakpoint isn't needed.
2056 				 */
2057 				kill_kprobe(p);
2058 			}
2059 	}
2060 	mutex_unlock(&kprobe_mutex);
2061 	return NOTIFY_DONE;
2062 }
2063 
2064 static struct notifier_block kprobe_module_nb = {
2065 	.notifier_call = kprobes_module_callback,
2066 	.priority = 0
2067 };
2068 
2069 static int __init init_kprobes(void)
2070 {
2071 	int i, err = 0;
2072 	unsigned long offset = 0, size = 0;
2073 	char *modname, namebuf[128];
2074 	const char *symbol_name;
2075 	void *addr;
2076 	struct kprobe_blackpoint *kb;
2077 
2078 	/* FIXME allocate the probe table, currently defined statically */
2079 	/* initialize all list heads */
2080 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2081 		INIT_HLIST_HEAD(&kprobe_table[i]);
2082 		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
2083 		raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
2084 	}
2085 
2086 	/*
2087 	 * Lookup and populate the kprobe_blacklist.
2088 	 *
2089 	 * Unlike the kretprobe blacklist, we'll need to determine
2090 	 * the range of addresses that belong to the said functions,
2091 	 * since a kprobe need not necessarily be at the beginning
2092 	 * of a function.
2093 	 */
2094 	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
2095 		kprobe_lookup_name(kb->name, addr);
2096 		if (!addr)
2097 			continue;
2098 
2099 		kb->start_addr = (unsigned long)addr;
2100 		symbol_name = kallsyms_lookup(kb->start_addr,
2101 				&size, &offset, &modname, namebuf);
2102 		if (!symbol_name)
2103 			kb->range = 0;
2104 		else
2105 			kb->range = size;
2106 	}
2107 
2108 	if (kretprobe_blacklist_size) {
2109 		/* lookup the function address from its name */
2110 		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2111 			kprobe_lookup_name(kretprobe_blacklist[i].name,
2112 					   kretprobe_blacklist[i].addr);
2113 			if (!kretprobe_blacklist[i].addr)
2114 				printk(KERN_ERR "kretprobe: lookup failed: %s\n",
2115 				       kretprobe_blacklist[i].name);
2116 		}
2117 	}
2118 
2119 #if defined(CONFIG_OPTPROBES)
2120 #if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
2121 	/* Init kprobe_optinsn_slots */
2122 	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
2123 #endif
2124 	/* By default, kprobes can be optimized */
2125 	kprobes_allow_optimization = true;
2126 #endif
2127 
2128 	/* By default, kprobes are armed */
2129 	kprobes_all_disarmed = false;
2130 
2131 	err = arch_init_kprobes();
2132 	if (!err)
2133 		err = register_die_notifier(&kprobe_exceptions_nb);
2134 	if (!err)
2135 		err = register_module_notifier(&kprobe_module_nb);
2136 
2137 	kprobes_initialized = (err == 0);
2138 
2139 	if (!err)
2140 		init_test_probes();
2141 	return err;
2142 }
2143 
2144 #ifdef CONFIG_DEBUG_FS
2145 static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
2146 		const char *sym, int offset, char *modname, struct kprobe *pp)
2147 {
2148 	char *kprobe_type;
2149 
2150 	if (p->pre_handler == pre_handler_kretprobe)
2151 		kprobe_type = "r";
2152 	else if (p->pre_handler == setjmp_pre_handler)
2153 		kprobe_type = "j";
2154 	else
2155 		kprobe_type = "k";
2156 
2157 	if (sym)
2158 		seq_printf(pi, "%p  %s  %s+0x%x  %s ",
2159 			p->addr, kprobe_type, sym, offset,
2160 			(modname ? modname : " "));
2161 	else
2162 		seq_printf(pi, "%p  %s  %p ",
2163 			p->addr, kprobe_type, p->addr);
2164 
2165 	if (!pp)
2166 		pp = p;
2167 	seq_printf(pi, "%s%s%s%s\n",
2168 		(kprobe_gone(p) ? "[GONE]" : ""),
2169 		((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
2170 		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
2171 		(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
2172 }
2173 
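/*
 * Sample of the resulting debugfs "list" lines (editor's illustration;
 * the addresses and symbols are made up):
 *
 *	c015d71a  k  vfs_read+0x0
 *	c011a316  r  do_fork+0x0  [DISABLED]
 *	c03dedc5  j  do_fork+0x0  [GONE]
 */
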
2174 static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
2175 {
2176 	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
2177 }
2178 
2179 static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
2180 {
2181 	(*pos)++;
2182 	if (*pos >= KPROBE_TABLE_SIZE)
2183 		return NULL;
2184 	return pos;
2185 }
2186 
2187 static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
2188 {
2189 	/* Nothing to do */
2190 }
2191 
2192 static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
2193 {
2194 	struct hlist_head *head;
2195 	struct hlist_node *node;
2196 	struct kprobe *p, *kp;
2197 	const char *sym = NULL;
2198 	unsigned int i = *(loff_t *) v;
2199 	unsigned long offset = 0;
2200 	char *modname, namebuf[128];
2201 
2202 	head = &kprobe_table[i];
2203 	preempt_disable();
2204 	hlist_for_each_entry_rcu(p, node, head, hlist) {
2205 		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
2206 					&offset, &modname, namebuf);
2207 		if (kprobe_aggrprobe(p)) {
2208 			list_for_each_entry_rcu(kp, &p->list, list)
2209 				report_probe(pi, kp, sym, offset, modname, p);
2210 		} else
2211 			report_probe(pi, p, sym, offset, modname, NULL);
2212 	}
2213 	preempt_enable();
2214 	return 0;
2215 }
2216 
2217 static const struct seq_operations kprobes_seq_ops = {
2218 	.start = kprobe_seq_start,
2219 	.next  = kprobe_seq_next,
2220 	.stop  = kprobe_seq_stop,
2221 	.show  = show_kprobe_addr
2222 };
2223 
2224 static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
2225 {
2226 	return seq_open(filp, &kprobes_seq_ops);
2227 }
2228 
2229 static const struct file_operations debugfs_kprobes_operations = {
2230 	.open           = kprobes_open,
2231 	.read           = seq_read,
2232 	.llseek         = seq_lseek,
2233 	.release        = seq_release,
2234 };
2235 
2236 static void __kprobes arm_all_kprobes(void)
2237 {
2238 	struct hlist_head *head;
2239 	struct hlist_node *node;
2240 	struct kprobe *p;
2241 	unsigned int i;
2242 
2243 	mutex_lock(&kprobe_mutex);
2244 
2245 	/* If kprobes are armed, just return */
2246 	if (!kprobes_all_disarmed)
2247 		goto already_enabled;
2248 
2249 	/* Arming a kprobe does not optimize the kprobe itself */
2250 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2251 		head = &kprobe_table[i];
2252 		hlist_for_each_entry_rcu(p, node, head, hlist)
2253 			if (!kprobe_disabled(p))
2254 				arm_kprobe(p);
2255 	}
2256 
2257 	kprobes_all_disarmed = false;
2258 	printk(KERN_INFO "Kprobes globally enabled\n");
2259 
2260 already_enabled:
2261 	mutex_unlock(&kprobe_mutex);
2262 	return;
2263 }
2264 
2265 static void __kprobes disarm_all_kprobes(void)
2266 {
2267 	struct hlist_head *head;
2268 	struct hlist_node *node;
2269 	struct kprobe *p;
2270 	unsigned int i;
2271 
2272 	mutex_lock(&kprobe_mutex);
2273 
2274 	/* If kprobes are already disarmed, just return */
2275 	if (kprobes_all_disarmed) {
2276 		mutex_unlock(&kprobe_mutex);
2277 		return;
2278 	}
2279 
2280 	kprobes_all_disarmed = true;
2281 	printk(KERN_INFO "Kprobes globally disabled\n");
2282 
2283 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2284 		head = &kprobe_table[i];
2285 		hlist_for_each_entry_rcu(p, node, head, hlist) {
2286 			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
2287 				disarm_kprobe(p, false);
2288 		}
2289 	}
2290 	mutex_unlock(&kprobe_mutex);
2291 
2292 	/* Wait for the optimizer to finish disarming all kprobes */
2293 	wait_for_kprobe_optimizer();
2294 }
2295 
2296 /*
2297  * XXX: The debugfs bool file interface doesn't allow for callbacks
2298  * when the bool state is switched. We can switch to that facility
2299  * once it becomes available.
2300  */
2301 static ssize_t read_enabled_file_bool(struct file *file,
2302 	       char __user *user_buf, size_t count, loff_t *ppos)
2303 {
2304 	char buf[3];
2305 
2306 	if (!kprobes_all_disarmed)
2307 		buf[0] = '1';
2308 	else
2309 		buf[0] = '0';
2310 	buf[1] = '\n';
2311 	buf[2] = 0x00;
2312 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
2313 }
2314 
2315 static ssize_t write_enabled_file_bool(struct file *file,
2316 	       const char __user *user_buf, size_t count, loff_t *ppos)
2317 {
2318 	char buf[32];
2319 	size_t buf_size;
2320 
2321 	buf_size = min(count, (sizeof(buf)-1));
2322 	if (copy_from_user(buf, user_buf, buf_size))
2323 		return -EFAULT;
2324 
2325 	switch (buf[0]) {
2326 	case 'y':
2327 	case 'Y':
2328 	case '1':
2329 		arm_all_kprobes();
2330 		break;
2331 	case 'n':
2332 	case 'N':
2333 	case '0':
2334 		disarm_all_kprobes();
2335 		break;
2336 	}
2337 
2338 	return count;
2339 }
2340 
2341 static const struct file_operations fops_kp = {
2342 	.read =         read_enabled_file_bool,
2343 	.write =        write_enabled_file_bool,
2344 	.llseek =	default_llseek,
2345 };
2346 
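/*
 * Userspace sketch (editor's illustration, not part of this file):
 * driving the "enabled" file defined above.  Assumes debugfs is
 * mounted at /sys/kernel/debug; '0'/'n'/'N' disarms, '1'/'y'/'Y' arms.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int kprobes_set_enabled(int on)
{
	int fd = open("/sys/kernel/debug/kprobes/enabled", O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, on ? "1\n" : "0\n", 2) != 2) {
		close(fd);
		return -1;
	}
	return close(fd);
}
#endif
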
2347 static int __kprobes debugfs_kprobe_init(void)
2348 {
2349 	struct dentry *dir, *file;
2350 	unsigned int value = 1;
2351 
2352 	dir = debugfs_create_dir("kprobes", NULL);
2353 	if (!dir)
2354 		return -ENOMEM;
2355 
2356 	file = debugfs_create_file("list", 0444, dir, NULL,
2357 				&debugfs_kprobes_operations);
2358 	if (!file) {
2359 		debugfs_remove(dir);
2360 		return -ENOMEM;
2361 	}
2362 
2363 	file = debugfs_create_file("enabled", 0600, dir,
2364 					&value, &fops_kp);
2365 	if (!file) {
2366 		debugfs_remove(dir);
2367 		return -ENOMEM;
2368 	}
2369 
2370 	return 0;
2371 }
2372 
2373 late_initcall(debugfs_kprobe_init);
2374 #endif /* CONFIG_DEBUG_FS */
2375 
2376 module_init(init_kprobes);
2377 
2378 /* defined in arch/.../kernel/kprobes.c */
2379 EXPORT_SYMBOL_GPL(jprobe_return);
2380