1 /*
2  *  Kernel Probes (KProbes)
3  *  kernel/kprobes.c
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18  *
19  * Copyright (C) IBM Corporation, 2002, 2004
20  *
21  * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
22  *		Probes initial implementation (includes suggestions from
23  *		Rusty Russell).
24  * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
25  *		hlists and exceptions notifier as suggested by Andi Kleen.
26  * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
27  *		interface to access function arguments.
28  * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
29  *		exceptions notifier to be first on the priority list.
30  * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
31  *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
32  *		<prasanna@in.ibm.com> added function-return probes.
33  */
34 #include <linux/kprobes.h>
35 #include <linux/hash.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/stddef.h>
39 #include <linux/export.h>
40 #include <linux/moduleloader.h>
41 #include <linux/kallsyms.h>
42 #include <linux/freezer.h>
43 #include <linux/seq_file.h>
44 #include <linux/debugfs.h>
45 #include <linux/sysctl.h>
46 #include <linux/kdebug.h>
47 #include <linux/memory.h>
48 #include <linux/ftrace.h>
49 #include <linux/cpu.h>
50 #include <linux/jump_label.h>
51 
52 #include <asm/sections.h>
53 #include <asm/cacheflush.h>
54 #include <asm/errno.h>
55 #include <linux/uaccess.h>
56 
57 #define KPROBE_HASH_BITS 6
58 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
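/* 64 hash buckets; kprobes hash by probed address, kretprobe instances by task. */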
59 
60 
61 static int kprobes_initialized;
62 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
63 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
64 
65 /* NOTE: change this value only with kprobe_mutex held */
66 static bool kprobes_all_disarmed;
67 
68 /* This protects kprobe_table and optimizing_list */
69 static DEFINE_MUTEX(kprobe_mutex);
70 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
71 static struct {
72 	raw_spinlock_t lock ____cacheline_aligned_in_smp;
73 } kretprobe_table_locks[KPROBE_TABLE_SIZE];
74 
75 kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
76 					unsigned int __unused)
77 {
78 	return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
79 }
80 
81 static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
82 {
83 	return &(kretprobe_table_locks[hash].lock);
84 }
85 
86 /* Blacklist -- list of struct kprobe_blacklist_entry */
87 static LIST_HEAD(kprobe_blacklist);
88 
89 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
90 /*
91  * kprobe->ainsn.insn points to the copy of the instruction to be
92  * single-stepped. x86_64, POWER4 and above have no-exec support and
93  * stepping on an instruction on a vmalloced/kmalloced/data page
94  * is a recipe for disaster.
95  */
96 struct kprobe_insn_page {
97 	struct list_head list;
98 	kprobe_opcode_t *insns;		/* Page of instruction slots */
99 	struct kprobe_insn_cache *cache;
100 	int nused;
101 	int ngarbage;
102 	char slot_used[];
103 };
104 
105 #define KPROBE_INSN_PAGE_SIZE(slots)			\
106 	(offsetof(struct kprobe_insn_page, slot_used) +	\
107 	 (sizeof(char) * (slots)))
108 
109 static int slots_per_page(struct kprobe_insn_cache *c)
110 {
111 	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
112 }
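/*
 * Size example (illustrative, using x86 numbers): with PAGE_SIZE == 4096,
 * MAX_INSN_SIZE == 16 and sizeof(kprobe_opcode_t) == 1, slots_per_page()
 * yields 256 slots, and KPROBE_INSN_PAGE_SIZE(256) covers the struct header
 * plus a 256-byte slot_used[] array.
 */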
113 
114 enum kprobe_slot_state {
115 	SLOT_CLEAN = 0,
116 	SLOT_DIRTY = 1,
117 	SLOT_USED = 2,
118 };
119 
120 void __weak *alloc_insn_page(void)
121 {
122 	return module_alloc(PAGE_SIZE);
123 }
124 
125 void __weak free_insn_page(void *page)
126 {
127 	module_memfree(page);
128 }
129 
130 struct kprobe_insn_cache kprobe_insn_slots = {
131 	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
132 	.alloc = alloc_insn_page,
133 	.free = free_insn_page,
134 	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
135 	.insn_size = MAX_INSN_SIZE,
136 	.nr_garbage = 0,
137 };
138 static int collect_garbage_slots(struct kprobe_insn_cache *c);
139 
140 /**
141  * __get_insn_slot() - Find a slot on an executable page for an instruction.
142  * We allocate an executable page if there's no room on existing ones.
143  */
144 kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
145 {
146 	struct kprobe_insn_page *kip;
147 	kprobe_opcode_t *slot = NULL;
148 
149 	/* Since the slot array is not protected by rcu, we need a mutex */
150 	mutex_lock(&c->mutex);
151  retry:
152 	rcu_read_lock();
153 	list_for_each_entry_rcu(kip, &c->pages, list) {
154 		if (kip->nused < slots_per_page(c)) {
155 			int i;
156 			for (i = 0; i < slots_per_page(c); i++) {
157 				if (kip->slot_used[i] == SLOT_CLEAN) {
158 					kip->slot_used[i] = SLOT_USED;
159 					kip->nused++;
160 					slot = kip->insns + (i * c->insn_size);
161 					rcu_read_unlock();
162 					goto out;
163 				}
164 			}
165 			/* kip->nused is broken. Fix it. */
166 			kip->nused = slots_per_page(c);
167 			WARN_ON(1);
168 		}
169 	}
170 	rcu_read_unlock();
171 
172 	/* If there are any garbage slots, collect them and try again. */
173 	if (c->nr_garbage && collect_garbage_slots(c) == 0)
174 		goto retry;
175 
176 	/* All out of space.  Need to allocate a new page. */
177 	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
178 	if (!kip)
179 		goto out;
180 
181 	/*
182 	 * Use module_alloc so this page is within +/- 2GB of where the
183 	 * kernel image and loaded module images reside. This is required
184 	 * so x86_64 can correctly handle the %rip-relative fixups.
185 	 */
186 	kip->insns = c->alloc();
187 	if (!kip->insns) {
188 		kfree(kip);
189 		goto out;
190 	}
191 	INIT_LIST_HEAD(&kip->list);
192 	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
193 	kip->slot_used[0] = SLOT_USED;
194 	kip->nused = 1;
195 	kip->ngarbage = 0;
196 	kip->cache = c;
197 	list_add_rcu(&kip->list, &c->pages);
198 	slot = kip->insns;
199 out:
200 	mutex_unlock(&c->mutex);
201 	return slot;
202 }
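/*
 * Arch code normally reaches this through the get_insn_slot()/free_insn_slot()
 * wrappers generated by DEFINE_INSN_CACHE_OPS(insn) in <linux/kprobes.h>:
 * a slot is taken while preparing a probe and handed back (possibly dirty)
 * when the probe is removed.
 */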
203 
204 /* Return 1 if this slot's page became empty (and was freed unless it is the last page), otherwise 0. */
205 static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
206 {
207 	kip->slot_used[idx] = SLOT_CLEAN;
208 	kip->nused--;
209 	if (kip->nused == 0) {
210 		/*
211 		 * Page is no longer in use.  Free it unless
212 		 * it's the last one.  We keep the last one
213 		 * so as not to have to set it up again the
214 		 * next time somebody inserts a probe.
215 		 */
216 		if (!list_is_singular(&kip->list)) {
217 			list_del_rcu(&kip->list);
218 			synchronize_rcu();
219 			kip->cache->free(kip->insns);
220 			kfree(kip);
221 		}
222 		return 1;
223 	}
224 	return 0;
225 }
226 
227 static int collect_garbage_slots(struct kprobe_insn_cache *c)
228 {
229 	struct kprobe_insn_page *kip, *next;
230 
231 	/* Ensure no one is interrupted while on a garbage slot */
232 	synchronize_sched();
233 
234 	list_for_each_entry_safe(kip, next, &c->pages, list) {
235 		int i;
236 		if (kip->ngarbage == 0)
237 			continue;
238 		kip->ngarbage = 0;	/* we will collect all garbage slots */
239 		for (i = 0; i < slots_per_page(c); i++) {
240 			if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
241 				break;
242 		}
243 	}
244 	c->nr_garbage = 0;
245 	return 0;
246 }
247 
248 void __free_insn_slot(struct kprobe_insn_cache *c,
249 		      kprobe_opcode_t *slot, int dirty)
250 {
251 	struct kprobe_insn_page *kip;
252 	long idx;
253 
254 	mutex_lock(&c->mutex);
255 	rcu_read_lock();
256 	list_for_each_entry_rcu(kip, &c->pages, list) {
257 		idx = ((long)slot - (long)kip->insns) /
258 			(c->insn_size * sizeof(kprobe_opcode_t));
259 		if (idx >= 0 && idx < slots_per_page(c))
260 			goto out;
261 	}
262 	/* Could not find this slot. */
263 	WARN_ON(1);
264 	kip = NULL;
265 out:
266 	rcu_read_unlock();
267 	/* Mark and sweep: this may sleep */
268 	if (kip) {
269 		/* Check double free */
270 		WARN_ON(kip->slot_used[idx] != SLOT_USED);
271 		if (dirty) {
272 			kip->slot_used[idx] = SLOT_DIRTY;
273 			kip->ngarbage++;
274 			if (++c->nr_garbage > slots_per_page(c))
275 				collect_garbage_slots(c);
276 		} else {
277 			collect_one_slot(kip, idx);
278 		}
279 	}
280 	mutex_unlock(&c->mutex);
281 }
282 
283 /*
284  * Check whether the given address is on a page of kprobe instruction slots.
285  * This is used to check whether an address found on a stack
286  * lies in a text area or not.
287  */
288 bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
289 {
290 	struct kprobe_insn_page *kip;
291 	bool ret = false;
292 
293 	rcu_read_lock();
294 	list_for_each_entry_rcu(kip, &c->pages, list) {
295 		if (addr >= (unsigned long)kip->insns &&
296 		    addr < (unsigned long)kip->insns + PAGE_SIZE) {
297 			ret = true;
298 			break;
299 		}
300 	}
301 	rcu_read_unlock();
302 
303 	return ret;
304 }
305 
306 #ifdef CONFIG_OPTPROBES
307 /* For optimized_kprobe buffer */
308 struct kprobe_insn_cache kprobe_optinsn_slots = {
309 	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
310 	.alloc = alloc_insn_page,
311 	.free = free_insn_page,
312 	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
313 	/* .insn_size is initialized later */
314 	.nr_garbage = 0,
315 };
316 #endif
317 #endif
318 
319 /* We have preemption disabled, so it is safe to use the __ versions */
320 static inline void set_kprobe_instance(struct kprobe *kp)
321 {
322 	__this_cpu_write(kprobe_instance, kp);
323 }
324 
325 static inline void reset_kprobe_instance(void)
326 {
327 	__this_cpu_write(kprobe_instance, NULL);
328 }
329 
330 /*
331  * This routine is called either:
332  * 	- under the kprobe_mutex - during kprobe_[un]register()
333  * 				OR
334  * 	- with preemption disabled - from arch/xxx/kernel/kprobes.c
335  */
336 struct kprobe *get_kprobe(void *addr)
337 {
338 	struct hlist_head *head;
339 	struct kprobe *p;
340 
341 	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
342 	hlist_for_each_entry_rcu(p, head, hlist) {
343 		if (p->addr == addr)
344 			return p;
345 	}
346 
347 	return NULL;
348 }
349 NOKPROBE_SYMBOL(get_kprobe);
350 
351 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
352 
353 /* Return true if the kprobe is an aggregator */
354 static inline int kprobe_aggrprobe(struct kprobe *p)
355 {
356 	return p->pre_handler == aggr_pre_handler;
357 }
358 
359 /* Return true(!0) if the kprobe is unused */
360 static inline int kprobe_unused(struct kprobe *p)
361 {
362 	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
363 	       list_empty(&p->list);
364 }
365 
366 /*
367  * Keep all fields in the kprobe consistent
368  */
369 static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
370 {
371 	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
372 	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
373 }
374 
375 #ifdef CONFIG_OPTPROBES
376 /* NOTE: change this value only with kprobe_mutex held */
377 static bool kprobes_allow_optimization;
378 
379 /*
380  * Call all pre_handlers on the list, but ignore their return values.
381  * This must be called from the arch-dependent optimized caller.
382  */
383 void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
384 {
385 	struct kprobe *kp;
386 
387 	list_for_each_entry_rcu(kp, &p->list, list) {
388 		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
389 			set_kprobe_instance(kp);
390 			kp->pre_handler(kp, regs);
391 		}
392 		reset_kprobe_instance();
393 	}
394 }
395 NOKPROBE_SYMBOL(opt_pre_handler);
396 
397 /* Free optimized instructions and optimized_kprobe */
398 static void free_aggr_kprobe(struct kprobe *p)
399 {
400 	struct optimized_kprobe *op;
401 
402 	op = container_of(p, struct optimized_kprobe, kp);
403 	arch_remove_optimized_kprobe(op);
404 	arch_remove_kprobe(p);
405 	kfree(op);
406 }
407 
408 /* Return true(!0) if the kprobe is ready for optimization. */
409 static inline int kprobe_optready(struct kprobe *p)
410 {
411 	struct optimized_kprobe *op;
412 
413 	if (kprobe_aggrprobe(p)) {
414 		op = container_of(p, struct optimized_kprobe, kp);
415 		return arch_prepared_optinsn(&op->optinsn);
416 	}
417 
418 	return 0;
419 }
420 
421 /* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
422 static inline int kprobe_disarmed(struct kprobe *p)
423 {
424 	struct optimized_kprobe *op;
425 
426 	/* If the kprobe is not an aggr/opt probe, just return whether it is disabled */
427 	if (!kprobe_aggrprobe(p))
428 		return kprobe_disabled(p);
429 
430 	op = container_of(p, struct optimized_kprobe, kp);
431 
432 	return kprobe_disabled(p) && list_empty(&op->list);
433 }
434 
435 /* Return true(!0) if the probe is queued on (un)optimizing lists */
436 static int kprobe_queued(struct kprobe *p)
437 {
438 	struct optimized_kprobe *op;
439 
440 	if (kprobe_aggrprobe(p)) {
441 		op = container_of(p, struct optimized_kprobe, kp);
442 		if (!list_empty(&op->list))
443 			return 1;
444 	}
445 	return 0;
446 }
447 
448 /*
449  * Return an optimized kprobe whose optimizing code replaces
450  * instructions including addr (excluding the breakpoint itself).
451  */
452 static struct kprobe *get_optimized_kprobe(unsigned long addr)
453 {
454 	int i;
455 	struct kprobe *p = NULL;
456 	struct optimized_kprobe *op;
457 
458 	/* Don't check i == 0, since that is a breakpoint case. */
459 	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
460 		p = get_kprobe((void *)(addr - i));
461 
462 	if (p && kprobe_optready(p)) {
463 		op = container_of(p, struct optimized_kprobe, kp);
464 		if (arch_within_optimized_kprobe(op, addr))
465 			return p;
466 	}
467 
468 	return NULL;
469 }
470 
471 /* Optimization staging list, protected by kprobe_mutex */
472 static LIST_HEAD(optimizing_list);
473 static LIST_HEAD(unoptimizing_list);
474 static LIST_HEAD(freeing_list);
475 
476 static void kprobe_optimizer(struct work_struct *work);
477 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
478 #define OPTIMIZE_DELAY 5
479 
480 /*
481  * Optimize (replace a breakpoint with a jump) kprobes listed on
482  * optimizing_list.
483  */
484 static void do_optimize_kprobes(void)
485 {
486 	/*
487 	 * The optimization/unoptimization refers to online_cpus via
488 	 * stop_machine(), while cpu-hotplug modifies online_cpus. At the
489 	 * same time, text_mutex is held both in cpu-hotplug and here.
490 	 * This combination can cause a deadlock (cpu-hotplug tries to lock
491 	 * text_mutex, but stop_machine() cannot complete because online_cpus
492 	 * has changed in the meantime).
493 	 * To avoid this deadlock, the caller must have locked cpu hotplug,
494 	 * preventing cpu-hotplug from running outside of text_mutex locking.
495 	 */
496 	lockdep_assert_cpus_held();
497 
498 	/* Optimization is never done while kprobes are disarmed */
499 	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
500 	    list_empty(&optimizing_list))
501 		return;
502 
503 	mutex_lock(&text_mutex);
504 	arch_optimize_kprobes(&optimizing_list);
505 	mutex_unlock(&text_mutex);
506 }
507 
508 /*
509  * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
510  * if needed) kprobes listed on unoptimizing_list.
511  */
512 static void do_unoptimize_kprobes(void)
513 {
514 	struct optimized_kprobe *op, *tmp;
515 
516 	/* See comment in do_optimize_kprobes() */
517 	lockdep_assert_cpus_held();
518 
519 	/* Unoptimization must be done regardless of whether kprobes are disarmed */
520 	if (list_empty(&unoptimizing_list))
521 		return;
522 
523 	mutex_lock(&text_mutex);
524 	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
525 	/* Loop over freeing_list for disarming */
526 	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
527 		/* Disarm probes if marked disabled */
528 		if (kprobe_disabled(&op->kp))
529 			arch_disarm_kprobe(&op->kp);
530 		if (kprobe_unused(&op->kp)) {
531 			/*
532 			 * Remove unused probes from hash list. After waiting
533 			 * for synchronization, these probes are reclaimed.
534 			 * (reclaiming is done by do_free_cleaned_kprobes.)
535 			 */
536 			hlist_del_rcu(&op->kp.hlist);
537 		} else
538 			list_del_init(&op->list);
539 	}
540 	mutex_unlock(&text_mutex);
541 }
542 
543 /* Reclaim all kprobes on the freeing_list */
544 static void do_free_cleaned_kprobes(void)
545 {
546 	struct optimized_kprobe *op, *tmp;
547 
548 	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
549 		BUG_ON(!kprobe_unused(&op->kp));
550 		list_del_init(&op->list);
551 		free_aggr_kprobe(&op->kp);
552 	}
553 }
554 
555 /* Start the optimizer after OPTIMIZE_DELAY has passed */
556 static void kick_kprobe_optimizer(void)
557 {
558 	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
559 }
560 
561 /* Kprobe jump optimizer */
562 static void kprobe_optimizer(struct work_struct *work)
563 {
564 	mutex_lock(&kprobe_mutex);
565 	cpus_read_lock();
566 	/* Lock modules while optimizing kprobes */
567 	mutex_lock(&module_mutex);
568 
569 	/*
570 	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
571 	 * kprobes before waiting for the quiescence period.
572 	 */
573 	do_unoptimize_kprobes();
574 
575 	/*
576 	 * Step 2: Wait for the quiescence period to ensure that all potentially
577 	 * preempted tasks have been scheduled normally. Because an optprobe
578 	 * may modify multiple instructions, there is a chance that the Nth
579 	 * instruction is preempted. In that case, such tasks can return
580 	 * to the 2nd-Nth byte of the jump instruction. This wait avoids that.
581 	 * Note that on a non-preemptive kernel, this is transparently converted
582 	 * to synchronize_sched() to wait for all interrupts to have completed.
583 	 */
584 	synchronize_rcu_tasks();
585 
586 	/* Step 3: Optimize kprobes after the quiescence period */
587 	do_optimize_kprobes();
588 
589 	/* Step 4: Free cleaned kprobes after the quiescence period */
590 	do_free_cleaned_kprobes();
591 
592 	mutex_unlock(&module_mutex);
593 	cpus_read_unlock();
594 	mutex_unlock(&kprobe_mutex);
595 
596 	/* Step 5: Kick optimizer again if needed */
597 	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
598 		kick_kprobe_optimizer();
599 }
600 
601 /* Wait for optimization and unoptimization to complete */
602 void wait_for_kprobe_optimizer(void)
603 {
604 	mutex_lock(&kprobe_mutex);
605 
606 	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
607 		mutex_unlock(&kprobe_mutex);
608 
609 		/* this will also make optimizing_work execute immediately */
610 		flush_delayed_work(&optimizing_work);
611 		/* @optimizing_work might not have been queued yet, relax */
612 		cpu_relax();
613 
614 		mutex_lock(&kprobe_mutex);
615 	}
616 
617 	mutex_unlock(&kprobe_mutex);
618 }
619 
620 /* Optimize kprobe if p is ready to be optimized */
621 static void optimize_kprobe(struct kprobe *p)
622 {
623 	struct optimized_kprobe *op;
624 
625 	/* Check if the kprobe is disabled or not ready for optimization. */
626 	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
627 	    (kprobe_disabled(p) || kprobes_all_disarmed))
628 		return;
629 
630 	/* Neither break_handler nor post_handler is supported. */
631 	if (p->break_handler || p->post_handler)
632 		return;
633 
634 	op = container_of(p, struct optimized_kprobe, kp);
635 
636 	/* Check that there are no other kprobes within the optimized instructions */
637 	if (arch_check_optimized_kprobe(op) < 0)
638 		return;
639 
640 	/* Check if it is already optimized. */
641 	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
642 		return;
643 	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
644 
645 	if (!list_empty(&op->list))
646 		/* This one is being unoptimized. Just dequeue the probe */
647 		list_del_init(&op->list);
648 	else {
649 		list_add(&op->list, &optimizing_list);
650 		kick_kprobe_optimizer();
651 	}
652 }
653 
654 /* Shortcut to direct unoptimization */
655 static void force_unoptimize_kprobe(struct optimized_kprobe *op)
656 {
657 	lockdep_assert_cpus_held();
658 	arch_unoptimize_kprobe(op);
659 	if (kprobe_disabled(&op->kp))
660 		arch_disarm_kprobe(&op->kp);
661 }
662 
663 /* Unoptimize a kprobe if p is optimized */
664 static void unoptimize_kprobe(struct kprobe *p, bool force)
665 {
666 	struct optimized_kprobe *op;
667 
668 	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
669 		return; /* This is neither an optprobe nor an optimized one */
670 
671 	op = container_of(p, struct optimized_kprobe, kp);
672 	if (!kprobe_optimized(p)) {
673 		/* Unoptimized or unoptimizing case */
674 		if (force && !list_empty(&op->list)) {
675 			/*
676 			 * Only if this is an unoptimizing kprobe and forced,
677 			 * forcibly unoptimize it. (There is no need to unoptimize
678 			 * an already-unoptimized kprobe again.)
679 			 */
680 			list_del_init(&op->list);
681 			force_unoptimize_kprobe(op);
682 		}
683 		return;
684 	}
685 
686 	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
687 	if (!list_empty(&op->list)) {
688 		/* Dequeue from the optimization queue */
689 		list_del_init(&op->list);
690 		return;
691 	}
692 	/* Optimized kprobe case */
693 	if (force)
694 		/* Forcibly update the code: this is a special case */
695 		force_unoptimize_kprobe(op);
696 	else {
697 		list_add(&op->list, &unoptimizing_list);
698 		kick_kprobe_optimizer();
699 	}
700 }
701 
702 /* Cancel unoptimization so the kprobe can be reused */
703 static void reuse_unused_kprobe(struct kprobe *ap)
704 {
705 	struct optimized_kprobe *op;
706 
707 	BUG_ON(!kprobe_unused(ap));
708 	/*
709 	 * An unused kprobe MUST be in the middle of delayed unoptimizing (which
710 	 * means there is still a relative jump in place) and disabled.
711 	 */
712 	op = container_of(ap, struct optimized_kprobe, kp);
713 	if (unlikely(list_empty(&op->list)))
714 		printk(KERN_WARNING "Warning: found a stray unused "
715 			"aggrprobe@%p\n", ap->addr);
716 	/* Enable the probe again */
717 	ap->flags &= ~KPROBE_FLAG_DISABLED;
718 	/* Optimize it again (remove from op->list) */
719 	BUG_ON(!kprobe_optready(ap));
720 	optimize_kprobe(ap);
721 }
722 
723 /* Remove optimized instructions */
724 static void kill_optimized_kprobe(struct kprobe *p)
725 {
726 	struct optimized_kprobe *op;
727 
728 	op = container_of(p, struct optimized_kprobe, kp);
729 	if (!list_empty(&op->list))
730 		/* Dequeue from the (un)optimization queue */
731 		list_del_init(&op->list);
732 	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
733 
734 	if (kprobe_unused(p)) {
735 		/* Enqueue it on the freeing_list if it is unused */
736 		list_add(&op->list, &freeing_list);
737 		/*
738 		 * Remove unused probes from the hash list. After waiting
739 		 * for synchronization, this probe is reclaimed.
740 		 * (reclaiming is done by do_free_cleaned_kprobes().)
741 		 */
742 		hlist_del_rcu(&op->kp.hlist);
743 	}
744 
745 	/* Don't touch the code, because it is already freed. */
746 	arch_remove_optimized_kprobe(op);
747 }
748 
749 static inline
750 void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
751 {
752 	if (!kprobe_ftrace(p))
753 		arch_prepare_optimized_kprobe(op, p);
754 }
755 
756 /* Try to prepare optimized instructions */
757 static void prepare_optimized_kprobe(struct kprobe *p)
758 {
759 	struct optimized_kprobe *op;
760 
761 	op = container_of(p, struct optimized_kprobe, kp);
762 	__prepare_optimized_kprobe(op, p);
763 }
764 
765 /* Allocate new optimized_kprobe and try to prepare optimized instructions */
766 static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
767 {
768 	struct optimized_kprobe *op;
769 
770 	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
771 	if (!op)
772 		return NULL;
773 
774 	INIT_LIST_HEAD(&op->list);
775 	op->kp.addr = p->addr;
776 	__prepare_optimized_kprobe(op, p);
777 
778 	return &op->kp;
779 }
780 
781 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
782 
783 /*
784  * Prepare an optimized_kprobe and optimize it
785  * NOTE: p must be a normal registered kprobe
786  */
787 static void try_to_optimize_kprobe(struct kprobe *p)
788 {
789 	struct kprobe *ap;
790 	struct optimized_kprobe *op;
791 
792 	/* Impossible to optimize ftrace-based kprobe */
793 	if (kprobe_ftrace(p))
794 		return;
795 
796 	/* For preparing optimization, jump_label_text_reserved() is called */
797 	cpus_read_lock();
798 	jump_label_lock();
799 	mutex_lock(&text_mutex);
800 
801 	ap = alloc_aggr_kprobe(p);
802 	if (!ap)
803 		goto out;
804 
805 	op = container_of(ap, struct optimized_kprobe, kp);
806 	if (!arch_prepared_optinsn(&op->optinsn)) {
807 		/* If we failed to set up optimization, fall back to a regular kprobe */
808 		arch_remove_optimized_kprobe(op);
809 		kfree(op);
810 		goto out;
811 	}
812 
813 	init_aggr_kprobe(ap, p);
814 	optimize_kprobe(ap);	/* This just kicks optimizer thread */
815 
816 out:
817 	mutex_unlock(&text_mutex);
818 	jump_label_unlock();
819 	cpus_read_unlock();
820 }
821 
822 #ifdef CONFIG_SYSCTL
823 static void optimize_all_kprobes(void)
824 {
825 	struct hlist_head *head;
826 	struct kprobe *p;
827 	unsigned int i;
828 
829 	mutex_lock(&kprobe_mutex);
830 	/* If optimization is already allowed, just return */
831 	if (kprobes_allow_optimization)
832 		goto out;
833 
834 	cpus_read_lock();
835 	kprobes_allow_optimization = true;
836 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
837 		head = &kprobe_table[i];
838 		hlist_for_each_entry_rcu(p, head, hlist)
839 			if (!kprobe_disabled(p))
840 				optimize_kprobe(p);
841 	}
842 	cpus_read_unlock();
843 	printk(KERN_INFO "Kprobes globally optimized\n");
844 out:
845 	mutex_unlock(&kprobe_mutex);
846 }
847 
848 static void unoptimize_all_kprobes(void)
849 {
850 	struct hlist_head *head;
851 	struct kprobe *p;
852 	unsigned int i;
853 
854 	mutex_lock(&kprobe_mutex);
855 	/* If optimization is already prohibited, just return */
856 	if (!kprobes_allow_optimization) {
857 		mutex_unlock(&kprobe_mutex);
858 		return;
859 	}
860 
861 	cpus_read_lock();
862 	kprobes_allow_optimization = false;
863 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
864 		head = &kprobe_table[i];
865 		hlist_for_each_entry_rcu(p, head, hlist) {
866 			if (!kprobe_disabled(p))
867 				unoptimize_kprobe(p, false);
868 		}
869 	}
870 	cpus_read_unlock();
871 	mutex_unlock(&kprobe_mutex);
872 
873 	/* Wait for unoptimizing completion */
874 	wait_for_kprobe_optimizer();
875 	printk(KERN_INFO "Kprobes globally unoptimized\n");
876 }
877 
878 static DEFINE_MUTEX(kprobe_sysctl_mutex);
879 int sysctl_kprobes_optimization;
880 int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
881 				      void __user *buffer, size_t *length,
882 				      loff_t *ppos)
883 {
884 	int ret;
885 
886 	mutex_lock(&kprobe_sysctl_mutex);
887 	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
888 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
889 
890 	if (sysctl_kprobes_optimization)
891 		optimize_all_kprobes();
892 	else
893 		unoptimize_all_kprobes();
894 	mutex_unlock(&kprobe_sysctl_mutex);
895 
896 	return ret;
897 }
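/*
 * This handler backs the "debug.kprobes-optimization" sysctl; for example,
 * "sysctl -w debug.kprobes-optimization=0" (a representative invocation)
 * switches every probe back to the breakpoint-based path, and writing 1
 * re-enables jump optimization.
 */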
898 #endif /* CONFIG_SYSCTL */
899 
900 /* Put a breakpoint for a probe. Must be called with text_mutex locked */
901 static void __arm_kprobe(struct kprobe *p)
902 {
903 	struct kprobe *_p;
904 
905 	/* Check collision with other optimized kprobes */
906 	_p = get_optimized_kprobe((unsigned long)p->addr);
907 	if (unlikely(_p))
908 		/* Fallback to unoptimized kprobe */
909 		unoptimize_kprobe(_p, true);
910 
911 	arch_arm_kprobe(p);
912 	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
913 }
914 
915 /* Remove the breakpoint of a probe. Must be called with text_mutex locked */
916 static void __disarm_kprobe(struct kprobe *p, bool reopt)
917 {
918 	struct kprobe *_p;
919 
920 	/* Try to unoptimize */
921 	unoptimize_kprobe(p, kprobes_all_disarmed);
922 
923 	if (!kprobe_queued(p)) {
924 		arch_disarm_kprobe(p);
925 		/* If another kprobe was blocked, optimize it. */
926 		_p = get_optimized_kprobe((unsigned long)p->addr);
927 		if (unlikely(_p) && reopt)
928 			optimize_kprobe(_p);
929 	}
930 	/* TODO: reoptimize other probes after unoptimizing this one */
931 }
932 
933 #else /* !CONFIG_OPTPROBES */
934 
935 #define optimize_kprobe(p)			do {} while (0)
936 #define unoptimize_kprobe(p, f)			do {} while (0)
937 #define kill_optimized_kprobe(p)		do {} while (0)
938 #define prepare_optimized_kprobe(p)		do {} while (0)
939 #define try_to_optimize_kprobe(p)		do {} while (0)
940 #define __arm_kprobe(p)				arch_arm_kprobe(p)
941 #define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
942 #define kprobe_disarmed(p)			kprobe_disabled(p)
943 #define wait_for_kprobe_optimizer()		do {} while (0)
944 
945 /* Without optimization support, there should be no unused kprobes that can be reused */
946 static void reuse_unused_kprobe(struct kprobe *ap)
947 {
948 	printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
949 	BUG_ON(kprobe_unused(ap));
950 }
951 
952 static void free_aggr_kprobe(struct kprobe *p)
953 {
954 	arch_remove_kprobe(p);
955 	kfree(p);
956 }
957 
958 static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
959 {
960 	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
961 }
962 #endif /* CONFIG_OPTPROBES */
963 
964 #ifdef CONFIG_KPROBES_ON_FTRACE
965 static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
966 	.func = kprobe_ftrace_handler,
967 	.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
968 };
969 static int kprobe_ftrace_enabled;
970 
971 /* The caller must ensure p->addr is really on ftrace */
972 static int prepare_kprobe(struct kprobe *p)
973 {
974 	if (!kprobe_ftrace(p))
975 		return arch_prepare_kprobe(p);
976 
977 	return arch_prepare_kprobe_ftrace(p);
978 }
979 
980 /* Caller must lock kprobe_mutex */
981 static void arm_kprobe_ftrace(struct kprobe *p)
982 {
983 	int ret;
984 
985 	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
986 				   (unsigned long)p->addr, 0, 0);
987 	WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
988 	kprobe_ftrace_enabled++;
989 	if (kprobe_ftrace_enabled == 1) {
990 		ret = register_ftrace_function(&kprobe_ftrace_ops);
991 		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
992 	}
993 }
994 
995 /* Caller must lock kprobe_mutex */
996 static void disarm_kprobe_ftrace(struct kprobe *p)
997 {
998 	int ret;
999 
1000 	kprobe_ftrace_enabled--;
1001 	if (kprobe_ftrace_enabled == 0) {
1002 		ret = unregister_ftrace_function(&kprobe_ftrace_ops);
1003 		WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret);
1004 	}
1005 	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
1006 			   (unsigned long)p->addr, 1, 0);
1007 	WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
1008 }
1009 #else	/* !CONFIG_KPROBES_ON_FTRACE */
1010 #define prepare_kprobe(p)	arch_prepare_kprobe(p)
1011 #define arm_kprobe_ftrace(p)	do {} while (0)
1012 #define disarm_kprobe_ftrace(p)	do {} while (0)
1013 #endif
1014 
1015 /* Arm a kprobe with text_mutex */
1016 static void arm_kprobe(struct kprobe *kp)
1017 {
1018 	if (unlikely(kprobe_ftrace(kp))) {
1019 		arm_kprobe_ftrace(kp);
1020 		return;
1021 	}
1022 	cpus_read_lock();
1023 	mutex_lock(&text_mutex);
1024 	__arm_kprobe(kp);
1025 	mutex_unlock(&text_mutex);
1026 	cpus_read_unlock();
1027 }
1028 
1029 /* Disarm a kprobe with text_mutex */
1030 static void disarm_kprobe(struct kprobe *kp, bool reopt)
1031 {
1032 	if (unlikely(kprobe_ftrace(kp))) {
1033 		disarm_kprobe_ftrace(kp);
1034 		return;
1035 	}
1036 
1037 	cpus_read_lock();
1038 	mutex_lock(&text_mutex);
1039 	__disarm_kprobe(kp, reopt);
1040 	mutex_unlock(&text_mutex);
1041 	cpus_read_unlock();
1042 }
1043 
1044 /*
1045  * Aggregate handlers for multiple kprobes support - these handlers
1046  * take care of invoking the individual kprobe handlers on p->list
1047  */
1048 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
1049 {
1050 	struct kprobe *kp;
1051 
1052 	list_for_each_entry_rcu(kp, &p->list, list) {
1053 		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
1054 			set_kprobe_instance(kp);
1055 			if (kp->pre_handler(kp, regs))
1056 				return 1;
1057 		}
1058 		reset_kprobe_instance();
1059 	}
1060 	return 0;
1061 }
1062 NOKPROBE_SYMBOL(aggr_pre_handler);
1063 
1064 static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
1065 			      unsigned long flags)
1066 {
1067 	struct kprobe *kp;
1068 
1069 	list_for_each_entry_rcu(kp, &p->list, list) {
1070 		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
1071 			set_kprobe_instance(kp);
1072 			kp->post_handler(kp, regs, flags);
1073 			reset_kprobe_instance();
1074 		}
1075 	}
1076 }
1077 NOKPROBE_SYMBOL(aggr_post_handler);
1078 
1079 static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
1080 			      int trapnr)
1081 {
1082 	struct kprobe *cur = __this_cpu_read(kprobe_instance);
1083 
1084 	/*
1085 	 * If we faulted "during" the execution of a user-specified
1086 	 * probe handler, invoke just that probe's fault handler.
1087 	 */
1088 	if (cur && cur->fault_handler) {
1089 		if (cur->fault_handler(cur, regs, trapnr))
1090 			return 1;
1091 	}
1092 	return 0;
1093 }
1094 NOKPROBE_SYMBOL(aggr_fault_handler);
1095 
1096 static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
1097 {
1098 	struct kprobe *cur = __this_cpu_read(kprobe_instance);
1099 	int ret = 0;
1100 
1101 	if (cur && cur->break_handler) {
1102 		if (cur->break_handler(cur, regs))
1103 			ret = 1;
1104 	}
1105 	reset_kprobe_instance();
1106 	return ret;
1107 }
1108 NOKPROBE_SYMBOL(aggr_break_handler);
1109 
1110 /* Walks the list and increments nmissed count for multiprobe case */
1111 void kprobes_inc_nmissed_count(struct kprobe *p)
1112 {
1113 	struct kprobe *kp;
1114 	if (!kprobe_aggrprobe(p)) {
1115 		p->nmissed++;
1116 	} else {
1117 		list_for_each_entry_rcu(kp, &p->list, list)
1118 			kp->nmissed++;
1119 	}
1120 	return;
1121 }
1122 NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);
1123 
1124 void recycle_rp_inst(struct kretprobe_instance *ri,
1125 		     struct hlist_head *head)
1126 {
1127 	struct kretprobe *rp = ri->rp;
1128 
1129 	/* remove the rp inst from the kretprobe_inst_table */
1130 	hlist_del(&ri->hlist);
1131 	INIT_HLIST_NODE(&ri->hlist);
1132 	if (likely(rp)) {
1133 		raw_spin_lock(&rp->lock);
1134 		hlist_add_head(&ri->hlist, &rp->free_instances);
1135 		raw_spin_unlock(&rp->lock);
1136 	} else
1137 		/* Unregistering */
1138 		hlist_add_head(&ri->hlist, head);
1139 }
1140 NOKPROBE_SYMBOL(recycle_rp_inst);
1141 
1142 void kretprobe_hash_lock(struct task_struct *tsk,
1143 			 struct hlist_head **head, unsigned long *flags)
1144 __acquires(hlist_lock)
1145 {
1146 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
1147 	raw_spinlock_t *hlist_lock;
1148 
1149 	*head = &kretprobe_inst_table[hash];
1150 	hlist_lock = kretprobe_table_lock_ptr(hash);
1151 	raw_spin_lock_irqsave(hlist_lock, *flags);
1152 }
1153 NOKPROBE_SYMBOL(kretprobe_hash_lock);
1154 
1155 static void kretprobe_table_lock(unsigned long hash,
1156 				 unsigned long *flags)
1157 __acquires(hlist_lock)
1158 {
1159 	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
1160 	raw_spin_lock_irqsave(hlist_lock, *flags);
1161 }
1162 NOKPROBE_SYMBOL(kretprobe_table_lock);
1163 
1164 void kretprobe_hash_unlock(struct task_struct *tsk,
1165 			   unsigned long *flags)
1166 __releases(hlist_lock)
1167 {
1168 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
1169 	raw_spinlock_t *hlist_lock;
1170 
1171 	hlist_lock = kretprobe_table_lock_ptr(hash);
1172 	raw_spin_unlock_irqrestore(hlist_lock, *flags);
1173 }
1174 NOKPROBE_SYMBOL(kretprobe_hash_unlock);
1175 
1176 static void kretprobe_table_unlock(unsigned long hash,
1177 				   unsigned long *flags)
1178 __releases(hlist_lock)
1179 {
1180 	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
1181 	raw_spin_unlock_irqrestore(hlist_lock, *flags);
1182 }
1183 NOKPROBE_SYMBOL(kretprobe_table_unlock);
1184 
1185 /*
1186  * This function is called from finish_task_switch when task tk becomes dead,
1187  * so that we can recycle any function-return probe instances associated
1188  * with this task. These left-over instances represent probed functions
1189  * that have been called but will never return.
1190  */
1191 void kprobe_flush_task(struct task_struct *tk)
1192 {
1193 	struct kretprobe_instance *ri;
1194 	struct hlist_head *head, empty_rp;
1195 	struct hlist_node *tmp;
1196 	unsigned long hash, flags = 0;
1197 
1198 	if (unlikely(!kprobes_initialized))
1199 		/* Early boot.  kretprobe_table_locks not yet initialized. */
1200 		return;
1201 
1202 	INIT_HLIST_HEAD(&empty_rp);
1203 	hash = hash_ptr(tk, KPROBE_HASH_BITS);
1204 	head = &kretprobe_inst_table[hash];
1205 	kretprobe_table_lock(hash, &flags);
1206 	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
1207 		if (ri->task == tk)
1208 			recycle_rp_inst(ri, &empty_rp);
1209 	}
1210 	kretprobe_table_unlock(hash, &flags);
1211 	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
1212 		hlist_del(&ri->hlist);
1213 		kfree(ri);
1214 	}
1215 }
1216 NOKPROBE_SYMBOL(kprobe_flush_task);
1217 
1218 static inline void free_rp_inst(struct kretprobe *rp)
1219 {
1220 	struct kretprobe_instance *ri;
1221 	struct hlist_node *next;
1222 
1223 	hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
1224 		hlist_del(&ri->hlist);
1225 		kfree(ri);
1226 	}
1227 }
1228 
1229 static void cleanup_rp_inst(struct kretprobe *rp)
1230 {
1231 	unsigned long flags, hash;
1232 	struct kretprobe_instance *ri;
1233 	struct hlist_node *next;
1234 	struct hlist_head *head;
1235 
1236 	/* No race here */
1237 	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
1238 		kretprobe_table_lock(hash, &flags);
1239 		head = &kretprobe_inst_table[hash];
1240 		hlist_for_each_entry_safe(ri, next, head, hlist) {
1241 			if (ri->rp == rp)
1242 				ri->rp = NULL;
1243 		}
1244 		kretprobe_table_unlock(hash, &flags);
1245 	}
1246 	free_rp_inst(rp);
1247 }
1248 NOKPROBE_SYMBOL(cleanup_rp_inst);
1249 
1250 /*
1251  * Add the new probe to ap->list. Fail if this is the
1252  * second jprobe at the address - two jprobes can't coexist
1253  */
1254 static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
1255 {
1256 	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
1257 
1258 	if (p->break_handler || p->post_handler)
1259 		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */
1260 
1261 	if (p->break_handler) {
1262 		if (ap->break_handler)
1263 			return -EEXIST;
1264 		list_add_tail_rcu(&p->list, &ap->list);
1265 		ap->break_handler = aggr_break_handler;
1266 	} else
1267 		list_add_rcu(&p->list, &ap->list);
1268 	if (p->post_handler && !ap->post_handler)
1269 		ap->post_handler = aggr_post_handler;
1270 
1271 	return 0;
1272 }
1273 
1274 /*
1275  * Fill in the required fields of the "manager kprobe". Replace the
1276  * earlier kprobe in the hlist with the manager kprobe
1277  */
1278 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
1279 {
1280 	/* Copy p's insn slot to ap */
1281 	copy_kprobe(p, ap);
1282 	flush_insn_slot(ap);
1283 	ap->addr = p->addr;
1284 	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
1285 	ap->pre_handler = aggr_pre_handler;
1286 	ap->fault_handler = aggr_fault_handler;
1287 	/* We don't care about a kprobe which has gone. */
1288 	if (p->post_handler && !kprobe_gone(p))
1289 		ap->post_handler = aggr_post_handler;
1290 	if (p->break_handler && !kprobe_gone(p))
1291 		ap->break_handler = aggr_break_handler;
1292 
1293 	INIT_LIST_HEAD(&ap->list);
1294 	INIT_HLIST_NODE(&ap->hlist);
1295 
1296 	list_add_rcu(&p->list, &ap->list);
1297 	hlist_replace_rcu(&p->hlist, &ap->hlist);
1298 }
1299 
1300 /*
1301  * This is the second or subsequent kprobe at the address - handle
1302  * the intricacies
1303  */
1304 static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
1305 {
1306 	int ret = 0;
1307 	struct kprobe *ap = orig_p;
1308 
1309 	cpus_read_lock();
1310 
1311 	/* For preparing optimization, jump_label_text_reserved() is called */
1312 	jump_label_lock();
1313 	mutex_lock(&text_mutex);
1314 
1315 	if (!kprobe_aggrprobe(orig_p)) {
1316 		/* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
1317 		ap = alloc_aggr_kprobe(orig_p);
1318 		if (!ap) {
1319 			ret = -ENOMEM;
1320 			goto out;
1321 		}
1322 		init_aggr_kprobe(ap, orig_p);
1323 	} else if (kprobe_unused(ap))
1324 		/* This probe is going to die. Rescue it */
1325 		reuse_unused_kprobe(ap);
1326 
1327 	if (kprobe_gone(ap)) {
1328 		/*
1329 		 * We are attempting to insert a new probe at the same location
1330 		 * that previously had a probe in a module vaddr area which has
1331 		 * already been freed. So the instruction slot has already been
1332 		 * released; we need a new slot for the new probe.
1333 		 */
1334 		ret = arch_prepare_kprobe(ap);
1335 		if (ret)
1336 			/*
1337 			 * Even if we fail to allocate a new slot, we don't need
1338 			 * to free the aggr_kprobe. It will be used next time, or
1339 			 * freed by unregister_kprobe().
1340 			 */
1341 			goto out;
1342 
1343 		/* Prepare optimized instructions if possible. */
1344 		prepare_optimized_kprobe(ap);
1345 
1346 		/*
1347 		 * Clear gone flag to prevent allocating new slot again, and
1348 		 * set disabled flag because it is not armed yet.
1349 		 */
1350 		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
1351 			    | KPROBE_FLAG_DISABLED;
1352 	}
1353 
1354 	/* Copy ap's insn slot to p */
1355 	copy_kprobe(ap, p);
1356 	ret = add_new_kprobe(ap, p);
1357 
1358 out:
1359 	mutex_unlock(&text_mutex);
1360 	jump_label_unlock();
1361 	cpus_read_unlock();
1362 
1363 	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
1364 		ap->flags &= ~KPROBE_FLAG_DISABLED;
1365 		if (!kprobes_all_disarmed)
1366 			/* Arm the breakpoint again. */
1367 			arm_kprobe(ap);
1368 	}
1369 	return ret;
1370 }
1371 
1372 bool __weak arch_within_kprobe_blacklist(unsigned long addr)
1373 {
1374 	/* The __kprobes marked functions and entry code must not be probed */
1375 	return addr >= (unsigned long)__kprobes_text_start &&
1376 	       addr < (unsigned long)__kprobes_text_end;
1377 }
1378 
1379 bool within_kprobe_blacklist(unsigned long addr)
1380 {
1381 	struct kprobe_blacklist_entry *ent;
1382 
1383 	if (arch_within_kprobe_blacklist(addr))
1384 		return true;
1385 	/*
1386 	 * If there exists a kprobe_blacklist, verify and
1387 	 * fail any probe registration in the prohibited area
1388 	 */
1389 	list_for_each_entry(ent, &kprobe_blacklist, list) {
1390 		if (addr >= ent->start_addr && addr < ent->end_addr)
1391 			return true;
1392 	}
1393 
1394 	return false;
1395 }
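/*
 * The blacklist entries walked above are collected at init time (outside
 * this excerpt) from NOKPROBE_SYMBOL() annotations, so annotated functions
 * fail register_kprobe() with -EINVAL just like __kprobes-marked code.
 */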
1396 
1397 /*
1398  * If we have a symbol_name argument, look it up and add the offset field
1399  * to it. This way, we can specify a relative address to a symbol.
1400  * This returns encoded errors if it fails to look up the symbol or if an
1401  * invalid combination of parameters is given.
1402  */
1403 static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,
1404 			const char *symbol_name, unsigned int offset)
1405 {
1406 	if ((symbol_name && addr) || (!symbol_name && !addr))
1407 		goto invalid;
1408 
1409 	if (symbol_name) {
1410 		addr = kprobe_lookup_name(symbol_name, offset);
1411 		if (!addr)
1412 			return ERR_PTR(-ENOENT);
1413 	}
1414 
1415 	addr = (kprobe_opcode_t *)(((char *)addr) + offset);
1416 	if (addr)
1417 		return addr;
1418 
1419 invalid:
1420 	return ERR_PTR(-EINVAL);
1421 }
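/*
 * For example (symbol chosen purely for illustration), .symbol_name =
 * "vfs_read" with .offset = 0x10 resolves to vfs_read+0x10, while passing
 * both ->addr and ->symbol_name (or neither) is rejected with -EINVAL.
 */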
1422 
1423 static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
1424 {
1425 	return _kprobe_addr(p->addr, p->symbol_name, p->offset);
1426 }
1427 
1428 /* Check passed kprobe is valid and return kprobe in kprobe_table. */
1429 static struct kprobe *__get_valid_kprobe(struct kprobe *p)
1430 {
1431 	struct kprobe *ap, *list_p;
1432 
1433 	ap = get_kprobe(p->addr);
1434 	if (unlikely(!ap))
1435 		return NULL;
1436 
1437 	if (p != ap) {
1438 		list_for_each_entry_rcu(list_p, &ap->list, list)
1439 			if (list_p == p)
1440 			/* kprobe p is a valid probe */
1441 				goto valid;
1442 		return NULL;
1443 	}
1444 valid:
1445 	return ap;
1446 }
1447 
1448 /* Return error if the kprobe is being re-registered */
1449 static inline int check_kprobe_rereg(struct kprobe *p)
1450 {
1451 	int ret = 0;
1452 
1453 	mutex_lock(&kprobe_mutex);
1454 	if (__get_valid_kprobe(p))
1455 		ret = -EINVAL;
1456 	mutex_unlock(&kprobe_mutex);
1457 
1458 	return ret;
1459 }
1460 
1461 int __weak arch_check_ftrace_location(struct kprobe *p)
1462 {
1463 	unsigned long ftrace_addr;
1464 
1465 	ftrace_addr = ftrace_location((unsigned long)p->addr);
1466 	if (ftrace_addr) {
1467 #ifdef CONFIG_KPROBES_ON_FTRACE
1468 		/* The given address is not on an instruction boundary */
1469 		if ((unsigned long)p->addr != ftrace_addr)
1470 			return -EILSEQ;
1471 		p->flags |= KPROBE_FLAG_FTRACE;
1472 #else	/* !CONFIG_KPROBES_ON_FTRACE */
1473 		return -EINVAL;
1474 #endif
1475 	}
1476 	return 0;
1477 }
1478 
1479 static int check_kprobe_address_safe(struct kprobe *p,
1480 				     struct module **probed_mod)
1481 {
1482 	int ret;
1483 
1484 	ret = arch_check_ftrace_location(p);
1485 	if (ret)
1486 		return ret;
1487 	jump_label_lock();
1488 	preempt_disable();
1489 
1490 	/* Ensure it is neither in a reserved area nor outside of the text section */
1491 	if (!kernel_text_address((unsigned long) p->addr) ||
1492 	    within_kprobe_blacklist((unsigned long) p->addr) ||
1493 	    jump_label_text_reserved(p->addr, p->addr)) {
1494 		ret = -EINVAL;
1495 		goto out;
1496 	}
1497 
1498 	/* Check if we are probing a module */
1499 	*probed_mod = __module_text_address((unsigned long) p->addr);
1500 	if (*probed_mod) {
1501 		/*
1502 		 * We must hold a refcount of the probed module while updating
1503 		 * its code to prohibit unexpected unloading.
1504 		 */
1505 		if (unlikely(!try_module_get(*probed_mod))) {
1506 			ret = -ENOENT;
1507 			goto out;
1508 		}
1509 
1510 		/*
1511 		 * If the module has freed .init.text, we can't insert
1512 		 * kprobes there.
1513 		 */
1514 		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
1515 		    (*probed_mod)->state != MODULE_STATE_COMING) {
1516 			module_put(*probed_mod);
1517 			*probed_mod = NULL;
1518 			ret = -ENOENT;
1519 		}
1520 	}
1521 out:
1522 	preempt_enable();
1523 	jump_label_unlock();
1524 
1525 	return ret;
1526 }
1527 
1528 int register_kprobe(struct kprobe *p)
1529 {
1530 	int ret;
1531 	struct kprobe *old_p;
1532 	struct module *probed_mod;
1533 	kprobe_opcode_t *addr;
1534 
1535 	/* Adjust probe address from symbol */
1536 	addr = kprobe_addr(p);
1537 	if (IS_ERR(addr))
1538 		return PTR_ERR(addr);
1539 	p->addr = addr;
1540 
1541 	ret = check_kprobe_rereg(p);
1542 	if (ret)
1543 		return ret;
1544 
1545 	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
1546 	p->flags &= KPROBE_FLAG_DISABLED;
1547 	p->nmissed = 0;
1548 	INIT_LIST_HEAD(&p->list);
1549 
1550 	ret = check_kprobe_address_safe(p, &probed_mod);
1551 	if (ret)
1552 		return ret;
1553 
1554 	mutex_lock(&kprobe_mutex);
1555 
1556 	old_p = get_kprobe(p->addr);
1557 	if (old_p) {
1558 		/* Since this may unoptimize old_p, register_aggr_kprobe() takes text_mutex. */
1559 		ret = register_aggr_kprobe(old_p, p);
1560 		goto out;
1561 	}
1562 
1563 	cpus_read_lock();
1564 	/* Prevent text modification */
1565 	mutex_lock(&text_mutex);
1566 	ret = prepare_kprobe(p);
1567 	mutex_unlock(&text_mutex);
1568 	cpus_read_unlock();
1569 	if (ret)
1570 		goto out;
1571 
1572 	INIT_HLIST_NODE(&p->hlist);
1573 	hlist_add_head_rcu(&p->hlist,
1574 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
1575 
1576 	if (!kprobes_all_disarmed && !kprobe_disabled(p))
1577 		arm_kprobe(p);
1578 
1579 	/* Try to optimize kprobe */
1580 	try_to_optimize_kprobe(p);
1581 out:
1582 	mutex_unlock(&kprobe_mutex);
1583 
1584 	if (probed_mod)
1585 		module_put(probed_mod);
1586 
1587 	return ret;
1588 }
1589 EXPORT_SYMBOL_GPL(register_kprobe);
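/*
 * Minimal usage sketch (illustrative only, not part of this file): a caller
 * typically fills in a struct kprobe with a symbol name and handlers, then
 * registers it from module init and unregisters it on exit. "vfs_read" and
 * the handler name below are arbitrary examples.
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("kprobe hit at %p\n", p->addr);
 *		return 0;	// 0: continue with the probed instruction
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "vfs_read",
 *		.pre_handler	= my_pre,
 *	};
 *
 *	// module init:  ret = register_kprobe(&my_kp);
 *	// module exit:  unregister_kprobe(&my_kp);
 */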
1590 
1591 /* Check if all probes on the aggrprobe are disabled */
1592 static int aggr_kprobe_disabled(struct kprobe *ap)
1593 {
1594 	struct kprobe *kp;
1595 
1596 	list_for_each_entry_rcu(kp, &ap->list, list)
1597 		if (!kprobe_disabled(kp))
1598 			/*
1599 			 * There is an active probe on the list.
1600 			 * We can't disable this ap.
1601 			 */
1602 			return 0;
1603 
1604 	return 1;
1605 }
1606 
1607 /* Disable one kprobe: Must be called with kprobe_mutex held */
1608 static struct kprobe *__disable_kprobe(struct kprobe *p)
1609 {
1610 	struct kprobe *orig_p;
1611 
1612 	/* Get an original kprobe for return */
1613 	orig_p = __get_valid_kprobe(p);
1614 	if (unlikely(orig_p == NULL))
1615 		return NULL;
1616 
1617 	if (!kprobe_disabled(p)) {
1618 		/* Disable probe if it is a child probe */
1619 		if (p != orig_p)
1620 			p->flags |= KPROBE_FLAG_DISABLED;
1621 
1622 		/* Try to disarm and disable this/parent probe */
1623 		if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
1624 			/*
1625 			 * If kprobes_all_disarmed is set, orig_p
1626 			 * should have already been disarmed, so
1627 			 * skip the unneeded disarming process.
1628 			 */
1629 			if (!kprobes_all_disarmed)
1630 				disarm_kprobe(orig_p, true);
1631 			orig_p->flags |= KPROBE_FLAG_DISABLED;
1632 		}
1633 	}
1634 
1635 	return orig_p;
1636 }
1637 
1638 /*
1639  * Unregister a kprobe without a scheduler synchronization.
1640  */
1641 static int __unregister_kprobe_top(struct kprobe *p)
1642 {
1643 	struct kprobe *ap, *list_p;
1644 
1645 	/* Disable kprobe. This will disarm it if needed. */
1646 	ap = __disable_kprobe(p);
1647 	if (ap == NULL)
1648 		return -EINVAL;
1649 
1650 	if (ap == p)
1651 		/*
1652 		 * This probe is an independent (and non-optimized) kprobe
1653 		 * (not an aggrprobe). Remove from the hash list.
1654 		 */
1655 		goto disarmed;
1656 
1657 	/* Following process expects this probe is an aggrprobe */
1658 	WARN_ON(!kprobe_aggrprobe(ap));
1659 
1660 	if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
1661 		/*
1662 		 * !disarmed could happen if the probe is under delayed
1663 		 * unoptimization.
1664 		 */
1665 		goto disarmed;
1666 	else {
1667 		/* If the probe being disabled has special handlers, update the aggrprobe */
1668 		if (p->break_handler && !kprobe_gone(p))
1669 			ap->break_handler = NULL;
1670 		if (p->post_handler && !kprobe_gone(p)) {
1671 			list_for_each_entry_rcu(list_p, &ap->list, list) {
1672 				if ((list_p != p) && (list_p->post_handler))
1673 					goto noclean;
1674 			}
1675 			ap->post_handler = NULL;
1676 		}
1677 noclean:
1678 		/*
1679 		 * Remove from the aggrprobe: this path will do nothing in
1680 		 * __unregister_kprobe_bottom().
1681 		 */
1682 		list_del_rcu(&p->list);
1683 		if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
1684 			/*
1685 			 * Try to optimize this probe again, because post
1686 			 * handler may have been changed.
1687 			 */
1688 			optimize_kprobe(ap);
1689 	}
1690 	return 0;
1691 
1692 disarmed:
1693 	BUG_ON(!kprobe_disarmed(ap));
1694 	hlist_del_rcu(&ap->hlist);
1695 	return 0;
1696 }
1697 
1698 static void __unregister_kprobe_bottom(struct kprobe *p)
1699 {
1700 	struct kprobe *ap;
1701 
1702 	if (list_empty(&p->list))
1703 		/* This is an independent kprobe */
1704 		arch_remove_kprobe(p);
1705 	else if (list_is_singular(&p->list)) {
1706 		/* This is the last child of an aggrprobe */
1707 		ap = list_entry(p->list.next, struct kprobe, list);
1708 		list_del(&p->list);
1709 		free_aggr_kprobe(ap);
1710 	}
1711 	/* Otherwise, do nothing. */
1712 }
1713 
1714 int register_kprobes(struct kprobe **kps, int num)
1715 {
1716 	int i, ret = 0;
1717 
1718 	if (num <= 0)
1719 		return -EINVAL;
1720 	for (i = 0; i < num; i++) {
1721 		ret = register_kprobe(kps[i]);
1722 		if (ret < 0) {
1723 			if (i > 0)
1724 				unregister_kprobes(kps, i);
1725 			break;
1726 		}
1727 	}
1728 	return ret;
1729 }
1730 EXPORT_SYMBOL_GPL(register_kprobes);
1731 
1732 void unregister_kprobe(struct kprobe *p)
1733 {
1734 	unregister_kprobes(&p, 1);
1735 }
1736 EXPORT_SYMBOL_GPL(unregister_kprobe);
1737 
1738 void unregister_kprobes(struct kprobe **kps, int num)
1739 {
1740 	int i;
1741 
1742 	if (num <= 0)
1743 		return;
1744 	mutex_lock(&kprobe_mutex);
1745 	for (i = 0; i < num; i++)
1746 		if (__unregister_kprobe_top(kps[i]) < 0)
1747 			kps[i]->addr = NULL;
1748 	mutex_unlock(&kprobe_mutex);
1749 
1750 	synchronize_sched();
1751 	for (i = 0; i < num; i++)
1752 		if (kps[i]->addr)
1753 			__unregister_kprobe_bottom(kps[i]);
1754 }
1755 EXPORT_SYMBOL_GPL(unregister_kprobes);
1756 
1757 int __weak kprobe_exceptions_notify(struct notifier_block *self,
1758 					unsigned long val, void *data)
1759 {
1760 	return NOTIFY_DONE;
1761 }
1762 NOKPROBE_SYMBOL(kprobe_exceptions_notify);
1763 
1764 static struct notifier_block kprobe_exceptions_nb = {
1765 	.notifier_call = kprobe_exceptions_notify,
1766 	.priority = 0x7fffffff /* we need to be notified first */
1767 };
1768 
1769 unsigned long __weak arch_deref_entry_point(void *entry)
1770 {
1771 	return (unsigned long)entry;
1772 }
1773 
1774 #if 0
1775 int register_jprobes(struct jprobe **jps, int num)
1776 {
1777 	int ret = 0, i;
1778 
1779 	if (num <= 0)
1780 		return -EINVAL;
1781 
1782 	for (i = 0; i < num; i++) {
1783 		ret = register_jprobe(jps[i]);
1784 
1785 		if (ret < 0) {
1786 			if (i > 0)
1787 				unregister_jprobes(jps, i);
1788 			break;
1789 		}
1790 	}
1791 
1792 	return ret;
1793 }
1794 EXPORT_SYMBOL_GPL(register_jprobes);
1795 
1796 int register_jprobe(struct jprobe *jp)
1797 {
1798 	unsigned long addr, offset;
1799 	struct kprobe *kp = &jp->kp;
1800 
1801 	/*
1802 	 * Verify probepoint as well as the jprobe handler are
1803 	 * valid function entry points.
1804 	 */
1805 	addr = arch_deref_entry_point(jp->entry);
1806 
1807 	if (kallsyms_lookup_size_offset(addr, NULL, &offset) && offset == 0 &&
1808 	    kprobe_on_func_entry(kp->addr, kp->symbol_name, kp->offset)) {
1809 		kp->pre_handler = setjmp_pre_handler;
1810 		kp->break_handler = longjmp_break_handler;
1811 		return register_kprobe(kp);
1812 	}
1813 
1814 	return -EINVAL;
1815 }
1816 EXPORT_SYMBOL_GPL(register_jprobe);
1817 
1818 void unregister_jprobe(struct jprobe *jp)
1819 {
1820 	unregister_jprobes(&jp, 1);
1821 }
1822 EXPORT_SYMBOL_GPL(unregister_jprobe);
1823 
1824 void unregister_jprobes(struct jprobe **jps, int num)
1825 {
1826 	int i;
1827 
1828 	if (num <= 0)
1829 		return;
1830 	mutex_lock(&kprobe_mutex);
1831 	for (i = 0; i < num; i++)
1832 		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
1833 			jps[i]->kp.addr = NULL;
1834 	mutex_unlock(&kprobe_mutex);
1835 
1836 	synchronize_sched();
1837 	for (i = 0; i < num; i++) {
1838 		if (jps[i]->kp.addr)
1839 			__unregister_kprobe_bottom(&jps[i]->kp);
1840 	}
1841 }
1842 EXPORT_SYMBOL_GPL(unregister_jprobes);
1843 #endif
1844 
1845 #ifdef CONFIG_KRETPROBES
1846 /*
1847  * This kprobe pre_handler is registered with every kretprobe. When the
1848  * probe hits, it will set up the return probe.
1849  */
1850 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
1851 {
1852 	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
1853 	unsigned long hash, flags = 0;
1854 	struct kretprobe_instance *ri;
1855 
1856 	/*
1857 	 * To avoid deadlocks, prohibit return probing in NMI contexts,
1858 	 * just skip the probe and increase the (inexact) 'nmissed'
1859 	 * statistical counter, so that the user is informed that
1860 	 * something happened:
1861 	 */
1862 	if (unlikely(in_nmi())) {
1863 		rp->nmissed++;
1864 		return 0;
1865 	}
1866 
1867 	/* TODO: consider swapping the RA only after the last pre_handler has fired */
1868 	hash = hash_ptr(current, KPROBE_HASH_BITS);
1869 	raw_spin_lock_irqsave(&rp->lock, flags);
1870 	if (!hlist_empty(&rp->free_instances)) {
1871 		ri = hlist_entry(rp->free_instances.first,
1872 				struct kretprobe_instance, hlist);
1873 		hlist_del(&ri->hlist);
1874 		raw_spin_unlock_irqrestore(&rp->lock, flags);
1875 
1876 		ri->rp = rp;
1877 		ri->task = current;
1878 
1879 		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
1880 			raw_spin_lock_irqsave(&rp->lock, flags);
1881 			hlist_add_head(&ri->hlist, &rp->free_instances);
1882 			raw_spin_unlock_irqrestore(&rp->lock, flags);
1883 			return 0;
1884 		}
1885 
1886 		arch_prepare_kretprobe(ri, regs);
1887 
1888 		/* XXX(hch): why is there no hlist_move_head? */
1889 		INIT_HLIST_NODE(&ri->hlist);
1890 		kretprobe_table_lock(hash, &flags);
1891 		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
1892 		kretprobe_table_unlock(hash, &flags);
1893 	} else {
1894 		rp->nmissed++;
1895 		raw_spin_unlock_irqrestore(&rp->lock, flags);
1896 	}
1897 	return 0;
1898 }
1899 NOKPROBE_SYMBOL(pre_handler_kretprobe);
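
/*
 * Sketch of how the entry_handler hook consumed above can be used (names
 * are illustrative only).  Returning nonzero from the entry_handler makes
 * pre_handler_kretprobe() recycle the instance, so no return handler fires
 * for that particular call (here, kernel threads without ->mm are skipped);
 * ri->data is the rp->data_size bytes of per-instance scratch space:
 *
 *	struct my_data {
 *		ktime_t entry_stamp;
 *	};
 *
 *	static int my_entry_handler(struct kretprobe_instance *ri,
 *				    struct pt_regs *regs)
 *	{
 *		struct my_data *data = (struct my_data *)ri->data;
 *
 *		if (!current->mm)
 *			return 1;
 *		data->entry_stamp = ktime_get();
 *		return 0;
 *	}
 */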
1900 
1901 bool __weak arch_kprobe_on_func_entry(unsigned long offset)
1902 {
1903 	return !offset;
1904 }
1905 
1906 bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
1907 {
1908 	kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
1909 
1910 	if (IS_ERR(kp_addr))
1911 		return false;
1912 
1913 	if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
1914 						!arch_kprobe_on_func_entry(offset))
1915 		return false;
1916 
1917 	return true;
1918 }
1919 
1920 int register_kretprobe(struct kretprobe *rp)
1921 {
1922 	int ret = 0;
1923 	struct kretprobe_instance *inst;
1924 	int i;
1925 	void *addr;
1926 
1927 	if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
1928 		return -EINVAL;
1929 
1930 	if (kretprobe_blacklist_size) {
1931 		addr = kprobe_addr(&rp->kp);
1932 		if (IS_ERR(addr))
1933 			return PTR_ERR(addr);
1934 
1935 		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
1936 			if (kretprobe_blacklist[i].addr == addr)
1937 				return -EINVAL;
1938 		}
1939 	}
1940 
1941 	rp->kp.pre_handler = pre_handler_kretprobe;
1942 	rp->kp.post_handler = NULL;
1943 	rp->kp.fault_handler = NULL;
1944 	rp->kp.break_handler = NULL;
1945 
1946 	/* Pre-allocate memory for max kretprobe instances */
1947 	if (rp->maxactive <= 0) {
1948 #ifdef CONFIG_PREEMPT
1949 		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
1950 #else
1951 		rp->maxactive = num_possible_cpus();
1952 #endif
1953 	}
1954 	raw_spin_lock_init(&rp->lock);
1955 	INIT_HLIST_HEAD(&rp->free_instances);
1956 	for (i = 0; i < rp->maxactive; i++) {
1957 		inst = kmalloc(sizeof(struct kretprobe_instance) +
1958 			       rp->data_size, GFP_KERNEL);
1959 		if (inst == NULL) {
1960 			free_rp_inst(rp);
1961 			return -ENOMEM;
1962 		}
1963 		INIT_HLIST_NODE(&inst->hlist);
1964 		hlist_add_head(&inst->hlist, &rp->free_instances);
1965 	}
1966 
1967 	rp->nmissed = 0;
1968 	/* Establish function entry probe point */
1969 	ret = register_kprobe(&rp->kp);
1970 	if (ret != 0)
1971 		free_rp_inst(rp);
1972 	return ret;
1973 }
1974 EXPORT_SYMBOL_GPL(register_kretprobe);
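
/*
 * Example registration (a minimal sketch; names and the probed symbol are
 * illustrative).  The return handler runs when the probed function returns
 * and can read the return value from the saved registers:
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		struct my_data *data = (struct my_data *)ri->data;
 *		unsigned long retval = regs_return_value(regs);
 *
 *		pr_info("returned %lu, entered at %lld\n", retval,
 *			ktime_to_ns(data->entry_stamp));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler	= my_ret_handler,
 *		.entry_handler	= my_entry_handler,
 *		.data_size	= sizeof(struct my_data),
 *		.maxactive	= 20,
 *		.kp.symbol_name	= "_do_fork",
 *	};
 *
 *	register_kretprobe(&my_kretprobe);
 *
 * entry_handler, data_size and maxactive are optional; if maxactive is left
 * at 0, the default computed above is used.
 */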
1975 
1976 int register_kretprobes(struct kretprobe **rps, int num)
1977 {
1978 	int ret = 0, i;
1979 
1980 	if (num <= 0)
1981 		return -EINVAL;
1982 	for (i = 0; i < num; i++) {
1983 		ret = register_kretprobe(rps[i]);
1984 		if (ret < 0) {
1985 			if (i > 0)
1986 				unregister_kretprobes(rps, i);
1987 			break;
1988 		}
1989 	}
1990 	return ret;
1991 }
1992 EXPORT_SYMBOL_GPL(register_kretprobes);
1993 
1994 void unregister_kretprobe(struct kretprobe *rp)
1995 {
1996 	unregister_kretprobes(&rp, 1);
1997 }
1998 EXPORT_SYMBOL_GPL(unregister_kretprobe);
1999 
2000 void unregister_kretprobes(struct kretprobe **rps, int num)
2001 {
2002 	int i;
2003 
2004 	if (num <= 0)
2005 		return;
2006 	mutex_lock(&kprobe_mutex);
2007 	for (i = 0; i < num; i++)
2008 		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
2009 			rps[i]->kp.addr = NULL;
2010 	mutex_unlock(&kprobe_mutex);
2011 
2012 	synchronize_sched();
2013 	for (i = 0; i < num; i++) {
2014 		if (rps[i]->kp.addr) {
2015 			__unregister_kprobe_bottom(&rps[i]->kp);
2016 			cleanup_rp_inst(rps[i]);
2017 		}
2018 	}
2019 }
2020 EXPORT_SYMBOL_GPL(unregister_kretprobes);
2021 
2022 #else /* CONFIG_KRETPROBES */
2023 int register_kretprobe(struct kretprobe *rp)
2024 {
2025 	return -ENOSYS;
2026 }
2027 EXPORT_SYMBOL_GPL(register_kretprobe);
2028 
2029 int register_kretprobes(struct kretprobe **rps, int num)
2030 {
2031 	return -ENOSYS;
2032 }
2033 EXPORT_SYMBOL_GPL(register_kretprobes);
2034 
2035 void unregister_kretprobe(struct kretprobe *rp)
2036 {
2037 }
2038 EXPORT_SYMBOL_GPL(unregister_kretprobe);
2039 
2040 void unregister_kretprobes(struct kretprobe **rps, int num)
2041 {
2042 }
2043 EXPORT_SYMBOL_GPL(unregister_kretprobes);
2044 
2045 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2046 {
2047 	return 0;
2048 }
2049 NOKPROBE_SYMBOL(pre_handler_kretprobe);
2050 
2051 #endif /* CONFIG_KRETPROBES */
2052 
2053 /* Mark the kprobe as gone and remove its instruction buffer. */
2054 static void kill_kprobe(struct kprobe *p)
2055 {
2056 	struct kprobe *kp;
2057 
2058 	p->flags |= KPROBE_FLAG_GONE;
2059 	if (kprobe_aggrprobe(p)) {
2060 		/*
2061 		 * If this is an aggr_kprobe, we have to list all the
2062 		 * chained probes and mark them GONE.
2063 		 */
2064 		list_for_each_entry_rcu(kp, &p->list, list)
2065 			kp->flags |= KPROBE_FLAG_GONE;
2066 		p->post_handler = NULL;
2067 		p->break_handler = NULL;
2068 		kill_optimized_kprobe(p);
2069 	}
2070 	/*
2071 	 * Here, we can remove insn_slot safely, because no thread calls
2072 	 * the original probed function (which will be freed soon) any more.
2073 	 */
2074 	arch_remove_kprobe(p);
2075 }
2076 
2077 /* Disable one kprobe */
2078 int disable_kprobe(struct kprobe *kp)
2079 {
2080 	int ret = 0;
2081 
2082 	mutex_lock(&kprobe_mutex);
2083 
2084 	/* Disable this kprobe */
2085 	if (__disable_kprobe(kp) == NULL)
2086 		ret = -EINVAL;
2087 
2088 	mutex_unlock(&kprobe_mutex);
2089 	return ret;
2090 }
2091 EXPORT_SYMBOL_GPL(disable_kprobe);
2092 
2093 /* Enable one kprobe */
2094 int enable_kprobe(struct kprobe *kp)
2095 {
2096 	int ret = 0;
2097 	struct kprobe *p;
2098 
2099 	mutex_lock(&kprobe_mutex);
2100 
2101 	/* Check whether the specified probe is valid. */
2102 	p = __get_valid_kprobe(kp);
2103 	if (unlikely(p == NULL)) {
2104 		ret = -EINVAL;
2105 		goto out;
2106 	}
2107 
2108 	if (kprobe_gone(kp)) {
2109 		/* This kprobe has gone, we can't enable it. */
2110 		ret = -EINVAL;
2111 		goto out;
2112 	}
2113 
2114 	if (p != kp)
2115 		kp->flags &= ~KPROBE_FLAG_DISABLED;
2116 
2117 	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
2118 		p->flags &= ~KPROBE_FLAG_DISABLED;
2119 		arm_kprobe(p);
2120 	}
2121 out:
2122 	mutex_unlock(&kprobe_mutex);
2123 	return ret;
2124 }
2125 EXPORT_SYMBOL_GPL(enable_kprobe);
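
/*
 * Example (illustrative): a probe registered with register_kprobe() can be
 * parked and re-armed without unregistering it, e.g. while a noisy code
 * path is expected:
 *
 *	disable_kprobe(&my_kprobe);
 *	...
 *	enable_kprobe(&my_kprobe);
 *
 * While disabled, the probe stays registered but its handlers no longer
 * run; enable_kprobe() re-arms it when needed.
 */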
2126 
2127 void dump_kprobe(struct kprobe *kp)
2128 {
2129 	printk(KERN_WARNING "Dumping kprobe:\n");
2130 	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
2131 	       kp->symbol_name, kp->addr, kp->offset);
2132 }
2133 NOKPROBE_SYMBOL(dump_kprobe);
2134 
2135 /*
2136  * Look up and populate the kprobe_blacklist.
2137  *
2138  * Unlike the kretprobe blacklist, we'll need to determine
2139  * the range of addresses that belong to the blacklisted functions,
2140  * since a kprobe need not necessarily be at the beginning
2141  * of a function.
2142  */
2143 static int __init populate_kprobe_blacklist(unsigned long *start,
2144 					     unsigned long *end)
2145 {
2146 	unsigned long *iter;
2147 	struct kprobe_blacklist_entry *ent;
2148 	unsigned long entry, offset = 0, size = 0;
2149 
2150 	for (iter = start; iter < end; iter++) {
2151 		entry = arch_deref_entry_point((void *)*iter);
2152 
2153 		if (!kernel_text_address(entry) ||
2154 		    !kallsyms_lookup_size_offset(entry, &size, &offset)) {
2155 			pr_err("Failed to find blacklist at %p\n",
2156 				(void *)entry);
2157 			continue;
2158 		}
2159 
2160 		ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2161 		if (!ent)
2162 			return -ENOMEM;
2163 		ent->start_addr = entry;
2164 		ent->end_addr = entry + size;
2165 		INIT_LIST_HEAD(&ent->list);
2166 		list_add_tail(&ent->list, &kprobe_blacklist);
2167 	}
2168 	return 0;
2169 }
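
/*
 * The _kprobe_blacklist section walked above is filled by NOKPROBE_SYMBOL()
 * annotations elsewhere in the kernel.  A sketch of how a function opts out
 * of probing (illustrative name only):
 *
 *	static void my_fragile_helper(void)
 *	{
 *		...
 *	}
 *	NOKPROBE_SYMBOL(my_fragile_helper);
 *
 * This records the function's entry address in the section;
 * populate_kprobe_blacklist() then expands each entry to the full
 * [start_addr, end_addr) range via kallsyms_lookup_size_offset().
 */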
2170 
2171 /* Module notifier call back, checking kprobes on the module */
2172 /* Module notifier callback, checking kprobes on the module */
2173 				   unsigned long val, void *data)
2174 {
2175 	struct module *mod = data;
2176 	struct hlist_head *head;
2177 	struct kprobe *p;
2178 	unsigned int i;
2179 	int checkcore = (val == MODULE_STATE_GOING);
2180 
2181 	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
2182 		return NOTIFY_DONE;
2183 
2184 	/*
2185 	 * When MODULE_STATE_GOING is notified, both the module's .text and
2186 	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
2187 	 * notified, only the .init.text section will be freed. We need to
2188 	 * disable kprobes which have been inserted in those sections.
2189 	 */
2190 	mutex_lock(&kprobe_mutex);
2191 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2192 		head = &kprobe_table[i];
2193 		hlist_for_each_entry_rcu(p, head, hlist)
2194 			if (within_module_init((unsigned long)p->addr, mod) ||
2195 			    (checkcore &&
2196 			     within_module_core((unsigned long)p->addr, mod))) {
2197 				/*
2198 				 * The vaddr this probe is installed at will soon
2199 				 * be vfreed but not synced to disk. Hence,
2200 				 * disarming the breakpoint isn't needed.
2201 				 *
2202 				 * Note, this will also move any optimized probes
2203 				 * that are pending removal from their
2204 				 * corresponding lists to the freeing_list, so they
2205 				 * will not be touched by the delayed
2206 				 * kprobe_optimizer work handler.
2207 				 */
2208 				kill_kprobe(p);
2209 			}
2210 	}
2211 	mutex_unlock(&kprobe_mutex);
2212 	return NOTIFY_DONE;
2213 }
2214 
2215 static struct notifier_block kprobe_module_nb = {
2216 	.notifier_call = kprobes_module_callback,
2217 	.priority = 0
2218 };
2219 
2220 /* Markers of _kprobe_blacklist section */
2221 extern unsigned long __start_kprobe_blacklist[];
2222 extern unsigned long __stop_kprobe_blacklist[];
2223 
2224 static int __init init_kprobes(void)
2225 {
2226 	int i, err = 0;
2227 
2228 	/* FIXME allocate the probe table, currently defined statically */
2229 	/* initialize all list heads */
2230 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2231 		INIT_HLIST_HEAD(&kprobe_table[i]);
2232 		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
2233 		raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
2234 	}
2235 
2236 	err = populate_kprobe_blacklist(__start_kprobe_blacklist,
2237 					__stop_kprobe_blacklist);
2238 	if (err) {
2239 		pr_err("kprobes: failed to populate blacklist: %d\n", err);
2240 		pr_err("Please take care when using kprobes.\n");
2241 	}
2242 
2243 	if (kretprobe_blacklist_size) {
2244 		/* look up the function address from its name */
2245 		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2246 			kretprobe_blacklist[i].addr =
2247 				kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
2248 			if (!kretprobe_blacklist[i].addr)
2249 				printk("kretprobe: lookup failed: %s\n",
2250 				       kretprobe_blacklist[i].name);
2251 		}
2252 	}
2253 
2254 #if defined(CONFIG_OPTPROBES)
2255 #if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
2256 	/* Init kprobe_optinsn_slots */
2257 	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
2258 #endif
2259 	/* By default, kprobes can be optimized */
2260 	kprobes_allow_optimization = true;
2261 #endif
2262 
2263 	/* By default, kprobes are armed */
2264 	kprobes_all_disarmed = false;
2265 
2266 	err = arch_init_kprobes();
2267 	if (!err)
2268 		err = register_die_notifier(&kprobe_exceptions_nb);
2269 	if (!err)
2270 		err = register_module_notifier(&kprobe_module_nb);
2271 
2272 	kprobes_initialized = (err == 0);
2273 
2274 	if (!err)
2275 		init_test_probes();
2276 	return err;
2277 }
2278 
2279 #ifdef CONFIG_DEBUG_FS
2280 static void report_probe(struct seq_file *pi, struct kprobe *p,
2281 		const char *sym, int offset, char *modname, struct kprobe *pp)
2282 {
2283 	char *kprobe_type;
2284 
2285 	if (p->pre_handler == pre_handler_kretprobe)
2286 		kprobe_type = "r";
2287 	else if (p->pre_handler == setjmp_pre_handler)
2288 		kprobe_type = "j";
2289 	else
2290 		kprobe_type = "k";
2291 
2292 	if (sym)
2293 		seq_printf(pi, "%p  %s  %s+0x%x  %s ",
2294 			p->addr, kprobe_type, sym, offset,
2295 			(modname ? modname : " "));
2296 	else
2297 		seq_printf(pi, "%p  %s  %p ",
2298 			p->addr, kprobe_type, p->addr);
2299 
2300 	if (!pp)
2301 		pp = p;
2302 	seq_printf(pi, "%s%s%s%s\n",
2303 		(kprobe_gone(p) ? "[GONE]" : ""),
2304 		((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
2305 		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
2306 		(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
2307 }
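
/*
 * The resulting lines in the debugfs "list" file look like the following
 * (addresses are illustrative):
 *
 *	c015d71a  k  vfs_read+0x0
 *	c03dedc5  r  tcp_v4_rcv+0x0    [DISABLED]
 *
 * The second column is the probe type (k: kprobe, r: kretprobe, j: jprobe);
 * the trailing markers report the GONE/DISABLED/OPTIMIZED/FTRACE state
 * printed above.
 */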
2308 
2309 static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
2310 {
2311 	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
2312 }
2313 
2314 static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
2315 {
2316 	(*pos)++;
2317 	if (*pos >= KPROBE_TABLE_SIZE)
2318 		return NULL;
2319 	return pos;
2320 }
2321 
2322 static void kprobe_seq_stop(struct seq_file *f, void *v)
2323 {
2324 	/* Nothing to do */
2325 }
2326 
2327 static int show_kprobe_addr(struct seq_file *pi, void *v)
2328 {
2329 	struct hlist_head *head;
2330 	struct kprobe *p, *kp;
2331 	const char *sym = NULL;
2332 	unsigned int i = *(loff_t *) v;
2333 	unsigned long offset = 0;
2334 	char *modname, namebuf[KSYM_NAME_LEN];
2335 
2336 	head = &kprobe_table[i];
2337 	preempt_disable();
2338 	hlist_for_each_entry_rcu(p, head, hlist) {
2339 		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
2340 					&offset, &modname, namebuf);
2341 		if (kprobe_aggrprobe(p)) {
2342 			list_for_each_entry_rcu(kp, &p->list, list)
2343 				report_probe(pi, kp, sym, offset, modname, p);
2344 		} else
2345 			report_probe(pi, p, sym, offset, modname, NULL);
2346 	}
2347 	preempt_enable();
2348 	return 0;
2349 }
2350 
2351 static const struct seq_operations kprobes_seq_ops = {
2352 	.start = kprobe_seq_start,
2353 	.next  = kprobe_seq_next,
2354 	.stop  = kprobe_seq_stop,
2355 	.show  = show_kprobe_addr
2356 };
2357 
2358 static int kprobes_open(struct inode *inode, struct file *filp)
2359 {
2360 	return seq_open(filp, &kprobes_seq_ops);
2361 }
2362 
2363 static const struct file_operations debugfs_kprobes_operations = {
2364 	.open           = kprobes_open,
2365 	.read           = seq_read,
2366 	.llseek         = seq_lseek,
2367 	.release        = seq_release,
2368 };
2369 
2370 /* kprobes/blacklist -- shows which functions cannot be probed */
2371 static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
2372 {
2373 	return seq_list_start(&kprobe_blacklist, *pos);
2374 }
2375 
2376 static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
2377 {
2378 	return seq_list_next(v, &kprobe_blacklist, pos);
2379 }
2380 
2381 static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
2382 {
2383 	struct kprobe_blacklist_entry *ent =
2384 		list_entry(v, struct kprobe_blacklist_entry, list);
2385 
2386 	seq_printf(m, "0x%p-0x%p\t%ps\n", (void *)ent->start_addr,
2387 		   (void *)ent->end_addr, (void *)ent->start_addr);
2388 	return 0;
2389 }
2390 
2391 static const struct seq_operations kprobe_blacklist_seq_ops = {
2392 	.start = kprobe_blacklist_seq_start,
2393 	.next  = kprobe_blacklist_seq_next,
2394 	.stop  = kprobe_seq_stop,	/* Reuse void function */
2395 	.show  = kprobe_blacklist_seq_show,
2396 };
2397 
2398 static int kprobe_blacklist_open(struct inode *inode, struct file *filp)
2399 {
2400 	return seq_open(filp, &kprobe_blacklist_seq_ops);
2401 }
2402 
2403 static const struct file_operations debugfs_kprobe_blacklist_ops = {
2404 	.open           = kprobe_blacklist_open,
2405 	.read           = seq_read,
2406 	.llseek         = seq_lseek,
2407 	.release        = seq_release,
2408 };
2409 
2410 static void arm_all_kprobes(void)
2411 {
2412 	struct hlist_head *head;
2413 	struct kprobe *p;
2414 	unsigned int i;
2415 
2416 	mutex_lock(&kprobe_mutex);
2417 
2418 	/* If kprobes are armed, just return */
2419 	if (!kprobes_all_disarmed)
2420 		goto already_enabled;
2421 
2422 	/*
2423 	 * optimize_kprobe(), called by arm_kprobe(), checks
2424 	 * kprobes_all_disarmed, so clear kprobes_all_disarmed before
2425 	 * calling arm_kprobe().
2426 	 */
2427 	kprobes_all_disarmed = false;
2428 	/* Arming kprobes doesn't optimize kprobe itself */
2429 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2430 		head = &kprobe_table[i];
2431 		hlist_for_each_entry_rcu(p, head, hlist)
2432 			if (!kprobe_disabled(p))
2433 				arm_kprobe(p);
2434 	}
2435 
2436 	printk(KERN_INFO "Kprobes globally enabled\n");
2437 
2438 already_enabled:
2439 	mutex_unlock(&kprobe_mutex);
2440 	return;
2441 }
2442 
2443 static void disarm_all_kprobes(void)
2444 {
2445 	struct hlist_head *head;
2446 	struct kprobe *p;
2447 	unsigned int i;
2448 
2449 	mutex_lock(&kprobe_mutex);
2450 
2451 	/* If kprobes are already disarmed, just return */
2452 	if (kprobes_all_disarmed) {
2453 		mutex_unlock(&kprobe_mutex);
2454 		return;
2455 	}
2456 
2457 	kprobes_all_disarmed = true;
2458 	printk(KERN_INFO "Kprobes globally disabled\n");
2459 
2460 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2461 		head = &kprobe_table[i];
2462 		hlist_for_each_entry_rcu(p, head, hlist) {
2463 			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
2464 				disarm_kprobe(p, false);
2465 		}
2466 	}
2467 	mutex_unlock(&kprobe_mutex);
2468 
2469 	/* Wait for the optimizer to finish disarming all kprobes */
2470 	wait_for_kprobe_optimizer();
2471 }
2472 
2473 /*
2474  * XXX: The debugfs bool file interface doesn't allow for callbacks
2475  * when the bool state is switched. We can reuse that facility when
2476  * it becomes available.
2477  */
2478 static ssize_t read_enabled_file_bool(struct file *file,
2479 	       char __user *user_buf, size_t count, loff_t *ppos)
2480 {
2481 	char buf[3];
2482 
2483 	if (!kprobes_all_disarmed)
2484 		buf[0] = '1';
2485 	else
2486 		buf[0] = '0';
2487 	buf[1] = '\n';
2488 	buf[2] = 0x00;
2489 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
2490 }
2491 
2492 static ssize_t write_enabled_file_bool(struct file *file,
2493 	       const char __user *user_buf, size_t count, loff_t *ppos)
2494 {
2495 	char buf[32];
2496 	size_t buf_size;
2497 
2498 	buf_size = min(count, (sizeof(buf)-1));
2499 	if (copy_from_user(buf, user_buf, buf_size))
2500 		return -EFAULT;
2501 
2502 	buf[buf_size] = '\0';
2503 	switch (buf[0]) {
2504 	case 'y':
2505 	case 'Y':
2506 	case '1':
2507 		arm_all_kprobes();
2508 		break;
2509 	case 'n':
2510 	case 'N':
2511 	case '0':
2512 		disarm_all_kprobes();
2513 		break;
2514 	default:
2515 		return -EINVAL;
2516 	}
2517 
2518 	return count;
2519 }
2520 
2521 static const struct file_operations fops_kp = {
2522 	.read =         read_enabled_file_bool,
2523 	.write =        write_enabled_file_bool,
2524 	.llseek =	default_llseek,
2525 };
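
/*
 * From userspace this switch is driven through debugfs (assuming it is
 * mounted at /sys/kernel/debug), for example:
 *
 *	echo 0 > /sys/kernel/debug/kprobes/enabled
 *	echo 1 > /sys/kernel/debug/kprobes/enabled
 *
 * '0' (or 'n'/'N') calls disarm_all_kprobes(), '1' (or 'y'/'Y') calls
 * arm_all_kprobes(), as handled by write_enabled_file_bool().
 */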
2526 
2527 static int __init debugfs_kprobe_init(void)
2528 {
2529 	struct dentry *dir, *file;
2530 	unsigned int value = 1;
2531 
2532 	dir = debugfs_create_dir("kprobes", NULL);
2533 	if (!dir)
2534 		return -ENOMEM;
2535 
2536 	file = debugfs_create_file("list", 0444, dir, NULL,
2537 				&debugfs_kprobes_operations);
2538 	if (!file)
2539 		goto error;
2540 
2541 	file = debugfs_create_file("enabled", 0600, dir,
2542 					&value, &fops_kp);
2543 	if (!file)
2544 		goto error;
2545 
2546 	file = debugfs_create_file("blacklist", 0444, dir, NULL,
2547 				&debugfs_kprobe_blacklist_ops);
2548 	if (!file)
2549 		goto error;
2550 
2551 	return 0;
2552 
2553 error:
2554 	debugfs_remove(dir);
2555 	return -ENOMEM;
2556 }
2557 
2558 late_initcall(debugfs_kprobe_init);
2559 #endif /* CONFIG_DEBUG_FS */
2560 
2561 module_init(init_kprobes);
2562 
2563 /* defined in arch/.../kernel/kprobes.c */
2564 EXPORT_SYMBOL_GPL(jprobe_return);
2565