xref: /openbmc/linux/arch/s390/kernel/kprobes.c (revision 6774def6)
/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corp. 2002, 2006
 *
 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/dis.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = { };

DEFINE_INSN_CACHE_OPS(dmainsn);

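/*
 * Insn slots for probes on kernel-image code are allocated with GFP_DMA,
 * which on s390 means below 2 GB, so that a slot is guaranteed to lie
 * within the same 2GB area as the kernel text. That is what allows
 * copy_instruction() below to re-encode pc-relative displacements.
 */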
static void *alloc_dmainsn_page(void)
{
	return (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
}

static void free_dmainsn_page(void *page)
{
	free_page((unsigned long)page);
}

struct kprobe_insn_cache kprobe_dmainsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_dmainsn_slots.mutex),
	.alloc = alloc_dmainsn_page,
	.free = free_dmainsn_page,
	.pages = LIST_HEAD_INIT(kprobe_dmainsn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
};

static void __kprobes copy_instruction(struct kprobe *p)
{
	s64 disp, new_disp;
	u64 addr, new_addr;

	memcpy(p->ainsn.insn, p->addr, insn_length(p->opcode >> 8));
	if (!probe_is_insn_relative_long(p->ainsn.insn))
		return;
	/*
	 * For pc-relative instructions in RIL-b or RIL-c format patch the
	 * RI2 displacement field. We have already made sure that the insn
	 * slot for the patched instruction is within the same 2GB area
	 * as the original instruction (either kernel image or module area).
	 * Therefore the new displacement will always fit.
	 */
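	/*
	 * Worked example with made-up addresses: an instruction at
	 * addr = 0x10000 with RI2 displacement disp branches to
	 * addr + disp * 2 (RI2 counts halfwords). With the copy living
	 * at new_addr = 0x30000, the original target is preserved by
	 * re-encoding new_disp = ((addr + disp * 2) - new_addr) / 2.
	 */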
	disp = *(s32 *)&p->ainsn.insn[1];
	addr = (u64)(unsigned long)p->addr;
	new_addr = (u64)(unsigned long)p->ainsn.insn;
	new_disp = ((addr + (disp * 2)) - new_addr) / 2;
	*(s32 *)&p->ainsn.insn[1] = new_disp;
}

static inline int is_kernel_addr(void *addr)
{
	return addr < (void *)_end;
}

static inline int is_module_addr(void *addr)
{
#ifdef CONFIG_64BIT
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
#endif
	return 1;
}

static int __kprobes s390_get_insn_slot(struct kprobe *p)
{
	/*
	 * Get an insn slot that is within the same 2GB area as the original
	 * instruction. That way instructions with a 32bit signed displacement
	 * field can be patched and executed within the insn slot.
	 */
	p->ainsn.insn = NULL;
	if (is_kernel_addr(p->addr))
		p->ainsn.insn = get_dmainsn_slot();
	else if (is_module_addr(p->addr))
		p->ainsn.insn = get_insn_slot();
	return p->ainsn.insn ? 0 : -ENOMEM;
}

static void __kprobes s390_free_insn_slot(struct kprobe *p)
{
	if (!p->ainsn.insn)
		return;
	if (is_kernel_addr(p->addr))
		free_dmainsn_slot(p->ainsn.insn, 0);
	else
		free_insn_slot(p->ainsn.insn, 0);
	p->ainsn.insn = NULL;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
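	/*
	 * s390 instructions are 2 byte aligned; an odd address can never
	 * be the start of an instruction.
	 */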
	if ((unsigned long) p->addr & 0x01)
		return -EINVAL;
	/* Make sure the probe isn't going on a prohibited instruction */
	if (probe_is_prohibited_opcode(p->addr))
		return -EINVAL;
	if (s390_get_insn_slot(p))
		return -ENOMEM;
	p->opcode = *p->addr;
	copy_instruction(p);
	return 0;
}

struct ins_replace_args {
	kprobe_opcode_t *ptr;
	kprobe_opcode_t opcode;
};

static int __kprobes swap_instruction(void *aref)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long status = kcb->kprobe_status;
	struct ins_replace_args *args = aref;

	kcb->kprobe_status = KPROBE_SWAP_INST;
	probe_kernel_write(args->ptr, &args->opcode, sizeof(args->opcode));
	kcb->kprobe_status = status;
	return 0;
}

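/*
 * Arming and disarming run under stop_machine() so that no other CPU
 * can execute the probed location while its first halfword is being
 * swapped; this is also what rules out the breakpoint vs. unregister
 * race mentioned in kprobe_handler().
 */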
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	struct ins_replace_args args;

	args.ptr = p->addr;
	args.opcode = BREAKPOINT_INSTRUCTION;
	stop_machine(swap_instruction, &args, NULL);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	struct ins_replace_args args;

	args.ptr = p->addr;
	args.opcode = p->opcode;
	stop_machine(swap_instruction, &args, NULL);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	s390_free_insn_slot(p);
}

static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
					struct pt_regs *regs,
					unsigned long ip)
{
	struct per_regs per_kprobe;

	/* Set up the PER control registers %cr9-%cr11 */
	per_kprobe.control = PER_EVENT_IFETCH;
	per_kprobe.start = ip;
	per_kprobe.end = ip;
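	/*
	 * With start == end the instruction-fetch event fires for exactly
	 * one address: the copied instruction in the insn slot.
	 */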

	/* Save control regs and psw mask */
	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
	kcb->kprobe_saved_imask = regs->psw.mask &
		(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);

	/* Set PER control regs, turns on single step for the given address */
	__ctl_load(per_kprobe, 9, 11);
	regs->psw.mask |= PSW_MASK_PER;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
	regs->psw.addr = ip | PSW_ADDR_AMODE;
}

static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb,
					 struct pt_regs *regs,
					 unsigned long ip)
{
	/* Restore control regs and psw mask, set new psw address */
	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
	regs->psw.mask &= ~PSW_MASK_PER;
	regs->psw.mask |= kcb->kprobe_saved_imask;
	regs->psw.addr = ip | PSW_ADDR_AMODE;
}

/*
 * Activate a kprobe by storing its pointer to current_kprobe. The
 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
 * two kprobes can be active, see KPROBE_REENTER.
 */
static void __kprobes push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe);
	kcb->prev_kprobe.status = kcb->kprobe_status;
	__this_cpu_write(current_kprobe, p);
}

/*
 * Deactivate a kprobe by backing up to the previous state. If the
 * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
 * for any other state prev_kprobe.kp will be NULL.
 */
static void __kprobes pop_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
					struct pt_regs *regs)
{
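	/* %r14 holds the return address in the s390 calling convention */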
	ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];

	/* Replace the return addr with trampoline addr */
	regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
}

static void __kprobes kprobe_reenter_check(struct kprobe_ctlblk *kcb,
					   struct kprobe *p)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
	default:
		/*
		 * A kprobe on the code path to single step an instruction
		 * is a BUG. The code path resides in the .kprobes.text
		 * section and is executed with interrupts disabled.
		 */
		printk(KERN_EMERG "Invalid kprobe detected at %p.\n", p->addr);
		dump_kprobe(p);
		BUG();
	}
}

static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb;
	struct kprobe *p;

	/*
	 * We want to disable preemption for the entire duration of kprobe
	 * processing. That includes the calls to the pre/post handlers
	 * and single stepping the kprobe instruction.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();
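	/*
	 * psw.addr already points past the 2-byte breakpoint instruction;
	 * step back to the address the probe was planted on.
	 */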
	p = get_kprobe((void *)((regs->psw.addr & PSW_ADDR_INSN) - 2));

	if (p) {
		if (kprobe_running()) {
			/*
			 * We have hit a kprobe while another is still
			 * active. This can happen in the pre and post
			 * handler. Single step the instruction of the
			 * new probe but do not call any handler function
			 * of this secondary kprobe.
			 * push_kprobe and pop_kprobe save and restore
			 * the currently active kprobe.
			 */
			kprobe_reenter_check(kcb, p);
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_REENTER;
		} else {
			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with single stepping. If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry
			 * for jprobe processing, so get out doing nothing
			 * more here.
			 */
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
			if (p->pre_handler && p->pre_handler(p, regs))
				return 1;
			kcb->kprobe_status = KPROBE_HIT_SS;
		}
		enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
		return 1;
	} else if (kprobe_running()) {
		p = __this_cpu_read(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			/*
			 * Continuation after the jprobe completed and
			 * caused the jprobe_return trap. The jprobe
			 * break_handler "returns" to the original
			 * function that still has the kprobe breakpoint
			 * installed. We continue with single stepping.
			 */
			kcb->kprobe_status = KPROBE_HIT_SS;
			enable_singlestep(kcb, regs,
					  (unsigned long) p->ainsn.insn);
			return 1;
		} /* else:
		   * No kprobe at this address and the current kprobe
		   * has no break handler (no jprobe!). The kernel just
		   * exploded, let the standard trap handler pick up the
		   * pieces.
		   */
	} /* else:
	   * No kprobe at this address and no active kprobe. The trap has
	   * not been caused by a kprobe breakpoint. The race of breakpoint
	   * vs. kprobe remove does not exist because on s390 we use
	   * stop_machine to arm/disarm the breakpoints.
	   */
	preempt_enable_no_resched();
	return 0;
}

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *		causes the handlers to fire
 */
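/*
 * "bcr 0,0" is a two-byte nop; the label merely provides a well-known
 * address for the trampoline kprobe registered in arch_init_kprobes().
 */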
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
		     "kretprobe_trampoline: bcr 0,0\n");
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address;
	unsigned long trampoline_address;
	kprobe_opcode_t *correct_ret_addr;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the first instance's ret_addr will point to the
	 *	 real return address, and all the rest will point to
	 *	 kretprobe_trampoline
	 */
	ri = NULL;
	orig_ret_address = 0;
	correct_ret_addr = NULL;
	trampoline_address = (unsigned long) &kretprobe_trampoline;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long) ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

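	/*
	 * Second pass: the real return address is known now, so run the
	 * handlers with a corrected ri->ret_addr and recycle the instances.
	 */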
	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long) ri->ret_addr;

		if (ri->rp && ri->rp->handler) {
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;

	pop_kprobe(get_kprobe_ctlblk());
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
	int fixup = probe_get_fixup_type(p->ainsn.insn);

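	/*
	 * FIXUP_PSW_NORMAL: the PSW still points into the insn slot;
	 *	relocate it to the equivalent address relative to p->addr.
	 * FIXUP_BRANCH_NOT_TAKEN: a conditional branch that fell through
	 *	in the slot must resume after the original instruction.
	 * FIXUP_RETURN_REGISTER: a branch-and-save instruction stored a
	 *	return address relative to the slot; shift the register by
	 *	the distance between original and copy.
	 */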
	if (fixup & FIXUP_PSW_NORMAL)
		ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;

	if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
		int ilen = insn_length(p->ainsn.insn[0] >> 8);
		if (ip - (unsigned long) p->ainsn.insn == ilen)
			ip = (unsigned long) p->addr + ilen;
	}

	if (fixup & FIXUP_RETURN_REGISTER) {
		int reg = (p->ainsn.insn[0] & 0xf0) >> 4;
		regs->gprs[reg] += (unsigned long) p->addr -
				   (unsigned long) p->ainsn.insn;
	}

	disable_singlestep(kcb, regs, ip);
}

static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();

	if (!p)
		return 0;

	if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		p->post_handler(p, regs, 0);
	}

	resume_execution(p, regs);
	pop_kprobe(kcb);
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, the
	 * psw mask will have PER set. In that case continue the remaining
	 * processing of do_single_step, as if this were not a probe hit.
	 */
	if (regs->psw.mask & PSW_MASK_PER)
		return 0;

	return 1;
}

static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();
	const struct exception_table_entry *entry;

	switch(kcb->kprobe_status) {
	case KPROBE_SWAP_INST:
		/* We are here because the instruction replacement failed */
		return 0;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the psw back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		disable_singlestep(kcb, regs, (unsigned long) p->addr);
		pop_kprobe(kcb);
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts could also be used to
		 * account for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(p);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault. This could happen
		 * if the handler tries to access user space, e.g. by
		 * copy_from_user() or get_user(). Let the
		 * user-specified handler try to fix it first.
		 */
		if (p->fault_handler && p->fault_handler(p, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (entry) {
			regs->psw.addr = extable_fixup(entry) | PSW_ADDR_AMODE;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it;
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	int ret;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();
	ret = kprobe_trap_handler(regs, trapnr);
	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
	return ret;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *) data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_TRAP:
		if (!preemptible() && kprobe_running() &&
		    kprobe_trap_handler(regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);

	return ret;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack;

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->psw.addr = (unsigned long) jp->entry | PSW_ADDR_AMODE;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);

	/* r15 is the stack pointer */
	stack = (unsigned long) regs->gprs[15];

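	/*
	 * The jprobe handler runs on the probed function's stack; save
	 * the top of the stack here so longjmp_break_handler() can
	 * restore it afterwards.
	 */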
	memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack));
	return 1;
}

void __kprobes jprobe_return(void)
{
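	/*
	 * 0x0002 is the s390 kprobe breakpoint; the resulting trap is
	 * routed through kprobe_handler() to longjmp_break_handler().
	 */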
	asm volatile(".word 0x0002");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack;

	stack = (unsigned long) kcb->jprobe_saved_regs.gprs[15];

	/* Put the regs back */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	/* put the stack back */
	memcpy((void *) stack, kcb->jprobes_stack, MIN_STACK_SIZE(stack));
	preempt_enable_no_resched();
	return 1;
}

static struct kprobe trampoline = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
}