// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Kernel Probes Jump Optimization (Optprobes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Hitachi Ltd., 2012
 * Copyright (C) Huawei Inc., 2014
 */

#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
/* for arm_gen_branch */
#include <asm/insn.h>
/* for patch_text */
#include <asm/patch.h>

#include "core.h"

/*
 * See register_usage_flags. If the probed instruction doesn't use PC,
 * we can copy it into the template and have it executed directly,
 * without simulation or emulation.
 */
#define ARM_REG_PC	15
#define can_kprobe_direct_exec(m)	(!test_bit(ARM_REG_PC, &(m)))

/*
 * NOTE: the first 'sub' and 'add' instructions will be modified
 * according to the stack cost of the probed instruction.
 */
asm (
			".global optprobe_template_entry\n"
			"optprobe_template_entry:\n"
			".global optprobe_template_sub_sp\n"
			"optprobe_template_sub_sp:"
			"	sub	sp, sp, #0xff\n"
			"	stmia	sp, {r0 - r14}\n"
			".global optprobe_template_add_sp\n"
			"optprobe_template_add_sp:"
			"	add	r3, sp, #0xff\n"
			"	str	r3, [sp, #52]\n"	/* save original SP (pt_regs->ARM_sp) */
			"	mrs	r4, cpsr\n"
			"	str	r4, [sp, #64]\n"	/* save CPSR (pt_regs->ARM_cpsr) */
			"	mov	r1, sp\n"		/* r1 = &pt_regs */
			"	ldr	r0, 1f\n"		/* r0 = optimized_kprobe pointer (slot 1) */
			"	ldr	r2, 2f\n"		/* r2 = optimized_callback address (slot 2) */
			/*
			 * AEABI requires an 8-byte aligned stack. If
			 * SP % 8 != 0 (SP % 4 == 0 should be ensured),
			 * allocate more bytes here.
			 */
			"	and	r4, sp, #4\n"
			"	sub	sp, sp, r4\n"
#if __LINUX_ARM_ARCH__ >= 5
			"	blx	r2\n"
#else
			"	mov	lr, pc\n"
			"	mov	pc, r2\n"
#endif
			"	add	sp, sp, r4\n"
			"	ldr	r1, [sp, #64]\n"
			"	tst	r1, #"__stringify(PSR_T_BIT)"\n"
			"	ldrne	r2, [sp, #60]\n"
			"	orrne	r2, #1\n"
			"	strne	r2, [sp, #60]	@ set bit0 of PC for thumb\n"
			"	msr	cpsr_cxsf, r1\n"
			".global optprobe_template_restore_begin\n"
			"optprobe_template_restore_begin:\n"
			"	ldmia	sp, {r0 - r15}\n"
			".global optprobe_template_restore_orig_insn\n"
			"optprobe_template_restore_orig_insn:\n"
			"	nop\n"
			".global optprobe_template_restore_end\n"
			"optprobe_template_restore_end:\n"
			"	nop\n"
			".global optprobe_template_val\n"
			"optprobe_template_val:\n"
			"1:	.long 0\n"
			".global optprobe_template_call\n"
			"optprobe_template_call:\n"
			"2:	.long 0\n"
			".global optprobe_template_end\n"
			"optprobe_template_end:\n");
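
/*
 * A sketch of the flow above: the 'b' patched over the probed insn
 * lands at optprobe_template_entry, which reserves stack and saves
 * r0-r14, stores the original SP and CPSR into the pt_regs frame
 * ([sp, #52] and [sp, #64]), then loads the optimized_kprobe pointer
 * from slot 1 and the optimized_callback address from slot 2 and
 * calls it with (op, regs). On return, CPSR is restored and
 * 'ldmia sp, {r0 - r15}' resumes execution, unless the restore
 * sequence has been patched for direct execution (see
 * arch_prepare_optimized_kprobe()).
 */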

#define TMPL_VAL_IDX \
	((unsigned long *)optprobe_template_val - (unsigned long *)optprobe_template_entry)
#define TMPL_CALL_IDX \
	((unsigned long *)optprobe_template_call - (unsigned long *)optprobe_template_entry)
#define TMPL_END_IDX \
	((unsigned long *)optprobe_template_end - (unsigned long *)optprobe_template_entry)
#define TMPL_ADD_SP \
	((unsigned long *)optprobe_template_add_sp - (unsigned long *)optprobe_template_entry)
#define TMPL_SUB_SP \
	((unsigned long *)optprobe_template_sub_sp - (unsigned long *)optprobe_template_entry)
#define TMPL_RESTORE_BEGIN \
	((unsigned long *)optprobe_template_restore_begin - (unsigned long *)optprobe_template_entry)
#define TMPL_RESTORE_ORIGN_INSN \
	((unsigned long *)optprobe_template_restore_orig_insn - (unsigned long *)optprobe_template_entry)
#define TMPL_RESTORE_END \
	((unsigned long *)optprobe_template_restore_end - (unsigned long *)optprobe_template_entry)
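
/*
 * Each TMPL_* above is the word offset of its label within the
 * template; arch_prepare_optimized_kprobe() uses these to index the
 * copied slot and patch individual instructions in place.
 */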

/*
 * ARM can always optimize an instruction when using the ARM ISA,
 * except for instructions like 'str r0, [sp, r1]', which store to the
 * stack and whose stack space consumption cannot be determined
 * statically.
 */
int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
	return optinsn->insn != NULL;
}

/*
 * In the ARM ISA, kprobe opt always replaces one instruction (4 bytes
 * aligned and 4 bytes long). It is impossible to encounter another
 * kprobe in the address range. So always return 0.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	return 0;
}

/* Caller must ensure addr & 3 == 0 */
static int can_optimize(struct kprobe *kp)
{
	if (kp->ainsn.stack_space < 0)
		return 0;
	/*
	 * 255 is the biggest immediate that can be used in
	 * 'sub r0, r0, #<imm>'. Numbers larger than 255 need
	 * special encoding.
	 */
	if (kp->ainsn.stack_space > 255 - sizeof(struct pt_regs))
		return 0;
	return 1;
}
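
/*
 * For illustration (assuming the usual ARM uregs[18] layout, i.e.
 * sizeof(struct pt_regs) == 72): an instruction may consume at most
 * 255 - 72 = 183 bytes of stack and still be optimizable.
 */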

/* Free optimized instruction slot */
static void
__arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
	if (op->optinsn.insn) {
		free_optinsn_slot(op->optinsn.insn, dirty);
		op->optinsn.insn = NULL;
	}
}

static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
	unsigned long flags;
	struct kprobe *p = &op->kp;
	struct kprobe_ctlblk *kcb;

	/* Save skipped registers */
	regs->ARM_pc = (unsigned long)op->kp.addr;
	regs->ARM_ORIG_r0 = ~0UL;

	local_irq_save(flags);
	kcb = get_kprobe_ctlblk();

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		__this_cpu_write(current_kprobe, &op->kp);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}

	/*
	 * We singlestep the replaced instruction only when it can't be
	 * executed directly during restore.
	 */
	if (!p->ainsn.kprobe_direct_exec)
		op->kp.ainsn.insn_singlestep(p->opcode, &p->ainsn, regs);

	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(optimized_callback)

int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig)
{
	kprobe_opcode_t *code;
	unsigned long rel_chk;
	unsigned long val;
	unsigned long stack_protect = sizeof(struct pt_regs);

	if (!can_optimize(orig))
		return -EILSEQ;

	code = get_optinsn_slot();
	if (!code)
		return -ENOMEM;

	/*
	 * Verify that the address gap is within the 32 MiB range, because
	 * this uses a relative jump.
	 *
	 * kprobe opt uses a 'b' instruction to branch to optinsn.insn.
	 * According to the ARM manual, the branch instruction is:
	 *
	 *   31  28 27           24 23             0
	 *  +------+---+---+---+---+----------------+
	 *  | cond | 1 | 0 | 1 | 0 |      imm24     |
	 *  +------+---+---+---+---+----------------+
	 *
	 * imm24 is a signed 24-bit integer. The real branch offset is
	 * computed by: imm32 = SignExtend(imm24:'00', 32);
	 *
	 * So the maximum forward branch should be:
	 *   (0x007fffff << 2) = 0x01fffffc =  0x1fffffc
	 * The maximum backward branch should be:
	 *   (0xff800000 << 2) = 0xfe000000 = -0x2000000
	 *
	 * We can simply check (rel & 0xfe000003):
	 *   if rel is positive, (rel & 0xfe000000) should be 0;
	 *   if rel is negative, (rel & 0xfe000000) should be 0xfe000000;
	 *   the last '3' is used for alignment checking.
	 */
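	/*
	 * A worked example of the check below: if code sits 0x100 bytes
	 * past orig->addr, rel_chk = (0x100 + 8) & 0xfe000003 = 0, which
	 * passes; a gap of 0x02000000 leaves 0x02000000 set in the mask
	 * and is rejected.
	 */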
	rel_chk = (unsigned long)((long)code -
			(long)orig->addr + 8) & 0xfe000003;

	if ((rel_chk != 0) && (rel_chk != 0xfe000000)) {
		/*
		 * Different from x86, we free the code buffer directly
		 * instead of calling __arch_remove_optimized_kprobe()
		 * because we have not filled in any field of op yet.
		 */
		free_optinsn_slot(code, 0);
		return -ERANGE;
	}

	/* Copy arch-dep-instance from template. */
	memcpy(code, (unsigned long *)optprobe_template_entry,
			TMPL_END_IDX * sizeof(kprobe_opcode_t));

	/* Adjust buffer according to instruction. */
	BUG_ON(orig->ainsn.stack_space < 0);

	stack_protect += orig->ainsn.stack_space;

	/* Should have been filtered by can_optimize(). */
	BUG_ON(stack_protect > 255);

	/* Create a 'sub sp, sp, #<stack_protect>' */
	code[TMPL_SUB_SP] = __opcode_to_mem_arm(0xe24dd000 | stack_protect);
	/* Create an 'add r3, sp, #<stack_protect>' */
	code[TMPL_ADD_SP] = __opcode_to_mem_arm(0xe28d3000 | stack_protect);

	/* Set probe information */
	val = (unsigned long)op;
	code[TMPL_VAL_IDX] = val;

	/* Set probe function call */
	val = (unsigned long)optimized_callback;
	code[TMPL_CALL_IDX] = val;

	/* If possible, copy insn and have it executed during restore */
	orig->ainsn.kprobe_direct_exec = false;
	if (can_kprobe_direct_exec(orig->ainsn.register_usage_flags)) {
		kprobe_opcode_t final_branch = arm_gen_branch(
				(unsigned long)(&code[TMPL_RESTORE_END]),
				(unsigned long)(op->kp.addr) + 4);
		if (final_branch != 0) {
			/*
			 * Replace the original 'ldmia sp, {r0 - r15}' with
			 * 'ldmia sp, {r0 - r14}': restore all registers
			 * except pc; control returns via the branch patched
			 * below.
			 */
			code[TMPL_RESTORE_BEGIN] = __opcode_to_mem_arm(0xe89d7fff);

			/* The original probed instruction */
			code[TMPL_RESTORE_ORIGN_INSN] = __opcode_to_mem_arm(orig->opcode);

			/* Jump back to the next instruction */
			code[TMPL_RESTORE_END] = __opcode_to_mem_arm(final_branch);
			orig->ainsn.kprobe_direct_exec = true;
		}
	}

	flush_icache_range((unsigned long)code,
			   (unsigned long)(&code[TMPL_END_IDX]));

	/* Setting op->optinsn.insn marks the probe as prepared. */
	op->optinsn.insn = code;
	return 0;
}

void __kprobes arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		unsigned long insn;
		WARN_ON(kprobe_disabled(&op->kp));

		/*
		 * Back up the instruction which will be replaced
		 * by the jump address.
		 */
		memcpy(op->optinsn.copied_insn, op->kp.addr,
				RELATIVEJUMP_SIZE);

		insn = arm_gen_branch((unsigned long)op->kp.addr,
				(unsigned long)op->optinsn.insn);
		BUG_ON(insn == 0);

		/*
		 * Make it a conditional branch if the replaced insn is
		 * conditional, so the optprobe fires only when the
		 * original instruction would have executed.
		 */
		insn = (__mem_to_opcode_arm(
			op->optinsn.copied_insn[0]) & 0xf0000000) |
			(insn & 0x0fffffff);

		/*
		 * Similar to __arch_disarm_kprobe, operations which
		 * remove breakpoints must be wrapped in stop_machine()
		 * to avoid racing.
		 */
		kprobes_remove_breakpoint(op->kp.addr, insn);

		list_del_init(&op->list);
	}
}

void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	arch_arm_kprobe(&op->kp);
}
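
/*
 * Unoptimizing (above) simply re-arms the probe: the breakpoint is
 * written back over the relative branch, so subsequent hits take the
 * regular kprobe path. The instruction slot itself is freed separately
 * in arch_remove_optimized_kprobe().
 */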

/*
 * Recover original instructions and breakpoints from relative jumps.
 * The caller must call this with kprobe_mutex held.
 */
void arch_unoptimize_kprobes(struct list_head *oplist,
			    struct list_head *done_list)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 kprobe_opcode_t *addr)
{
	return (op->kp.addr <= addr &&
		op->kp.addr + (RELATIVEJUMP_SIZE / sizeof(kprobe_opcode_t)) > addr);
}

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	__arch_remove_optimized_kprobe(op, 1);
}