// SPDX-License-Identifier: GPL-2.0+
/*
 * Kernel Probes (KProbes)
 *
 * Copyright IBM Corp. 2002, 2006
 *
 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/dis.h>
#include "entry.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = { };

DEFINE_INSN_CACHE_OPS(s390_insn);

static int insn_page_in_use;

void *alloc_insn_page(void)
{
	void *page;

	page = module_alloc(PAGE_SIZE);
	if (!page)
		return NULL;
	__set_memory((unsigned long) page, 1, SET_MEMORY_RO | SET_MEMORY_X);
	return page;
}

static void *alloc_s390_insn_page(void)
{
	if (xchg(&insn_page_in_use, 1) == 1)
		return NULL;
	return &kprobes_insn_page;
}

static void free_s390_insn_page(void *page)
{
	xchg(&insn_page_in_use, 0);
}

struct kprobe_insn_cache kprobe_s390_insn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_s390_insn_slots.mutex),
	.alloc = alloc_s390_insn_page,
	.free = free_s390_insn_page,
	.pages = LIST_HEAD_INIT(kprobe_s390_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
};

static void copy_instruction(struct kprobe *p)
{
	kprobe_opcode_t insn[MAX_INSN_SIZE];
	s64 disp, new_disp;
	u64 addr, new_addr;
	unsigned int len;

	len = insn_length(*p->addr >> 8);
	memcpy(&insn, p->addr, len);
	p->opcode = insn[0];
	if (probe_is_insn_relative_long(&insn[0])) {
		/*
		 * For pc-relative instructions in RIL-b or RIL-c format,
		 * patch the RI2 displacement field. We have already made
		 * sure that the insn slot for the patched instruction is
		 * within the same 2GB area as the original instruction
		 * (either kernel image or module area). Therefore the new
		 * displacement will always fit.
		 */
		disp = *(s32 *)&insn[1];
		addr = (u64)(unsigned long)p->addr;
		new_addr = (u64)(unsigned long)p->ainsn.insn;
		new_disp = ((addr + (disp * 2)) - new_addr) / 2;
		*(s32 *)&insn[1] = new_disp;
	}
	s390_kernel_write(p->ainsn.insn, &insn, len);
}
NOKPROBE_SYMBOL(copy_instruction);
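/*
 * Worked example for the displacement patching above (illustrative,
 * not part of the original source): take "brasl %r14,target" at
 * p->addr == 0x1000 with RI2 displacement disp, i.e. target ==
 * 0x1000 + disp * 2 (RI2 counts halfwords). With the copy at
 * p->ainsn.insn == 0x5000, new_disp == (0x1000 + disp * 2 - 0x5000) / 2,
 * so the copy branches to 0x5000 + new_disp * 2 == 0x1000 + disp * 2,
 * i.e. the original target.
 */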
static int s390_get_insn_slot(struct kprobe *p)
{
	/*
	 * Get an insn slot that is within the same 2GB area as the original
	 * instruction. That way instructions with a 32bit signed displacement
	 * field can be patched and executed within the insn slot.
	 */
	p->ainsn.insn = NULL;
	if (is_kernel((unsigned long)p->addr))
		p->ainsn.insn = get_s390_insn_slot();
	else if (is_module_addr(p->addr))
		p->ainsn.insn = get_insn_slot();
	return p->ainsn.insn ? 0 : -ENOMEM;
}
NOKPROBE_SYMBOL(s390_get_insn_slot);

static void s390_free_insn_slot(struct kprobe *p)
{
	if (!p->ainsn.insn)
		return;
	if (is_kernel((unsigned long)p->addr))
		free_s390_insn_slot(p->ainsn.insn, 0);
	else
		free_insn_slot(p->ainsn.insn, 0);
	p->ainsn.insn = NULL;
}
NOKPROBE_SYMBOL(s390_free_insn_slot);

int arch_prepare_kprobe(struct kprobe *p)
{
	if ((unsigned long) p->addr & 0x01)
		return -EINVAL;
	/* Make sure the probe isn't going on a difficult instruction */
	if (probe_is_prohibited_opcode(p->addr))
		return -EINVAL;
	if (s390_get_insn_slot(p))
		return -ENOMEM;
	copy_instruction(p);
	return 0;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);

struct swap_insn_args {
	struct kprobe *p;
	unsigned int arm_kprobe : 1;
};

static int swap_instruction(void *data)
{
	struct swap_insn_args *args = data;
	struct kprobe *p = args->p;
	u16 opc;

	opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
	s390_kernel_write(p->addr, &opc, sizeof(opc));
	return 0;
}
NOKPROBE_SYMBOL(swap_instruction);

void arch_arm_kprobe(struct kprobe *p)
{
	struct swap_insn_args args = {.p = p, .arm_kprobe = 1};

	stop_machine_cpuslocked(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_arm_kprobe);

void arch_disarm_kprobe(struct kprobe *p)
{
	struct swap_insn_args args = {.p = p, .arm_kprobe = 0};

	stop_machine_cpuslocked(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);

void arch_remove_kprobe(struct kprobe *p)
{
	s390_free_insn_slot(p);
}
NOKPROBE_SYMBOL(arch_remove_kprobe);

static void enable_singlestep(struct kprobe_ctlblk *kcb,
			      struct pt_regs *regs,
			      unsigned long ip)
{
	struct per_regs per_kprobe;

	/* Set up the PER control registers %cr9-%cr11 */
	per_kprobe.control = PER_EVENT_IFETCH;
	per_kprobe.start = ip;
	per_kprobe.end = ip;

	/* Save control regs and psw mask */
	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
	kcb->kprobe_saved_imask = regs->psw.mask &
		(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);

	/* Set PER control regs, turns on single step for the given address */
	__ctl_load(per_kprobe, 9, 11);
	regs->psw.mask |= PSW_MASK_PER;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
	regs->psw.addr = ip;
}
NOKPROBE_SYMBOL(enable_singlestep);

static void disable_singlestep(struct kprobe_ctlblk *kcb,
			       struct pt_regs *regs,
			       unsigned long ip)
{
	/* Restore control regs and psw mask, set new psw address */
	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
	regs->psw.mask &= ~PSW_MASK_PER;
	regs->psw.mask |= kcb->kprobe_saved_imask;
	regs->psw.addr = ip;
}
NOKPROBE_SYMBOL(disable_singlestep);
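/*
 * Note on the PER mechanics used above (explanatory comment, derived
 * from the code rather than the original source): programming
 * %cr9-%cr11 with control == PER_EVENT_IFETCH and start == end == ip
 * makes the CPU raise a PER event for the one instruction fetched from
 * ip, which is how the copied instruction in the insn slot gets
 * "single stepped". Masking PSW_MASK_IO and PSW_MASK_EXT keeps
 * interrupts from being delivered in the middle of the step.
 */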
/*
 * Activate a kprobe by storing its pointer to current_kprobe. The
 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
 * two kprobes can be active, see KPROBE_REENTER.
 */
static void push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe);
	kcb->prev_kprobe.status = kcb->kprobe_status;
	__this_cpu_write(current_kprobe, p);
}
NOKPROBE_SYMBOL(push_kprobe);

/*
 * Deactivate a kprobe by backing up to the previous state. If the
 * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
 * for any other state prev_kprobe.kp will be NULL.
 */
static void pop_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}
NOKPROBE_SYMBOL(pop_kprobe);

void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
	ri->fp = NULL;

	/* Replace the return addr with trampoline addr */
	regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);

static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
	default:
		/*
		 * A kprobe on the code path to single step an instruction
		 * is a BUG. The code path resides in the .kprobes.text
		 * section and is executed with interrupts disabled.
		 */
		pr_err("Invalid kprobe detected.\n");
		dump_kprobe(p);
		BUG();
	}
}
NOKPROBE_SYMBOL(kprobe_reenter_check);

static int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb;
	struct kprobe *p;

	/*
	 * We want to disable preemption for the entire duration of kprobe
	 * processing. That includes the calls to the pre/post handlers
	 * and single stepping the kprobe instruction.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();
	p = get_kprobe((void *)(regs->psw.addr - 2));

	if (p) {
		if (kprobe_running()) {
			/*
			 * We have hit a kprobe while another is still
			 * active. This can happen in the pre and post
			 * handler. Single step the instruction of the
			 * new probe but do not call any handler function
			 * of this secondary kprobe.
			 * push_kprobe and pop_kprobe save and restore
			 * the currently active kprobe.
			 */
			kprobe_reenter_check(kcb, p);
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_REENTER;
		} else {
			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with single stepping. If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for changing execution path, so get out doing
			 * nothing more here.
			 */
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
			if (p->pre_handler && p->pre_handler(p, regs)) {
				pop_kprobe(kcb);
				preempt_enable_no_resched();
				return 1;
			}
			kcb->kprobe_status = KPROBE_HIT_SS;
		}
		enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
		return 1;
	} /* else:
	   * No kprobe at this address and no active kprobe. The trap has
	   * not been caused by a kprobe breakpoint. The race of breakpoint
	   * vs. kprobe remove does not exist because on s390 we use
	   * stop_machine to arm/disarm the breakpoints.
	   */
	preempt_enable_no_resched();
	return 0;
}
NOKPROBE_SYMBOL(kprobe_handler);
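/*
 * Summary of a normal probe hit (added for clarity, derived from the
 * handlers in this file): the breakpoint trap enters kprobe_handler(),
 * which runs the pre_handler and arms PER single stepping on the copy
 * in the insn slot. When that instruction completes, the PER trap
 * enters post_kprobe_handler() below, which runs the post_handler,
 * rebases the psw via resume_execution() and re-enables preemption.
 */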
/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *		causes the handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
		     "kretprobe_trampoline: bcr 0,0\n");
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	regs->psw.addr = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL);
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);

/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long ip = regs->psw.addr;
	int fixup = probe_get_fixup_type(p->ainsn.insn);

	if (fixup & FIXUP_PSW_NORMAL)
		ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;

	if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
		int ilen = insn_length(p->ainsn.insn[0] >> 8);

		if (ip - (unsigned long) p->ainsn.insn == ilen)
			ip = (unsigned long) p->addr + ilen;
	}

	if (fixup & FIXUP_RETURN_REGISTER) {
		int reg = (p->ainsn.insn[0] & 0xf0) >> 4;

		regs->gprs[reg] += (unsigned long) p->addr -
				   (unsigned long) p->ainsn.insn;
	}

	disable_singlestep(kcb, regs, ip);
}
NOKPROBE_SYMBOL(resume_execution);
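/*
 * Worked example for the fixups above (illustrative, not part of the
 * original source): let a 4-byte instruction be copied from
 * p->addr == 0x1000 to the insn slot at p->ainsn.insn == 0x5000.
 * - FIXUP_PSW_NORMAL: after the step psw.addr is 0x5004; adding
 *   p->addr - p->ainsn.insn (-0x4000) rebases it to 0x1004.
 * - FIXUP_BRANCH_NOT_TAKEN: a not-taken conditional branch falls
 *   through to slot + ilen == 0x5004, so execution resumes at
 *   p->addr + ilen == 0x1004.
 * - FIXUP_RETURN_REGISTER: "bras %r14,..." saved 0x5004 in %r14;
 *   the same delta rebases it to 0x1004.
 */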
static int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();

	if (!p)
		return 0;

	if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		p->post_handler(p, regs, 0);
	}

	resume_execution(p, regs);
	pop_kprobe(kcb);
	preempt_enable_no_resched();

	/*
	 * If somebody else is single stepping across a probe point, the
	 * psw mask will have PER set. In that case continue the remaining
	 * processing of do_single_step, as if this were not a probe hit.
	 */
	if (regs->psw.mask & PSW_MASK_PER)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(post_kprobe_handler);

static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();
	const struct exception_table_entry *entry;

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the psw back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		disable_singlestep(kcb, regs, (unsigned long) p->addr);
		pop_kprobe(kcb);
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		entry = s390_search_extables(regs->psw.addr);
		if (entry && ex_handle(entry, regs))
			return 1;

		/*
		 * If fixup_exception() could not handle it,
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
NOKPROBE_SYMBOL(kprobe_trap_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	int ret;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();
	ret = kprobe_trap_handler(regs, trapnr);
	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
	return ret;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

/*
 * Wrapper routine for handling exceptions.
 */
int kprobe_exceptions_notify(struct notifier_block *self,
			     unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *) data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_TRAP:
		if (!preemptible() && kprobe_running() &&
		    kprobe_trap_handler(regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);

	return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

static struct kprobe trampoline = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline);
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);
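/*
 * Minimal usage sketch (assumption; it mirrors the in-tree
 * samples/kprobes/kprobe_example.c and is not part of this file). A
 * module that exercises the arch hooks above could look roughly like:
 *
 *	static struct kprobe kp = {
 *		.symbol_name = "kernel_clone",
 *		.pre_handler = my_pre_handler,	(hypothetical handler)
 *	};
 *
 *	register_kprobe(&kp);	- arch_prepare_kprobe() + arch_arm_kprobe()
 *	...
 *	unregister_kprobe(&kp);	- arch_disarm_kprobe() + arch_remove_kprobe()
 */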