// SPDX-License-Identifier: GPL-2.0+
/*
 *  Kernel Probes (KProbes)
 *
 * Copyright IBM Corp. 2002, 2006
 *
 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/dis.h>
#include "entry.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = { };

DEFINE_INSN_CACHE_OPS(s390_insn);

static int insn_page_in_use;

void *alloc_insn_page(void)
{
        void *page;

        page = module_alloc(PAGE_SIZE);
        if (!page)
                return NULL;
        __set_memory((unsigned long) page, 1, SET_MEMORY_RO | SET_MEMORY_X);
        return page;
}

void free_insn_page(void *page)
{
        module_memfree(page);
}

static void *alloc_s390_insn_page(void)
{
        if (xchg(&insn_page_in_use, 1) == 1)
                return NULL;
        return &kprobes_insn_page;
}

static void free_s390_insn_page(void *page)
{
        xchg(&insn_page_in_use, 0);
}

struct kprobe_insn_cache kprobe_s390_insn_slots = {
        .mutex = __MUTEX_INITIALIZER(kprobe_s390_insn_slots.mutex),
        .alloc = alloc_s390_insn_page,
        .free = free_s390_insn_page,
        .pages = LIST_HEAD_INIT(kprobe_s390_insn_slots.pages),
        .insn_size = MAX_INSN_SIZE,
};

static void copy_instruction(struct kprobe *p)
{
        kprobe_opcode_t insn[MAX_INSN_SIZE];
        s64 disp, new_disp;
        u64 addr, new_addr;
        unsigned int len;

        len = insn_length(*p->addr >> 8);
        memcpy(&insn, p->addr, len);
        p->opcode = insn[0];
        if (probe_is_insn_relative_long(&insn[0])) {
                /*
                 * For pc-relative instructions in RIL-b or RIL-c format patch
                 * the RI2 displacement field. We have already made sure that
                 * the insn slot for the patched instruction is within the same
                 * 2GB area as the original instruction (either kernel image or
                 * module area). Therefore the new displacement will always fit.
                 */
                disp = *(s32 *)&insn[1];
                addr = (u64)(unsigned long)p->addr;
                new_addr = (u64)(unsigned long)p->ainsn.insn;
                new_disp = ((addr + (disp * 2)) - new_addr) / 2;
                *(s32 *)&insn[1] = new_disp;
        }
        s390_kernel_write(p->ainsn.insn, &insn, len);
}
NOKPROBE_SYMBOL(copy_instruction);
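
/*
 * Worked example of the displacement fixup in copy_instruction(), using
 * made-up addresses for illustration: assume a 6-byte "brasl" at
 * p->addr = 0x100000 whose RI2 field holds 0x1000.  RI2 counts
 * halfwords, so the branch target is 0x100000 + 2 * 0x1000 = 0x102000.
 * If the copy is placed in an insn slot at p->ainsn.insn = 0x180000,
 * the patched displacement becomes (0x102000 - 0x180000) / 2 =
 * -0x3f000, and the single-stepped copy still reaches the same
 * absolute target.
 */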

static inline int is_kernel_addr(void *addr)
{
        return addr < (void *)_end;
}

static int s390_get_insn_slot(struct kprobe *p)
{
        /*
         * Get an insn slot that is within the same 2GB area as the original
         * instruction. That way instructions with a 32bit signed displacement
         * field can be patched and executed within the insn slot.
         */
        p->ainsn.insn = NULL;
        if (is_kernel_addr(p->addr))
                p->ainsn.insn = get_s390_insn_slot();
        else if (is_module_addr(p->addr))
                p->ainsn.insn = get_insn_slot();
        return p->ainsn.insn ? 0 : -ENOMEM;
}
NOKPROBE_SYMBOL(s390_get_insn_slot);

static void s390_free_insn_slot(struct kprobe *p)
{
        if (!p->ainsn.insn)
                return;
        if (is_kernel_addr(p->addr))
                free_s390_insn_slot(p->ainsn.insn, 0);
        else
                free_insn_slot(p->ainsn.insn, 0);
        p->ainsn.insn = NULL;
}
NOKPROBE_SYMBOL(s390_free_insn_slot);

int arch_prepare_kprobe(struct kprobe *p)
{
        if ((unsigned long) p->addr & 0x01)
                return -EINVAL;
        /* Make sure the probe isn't going on a difficult instruction */
        if (probe_is_prohibited_opcode(p->addr))
                return -EINVAL;
        if (s390_get_insn_slot(p))
                return -ENOMEM;
        copy_instruction(p);
        return 0;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);

struct swap_insn_args {
        struct kprobe *p;
        unsigned int arm_kprobe : 1;
};

static int swap_instruction(void *data)
{
        struct swap_insn_args *args = data;
        struct kprobe *p = args->p;
        u16 opc;

        opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
        s390_kernel_write(p->addr, &opc, sizeof(opc));
        return 0;
}
NOKPROBE_SYMBOL(swap_instruction);

void arch_arm_kprobe(struct kprobe *p)
{
        struct swap_insn_args args = {.p = p, .arm_kprobe = 1};

        stop_machine_cpuslocked(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_arm_kprobe);

void arch_disarm_kprobe(struct kprobe *p)
{
        struct swap_insn_args args = {.p = p, .arm_kprobe = 0};

        stop_machine_cpuslocked(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);

void arch_remove_kprobe(struct kprobe *p)
{
        s390_free_insn_slot(p);
}
NOKPROBE_SYMBOL(arch_remove_kprobe);

static void enable_singlestep(struct kprobe_ctlblk *kcb,
                              struct pt_regs *regs,
                              unsigned long ip)
{
        struct per_regs per_kprobe;

        /* Set up the PER control registers %cr9-%cr11 */
        per_kprobe.control = PER_EVENT_IFETCH;
        per_kprobe.start = ip;
        per_kprobe.end = ip;

        /* Save control regs and psw mask */
        __ctl_store(kcb->kprobe_saved_ctl, 9, 11);
        kcb->kprobe_saved_imask = regs->psw.mask &
                (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);

        /* Set PER control regs, turns on single step for the given address */
        __ctl_load(per_kprobe, 9, 11);
        regs->psw.mask |= PSW_MASK_PER;
        regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
        regs->psw.addr = ip;
}
NOKPROBE_SYMBOL(enable_singlestep);

static void disable_singlestep(struct kprobe_ctlblk *kcb,
                               struct pt_regs *regs,
                               unsigned long ip)
{
        /* Restore control regs and psw mask, set new psw address */
        __ctl_load(kcb->kprobe_saved_ctl, 9, 11);
        regs->psw.mask &= ~PSW_MASK_PER;
        regs->psw.mask |= kcb->kprobe_saved_imask;
        regs->psw.addr = ip;
}
NOKPROBE_SYMBOL(disable_singlestep);
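
/*
 * Explanatory note on the PER setup above (not from the original
 * source): s390 has no dedicated single-step bit, so single stepping
 * is emulated with the Program Event Recording (PER) facility.  With
 * PER_EVENT_IFETCH selected and the start/end range narrowed to the
 * single address of the copied instruction, the CPU raises exactly one
 * PER event when that instruction is fetched and executed.  The event
 * arrives as a DIE_SSTEP notification and is handled in
 * post_kprobe_handler() below.  I/O and external interrupts are masked
 * off in the meantime so nothing can preempt the stepped instruction.
 */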

/*
 * Activate a kprobe by storing its pointer to current_kprobe. The
 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
 * two kprobes can be active, see KPROBE_REENTER.
 */
static void push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
        kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe);
        kcb->prev_kprobe.status = kcb->kprobe_status;
        __this_cpu_write(current_kprobe, p);
}
NOKPROBE_SYMBOL(push_kprobe);

/*
 * Deactivate a kprobe by backing up to the previous state. If the
 * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
 * for any other state prev_kprobe.kp will be NULL.
 */
static void pop_kprobe(struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
        kcb->kprobe_status = kcb->prev_kprobe.status;
}
NOKPROBE_SYMBOL(pop_kprobe);

void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
        ri->fp = NULL;

        /* Replace the return addr with trampoline addr */
        regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);

static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SSDONE:
        case KPROBE_HIT_ACTIVE:
                kprobes_inc_nmissed_count(p);
                break;
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
        default:
                /*
                 * A kprobe on the code path to single step an instruction
                 * is a BUG. The code path resides in the .kprobes.text
                 * section and is executed with interrupts disabled.
                 */
                pr_err("Invalid kprobe detected.\n");
                dump_kprobe(p);
                BUG();
        }
}
NOKPROBE_SYMBOL(kprobe_reenter_check);

static int kprobe_handler(struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb;
        struct kprobe *p;

        /*
         * We want to disable preemption for the entire duration of kprobe
         * processing. That includes the calls to the pre/post handlers
         * and single stepping the kprobe instruction.
         */
        preempt_disable();
        kcb = get_kprobe_ctlblk();
        p = get_kprobe((void *)(regs->psw.addr - 2));

        if (p) {
                if (kprobe_running()) {
                        /*
                         * We have hit a kprobe while another is still
                         * active. This can happen in the pre and post
                         * handler. Single step the instruction of the
                         * new probe but do not call any handler function
                         * of this secondary kprobe.
                         * push_kprobe and pop_kprobe save and restore
                         * the currently active kprobe.
                         */
                        kprobe_reenter_check(kcb, p);
                        push_kprobe(kcb, p);
                        kcb->kprobe_status = KPROBE_REENTER;
                } else {
                        /*
                         * If we have no pre-handler or it returned 0, we
                         * continue with single stepping. If we have a
                         * pre-handler and it returned non-zero, it prepped
                         * for changing execution path, so get out doing
                         * nothing more here.
                         */
                        push_kprobe(kcb, p);
                        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
                        if (p->pre_handler && p->pre_handler(p, regs)) {
                                pop_kprobe(kcb);
                                preempt_enable_no_resched();
                                return 1;
                        }
                        kcb->kprobe_status = KPROBE_HIT_SS;
                }
                enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
                return 1;
        } /* else:
           * No kprobe at this address and no active kprobe. The trap has
           * not been caused by a kprobe breakpoint. The race of breakpoint
           * vs. kprobe remove does not exist because on s390 we use
           * stop_machine to arm/disarm the breakpoints.
           */
        preempt_enable_no_resched();
        return 0;
}
NOKPROBE_SYMBOL(kprobe_handler);
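
/*
 * Summary of a probe hit, pieced together from the handlers in this
 * file: arch_arm_kprobe() overwrites the first halfword of the probed
 * instruction with BREAKPOINT_INSTRUCTION.  Executing it raises an
 * illegal-op program check that is delivered as DIE_BPT; because the
 * breakpoint is two bytes long, kprobe_handler() subtracts 2 from
 * psw.addr to look up the kprobe.  The pre-handler runs, the saved
 * copy of the original instruction is single stepped out of line via
 * PER, and the resulting DIE_SSTEP notification lands in
 * post_kprobe_handler(), which runs the post-handler and fixes up the
 * psw so execution resumes after the probe point.
 */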

/*
 * Function return probe trampoline:
 *	- arch_init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *	  causes the handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
        asm volatile(".global kretprobe_trampoline\n"
                     "kretprobe_trampoline: bcr 0,0\n");
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
        regs->psw.addr = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL);
        /*
         * By returning a non-zero value, we are telling
         * kprobe_handler() that we don't want the post_handler
         * to run (and have re-enabled preemption)
         */
        return 1;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        unsigned long ip = regs->psw.addr;
        int fixup = probe_get_fixup_type(p->ainsn.insn);

        if (fixup & FIXUP_PSW_NORMAL)
                ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;

        if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
                int ilen = insn_length(p->ainsn.insn[0] >> 8);

                if (ip - (unsigned long) p->ainsn.insn == ilen)
                        ip = (unsigned long) p->addr + ilen;
        }

        if (fixup & FIXUP_RETURN_REGISTER) {
                int reg = (p->ainsn.insn[0] & 0xf0) >> 4;

                regs->gprs[reg] += (unsigned long) p->addr -
                                   (unsigned long) p->ainsn.insn;
        }

        disable_singlestep(kcb, regs, ip);
}
NOKPROBE_SYMBOL(resume_execution);
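
/*
 * Worked example of the fixups above, with illustrative addresses only:
 * assume the probed instruction at p->addr = 0x100000 was copied to
 * p->ainsn.insn = 0x180000 and single stepped there.
 *
 * FIXUP_PSW_NORMAL: a 4-byte instruction leaves the psw at 0x180004
 * inside the slot; adding p->addr - p->ainsn.insn = -0x80000 relocates
 * it to 0x100004, directly after the original instruction.
 *
 * FIXUP_BRANCH_NOT_TAKEN: a conditional branch that was not taken
 * stops at slot + ilen = 0x180004, which is translated to
 * p->addr + ilen = 0x100004.  A taken branch already points at the
 * absolute target and needs no fixup.
 *
 * FIXUP_RETURN_REGISTER: a "bras"/"brasl" stepped in the slot stored a
 * slot-relative return address in its R1 register; the same delta
 * p->addr - p->ainsn.insn corrects it.
 */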

static int post_kprobe_handler(struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        struct kprobe *p = kprobe_running();

        if (!p)
                return 0;

        if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                p->post_handler(p, regs, 0);
        }

        resume_execution(p, regs);
        pop_kprobe(kcb);
        preempt_enable_no_resched();

        /*
         * if somebody else is singlestepping across a probe point, psw mask
         * will have PER set, in which case, continue the remaining processing
         * of do_single_step, as if this is not a probe hit.
         */
        if (regs->psw.mask & PSW_MASK_PER)
                return 0;

        return 1;
}
NOKPROBE_SYMBOL(post_kprobe_handler);

static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        struct kprobe *p = kprobe_running();
        const struct exception_table_entry *entry;

        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                /*
                 * We are here because the instruction being single
                 * stepped caused a page fault. We reset the current
                 * kprobe so that the psw address points back to the
                 * probe address, and allow the page fault handler to
                 * continue as a normal page fault.
                 */
                disable_singlestep(kcb, regs, (unsigned long) p->addr);
                pop_kprobe(kcb);
                preempt_enable_no_resched();
                break;
        case KPROBE_HIT_ACTIVE:
        case KPROBE_HIT_SSDONE:
                /*
                 * We increment the nmissed count for accounting; the
                 * npre/npostfault counts could also be used to account
                 * for these specific fault cases.
                 */
                kprobes_inc_nmissed_count(p);

                /*
                 * We come here because instructions in the pre/post
                 * handler caused the page fault. This could happen
                 * if the handler tries to access user space, e.g. by
                 * copy_from_user() or get_user(). Let the
                 * user-specified handler try to fix it first.
                 */
                if (p->fault_handler && p->fault_handler(p, regs, trapnr))
                        return 1;

                /*
                 * In case the user-specified fault handler returned
                 * zero, try to fix up.
                 */
                entry = s390_search_extables(regs->psw.addr);
                if (entry && ex_handle(entry, regs))
                        return 1;

                /*
                 * fixup_exception() could not handle it,
                 * Let do_page_fault() fix it.
                 */
                break;
        default:
                break;
        }
        return 0;
}
NOKPROBE_SYMBOL(kprobe_trap_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
        int ret;

        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
                local_irq_disable();
        ret = kprobe_trap_handler(regs, trapnr);
        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
                local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
        return ret;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

/*
 * Wrapper routine for handling exceptions.
 */
int kprobe_exceptions_notify(struct notifier_block *self,
                             unsigned long val, void *data)
{
        struct die_args *args = (struct die_args *) data;
        struct pt_regs *regs = args->regs;
        int ret = NOTIFY_DONE;

        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
                local_irq_disable();

        switch (val) {
        case DIE_BPT:
                if (kprobe_handler(regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_SSTEP:
                if (post_kprobe_handler(regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_TRAP:
                if (!preemptible() && kprobe_running() &&
                    kprobe_trap_handler(regs, args->trapnr))
                        ret = NOTIFY_STOP;
                break;
        default:
                break;
        }

        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
                local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);

        return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

static struct kprobe trampoline = {
        .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
        .pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
        return register_kprobe(&trampoline);
}

int arch_trampoline_kprobe(struct kprobe *p)
{
        return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);
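
/*
 * Minimal usage sketch (illustrative, not part of this file's
 * interface): the arch hooks above are driven by the generic kprobes
 * core, which calls arch_prepare_kprobe(), arch_arm_kprobe() and
 * friends on behalf of the client.  A hypothetical module placing a
 * probe on do_sys_open could look like this:
 *
 *	static int pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("kprobe hit at %px\n", p->addr);
 *		return 0;	// 0: continue with single stepping
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name = "do_sys_open",
 *		.pre_handler = pre,
 *	};
 *
 *	// module init:	ret = register_kprobe(&kp);
 *	// module exit:	unregister_kprobe(&kp);
 */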