// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/context_tracking.h>
#include <linux/err.h>
#include <linux/compat.h>
#include <linux/sched/debug.h> /* for show_regs */

#include <asm/asm-prototypes.h>
#include <asm/kup.h>
#include <asm/cputime.h>
#include <asm/hw_irq.h>
#include <asm/interrupt.h>
#include <asm/kprobes.h>
#include <asm/paca.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/signal.h>
#include <asm/switch_to.h>
#include <asm/syscall.h>
#include <asm/time.h>
#include <asm/tm.h>
#include <asm/unistd.h>

#if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
unsigned long global_dbcr0[NR_CPUS];
#endif

typedef long (*syscall_fn)(long, long, long, long, long, long);

#ifdef CONFIG_PPC_BOOK3S_64
DEFINE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
static inline bool exit_must_hard_disable(void)
{
	return static_branch_unlikely(&interrupt_exit_not_reentrant);
}
#else
static inline bool exit_must_hard_disable(void)
{
	return true;
}
#endif

/*
 * local irqs must be disabled. Returns false if the caller must re-enable
 * them, check for new work, and try again.
 *
 * This should be called with local irqs disabled, but if they were previously
 * enabled when the interrupt handler returns (indicating a process-context /
 * synchronous interrupt) then irqs_enabled should be true.
 *
 * If restartable is true then EE/RI can be left on because interrupts are
 * handled with a restart sequence.
 */
static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
{
	/* This must be done with RI=1 because tracing may touch vmaps */
	trace_hardirqs_on();

	if (exit_must_hard_disable() || !restartable)
		__hard_EE_RI_disable();

#ifdef CONFIG_PPC64
	/* This pattern matches prep_irq_for_idle */
	if (unlikely(lazy_irq_pending_nocheck())) {
		if (exit_must_hard_disable() || !restartable) {
			local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
			__hard_RI_enable();
		}
		trace_hardirqs_off();

		return false;
	}
#endif
	return true;
}

/* Has to run notrace because it is entered not completely "reconciled" */
notrace long system_call_exception(long r3, long r4, long r5,
				   long r6, long r7, long r8,
				   unsigned long r0, struct pt_regs *regs)
{
	syscall_fn f;

	kuep_lock();

	regs->orig_gpr3 = r3;

	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);

	trace_hardirqs_off(); /* finish reconciling */

	CT_WARN_ON(ct_state() == CONTEXT_KERNEL);
	user_exit_irqoff();

	BUG_ON(regs_is_unrecoverable(regs));
	BUG_ON(!(regs->msr & MSR_PR));
	BUG_ON(arch_irq_disabled_regs(regs));

#ifdef CONFIG_PPC_PKEY
	if (mmu_has_feature(MMU_FTR_PKEY)) {
		unsigned long amr, iamr;
		bool flush_needed = false;
		/*
		 * When entering from userspace we mostly have the AMR/IAMR
		 * different from kernel default values. Hence don't compare.
		 */
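		/*
		 * Save the AMR/IAMR values we entered with so the exit path
		 * can restore them, then switch to the kernel's blocked
		 * values so user memory access (KUAP) and user execution
		 * (KUEP) are closed off while in the kernel.
		 */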
		amr = mfspr(SPRN_AMR);
		iamr = mfspr(SPRN_IAMR);
		regs->amr = amr;
		regs->iamr = iamr;
		if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
			mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
			flush_needed = true;
		}
		if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
			mtspr(SPRN_IAMR, AMR_KUEP_BLOCKED);
			flush_needed = true;
		}
		if (flush_needed)
			isync();
	} else
#endif
		kuap_assert_locked();

	booke_restore_dbcr0();

	account_cpu_user_entry();

	account_stolen_time();

	/*
	 * This is not required for the syscall exit path, but makes the
	 * stack frame look nicer. If this was initialised in the first stack
	 * frame, or if the unwinder was taught the first stack frame always
	 * returns to user with IRQS_ENABLED, this store could be avoided!
	 */
	irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);

	/*
	 * If the system call is made with TM active, set _TIF_RESTOREALL to
	 * prevent RFSCV being used to return to userspace, because the POWER9
	 * TM implementation has problems with this instruction returning to
	 * transactional state. Final register values are not relevant because
	 * the transaction will be aborted upon return anyway. Or in the case
	 * of an unsupported_scv SIGILL fault, the return state does not
	 * matter much because it's an edge case.
	 */
	if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
	    unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
		current_thread_info()->flags |= _TIF_RESTOREALL;

	/*
	 * If the system call was made with a transaction active, doom it and
	 * return without performing the system call. Unless it was an
	 * unsupported scv vector, in which case it's treated like an illegal
	 * instruction.
	 */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&
	    !trap_is_unsupported_scv(regs)) {
		/* Enable TM in the kernel, and disable EE (for scv) */
		hard_irq_disable();
		mtmsr(mfmsr() | MSR_TM);

		/* tabort, this dooms the transaction, nothing else */
		asm volatile(".long 0x7c00071d | ((%0) << 16)"
			     :: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));

		/*
		 * Userspace will never see the return value. Execution will
		 * resume after the tbegin. of the aborted transaction with the
		 * checkpointed register state. A context switch could occur
		 * or signal delivered to the process before resuming the
		 * doomed transaction context, but that should all be handled
		 * as expected.
		 */
		return -ENOSYS;
	}
#endif // CONFIG_PPC_TRANSACTIONAL_MEM

	local_irq_enable();

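	/*
	 * Syscall entry tracing (ptrace, seccomp, tracepoints, audit) may
	 * change the syscall number and argument registers, so they are
	 * re-read from the saved regs after do_syscall_trace_enter().
	 */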
	if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {
		if (unlikely(trap_is_unsupported_scv(regs))) {
			/* Unsupported scv vector */
			_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
			return regs->gpr[3];
		}
		/*
		 * We use the return value of do_syscall_trace_enter() as the
		 * syscall number. If the syscall was rejected for any reason,
		 * do_syscall_trace_enter() returns an invalid syscall number,
		 * the test against NR_syscalls fails, and the return value to
		 * be used is in regs->gpr[3].
		 */
		r0 = do_syscall_trace_enter(regs);
		if (unlikely(r0 >= NR_syscalls))
			return regs->gpr[3];
		r3 = regs->gpr[3];
		r4 = regs->gpr[4];
		r5 = regs->gpr[5];
		r6 = regs->gpr[6];
		r7 = regs->gpr[7];
		r8 = regs->gpr[8];

	} else if (unlikely(r0 >= NR_syscalls)) {
		if (unlikely(trap_is_unsupported_scv(regs))) {
			/* Unsupported scv vector */
			_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
			return regs->gpr[3];
		}
		return -ENOSYS;
	}

	/* May be faster to do array_index_nospec? */
	barrier_nospec();

	if (unlikely(is_compat_task())) {
		f = (void *)compat_sys_call_table[r0];

		r3 &= 0x00000000ffffffffULL;
		r4 &= 0x00000000ffffffffULL;
		r5 &= 0x00000000ffffffffULL;
		r6 &= 0x00000000ffffffffULL;
		r7 &= 0x00000000ffffffffULL;
		r8 &= 0x00000000ffffffffULL;

	} else {
		f = (void *)sys_call_table[r0];
	}

	return f(r3, r4, r5, r6, r7, r8);
}

static notrace void booke_load_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long dbcr0 = current->thread.debug.dbcr0;

	if (likely(!(dbcr0 & DBCR0_IDM)))
		return;

	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	mtmsr(mfmsr() & ~MSR_DE);
	if (IS_ENABLED(CONFIG_PPC32)) {
		isync();
		global_dbcr0[smp_processor_id()] = mfspr(SPRN_DBCR0);
	}
	mtspr(SPRN_DBCR0, dbcr0);
	mtspr(SPRN_DBSR, -1);
#endif
}

static void check_return_regs_valid(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_BOOK3S_64
	unsigned long trap, srr0, srr1;
	static bool warned;
	u8 *validp;
	char *h;

	if (trap_is_scv(regs))
		return;

	trap = regs->trap;
	// EE in HV mode sets HSRRs like 0xea0
	if (cpu_has_feature(CPU_FTR_HVMODE) && trap == INTERRUPT_EXTERNAL)
		trap = 0xea0;

	switch (trap) {
	case 0x980:
	case INTERRUPT_H_DATA_STORAGE:
	case 0xe20:
	case 0xe40:
	case INTERRUPT_HMI:
	case 0xe80:
	case 0xea0:
	case INTERRUPT_H_FAC_UNAVAIL:
	case 0x1200:
	case 0x1500:
	case 0x1600:
	case 0x1800:
		validp = &local_paca->hsrr_valid;
		if (!*validp)
			return;

		srr0 = mfspr(SPRN_HSRR0);
		srr1 = mfspr(SPRN_HSRR1);
		h = "H";

		break;
	default:
		validp = &local_paca->srr_valid;
		if (!*validp)
			return;

		srr0 = mfspr(SPRN_SRR0);
		srr1 = mfspr(SPRN_SRR1);
		h = "";
		break;
	}

	if (srr0 == regs->nip && srr1 == regs->msr)
		return;

	/*
	 * An NMI / soft-NMI interrupt may have come in after we found
	 * srr_valid and before the SRRs are loaded. The interrupt then
	 * comes in and clobbers SRRs and clears srr_valid. Then we load
	 * the SRRs here and test them above and find they don't match.
	 *
	 * Test validity again after that, to catch such false positives.
	 *
	 * This test in general will have some window for false negatives
	 * and may not catch and fix all such cases if an NMI comes in
	 * later and clobbers SRRs without clearing srr_valid, but hopefully
	 * such things will get caught most of the time, statistically
	 * enough to be able to get a warning out.
	 */
	barrier();

	if (!*validp)
		return;

	if (!warned) {
		warned = true;
		printk("%sSRR0 was: %lx should be: %lx\n", h, srr0, regs->nip);
		printk("%sSRR1 was: %lx should be: %lx\n", h, srr1, regs->msr);
		show_regs(regs);
	}

	*validp = 0; /* fixup */
#endif
}

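/*
 * Common exit-to-user work shared by the syscall and interrupt return paths:
 * handle pending _TIF work (reschedule, signals), restore TM and FP/VEC/VSX
 * register state as required, then prepare the irq state for the return to
 * userspace, restarting if a soft-masked interrupt became pending meanwhile.
 * The returned flags tell the low-level exit code what extra register state
 * must be restored from the interrupt frame.
 */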
static notrace unsigned long
interrupt_exit_user_prepare_main(unsigned long ret, struct pt_regs *regs)
{
	unsigned long ti_flags;

again:
	ti_flags = READ_ONCE(current_thread_info()->flags);
	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
		local_irq_enable();
		if (ti_flags & _TIF_NEED_RESCHED) {
			schedule();
		} else {
			/*
			 * SIGPENDING must restore signal handler function
			 * argument GPRs, and some non-volatiles (e.g., r1).
			 * Restore all for now. This could be made lighter.
			 */
			if (ti_flags & _TIF_SIGPENDING)
				ret |= _TIF_RESTOREALL;
			do_notify_resume(regs, ti_flags);
		}
		local_irq_disable();
		ti_flags = READ_ONCE(current_thread_info()->flags);
	}

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && IS_ENABLED(CONFIG_PPC_FPU)) {
		if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
		    unlikely((ti_flags & _TIF_RESTORE_TM))) {
			restore_tm_state(regs);
		} else {
			unsigned long mathflags = MSR_FP;

			if (cpu_has_feature(CPU_FTR_VSX))
				mathflags |= MSR_VEC | MSR_VSX;
			else if (cpu_has_feature(CPU_FTR_ALTIVEC))
				mathflags |= MSR_VEC;

			/*
			 * If userspace MSR has all available FP bits set,
			 * then they are live and no need to restore. If not,
			 * it means the regs were given up and restore_math
			 * may decide to restore them (to avoid taking an FP
			 * fault).
			 */
			if ((regs->msr & mathflags) != mathflags)
				restore_math(regs);
		}
	}

	check_return_regs_valid(regs);

	user_enter_irqoff();
	if (!prep_irq_for_enabled_exit(true)) {
		user_exit_irqoff();
		local_irq_enable();
		local_irq_disable();
		goto again;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	local_paca->tm_scratch = regs->msr;
#endif

	booke_load_dbcr0();

	account_cpu_user_exit();

	/* Restore user access locks last */
	kuap_user_restore(regs);
	kuep_unlock();

	return ret;
}

/*
 * This should be called after a syscall returns, with r3 the return value
 * from the syscall. If this function returns non-zero, the system call
 * exit assembly should additionally load all GPR registers and CTR and XER
 * from the interrupt frame.
 *
 * The function graph tracer can not trace the return side of this function,
 * because RI=0 and soft mask state is "unreconciled", so it is marked notrace.
 */
notrace unsigned long syscall_exit_prepare(unsigned long r3,
					   struct pt_regs *regs,
					   long scv)
{
	unsigned long ti_flags;
	unsigned long ret = 0;
	bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv;

	CT_WARN_ON(ct_state() == CONTEXT_USER);

	kuap_assert_locked();

	regs->result = r3;

	/* Check whether the syscall is issued inside a restartable sequence */
	rseq_syscall(regs);

	ti_flags = current_thread_info()->flags;

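	/*
	 * "sc" system calls report errors by returning a positive errno with
	 * CR0[SO] set, whereas "scv" uses the negative-errno convention
	 * directly, so the conversion below is skipped for scv. It is also
	 * skipped when _TIF_NOERROR or _TIF_RESTOREALL is set, because then
	 * r3 is not an error value or will be taken from the interrupt frame.
	 */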
	if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && is_not_scv) {
		if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
			r3 = -r3;
			regs->ccr |= 0x10000000; /* Set SO bit in CR */
		}
	}

	if (unlikely(ti_flags & _TIF_PERSYSCALL_MASK)) {
		if (ti_flags & _TIF_RESTOREALL)
			ret = _TIF_RESTOREALL;
		else
			regs->gpr[3] = r3;
		clear_bits(_TIF_PERSYSCALL_MASK, &current_thread_info()->flags);
	} else {
		regs->gpr[3] = r3;
	}

	if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
		do_syscall_trace_leave(regs);
		ret |= _TIF_RESTOREALL;
	}

	local_irq_disable();
	ret = interrupt_exit_user_prepare_main(ret, regs);

#ifdef CONFIG_PPC64
	regs->exit_result = ret;
#endif

	return ret;
}

#ifdef CONFIG_PPC64
notrace unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs)
{
	/*
	 * This is called when detecting a soft-pending interrupt as well as
	 * an alternate-return interrupt. So we can't just have the alternate
	 * return path clear SRR1[MSR] and set PACA_IRQ_HARD_DIS (unless
	 * the soft-pending case were to fix things up as well). RI might be
	 * disabled, in which case it gets re-enabled by __hard_irq_disable().
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

#ifdef CONFIG_PPC_BOOK3S_64
	set_kuap(AMR_KUAP_BLOCKED);
#endif

	trace_hardirqs_off();
	user_exit_irqoff();
	account_cpu_user_entry();

	BUG_ON(!user_mode(regs));

	regs->exit_result = interrupt_exit_user_prepare_main(regs->exit_result, regs);

	return regs->exit_result;
}
#endif

notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs)
{
	unsigned long ret;

	BUG_ON(regs_is_unrecoverable(regs));
	BUG_ON(arch_irq_disabled_regs(regs));
	CT_WARN_ON(ct_state() == CONTEXT_USER);

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * AMR can only have been unlocked if we interrupted the kernel.
	 */
	kuap_assert_locked();

	local_irq_disable();

	ret = interrupt_exit_user_prepare_main(0, regs);

#ifdef CONFIG_PPC64
	regs->exit_result = ret;
#endif

	return ret;
}

void preempt_schedule_irq(void);

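/*
 * Exit path for interrupts that return to a kernel context. When the
 * interrupted context had irqs enabled, this handles preemption and replays
 * soft-masked interrupts that became pending during exit preparation. The
 * return value tells the low-level exit code whether it must emulate a
 * deferred stack store (_TIF_EMULATE_STACK_STORE).
 */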
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
{
	unsigned long flags;
	unsigned long ret = 0;
	unsigned long kuap;
	bool stack_store = current_thread_info()->flags &
			   _TIF_EMULATE_STACK_STORE;

	if (regs_is_unrecoverable(regs))
		unrecoverable_exception(regs);
	/*
	 * CT_WARN_ON comes here via program_check_exception,
	 * so avoid recursion.
	 */
	if (TRAP(regs) != INTERRUPT_PROGRAM)
		CT_WARN_ON(ct_state() == CONTEXT_USER);

	kuap = kuap_get_and_assert_locked();

	local_irq_save(flags);

	if (!arch_irq_disabled_regs(regs)) {
		/* Returning to a kernel context with local irqs enabled. */
		WARN_ON_ONCE(!(regs->msr & MSR_EE));
again:
		if (IS_ENABLED(CONFIG_PREEMPT)) {
			/* Return to preemptible kernel context */
			if (unlikely(current_thread_info()->flags & _TIF_NEED_RESCHED)) {
				if (preempt_count() == 0)
					preempt_schedule_irq();
			}
		}

		check_return_regs_valid(regs);

		/*
		 * Stack store exit can't be restarted because the interrupt
		 * stack frame might have been clobbered.
		 */
		if (!prep_irq_for_enabled_exit(unlikely(stack_store))) {
			/*
			 * Replay pending soft-masked interrupts now. Don't
			 * just local_irq_enable(); local_irq_disable(); because
			 * if we are returning from an asynchronous interrupt
			 * here, another one might hit after irqs are enabled,
			 * and it would exit via this same path allowing
			 * another to fire, and so on unbounded.
			 */
			hard_irq_disable();
			replay_soft_interrupts();
			/* Took an interrupt, may have more exit work to do. */
			goto again;
		}
#ifdef CONFIG_PPC64
		/*
		 * An interrupt may clear MSR[EE] and set this concurrently,
		 * but it will be marked pending and the exit will be retried.
		 * This leaves a racy window where MSR[EE]=0 and HARD_DIS is
		 * clear, until interrupt_exit_kernel_restart() calls
		 * hard_irq_disable(), which will set HARD_DIS again.
		 */
		local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

	} else {
		check_return_regs_valid(regs);

		if (unlikely(stack_store))
			__hard_EE_RI_disable();
		/*
		 * Returning to a kernel context with local irqs disabled.
		 * Here, if EE was enabled in the interrupted context, enable
		 * it on return as well. A problem exists here where a soft
		 * masked interrupt may have cleared MSR[EE] and set HARD_DIS
		 * here, and it will still exist on return to the caller. This
		 * will be resolved by the masked interrupt firing again.
		 */
		if (regs->msr & MSR_EE)
			local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
#endif /* CONFIG_PPC64 */
	}

	if (unlikely(stack_store)) {
		clear_bits(_TIF_EMULATE_STACK_STORE, &current_thread_info()->flags);
		ret = 1;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	local_paca->tm_scratch = regs->msr;
#endif

	/*
	 * 64s does not want to mfspr(SPRN_AMR) here, because this comes after
	 * mtmsr, which would cause Read-After-Write stalls. Hence, take the
	 * AMR value from the check above.
	 */
	kuap_kernel_restore(regs, kuap);

	return ret;
}

#ifdef CONFIG_PPC64
notrace unsigned long interrupt_exit_user_restart(struct pt_regs *regs)
{
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

#ifdef CONFIG_PPC_BOOK3S_64
	set_kuap(AMR_KUAP_BLOCKED);
#endif

	trace_hardirqs_off();
	user_exit_irqoff();
	account_cpu_user_entry();

	BUG_ON(!user_mode(regs));

	regs->exit_result |= interrupt_exit_user_prepare(regs);

	return regs->exit_result;
}

/*
 * No real need to return a value here because the stack store case does not
 * get restarted.
 */
notrace unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs)
{
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

#ifdef CONFIG_PPC_BOOK3S_64
	set_kuap(AMR_KUAP_BLOCKED);
#endif

	if (regs->softe == IRQS_ENABLED)
		trace_hardirqs_off();

	BUG_ON(user_mode(regs));

	return interrupt_exit_kernel_prepare(regs);
}
#endif