// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 * Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */

#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>
#include <linux/static_call.h>

#include <linux/uaccess.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/hw_irq.h>
#include <asm/softirq_stack.h>
#include <asm/ppc_asm.h>

#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#include <asm/dbell.h>
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>

int distribute_irqs = 1;

static inline void next_interrupt(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
		WARN_ON(!(local_paca->irq_happened & PACA_IRQ_HARD_DIS));
		WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
	}

	/*
	 * We are responding to the next interrupt, so interrupt-off
	 * latencies should be reset here.
	 */
	lockdep_hardirq_exit();
	trace_hardirqs_on();
	trace_hardirqs_off();
	lockdep_hardirq_enter();
}

static inline bool irq_happened_test_and_clear(u8 irq)
{
	if (local_paca->irq_happened & irq) {
		local_paca->irq_happened &= ~irq;
		return true;
	}
	return false;
}
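
/*
 * Rough sketch of the lazy (soft) masking scheme these helpers serve;
 * the real entry code lives in the exception vectors and asm/hw_irq.h,
 * so this is illustrative only:
 *
 *	local_irq_disable();	// sets irq_soft_mask, MSR[EE] stays set
 *	...			// an interrupt arriving here is latched:
 *				// the masked entry path sets a PACA_IRQ_*
 *				// bit in irq_happened, hard disables, and
 *				// returns without running the handler
 *	local_irq_enable();	// sees irq_happened != 0 and replays the
 *				// latched interrupts (see below)
 */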

static __no_kcsan void __replay_soft_interrupts(void)
{
	struct pt_regs regs;

	/*
	 * We use local_paca rather than get_paca() to avoid all the
	 * debug_smp_processor_id() business in this low level function.
	 */

	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
		WARN_ON_ONCE(mfmsr() & MSR_EE);
		WARN_ON(!(local_paca->irq_happened & PACA_IRQ_HARD_DIS));
		WARN_ON(local_paca->irq_happened & PACA_IRQ_REPLAYING);
	}

	/*
	 * PACA_IRQ_REPLAYING prevents interrupt handlers from enabling
	 * MSR[EE] to get PMIs, which can result in more IRQs becoming
	 * pending.
	 */
	local_paca->irq_happened |= PACA_IRQ_REPLAYING;

	ppc_save_regs(&regs);
	regs.softe = IRQS_ENABLED;
	regs.msr |= MSR_EE;

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp, tmp2;
		lv1_get_version_info(&tmp, &tmp2);
	}

	/*
	 * Check if a hypervisor Maintenance interrupt happened.
	 * This is a higher priority interrupt than the others, so
	 * replay it first.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S) &&
	    irq_happened_test_and_clear(PACA_IRQ_HMI)) {
		regs.trap = INTERRUPT_HMI;
		handle_hmi_exception(&regs);
		next_interrupt(&regs);
	}

	if (irq_happened_test_and_clear(PACA_IRQ_DEC)) {
		regs.trap = INTERRUPT_DECREMENTER;
		timer_interrupt(&regs);
		next_interrupt(&regs);
	}

	if (irq_happened_test_and_clear(PACA_IRQ_EE)) {
		regs.trap = INTERRUPT_EXTERNAL;
		do_IRQ(&regs);
		next_interrupt(&regs);
	}

	if (IS_ENABLED(CONFIG_PPC_DOORBELL) &&
	    irq_happened_test_and_clear(PACA_IRQ_DBELL)) {
		regs.trap = INTERRUPT_DOORBELL;
		doorbell_exception(&regs);
		next_interrupt(&regs);
	}

	/* Book3E does not support soft-masking PMI interrupts */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S) &&
	    irq_happened_test_and_clear(PACA_IRQ_PMI)) {
		regs.trap = INTERRUPT_PERFMON;
		performance_monitor_exception(&regs);
		next_interrupt(&regs);
	}

	local_paca->irq_happened &= ~PACA_IRQ_REPLAYING;
}

__no_kcsan void replay_soft_interrupts(void)
{
	irq_enter(); /* See comment in arch_local_irq_restore */
	__replay_soft_interrupts();
	irq_exit();
}

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP)
static inline __no_kcsan void replay_soft_interrupts_irqrestore(void)
{
	unsigned long kuap_state = get_kuap();

	/*
	 * Check if anything calls local_irq_enable/restore() when KUAP is
	 * disabled (user access enabled). We handle that case here by saving
	 * and re-locking AMR but we shouldn't get here in the first place,
	 * hence the warning.
	 */
	kuap_assert_locked();

	if (kuap_state != AMR_KUAP_BLOCKED)
		set_kuap(AMR_KUAP_BLOCKED);

	__replay_soft_interrupts();

	if (kuap_state != AMR_KUAP_BLOCKED)
		set_kuap(kuap_state);
}
#else
#define replay_soft_interrupts_irqrestore() __replay_soft_interrupts()
#endif

notrace __no_kcsan void arch_local_irq_restore(unsigned long mask)
{
	unsigned char irq_happened;

	/* Write the new soft-enabled value if it is a disable */
	if (mask) {
		irq_soft_mask_set(mask);
		return;
	}

	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
		WARN_ON_ONCE(in_nmi());
		WARN_ON_ONCE(in_hardirq());
		WARN_ON_ONCE(local_paca->irq_happened & PACA_IRQ_REPLAYING);
	}

again:
	/*
	 * After the stb, interrupts are unmasked and there are no interrupts
	 * pending replay. The restart sequence makes this atomic with
	 * respect to soft-masked interrupts. If this was just a simple code
	 * sequence, a soft-masked interrupt could become pending right after
	 * the comparison and before the stb.
	 *
	 * This allows interrupts to be unmasked without hard disabling, and
	 * also without new hard interrupts coming in ahead of pending ones.
	 */
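	/*
	 * As a plain (non-atomic) C sketch, the sequence below is roughly:
	 *
	 *	if (local_paca->irq_happened)
	 *		goto happened;
	 *	local_paca->irq_soft_mask = IRQS_ENABLED;
	 *
	 * r13 is the paca pointer; the stb stores the same zero that was
	 * just compared, and IRQS_ENABLED is that zero value. The restart
	 * table is what makes the real sequence restartable, and hence
	 * atomic with respect to soft-masked interrupts.
	 */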
	asm_volatile_goto(
"1:					\n"
"		lbz	9,%0(13)	\n"
"		cmpwi	9,0		\n"
"		bne	%l[happened]	\n"
"		stb	9,%1(13)	\n"
"2:					\n"
	RESTART_TABLE(1b, 2b, 1b)
	: : "i" (offsetof(struct paca_struct, irq_happened)),
	    "i" (offsetof(struct paca_struct, irq_soft_mask))
	: "cr0", "r9"
	: happened);

	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(!(mfmsr() & MSR_EE));

	/*
	 * If we came here from the replay below, we might have a preempt
	 * pending (due to preempt_enable_no_resched()). Have to check now.
	 */
	preempt_check_resched();

	return;

happened:
	irq_happened = READ_ONCE(local_paca->irq_happened);
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(!irq_happened);

	if (irq_happened == PACA_IRQ_HARD_DIS) {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
			WARN_ON_ONCE(mfmsr() & MSR_EE);
		irq_soft_mask_set(IRQS_ENABLED);
		local_paca->irq_happened = 0;
		__hard_irq_enable();
		preempt_check_resched();
		return;
	}

	/* Have interrupts to replay, need to hard disable first */
	if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
			if (!(mfmsr() & MSR_EE)) {
				/*
				 * An interrupt could have come in and cleared
				 * MSR[EE] and set IRQ_HARD_DIS, so check
				 * IRQ_HARD_DIS again and warn if it is still
				 * clear.
				 */
				irq_happened = READ_ONCE(local_paca->irq_happened);
				WARN_ON_ONCE(!(irq_happened & PACA_IRQ_HARD_DIS));
			}
		}
		__hard_irq_disable();
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
	} else {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
			if (WARN_ON_ONCE(mfmsr() & MSR_EE))
				__hard_irq_disable();
		}
	}

	/*
	 * Disable preempt here, so that the below preempt_enable will
	 * perform resched if required (a replayed interrupt may set
	 * need_resched).
	 */
	preempt_disable();
	irq_soft_mask_set(IRQS_ALL_DISABLED);
	trace_hardirqs_off();

	/*
	 * Now enter interrupt context. The interrupt handlers themselves
	 * also call irq_enter/exit (which is okay, they can nest). But call
	 * it here now to hold off softirqs until the below irq_exit(). If
	 * we allowed replayed handlers to run softirqs, that enables irqs,
	 * which must replay interrupts, which recurses in here and makes
	 * things more complicated. The recursion is limited to 2, and it can
	 * be made to work, but it's complicated.
	 *
	 * local_bh_disable cannot be used here because interrupts taken in
	 * idle are not in the right context (RCU, tick, etc) to run softirqs,
	 * so irq_enter must be called.
	 */
	irq_enter();

	replay_soft_interrupts_irqrestore();

	irq_exit();

	if (unlikely(local_paca->irq_happened != PACA_IRQ_HARD_DIS)) {
		/*
		 * The softirq processing in irq_exit() may enable interrupts
		 * temporarily, which can result in MSR[EE] being enabled and
		 * more irqs becoming pending. Go around again if that happens.
		 */
		trace_hardirqs_on();
		preempt_enable_no_resched();
		goto again;
	}

	trace_hardirqs_on();
	irq_soft_mask_set(IRQS_ENABLED);
	local_paca->irq_happened = 0;
	__hard_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
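
/*
 * Caller's-eye sketch (illustrative, not a new interface): this is the
 * arch hook behind local_irq_restore()/local_irq_enable(), so interrupts
 * latched while soft-masked are handled on the re-enable path:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// soft-disable only, MSR[EE] kept on
 *	...				// a device interrupt here is latched
 *					// into irq_happened, not handled
 *	local_irq_restore(flags);	// reaches arch_local_irq_restore(0)
 *					// above, which replays the latch
 */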

/*
 * This is a helper to use when about to go into idle low-power
 * when the latter has the side effect of re-enabling interrupts
 * (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * will return whether to enter power save or just return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in arch_cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	/*
	 * Mark interrupts as soft-enabled and clear the
	 * PACA_IRQ_HARD_DIS from the pending mask since we
	 * are about to hard enable as well as a side effect
	 * of entering the low power state.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
	irq_soft_mask_set(IRQS_ENABLED);

	/* Tell the caller to enter the low power state */
	return true;
}

#ifdef CONFIG_PPC_BOOK3S
/*
 * This is for idle sequences that return with IRQs off, but the
 * idle state itself wakes on interrupt. Tell the irq tracer that
 * IRQs are enabled for the duration of idle so it does not get long
 * off times. Must be paired with fini_irq_for_idle_irqsoff.
 */
bool prep_irq_for_idle_irqsoff(void)
{
	WARN_ON(!irqs_disabled());

	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	return true;
}
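
/*
 * Expected caller shape, sketched; enter_idle_state() is a placeholder
 * for the platform's actual idle entry sequence:
 *
 *	if (prep_irq_for_idle_irqsoff()) {
 *		enter_idle_state();		// wakes on interrupt, IRQs off
 *		fini_irq_for_idle_irqsoff();	// tell the tracer IRQs are off
 *	}
 *
 * A false return means a wakeup condition is already pending, and the
 * low power state should be skipped entirely.
 */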

/*
 * Take the SRR1 wakeup reason, index into this table to find the
 * appropriate irq_happened bit.
 *
 * System reset exceptions taken in idle state also come through here,
 * but they are NMI interrupts so do not need to wait for IRQs to be
 * restored, and should be taken as early as practical. These are marked
 * with 0xff in the table. The Power ISA specifies 0100b as the system
 * reset interrupt reason.
 */
#define IRQ_SYSTEM_RESET	0xff

static const u8 srr1_to_lazyirq[0x10] = {
	0, 0, 0,
	PACA_IRQ_DBELL,
	IRQ_SYSTEM_RESET,
	PACA_IRQ_DBELL,
	PACA_IRQ_DEC,
	0,
	PACA_IRQ_EE,
	PACA_IRQ_EE,
	PACA_IRQ_HMI,
	0, 0, 0, 0, 0 };

void replay_system_reset(void)
{
	struct pt_regs regs;

	ppc_save_regs(&regs);
	regs.trap = 0x100;
	get_paca()->in_nmi = 1;
	system_reset_exception(&regs);
	get_paca()->in_nmi = 0;
}
EXPORT_SYMBOL_GPL(replay_system_reset);

void irq_set_pending_from_srr1(unsigned long srr1)
{
	unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
	u8 reason = srr1_to_lazyirq[idx];

	/*
	 * Take the system reset now, which is immediately after registers
	 * are restored from idle. It's an NMI, so interrupts need not be
	 * re-enabled before it is taken.
	 */
	if (unlikely(reason == IRQ_SYSTEM_RESET)) {
		replay_system_reset();
		return;
	}

	if (reason == PACA_IRQ_DBELL) {
		/*
		 * When a doorbell triggers a system reset wakeup, the message
		 * is not cleared, so if the doorbell interrupt is replayed
		 * and the IPI handled, the doorbell interrupt would still
		 * fire when EE is enabled.
		 *
		 * To avoid taking the superfluous doorbell interrupt,
		 * execute a msgclr here before the interrupt is replayed.
		 */
		ppc_msgclr(PPC_DBELL_MSGTYPE);
	}

	/*
	 * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
	 * so this can be called unconditionally with the SRR1 wake
	 * reason as returned by the idle code, which uses 0 to mean no
	 * interrupt.
	 *
	 * If a future CPU were to designate this as an interrupt reason,
	 * then a new index for no interrupt must be assigned.
	 */
	local_paca->irq_happened |= reason;
}
#endif /* CONFIG_PPC_BOOK3S */

/*
 * Force a replay of the external interrupt handler on this CPU.
 */
void force_external_irq_replay(void)
{
	/*
	 * This must only be called with interrupts soft-disabled,
	 * the replay will happen when re-enabling.
	 */
	WARN_ON(!arch_irqs_disabled());

	/*
	 * Interrupts must always be hard disabled before irq_happened is
	 * modified (to prevent lost update in case of interrupt between
	 * load and store).
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/* Indicate in the PACA that we have an interrupt to replay */
	local_paca->irq_happened |= PACA_IRQ_EE;
}

static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
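
/*
 * Usage note for the boot parameter above: booting with "noirqdistrib"
 * on the kernel command line clears distribute_irqs; platform interrupt
 * controller code may consult this flag to keep device interrupts on a
 * default server (typically the boot CPU) instead of spreading them
 * across CPUs.
 */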