/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H

#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/threads.h>
#include <asm/barrier.h>
#include <asm/irq.h>
#include <asm/kvm_arm.h>
#include <asm/sysreg.h>

/* Per-CPU IRQ statistics; cacheline-aligned to avoid false sharing. */
typedef struct {
	unsigned int __softirq_pending;
} ____cacheline_aligned irq_cpustat_t;

#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */

#define __ARCH_IRQ_EXIT_IRQS_DISABLED	1

/*
 * Per-CPU state saved across an NMI-like exception taken while the
 * kernel runs in hyp mode (see is_kernel_in_hyp_mode() checks below):
 * the HCR_EL2 value to restore on the outermost exit, plus a nesting
 * counter so only the outermost enter/exit pair touches the sysreg.
 */
struct nmi_ctx {
	u64 hcr;		/* HCR_EL2 snapshot taken by the outermost arch_nmi_enter() */
	unsigned int cnt;	/* NMI nesting depth on this CPU; 0 == not in NMI */
};

DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts);

/*
 * arch_nmi_enter() - on NMI entry with the kernel in hyp mode, ensure
 * HCR_EL2.TGE is set (saving the previous HCR_EL2 value for the matching
 * arch_nmi_exit()), and track nesting so only the outermost NMI does the
 * sysreg work. No-op when the kernel is not in hyp mode.
 */
#define arch_nmi_enter()						\
do {									\
	struct nmi_ctx *___ctx;						\
	u64 ___hcr;							\
									\
	if (!is_kernel_in_hyp_mode())					\
		break;							\
									\
	___ctx = this_cpu_ptr(&nmi_contexts);				\
	/* Nested NMI: outer invocation already set things up. */	\
	if (___ctx->cnt) {						\
		___ctx->cnt++;						\
		break;							\
	}								\
									\
	___hcr = read_sysreg(hcr_el2);					\
	if (!(___hcr & HCR_TGE)) {					\
		write_sysreg(___hcr | HCR_TGE, hcr_el2);		\
		isb();							\
	}								\
	/*								\
	 * Make sure the sysreg write is performed before ___ctx->cnt	\
	 * is set to 1. NMIs that see cnt == 1 will rely on us.		\
	 */								\
	barrier();							\
	___ctx->cnt = 1;						\
	/*								\
	 * Make sure ___ctx->cnt is set before we save ___hcr. We	\
	 * don't want ___ctx->hcr to be overwritten.			\
	 */								\
	barrier();							\
	___ctx->hcr = ___hcr;						\
} while (0)

/*
 * arch_nmi_exit() - undo arch_nmi_enter(): drop the nesting count and,
 * on the outermost exit, restore the saved HCR_EL2 value if TGE was not
 * already set before entry. No-op when the kernel is not in hyp mode.
 */
#define arch_nmi_exit()							\
do {									\
	struct nmi_ctx *___ctx;						\
	u64 ___hcr;							\
									\
	if (!is_kernel_in_hyp_mode())					\
		break;							\
									\
	___ctx = this_cpu_ptr(&nmi_contexts);				\
	___hcr = ___ctx->hcr;						\
	/*								\
	 * Make sure we read ___ctx->hcr before we release		\
	 * ___ctx->cnt as it makes ___ctx->hcr updatable again.		\
	 */								\
	barrier();							\
	___ctx->cnt--;							\
	/*								\
	 * Make sure ___ctx->cnt release is visible before we		\
	 * restore the sysreg. Otherwise a new NMI occurring		\
	 * right after write_sysreg() can be fooled and think		\
	 * we secured things for it.					\
	 */								\
	barrier();							\
	if (!___ctx->cnt && !(___hcr & HCR_TGE))			\
		write_sysreg(___hcr, hcr_el2);				\
} while (0)

/*
 * ack_bad_irq() - account a spurious/unhandled IRQ by bumping the
 * global error counter. Nothing else can usefully be done with it.
 */
static inline void ack_bad_irq(unsigned int irq)
{
	extern unsigned long irq_err_count;
	irq_err_count++;
}

#endif /* __ASM_HARDIRQ_H */