/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H

#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/threads.h>
#include <asm/barrier.h>
#include <asm/irq.h>
#include <asm/kvm_arm.h>
#include <asm/sysreg.h>

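/* Number of IPI message types counted in irq_cpustat_t::ipi_irqs[]. */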
#define NR_IPI	7

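/*
 * Per-CPU interrupt statistics: the pending-softirq mask plus a count
 * of each IPI type received.  ____cacheline_aligned keeps each CPU's
 * stats on its own cache line so CPUs never false-share a counter.
 */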
typedef struct {
	unsigned int __softirq_pending;
	unsigned int ipi_irqs[NR_IPI];
} ____cacheline_aligned irq_cpustat_t;

#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */

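/* Raw (non-atomic) accessors for the per-CPU IRQ statistics above. */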
#define __inc_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)++
#define __get_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)

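/* Sum of the IPIs handled by @cpu, reported via /proc/stat. */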
u64 smp_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu	smp_irq_stat_cpu

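/*
 * irq_exit() is always entered with hard IRQs disabled on arm64, so the
 * generic code does not need to mask them itself.
 */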
#define __ARCH_IRQ_EXIT_IRQS_DISABLED	1

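/*
 * Per-CPU context saved by arch_nmi_enter(): the HCR_EL2 value in force
 * when the outermost NMI arrived, plus the NMI nesting depth.
 */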
struct nmi_ctx {
	u64 hcr;
	unsigned int cnt;
};

DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts);

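/*
 * With VHE (kernel running at EL2), an NMI may fire while HCR_EL2.TGE is
 * clear, e.g. when KVM has set up to run a guest.  Host exception routing
 * requires TGE to be set, so the outermost NMI sets it here and
 * arch_nmi_exit() restores the saved value.  Nested NMIs only bump the
 * count.
 */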
#define arch_nmi_enter()						\
do {									\
	struct nmi_ctx *___ctx;						\
	u64 ___hcr;							\
									\
	if (!is_kernel_in_hyp_mode())					\
		break;							\
									\
	___ctx = this_cpu_ptr(&nmi_contexts);				\
	if (___ctx->cnt) {						\
		___ctx->cnt++;						\
		break;							\
	}								\
									\
	___hcr = read_sysreg(hcr_el2);					\
	if (!(___hcr & HCR_TGE)) {					\
		write_sysreg(___hcr | HCR_TGE, hcr_el2);		\
		isb();							\
	}								\
	/*								\
	 * Make sure the sysreg write is performed before ___ctx->cnt	\
	 * is set to 1. NMIs that see cnt == 1 will rely on us.		\
	 */								\
	barrier();							\
	___ctx->cnt = 1;						\
	/*								\
	 * Make sure ___ctx->cnt is set before we save ___hcr. We	\
	 * don't want ___ctx->hcr to be overwritten.			\
	 */								\
	barrier();							\
	___ctx->hcr = ___hcr;						\
} while (0)
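/*
 * Undo arch_nmi_enter(): drop the nesting count and, on the outermost
 * exit, restore the HCR_EL2 value saved at entry if TGE was clear then.
 */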
#define arch_nmi_exit()							\
do {									\
	struct nmi_ctx *___ctx;						\
	u64 ___hcr;							\
									\
	if (!is_kernel_in_hyp_mode())					\
		break;							\
									\
	___ctx = this_cpu_ptr(&nmi_contexts);				\
	___hcr = ___ctx->hcr;						\
	/*								\
	 * Make sure we read ___ctx->hcr before we release		\
	 * ___ctx->cnt as it makes ___ctx->hcr updatable again.	\
	 */								\
	barrier();							\
	___ctx->cnt--;							\
	/*								\
	 * Make sure ___ctx->cnt release is visible before we		\
	 * restore the sysreg. Otherwise a new NMI occurring		\
	 * right after write_sysreg() can be fooled and think		\
	 * we secured things for it.					\
	 */								\
	barrier();							\
	if (!___ctx->cnt && !(___hcr & HCR_TGE))			\
		write_sysreg(___hcr, hcr_el2);				\
} while (0)
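
/*
 * These hooks are invoked from the generic nmi_enter()/nmi_exit()
 * helpers in <linux/hardirq.h>.  Illustrative sketch only;
 * handle_my_nmi() is a hypothetical handler, not part of this header:
 *
 *	nmi_enter();		// -> arch_nmi_enter(): sets TGE if outermost
 *	handle_my_nmi(regs);
 *	nmi_exit();		// -> arch_nmi_exit(): restores HCR_EL2
 *
 * Nesting is safe: only the outermost enter/exit pair writes HCR_EL2.
 */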
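/*
 * Handle an unexpected IRQ that has no registered handler: just account
 * it in the global error counter.
 */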
static inline void ack_bad_irq(unsigned int irq)
{
	extern unsigned long irq_err_count;
	irq_err_count++;
}

#endif /* __ASM_HARDIRQ_H */