xref: /openbmc/linux/arch/arm64/include/asm/hardirq.h (revision 6c33a6f4)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H

#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/threads.h>
#include <asm/barrier.h>
#include <asm/irq.h>
#include <asm/kvm_arm.h>
#include <asm/sysreg.h>

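/*
 * Number of inter-processor interrupt (IPI) types counted per CPU; keep
 * this in sync with the IPI enumeration in arch/arm64/kernel/smp.c.
 */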
#define NR_IPI	7

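/*
 * Per-CPU interrupt statistics: the pending-softirq mask plus one receive
 * counter per IPI type.  Cacheline-aligned so that one CPU's updates do
 * not false-share with another's.
 */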
typedef struct {
	unsigned int __softirq_pending;
	unsigned int ipi_irqs[NR_IPI];
} ____cacheline_aligned irq_cpustat_t;

#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */

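/*
 * Accessors for the counters above, built on the __IRQ_STAT(cpu, member)
 * mapping from <linux/irq_cpustat.h>.  A sketch of typical use (the IPI
 * receive path in arch/arm64/kernel/smp.c; variable names illustrative):
 *
 *	__inc_irq_stat(cpu, ipi_irqs[ipinr]);
 *	nr = __get_irq_stat(cpu, ipi_irqs[ipinr]);
 */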
#define __inc_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)++
#define __get_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)

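/*
 * smp_irq_stat_cpu() (arch/arm64/kernel/smp.c) sums the IPI counters for
 * one CPU; exposing it as arch_irq_stat_cpu lets the generic /proc code
 * fold IPIs into the per-CPU interrupt totals.
 */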
u64 smp_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu	smp_irq_stat_cpu

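/*
 * arm64 always calls the generic irq_exit() with interrupts disabled, so
 * tell it not to bother disabling them itself.
 */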
#define __ARCH_IRQ_EXIT_IRQS_DISABLED	1

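/*
 * Per-CPU save slot for HCR_EL2 across an NMI.  Only relevant when the
 * kernel runs at EL2 (VHE), where KVM may have cleared HCR_EL2.TGE around
 * guest entry/exit.
 */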
struct nmi_ctx {
	u64 hcr;
};

DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts);

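/*
 * arch_nmi_enter()/arch_nmi_exit() are the hooks run by the generic
 * nmi_enter()/nmi_exit() helpers in <linux/hardirq.h>.  On a VHE kernel an
 * NMI may land while KVM has HCR_EL2.TGE cleared for a guest; host
 * exception handling expects TGE to be set, so set it (with an isb() so
 * the write takes effect) for the duration of the NMI and restore the
 * saved value on exit if we were the ones who set it.
 */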
#define arch_nmi_enter()							\
	do {									\
		if (is_kernel_in_hyp_mode()) {					\
			struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts);	\
			nmi_ctx->hcr = read_sysreg(hcr_el2);			\
			if (!(nmi_ctx->hcr & HCR_TGE)) {			\
				write_sysreg(nmi_ctx->hcr | HCR_TGE, hcr_el2);	\
				isb();						\
			}							\
		}								\
	} while (0)

#define arch_nmi_exit()								\
	do {									\
		if (is_kernel_in_hyp_mode()) {					\
			struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts);	\
			if (!(nmi_ctx->hcr & HCR_TGE))				\
				write_sysreg(nmi_ctx->hcr, hcr_el2);		\
		}								\
	} while (0)

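/*
 * Called by the core IRQ code for an interrupt it cannot handle (e.g. a
 * bad IRQ number); just account for it in irq_err_count.
 */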
static inline void ack_bad_irq(unsigned int irq)
{
	extern unsigned long irq_err_count;
	irq_err_count++;
}

#endif /* __ASM_HARDIRQ_H */