xref: /openbmc/linux/arch/arm64/include/asm/irqflags.h (revision b8d312aa)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_IRQFLAGS_H
#define __ASM_IRQFLAGS_H

#ifdef __KERNEL__

#include <asm/alternative.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

/*
 * AArch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and
 * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'dai'
 * order:
 * Masking debug exceptions causes all other exceptions to be masked too.
 * Masking SError masks IRQs, but not debug exceptions. Masking IRQs has no
 * side effects for other flags. Keeping to this order makes it easier for
 * entry.S to know which exceptions should be unmasked.
 *
 * FIQ is never expected, but we mask it when we disable debug exceptions, and
 * unmask it at all other times.
 */
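
/*
 * For reference: the 4-bit immediate taken by "msr daifset"/"msr daifclr"
 * selects which PSTATE mask bits to set or clear: bit 3 = Debug,
 * bit 2 = SError (A), bit 1 = IRQ, bit 0 = FIQ. The "#2" used in the
 * accessors below therefore touches only the IRQ mask.
 */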

/*
 * CPU interrupt mask handling.
 */
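/*
 * Each accessor below is patched at boot via ALTERNATIVE: without
 * ARM64_HAS_IRQ_PRIO_MASKING the classic PSTATE.I mask is used (via
 * daifset/daifclr); with it, masking is done through the GICv3 priority
 * mask register ICC_PMR_EL1 instead, writing GIC_PRIO_IRQON to unmask and
 * GIC_PRIO_IRQOFF to mask. The "dsb sy" on the unmask paths acts as a
 * synchronisation point for the PMR write so that pending interrupts that
 * become unmasked can be taken. When priority-mask debugging is enabled,
 * the WARN_ON_ONCE() calls sanity-check that PMR only ever holds one of
 * those two values here.
 */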
static inline void arch_local_irq_enable(void)
{
	if (system_has_prio_mask_debugging()) {
		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);

		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
	}

	asm volatile(ALTERNATIVE(
		"msr	daifclr, #2		// arch_local_irq_enable\n"
		"nop",
		__msr_s(SYS_ICC_PMR_EL1, "%0")
		"dsb	sy",
		ARM64_HAS_IRQ_PRIO_MASKING)
		:
		: "r" ((unsigned long) GIC_PRIO_IRQON)
		: "memory");
}

static inline void arch_local_irq_disable(void)
{
	if (system_has_prio_mask_debugging()) {
		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);

		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
	}

	asm volatile(ALTERNATIVE(
		"msr	daifset, #2		// arch_local_irq_disable",
		__msr_s(SYS_ICC_PMR_EL1, "%0"),
		ARM64_HAS_IRQ_PRIO_MASKING)
		:
		: "r" ((unsigned long) GIC_PRIO_IRQOFF)
		: "memory");
}

/*
 * Save the current interrupt enable state.
 */
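/*
 * The returned flags are mode-dependent: without priority masking they are
 * the raw DAIF bits, with ARM64_HAS_IRQ_PRIO_MASKING they are the current
 * ICC_PMR_EL1 value. Callers should treat them as opaque and only hand them
 * back to arch_irqs_disabled_flags() or arch_local_irq_restore().
 */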
static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;

	asm volatile(ALTERNATIVE(
		"mrs	%0, daif",
		__mrs_s("%0", SYS_ICC_PMR_EL1),
		ARM64_HAS_IRQ_PRIO_MASKING)
		: "=&r" (flags)
		:
		: "memory");

	return flags;
}

static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	int res;

	asm volatile(ALTERNATIVE(
		"and	%w0, %w1, #" __stringify(PSR_I_BIT),
		"eor	%w0, %w1, #" __stringify(GIC_PRIO_IRQON),
		ARM64_HAS_IRQ_PRIO_MASKING)
		: "=&r" (res)
		: "r" ((int) flags)
		: "memory");

	return res;
}
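
/*
 * Above, the DAIF variant isolates PSR_I_BIT from the saved flags, so any
 * non-zero result means IRQs were masked. The PMR variant XORs the saved
 * priority with GIC_PRIO_IRQON, which is likewise non-zero for anything
 * other than the fully-unmasked priority.
 */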

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags;

	flags = arch_local_save_flags();

	/*
	 * There are too many states with IRQs disabled, just keep the current
	 * state if interrupts are already disabled/masked.
	 */
	if (!arch_irqs_disabled_flags(flags))
		arch_local_irq_disable();

	return flags;
}

/*
 * Restore saved IRQ state.
 */
static inline void arch_local_irq_restore(unsigned long flags)
{
	asm volatile(ALTERNATIVE(
			"msr	daif, %0\n"
			"nop",
			__msr_s(SYS_ICC_PMR_EL1, "%0")
			"dsb	sy",
			ARM64_HAS_IRQ_PRIO_MASKING)
		:
		: "r" (flags)
		: "memory");
}
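
/*
 * Illustrative sketch only (the function name below is made up for the
 * example): these primitives are normally consumed through the generic
 * local_irq_save()/local_irq_restore() pattern to bracket a short section
 * that must not be interrupted on the local CPU.
 */
#if 0
static void example_critical_section(void)
{
	unsigned long flags;

	/* Mask IRQs (PSTATE.I or PMR, whichever is in use) and save state. */
	flags = arch_local_irq_save();

	/* ... per-CPU work that must not be interrupted ... */

	/* Put the interrupt mask back exactly as it was saved. */
	arch_local_irq_restore(flags);
}
#endif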

#endif /* __KERNEL__ */
#endif /* __ASM_IRQFLAGS_H */