/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_IRQFLAGS_H
#define __ASM_IRQFLAGS_H

#include <asm/alternative.h>
#include <asm/barrier.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

/*
 * AArch64 has flags for masking: Debug, Asynchronous (SError), Interrupts and
 * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'dai'
 * order:
 * Masking debug exceptions causes all other exceptions to be masked too.
 * Masking SError masks IRQs, but not debug exceptions. Masking IRQs has no
 * side effects for other flags. Keeping to this order makes it easier for
 * entry.S to know which exceptions should be unmasked.
 *
 * FIQ is never expected, but we mask it when we disable debug exceptions, and
 * unmask it at all other times.
 */
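
/*
 * For reference: in PSTATE/SPSR these flags live at PSR_D_BIT (0x200),
 * PSR_A_BIT (0x100), PSR_I_BIT (0x80) and PSR_F_BIT (0x40), while the 4-bit
 * immediate taken by 'msr daifset'/'msr daifclr' selects D/A/I/F as bits
 * 3/2/1/0 - which is why the '#2' used below touches only the I bit.
 */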

/*
 * CPU interrupt mask handling.
 */
static inline void arch_local_irq_enable(void)
{
	if (system_has_prio_mask_debugging()) {
		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);

		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
	}

	asm volatile(ALTERNATIVE(
		"msr	daifclr, #2		// arch_local_irq_enable",
		__msr_s(SYS_ICC_PMR_EL1, "%0"),
		ARM64_HAS_IRQ_PRIO_MASKING)
		:
		: "r" ((unsigned long) GIC_PRIO_IRQON)
		: "memory");

	pmr_sync();
}
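
/*
 * The ALTERNATIVE above is patched at boot to exactly one of the two
 * instruction sequences. As a rough sketch (the real mechanism patches
 * instructions in place rather than branching), the behaviour is
 * equivalent to:
 *
 *	if (cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING))
 *		write_sysreg_s(GIC_PRIO_IRQON, SYS_ICC_PMR_EL1);
 *	else
 *		asm volatile("msr daifclr, #2" ::: "memory");
 */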

static inline void arch_local_irq_disable(void)
{
	if (system_has_prio_mask_debugging()) {
		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);

		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
	}

	asm volatile(ALTERNATIVE(
		"msr	daifset, #2		// arch_local_irq_disable",
		__msr_s(SYS_ICC_PMR_EL1, "%0"),
		ARM64_HAS_IRQ_PRIO_MASKING)
		:
		: "r" ((unsigned long) GIC_PRIO_IRQOFF)
		: "memory");
}
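
/*
 * Note the asymmetry with arch_local_irq_enable(): there is no pmr_sync()
 * on the masking path. The barrier matters when *lowering* the priority
 * mask, so that pending interrupts are signalled promptly once unmasked;
 * raising the mask does not appear to need it.
 */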

/*
 * Save the current interrupt enable state.
 */
static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;

	asm volatile(ALTERNATIVE(
		"mrs	%0, daif",
		__mrs_s("%0", SYS_ICC_PMR_EL1),
		ARM64_HAS_IRQ_PRIO_MASKING)
		: "=&r" (flags)
		:
		: "memory");

	return flags;
}

static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	int res;

	asm volatile(ALTERNATIVE(
		"and	%w0, %w1, #" __stringify(PSR_I_BIT),
		"eor	%w0, %w1, #" __stringify(GIC_PRIO_IRQON),
		ARM64_HAS_IRQ_PRIO_MASKING)
		: "=&r" (res)
		: "r" ((int) flags)
		: "memory");

	return res;
}
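
/*
 * 'flags' is either a DAIF value (IRQs masked iff PSR_I_BIT is set) or a
 * PMR value (IRQs masked iff it differs from GIC_PRIO_IRQON, so the 'eor'
 * above yields non-zero). The same test, sketched in C:
 *
 *	res = prio_masking ? (flags != GIC_PRIO_IRQON)
 *			   : (flags & PSR_I_BIT);
 */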

static inline int arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags;

	flags = arch_local_save_flags();

	/*
	 * There are too many states with IRQs disabled, just keep the current
	 * state if interrupts are already disabled/masked.
	 */
	if (!arch_irqs_disabled_flags(flags))
		arch_local_irq_disable();

	return flags;
}

/*
 * Restore saved IRQ state.
 */
static inline void arch_local_irq_restore(unsigned long flags)
{
	asm volatile(ALTERNATIVE(
		"msr	daif, %0",
		__msr_s(SYS_ICC_PMR_EL1, "%0"),
		ARM64_HAS_IRQ_PRIO_MASKING)
		:
		: "r" (flags)
		: "memory");

	pmr_sync();
}
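
/*
 * pmr_sync() is needed here as well: restoring 'flags' may lower the
 * priority mask (i.e. unmask IRQs), so the write must be synchronised
 * just as in arch_local_irq_enable().
 */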

#endif /* __ASM_IRQFLAGS_H */