/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__

#include <asm/nospec-branch.h>

/*
 * Interrupt control:
 */

/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
extern __always_inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}
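
/*
 * Illustrative sketch, not part of this header: the value returned above is
 * the raw EFLAGS image, so individual bits can be tested against the
 * X86_EFLAGS_* masks from <asm/processor-flags.h>.  The helper name below is
 * hypothetical; arch_irqs_disabled_flags() further down is the real helper
 * for the IF bit.
 */
static __always_inline int example_native_irqs_enabled(void)
{
	return !!(native_save_fl() & X86_EFLAGS_IF);
}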

static __always_inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}

static __always_inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}

static __always_inline void native_safe_halt(void)
{
	mds_idle_clear_cpu_buffers();
	asm volatile("sti; hlt": : :"memory");
}

static __always_inline void native_halt(void)
{
	mds_idle_clear_cpu_buffers();
	asm volatile("hlt": : :"memory");
}
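
/*
 * Note on the two variants above: "sti" keeps interrupts inhibited for one
 * more instruction (the sti shadow), so the "sti; hlt" in native_safe_halt()
 * enables interrupts and halts atomically.  A split sequence such as
 *
 *	native_irq_enable();
 *	native_halt();
 *
 * would be racy: a wakeup interrupt delivered between the two calls is
 * handled, and the CPU then halts with nothing left to wake it, which is
 * why native_safe_halt() fuses the two.
 */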

#endif /* __ASSEMBLY__ */

#ifndef CONFIG_PARAVIRT
#ifndef __ASSEMBLY__
/*
 * Used in the idle loop; "sti" holds off interrupt delivery for one more
 * instruction, so the "hlt" that follows it in native_safe_halt() is
 * reached before any interrupt can be taken:
 */
static __always_inline void arch_safe_halt(void)
{
	native_safe_halt();
}

/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
static __always_inline void halt(void)
{
	native_halt();
}
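
/*
 * Illustrative sketch, not part of this header (the real x86 idle loop lives
 * in arch/x86/kernel/process.c and the name below is hypothetical): an idle
 * path disables interrupts first, so no wakeup interrupt can slip in before
 * the halt, and then relies on arch_safe_halt() to re-enable interrupts and
 * halt atomically.
 */
static __always_inline void example_idle_wait_for_interrupt(void)
{
	native_irq_disable();	/* close the race against a wakeup IRQ */
	arch_safe_halt();	/* sti; hlt: enable interrupts and halt */
}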
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>

static __always_inline unsigned long arch_local_save_flags(void)
{
	return native_save_fl();
}

static __always_inline void arch_local_irq_disable(void)
{
	native_irq_disable();
}

static __always_inline void arch_local_irq_enable(void)
{
	native_irq_enable();
}

/*
 * For spinlocks, etc:
 */
static __always_inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
	arch_local_irq_disable();
	return flags;
}
#else

#ifdef CONFIG_X86_64
#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS		pushfq; popq %rax
#endif

#endif

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT_XXL */

#ifndef __ASSEMBLY__
static __always_inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & X86_EFLAGS_IF);
}

static __always_inline int arch_irqs_disabled(void)
{
	unsigned long flags = arch_local_save_flags();

	return arch_irqs_disabled_flags(flags);
}

static __always_inline void arch_local_irq_restore(unsigned long flags)
{
	if (!arch_irqs_disabled_flags(flags))
		arch_local_irq_enable();
}
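
/*
 * Illustrative sketch, not part of the kernel API (the name below is
 * hypothetical): the usual way the helpers above compose.  Real code uses
 * local_irq_save()/local_irq_restore() from <linux/irqflags.h>, which wrap
 * these arch_ primitives.
 */
static __always_inline void example_irq_protected_update(unsigned long *val)
{
	unsigned long flags;

	flags = arch_local_irq_save();	/* save IF state, then disable IRQs */
	(*val)++;			/* critical section, no local IRQs */
	arch_local_irq_restore(flags);	/* re-enable only if IF was set */
}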
#endif /* !__ASSEMBLY__ */

#endif /* _X86_IRQFLAGS_H_ */