xref: /openbmc/linux/arch/x86/include/asm/irqflags.h (revision 9a87ffc99ec8eb8d35eed7c4f816d75f5cc9662e)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__

#include <asm/nospec-branch.h>

/*
 * Interrupt control:
 */

/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
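/*
 * Note (editorial): under the kernel's gnu_inline semantics, "extern
 * inline" provides a body for inlining only and emits no out-of-line
 * symbol here; the standalone copy of native_save_fl() is provided
 * separately (in the kernel tree, arch/x86/kernel/irqflags.S).
 */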
extern __always_inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}
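
/*
 * Illustration (editorial sketch, not part of the original header):
 * if the compiler picks an %rsp-relative memory operand for %0, the
 * sequence might assemble to:
 *
 *	pushfq
 *	popq	0x8(%rsp)
 *
 * Because "pop" increments %rsp *before* computing the destination's
 * effective address, 0x8(%rsp) still names the caller's slot rather
 * than the word pushfq just wrote -- which is why "=rm" is safe.
 */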

static __always_inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}

static __always_inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}

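/*
 * Note (editorial): "sti" enables interrupts only after the *next*
 * instruction completes (the STI interrupt shadow), so keeping "sti"
 * and "hlt" in a single asm statement below closes the race where a
 * wakeup interrupt could arrive between enabling IRQs and halting.
 */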
static __always_inline void native_safe_halt(void)
{
	mds_idle_clear_cpu_buffers();
	asm volatile("sti; hlt": : :"memory");
}

static __always_inline void native_halt(void)
{
	mds_idle_clear_cpu_buffers();
	asm volatile("hlt": : :"memory");
}

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>

static __always_inline unsigned long arch_local_save_flags(void)
{
	return native_save_fl();
}

static __always_inline void arch_local_irq_disable(void)
{
	native_irq_disable();
}

static __always_inline void arch_local_irq_enable(void)
{
	native_irq_enable();
}

/*
 * Used in the idle loop; "sti" takes effect only after the following
 * instruction (see the note above native_safe_halt()):
 */
static __always_inline void arch_safe_halt(void)
{
	native_safe_halt();
}

/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
static __always_inline void halt(void)
{
	native_halt();
}

/*
 * For spinlocks, etc; a typical save/restore pattern is sketched
 * after this function:
 */
static __always_inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
	arch_local_irq_disable();
	return flags;
}
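
/*
 * Illustrative usage (editorial sketch, not part of the original
 * header): the pattern behind raw_spin_lock_irqsave()-style code.
 * arch_local_irq_restore(), defined further down, re-enables IRQs
 * only if they were enabled when the flags were saved:
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();
 *	... critical section with IRQs off ...
 *	arch_local_irq_restore(flags);
 */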
#else

#ifdef CONFIG_X86_64
#ifdef CONFIG_DEBUG_ENTRY
/* Reads RFLAGS into %rax; clobbers %rax. */
#define SAVE_FLAGS		pushfq; popq %rax
#endif

#endif

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT_XXL */

#ifndef __ASSEMBLY__
static __always_inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & X86_EFLAGS_IF);
}

static __always_inline int arch_irqs_disabled(void)
{
	unsigned long flags = arch_local_save_flags();

	return arch_irqs_disabled_flags(flags);
}

static __always_inline void arch_local_irq_restore(unsigned long flags)
{
	if (!arch_irqs_disabled_flags(flags))
		arch_local_irq_enable();
}
#endif /* !__ASSEMBLY__ */

#endif /* _X86_IRQFLAGS_H_ */