xref: /openbmc/linux/arch/x86/include/asm/irqflags.h (revision 9bad5658ea710f45e4ee68b88a01cfe1839d8b00)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__

/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
#define __cpuidle __attribute__((__section__(".cpuidle.text")))
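
/*
 * Grouping idle functions in .cpuidle.text lets code such as
 * cpu_in_idle() decide from a saved program counter whether a CPU was
 * idling, e.g. when printing NMI backtraces.
 */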

/*
 * Interrupt control:
 */

/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
extern inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}

extern inline void native_restore_fl(unsigned long flags);
extern inline void native_restore_fl(unsigned long flags)
{
	asm volatile("push %0 ; popf"
		     : /* no output */
		     : "g" (flags)
		     : "memory", "cc");
}
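
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * native_save_fl()/native_restore_fl() are the raw EFLAGS accessors
 * that the arch_local_* helpers below are built on.
 *
 *	unsigned long flags = native_save_fl();
 *	native_irq_disable();
 *	... code that must not be interrupted ...
 *	native_restore_fl(flags);	(IF returns to its saved state)
 */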

static inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}

static inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}

static inline __cpuidle void native_safe_halt(void)
{
	asm volatile("sti; hlt": : :"memory");
}
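
/*
 * Note that "sti" enables interrupts only after the *next* instruction
 * has started executing, so no interrupt can be delivered between the
 * "sti" and the "hlt" above: a wakeup that arrives in that window
 * still terminates the hlt rather than being lost.
 */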

static inline __cpuidle void native_halt(void)
{
	asm volatile("hlt": : :"memory");
}

#endif

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>

static inline notrace unsigned long arch_local_save_flags(void)
{
	return native_save_fl();
}

static inline notrace void arch_local_irq_restore(unsigned long flags)
{
	native_restore_fl(flags);
}

static inline notrace void arch_local_irq_disable(void)
{
	native_irq_disable();
}

static inline notrace void arch_local_irq_enable(void)
{
	native_irq_enable();
}

/*
 * Used in the idle loop; "sti" takes effect only after the next
 * instruction has executed, so it cannot be separated from the
 * halt that follows it:
 */
static inline __cpuidle void arch_safe_halt(void)
{
	native_safe_halt();
}

/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
static inline __cpuidle void halt(void)
{
	native_halt();
}

/*
 * For spinlocks, etc:
 */
static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
	arch_local_irq_disable();
	return flags;
}
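
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();	(IF saved, then cleared)
 *	... access data shared with an interrupt handler ...
 *	arch_local_irq_restore(flags);	(IF restored, never forced on)
 */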
#else

#define ENABLE_INTERRUPTS(x)	sti
#define DISABLE_INTERRUPTS(x)	cli

#ifdef CONFIG_X86_64
#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(x)		pushfq; popq %rax
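/*
 * The saved flags end up in %rax; the "x" argument (the paravirt
 * clobber list) is unused in this native form, but %rax should be
 * treated as clobbered under either definition.
 */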
#endif
#endif
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */

#ifndef CONFIG_PARAVIRT_XXL
#ifdef __ASSEMBLY__
#ifdef CONFIG_X86_64
#define SWAPGS	swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK	swapgs

#define INTERRUPT_RETURN	jmp native_iret
#define USERGS_SYSRET64				\
	swapgs;					\
	sysretq;
#define USERGS_SYSRET32				\
	swapgs;					\
	sysretl
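
/*
 * sysretq (sysretl for a 32-bit return) resumes user mode with the
 * return RIP taken from %rcx and RFLAGS from %r11; the preceding
 * swapgs switches the %gs base back to the user value.
 */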

#else
#define INTERRUPT_RETURN		iret
#endif

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT_XXL */

#ifndef __ASSEMBLY__
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & X86_EFLAGS_IF);
}
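
/*
 * X86_EFLAGS_IF is bit 9 (0x200), so e.g. flags == 0x246 (IF set)
 * returns 0 here, while flags == 0x046 (IF clear) returns 1.
 */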

static inline int arch_irqs_disabled(void)
{
	unsigned long flags = arch_local_save_flags();

	return arch_irqs_disabled_flags(flags);
}
#endif /* !__ASSEMBLY__ */

#ifdef __ASSEMBLY__
#ifdef CONFIG_TRACE_IRQFLAGS
#  define TRACE_IRQS_ON		call trace_hardirqs_on_thunk;
#  define TRACE_IRQS_OFF	call trace_hardirqs_off_thunk;
#else
#  define TRACE_IRQS_ON
#  define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#  ifdef CONFIG_X86_64
#    define LOCKDEP_SYS_EXIT		call lockdep_sys_exit_thunk
#    define LOCKDEP_SYS_EXIT_IRQ \
	TRACE_IRQS_ON; \
	sti; \
	call lockdep_sys_exit_thunk; \
	cli; \
	TRACE_IRQS_OFF;
#  else
#    define LOCKDEP_SYS_EXIT \
	pushl %eax;				\
	pushl %ecx;				\
	pushl %edx;				\
	call lockdep_sys_exit;			\
	popl %edx;				\
	popl %ecx;				\
	popl %eax;
#    define LOCKDEP_SYS_EXIT_IRQ
#  endif
#else
#  define LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ
#endif
#endif /* __ASSEMBLY__ */

#endif /* _X86_IRQFLAGS_H_ */