xref: /openbmc/linux/arch/x86/include/asm/irqflags.h (revision bb8985586b7a906e116db835c64773b7a7d51663)
1*bb898558SAl Viro #ifndef _X86_IRQFLAGS_H_
2*bb898558SAl Viro #define _X86_IRQFLAGS_H_
3*bb898558SAl Viro 
4*bb898558SAl Viro #include <asm/processor-flags.h>
5*bb898558SAl Viro 
6*bb898558SAl Viro #ifndef __ASSEMBLY__
7*bb898558SAl Viro /*
8*bb898558SAl Viro  * Interrupt control:
9*bb898558SAl Viro  */
10*bb898558SAl Viro 
11*bb898558SAl Viro static inline unsigned long native_save_fl(void)
12*bb898558SAl Viro {
13*bb898558SAl Viro 	unsigned long flags;
14*bb898558SAl Viro 
15*bb898558SAl Viro 	asm volatile("# __raw_save_flags\n\t"
16*bb898558SAl Viro 		     "pushf ; pop %0"
17*bb898558SAl Viro 		     : "=g" (flags)
18*bb898558SAl Viro 		     : /* no input */
19*bb898558SAl Viro 		     : "memory");
20*bb898558SAl Viro 
21*bb898558SAl Viro 	return flags;
22*bb898558SAl Viro }
23*bb898558SAl Viro 
/*
 * Write @flags back into EFLAGS via push/popf, undoing an earlier
 * native_save_fl().  Since this rewrites X86_EFLAGS_IF it may enable
 * or disable interrupts as a side effect; hence the "cc" and "memory"
 * clobbers acting as a compiler barrier.
 *
 * NOTE(review): "g" looks safe for an *input* to "push" (unlike the
 * "pop" output case) since push reads its operand before moving the
 * stack pointer -- confirm against the instruction reference.
 */
static inline void native_restore_fl(unsigned long flags)
{
	asm volatile("push %0 ; popf"
		     : /* no output */
		     :"g" (flags)
		     :"memory", "cc");
}
31*bb898558SAl Viro 
/*
 * Disable maskable hardware interrupts on this CPU (clear EFLAGS.IF).
 * The "memory" clobber is a compiler barrier so memory accesses cannot
 * be reordered across the interrupt-state change.
 */
static inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}
36*bb898558SAl Viro 
/*
 * Enable maskable hardware interrupts on this CPU (set EFLAGS.IF).
 * "memory" clobber = compiler barrier, as in native_irq_disable().
 */
static inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}
41*bb898558SAl Viro 
/*
 * Atomically enable interrupts and halt.  "sti" does not take effect
 * until after the following instruction, so no interrupt can arrive
 * in the gap between the sti and the hlt; the hlt is woken by the
 * first interrupt delivered.  Intended for idle with IRQs disabled.
 */
static inline void native_safe_halt(void)
{
	asm volatile("sti; hlt": : :"memory");
}
46*bb898558SAl Viro 
/*
 * Halt the CPU until the next interrupt, without touching EFLAGS.IF.
 * If interrupts are disabled this effectively stops the processor
 * (only NMI/SMI/reset get through).
 */
static inline void native_halt(void)
{
	asm volatile("hlt": : :"memory");
}
51*bb898558SAl Viro 
52*bb898558SAl Viro #endif
53*bb898558SAl Viro 
54*bb898558SAl Viro #ifdef CONFIG_PARAVIRT
55*bb898558SAl Viro #include <asm/paravirt.h>
56*bb898558SAl Viro #else
57*bb898558SAl Viro #ifndef __ASSEMBLY__
58*bb898558SAl Viro 
/*
 * Non-CONFIG_PARAVIRT build: the raw irq-flags API maps directly onto
 * the native_* primitives above (the paravirt build gets these from
 * <asm/paravirt.h> instead).  Returns the current EFLAGS word.
 */
static inline unsigned long __raw_local_save_flags(void)
{
	return native_save_fl();
}
63*bb898558SAl Viro 
/*
 * Restore a flags word previously obtained from
 * __raw_local_save_flags()/__raw_local_irq_save().
 */
static inline void raw_local_irq_restore(unsigned long flags)
{
	native_restore_fl(flags);
}
68*bb898558SAl Viro 
/* Disable interrupts on the local CPU (native "cli"). */
static inline void raw_local_irq_disable(void)
{
	native_irq_disable();
}
73*bb898558SAl Viro 
/* Enable interrupts on the local CPU (native "sti"). */
static inline void raw_local_irq_enable(void)
{
	native_irq_enable();
}
78*bb898558SAl Viro 
/*
 * Used in the idle loop; the instruction immediately following "sti"
 * executes before interrupts are recognized, so "sti; hlt" leaves no
 * window in which a wakeup interrupt could be missed:
 */
static inline void raw_safe_halt(void)
{
	/* Non-paravirt build: straight to the native sti;hlt pair. */
	native_safe_halt();
}
87*bb898558SAl Viro 
88*bb898558SAl Viro /*
89*bb898558SAl Viro  * Used when interrupts are already enabled or to
90*bb898558SAl Viro  * shutdown the processor:
91*bb898558SAl Viro  */
static inline void halt(void)
{
	/* Plain hlt: does not change EFLAGS.IF (see native_halt). */
	native_halt();
}
96*bb898558SAl Viro 
97*bb898558SAl Viro /*
98*bb898558SAl Viro  * For spinlocks, etc:
99*bb898558SAl Viro  */
/*
 * Save the current flags, then disable interrupts.  Returns the
 * pre-disable flags word so the caller can hand it back to
 * raw_local_irq_restore() to reinstate the previous interrupt state.
 */
static inline unsigned long __raw_local_irq_save(void)
{
	unsigned long flags = __raw_local_save_flags();

	raw_local_irq_disable();

	return flags;
}
108*bb898558SAl Viro #else
109*bb898558SAl Viro 
/*
 * Assembly (__ASSEMBLY__) versions for the non-paravirt build: plain
 * instructions.  The argument is ignored here; it keeps call sites
 * source-compatible with the CONFIG_PARAVIRT definitions.
 */
#define ENABLE_INTERRUPTS(x)	sti
#define DISABLE_INTERRUPTS(x)	cli
112*bb898558SAl Viro 
#ifdef CONFIG_X86_64
/* Swap the user/kernel GS base on kernel entry/exit (x86_64 only). */
#define SWAPGS	swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK	swapgs

/* Nothing to fix up in the native case; paravirt defines its own. */
#define PARAVIRT_ADJUST_EXCEPTION_FRAME	/*  */

/* Return from interrupt/exception. */
#define INTERRUPT_RETURN	iretq
/* Return to 64-bit user mode: restore user GS base, then sysretq. */
#define USERGS_SYSRET64				\
	swapgs;					\
	sysretq;
/* Return to 32-bit (compat) user mode: restore user GS, sysretl. */
#define USERGS_SYSRET32				\
	swapgs;					\
	sysretl
/* Compat sysexit path: restore user GS, re-enable IRQs, sysexit. */
#define ENABLE_INTERRUPTS_SYSEXIT32		\
	swapgs;					\
	sti;					\
	sysexit

#else
/* 32-bit native equivalents (no GS swap needed). */
#define INTERRUPT_RETURN		iret
#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
#define GET_CR0_INTO_EAX		movl %cr0, %eax
#endif
145*bb898558SAl Viro 
146*bb898558SAl Viro 
147*bb898558SAl Viro #endif /* __ASSEMBLY__ */
148*bb898558SAl Viro #endif /* CONFIG_PARAVIRT */
149*bb898558SAl Viro 
150*bb898558SAl Viro #ifndef __ASSEMBLY__
/*
 * Statement-macro forms of the flags API: these assign into the
 * caller-supplied "flags" lvalue instead of returning a value, which
 * is why they are macros rather than functions.
 */
#define raw_local_save_flags(flags)				\
	do { (flags) = __raw_local_save_flags(); } while (0)

#define raw_local_irq_save(flags)				\
	do { (flags) = __raw_local_irq_save(); } while (0)
156*bb898558SAl Viro 
157*bb898558SAl Viro static inline int raw_irqs_disabled_flags(unsigned long flags)
158*bb898558SAl Viro {
159*bb898558SAl Viro 	return !(flags & X86_EFLAGS_IF);
160*bb898558SAl Viro }
161*bb898558SAl Viro 
/*
 * Nonzero iff interrupts are disabled on the local CPU right now
 * (reads the live EFLAGS and tests its IF bit).
 */
static inline int raw_irqs_disabled(void)
{
	unsigned long flags = __raw_local_save_flags();

	return raw_irqs_disabled_flags(flags);
}
168*bb898558SAl Viro 
169*bb898558SAl Viro #else
170*bb898558SAl Viro 
#ifdef CONFIG_X86_64
/*
 * Lockdep hook for the syscall/interrupt exit path.  On 64-bit the
 * register save/restore lives in the thunk, so the plain variant is a
 * single call.
 */
#define ARCH_LOCKDEP_SYS_EXIT		call lockdep_sys_exit_thunk
/*
 * Exit-with-IRQs-off variant: briefly enables interrupts around the
 * call (with matching TRACE_IRQS_ON/OFF annotations) and saves and
 * restores the remaining registers via SAVE_REST/RESTORE_REST.
 */
#define ARCH_LOCKDEP_SYS_EXIT_IRQ	\
	TRACE_IRQS_ON; \
	sti; \
	SAVE_REST; \
	LOCKDEP_SYS_EXIT; \
	RESTORE_REST; \
	cli; \
	TRACE_IRQS_OFF;

#else
/*
 * 32-bit: call lockdep_sys_exit directly, hand-saving the
 * caller-clobbered registers around the call.
 */
#define ARCH_LOCKDEP_SYS_EXIT			\
	pushl %eax;				\
	pushl %ecx;				\
	pushl %edx;				\
	call lockdep_sys_exit;			\
	popl %edx;				\
	popl %ecx;				\
	popl %eax;

/* No IRQ-exit variant on 32-bit. */
#define ARCH_LOCKDEP_SYS_EXIT_IRQ
#endif
194*bb898558SAl Viro 
#ifdef CONFIG_TRACE_IRQFLAGS
/* Inform lockdep's irq-state tracing that hardirqs went on/off. */
#  define TRACE_IRQS_ON		call trace_hardirqs_on_thunk;
#  define TRACE_IRQS_OFF	call trace_hardirqs_off_thunk;
#else
#  define TRACE_IRQS_ON
#  define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* Lockdep syscall-exit hooks; empty when lock debugging is off. */
#  define LOCKDEP_SYS_EXIT	ARCH_LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ	ARCH_LOCKDEP_SYS_EXIT_IRQ
# else
#  define LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ
# endif
209*bb898558SAl Viro 
210*bb898558SAl Viro #endif /* __ASSEMBLY__ */
211*bb898558SAl Viro #endif
212