#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__
/*
 * Interrupt control:
 */

static inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}

static inline void native_restore_fl(unsigned long flags)
{
	asm volatile("push %0 ; popf"
		     : /* no output */
		     : "g" (flags)
		     : "memory", "cc");
}

static inline void native_irq_disable(void)
{
	asm volatile("cli" : : : "memory");
}

static inline void native_irq_enable(void)
{
	asm volatile("sti" : : : "memory");
}

static inline void native_safe_halt(void)
{
	asm volatile("sti; hlt" : : : "memory");
}

static inline void native_halt(void)
{
	asm volatile("hlt" : : : "memory");
}

#endif
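
/*
 * Illustrative sketch (editorial example, not part of this header):
 * with CONFIG_PARAVIRT disabled, the arch_local_* wrappers below boil
 * down to the native helpers above, which pair up like so:
 *
 *	unsigned long flags = native_save_fl();	-- pushf ; pop
 *	native_irq_disable();			-- cli
 *	-- code that must not be interrupted --
 *	native_restore_fl(flags);		-- push ; popf: IF is set
 *						   again only if it was set
 *						   in the saved flags
 */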

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>

static inline notrace unsigned long arch_local_save_flags(void)
{
	return native_save_fl();
}

static inline notrace void arch_local_irq_restore(unsigned long flags)
{
	native_restore_fl(flags);
}

static inline notrace void arch_local_irq_disable(void)
{
	native_irq_disable();
}

static inline notrace void arch_local_irq_enable(void)
{
	native_irq_enable();
}

/*
 * Used in the idle loop; "sti" holds off interrupt delivery until
 * after the next instruction, so an interrupt cannot sneak in
 * between "sti" and "hlt":
 */
static inline void arch_safe_halt(void)
{
	native_safe_halt();
}

/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
static inline void halt(void)
{
	native_halt();
}

/*
 * For spinlocks, etc:
 */
static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
	arch_local_irq_disable();
	return flags;
}
#else

#define ENABLE_INTERRUPTS(x)	sti
#define DISABLE_INTERRUPTS(x)	cli

#ifdef CONFIG_X86_64
#define SWAPGS	swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack). So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK	swapgs

#define PARAVIRT_ADJUST_EXCEPTION_FRAME	/*  */

#define INTERRUPT_RETURN	jmp native_iret
#define USERGS_SYSRET64				\
	swapgs;					\
	sysretq;
#define USERGS_SYSRET32				\
	swapgs;					\
	sysretl
#define ENABLE_INTERRUPTS_SYSEXIT32		\
	swapgs;					\
	sti;					\
	sysexit

#else
#define INTERRUPT_RETURN		iret
#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
#define GET_CR0_INTO_EAX		movl %cr0, %eax
#endif

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & X86_EFLAGS_IF);
}

static inline int arch_irqs_disabled(void)
{
	unsigned long flags = arch_local_save_flags();

	return arch_irqs_disabled_flags(flags);
}
#endif /* !__ASSEMBLY__ */
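
/*
 * Illustrative sketch (editorial example, not part of this header):
 * generic kernel code normally reaches the helpers above through the
 * local_irq_save()/local_irq_restore() wrappers in <linux/irqflags.h>;
 * the underlying pattern is:
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();	-- save EFLAGS, then cli
 *	-- critical section --
 *	arch_local_irq_restore(flags);	-- IF comes back only if it was
 *					   set on entry, so the pair
 *					   nests safely
 *
 * arch_irqs_disabled_flags() only tests X86_EFLAGS_IF in a saved copy,
 * so it can be applied to the cookie without reading the live flags
 * register.
 */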

#ifdef __ASSEMBLY__
#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON		call trace_hardirqs_on_thunk;
# define TRACE_IRQS_OFF		call trace_hardirqs_off_thunk;
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_X86_64
#  define ARCH_LOCKDEP_SYS_EXIT	call lockdep_sys_exit_thunk
#  define ARCH_LOCKDEP_SYS_EXIT_IRQ	\
	TRACE_IRQS_ON; \
	sti; \
	SAVE_EXTRA_REGS; \
	LOCKDEP_SYS_EXIT; \
	RESTORE_EXTRA_REGS; \
	cli; \
	TRACE_IRQS_OFF;
# else
#  define ARCH_LOCKDEP_SYS_EXIT	\
	pushl %eax; \
	pushl %ecx; \
	pushl %edx; \
	call lockdep_sys_exit; \
	popl %edx; \
	popl %ecx; \
	popl %eax;
#  define ARCH_LOCKDEP_SYS_EXIT_IRQ
# endif
# define LOCKDEP_SYS_EXIT	ARCH_LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ	ARCH_LOCKDEP_SYS_EXIT_IRQ
#else
# define LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ
#endif
#endif /* __ASSEMBLY__ */

#endif /* _X86_IRQFLAGS_H_ */
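
/*
 * Illustrative sketch (hypothetical entry-code fragment, not taken from
 * this header): under CONFIG_TRACE_IRQFLAGS the TRACE_IRQS_* macros are
 * meant to bracket the real sti/cli, mirroring the
 * ARCH_LOCKDEP_SYS_EXIT_IRQ sequence above, so that lockdep's view of
 * the IF flag tracks the hardware:
 *
 *	TRACE_IRQS_ON		-- tell lockdep IRQs are about to turn on
 *	sti
 *	...
 *	cli
 *	TRACE_IRQS_OFF		-- tell lockdep IRQs are off again
 */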