/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__

/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
#define __cpuidle __attribute__((__section__(".cpuidle.text")))

/*
 * Interrupt control:
 */

/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
extern inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}

static inline void native_restore_fl(unsigned long flags)
{
	asm volatile("push %0 ; popf"
		     : /* no output */
		     :"g" (flags)
		     :"memory", "cc");
}

static inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}

static inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}

static inline __cpuidle void native_safe_halt(void)
{
	asm volatile("sti; hlt": : :"memory");
}

static inline __cpuidle void native_halt(void)
{
	asm volatile("hlt": : :"memory");
}

#endif

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>

static inline notrace unsigned long arch_local_save_flags(void)
{
	return native_save_fl();
}

static inline notrace void arch_local_irq_restore(unsigned long flags)
{
	native_restore_fl(flags);
}

static inline notrace void arch_local_irq_disable(void)
{
	native_irq_disable();
}

static inline notrace void arch_local_irq_enable(void)
{
	native_irq_enable();
}

/*
 * Used in the idle loop; "sti" does not take effect until after the
 * instruction that follows it, so no interrupt can arrive between the
 * "sti" and the "hlt":
 */
static inline __cpuidle void arch_safe_halt(void)
{
	native_safe_halt();
}

/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
static inline __cpuidle void halt(void)
{
	native_halt();
}

/*
 * For spinlocks, etc:
 */
static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
	arch_local_irq_disable();
	return flags;
}
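/*
 * Usage sketch (illustrative, not part of the original header): generic
 * kernel code reaches these primitives through the local_irq_save() and
 * local_irq_restore() macros from <linux/irqflags.h>, which bottom out
 * in the arch_local_* helpers above.  A hypothetical critical section
 * (the function name is invented for illustration) looks like:
 *
 *	static void example_critical_section(void)
 *	{
 *		unsigned long flags;
 *
 *		flags = arch_local_irq_save();
 *		... code that must not race with local interrupts ...
 *		arch_local_irq_restore(flags);
 *	}
 *
 * arch_local_irq_save() samples EFLAGS before executing "cli", and the
 * restore puts the saved IF value back rather than unconditionally
 * re-enabling, so nesting stays correct: if interrupts were already off
 * on entry, they remain off on exit.
 */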
#else

#define ENABLE_INTERRUPTS(x)	sti
#define DISABLE_INTERRUPTS(x)	cli

#ifdef CONFIG_X86_64
#define SWAPGS	swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK	swapgs

#define PARAVIRT_ADJUST_EXCEPTION_FRAME	/*  */

#define INTERRUPT_RETURN	jmp native_iret
#define USERGS_SYSRET64				\
	swapgs;					\
	sysretq;
#define USERGS_SYSRET32				\
	swapgs;					\
	sysretl

#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(x)		pushfq; popq %rax
#endif
#else
#define INTERRUPT_RETURN		iret
#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
#define GET_CR0_INTO_EAX		movl %cr0, %eax
#endif


#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & X86_EFLAGS_IF);
}

static inline int arch_irqs_disabled(void)
{
	unsigned long flags = arch_local_save_flags();

	return arch_irqs_disabled_flags(flags);
}
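/*
 * Usage sketch (illustrative, not part of the original header): these
 * predicates back debug assertions in code that must run with
 * interrupts off, e.g.:
 *
 *	WARN_ON_ONCE(!arch_irqs_disabled());
 *
 * or, when a flags word has already been sampled:
 *
 *	unsigned long flags = arch_local_save_flags();
 *
 *	if (arch_irqs_disabled_flags(flags))
 *		return;
 *
 * Both checks inspect only X86_EFLAGS_IF in the saved EFLAGS image.
 */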
#endif /* !__ASSEMBLY__ */

#ifdef __ASSEMBLY__
#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON		call trace_hardirqs_on_thunk;
# define TRACE_IRQS_OFF		call trace_hardirqs_off_thunk;
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_X86_64
#  define LOCKDEP_SYS_EXIT	call lockdep_sys_exit_thunk
#  define LOCKDEP_SYS_EXIT_IRQ	\
	TRACE_IRQS_ON;		\
	sti;			\
	call lockdep_sys_exit_thunk;	\
	cli;			\
	TRACE_IRQS_OFF;
# else
#  define LOCKDEP_SYS_EXIT	\
	pushl %eax;		\
	pushl %ecx;		\
	pushl %edx;		\
	call lockdep_sys_exit;	\
	popl %edx;		\
	popl %ecx;		\
	popl %eax;
#  define LOCKDEP_SYS_EXIT_IRQ
# endif
#else
# define LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ
#endif
#endif /* __ASSEMBLY__ */

#endif