/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__

/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
#define __cpuidle __attribute__((__section__(".cpuidle.text")))

/*
 * Interrupt control:
 */

static inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}

static inline void native_restore_fl(unsigned long flags)
{
	asm volatile("push %0 ; popf"
		     : /* no output */
		     : "g" (flags)
		     : "memory", "cc");
}

static inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}

static inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}

static inline __cpuidle void native_safe_halt(void)
{
	asm volatile("sti; hlt": : :"memory");
}

static inline __cpuidle void native_halt(void)
{
	asm volatile("hlt": : :"memory");
}

#endif

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>

static inline notrace unsigned long arch_local_save_flags(void)
{
	return native_save_fl();
}

static inline notrace void arch_local_irq_restore(unsigned long flags)
{
	native_restore_fl(flags);
}

static inline notrace void arch_local_irq_disable(void)
{
	native_irq_disable();
}

static inline notrace void arch_local_irq_enable(void)
{
	native_irq_enable();
}

/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 */
static inline __cpuidle void arch_safe_halt(void)
{
	native_safe_halt();
}
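/*
 * Illustrative sketch only, not part of this header: arch_safe_halt()
 * relies on the one-instruction interrupt shadow after "sti", so an
 * interrupt that becomes pending between the wakeup check and "hlt"
 * still wakes the CPU.  A simplified idle sequence built on the
 * helpers above might look like:
 *
 *	arch_local_irq_disable();
 *	if (!need_resched())
 *		arch_safe_halt();	// "sti; hlt": no lost-wakeup window
 *	else
 *		arch_local_irq_enable();
 */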
/*
 * Used when interrupts are already enabled or to
 * shutdown the processor:
 */
static inline __cpuidle void halt(void)
{
	native_halt();
}

/*
 * For spinlocks, etc:
 */
static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
	arch_local_irq_disable();
	return flags;
}
#else

#define ENABLE_INTERRUPTS(x)	sti
#define DISABLE_INTERRUPTS(x)	cli

#ifdef CONFIG_X86_64
#define SWAPGS	swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK	swapgs

#define PARAVIRT_ADJUST_EXCEPTION_FRAME	/*  */

#define INTERRUPT_RETURN	jmp native_iret
#define USERGS_SYSRET64				\
	swapgs;					\
	sysretq;
#define USERGS_SYSRET32				\
	swapgs;					\
	sysretl

#else
#define INTERRUPT_RETURN		iret
#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
#define GET_CR0_INTO_EAX		movl %cr0, %eax
#endif


#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & X86_EFLAGS_IF);
}

static inline int arch_irqs_disabled(void)
{
	unsigned long flags = arch_local_save_flags();

	return arch_irqs_disabled_flags(flags);
}
#endif /* !__ASSEMBLY__ */
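/*
 * Illustrative sketch only, not part of this header: the arch_local_*()
 * hooks above back the generic local_irq_save()/local_irq_restore()
 * macros from <linux/irqflags.h> (via their raw_* layer).  A typical
 * critical section in C code looks like:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// pushf/pop to read EFLAGS, then cli
 *	// ... touch data shared with an interrupt handler ...
 *	local_irq_restore(flags);	// push/popf restores the saved IF bit
 *
 * Because the saved IF state is restored rather than forced on, these
 * pairs nest safely; arch_irqs_disabled_flags() is how generic code
 * inspects a saved "flags" value.
 */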
#ifdef __ASSEMBLY__
#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON		call trace_hardirqs_on_thunk;
# define TRACE_IRQS_OFF		call trace_hardirqs_off_thunk;
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_X86_64
#  define LOCKDEP_SYS_EXIT	call lockdep_sys_exit_thunk
#  define LOCKDEP_SYS_EXIT_IRQ \
	TRACE_IRQS_ON; \
	sti; \
	call lockdep_sys_exit_thunk; \
	cli; \
	TRACE_IRQS_OFF;
# else
#  define LOCKDEP_SYS_EXIT \
	pushl %eax; \
	pushl %ecx; \
	pushl %edx; \
	call lockdep_sys_exit; \
	popl %edx; \
	popl %ecx; \
	popl %eax;
#  define LOCKDEP_SYS_EXIT_IRQ
# endif
#else
# define LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ
#endif
#endif /* __ASSEMBLY__ */

#endif
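/*
 * Illustrative sketch only, not part of this header: a hypothetical
 * 64-bit syscall-exit fragment in the style of the real users in
 * arch/x86/entry/entry_64.S, showing how the assembly-side macros
 * combine on the way back to user mode:
 *
 *	DISABLE_INTERRUPTS(CLBR_NONE)	// cli (or its paravirt patch site)
 *	TRACE_IRQS_OFF			// keep the irq-state tracing honest
 *	...				// work-pending checks
 *	LOCKDEP_SYS_EXIT		// lockdep hook: leaving the kernel
 *	TRACE_IRQS_ON			// interrupts come back on via sysret
 *	USERGS_SYSRET64			// swapgs; sysretq
 */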