/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__

#include <asm/nospec-branch.h>

/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
#define __cpuidle __section(".cpuidle.text")

/*
 * Interrupt control:
 */

/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
extern __always_inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}

static __always_inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}

static __always_inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}

static inline __cpuidle void native_safe_halt(void)
{
	mds_idle_clear_cpu_buffers();
	asm volatile("sti; hlt": : :"memory");
}

static inline __cpuidle void native_halt(void)
{
	mds_idle_clear_cpu_buffers();
	asm volatile("hlt": : :"memory");
}

#endif

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>

static __always_inline unsigned long arch_local_save_flags(void)
{
	return native_save_fl();
}

static __always_inline void arch_local_irq_disable(void)
{
	native_irq_disable();
}

static __always_inline void arch_local_irq_enable(void)
{
	native_irq_enable();
}

/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 */
static inline __cpuidle void arch_safe_halt(void)
{
	native_safe_halt();
}

/*
 * Used when interrupts are already enabled or to
 * shutdown the processor:
 */
static inline __cpuidle void halt(void)
{
	native_halt();
}

/*
 * For spinlocks, etc:
 */
static __always_inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
	arch_local_irq_disable();
	return flags;
}
#else

#define ENABLE_INTERRUPTS(x)	sti
#define DISABLE_INTERRUPTS(x)	cli

#ifdef CONFIG_X86_64
#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(x)		pushfq; popq %rax
#endif

#define INTERRUPT_RETURN	jmp native_iret

#else
#define INTERRUPT_RETURN	iret
#endif

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT_XXL */

#ifndef __ASSEMBLY__
static __always_inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & X86_EFLAGS_IF);
}

static __always_inline int arch_irqs_disabled(void)
{
	unsigned long flags = arch_local_save_flags();

	return arch_irqs_disabled_flags(flags);
}

static __always_inline void arch_local_irq_restore(unsigned long flags)
{
	if (!arch_irqs_disabled_flags(flags))
		arch_local_irq_enable();
}
#else
#ifdef CONFIG_X86_64
#ifdef CONFIG_XEN_PV
#define SWAPGS	ALTERNATIVE "swapgs", "", X86_FEATURE_XENPV
#else
#define SWAPGS	swapgs
#endif
#endif
#endif /* !__ASSEMBLY__ */

#endif
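
/*
 * Usage sketch, assuming the non-paravirt definitions above: the usual
 * save/disable/restore pattern around a short critical section.  The
 * function and variable names below are hypothetical and only illustrate
 * how callers combine arch_local_irq_save() with arch_local_irq_restore(),
 * which re-enables interrupts only if they were enabled on entry.
 *
 *	static void example_update(unsigned long *counter)
 *	{
 *		unsigned long flags;
 *
 *		flags = arch_local_irq_save();	// save IF, then cli
 *		(*counter)++;			// must not be interrupted
 *		arch_local_irq_restore(flags);	// sti only if IF was set
 *	}
 */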