#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/jump_label.h>
#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
#include <asm/bitops.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations. There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

/* How long a lock should spin before we consider blocking */
#define SPIN_THRESHOLD (1 << 15)

/*
 * Static key flipped at runtime when paravirtualized ticket locks are
 * enabled; consulted via static_key_false() in the lock fast paths.
 *
 * NOTE(review): static_key_false() is redeclared here even though
 * <linux/jump_label.h> is included above — presumably to pin the
 * __always_inline form for this header's users; confirm against the
 * jump_label header before removing.
 */
extern struct static_key paravirt_ticketlocks_enabled;
static __always_inline bool static_key_false(struct static_key *key);

#ifdef CONFIG_PARAVIRT_SPINLOCKS
/*
 * vcpu_is_preempted() - query the paravirt backend for whether the
 * given virtual CPU is currently preempted by the hypervisor.
 *
 * Defining the macro to the function's own name signals to generic
 * locking code that an architecture-specific implementation exists.
 * The call simply delegates to whatever hook the active paravirt
 * backend registered in pv_lock_ops.
 */
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	return pv_lock_ops.vcpu_is_preempted(cpu);
}
#endif

/* Spinlock (and paravirt slow path) implementation: generic qspinlock. */
#include <asm/qspinlock.h>

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks using the generic qrwlock with
 * x86 specific optimization.
 */

#include <asm/qrwlock.h>

/* The flags argument is ignored on x86; plain lock acquisition is used. */
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

/* While spinning, be polite to the sibling hyperthread / save power. */
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()

#endif /* _ASM_X86_SPINLOCK_H */