/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/jump_label.h>

#include <asm/paravirt.h>

__visible void __native_queued_spin_unlock(struct qspinlock *lock)
{
	native_queued_spin_unlock(lock);
}
PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);

/* True if queued_spin_unlock is still the native callee-save thunk. */
bool pv_is_native_spin_unlock(void)
{
	return pv_lock_ops.queued_spin_unlock.func ==
		__raw_callee_save___native_queued_spin_unlock;
}

/* On native hardware a vCPU is never preempted by a hypervisor. */
__visible bool __native_vcpu_is_preempted(long cpu)
{
	return false;
}
PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);

/* True if vcpu_is_preempted is still the native callee-save thunk. */
bool pv_is_native_vcpu_is_preempted(void)
{
	return pv_lock_ops.vcpu_is_preempted.func ==
		__raw_callee_save___native_vcpu_is_preempted;
}

struct pv_lock_ops pv_lock_ops = {
#ifdef CONFIG_SMP
	.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
	.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
	.wait = paravirt_nop,
	.kick = paravirt_nop,
	.vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
#endif /* SMP */
};
EXPORT_SYMBOL(pv_lock_ops);