// SPDX-License-Identifier: GPL-2.0
/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/jump_label.h>

#include <asm/paravirt.h>

/*
 * Callee-save wrapper around the native unlock fast path, so the unlock
 * hook can be patched without clobbering caller-saved registers.
 */
__visible void __native_queued_spin_unlock(struct qspinlock *lock)
{
	native_queued_spin_unlock(lock);
}
PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);

/* True while the unlock hook still points at the native implementation. */
bool pv_is_native_spin_unlock(void)
{
	return pv_lock_ops.queued_spin_unlock.func ==
		__raw_callee_save___native_queued_spin_unlock;
}

/* On bare metal a CPU is never preempted by a hypervisor. */
__visible bool __native_vcpu_is_preempted(long cpu)
{
	return false;
}
PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);

bool pv_is_native_vcpu_is_preempted(void)
{
	return pv_lock_ops.vcpu_is_preempted.func ==
		__raw_callee_save___native_vcpu_is_preempted;
}

/* Default to the native qspinlock; hypervisor guests override these at boot. */
struct pv_lock_ops pv_lock_ops = {
#ifdef CONFIG_SMP
	.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
	.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
	.wait = paravirt_nop,
	.kick = paravirt_nop,
	.vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
#endif /* SMP */
};
EXPORT_SYMBOL(pv_lock_ops);
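
/*
 * Illustrative only (not part of the upstream file): how a hypervisor
 * backend replaces the native defaults above with the paravirt qspinlock
 * slow path.  example_wait()/example_kick()/example_spinlock_init() are
 * hypothetical stand-ins for hypercall-backed helpers such as KVM's
 * kvm_wait()/kvm_kick_cpu().  A minimal sketch, assuming
 * CONFIG_PARAVIRT_SPINLOCKS; kept compiled out.
 */
#if 0
/* Halt this vCPU while *ptr == val, until kicked by the lock holder. */
static void example_wait(u8 *ptr, u8 val)
{
}

/* Wake the vCPU parked in example_wait(). */
static void example_kick(int cpu)
{
}

static void __init example_spinlock_init(void)
{
	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = example_wait;
	pv_lock_ops.kick = example_kick;
}
#endif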