paravirt-spinlocks.c (498495dba268b20e8eadd7fe93c140c68b6cc9d2) | paravirt-spinlocks.c (5c83511bdb9832c86be20fb86b783356e2f58062) |
---|---|
// SPDX-License-Identifier: GPL-2.0
/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/jump_label.h>

#include <asm/paravirt.h>

/*
 * Native (bare-metal) queued-spinlock unlock: a plain forward to
 * native_queued_spin_unlock().  It exists as a separate __visible
 * function so a callee-save thunk can be generated for it below.
 */
__visible void __native_queued_spin_unlock(struct qspinlock *lock)
{
	native_queued_spin_unlock(lock);
}
/*
 * Generates __raw_callee_save___native_queued_spin_unlock, the symbol
 * compared against in pv_is_native_spin_unlock() below.
 */
PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);

/*
 * Return true iff the paravirt queued_spin_unlock slot still points at
 * the native implementation (i.e. no hypervisor has replaced it).
 */
bool pv_is_native_spin_unlock(void)
{
	return pv_lock_ops.queued_spin_unlock.func ==
		__raw_callee_save___native_queued_spin_unlock;
}

/*
 * On bare metal there is no hypervisor, so a vCPU can never be reported
 * as preempted: unconditionally false, regardless of @cpu.
 */
__visible bool __native_vcpu_is_preempted(long cpu)
{
	return false;
}
/* Emits __raw_callee_save___native_vcpu_is_preempted (compared below). */
PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);

/*
 * Return true iff the paravirt vcpu_is_preempted slot still points at
 * the native (always-false) implementation.
 */
bool pv_is_native_vcpu_is_preempted(void)
{
	return pv_lock_ops.vcpu_is_preempted.func ==
		__raw_callee_save___native_vcpu_is_preempted;
}
35 36struct pv_lock_ops pv_lock_ops = { 37#ifdef CONFIG_SMP 38 .queued_spin_lock_slowpath = native_queued_spin_lock_slowpath, 39 .queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock), 40 .wait = paravirt_nop, 41 .kick = paravirt_nop, 42 .vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted), 43#endif /* SMP */ 44}; 45EXPORT_SYMBOL(pv_lock_ops); | |