xref: /openbmc/linux/arch/arm64/include/asm/spinlock.h (revision d8d0da4e)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/qspinlock.h>
#include <asm/qrwlock.h>

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()	smp_mb()
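
/*
 * Illustrative sketch (not part of the original header): spin_lock()
 * only guarantees ACQUIRE ordering, so a caller that needs the lock
 * acquisition to also order *prior* accesses against the critical
 * section inserts a full barrier right after taking the lock. The lock
 * and critical section below are hypothetical; see
 * include/linux/spinlock.h for the authoritative contract.
 *
 *	spin_lock(&lock);
 *	smp_mb__after_spinlock();	// upgrade ACQUIRE to a full barrier
 *	// ... critical section ...
 *	spin_unlock(&lock);
 */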

/*
 * Changing this will break osq_lock() thanks to the call inside
 * smp_cond_load_relaxed().
 *
 * See:
 * https://lore.kernel.org/lkml/20200110100612.GC2827@hirez.programming.kicks-ass.net
 */
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	return false;
}
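
/*
 * Sketch of the constraint above (assuming the upstream osq_lock() in
 * kernel/locking/osq_lock.c at this revision): vcpu_is_preempted() is
 * evaluated inside the condition handed to smp_cond_load_relaxed(),
 * roughly:
 *
 *	if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
 *				  vcpu_is_preempted(node_cpu(node->prev))))
 *		return true;
 *
 * arm64 implements smp_cond_load_relaxed() as an event-based (WFE)
 * wait that is woken by writes to the watched variable, whereas a
 * non-constant vcpu_is_preempted() would rely on the hypervisor for a
 * wakeup; hence it must stay a compile-time false here, per the lore
 * thread linked above.
 */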

#endif /* __ASM_SPINLOCK_H */