/* SPDX-License-Identifier: GPL-2.0
 *
 * include/asm-sh/spinlock-cas.h
 *
 * Copyright (C) 2015 SEI
 */
#ifndef __ASM_SH_SPINLOCK_CAS_H
#define __ASM_SH_SPINLOCK_CAS_H

#include <asm/barrier.h>
#include <asm/processor.h>

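/*
 * Thin wrapper around the "cas.l" compare-and-swap instruction
 * (SH-J2): the longword at @r0 is compared with old; if they match,
 * new is stored there. Either way the instruction leaves the previous
 * memory value in the register holding new, so the return value
 * equals old exactly when the swap took place. For example,
 * __sl_cas(&l, 1, 0) returns 1 iff it atomically changed l from 1 to 0.
 */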
static inline unsigned __sl_cas(volatile unsigned *p, unsigned old, unsigned new)
{
	__asm__ __volatile__("cas.l %1,%0,@r0"
		: "+r"(new)
		: "r"(old), "z"(p)
		: "t", "memory" );
	return new;
}

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

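/*
 * A lock value of 1 means unlocked and 0 means locked, hence the
 * "<= 0" test below.
 */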
#define arch_spin_is_locked(x)		((x)->lock <= 0)

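/*
 * Acquire: spin until the CAS 1 -> 0 succeeds. __sl_cas() returns the
 * previous value, so a nonzero result means the lock was free and is
 * now ours.
 */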
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (!__sl_cas(&lock->lock, 1, 0));
}

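/* Release: the holder sees 0, so the CAS 0 -> 1 always stores 1. */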
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__sl_cas(&lock->lock, 0, 1);
}

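/*
 * One attempt at the 1 -> 0 transition; the returned previous value
 * is nonzero exactly when the lock was acquired.
 */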
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __sl_cas(&lock->lock, 1, 0);
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no
 * interrupt writers. For those circumstances we can "mix" irq-safe
 * locks - any writer needs to get an irq-safe write-lock, but readers
 * can get non-irqsafe read-locks.
 */

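/*
 * The counter starts at RW_LOCK_BIAS. Each reader decrements it by
 * one, and a writer claims the whole bias at once (BIAS -> 0), so a
 * writer can only get in while no readers or writers are present.
 * A reader retries while the count is zero (writer active) or while
 * the CAS races with another update.
 */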
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned old;
	do old = rw->lock;
	while (!old || __sl_cas(&rw->lock, old, old-1) != old);
}

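/* Drop a reader: increment the count back, retrying on CAS races. */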
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned old;
	do old = rw->lock;
	while (__sl_cas(&rw->lock, old, old+1) != old);
}

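/*
 * A writer must take the full bias in one step (BIAS -> 0); the CAS
 * only succeeds once every reader has left and no writer holds it.
 */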
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (__sl_cas(&rw->lock, RW_LOCK_BIAS, 0) != RW_LOCK_BIAS);
}

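/* Restore the full bias; the holder sees the count at 0. */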
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__sl_cas(&rw->lock, 0, RW_LOCK_BIAS);
}

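/*
 * Fails only when a writer holds the lock (count is 0); otherwise the
 * CAS is retried until a reader slot is taken.
 */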
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned old;
	do old = rw->lock;
	while (old && __sl_cas(&rw->lock, old, old-1) != old);
	return !!old;
}

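/* One shot at the BIAS -> 0 transition; true when it succeeded. */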
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __sl_cas(&rw->lock, RW_LOCK_BIAS, 0) == RW_LOCK_BIAS;
}

#endif /* __ASM_SH_SPINLOCK_CAS_H */