xref: /openbmc/linux/arch/alpha/include/asm/spinlock.h (revision b24413180f5600bcb3bb70fbed5cf186b60864bd)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ALPHA_SPINLOCK_H
3 #define _ALPHA_SPINLOCK_H
4 
5 #include <linux/kernel.h>
6 #include <asm/current.h>
7 #include <asm/barrier.h>
8 #include <asm/processor.h>
9 
10 /*
11  * Simple spin lock operations.  There are two variants, one clears IRQ's
12  * on the local processor, one does not.
13  *
14  * We make no fairness assumptions. They have a cost.
15  */
16 
/* Alpha cannot take advantage of the saved IRQ flags while spinning,
 * so the _flags variant is just the plain lock operation. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
/* The lock word is nonzero while some CPU holds the lock. */
#define arch_spin_is_locked(x)	((x)->lock != 0)
19 
20 static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
21 {
22         return lock.lock == 0;
23 }
24 
/*
 * Release the spinlock.  The mb() orders all memory accesses made
 * inside the critical section before the plain store that publishes
 * the lock as free (lock word 0 == unlocked).
 */
static inline void arch_spin_unlock(arch_spinlock_t * lock)
{
	mb();
	lock->lock = 0;
}
30 
/*
 * Acquire the spinlock with an Alpha load-locked/store-conditional
 * (ldl_l/stl_c) sequence.
 *
 * Fast path: ldl_l loads the lock word; if it is nonzero (held),
 * branch to the out-of-line spin loop.  Otherwise try to store 1
 * with stl_c; a failed conditional store leaves 0 in %0 and also
 * branches out of line.  The spin loop (in .subsection 2, kept off
 * the hot path) polls with a plain ldl until the lock reads as free,
 * then jumps back to retry the locked sequence.  The trailing mb
 * orders the lock acquisition before the critical section.
 */
static inline void arch_spin_lock(arch_spinlock_t * lock)
{
	long tmp;

	__asm__ __volatile__(
	"1:	ldl_l	%0,%1\n"
	"	bne	%0,2f\n"
	"	lda	%0,1\n"
	"	stl_c	%0,%1\n"
	"	beq	%0,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	ldl	%0,%1\n"
	"	bne	%0,2b\n"
	"	br	1b\n"
	".previous"
	: "=&r" (tmp), "=m" (lock->lock)
	: "m"(lock->lock) : "memory");
}
50 
/*
 * Single attempt to take the lock: atomically set bit 0 of the lock
 * word via test_and_set_bit().  Returns nonzero iff the bit was
 * previously clear, i.e. we got the lock.
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return !test_and_set_bit(0, &lock->lock);
}
55 
56 /***********************************************************/
57 
58 static inline int arch_read_can_lock(arch_rwlock_t *lock)
59 {
60 	return (lock->lock & 1) == 0;
61 }
62 
63 static inline int arch_write_can_lock(arch_rwlock_t *lock)
64 {
65 	return lock->lock == 0;
66 }
67 
/*
 * Take a read lock.  Bit 0 of the lock word is the writer bit
 * (blbs = branch if low bit set); readers account themselves in
 * units of 2 so they never touch bit 0 — here subl subtracts 2,
 * and arch_read_unlock adds it back.
 *
 * ldl_l/stl_c retry loop: if a writer holds the lock, or the
 * conditional store fails (stl_c leaves 0 in %1), spin out of line
 * (.subsection 2) with plain loads until the writer bit clears,
 * then retry.  The mb orders the acquisition before the critical
 * section.
 */
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	long regx;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	blbs	%1,6f\n"
	"	subl	%1,2,%1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	"	mb\n"
	".subsection 2\n"
	"6:	ldl	%1,%0\n"
	"	blbs	%1,6b\n"
	"	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}
87 
/*
 * Take the write lock.  A writer needs the whole lock word to be
 * zero (no readers, no writer) and then stores 1 to claim it.
 *
 * ldl_l/stl_c retry loop: a nonzero word, or a failed conditional
 * store (stl_c leaves 0 in %1), falls into the out-of-line spin
 * loop (.subsection 2) that polls with plain loads until the word
 * reads as zero, then retries.  The mb orders the acquisition
 * before the critical section.
 */
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	long regx;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	bne	%1,6f\n"
	"	lda	%1,1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	"	mb\n"
	".subsection 2\n"
	"6:	ldl	%1,%0\n"
	"	bne	%1,6b\n"
	"	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}
107 
/*
 * Single attempt to take a read lock.  Returns nonzero on success,
 * 0 if a writer currently holds the lock.
 *
 * success is preset to 0; if the writer bit (bit 0) is set we skip
 * straight to "2:" and return 0.  Otherwise we compute the new
 * reader count (current value minus 2) and attempt the conditional
 * store; on sc success stl_c leaves a nonzero value in %2, which is
 * what gets returned.  A failed stl_c (0 in %2) branches to the
 * out-of-line "6:" stub, which simply retries from the top — a
 * trylock only gives up when a writer is seen, not on sc failure.
 * The mb at "2:" orders a successful acquisition before the
 * critical section.
 */
static inline int arch_read_trylock(arch_rwlock_t * lock)
{
	long regx;
	int success;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	lda	%2,0\n"
	"	blbs	%1,2f\n"
	"	subl	%1,2,%2\n"
	"	stl_c	%2,%0\n"
	"	beq	%2,6f\n"
	"2:	mb\n"
	".subsection 2\n"
	"6:	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx), "=&r" (success)
	: "m" (*lock) : "memory");

	return success;
}
129 
/*
 * Single attempt to take the write lock.  Returns nonzero on
 * success, 0 if the lock word is busy (readers or a writer).
 *
 * success is preset to 0; a nonzero lock word branches straight to
 * "2:" and returns 0.  Otherwise we try to store 1 with stl_c; on
 * sc success %2 is left nonzero and returned.  A failed stl_c
 * (0 in %2) branches to the out-of-line "6:" stub and retries from
 * the top — only a busy lock, not sc failure, makes trylock give
 * up.  The mb at "2:" orders a successful acquisition before the
 * critical section.
 */
static inline int arch_write_trylock(arch_rwlock_t * lock)
{
	long regx;
	int success;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	lda	%2,0\n"
	"	bne	%1,2f\n"
	"	lda	%2,1\n"
	"	stl_c	%2,%0\n"
	"	beq	%2,6f\n"
	"2:	mb\n"
	".subsection 2\n"
	"6:	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx), "=&r" (success)
	: "m" (*lock) : "memory");

	return success;
}
151 
/*
 * Drop a read lock.  The leading mb orders the critical section
 * before the release.  The ldl_l/stl_c loop adds 2 back to the lock
 * word, undoing the subl in arch_read_lock; a failed conditional
 * store (stl_c leaves 0 in %1) branches to the out-of-line "6:"
 * stub and retries until the update sticks.
 */
static inline void arch_read_unlock(arch_rwlock_t * lock)
{
	long regx;
	__asm__ __volatile__(
	"	mb\n"
	"1:	ldl_l	%1,%0\n"
	"	addl	%1,2,%1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	".subsection 2\n"
	"6:	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}
167 
/*
 * Drop the write lock.  Only the writer can hold the lock here, so
 * a plain store of 0 suffices; the mb() orders the critical section
 * before the word is seen as free.
 */
static inline void arch_write_unlock(arch_rwlock_t * lock)
{
	mb();
	lock->lock = 0;
}
173 
/* As with arch_spin_lock_flags(), the saved IRQ flags are unused on
 * Alpha: the _flags rwlock variants map to the plain lock routines. */
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
176 
177 #endif /* _ALPHA_SPINLOCK_H */
178