xref: /openbmc/linux/kernel/locking/spinlock.c (revision b24413180f5600bcb3bb70fbed5cf186b60864bd)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame, contact the architecture maintainers.
 */

#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/export.h>

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
/*
 * The __lock_function inlines are taken from
 * include/linux/spinlock_api_smp.h
 */
#else
#define raw_read_can_lock(l)	read_can_lock(l)
#define raw_write_can_lock(l)	write_can_lock(l)

/*
 * Some architectures can relax in favour of the CPU owning the lock.
 */
#ifndef arch_read_relax
# define arch_read_relax(l)	cpu_relax()
#endif
#ifndef arch_write_relax
# define arch_write_relax(l)	cpu_relax()
#endif
#ifndef arch_spin_relax
# define arch_spin_relax(l)	cpu_relax()
#endif

/*
 * We build the __lock_function inlines here. They are too large for
 * inlining all over the place, but here there is only one user per
 * function, which embeds them into the calling _lock_function below.
 *
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptible if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 */
#define BUILD_LOCK_OPS(op, locktype)					\
void __lockfunc __raw_##op##_lock(locktype##_t *lock)			\
{									\
	for (;;) {							\
		preempt_disable();					\
		if (likely(do_raw_##op##_trylock(lock)))		\
			break;						\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
			arch_##op##_relax(&lock->raw_lock);		\
	}								\
	(lock)->break_lock = 0;						\
}									\
									\
unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock)	\
{									\
	unsigned long flags;						\
									\
	for (;;) {							\
		preempt_disable();					\
		local_irq_save(flags);					\
		if (likely(do_raw_##op##_trylock(lock)))		\
			break;						\
		local_irq_restore(flags);				\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
			arch_##op##_relax(&lock->raw_lock);		\
	}								\
	(lock)->break_lock = 0;						\
	return flags;							\
}									\
									\
void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock)		\
{									\
	_raw_##op##_lock_irqsave(lock);					\
}									\
									\
void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)		\
{									\
	unsigned long flags;						\
									\
	/*							*/	\
	/* Careful: we must exclude softirqs too, hence the	*/	\
	/* irq-disabling. We use the generic preemption-aware	*/	\
	/* function:						*/	\
	/**/								\
	flags = _raw_##op##_lock_irqsave(lock);				\
	local_bh_disable();						\
	local_irq_restore(flags);					\
}									\

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         __[spin|read|write]_lock()
 *         __[spin|read|write]_lock_irq()
 *         __[spin|read|write]_lock_irqsave()
 *         __[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, raw_spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
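/*
 * Illustration (not part of the original source): with op=spin and
 * locktype=raw_spinlock, the token pasting above generates, roughly:
 *
 *	void __lockfunc __raw_spin_lock(raw_spinlock_t *lock)
 *	{
 *		for (;;) {
 *			preempt_disable();
 *			if (likely(do_raw_spin_trylock(lock)))
 *				break;
 *			preempt_enable();
 *
 *			if (!(lock)->break_lock)
 *				(lock)->break_lock = 1;
 *			while (!raw_spin_can_lock(lock) && (lock)->break_lock)
 *				arch_spin_relax(&lock->raw_lock);
 *		}
 *		(lock)->break_lock = 0;
 *	}
 *
 * i.e. a preemption-friendly spin: trylock with preemption disabled,
 * and on failure re-enable preemption and spin "politely" while asking
 * the holder (via break_lock) to drop the lock as soon as it can.
 */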

#endif

#ifndef CONFIG_INLINE_SPIN_TRYLOCK
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
{
	return __raw_spin_trylock(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock);
#endif
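/*
 * Usage sketch (not part of the original source): callers normally
 * reach this through the spin_trylock() wrapper, which returns nonzero
 * on success and never spins. With a hypothetical lock "my_lock":
 *
 *	if (spin_trylock(&my_lock)) {
 *		... critical section ...
 *		spin_unlock(&my_lock);
 *	} else {
 *		... contended fallback, must not touch protected data ...
 *	}
 */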

#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
{
	return __raw_spin_trylock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock_bh);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
	__raw_spin_lock(lock);
}
EXPORT_SYMBOL(_raw_spin_lock);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
	return __raw_spin_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
{
	__raw_spin_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_BH
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
{
	__raw_spin_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_bh);
#endif

#ifdef CONFIG_UNINLINE_SPIN_UNLOCK
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
{
	__raw_spin_unlock(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
{
	__raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
#endif
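/*
 * Usage sketch (not part of the original source): the irqsave/
 * irqrestore pair is the safe choice when a lock may be taken from
 * both process and hard-irq context. With a hypothetical "my_lock":
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	... critical section, hard irqs off on this CPU ...
 *	spin_unlock_irqrestore(&my_lock, flags);
 *
 * flags preserves the caller's previous interrupt state, so the pair
 * nests correctly inside regions that already disabled interrupts.
 */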

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
{
	__raw_spin_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
{
	__raw_spin_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif

#ifndef CONFIG_INLINE_READ_TRYLOCK
int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
	return __raw_read_trylock(lock);
}
EXPORT_SYMBOL(_raw_read_trylock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK
void __lockfunc _raw_read_lock(rwlock_t *lock)
{
	__raw_read_lock(lock);
}
EXPORT_SYMBOL(_raw_read_lock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
{
	return __raw_read_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQ
void __lockfunc _raw_read_lock_irq(rwlock_t *lock)
{
	__raw_read_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irq);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_BH
void __lockfunc _raw_read_lock_bh(rwlock_t *lock)
{
	__raw_read_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_lock_bh);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK
void __lockfunc _raw_read_unlock(rwlock_t *lock)
{
	__raw_read_unlock(lock);
}
EXPORT_SYMBOL(_raw_read_unlock);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	__raw_read_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_read_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)
{
	__raw_read_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_irq);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_BH
void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)
{
	__raw_read_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_TRYLOCK
int __lockfunc _raw_write_trylock(rwlock_t *lock)
{
	return __raw_write_trylock(lock);
}
EXPORT_SYMBOL(_raw_write_trylock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK
void __lockfunc _raw_write_lock(rwlock_t *lock)
{
	__raw_write_lock(lock);
}
EXPORT_SYMBOL(_raw_write_lock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
{
	return __raw_write_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
void __lockfunc _raw_write_lock_irq(rwlock_t *lock)
{
	__raw_write_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_BH
void __lockfunc _raw_write_lock_bh(rwlock_t *lock)
{
	__raw_write_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_lock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK
void __lockfunc _raw_write_unlock(rwlock_t *lock)
{
	__raw_write_unlock(lock);
}
EXPORT_SYMBOL(_raw_write_unlock);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	__raw_write_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_write_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)
{
	__raw_write_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
{
	__raw_write_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC

void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nested);
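/*
 * Usage sketch (not part of the original source): the _nested variant
 * tells lockdep that taking two locks of the same class in a fixed
 * order is intentional. With two hypothetical objects of the same
 * class, "a" and "b":
 *
 *	spin_lock(&a->lock);
 *	spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	spin_unlock(&b->lock);
 *	spin_unlock(&a->lock);
 *
 * Without the subclass annotation lockdep would flag the second
 * acquisition as a potential same-class (AA) deadlock.
 */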

unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
						   int subclass)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
				do_raw_spin_lock_flags, &flags);
	return flags;
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);

void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock,
				     struct lockdep_map *nest_lock)
{
	preempt_disable();
	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nest_lock);
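/*
 * Usage sketch (not part of the original source): nest_lock covers the
 * "take many same-class locks under one outer lock" pattern. With a
 * hypothetical outer mutex serializing all the inner locks:
 *
 *	mutex_lock(&outer->mutex);
 *	list_for_each_entry(obj, &outer->objs, node)
 *		spin_lock_nest_lock(&obj->lock, &outer->mutex);
 *
 * lockdep then accepts an arbitrary number of held obj->lock
 * instances, because the outer lock guarantees two CPUs cannot
 * acquire them in conflicting orders at the same time.
 */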

#endif

notrace int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];

	return addr >= (unsigned long)__lock_text_start
	&& addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);
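/*
 * Usage sketch (not part of the original source): because __lockfunc
 * places these functions in a dedicated .spinlock.text section,
 * profiling code can test a sampled program counter against it and
 * attribute the sample to the caller instead of the lock code, roughly:
 *
 *	if (in_lock_functions(pc))
 *		pc = pc_of_caller(frame);	(pseudocode, made-up helper)
 *
 * This is why the header comment warns that changing these functions'
 * stack frames can break an architecture's profile_pc().
 */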