xref: /openbmc/linux/kernel/locking/spinlock.c (revision 6aa7de05)
1 /*
2  * Copyright (2004) Linus Torvalds
3  *
4  * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
5  *
6  * Copyright (2004, 2005) Ingo Molnar
7  *
8  * This file contains the spinlock/rwlock implementations for the
9  * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
10  *
11  * Note that some architectures have special knowledge about the
12  * stack frames of these functions in their profile_pc. If you
13  * change anything significant here that could change the stack
 * frame, contact the architecture maintainers.
15  */
16 
17 #include <linux/linkage.h>
18 #include <linux/preempt.h>
19 #include <linux/spinlock.h>
20 #include <linux/interrupt.h>
21 #include <linux/debug_locks.h>
22 #include <linux/export.h>
23 
24 /*
25  * If lockdep is enabled then we use the non-preemption spin-ops
26  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
27  * not re-enabled during lock-acquire (which the preempt-spin-ops do):
28  */
29 #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
30 /*
31  * The __lock_function inlines are taken from
32  * include/linux/spinlock_api_smp.h
33  */
34 #else
35 
/*
 * Some architectures can relax in favour of the CPU owning the lock.
 * Provide cpu_relax() fallbacks for any architecture that does not
 * define its own arch_{read,write,spin}_relax() hooks.
 */
#ifndef arch_read_relax
# define arch_read_relax(l)	cpu_relax()
#endif
#ifndef arch_write_relax
# define arch_write_relax(l)	cpu_relax()
#endif
#ifndef arch_spin_relax
# define arch_spin_relax(l)	cpu_relax()
#endif
48 
/*
 * We build the __lock_function inlines here. They are too large for
 * inlining all over the place, but here is only one user per function
 * which embeds them into the calling _lock_function below.
 *
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptable if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 *
 * Spin protocol: attempt the lock with preemption disabled via a
 * trylock; on contention re-enable preemption, set ->break_lock to ask
 * the current owner to drop the lock, and spin with the arch relax
 * hint while ->break_lock stays set. ->break_lock is cleared only by
 * a task that has just acquired the lock (after the for(;;) loop).
 *
 * NOTE(review): nothing in this file clears ->break_lock on unlock;
 * if every contender is parked in the while loop, none of them retries
 * the trylock — confirm the arch unlock path breaks this cycle
 * (upstream later removed ->break_lock over exactly such a livelock).
 *
 * __raw_##op##_lock_irq() is implemented via _raw_##op##_lock_irqsave()
 * with the returned flags deliberately discarded, so interrupts are
 * left disabled on return.
 */
#define BUILD_LOCK_OPS(op, locktype)					\
void __lockfunc __raw_##op##_lock(locktype##_t *lock)			\
{									\
	for (;;) {							\
		preempt_disable();					\
		if (likely(do_raw_##op##_trylock(lock)))		\
			break;						\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while ((lock)->break_lock)				\
			arch_##op##_relax(&lock->raw_lock);		\
	}								\
	(lock)->break_lock = 0;						\
}									\
									\
unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock)	\
{									\
	unsigned long flags;						\
									\
	for (;;) {							\
		preempt_disable();					\
		local_irq_save(flags);					\
		if (likely(do_raw_##op##_trylock(lock)))		\
			break;						\
		local_irq_restore(flags);				\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while ((lock)->break_lock)				\
			arch_##op##_relax(&lock->raw_lock);		\
	}								\
	(lock)->break_lock = 0;						\
	return flags;							\
}									\
									\
void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock)		\
{									\
	_raw_##op##_lock_irqsave(lock);					\
}									\
									\
void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)		\
{									\
	unsigned long flags;						\
									\
	/*							*/	\
	/* Careful: we must exclude softirqs too, hence the	*/	\
	/* irq-disabling. We use the generic preemption-aware	*/	\
	/* function:						*/	\
	/**/								\
	flags = _raw_##op##_lock_irqsave(lock);				\
	local_bh_disable();						\
	local_irq_restore(flags);					\
}									\
114 
/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         __[spin|read|write]_lock()
 *         __[spin|read|write]_lock_irq()
 *         __[spin|read|write]_lock_irqsave()
 *         __[spin|read|write]_lock_bh()
 *
 * One instantiation per lock flavour: spinlocks plus the read and
 * write sides of rwlocks.
 */
BUILD_LOCK_OPS(spin, raw_spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
127 
128 #endif
129 
#ifndef CONFIG_INLINE_SPIN_TRYLOCK
/* Out-of-line wrapper for __raw_spin_trylock(); built unless CONFIG_INLINE_SPIN_TRYLOCK. */
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
{
	return __raw_spin_trylock(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock);
#endif
137 
#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
/* Out-of-line wrapper for __raw_spin_trylock_bh(); built unless CONFIG_INLINE_SPIN_TRYLOCK_BH. */
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
{
	return __raw_spin_trylock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock_bh);
#endif
145 
#ifndef CONFIG_INLINE_SPIN_LOCK
/* Out-of-line wrapper for __raw_spin_lock(); built unless CONFIG_INLINE_SPIN_LOCK. */
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
	__raw_spin_lock(lock);
}
EXPORT_SYMBOL(_raw_spin_lock);
#endif
153 
#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
/* Out-of-line wrapper for __raw_spin_lock_irqsave(); returns the saved irq flags. */
unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
	return __raw_spin_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave);
#endif
161 
#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
/* Out-of-line wrapper for __raw_spin_lock_irq(); built unless CONFIG_INLINE_SPIN_LOCK_IRQ. */
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
{
	__raw_spin_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irq);
#endif
169 
#ifndef CONFIG_INLINE_SPIN_LOCK_BH
/* Out-of-line wrapper for __raw_spin_lock_bh(); built unless CONFIG_INLINE_SPIN_LOCK_BH. */
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
{
	__raw_spin_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_bh);
#endif
177 
#ifdef CONFIG_UNINLINE_SPIN_UNLOCK
/* Out-of-line wrapper for __raw_spin_unlock(); built only when CONFIG_UNINLINE_SPIN_UNLOCK is set. */
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
{
	__raw_spin_unlock(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock);
#endif
185 
#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
/* Out-of-line wrapper for __raw_spin_unlock_irqrestore(); @flags from the matching irqsave. */
void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
{
	__raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
#endif
193 
#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
/* Out-of-line wrapper for __raw_spin_unlock_irq(); built unless CONFIG_INLINE_SPIN_UNLOCK_IRQ. */
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
{
	__raw_spin_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_irq);
#endif
201 
#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
/* Out-of-line wrapper for __raw_spin_unlock_bh(); built unless CONFIG_INLINE_SPIN_UNLOCK_BH. */
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
{
	__raw_spin_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif
209 
#ifndef CONFIG_INLINE_READ_TRYLOCK
/* Out-of-line wrapper for __raw_read_trylock(); built unless CONFIG_INLINE_READ_TRYLOCK. */
int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
	return __raw_read_trylock(lock);
}
EXPORT_SYMBOL(_raw_read_trylock);
#endif
217 
#ifndef CONFIG_INLINE_READ_LOCK
/* Out-of-line wrapper for __raw_read_lock(); built unless CONFIG_INLINE_READ_LOCK. */
void __lockfunc _raw_read_lock(rwlock_t *lock)
{
	__raw_read_lock(lock);
}
EXPORT_SYMBOL(_raw_read_lock);
#endif
225 
#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
/* Out-of-line wrapper for __raw_read_lock_irqsave(); returns the saved irq flags. */
unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
{
	return __raw_read_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irqsave);
#endif
233 
#ifndef CONFIG_INLINE_READ_LOCK_IRQ
/* Out-of-line wrapper for __raw_read_lock_irq(); built unless CONFIG_INLINE_READ_LOCK_IRQ. */
void __lockfunc _raw_read_lock_irq(rwlock_t *lock)
{
	__raw_read_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irq);
#endif
241 
#ifndef CONFIG_INLINE_READ_LOCK_BH
/* Out-of-line wrapper for __raw_read_lock_bh(); built unless CONFIG_INLINE_READ_LOCK_BH. */
void __lockfunc _raw_read_lock_bh(rwlock_t *lock)
{
	__raw_read_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_lock_bh);
#endif
249 
#ifndef CONFIG_INLINE_READ_UNLOCK
/* Out-of-line wrapper for __raw_read_unlock(); built unless CONFIG_INLINE_READ_UNLOCK. */
void __lockfunc _raw_read_unlock(rwlock_t *lock)
{
	__raw_read_unlock(lock);
}
EXPORT_SYMBOL(_raw_read_unlock);
#endif
257 
#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
/* Out-of-line wrapper for __raw_read_unlock_irqrestore(); @flags from the matching irqsave. */
void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	__raw_read_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_read_unlock_irqrestore);
#endif
265 
#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
/* Out-of-line wrapper for __raw_read_unlock_irq(); built unless CONFIG_INLINE_READ_UNLOCK_IRQ. */
void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)
{
	__raw_read_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_irq);
#endif
273 
#ifndef CONFIG_INLINE_READ_UNLOCK_BH
/* Out-of-line wrapper for __raw_read_unlock_bh(); built unless CONFIG_INLINE_READ_UNLOCK_BH. */
void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)
{
	__raw_read_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_bh);
#endif
281 
#ifndef CONFIG_INLINE_WRITE_TRYLOCK
/* Out-of-line wrapper for __raw_write_trylock(); built unless CONFIG_INLINE_WRITE_TRYLOCK. */
int __lockfunc _raw_write_trylock(rwlock_t *lock)
{
	return __raw_write_trylock(lock);
}
EXPORT_SYMBOL(_raw_write_trylock);
#endif
289 
#ifndef CONFIG_INLINE_WRITE_LOCK
/* Out-of-line wrapper for __raw_write_lock(); built unless CONFIG_INLINE_WRITE_LOCK. */
void __lockfunc _raw_write_lock(rwlock_t *lock)
{
	__raw_write_lock(lock);
}
EXPORT_SYMBOL(_raw_write_lock);
#endif
297 
#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
/* Out-of-line wrapper for __raw_write_lock_irqsave(); returns the saved irq flags. */
unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
{
	return __raw_write_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irqsave);
#endif
305 
#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
/* Out-of-line wrapper for __raw_write_lock_irq(); built unless CONFIG_INLINE_WRITE_LOCK_IRQ. */
void __lockfunc _raw_write_lock_irq(rwlock_t *lock)
{
	__raw_write_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irq);
#endif
313 
#ifndef CONFIG_INLINE_WRITE_LOCK_BH
/* Out-of-line wrapper for __raw_write_lock_bh(); built unless CONFIG_INLINE_WRITE_LOCK_BH. */
void __lockfunc _raw_write_lock_bh(rwlock_t *lock)
{
	__raw_write_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_lock_bh);
#endif
321 
#ifndef CONFIG_INLINE_WRITE_UNLOCK
/* Out-of-line wrapper for __raw_write_unlock(); built unless CONFIG_INLINE_WRITE_UNLOCK. */
void __lockfunc _raw_write_unlock(rwlock_t *lock)
{
	__raw_write_unlock(lock);
}
EXPORT_SYMBOL(_raw_write_unlock);
#endif
329 
#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
/* Out-of-line wrapper for __raw_write_unlock_irqrestore(); @flags from the matching irqsave. */
void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	__raw_write_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_write_unlock_irqrestore);
#endif
337 
#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
/* Out-of-line wrapper for __raw_write_unlock_irq(); built unless CONFIG_INLINE_WRITE_UNLOCK_IRQ. */
void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)
{
	__raw_write_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_irq);
#endif
345 
#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
/* Out-of-line wrapper for __raw_write_unlock_bh(); built unless CONFIG_INLINE_WRITE_UNLOCK_BH. */
void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
{
	__raw_write_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif
353 
354 #ifdef CONFIG_DEBUG_LOCK_ALLOC
355 
/*
 * Acquire @lock, annotating the acquisition with @subclass in lockdep's
 * dep_map so that nested locks of the same class are not reported as
 * self-deadlock. Lockdep must be told about the acquire before the
 * potentially-contended slowpath runs, hence the ordering below.
 */
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nested);
363 
/*
 * Irq-saving variant of _raw_spin_lock_nested(): disables local
 * interrupts first, annotates the acquisition with @subclass for
 * lockdep, then takes @lock. Returns the saved irq flags for the
 * matching unlock_irqrestore.
 */
unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
						   int subclass)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
				do_raw_spin_lock_flags, &flags);
	return flags;
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);
377 
/*
 * Acquire @lock while telling lockdep (via spin_acquire_nest) that it
 * nests inside @nest_lock, which the caller is expected to hold.
 */
void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock,
				     struct lockdep_map *nest_lock)
{
	preempt_disable();
	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nest_lock);
386 
387 #endif
388 
389 notrace int in_lock_functions(unsigned long addr)
390 {
391 	/* Linker adds these: start and end of __lockfunc functions */
392 	extern char __lock_text_start[], __lock_text_end[];
393 
394 	return addr >= (unsigned long)__lock_text_start
395 	&& addr < (unsigned long)__lock_text_end;
396 }
397 EXPORT_SYMBOL(in_lock_functions);
398