/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Hardware spinlock public header
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 */

#ifndef __LINUX_HWSPINLOCK_H
#define __LINUX_HWSPINLOCK_H

#include <linux/err.h>
#include <linux/sched.h>

/* hwspinlock mode argument */
#define HWLOCK_IRQSTATE		0x01 /* Disable interrupts, save state */
#define HWLOCK_IRQ		0x02 /* Disable interrupts, don't save state */
#define HWLOCK_RAW		0x03 /* Don't disable interrupts or preemption */
#define HWLOCK_IN_ATOMIC	0x04 /* Called while in atomic context */

struct device;
struct device_node;
struct hwspinlock;
struct hwspinlock_device;
struct hwspinlock_ops;

/**
 * struct hwspinlock_pdata - platform data for hwspinlock drivers
 * @base_id: base id for this hwspinlock device
 *
 * hwspinlock devices provide system-wide hardware locks that are used
 * by remote processors that have no other way to achieve synchronization.
 *
 * To achieve that, each physical lock must have a system-wide id number
 * that is agreed upon, otherwise remote processors can't possibly assume
 * they're using the same hardware lock.
 *
 * Usually boards have a single hwspinlock device, which provides several
 * hwspinlocks, and in this case, they can be trivially numbered 0 to
 * (num-of-locks - 1).
 *
 * In case boards have several hwspinlock devices, a different base id
 * should be used for each hwspinlock device (they can't all use 0 as
 * a starting id!).
 *
 * This platform data structure should be used to provide the base id
 * for each device (which is trivially 0 when only a single hwspinlock
 * device exists). It can be shared between different platforms, hence
 * its location.
 */
struct hwspinlock_pdata {
	int base_id;
};
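
/*
 * Example (an illustrative sketch, not part of the original header): board
 * code for a platform with two hwspinlock banks of 32 locks each could hand
 * out non-overlapping id ranges via this platform data. The "my_hwspinlock"
 * device name below is hypothetical.
 *
 *	static struct hwspinlock_pdata bank0_pdata = { .base_id = 0 };
 *	static struct hwspinlock_pdata bank1_pdata = { .base_id = 32 };
 *
 *	static struct platform_device bank0_device = {
 *		.name = "my_hwspinlock",
 *		.id = 0,
 *		.dev = { .platform_data = &bank0_pdata },
 *	};
 */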

#ifdef CONFIG_HWSPINLOCK

int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
		const struct hwspinlock_ops *ops, int base_id, int num_locks);
int hwspin_lock_unregister(struct hwspinlock_device *bank);
struct hwspinlock *hwspin_lock_request(void);
struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
int hwspin_lock_free(struct hwspinlock *hwlock);
int of_hwspin_lock_get_id(struct device_node *np, int index);
int hwspin_lock_get_id(struct hwspinlock *hwlock);
int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
							unsigned long *);
int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name);
int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id);
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock);
struct hwspinlock *devm_hwspin_lock_request(struct device *dev);
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id);
int devm_hwspin_lock_unregister(struct device *dev,
				struct hwspinlock_device *bank);
int devm_hwspin_lock_register(struct device *dev,
			      struct hwspinlock_device *bank,
			      const struct hwspinlock_ops *ops,
			      int base_id, int num_locks);
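
/*
 * Example (an illustrative sketch under stated assumptions): a provider
 * driver's probe can register a bank of locks with the devres helper, so
 * the bank is unregistered automatically on driver detach. This assumes
 * the driver defines my_hwspinlock_ops (trylock/unlock callbacks against
 * struct hwspinlock_ops, declared in the framework's internal header) and
 * that struct hwspinlock_device ends in a flexible array of locks.
 *
 *	static int my_hwspinlock_probe(struct platform_device *pdev)
 *	{
 *		const int base_id = 0, num_locks = 32;
 *		struct hwspinlock_device *bank;
 *
 *		bank = devm_kzalloc(&pdev->dev,
 *				    struct_size(bank, lock, num_locks),
 *				    GFP_KERNEL);
 *		if (!bank)
 *			return -ENOMEM;
 *
 *		return devm_hwspin_lock_register(&pdev->dev, bank,
 *						 &my_hwspinlock_ops,
 *						 base_id, num_locks);
 *	}
 */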

#else /* !CONFIG_HWSPINLOCK */

/*
 * We don't want these functions to fail if CONFIG_HWSPINLOCK is not
 * enabled. We prefer to silently succeed in this case, and let the
 * code path get compiled away. This way, if CONFIG_HWSPINLOCK is not
 * required on a given setup, users of these stubs will still build and run.
 *
 * The only exception is hwspin_lock_register/hwspin_lock_unregister, with
 * which we _do_ want users to fail (no point in registering hwspinlock
 * instances if the framework is not available).
 *
 * Note: ERR_PTR(-ENODEV) will still be considered a success for
 * NULL-checking users. Callers that do care can still check the return
 * value with IS_ERR().
 */
static inline struct hwspinlock *hwspin_lock_request(void)
{
	return ERR_PTR(-ENODEV);
}

static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	return ERR_PTR(-ENODEV);
}

static inline int hwspin_lock_free(struct hwspinlock *hwlock)
{
	return 0;
}

static inline
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
			  int mode, unsigned long *flags)
{
	return 0;
}

static inline
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	return 0;
}

static inline
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
}

static inline int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id)
{
	return 0;
}

static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
{
	return 0;
}

static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	return 0;
}

static inline
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
{
	return 0;
}

static inline
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
{
	return 0;
}

static inline struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
{
	return ERR_PTR(-ENODEV);
}

static inline
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id)
{
	return ERR_PTR(-ENODEV);
}

#endif /* !CONFIG_HWSPINLOCK */

/**
 * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled (the previous interrupt state is saved at @flags),
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline
int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags)
{
	return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags);
}
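
/*
 * Example (an illustrative sketch): pair hwspin_trylock_irqsave() with
 * hwspin_unlock_irqrestore(), handing the same flags variable to both. The
 * hwlock is assumed to have been obtained earlier, e.g. via
 * hwspin_lock_request_specific(). The critical section must be short and
 * must not sleep.
 *
 *	unsigned long flags;
 *	int err;
 *
 *	err = hwspin_trylock_irqsave(hwlock, &flags);
 *	if (err)
 *		return err;
 *
 *	... short, non-sleeping critical section ...
 *
 *	hwspin_unlock_irqrestore(hwlock, &flags);
 */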

/**
 * hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled, so the caller must not sleep, and is advised
 * to release the hwspinlock as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_trylock_raw() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Caution: the caller must protect the lock-taking routine with a mutex or
 * spinlock to avoid deadlock. In return, this mode allows the caller to
 * perform time-consuming or sleepable operations while holding the
 * hardware lock.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL);
}

/**
 * hwspin_trylock_in_atomic() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * This function shall be called only from an atomic context.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_in_atomic(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_IN_ATOMIC, NULL);
}

/**
 * hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Upon a successful return from this function, preemption is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible. This is required in order to minimize remote cores
 * polling on the hardware interconnect.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, 0, NULL);
}
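
/*
 * Example (an illustrative sketch): the basic request/trylock/unlock/free
 * lifecycle in the default mode, which disables preemption while the lock
 * is held. Per the NULL-checking convention noted earlier, a failed
 * request is detected by comparing against NULL.
 *
 *	struct hwspinlock *hwlock;
 *	int err;
 *
 *	hwlock = hwspin_lock_request();
 *	if (!hwlock)
 *		return -EBUSY;
 *
 *	err = hwspin_trylock(hwlock);
 *	if (!err) {
 *		... short, non-sleeping critical section ...
 *		hwspin_unlock(hwlock);
 *	}
 *
 *	hwspin_lock_free(hwlock);
 */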

/**
 * hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled (and the previous interrupt state is saved at @flags), so the
 * caller must not sleep, and is advised to release the hwspinlock as soon as
 * possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock,
					unsigned int to, unsigned long *flags)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags);
}
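
/*
 * Example (an illustrative sketch): unlike the trylock variant, the timeout
 * variant busy-waits for a contended lock, so the caller does not need a
 * retry loop; -ETIMEDOUT is returned if the lock stays busy for the whole
 * period (10 msecs here).
 *
 *	unsigned long flags;
 *	int err;
 *
 *	err = hwspin_lock_timeout_irqsave(hwlock, 10, &flags);
 *	if (err)
 *		return err;
 *
 *	... short, non-sleeping critical section ...
 *
 *	hwspin_unlock_irqrestore(hwlock, &flags);
 */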

/**
 * hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled, so the caller must not sleep, and is advised to release the
 * hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_lock_timeout_raw() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Caution: the caller must protect the lock-taking routine with a mutex or
 * spinlock to avoid deadlock. In return, this mode allows the caller to
 * perform time-consuming or sleepable operations while holding the
 * hardware lock.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL);
}
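
/*
 * Example (an illustrative sketch): in raw mode the framework touches
 * neither preemption nor interrupts, so, per the caution above, local
 * callers are serialized with a mutex (my_hwlock_mutex is hypothetical);
 * sleeping is then allowed while the hardware lock is held.
 *
 *	static DEFINE_MUTEX(my_hwlock_mutex);
 *
 *	mutex_lock(&my_hwlock_mutex);
 *	err = hwspin_lock_timeout_raw(hwlock, 100);
 *	if (!err) {
 *		... operations that may sleep ...
 *		hwspin_unlock_raw(hwlock);
 *	}
 *	mutex_unlock(&my_hwlock_mutex);
 */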

/**
 * hwspin_lock_timeout_in_atomic() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * This function shall be called only from an atomic context and the timeout
 * value shall not exceed a few msecs.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_in_atomic(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IN_ATOMIC, NULL);
}
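
/*
 * Example (an illustrative sketch): from atomic context (e.g. under a
 * spinlock, which my_lock below stands in for), use the _in_atomic
 * variants and keep the timeout to a few msecs as advised above.
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	err = hwspin_lock_timeout_in_atomic(hwlock, 5);
 *	if (!err) {
 *		... short critical section ...
 *		hwspin_unlock_in_atomic(hwlock);
 *	}
 *	spin_unlock_irqrestore(&my_lock, flags);
 */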

/**
 * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, 0, NULL);
}
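
/*
 * Example (an illustrative sketch): a consumer driver can look up a lock id
 * by name from its device-tree node and request that specific lock with the
 * devres helper, so the lock is freed automatically on driver detach. The
 * "mylock" name is hypothetical.
 *
 *	struct hwspinlock *hwlock;
 *	int id, err;
 *
 *	id = of_hwspin_lock_get_id_byname(dev->of_node, "mylock");
 *	if (id < 0)
 *		return id;
 *
 *	hwlock = devm_hwspin_lock_request_specific(dev, id);
 *	if (!hwlock)
 *		return -EBUSY;
 *
 *	err = hwspin_lock_timeout(hwlock, 100);
 *	if (err)
 *		return err;
 *
 *	... critical section ...
 *
 *	hwspin_unlock(hwlock);
 */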

/**
 * hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @flags: previous caller's interrupt state to restore
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * restore the previous state of the local interrupts. It should be used
 * to undo, e.g., hwspin_trylock_irqsave().
 *
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 */
static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock,
					    unsigned long *flags)
{
	__hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags);
}

/**
 * hwspin_unlock_irq() - unlock hwspinlock, enable interrupts
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * enable local interrupts. It should be used to undo, e.g.,
 * hwspin_trylock_irq() or hwspin_lock_timeout_irq().
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_unlock_raw() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_raw()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_RAW, NULL);
}

/**
 * hwspin_unlock_in_atomic() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_in_atomic())
 * before calling this function: it is a bug to call unlock on a @hwlock
 * that is already unlocked.
 */
static inline void hwspin_unlock_in_atomic(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_IN_ATOMIC, NULL);
}

/**
 * hwspin_unlock() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock and re-enable preemption.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
 * this function: it is a bug to call unlock on a @hwlock that is already
 * unlocked.
 */
static inline void hwspin_unlock(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, 0, NULL);
}

#endif /* __LINUX_HWSPINLOCK_H */