// SPDX-License-Identifier: GPL-2.0
/*
 * Hardware spinlock framework
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 */

#define pr_fmt(fmt)    "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/radix-tree.h>
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include "hwspinlock_internal.h"

/* radix tree tags */
#define HWSPINLOCK_UNUSED	(0) /* tags an hwspinlock as unused */

/*
 * A radix tree is used to maintain the available hwspinlock instances.
 * The tree associates hwspinlock pointers with their integer key id,
 * and provides an easy-to-use API which makes the hwspinlock core code
 * simple and easy to read.
 *
 * Radix trees are quick on lookups, and reasonably efficient in terms of
 * storage, especially with high density usages such as this framework
 * requires (a continuous range of integer keys, beginning with zero, is
 * used as the IDs of the hwspinlock instances).
 *
 * The radix tree API supports tagging items in the tree, which this
 * framework uses to mark unused hwspinlock instances (see the
 * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
 * tree, looking for an unused hwspinlock instance, is reduced to a
 * single radix tree API call.
 */
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);

/*
 * Synchronization of access to the tree is achieved using this mutex,
 * as the radix-tree API requires that users provide all synchronization.
 * A mutex is needed because we're using non-atomic radix tree allocations.
 */
static DEFINE_MUTEX(hwspinlock_tree_lock);

/**
 * __hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 * @mode: controls whether local interrupts are disabled or not
 * @flags: a pointer where the caller's interrupt state will be saved at (if
 *         requested)
 *
 * This function attempts to lock an hwspinlock, and will immediately
 * fail if the hwspinlock is already taken.
 *
 * Caution: If the mode is HWLOCK_RAW, the caller must protect the routine
 * that takes the hardware lock with a mutex or a spinlock. In some scenarios
 * the caller needs to perform time-consuming or sleepable operations under
 * the hardware lock, and therefore needs a sleepable lock (such as a mutex)
 * to protect those operations.
 *
 * If the mode is not HWLOCK_RAW, upon a successful return from this function,
 * preemption (and possibly interrupts) is disabled, so the caller must not
 * sleep, and is advised to release the hwspinlock as soon as possible. This is
 * required in order to minimize remote cores polling on the hardware
 * interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if so,
 * whether their previous state should be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_trylock, spin_trylock_irq and
 * spin_trylock_irqsave.
 *
 * Returns 0 if we successfully locked the hwspinlock or -EBUSY if
 * the hwspinlock was already taken.
 * This function will never sleep.
 */
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	int ret;

	BUG_ON(!hwlock);
	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

	/*
	 * This spin_trylock{_irq, _irqsave} serves three purposes:
	 *
	 * 1. Disable preemption, in order to minimize the period of time
	 *    in which the hwspinlock is taken. This is important in order
	 *    to minimize the possible polling on the hardware interconnect
	 *    by a remote user of this lock.
	 * 2. Make the hwspinlock SMP-safe (so we can take it from
	 *    additional contexts on the local host).
	 * 3. Ensure that in_atomic/might_sleep checks catch potential
	 *    problems with hwspinlock usage (e.g. scheduler checks like
	 *    'scheduling while atomic' etc.)
	 */
	switch (mode) {
	case HWLOCK_IRQSTATE:
		ret = spin_trylock_irqsave(&hwlock->lock, *flags);
		break;
	case HWLOCK_IRQ:
		ret = spin_trylock_irq(&hwlock->lock);
		break;
	case HWLOCK_RAW:
		ret = 1;
		break;
	default:
		ret = spin_trylock(&hwlock->lock);
		break;
	}

	/* is lock already taken by another context on the local cpu ? */
	if (!ret)
		return -EBUSY;

	/* try to take the hwspinlock device */
	ret = hwlock->bank->ops->trylock(hwlock);

	/* if hwlock is already taken, undo spin_trylock_* and exit */
	if (!ret) {
		switch (mode) {
		case HWLOCK_IRQSTATE:
			spin_unlock_irqrestore(&hwlock->lock, *flags);
			break;
		case HWLOCK_IRQ:
			spin_unlock_irq(&hwlock->lock);
			break;
		case HWLOCK_RAW:
			/* Nothing to do */
			break;
		default:
			spin_unlock(&hwlock->lock);
			break;
		}

		return -EBUSY;
	}

	/*
	 * We can be sure the other core's memory operations
	 * are observable to us only _after_ we successfully take
	 * the hwspinlock, and we must make sure that subsequent memory
	 * operations (both reads and writes) will not be reordered before
	 * we actually took the hwspinlock.
	 *
	 * Note: the implicit memory barrier of the spinlock above is too
	 * early, so we need this additional explicit memory barrier.
	 */
	mb();

	return 0;
}
EXPORT_SYMBOL_GPL(__hwspin_trylock);
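
/*
 * Example (illustrative only; not part of this file's logic): clients do not
 * normally call __hwspin_trylock() directly, but use the static inline
 * wrappers declared in <linux/hwspinlock.h>, such as hwspin_trylock_irqsave().
 * A minimal sketch of a caller, assuming "hwlock" was obtained earlier from
 * hwspin_lock_request() or hwspin_lock_request_specific(); a non-zero return
 * means the lock is currently held elsewhere:
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	ret = hwspin_trylock_irqsave(hwlock, &flags);
 *	if (ret)
 *		return ret;
 *	... short, non-sleeping critical section ...
 *	hwspin_unlock_irqrestore(hwlock, &flags);
 */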

/**
 * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @mode: mode which controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved at (if
 *         requested)
 *
 * This function locks the given @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up after @to msecs have elapsed.
 *
 * Caution: If the mode is HWLOCK_RAW, the caller must protect the routine
 * that takes the hardware lock with a mutex or a spinlock. In some scenarios
 * the caller needs to perform time-consuming or sleepable operations under
 * the hardware lock, and therefore needs a sleepable lock (such as a mutex)
 * to protect those operations.
 *
 * If the mode is not HWLOCK_RAW, upon a successful return from this function,
 * preemption is disabled (and possibly local interrupts, too), so the caller
 * must not sleep, and is advised to release the hwspinlock as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if so,
 * whether their previous state should be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
					int mode, unsigned long *flags)
{
	int ret;
	unsigned long expire;

	expire = msecs_to_jiffies(to) + jiffies;

	for (;;) {
		/* Try to take the hwspinlock */
		ret = __hwspin_trylock(hwlock, mode, flags);
		if (ret != -EBUSY)
			break;

		/*
		 * The lock is already taken, let's check if the user wants
		 * us to try again
		 */
		if (time_is_before_eq_jiffies(expire))
			return -ETIMEDOUT;

		/*
		 * Allow platform-specific relax handlers to prevent
		 * hogging the interconnect (no sleeping, though)
		 */
		if (hwlock->bank->ops->relax)
			hwlock->bank->ops->relax(hwlock);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);

/**
 * __hwspin_unlock() - unlock a specific hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @mode: controls whether local interrupts need to be restored or not
 * @flags: previous caller's interrupt state to restore (if requested)
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * (possibly) enable interrupts or restore their previous state.
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 *
 * The user decides whether local interrupts should be enabled or not, and
 * if so, whether their previous state should be restored. It is up
 * to the user to choose the appropriate @mode of operation, exactly the
 * same way users decide between spin_unlock, spin_unlock_irq and
 * spin_unlock_irqrestore.
 *
 * The function will never sleep.
 */
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	BUG_ON(!hwlock);
	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

	/*
	 * We must make sure that memory operations (both reads and writes),
	 * done before unlocking the hwspinlock, will not be reordered
	 * after the lock is released.
	 *
	 * That's the purpose of this explicit memory barrier.
	 *
	 * Note: the memory barrier induced by the spin_unlock below is too
	 * late; the other core is going to access memory soon after it will
	 * take the hwspinlock, and by then we want to be sure our memory
	 * operations are already observable.
	 */
	mb();

	hwlock->bank->ops->unlock(hwlock);

	/* Undo the spin_trylock{_irq, _irqsave} called while locking */
	switch (mode) {
	case HWLOCK_IRQSTATE:
		spin_unlock_irqrestore(&hwlock->lock, *flags);
		break;
	case HWLOCK_IRQ:
		spin_unlock_irq(&hwlock->lock);
		break;
	case HWLOCK_RAW:
		/* Nothing to do */
		break;
	default:
		spin_unlock(&hwlock->lock);
		break;
	}
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);
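
/*
 * Example (illustrative sketch; "hwlock", "shared_reg", "val" and TIMEOUT_MS
 * below are placeholders): a typical client takes the lock through the
 * wrappers in <linux/hwspinlock.h> with a bounded wait, runs a short
 * critical section, and releases the lock right away:
 *
 *	ret = hwspin_lock_timeout_irqsave(hwlock, TIMEOUT_MS, &flags);
 *	if (ret)
 *		return ret;
 *	writel(val, shared_reg);
 *	hwspin_unlock_irqrestore(hwlock, &flags);
 */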

/**
 * of_hwspin_lock_simple_xlate - translate hwlock_spec to return a lock id
 * @hwlock_spec: hwlock specifier as found in the device tree
 *
 * This is a simple translation function, suitable for hwspinlock platform
 * drivers that only have a lock specifier length of 1.
 *
 * Returns a relative index of the lock within a specified bank on success,
 * or -EINVAL on invalid specifier cell count.
 */
static inline int
of_hwspin_lock_simple_xlate(const struct of_phandle_args *hwlock_spec)
{
	if (WARN_ON(hwlock_spec->args_count != 1))
		return -EINVAL;

	return hwlock_spec->args[0];
}

/**
 * of_hwspin_lock_get_id() - get lock id for an OF phandle-based specific lock
 * @np: device node from which to request the specific hwlock
 * @index: index of the hwlock in the list of values
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock using the phandle of the
 * hwspinlock device, so that it can be requested using the normal
 * hwspin_lock_request_specific() API.
 *
 * Returns the global lock id number on success, -EPROBE_DEFER if the hwspinlock
 * device is not yet registered, -EINVAL on invalid args specifier value or an
 * appropriate error as returned from the OF parsing of the DT client node.
 */
int of_hwspin_lock_get_id(struct device_node *np, int index)
{
	struct of_phandle_args args;
	struct hwspinlock *hwlock;
	struct radix_tree_iter iter;
	void **slot;
	int id;
	int ret;

	ret = of_parse_phandle_with_args(np, "hwlocks", "#hwlock-cells", index,
					 &args);
	if (ret)
		return ret;

	/* Find the hwspinlock device: we need its base_id */
	ret = -EPROBE_DEFER;
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &hwspinlock_tree, &iter, 0) {
		hwlock = radix_tree_deref_slot(slot);
		if (unlikely(!hwlock))
			continue;
		if (radix_tree_deref_retry(hwlock)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		if (hwlock->bank->dev->of_node == args.np) {
			ret = 0;
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out;

	id = of_hwspin_lock_simple_xlate(&args);
	if (id < 0 || id >= hwlock->bank->num_locks) {
		ret = -EINVAL;
		goto out;
	}
	id += hwlock->bank->base_id;

out:
	of_node_put(args.np);
	return ret ? ret : id;
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id);

/**
 * of_hwspin_lock_get_id_byname() - get lock id for a specified hwlock name
 * @np: device node from which to request the specific hwlock
 * @name: hwlock name
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock using the specified name of
 * the hwspinlock device, so that it can be requested using the normal
 * hwspin_lock_request_specific() API.
 *
 * Returns the global lock id number on success, -EPROBE_DEFER if the hwspinlock
 * device is not yet registered, -EINVAL on invalid args specifier value or an
 * appropriate error as returned from the OF parsing of the DT client node.
 */
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
{
	int index;

	if (!name)
		return -EINVAL;

	index = of_property_match_string(np, "hwlock-names", name);
	if (index < 0)
		return index;

	return of_hwspin_lock_get_id(np, index);
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id_byname);
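
/*
 * Example (illustrative; the node and label names are placeholders): a DT
 * client node typically references its locks through the "hwlocks" and
 * optional "hwlock-names" properties, which the helpers above parse:
 *
 *	client@0 {
 *		...
 *		hwlocks = <&hwlock_dev 2>, <&hwlock_dev 7>;
 *		hwlock-names = "tx", "rx";
 *	};
 *
 * A client driver can then resolve a global lock id and request it, e.g.:
 *
 *	id = of_hwspin_lock_get_id_byname(dev->of_node, "tx");
 *	if (id < 0)
 *		return id;
 *	hwlock = hwspin_lock_request_specific(id);
 */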

static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
{
	struct hwspinlock *tmp;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
	if (ret) {
		if (ret == -EEXIST)
			pr_err("hwspinlock id %d already exists!\n", id);
		goto out;
	}

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);

	/* self-sanity check which should never fail */
	WARN_ON(tmp != hwlock);

out:
	/* propagate any insertion error so the caller can unwind */
	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
}

static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
{
	struct hwspinlock *hwlock = NULL;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is not in use (tag is set) */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_err("hwspinlock %d still in use (or not present)\n", id);
		goto out;
	}

	hwlock = radix_tree_delete(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_err("failed to delete hwspinlock %d\n", id);
		goto out;
	}

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}

/**
 * hwspin_lock_register() - register a new hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @dev: the backing device
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
		const struct hwspinlock_ops *ops, int base_id, int num_locks)
{
	struct hwspinlock *hwlock;
	int ret = 0, i;

	if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
							!ops->unlock) {
		pr_err("invalid parameters\n");
		return -EINVAL;
	}

	bank->dev = dev;
	bank->ops = ops;
	bank->base_id = base_id;
	bank->num_locks = num_locks;

	for (i = 0; i < num_locks; i++) {
		hwlock = &bank->lock[i];

		spin_lock_init(&hwlock->lock);
		hwlock->bank = bank;

		ret = hwspin_lock_register_single(hwlock, base_id + i);
		if (ret)
			goto reg_failed;
	}

	return 0;

reg_failed:
	while (--i >= 0)
		hwspin_lock_unregister_single(base_id + i);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);
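
/*
 * Example (illustrative sketch; the "my_*" names are made up, and a platform
 * driver probe() context with "pdev" is assumed): a platform-specific
 * implementation typically fills in a struct hwspinlock_ops, allocates a
 * bank with room for its locks, and registers it:
 *
 *	static const struct hwspinlock_ops my_hwlock_ops = {
 *		.trylock = my_hwlock_trylock,
 *		.unlock  = my_hwlock_unlock,
 *		.relax   = my_hwlock_relax,
 *	};
 *
 *	bank = devm_kzalloc(&pdev->dev, struct_size(bank, lock, num_locks),
 *			    GFP_KERNEL);
 *	if (!bank)
 *		return -ENOMEM;
 *	ret = hwspin_lock_register(bank, &pdev->dev, &my_hwlock_ops,
 *				   base_id, num_locks);
 */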

/**
 * hwspin_lock_unregister() - unregister an hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock device.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_unregister(struct hwspinlock_device *bank)
{
	struct hwspinlock *hwlock, *tmp;
	int i;

	for (i = 0; i < bank->num_locks; i++) {
		hwlock = &bank->lock[i];

		tmp = hwspin_lock_unregister_single(bank->base_id + i);
		if (!tmp)
			return -EBUSY;

		/* self-sanity check that should never fail */
		WARN_ON(tmp != hwlock);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);

static void devm_hwspin_lock_unreg(struct device *dev, void *res)
{
	hwspin_lock_unregister(*(struct hwspinlock_device **)res);
}

static int devm_hwspin_lock_device_match(struct device *dev, void *res,
					 void *data)
{
	struct hwspinlock_device **bank = res;

	if (WARN_ON(!bank || !*bank))
		return 0;

	return *bank == data;
}

/**
 * devm_hwspin_lock_unregister() - unregister an hw spinlock device for
 *				   a managed device
 * @dev: the backing device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock device.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_unregister(struct device *dev,
				struct hwspinlock_device *bank)
{
	int ret;

	ret = devres_release(dev, devm_hwspin_lock_unreg,
			     devm_hwspin_lock_device_match, bank);
	WARN_ON(ret);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_unregister);

/**
 * devm_hwspin_lock_register() - register a new hw spinlock device for
 *				 a managed device
 * @dev: the backing device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_register(struct device *dev,
			      struct hwspinlock_device *bank,
			      const struct hwspinlock_ops *ops,
			      int base_id, int num_locks)
{
	struct hwspinlock_device **ptr;
	int ret;

	ptr = devres_alloc(devm_hwspin_lock_unreg, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = hwspin_lock_register(bank, dev, ops, base_id, num_locks);
	if (!ret) {
		*ptr = bank;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_register);

/**
 * __hwspin_lock_request() - tag an hwspinlock as used and power it up
 * @hwlock: the hwspinlock to mark as used
 *
 * This is an internal function that prepares an hwspinlock instance
 * before it is given to the user. The function assumes that
 * hwspinlock_tree_lock is taken.
 *
 * Returns 0 or positive to indicate success, and a negative value to
 * indicate an error (with the appropriate error code)
 */
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
	struct device *dev = hwlock->bank->dev;
	struct hwspinlock *tmp;
	int ret;

	/* prevent underlying implementation from being removed */
	if (!try_module_get(dev->driver->owner)) {
		dev_err(dev, "%s: can't get owner\n", __func__);
		return -EINVAL;
	}

	/* notify PM core that power is now needed */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "%s: can't power on device\n", __func__);
		pm_runtime_put_noidle(dev);
		module_put(dev->driver->owner);
		return ret;
	}

	/* mark hwspinlock as used, should not fail */
	tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);

	/* self-sanity check that should never fail */
	WARN_ON(tmp != hwlock);

	return ret;
}

/**
 * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
 * @hwlock: a valid hwspinlock instance
 *
 * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
 */
int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	return hwlock_to_id(hwlock);
}
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);

/**
 * hwspin_lock_request() - request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request(void)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* look for an unused lock */
	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
						0, 1, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("a free hwspinlock is not available\n");
		hwlock = NULL;
		goto out;
	}

	/* sanity check that should never fail */
	WARN_ON(ret > 1);

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request);
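
/*
 * Example (illustrative sketch; send_id_to_remote() is a hypothetical
 * placeholder for whatever channel the system uses, e.g. a mailbox or
 * shared memory): a host-side driver that dynamically grabs a lock usually
 * has to tell the remote processor which lock it got:
 *
 *	hwlock = hwspin_lock_request();
 *	if (!hwlock)
 *		return -EBUSY;
 *	send_id_to_remote(hwspin_lock_get_id(hwlock));
 */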

/**
 * hwspin_lock_request_specific() - request for a specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure this hwspinlock exists */
	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_warn("hwspinlock %u does not exist\n", id);
		goto out;
	}

	/* sanity check (this shouldn't happen) */
	WARN_ON(hwlock_to_id(hwlock) != id);

	/* make sure this hwspinlock is unused */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("hwspinlock %u is already in use\n", id);
		hwlock = NULL;
		goto out;
	}

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);

/**
 * hwspin_lock_free() - free a specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_free(struct hwspinlock *hwlock)
{
	struct device *dev;
	struct hwspinlock *tmp;
	int ret;

	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	dev = hwlock->bank->dev;
	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is used */
	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);
	if (ret == 1) {
		dev_err(dev, "%s: hwlock is already free\n", __func__);
		dump_stack();
		ret = -EINVAL;
		goto out;
	}

	/* notify the underlying device that power is not needed */
	ret = pm_runtime_put(dev);
	if (ret < 0)
		goto out;

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);

	/* sanity check (this shouldn't happen) */
	WARN_ON(tmp != hwlock);

	module_put(dev->driver->owner);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);

static int devm_hwspin_lock_match(struct device *dev, void *res, void *data)
{
	struct hwspinlock **hwlock = res;

	if (WARN_ON(!hwlock || !*hwlock))
		return 0;

	return *hwlock == data;
}

static void devm_hwspin_lock_release(struct device *dev, void *res)
{
	hwspin_lock_free(*(struct hwspinlock **)res);
}

/**
 * devm_hwspin_lock_free() - free a specific hwspinlock for a managed device
 * @dev: the device to free the specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
{
	int ret;

	ret = devres_release(dev, devm_hwspin_lock_release,
			     devm_hwspin_lock_match, hwlock);
	WARN_ON(ret);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_free);

/**
 * devm_hwspin_lock_request() - request an hwspinlock for a managed device
 * @dev: the device to request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
{
	struct hwspinlock **ptr, *hwlock;

	ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	hwlock = hwspin_lock_request();
	if (hwlock) {
		*ptr = hwlock;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return hwlock;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_request);

/**
 * devm_hwspin_lock_request_specific() - request for a specific hwspinlock for
 *					 a managed device
 * @dev: the device to request the specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id)
{
	struct hwspinlock **ptr, *hwlock;

	ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	hwlock = hwspin_lock_request_specific(id);
	if (hwlock) {
		*ptr = hwlock;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return hwlock;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_request_specific);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");