1 /*
2  * Hardware spinlock framework
3  *
4  * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
5  *
6  * Contact: Ohad Ben-Cohen <ohad@wizery.com>
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of the GNU General Public License version 2 as published
10  * by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  */
17 
18 #define pr_fmt(fmt)    "%s: " fmt, __func__
19 
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/spinlock.h>
23 #include <linux/types.h>
24 #include <linux/err.h>
25 #include <linux/jiffies.h>
26 #include <linux/radix-tree.h>
27 #include <linux/hwspinlock.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/mutex.h>
30 
31 #include "hwspinlock_internal.h"
32 
33 /* radix tree tags */
34 #define HWSPINLOCK_UNUSED	(0) /* tags an hwspinlock as unused */
35 
36 /*
37  * A radix tree is used to maintain the available hwspinlock instances.
38  * The tree associates hwspinlock pointers with their integer key id,
 * and provides an easy-to-use API which makes the hwspinlock core code simple
40  * and easy to read.
41  *
42  * Radix trees are quick on lookups, and reasonably efficient in terms of
43  * storage, especially with high density usages such as this framework
 * requires (a contiguous range of integer keys, beginning with zero, is
 * used as the IDs of the hwspinlock instances).
46  *
47  * The radix tree API supports tagging items in the tree, which this
48  * framework uses to mark unused hwspinlock instances (see the
49  * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
50  * tree, looking for an unused hwspinlock instance, is now reduced to a
51  * single radix tree API call.
52  */
53 static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);
54 
55 /*
56  * Synchronization of access to the tree is achieved using this mutex,
 * as the radix-tree API requires that users provide all synchronization.
58  * A mutex is needed because we're using non-atomic radix tree allocations.
59  */
60 static DEFINE_MUTEX(hwspinlock_tree_lock);
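
/*
 * For illustration, the "single radix tree API call" mentioned above is
 * the tagged gang lookup used by hwspin_lock_request() below. A minimal
 * sketch of such a query (hwspinlock_tree_lock must be held):
 *
 *	struct hwspinlock *hwlock;
 *	int ret;
 *
 *	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree,
 *			(void **)&hwlock, 0, 1, HWSPINLOCK_UNUSED);
 *	if (ret == 0)
 *		pr_warn("no unused hwspinlock found\n");
 */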
61 
62 
63 /**
64  * __hwspin_trylock() - attempt to lock a specific hwspinlock
65  * @hwlock: an hwspinlock which we want to trylock
66  * @mode: controls whether local interrupts are disabled or not
 * @flags: a pointer where the caller's interrupt state will be saved (if
68  *         requested)
69  *
70  * This function attempts to lock an hwspinlock, and will immediately
71  * fail if the hwspinlock is already taken.
72  *
73  * Upon a successful return from this function, preemption (and possibly
74  * interrupts) is disabled, so the caller must not sleep, and is advised to
75  * release the hwspinlock as soon as possible. This is required in order to
76  * minimize remote cores polling on the hardware interconnect.
77  *
78  * The user decides whether local interrupts are disabled or not, and if yes,
 * whether they want their previous state to be saved. It is up to the user
80  * to choose the appropriate @mode of operation, exactly the same way users
81  * should decide between spin_trylock, spin_trylock_irq and
82  * spin_trylock_irqsave.
83  *
84  * Returns 0 if we successfully locked the hwspinlock or -EBUSY if
85  * the hwspinlock was already taken.
86  * This function will never sleep.
87  */
88 int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
89 {
90 	int ret;
91 
92 	BUG_ON(!hwlock);
93 	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
94 
95 	/*
	 * This spin_trylock{_irq, _irqsave} serves three purposes:
97 	 *
98 	 * 1. Disable preemption, in order to minimize the period of time
99 	 *    in which the hwspinlock is taken. This is important in order
100 	 *    to minimize the possible polling on the hardware interconnect
101 	 *    by a remote user of this lock.
102 	 * 2. Make the hwspinlock SMP-safe (so we can take it from
103 	 *    additional contexts on the local host).
104 	 * 3. Ensure that in_atomic/might_sleep checks catch potential
105 	 *    problems with hwspinlock usage (e.g. scheduler checks like
106 	 *    'scheduling while atomic' etc.)
107 	 */
108 	if (mode == HWLOCK_IRQSTATE)
109 		ret = spin_trylock_irqsave(&hwlock->lock, *flags);
110 	else if (mode == HWLOCK_IRQ)
111 		ret = spin_trylock_irq(&hwlock->lock);
112 	else
113 		ret = spin_trylock(&hwlock->lock);
114 
	/* is lock already taken by another context on the local cpu? */
116 	if (!ret)
117 		return -EBUSY;
118 
119 	/* try to take the hwspinlock device */
120 	ret = hwlock->bank->ops->trylock(hwlock);
121 
122 	/* if hwlock is already taken, undo spin_trylock_* and exit */
123 	if (!ret) {
124 		if (mode == HWLOCK_IRQSTATE)
125 			spin_unlock_irqrestore(&hwlock->lock, *flags);
126 		else if (mode == HWLOCK_IRQ)
127 			spin_unlock_irq(&hwlock->lock);
128 		else
129 			spin_unlock(&hwlock->lock);
130 
131 		return -EBUSY;
132 	}
133 
134 	/*
135 	 * We can be sure the other core's memory operations
136 	 * are observable to us only _after_ we successfully take
137 	 * the hwspinlock, and we must make sure that subsequent memory
138 	 * operations (both reads and writes) will not be reordered before
	 * we actually take the hwspinlock.
140 	 *
141 	 * Note: the implicit memory barrier of the spinlock above is too
142 	 * early, so we need this additional explicit memory barrier.
143 	 */
144 	mb();
145 
146 	return 0;
147 }
148 EXPORT_SYMBOL_GPL(__hwspin_trylock);
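
/*
 * Example: a minimal sketch using the public wrappers from
 * <linux/hwspinlock.h>, which expand to this function. The critical
 * section runs with preemption (and here local interrupts) disabled,
 * so it must be short and must not sleep:
 *
 *	unsigned long flags;
 *
 *	if (hwspin_trylock_irqsave(hwlock, &flags))
 *		return -EBUSY;	// taken by another user, try again later
 *
 *	// ... short, non-sleeping critical section ...
 *
 *	hwspin_unlock_irqrestore(hwlock, &flags);
 */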
149 
150 /**
 * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @mode: mode which controls whether local interrupts are disabled or not
 * @flags: a pointer where the caller's interrupt state will be saved (if
 *         requested)
 *
 * This function locks the given @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up after @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption is disabled
 * (and possibly local interrupts, too), so the caller must not sleep,
 * and is advised to release the hwspinlock as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether they want their previous state to be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
176  */
177 int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
178 					int mode, unsigned long *flags)
179 {
180 	int ret;
181 	unsigned long expire;
182 
183 	expire = msecs_to_jiffies(to) + jiffies;
184 
185 	for (;;) {
186 		/* Try to take the hwspinlock */
187 		ret = __hwspin_trylock(hwlock, mode, flags);
188 		if (ret != -EBUSY)
189 			break;
190 
191 		/*
192 		 * The lock is already taken, let's check if the user wants
193 		 * us to try again
194 		 */
195 		if (time_is_before_eq_jiffies(expire))
196 			return -ETIMEDOUT;
197 
198 		/*
199 		 * Allow platform-specific relax handlers to prevent
200 		 * hogging the interconnect (no sleeping, though)
201 		 */
202 		if (hwlock->bank->ops->relax)
203 			hwlock->bank->ops->relax(hwlock);
204 	}
205 
206 	return ret;
207 }
208 EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);
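
/*
 * Example: a minimal sketch using the hwspin_lock_timeout_irq() wrapper
 * from <linux/hwspinlock.h>; the 100 msecs timeout is purely
 * illustrative:
 *
 *	int ret;
 *
 *	ret = hwspin_lock_timeout_irq(hwlock, 100);
 *	if (ret)
 *		return ret;	// most likely -ETIMEDOUT
 *
 *	// ... short, non-sleeping critical section ...
 *
 *	hwspin_unlock_irq(hwlock);
 */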
209 
210 /**
211  * __hwspin_unlock() - unlock a specific hwspinlock
212  * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @mode: controls whether local interrupts need to be restored or not
214  * @flags: previous caller's interrupt state to restore (if requested)
215  *
216  * This function will unlock a specific hwspinlock, enable preemption and
217  * (possibly) enable interrupts or restore their previous state.
218  * @hwlock must be already locked before calling this function: it is a bug
219  * to call unlock on a @hwlock that is already unlocked.
220  *
221  * The user decides whether local interrupts should be enabled or not, and
 * if yes, whether they want their previous state to be restored. It is up
223  * to the user to choose the appropriate @mode of operation, exactly the
224  * same way users decide between spin_unlock, spin_unlock_irq and
225  * spin_unlock_irqrestore.
226  *
227  * The function will never sleep.
228  */
229 void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
230 {
231 	BUG_ON(!hwlock);
232 	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
233 
234 	/*
235 	 * We must make sure that memory operations (both reads and writes),
236 	 * done before unlocking the hwspinlock, will not be reordered
237 	 * after the lock is released.
238 	 *
239 	 * That's the purpose of this explicit memory barrier.
240 	 *
241 	 * Note: the memory barrier induced by the spin_unlock below is too
	 * late; the other core is going to access memory soon after it takes
	 * the hwspinlock, and by then we want to be sure our memory
244 	 * operations are already observable.
245 	 */
246 	mb();
247 
248 	hwlock->bank->ops->unlock(hwlock);
249 
250 	/* Undo the spin_trylock{_irq, _irqsave} called while locking */
251 	if (mode == HWLOCK_IRQSTATE)
252 		spin_unlock_irqrestore(&hwlock->lock, *flags);
253 	else if (mode == HWLOCK_IRQ)
254 		spin_unlock_irq(&hwlock->lock);
255 	else
256 		spin_unlock(&hwlock->lock);
257 }
258 EXPORT_SYMBOL_GPL(__hwspin_unlock);
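
/*
 * Note: the @mode (and, for HWLOCK_IRQSTATE, the @flags pointer) used
 * here must match the ones used when the lock was taken. E.g. a lock
 * taken with hwspin_trylock_irqsave() must be released with
 * hwspin_unlock_irqrestore(), passing the very same flags:
 *
 *	unsigned long flags;
 *
 *	if (!hwspin_trylock_irqsave(hwlock, &flags)) {
 *		// ... critical section ...
 *		hwspin_unlock_irqrestore(hwlock, &flags);
 *	}
 */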
259 
260 static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
261 {
262 	struct hwspinlock *tmp;
263 	int ret;
264 
265 	mutex_lock(&hwspinlock_tree_lock);
266 
267 	ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
268 	if (ret) {
269 		if (ret == -EEXIST)
270 			pr_err("hwspinlock id %d already exists!\n", id);
271 		goto out;
272 	}
273 
274 	/* mark this hwspinlock as available */
275 	tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
276 
277 	/* self-sanity check which should never fail */
278 	WARN_ON(tmp != hwlock);
279 
280 out:
281 	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
283 }
284 
285 static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
286 {
287 	struct hwspinlock *hwlock = NULL;
288 	int ret;
289 
290 	mutex_lock(&hwspinlock_tree_lock);
291 
292 	/* make sure the hwspinlock is not in use (tag is set) */
293 	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
294 	if (ret == 0) {
295 		pr_err("hwspinlock %d still in use (or not present)\n", id);
296 		goto out;
297 	}
298 
299 	hwlock = radix_tree_delete(&hwspinlock_tree, id);
300 	if (!hwlock) {
301 		pr_err("failed to delete hwspinlock %d\n", id);
302 		goto out;
303 	}
304 
305 out:
306 	mutex_unlock(&hwspinlock_tree_lock);
307 	return hwlock;
308 }
309 
310 /**
311  * hwspin_lock_register() - register a new hw spinlock device
312  * @bank: the hwspinlock device, which usually provides numerous hw locks
313  * @dev: the backing device
314  * @ops: hwspinlock handlers for this device
315  * @base_id: id of the first hardware spinlock in this bank
316  * @num_locks: number of hwspinlocks provided by this device
317  *
318  * This function should be called from the underlying platform-specific
319  * implementation, to register a new hwspinlock device instance.
320  *
321  * Should be called from a process context (might sleep)
322  *
323  * Returns 0 on success, or an appropriate error code on failure
324  */
325 int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
326 		const struct hwspinlock_ops *ops, int base_id, int num_locks)
327 {
328 	struct hwspinlock *hwlock;
329 	int ret = 0, i;
330 
331 	if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
332 							!ops->unlock) {
333 		pr_err("invalid parameters\n");
334 		return -EINVAL;
335 	}
336 
337 	bank->dev = dev;
338 	bank->ops = ops;
339 	bank->base_id = base_id;
340 	bank->num_locks = num_locks;
341 
342 	for (i = 0; i < num_locks; i++) {
343 		hwlock = &bank->lock[i];
344 
345 		spin_lock_init(&hwlock->lock);
346 		hwlock->bank = bank;
347 
348 		ret = hwspin_lock_register_single(hwlock, base_id + i);
349 		if (ret)
350 			goto reg_failed;
351 	}
352 
353 	return 0;
354 
355 reg_failed:
356 	while (--i >= 0)
357 		hwspin_lock_unregister_single(base_id + i);
358 	return ret;
359 }
360 EXPORT_SYMBOL_GPL(hwspin_lock_register);
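
/*
 * Example: a minimal registration sketch from a hypothetical platform
 * driver probe (the my_* names are illustrative, not a real driver).
 * The bank must be allocated with room for @num_locks struct hwspinlock
 * entries at its tail:
 *
 *	static const struct hwspinlock_ops my_hwspinlock_ops = {
 *		.trylock = my_hwspinlock_trylock,
 *		.unlock  = my_hwspinlock_unlock,
 *		.relax   = my_hwspinlock_relax,	// optional
 *	};
 *
 *	bank = kzalloc(sizeof(*bank) + num_locks * sizeof(*hwlock),
 *			GFP_KERNEL);
 *	if (!bank)
 *		return -ENOMEM;
 *
 *	ret = hwspin_lock_register(bank, &pdev->dev, &my_hwspinlock_ops,
 *				   0, num_locks);
 *
 * The matching remove path calls hwspin_lock_unregister(bank), which
 * fails with -EBUSY while any of the bank's locks is still in use.
 */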
361 
362 /**
363  * hwspin_lock_unregister() - unregister an hw spinlock device
364  * @bank: the hwspinlock device, which usually provides numerous hw locks
365  *
366  * This function should be called from the underlying platform-specific
367  * implementation, to unregister an existing (and unused) hwspinlock.
368  *
369  * Should be called from a process context (might sleep)
370  *
371  * Returns 0 on success, or an appropriate error code on failure
372  */
373 int hwspin_lock_unregister(struct hwspinlock_device *bank)
374 {
375 	struct hwspinlock *hwlock, *tmp;
376 	int i;
377 
378 	for (i = 0; i < bank->num_locks; i++) {
379 		hwlock = &bank->lock[i];
380 
381 		tmp = hwspin_lock_unregister_single(bank->base_id + i);
382 		if (!tmp)
383 			return -EBUSY;
384 
385 		/* self-sanity check that should never fail */
386 		WARN_ON(tmp != hwlock);
387 	}
388 
389 	return 0;
390 }
391 EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
392 
393 /**
 * __hwspin_lock_request() - tag an hwspinlock as used and power it up
 * @hwlock: the hwspinlock instance to be prepared
 *
396  * This is an internal function that prepares an hwspinlock instance
397  * before it is given to the user. The function assumes that
398  * hwspinlock_tree_lock is taken.
399  *
400  * Returns 0 or positive to indicate success, and a negative value to
401  * indicate an error (with the appropriate error code)
402  */
403 static int __hwspin_lock_request(struct hwspinlock *hwlock)
404 {
405 	struct device *dev = hwlock->bank->dev;
406 	struct hwspinlock *tmp;
407 	int ret;
408 
409 	/* prevent underlying implementation from being removed */
410 	if (!try_module_get(dev->driver->owner)) {
411 		dev_err(dev, "%s: can't get owner\n", __func__);
412 		return -EINVAL;
413 	}
414 
415 	/* notify PM core that power is now needed */
416 	ret = pm_runtime_get_sync(dev);
417 	if (ret < 0) {
418 		dev_err(dev, "%s: can't power on device\n", __func__);
419 		pm_runtime_put_noidle(dev);
420 		module_put(dev->driver->owner);
421 		return ret;
422 	}
423 
424 	/* mark hwspinlock as used, should not fail */
425 	tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
426 							HWSPINLOCK_UNUSED);
427 
428 	/* self-sanity check that should never fail */
429 	WARN_ON(tmp != hwlock);
430 
431 	return ret;
432 }
433 
434 /**
435  * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
436  * @hwlock: a valid hwspinlock instance
437  *
438  * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
439  */
440 int hwspin_lock_get_id(struct hwspinlock *hwlock)
441 {
442 	if (!hwlock) {
443 		pr_err("invalid hwlock\n");
444 		return -EINVAL;
445 	}
446 
447 	return hwlock_to_id(hwlock);
448 }
449 EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
450 
451 /**
452  * hwspin_lock_request() - request an hwspinlock
453  *
454  * This function should be called by users of the hwspinlock device,
455  * in order to dynamically assign them an unused hwspinlock.
456  * Usually the user of this lock will then have to communicate the lock's id
457  * to the remote core before it can be used for synchronization (to get the
458  * id of a given hwlock, use hwspin_lock_get_id()).
459  *
460  * Should be called from a process context (might sleep)
461  *
462  * Returns the address of the assigned hwspinlock, or NULL on error
463  */
464 struct hwspinlock *hwspin_lock_request(void)
465 {
466 	struct hwspinlock *hwlock;
467 	int ret;
468 
469 	mutex_lock(&hwspinlock_tree_lock);
470 
471 	/* look for an unused lock */
472 	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
473 						0, 1, HWSPINLOCK_UNUSED);
474 	if (ret == 0) {
475 		pr_warn("a free hwspinlock is not available\n");
476 		hwlock = NULL;
477 		goto out;
478 	}
479 
480 	/* sanity check that should never fail */
481 	WARN_ON(ret > 1);
482 
483 	/* mark as used and power up */
484 	ret = __hwspin_lock_request(hwlock);
485 	if (ret < 0)
486 		hwlock = NULL;
487 
488 out:
489 	mutex_unlock(&hwspinlock_tree_lock);
490 	return hwlock;
491 }
492 EXPORT_SYMBOL_GPL(hwspin_lock_request);
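
/*
 * Example: a minimal dynamic-allocation sketch; my_send_id_to_remote()
 * stands in for whatever platform-specific IPC announces the id to the
 * remote core, and is purely hypothetical:
 *
 *	struct hwspinlock *hwlock;
 *
 *	hwlock = hwspin_lock_request();
 *	if (!hwlock)
 *		return -EBUSY;
 *
 *	my_send_id_to_remote(hwspin_lock_get_id(hwlock));	// hypothetical
 *
 *	// ... take/release the lock as shown above ...
 *
 *	hwspin_lock_free(hwlock);
 */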
493 
494 /**
495  * hwspin_lock_request_specific() - request for a specific hwspinlock
496  * @id: index of the specific hwspinlock that is requested
497  *
498  * This function should be called by users of the hwspinlock module,
499  * in order to assign them a specific hwspinlock.
500  * Usually early board code will be calling this function in order to
501  * reserve specific hwspinlock ids for predefined purposes.
502  *
503  * Should be called from a process context (might sleep)
504  *
505  * Returns the address of the assigned hwspinlock, or NULL on error
506  */
507 struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
508 {
509 	struct hwspinlock *hwlock;
510 	int ret;
511 
512 	mutex_lock(&hwspinlock_tree_lock);
513 
514 	/* make sure this hwspinlock exists */
515 	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
516 	if (!hwlock) {
517 		pr_warn("hwspinlock %u does not exist\n", id);
518 		goto out;
519 	}
520 
521 	/* sanity check (this shouldn't happen) */
522 	WARN_ON(hwlock_to_id(hwlock) != id);
523 
524 	/* make sure this hwspinlock is unused */
525 	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
526 	if (ret == 0) {
527 		pr_warn("hwspinlock %u is already in use\n", id);
528 		hwlock = NULL;
529 		goto out;
530 	}
531 
532 	/* mark as used and power up */
533 	ret = __hwspin_lock_request(hwlock);
534 	if (ret < 0)
535 		hwlock = NULL;
536 
537 out:
538 	mutex_unlock(&hwspinlock_tree_lock);
539 	return hwlock;
540 }
541 EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
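
/*
 * Example: a minimal sketch reserving a predefined lock; the id 7 is
 * purely illustrative, real ids follow whatever convention the local
 * and remote cores agreed upon:
 *
 *	struct hwspinlock *hwlock;
 *
 *	hwlock = hwspin_lock_request_specific(7);
 *	if (!hwlock)
 *		return -EBUSY;
 */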
542 
543 /**
544  * hwspin_lock_free() - free a specific hwspinlock
545  * @hwlock: the specific hwspinlock to free
546  *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
550  *
551  * Should be called from a process context (might sleep)
552  *
553  * Returns 0 on success, or an appropriate error code on failure
554  */
555 int hwspin_lock_free(struct hwspinlock *hwlock)
556 {
557 	struct device *dev;
558 	struct hwspinlock *tmp;
559 	int ret;
560 
561 	if (!hwlock) {
562 		pr_err("invalid hwlock\n");
563 		return -EINVAL;
564 	}
565 
566 	dev = hwlock->bank->dev;
567 	mutex_lock(&hwspinlock_tree_lock);
568 
569 	/* make sure the hwspinlock is used */
570 	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
571 							HWSPINLOCK_UNUSED);
572 	if (ret == 1) {
573 		dev_err(dev, "%s: hwlock is already free\n", __func__);
574 		dump_stack();
575 		ret = -EINVAL;
576 		goto out;
577 	}
578 
579 	/* notify the underlying device that power is not needed */
580 	ret = pm_runtime_put(dev);
581 	if (ret < 0)
582 		goto out;
583 
584 	/* mark this hwspinlock as available */
585 	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
586 							HWSPINLOCK_UNUSED);
587 
588 	/* sanity check (this shouldn't happen) */
589 	WARN_ON(tmp != hwlock);
590 
591 	module_put(dev->driver->owner);
592 
593 out:
594 	mutex_unlock(&hwspinlock_tree_lock);
595 	return ret;
596 }
597 EXPORT_SYMBOL_GPL(hwspin_lock_free);
598 
599 MODULE_LICENSE("GPL v2");
600 MODULE_DESCRIPTION("Hardware spinlock interface");
601 MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");
602