// SPDX-License-Identifier: GPL-2.0
/*
 * Hardware spinlock framework
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/radix-tree.h>
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include "hwspinlock_internal.h"

/* retry delay used in atomic context */
#define HWSPINLOCK_RETRY_DELAY_US 100

/* radix tree tags */
#define HWSPINLOCK_UNUSED (0) /* tags an hwspinlock as unused */

/*
 * A radix tree is used to maintain the available hwspinlock instances.
 * The tree associates hwspinlock pointers with their integer key id,
 * and provides easy-to-use API which makes the hwspinlock core code simple
 * and easy to read.
 *
 * Radix trees are quick on lookups, and reasonably efficient in terms of
 * storage, especially with high density usages such as this framework
 * requires (a continuous range of integer keys, beginning with zero, is
 * used as the ID's of the hwspinlock instances).
 *
 * The radix tree API supports tagging items in the tree, which this
 * framework uses to mark unused hwspinlock instances (see the
 * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
 * tree, looking for an unused hwspinlock instance, is now reduced to a
 * single radix tree API call.
 */
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);

/*
 * Synchronization of access to the tree is achieved using this mutex,
 * as the radix-tree API requires that users provide all synchronisation.
 * A mutex is needed because we're using non-atomic radix tree allocations.
 */
static DEFINE_MUTEX(hwspinlock_tree_lock);

/**
 * __hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 * @mode: controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved
 * (if requested)
 *
 * This function attempts to lock an hwspinlock, and will immediately
 * fail if the hwspinlock is already taken.
 *
 * Caution: If the mode is HWLOCK_RAW, the caller must protect the routine
 * that takes the hardware lock with a mutex or spinlock of its own. In some
 * scenarios, users need to perform time-consuming or sleepable operations
 * while holding the hardware lock, so they rely on a sleepable lock (such as
 * a mutex) to protect those operations.
 *
 * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
 * return from this function, preemption (and possibly interrupts) is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock as
 * soon as possible. This is required in order to minimize remote cores polling
 * on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether they want their previous state to be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_trylock, spin_trylock_irq and
 * spin_trylock_irqsave.
 *
 * Returns 0 if we successfully locked the hwspinlock or -EBUSY if
 * the hwspinlock was already taken.
 * This function will never sleep.
 */
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
        int ret;

        if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE)))
                return -EINVAL;

        /*
         * This spin_lock{_irq, _irqsave} serves three purposes:
         *
         * 1. Disable preemption, in order to minimize the period of time
         *    in which the hwspinlock is taken. This is important in order
         *    to minimize the possible polling on the hardware interconnect
         *    by a remote user of this lock.
         * 2. Make the hwspinlock SMP-safe (so we can take it from
         *    additional contexts on the local host).
         * 3. Ensure that in_atomic/might_sleep checks catch potential
         *    problems with hwspinlock usage (e.g. scheduler checks like
         *    'scheduling while atomic' etc.)
         */
        switch (mode) {
        case HWLOCK_IRQSTATE:
                ret = spin_trylock_irqsave(&hwlock->lock, *flags);
                break;
        case HWLOCK_IRQ:
                ret = spin_trylock_irq(&hwlock->lock);
                break;
        case HWLOCK_RAW:
        case HWLOCK_IN_ATOMIC:
                ret = 1;
                break;
        default:
                ret = spin_trylock(&hwlock->lock);
                break;
        }

        /* is lock already taken by another context on the local cpu ? */
        if (!ret)
                return -EBUSY;

        /* try to take the hwspinlock device */
        ret = hwlock->bank->ops->trylock(hwlock);

        /* if hwlock is already taken, undo spin_trylock_* and exit */
        if (!ret) {
                switch (mode) {
                case HWLOCK_IRQSTATE:
                        spin_unlock_irqrestore(&hwlock->lock, *flags);
                        break;
                case HWLOCK_IRQ:
                        spin_unlock_irq(&hwlock->lock);
                        break;
                case HWLOCK_RAW:
                case HWLOCK_IN_ATOMIC:
                        /* Nothing to do */
                        break;
                default:
                        spin_unlock(&hwlock->lock);
                        break;
                }

                return -EBUSY;
        }

        /*
         * We can be sure the other core's memory operations
         * are observable to us only _after_ we successfully take
         * the hwspinlock, and we must make sure that subsequent memory
         * operations (both reads and writes) will not be reordered before
         * we actually took the hwspinlock.
         *
         * Note: the implicit memory barrier of the spinlock above is too
         * early, so we need this additional explicit memory barrier.
         */
        mb();

        return 0;
}
EXPORT_SYMBOL_GPL(__hwspin_trylock);
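
/*
 * Example: a minimal usage sketch of the trylock path, based on the
 * hwspin_trylock_irqsave()/hwspin_unlock_irqrestore() wrappers declared in
 * <linux/hwspinlock.h>; the hwlock pointer and the contents of the critical
 * section are placeholders:
 *
 *      unsigned long flags;
 *      int ret;
 *
 *      ret = hwspin_trylock_irqsave(hwlock, &flags);
 *      if (ret)
 *              return ret;     // -EBUSY: a local or remote owner holds it
 *
 *      // short critical section shared with the remote core
 *
 *      hwspin_unlock_irqrestore(hwlock, &flags);
 */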

/**
 * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @timeout: timeout value in msecs
 * @mode: mode which controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved
 * (if requested)
 *
 * This function locks the given @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up after @timeout msecs have elapsed.
 *
 * Caution: If the mode is HWLOCK_RAW, the caller must protect the routine
 * that takes the hardware lock with a mutex or spinlock of its own. In some
 * scenarios, users need to perform time-consuming or sleepable operations
 * while holding the hardware lock, so they rely on a sleepable lock (such as
 * a mutex) to protect those operations.
 *
 * If the mode is HWLOCK_IN_ATOMIC (called from an atomic context) the timeout
 * is handled with busy-waiting delays, and hence should not exceed a few
 * msecs.
 *
 * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
 * return from this function, preemption (and possibly interrupts) is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock as
 * soon as possible. This is required in order to minimize remote cores polling
 * on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether they want their previous state to be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @timeout msecs). The function will never sleep.
 */
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
                          int mode, unsigned long *flags)
{
        int ret;
        unsigned long expire, atomic_delay = 0;

        expire = msecs_to_jiffies(to) + jiffies;

        for (;;) {
                /* Try to take the hwspinlock */
                ret = __hwspin_trylock(hwlock, mode, flags);
                if (ret != -EBUSY)
                        break;

                /*
                 * The lock is already taken, let's check if the user wants
                 * us to try again
                 */
                if (mode == HWLOCK_IN_ATOMIC) {
                        udelay(HWSPINLOCK_RETRY_DELAY_US);
                        atomic_delay += HWSPINLOCK_RETRY_DELAY_US;
                        if (atomic_delay > to * 1000)
                                return -ETIMEDOUT;
                } else {
                        if (time_is_before_eq_jiffies(expire))
                                return -ETIMEDOUT;
                }

                /*
                 * Allow platform-specific relax handlers to prevent
                 * hogging the interconnect (no sleeping, though)
                 */
                if (hwlock->bank->ops->relax)
                        hwlock->bank->ops->relax(hwlock);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);
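
/*
 * Example: bounded acquisition with the default mode (preemption stays
 * disabled while the lock is held), using the hwspin_lock_timeout() and
 * hwspin_unlock() wrappers from <linux/hwspinlock.h>; the 10 msec budget
 * and the hwlock pointer are illustrative only:
 *
 *      int ret;
 *
 *      ret = hwspin_lock_timeout(hwlock, 10);
 *      if (ret == -ETIMEDOUT)
 *              return ret;     // the remote owner held the lock for too long
 *
 *      // short, non-sleeping critical section
 *
 *      hwspin_unlock(hwlock);
 */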

/**
 * __hwspin_unlock() - unlock a specific hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @mode: controls whether local interrupts need to be restored or not
 * @flags: previous caller's interrupt state to restore (if requested)
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * (possibly) enable interrupts or restore their previous state.
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 *
 * The user decides whether local interrupts should be enabled or not, and
 * if yes, whether they want their previous state to be restored. It is up
 * to the user to choose the appropriate @mode of operation, exactly the
 * same way users decide between spin_unlock, spin_unlock_irq and
 * spin_unlock_irqrestore.
 *
 * The function will never sleep.
 */
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
        if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE)))
                return;

        /*
         * We must make sure that memory operations (both reads and writes),
         * done before unlocking the hwspinlock, will not be reordered
         * after the lock is released.
         *
         * That's the purpose of this explicit memory barrier.
         *
         * Note: the memory barrier induced by the spin_unlock below is too
         * late; the other core is going to access memory soon after it will
         * take the hwspinlock, and by then we want to be sure our memory
         * operations are already observable.
         */
        mb();

        hwlock->bank->ops->unlock(hwlock);

        /* Undo the spin_trylock{_irq, _irqsave} called while locking */
        switch (mode) {
        case HWLOCK_IRQSTATE:
                spin_unlock_irqrestore(&hwlock->lock, *flags);
                break;
        case HWLOCK_IRQ:
                spin_unlock_irq(&hwlock->lock);
                break;
        case HWLOCK_RAW:
        case HWLOCK_IN_ATOMIC:
                /* Nothing to do */
                break;
        default:
                spin_unlock(&hwlock->lock);
                break;
        }
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);
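
/*
 * Example: HWLOCK_IN_ATOMIC is meant for callers that are already in atomic
 * context (e.g. under one of their own spinlocks or in an interrupt
 * handler), where the timeout is enforced with udelay()-based busy waiting
 * rather than jiffies. A minimal sketch; hwlock and my_driver_lock are
 * hypothetical:
 *
 *      spin_lock(&my_driver_lock);
 *      if (!hwspin_lock_timeout_in_atomic(hwlock, 2)) {
 *              // very short critical section
 *              hwspin_unlock_in_atomic(hwlock);
 *      }
 *      spin_unlock(&my_driver_lock);
 */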

/**
 * hwspin_lock_bust() - bust a specific hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to bust
 * @id: identifier of the remote lock holder, if applicable
 *
 * This function will bust a hwspinlock that was previously acquired as
 * long as the current owner of the lock matches the id given by the caller.
 *
 * Context: Process context.
 *
 * Returns: 0 on success, -EINVAL if the hwspinlock does not exist or the
 * bust operation fails, and -EOPNOTSUPP if the bust operation is not
 * defined for the hwspinlock.
 */
int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id)
{
        if (WARN_ON(!hwlock))
                return -EINVAL;

        if (!hwlock->bank->ops->bust) {
                pr_err("bust operation not defined\n");
                return -EOPNOTSUPP;
        }

        return hwlock->bank->ops->bust(hwlock, id);
}
EXPORT_SYMBOL_GPL(hwspin_lock_bust);
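
/*
 * Example: busting is typically an error-recovery operation, used when the
 * remote core that owns the lock has crashed and will never release it. A
 * hedged sketch, where hwlock and REMOTE_OWNER_ID stand in for values the
 * platform driver defines:
 *
 *      // the remote core is known to be stopped at this point
 *      ret = hwspin_lock_bust(hwlock, REMOTE_OWNER_ID);
 *      if (ret)
 *              dev_warn(dev, "failed to bust hwspinlock: %d\n", ret);
 */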

/**
 * of_hwspin_lock_simple_xlate - translate hwlock_spec to return a lock id
 * @hwlock_spec: hwlock specifier as found in the device tree
 *
 * This is a simple translation function, suitable for hwspinlock platform
 * drivers that only have a lock specifier length of 1.
 *
 * Returns a relative index of the lock within a specified bank on success,
 * or -EINVAL on invalid specifier cell count.
 */
static inline int
of_hwspin_lock_simple_xlate(const struct of_phandle_args *hwlock_spec)
{
        if (WARN_ON(hwlock_spec->args_count != 1))
                return -EINVAL;

        return hwlock_spec->args[0];
}

/**
 * of_hwspin_lock_get_id() - get lock id for an OF phandle-based specific lock
 * @np: device node from which to request the specific hwlock
 * @index: index of the hwlock in the list of values
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock using the phandle of the
 * hwspinlock device, so that it can be requested using the normal
 * hwspin_lock_request_specific() API.
 *
 * Returns the global lock id number on success, -EPROBE_DEFER if the hwspinlock
 * device is not yet registered, -EINVAL on invalid args specifier value or an
 * appropriate error as returned from the OF parsing of the DT client node.
 */
int of_hwspin_lock_get_id(struct device_node *np, int index)
{
        struct of_phandle_args args;
        struct hwspinlock *hwlock;
        struct radix_tree_iter iter;
        void **slot;
        int id;
        int ret;

        ret = of_parse_phandle_with_args(np, "hwlocks", "#hwlock-cells", index,
                                         &args);
        if (ret)
                return ret;

        if (!of_device_is_available(args.np)) {
                ret = -ENOENT;
                goto out;
        }

        /* Find the hwspinlock device: we need its base_id */
        ret = -EPROBE_DEFER;
        rcu_read_lock();
        radix_tree_for_each_slot(slot, &hwspinlock_tree, &iter, 0) {
                hwlock = radix_tree_deref_slot(slot);
                if (unlikely(!hwlock))
                        continue;
                if (radix_tree_deref_retry(hwlock)) {
                        slot = radix_tree_iter_retry(&iter);
                        continue;
                }

                if (device_match_of_node(hwlock->bank->dev, args.np)) {
                        ret = 0;
                        break;
                }
        }
        rcu_read_unlock();
        if (ret < 0)
                goto out;

        id = of_hwspin_lock_simple_xlate(&args);
        if (id < 0 || id >= hwlock->bank->num_locks) {
                ret = -EINVAL;
                goto out;
        }
        id += hwlock->bank->base_id;

out:
        of_node_put(args.np);
        return ret ? ret : id;
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id);

/**
 * of_hwspin_lock_get_id_byname() - get lock id for a specified hwlock name
 * @np: device node from which to request the specific hwlock
 * @name: hwlock name
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock using the specified name of
 * the hwspinlock device, so that it can be requested using the normal
 * hwspin_lock_request_specific() API.
 *
 * Returns the global lock id number on success, -EPROBE_DEFER if the hwspinlock
 * device is not yet registered, -EINVAL on invalid args specifier value or an
 * appropriate error as returned from the OF parsing of the DT client node.
 */
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
{
        int index;

        if (!name)
                return -EINVAL;

        index = of_property_match_string(np, "hwlock-names", name);
        if (index < 0)
                return index;

        return of_hwspin_lock_get_id(np, index);
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id_byname);
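
/*
 * Example: a DT client node typically carries properties along these lines
 * (node and property values below are illustrative, not taken from a real
 * binding):
 *
 *      client@0 {
 *              ...
 *              hwlocks = <&hwlock_bank 2>;
 *              hwlock-names = "sync";
 *      };
 *
 * and a client driver then resolves and claims the lock in its probe path;
 * a minimal sketch, with error handling trimmed:
 *
 *      int id;
 *      struct hwspinlock *hwlock;
 *
 *      id = of_hwspin_lock_get_id_byname(dev->of_node, "sync");
 *      if (id < 0)
 *              return id;      // may be -EPROBE_DEFER
 *
 *      hwlock = hwspin_lock_request_specific(id);
 *      if (!hwlock)
 *              return -EBUSY;
 */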

static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
{
        struct hwspinlock *tmp;
        int ret;

        mutex_lock(&hwspinlock_tree_lock);

        ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
        if (ret) {
                if (ret == -EEXIST)
                        pr_err("hwspinlock id %d already exists!\n", id);
                goto out;
        }

        /* mark this hwspinlock as available */
        tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);

        /* self-sanity check which should never fail */
        WARN_ON(tmp != hwlock);

out:
        mutex_unlock(&hwspinlock_tree_lock);
        return ret;
}

static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
{
        struct hwspinlock *hwlock = NULL;
        int ret;

        mutex_lock(&hwspinlock_tree_lock);

        /* make sure the hwspinlock is not in use (tag is set) */
        ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
        if (ret == 0) {
                pr_err("hwspinlock %d still in use (or not present)\n", id);
                goto out;
        }

        hwlock = radix_tree_delete(&hwspinlock_tree, id);
        if (!hwlock) {
                pr_err("failed to delete hwspinlock %d\n", id);
                goto out;
        }

out:
        mutex_unlock(&hwspinlock_tree_lock);
        return hwlock;
}

/**
 * hwspin_lock_register() - register a new hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @dev: the backing device
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
                const struct hwspinlock_ops *ops, int base_id, int num_locks)
{
        struct hwspinlock *hwlock;
        int ret = 0, i;

        if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
            !ops->unlock) {
                pr_err("invalid parameters\n");
                return -EINVAL;
        }

        bank->dev = dev;
        bank->ops = ops;
        bank->base_id = base_id;
        bank->num_locks = num_locks;

        for (i = 0; i < num_locks; i++) {
                hwlock = &bank->lock[i];

                spin_lock_init(&hwlock->lock);
                hwlock->bank = bank;

                ret = hwspin_lock_register_single(hwlock, base_id + i);
                if (ret)
                        goto reg_failed;
        }

        return 0;

reg_failed:
        while (--i >= 0)
                hwspin_lock_unregister_single(base_id + i);
        return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);
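
/*
 * Example: a platform driver registers its bank roughly as follows (a sketch
 * modeled on existing hwspinlock drivers; my_hwspinlock_ops, my_trylock,
 * my_unlock and the lock count of 32 are hypothetical):
 *
 *      static const struct hwspinlock_ops my_hwspinlock_ops = {
 *              .trylock = my_trylock,
 *              .unlock  = my_unlock,
 *      };
 *
 *      // in probe(), after mapping the lock registers:
 *      bank = devm_kzalloc(&pdev->dev, struct_size(bank, lock, 32),
 *                          GFP_KERNEL);
 *      if (!bank)
 *              return -ENOMEM;
 *
 *      ret = hwspin_lock_register(bank, &pdev->dev, &my_hwspinlock_ops,
 *                                 0, 32);
 *
 * The matching hwspin_lock_unregister() call then belongs in the driver's
 * remove path (or use the devm_ variants below).
 */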

/**
 * hwspin_lock_unregister() - unregister an hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_unregister(struct hwspinlock_device *bank)
{
        struct hwspinlock *hwlock, *tmp;
        int i;

        for (i = 0; i < bank->num_locks; i++) {
                hwlock = &bank->lock[i];

                tmp = hwspin_lock_unregister_single(bank->base_id + i);
                if (!tmp)
                        return -EBUSY;

                /* self-sanity check that should never fail */
                WARN_ON(tmp != hwlock);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);

static void devm_hwspin_lock_unreg(struct device *dev, void *res)
{
        hwspin_lock_unregister(*(struct hwspinlock_device **)res);
}

static int devm_hwspin_lock_device_match(struct device *dev, void *res,
                                         void *data)
{
        struct hwspinlock_device **bank = res;

        if (WARN_ON(!bank || !*bank))
                return 0;

        return *bank == data;
}

/**
 * devm_hwspin_lock_unregister() - unregister an hw spinlock device for
 * a managed device
 * @dev: the backing device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_unregister(struct device *dev,
                                struct hwspinlock_device *bank)
{
        int ret;

        ret = devres_release(dev, devm_hwspin_lock_unreg,
                             devm_hwspin_lock_device_match, bank);
        WARN_ON(ret);

        return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_unregister);

/**
 * devm_hwspin_lock_register() - register a new hw spinlock device for
 * a managed device
 * @dev: the backing device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_register(struct device *dev,
                              struct hwspinlock_device *bank,
                              const struct hwspinlock_ops *ops,
                              int base_id, int num_locks)
{
        struct hwspinlock_device **ptr;
        int ret;

        ptr = devres_alloc(devm_hwspin_lock_unreg, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        ret = hwspin_lock_register(bank, dev, ops, base_id, num_locks);
        if (!ret) {
                *ptr = bank;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_register);
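
/*
 * Example: with the managed variant, registration in a hypothetical probe()
 * collapses to a single call (reusing the bank and ops from the sketch
 * above) and no explicit cleanup is needed on remove:
 *
 *      ret = devm_hwspin_lock_register(&pdev->dev, bank, &my_hwspinlock_ops,
 *                                      0, 32);
 *      if (ret)
 *              return ret;
 */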

/**
 * __hwspin_lock_request() - tag an hwspinlock as used and power it up
 * @hwlock: the target hwspinlock
 *
 * This is an internal function that prepares an hwspinlock instance
 * before it is given to the user. The function assumes that
 * hwspinlock_tree_lock is taken.
 *
 * Returns 0 or positive to indicate success, and a negative value to
 * indicate an error (with the appropriate error code)
 */
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
        struct device *dev = hwlock->bank->dev;
        struct hwspinlock *tmp;
        int ret;

        /* prevent underlying implementation from being removed */
        if (!try_module_get(dev->driver->owner)) {
                dev_err(dev, "%s: can't get owner\n", __func__);
                return -EINVAL;
        }

        /* notify PM core that power is now needed */
        ret = pm_runtime_get_sync(dev);
        if (ret < 0 && ret != -EACCES) {
                dev_err(dev, "%s: can't power on device\n", __func__);
                pm_runtime_put_noidle(dev);
                module_put(dev->driver->owner);
                return ret;
        }

        ret = 0;

        /* mark hwspinlock as used, should not fail */
        tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
                                   HWSPINLOCK_UNUSED);

        /* self-sanity check that should never fail */
        WARN_ON(tmp != hwlock);

        return ret;
}

/**
 * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
 * @hwlock: a valid hwspinlock instance
 *
 * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
 */
int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
        if (!hwlock) {
                pr_err("invalid hwlock\n");
                return -EINVAL;
        }

        return hwlock_to_id(hwlock);
}
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);

/**
 * hwspin_lock_request() - request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request(void)
{
        struct hwspinlock *hwlock;
        int ret;

        mutex_lock(&hwspinlock_tree_lock);

        /* look for an unused lock */
        ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
                                         0, 1, HWSPINLOCK_UNUSED);
        if (ret == 0) {
                pr_warn("a free hwspinlock is not available\n");
                hwlock = NULL;
                goto out;
        }

        /* sanity check that should never fail */
        WARN_ON(ret > 1);

        /* mark as used and power up */
        ret = __hwspin_lock_request(hwlock);
        if (ret < 0)
                hwlock = NULL;

out:
        mutex_unlock(&hwspinlock_tree_lock);
        return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request);
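
/*
 * Example: the dynamic-allocation flow, where the numeric id is what gets
 * communicated to the remote core; the ipc_send_lock_id() helper is purely
 * hypothetical:
 *
 *      struct hwspinlock *hwlock;
 *      int id;
 *
 *      hwlock = hwspin_lock_request();
 *      if (!hwlock)
 *              return -EBUSY;
 *
 *      id = hwspin_lock_get_id(hwlock);
 *      ipc_send_lock_id(id);           // tell the remote core which lock
 *
 *      // ... use hwspin_lock_timeout()/hwspin_unlock() as needed ...
 *
 *      hwspin_lock_free(hwlock);       // once the lock is no longer needed
 */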

/**
 * hwspin_lock_request_specific() - request for a specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
        struct hwspinlock *hwlock;
        int ret;

        mutex_lock(&hwspinlock_tree_lock);

        /* make sure this hwspinlock exists */
        hwlock = radix_tree_lookup(&hwspinlock_tree, id);
        if (!hwlock) {
                pr_warn("hwspinlock %u does not exist\n", id);
                goto out;
        }

        /* sanity check (this shouldn't happen) */
        WARN_ON(hwlock_to_id(hwlock) != id);

        /* make sure this hwspinlock is unused */
        ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
        if (ret == 0) {
                pr_warn("hwspinlock %u is already in use\n", id);
                hwlock = NULL;
                goto out;
        }

        /* mark as used and power up */
        ret = __hwspin_lock_request(hwlock);
        if (ret < 0)
                hwlock = NULL;

out:
        mutex_unlock(&hwspinlock_tree_lock);
        return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);

/**
 * hwspin_lock_free() - free a specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_free(struct hwspinlock *hwlock)
{
        struct device *dev;
        struct hwspinlock *tmp;
        int ret;

        if (!hwlock) {
                pr_err("invalid hwlock\n");
                return -EINVAL;
        }

        dev = hwlock->bank->dev;
        mutex_lock(&hwspinlock_tree_lock);

        /* make sure the hwspinlock is used */
        ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
                                 HWSPINLOCK_UNUSED);
        if (ret == 1) {
                dev_err(dev, "%s: hwlock is already free\n", __func__);
                dump_stack();
                ret = -EINVAL;
                goto out;
        }

        /* notify the underlying device that power is not needed */
        pm_runtime_put(dev);

        /* mark this hwspinlock as available */
        tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
                                 HWSPINLOCK_UNUSED);

        /* sanity check (this shouldn't happen) */
        WARN_ON(tmp != hwlock);

        module_put(dev->driver->owner);

out:
        mutex_unlock(&hwspinlock_tree_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);

static int devm_hwspin_lock_match(struct device *dev, void *res, void *data)
{
        struct hwspinlock **hwlock = res;

        if (WARN_ON(!hwlock || !*hwlock))
                return 0;

        return *hwlock == data;
}

static void devm_hwspin_lock_release(struct device *dev, void *res)
{
        hwspin_lock_free(*(struct hwspinlock **)res);
}

/**
 * devm_hwspin_lock_free() - free a specific hwspinlock for a managed device
 * @dev: the device to free the specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
{
        int ret;

        ret = devres_release(dev, devm_hwspin_lock_release,
                             devm_hwspin_lock_match, hwlock);
        WARN_ON(ret);

        return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_free);

/**
 * devm_hwspin_lock_request() - request an hwspinlock for a managed device
 * @dev: the device to request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
{
        struct hwspinlock **ptr, *hwlock;

        ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return NULL;

        hwlock = hwspin_lock_request();
        if (hwlock) {
                *ptr = hwlock;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return hwlock;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_request);

/**
 * devm_hwspin_lock_request_specific() - request for a specific hwspinlock for
 * a managed device
 * @dev: the device to request the specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
                                                     unsigned int id)
{
        struct hwspinlock **ptr, *hwlock;

        ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return NULL;

        hwlock = hwspin_lock_request_specific(id);
        if (hwlock) {
                *ptr = hwlock;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return hwlock;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_request_specific);
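
/*
 * Example: a client driver that wants its hwspinlock released automatically
 * on driver detach can combine the DT lookup above with the managed request;
 * a sketch with error handling trimmed:
 *
 *      id = of_hwspin_lock_get_id(dev->of_node, 0);
 *      if (id < 0)
 *              return id;
 *
 *      hwlock = devm_hwspin_lock_request_specific(dev, id);
 *      if (!hwlock)
 *              return -EBUSY;
 */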

MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");