/*
 * include/asm-xtensa/spinlock.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_SPINLOCK_H
#define _XTENSA_SPINLOCK_H

/*
 * spinlock
 *
 * There is at most one owner of a spinlock.  There are no different
 * types of spinlock owners like there are for rwlocks (see below).
 *
 * When trying to obtain a spinlock, the lock function "spins", or busy-
 * waits, until the lock is obtained.  While it spins, presumably some
 * other owner will soon give up the spinlock, making it available to
 * others.  Use the trylock functions to avoid spinning forever.
 *
 * possible values:
 *
 *    0         nobody owns the spinlock
 *    1         somebody owns the spinlock
 */
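
/*
 * All of the lock sequences below are built on the Xtensa S32C1I
 * compare-and-swap instruction: the SCOMPARE1 special register holds
 * the expected old value, S32C1I stores its register operand only if
 * the word in memory still equals SCOMPARE1, and in either case the
 * register operand receives the previous memory contents.
 *
 * A rough C-level sketch of the spinlock protocol (illustrative only,
 * not part of this header; "cas(p, old, new)" stands for that
 * compare-and-swap primitive and returns the previous value of *p):
 *
 *	while (cas(&lock->slock, 0, 1) != 0)
 *		;				// lock: retry until it was free
 *
 *	locked = (cas(&lock->slock, 0, 1) == 0);	// trylock
 *
 *	lock->slock = 0;		// unlock (store with release semantics)
 */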

#define __raw_spin_is_locked(x) ((x)->slock != 0)
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
			/* SCOMPARE1 <- 0: the value we expect when unlocked */
			"       movi    %0, 0\n"
			"       wsr     %0, scompare1\n"
			/* try to store 1; %0 receives the previous lock value */
			"1:     movi    %0, 1\n"
			"       s32c1i  %0, %1, 0\n"
			/* retry while the previous value was non-zero (locked) */
			"       bnez    %0, 1b\n"
			: "=&a" (tmp)
			: "a" (&lock->slock)
			: "memory");
}

/* Returns 1 if the lock is obtained, 0 otherwise. */

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
			/* single attempt: store 1 if the lock word is still 0 */
			"       movi    %0, 0\n"
			"       wsr     %0, scompare1\n"
			"       movi    %0, 1\n"
			"       s32c1i  %0, %1, 0\n"
			: "=&a" (tmp)
			: "a" (&lock->slock)
			: "memory");

	/* tmp holds the previous lock value: 0 means we took the lock */
	return tmp == 0 ? 1 : 0;
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
			/* store-release of 0: the critical section completes first */
			"       movi    %0, 0\n"
			"       s32ri   %0, %1, 0\n"
			: "=&a" (tmp)
			: "a" (&lock->slock)
			: "memory");
}

/*
 * rwlock
 *
 * Read-write locks are really a more flexible kind of spinlock.  They
 * allow multiple readers but only one writer.  Write ownership is
 * exclusive (i.e., all other readers and writers are blocked from
 * ownership while there is a write owner).  These rwlocks are unfair
 * to writers: writers can be starved for an indefinite time by readers.
 *
 * possible values:
 *
 *   0          nobody owns the rwlock
 *  >0          one or more readers own the rwlock
 *                (the positive value is the actual number of readers)
 *  0x80000000  one writer owns the rwlock, no other writers, no readers
 */
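
/*
 * With that encoding, a writer tries to move the whole word from 0 to
 * 0x80000000 (the sign bit) in a single compare-and-swap, while a
 * reader increments the count as long as the sign bit is clear.  A
 * rough C-level sketch (illustrative only, not part of this header;
 * "cas" as in the spinlock sketch above):
 *
 *	while (cas(&rw->lock, 0, 0x80000000) != 0)
 *		;				// write_lock
 *
 *	int v;					// read_lock
 *	do {
 *		v = rw->lock;
 *	} while (v < 0 || cas(&rw->lock, v, v + 1) != v);
 */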

#define __raw_write_can_lock(x)  ((x)->lock == 0)

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
			/* SCOMPARE1 <- 0: only an idle rwlock can be write-locked */
			"       movi    %0, 0\n"
			"       wsr     %0, scompare1\n"
			/* try to store the writer bit (1 << 31 == 0x80000000) */
			"1:     movi    %0, 1\n"
			"       slli    %0, %0, 31\n"
			"       s32c1i  %0, %1, 0\n"
			/* retry while the previous value was non-zero (readers or writer) */
			"       bnez    %0, 1b\n"
			: "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");
}

/* Returns 1 if the lock is obtained, 0 otherwise. */

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
			/* single attempt to move the rwlock from 0 to the writer bit */
			"       movi    %0, 0\n"
			"       wsr     %0, scompare1\n"
			"       movi    %0, 1\n"
			"       slli    %0, %0, 31\n"
			"       s32c1i  %0, %1, 0\n"
			: "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");

	/* tmp holds the previous value: 0 means the write lock is ours */
	return tmp == 0 ? 1 : 0;
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
			/* store-release of 0: drop the writer bit */
			"       movi    %0, 0\n"
			"       s32ri   %0, %1, 0\n"
			: "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned long tmp;
	unsigned long result;

	__asm__ __volatile__(
			/* wait until the writer bit (sign bit) is clear */
			"1:     l32i    %1, %2, 0\n"
			"       bltz    %1, 1b\n"
			/* expect the value just read, try to store value + 1 */
			"       wsr     %1, scompare1\n"
			"       addi    %0, %1, 1\n"
			"       s32c1i  %0, %2, 0\n"
			/* if the word changed under us, start over */
			"       bne     %0, %1, 1b\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");
}

/* Returns 1 if the lock is obtained, 0 otherwise. */

static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
	unsigned long result;
	unsigned long tmp;

	__asm__ __volatile__(
			/* if the incremented value is negative, a writer holds the lock: bail out */
			"       l32i    %1, %2, 0\n"
			"       addi    %0, %1, 1\n"
			"       bltz    %0, 1f\n"
			/* try to install the incremented reader count */
			"       wsr     %1, scompare1\n"
			"       s32c1i  %0, %2, 0\n"
			/* %0 - %1 is zero iff s32c1i saw the value we expected */
			"       sub     %0, %0, %1\n"
			"1:\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");

	return result == 0;
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
			/* decrement the reader count with a compare-and-swap retry loop */
			"1:     l32i    %1, %2, 0\n"
			"       addi    %0, %1, -1\n"
			"       wsr     %1, scompare1\n"
			"       s32c1i  %0, %2, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (tmp1), "=&a" (tmp2)
			: "a" (&rw->lock)
			: "memory");
}

#endif	/* _XTENSA_SPINLOCK_H */