/*
 * include/asm-xtensa/spinlock.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_SPINLOCK_H
#define _XTENSA_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * spinlock
 *
 * There is at most one owner of a spinlock.  Unlike rwlocks (see below),
 * there are no different types of spinlock owners.
 *
 * When trying to obtain a spinlock, the lock function "spins", or
 * busy-waits, until the lock is obtained.  While spinning, it is assumed
 * that the current owner will soon release the spinlock, making it
 * available to others.  Use the trylock functions to avoid spinning
 * forever.
 *
 * possible values:
 *
 *   0          nobody owns the spinlock
 *   1          somebody owns the spinlock
 */

#define arch_spin_is_locked(x) ((x)->slock != 0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	/*
	 * S32C1I stores %0 to memory only if the word in memory equals
	 * the SCOMPARE1 special register, and always returns the old
	 * memory value in %0: expect 0 (unlocked), try to store 1, and
	 * retry until the value read back is 0.
	 */
	__asm__ __volatile__(
			"       movi    %0, 0\n"
			"       wsr     %0, scompare1\n"
			"1:     movi    %0, 1\n"
			"       s32c1i  %0, %1, 0\n"
			"       bnez    %0, 1b\n"
			: "=&a" (tmp)
			: "a" (&lock->slock)
			: "memory");
}

/* Returns 1 if the lock is obtained, 0 otherwise. */

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	/* A single S32C1I attempt; no retry loop. */
	__asm__ __volatile__(
			"       movi    %0, 0\n"
			"       wsr     %0, scompare1\n"
			"       movi    %0, 1\n"
			"       s32c1i  %0, %1, 0\n"
			: "=&a" (tmp)
			: "a" (&lock->slock)
			: "memory");

	return tmp == 0 ? 1 : 0;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	/* S32RI is a store with release semantics. */
	__asm__ __volatile__(
			"       movi    %0, 0\n"
			"       s32ri   %0, %1, 0\n"
			: "=&a" (tmp)
			: "a" (&lock->slock)
			: "memory");
}

/*
 * rwlock
 *
 * Read-write locks are really a more flexible kind of spinlock.  They
 * allow multiple readers but only one writer.  Write ownership is
 * exclusive (i.e., all other readers and writers are blocked from
 * ownership while there is a write owner).  These rwlocks are unfair to
 * writers: readers can starve a writer for an indefinite time.
 *
 * possible values:
 *
 *   0           nobody owns the rwlock
 *  >0           one or more readers own the rwlock
 *               (the positive value is the actual number of readers)
 *  0x80000000   one writer owns the rwlock, no other writers, no readers
 */

#define arch_write_can_lock(x)  ((x)->lock == 0)

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	/* Atomically replace 0 with 0x80000000, the single writer bit. */
	__asm__ __volatile__(
			"       movi    %0, 0\n"
			"       wsr     %0, scompare1\n"
			"1:     movi    %0, 1\n"
			"       slli    %0, %0, 31\n"
			"       s32c1i  %0, %1, 0\n"
			"       bnez    %0, 1b\n"
			: "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");
}

/* Returns 1 if the lock is obtained, 0 otherwise. */

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
			"       movi    %0, 0\n"
			"       wsr     %0, scompare1\n"
			"       movi    %0, 1\n"
			"       slli    %0, %0, 31\n"
			"       s32c1i  %0, %1, 0\n"
			: "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");

	return tmp == 0 ? 1 : 0;
}
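
/*
 * For illustration only: the S32C1I sequence in arch_write_trylock()
 * above behaves like the following C, executed by the hardware as one
 * indivisible memory operation (SCOMPARE1 holds the expected value 0):
 *
 *	old = rw->lock;
 *	if (old == 0)
 *		rw->lock = 0x80000000;	(set the single writer bit)
 *	tmp = old;			(tmp == 0 means success)
 */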

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	/* Clear the writer bit with a release store. */
	__asm__ __volatile__(
			"       movi    %0, 0\n"
			"       s32ri   %0, %1, 0\n"
			: "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;
	unsigned long result;

	/*
	 * Spin while a writer holds the lock (the value is negative),
	 * then atomically increment the reader count; restart whenever
	 * S32C1I reads back a value other than the one compared against.
	 */
	__asm__ __volatile__(
			"1:     l32i    %1, %2, 0\n"
			"       bltz    %1, 1b\n"
			"       wsr     %1, scompare1\n"
			"       addi    %0, %1, 1\n"
			"       s32c1i  %0, %2, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");
}

/* Returns 1 if the lock is obtained, 0 otherwise. */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long result;
	unsigned long tmp;

	/*
	 * Give up immediately if a writer holds the lock (the
	 * incremented value is still negative); otherwise make a single
	 * S32C1I attempt.  The subtraction leaves 0 in %0 only if the
	 * store saw the value that was originally read.
	 */
	__asm__ __volatile__(
			"       l32i    %1, %2, 0\n"
			"       addi    %0, %1, 1\n"
			"       bltz    %0, 1f\n"
			"       wsr     %1, scompare1\n"
			"       s32c1i  %0, %2, 0\n"
			"       sub     %0, %0, %1\n"
			"1:\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");

	return result == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp1, tmp2;

	/*
	 * Atomically decrement the reader count, retrying if the value
	 * changed between the load and the S32C1I.
	 */
	__asm__ __volatile__(
			"1:     l32i    %1, %2, 0\n"
			"       addi    %0, %1, -1\n"
			"       wsr     %1, scompare1\n"
			"       s32c1i  %0, %2, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (tmp1), "=&a" (tmp2)
			: "a" (&rw->lock)
			: "memory");
}

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#endif /* _XTENSA_SPINLOCK_H */