/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>

/*
 * Spinlock implementation.
 *
 * The old value is read exclusively and the new one, if unlocked, is written
 * exclusively. In case of failure, the loop is restarted.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 *
 * Unlocked value: 0
 * Locked value: 1
 */

#define arch_spin_is_locked(x)		((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp;

	asm volatile(
	"	sevl\n"
	"1:	wfe\n"
	"2:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 1b\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 2b\n"
	: "=&r" (tmp), "+Q" (lock->lock)
	: "r" (1)
	: "cc", "memory");
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp;

	asm volatile(
	"2:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 1f\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 2b\n"
	"1:\n"
	: "=&r" (tmp), "+Q" (lock->lock)
	: "r" (1)
	: "cc", "memory");

	return !tmp;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(
	"	stlr	%w1, %0\n"
	: "=Q" (lock->lock) : "r" (0) : "memory");
}

/*
 * Write lock implementation.
 *
 * Write locks set bit 31. Unlocking is done by writing 0 since the lock is
 * exclusively held.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(
	"	sevl\n"
	"1:	wfe\n"
	"2:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 1b\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 2b\n"
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "cc", "memory");
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(
	"	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 1f\n"
	"	stxr	%w0, %w2, %1\n"
	"1:\n"
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "cc", "memory");

	return !tmp;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(
	"	stlr	%w1, %0\n"
	: "=Q" (rw->lock) : "r" (0) : "memory");
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)
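
/*
 * Illustrative sketch only, not part of this header's API: ignoring the
 * WFE/SEV wait mechanism, the LDAXR/STXR sequences above behave like a
 * compare-and-swap loop that installs the locked value only while the word
 * is 0, and unlock is a plain store-release of 0. The hypothetical helpers
 * below express that with the GCC/Clang __atomic builtins; arch_spin_lock()
 * has the same shape with 1 stored instead of 0x80000000.
 *
 *	static inline void example_write_lock(unsigned int *lock)
 *	{
 *		unsigned int expected = 0;
 *
 *		// Retry until the word was observed as 0 and bit 31 was
 *		// installed with acquire semantics.
 *		while (!__atomic_compare_exchange_n(lock, &expected,
 *						    0x80000000U, 0,
 *						    __ATOMIC_ACQUIRE,
 *						    __ATOMIC_RELAXED))
 *			expected = 0;
 *	}
 *
 *	static inline void example_write_unlock(unsigned int *lock)
 *	{
 *		// Like STLR: store-release of 0; the lock is exclusively held.
 *		__atomic_store_n(lock, 0, __ATOMIC_RELEASE);
 *	}
 */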

/*
 * Read lock implementation.
 *
 * It exclusively loads the lock value, increments it and stores the new value
 * back if positive and the CPU still exclusively owns the location. If the
 * value is negative, the lock is already held.
 *
 * During unlocking there may be multiple active read locks but no write lock.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(
	"	sevl\n"
	"1:	wfe\n"
	"2:	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 1b\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 2b\n"
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(
	"1:	ldxr	%w0, %2\n"
	"	sub	%w0, %w0, #1\n"
	"	stlxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2 = 1;

	asm volatile(
	"	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 1f\n"
	"	stxr	%w1, %w0, %2\n"
	"1:\n"
	: "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");

	return !tmp2;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif	/* __ASM_SPINLOCK_H */
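
/*
 * Illustrative sketch only, not part of this header's API: bits 0-30 of an
 * arch_rwlock_t hold the reader count and bit 31 is reserved for the writer,
 * so a reader may enter only while the incremented value stays positive.
 * The hypothetical helper below mirrors the single LDAXR/STXR attempt made
 * by arch_read_trylock() using the GCC/Clang __atomic builtins (a weak
 * compare-and-swap stands in for a possibly failing exclusive store):
 *
 *	static inline int example_read_trylock(unsigned int *lock)
 *	{
 *		unsigned int old = __atomic_load_n(lock, __ATOMIC_RELAXED);
 *
 *		// Bit 31 set after the increment means a writer holds the lock.
 *		if ((old + 1) & 0x80000000U)
 *			return 0;
 *
 *		return __atomic_compare_exchange_n(lock, &old, old + 1, 1,
 *						   __ATOMIC_ACQUIRE,
 *						   __ATOMIC_RELAXED);
 *	}
 */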