#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#define ALT_SMP(smp, up)					\
	"9998:	" smp "\n"					\
	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
	"	.long	9998b\n"				\
	"	" up "\n"					\
	"	.popsection\n"

#ifdef CONFIG_THUMB2_KERNEL
#define SEV		ALT_SMP("sev.w", "nop.w")
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	ALT_SMP(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define SEV		ALT_SMP("sev", "nop")
#define WFE(cond)	ALT_SMP("wfe" cond, "nop")
#endif

/*
 * Data synchronisation barrier followed by SEV: make sure the unlock
 * store has completed before waking any CPUs waiting in WFE.
 */
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
	__asm__ __volatile__ (
		"dsb\n"
		SEV
	);
#else
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c7, c10, 4\n"
		SEV
		: : "r" (0)
	);
#endif
}

/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value.  If it is zero, we may have
 * won the lock, so we try exclusively storing it.  A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */

#define arch_spin_is_locked(x)		((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teqeq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	smp_mb();
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
"	str	%1, [%0]\n"
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/*
 * RWLOCKS
 *
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
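/*
 * Lock word encoding (illustrative summary, inferred from the
 * implementations below):
 *
 *	0x00000000		unlocked
 *	0x80000000		write-locked (bit 31 set)
 *	1 .. 0x7fffffff		read-locked; the value is the number of
 *				readers currently holding the lock
 *
 * A writer can only take the lock from the fully unlocked state; a
 * reader increments the count and succeeds as long as the result does
 * not go negative, i.e. bit 31 stays clear.
 */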

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 *
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2 = 1;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	: "=&r" (tmp), "+r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
	return tmp2 == 0;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */