--- spinlock.h (fb3a6bbc912b12347614e5742c7c61416cdb0ca0)
+++ spinlock.h (e5931943d02bf751b1ec849c0d2ade23d76a8d41)
 #ifndef __ASM_SPINLOCK_H
 #define __ASM_SPINLOCK_H
 
 #if __LINUX_ARM_ARCH__ < 6
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
 /*
--- 72 unchanged lines hidden ---
 /*
  * RWLOCKS
  *
  *
  * Write locks are easy - we just set bit 31.  When unlocking, we can
  * just write zero since the lock is exclusively held.
  */
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%1]\n"
 "	teq	%0, #0\n"
 #ifdef CONFIG_CPU_32v6K
 "	wfene\n"
 #endif
 "	strexeq	%0, %2, [%1]\n"
 "	teq	%0, #0\n"
 "	bne	1b"
 	: "=&r" (tmp)
 	: "r" (&rw->lock), "r" (0x80000000)
 	: "cc");
 
 	smp_mb();
 }
 
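The ldrex/strex loop in arch_write_lock() is, in effect, a compare-and-swap loop: spin while the lock word is non-zero, then atomically install bit 31 to claim the lock for writing. As a rough sketch only (not the kernel's implementation), the same protocol can be written in portable C with GCC's __atomic builtins; every demo_* name below is hypothetical:

/* Sketch under the assumption of GCC __atomic builtins; the demo_*
 * names are invented for illustration and are not kernel API. */
#include <stdbool.h>
#include <stdint.h>

typedef struct { uint32_t lock; } demo_rwlock_t;

static inline void demo_write_lock(demo_rwlock_t *rw)
{
	uint32_t expected = 0;

	/* Spin until the word is 0 (no readers, no writer), then claim
	 * it by installing bit 31.  ACQUIRE ordering plays the role of
	 * the smp_mb() after the assembly loop. */
	while (!__atomic_compare_exchange_n(&rw->lock, &expected, 0x80000000u,
					    false, __ATOMIC_ACQUIRE,
					    __ATOMIC_RELAXED))
		expected = 0;	/* a failed CAS overwrites 'expected' */
}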
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%1]\n"
 "	teq	%0, #0\n"
 "	strexeq	%0, %2, [%1]"
 	: "=&r" (tmp)
 	: "r" (&rw->lock), "r" (0x80000000)
 	: "cc");
 
 	if (tmp == 0) {
 		smp_mb();
 		return 1;
 	} else {
 		return 0;
 	}
 }
 
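arch_write_trylock() is the same exchange with the retry loop removed: one attempt, success reported only when the exclusive store went through. A hedged C analogue, reusing the hypothetical demo_rwlock_t from the sketch above:

/* Hypothetical one-shot variant: returns 1 on success, 0 if the lock
 * was observed held (trylock is allowed to fail spuriously). */
static inline int demo_write_trylock(demo_rwlock_t *rw)
{
	uint32_t expected = 0;

	return __atomic_compare_exchange_n(&rw->lock, &expected, 0x80000000u,
					   false, __ATOMIC_ACQUIRE,
					   __ATOMIC_RELAXED);
}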
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	smp_mb();
 
 	__asm__ __volatile__(
 	"str	%1, [%0]\n"
 #ifdef CONFIG_CPU_32v6K
 "	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
 "	sev\n"
 #endif
 	:
 	: "r" (&rw->lock), "r" (0)
 	: "cc");
 }
 
 /* write_can_lock - would write_trylock() succeed? */
-#define __raw_write_can_lock(x)	((x)->lock == 0)
+#define arch_write_can_lock(x)	((x)->lock == 0)
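Since a writer holds the lock exclusively, arch_write_unlock() needs no read-modify-write cycle: a barrier followed by a plain store of zero releases the lock, and arch_write_can_lock() is just a test against zero. A sketch of the same semantics, with the caveat that the DSB+SEV pair waking CPUs parked in WFE has no portable C counterpart and is simply omitted:

/* Hypothetical release: one release-ordered store of 0 stands in for
 * the kernel's smp_mb() followed by a plain str.  The SEV wakeup has
 * no C equivalent and is omitted here. */
static inline void demo_write_unlock(demo_rwlock_t *rw)
{
	__atomic_store_n(&rw->lock, 0, __ATOMIC_RELEASE);
}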
 
 /*
  * Read locks are a bit more hairy:
  *  - Exclusively load the lock value.
  *  - Increment it.
  *  - Store new lock value if positive, and we still own this location.
  *    If the value is negative, we've already failed.
  *  - If we failed to store the value, we want a negative result.
  *  - If we failed, try again.
  * Unlocking is similarly hairy.  We may have multiple read locks
  * currently active.  However, we know we won't have any write
  * locks.
  */
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, tmp2;
 
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%2]\n"
 "	adds	%0, %0, #1\n"
 "	strexpl	%1, %0, [%2]\n"
 #ifdef CONFIG_CPU_32v6K
 "	wfemi\n"
 #endif
 "	rsbpls	%0, %1, #0\n"
 "	bmi	1b"
 	: "=&r" (tmp), "=&r" (tmp2)
 	: "r" (&rw->lock)
 	: "cc");
 
 	smp_mb();
 }
 
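In arch_read_lock(), the adds sets the N flag when the incremented value has bit 31 set (a writer holds the lock), so strexpl only attempts the store for a non-negative count; rsbpls and bmi then turn either a writer-held word or a failed exclusive store into another trip around the loop. Roughly the same logic in hedged C, again with hypothetical demo_* names:

/* Hypothetical reader acquire: increment the count, but only while
 * bit 31 (the writer bit) is clear. */
static inline void demo_read_lock(demo_rwlock_t *rw)
{
	uint32_t old = __atomic_load_n(&rw->lock, __ATOMIC_RELAXED);

	for (;;) {
		if (old & 0x80000000u) {	/* writer active: reload, retry */
			old = __atomic_load_n(&rw->lock, __ATOMIC_RELAXED);
			continue;
		}
		if (__atomic_compare_exchange_n(&rw->lock, &old, old + 1,
						false, __ATOMIC_ACQUIRE,
						__ATOMIC_RELAXED))
			break;	/* a failed CAS refreshes 'old'; retry */
	}
}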
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, tmp2;
 
 	smp_mb();
 
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%2]\n"
 "	sub	%0, %0, #1\n"
--- 5 unchanged lines hidden ---
 "	mcreq	p15, 0, %0, c7, c10, 4\n"
 "	seveq"
 #endif
 	: "=&r" (tmp), "=&r" (tmp2)
 	: "r" (&rw->lock)
 	: "cc");
 }
 
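arch_read_unlock() just drops one reader from the count; the ldrex/strex loop is there only because these cores have no single-instruction atomic decrement, just the exclusive load/store pair. In C the whole body collapses to one atomic subtraction:

/* Hypothetical reader release: RELEASE ordering stands in for the
 * smp_mb() the kernel executes before the decrement. */
static inline void demo_read_unlock(demo_rwlock_t *rw)
{
	__atomic_fetch_sub(&rw->lock, 1, __ATOMIC_RELEASE);
}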
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, tmp2 = 1;
 
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%2]\n"
 "	adds	%0, %0, #1\n"
 "	strexpl	%1, %0, [%2]\n"
 	: "=&r" (tmp), "+r" (tmp2)
 	: "r" (&rw->lock)
 	: "cc");
 
 	smp_mb();
 	return tmp2 == 0;
 }
 
 /* read_can_lock - would read_trylock() succeed? */
-#define __raw_read_can_lock(x)	((x)->lock < 0x80000000)
+#define arch_read_can_lock(x)	((x)->lock < 0x80000000)
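arch_read_trylock() makes a single strexpl attempt and reports success when tmp2 was cleared, while arch_read_can_lock() merely checks that the writer bit is clear. A hedged one-shot analogue of the trylock path:

/* Hypothetical single attempt at a reader slot: fails when a writer
 * is in, or when the exchange itself fails (permitted for trylock). */
static inline int demo_read_trylock(demo_rwlock_t *rw)
{
	uint32_t old = __atomic_load_n(&rw->lock, __ATOMIC_RELAXED);

	if (old & 0x80000000u)
		return 0;
	return __atomic_compare_exchange_n(&rw->lock, &old, old + 1,
					   false, __ATOMIC_ACQUIRE,
					   __ATOMIC_RELAXED);
}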
 
-#define __raw_read_lock_flags(lock, flags)	__raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags)	__raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)
 
 #define arch_spin_relax(lock)	cpu_relax()
 #define arch_read_relax(lock)	cpu_relax()
 #define arch_write_relax(lock)	cpu_relax()
 
 #endif /* __ASM_SPINLOCK_H */