spinlock.h: fb3a6bbc912b12347614e5742c7c61416cdb0ca0 -> e5931943d02bf751b1ec849c0d2ade23d76a8d41
 #ifndef _ASM_X86_SPINLOCK_H
 #define _ASM_X86_SPINLOCK_H

 #include <asm/atomic.h>
 #include <asm/rwlock.h>
 #include <asm/page.h>
 #include <asm/processor.h>
 #include <linux/compiler.h>

--- 218 unchanged lines hidden ---

  * On x86, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.
  */

 /**
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-static inline int __raw_read_can_lock(arch_rwlock_t *lock)
+static inline int arch_read_can_lock(arch_rwlock_t *lock)
 {
         return (int)(lock)->lock > 0;
 }

 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-static inline int __raw_write_can_lock(arch_rwlock_t *lock)
+static inline int arch_write_can_lock(arch_rwlock_t *lock)
 {
         return (lock)->lock == RW_LOCK_BIAS;
 }

-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
         asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
                      "jns 1f\n"
                      "call __read_lock_failed\n\t"
                      "1:\n"
                      ::LOCK_PTR_REG (rw) : "memory");
 }

-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
         asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
                      "jz 1f\n"
                      "call __write_lock_failed\n\t"
                      "1:\n"
                      ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
 }

-static inline int __raw_read_trylock(arch_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
 {
         atomic_t *count = (atomic_t *)lock;

         if (atomic_dec_return(count) >= 0)
                 return 1;
         atomic_inc(count);
         return 0;
 }

-static inline int __raw_write_trylock(arch_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
 {
         atomic_t *count = (atomic_t *)lock;

         if (atomic_sub_and_test(RW_LOCK_BIAS, count))
                 return 1;
         atomic_add(RW_LOCK_BIAS, count);
         return 0;
 }

-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
         asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
 }

-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
         asm volatile(LOCK_PREFIX "addl %1, %0"
                      : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
 }

-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

 #define arch_spin_relax(lock)  cpu_relax()
 #define arch_read_relax(lock)  cpu_relax()
 #define arch_write_relax(lock) cpu_relax()

 /* The {read|write|spin}_lock() on x86 are full memory barriers. */
 static inline void smp_mb__after_lock(void) { }
 #define ARCH_HAS_SMP_MB_AFTER_LOCK

 #endif /* _ASM_X86_SPINLOCK_H */
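
Note on the locking scheme: the comment in the header describes the rwlock as a 32-bit counter whose sign bit acts as the "contended" bit. The word starts at RW_LOCK_BIAS; each reader subtracts 1 and succeeds while the result stays non-negative, and a writer subtracts the whole bias and succeeds only if the result is exactly 0 (no readers, no other writer). The user-space sketch below models that arithmetic with C11 atomics so the trylock paths in the diff are easier to follow. It is an illustrative model only, not kernel code: the value 0x01000000 mirrors the RW_LOCK_BIAS definition from asm/rwlock.h of this era, and the model_* names are invented for the example.

#include <stdatomic.h>
#include <stdio.h>

/* Assumed value, mirroring RW_LOCK_BIAS from asm/rwlock.h (illustration only). */
#define MODEL_RW_LOCK_BIAS 0x01000000

/* The lock word starts at the bias: no readers, no writer holding it. */
static atomic_int model_lock = MODEL_RW_LOCK_BIAS;

/* Reader fast path: take one count; success while the result stays >= 0.
 * Mirrors the atomic_dec_return()/atomic_inc() pair in arch_read_trylock(). */
static int model_read_trylock(void)
{
        if (atomic_fetch_sub(&model_lock, 1) - 1 >= 0)
                return 1;
        atomic_fetch_add(&model_lock, 1);       /* undo: a writer holds the lock */
        return 0;
}

/* Writer fast path: subtract the whole bias; success only if the result is
 * exactly 0, i.e. no readers and no other writer. Mirrors arch_write_trylock(). */
static int model_write_trylock(void)
{
        if (atomic_fetch_sub(&model_lock, MODEL_RW_LOCK_BIAS) == MODEL_RW_LOCK_BIAS)
                return 1;
        atomic_fetch_add(&model_lock, MODEL_RW_LOCK_BIAS);      /* undo */
        return 0;
}

int main(void)
{
        printf("read  trylock: %d\n", model_read_trylock());    /* 1: count = BIAS - 1 */
        printf("write trylock: %d\n", model_write_trylock());   /* 0: a reader is active */
        atomic_fetch_add(&model_lock, 1);                        /* model read unlock */
        printf("write trylock: %d\n", model_write_trylock());   /* 1: count back at BIAS */
        return 0;
}

The kernel's arch_read_lock()/arch_write_lock() fast paths in the diff do the same subtraction in inline assembly and branch to __read_lock_failed/__write_lock_failed when the sign or zero flag shows the lock is contended.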
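
Note on the trailing barrier helpers: because a LOCK-prefixed read-modify-write on x86 is already a full memory barrier, this header can define smp_mb__after_lock() as an empty function and advertise that with ARCH_HAS_SMP_MB_AFTER_LOCK. The snippet below sketches how a generic header could consume that macro, falling back to a real barrier when an architecture does not define it; it is an assumed consumer written for illustration, not a quote of the generic kernel code.

/* Assumed generic-side fallback (illustration only): architectures that do
 * not define ARCH_HAS_SMP_MB_AFTER_LOCK would get a real barrier here,
 * while x86 keeps the empty inline defined in this header. */
#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
static inline void smp_mb__after_lock(void)
{
        smp_mb();
}
#endif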