#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>

/*
 * PA-RISC spinlocks are built on the LDCW (load and clear word) instruction:
 * the lock word is 1 when the lock is free and 0 when it is held, and
 * __ldcw_align() returns the aligned word inside arch_spinlock_t that
 * LDCW requires.
 */
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);
	return *a == 0;
}

#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)

static inline void arch_spin_lock_flags(arch_spinlock_t *x,
					unsigned long flags)
{
	volatile unsigned int *a;

	mb();
	a = __ldcw_align(x);
	/* The outer __ldcw() attempts the acquire; the inner loop spins on
	 * plain reads until the lock looks free again, briefly re-enabling
	 * interrupts if the caller's PSW had them enabled (PSW_SM_I). */
	while (__ldcw(a) == 0)
		while (*a == 0)
			if (flags & PSW_SM_I) {
				local_irq_enable();
				cpu_relax();
				local_irq_disable();
			} else
				cpu_relax();
	mb();
}

static inline void arch_spin_unlock(arch_spinlock_t *x)
{
	volatile unsigned int *a;
	mb();
	a = __ldcw_align(x);
	*a = 1;
	mb();
}

static inline int arch_spin_trylock(arch_spinlock_t *x)
{
	volatile unsigned int *a;
	int ret;

	mb();
	a = __ldcw_align(x);
	/* __ldcw() returns the old lock word: nonzero means we got the lock. */
	ret = __ldcw(a) != 0;
	mb();

	return ret;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Linux rwlocks are unfair to writers; they can be starved for an indefinite
 * time by readers.  With care, they can also be taken in interrupt context.
 *
 * In the PA-RISC implementation, we have a spinlock and a counter.
 * Readers use the lock to serialise their access to the counter (which
 * records how many readers currently hold the lock).
 * Writers hold the spinlock, preventing any readers or other writers from
 * grabbing the rwlock.
 */

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long flags;
	local_irq_save(flags);
	arch_spin_lock_flags(&rw->lock, flags);
	rw->counter++;
	arch_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;
	local_irq_save(flags);
	arch_spin_lock_flags(&rw->lock, flags);
	rw->counter--;
	arch_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long flags;
retry:
	local_irq_save(flags);
	if (arch_spin_trylock(&rw->lock)) {
		rw->counter++;
		arch_spin_unlock(&rw->lock);
		local_irq_restore(flags);
		return 1;
	}

	local_irq_restore(flags);
	/* If write-locked, we fail to acquire the lock */
	if (rw->counter < 0)
		return 0;

	/* Wait until we have a realistic chance at the lock */
	while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
		cpu_relax();

	goto retry;
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to read_trylock() this lock */
static __inline__ void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long flags;
retry:
	local_irq_save(flags);
	arch_spin_lock_flags(&rw->lock, flags);

	if (rw->counter != 0) {
		arch_spin_unlock(&rw->lock);
		local_irq_restore(flags);

		while (rw->counter != 0)
			cpu_relax();

		goto retry;
	}

	rw->counter = -1; /* mark as write-locked */
	mb();
	local_irq_restore(flags);
}

static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->counter = 0;
	arch_spin_unlock(&rw->lock);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to read_trylock() this lock */
static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long flags;
	int result = 0;

	local_irq_save(flags);
	if (arch_spin_trylock(&rw->lock)) {
		if (rw->counter == 0) {
			rw->counter = -1;
			result = 1;
		} else {
			/* Read-locked.  Oh well. */
			arch_spin_unlock(&rw->lock);
		}
	}
	local_irq_restore(flags);

	return result;
}

/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
{
	return rw->counter >= 0;
}

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
{
	return !rw->counter;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#endif /* __ASM_SPINLOCK_H */
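
/*
 * Illustrative sketch only (not part of the original header): a minimal
 * example of how the arch-level primitives above could be exercised.
 * Real kernel code goes through the generic spin_lock()/read_lock()/
 * write_lock() wrappers rather than calling arch_* directly.  The
 * example_* names are made up for this sketch, and the
 * __ARCH_SPIN_LOCK_UNLOCKED / __ARCH_RW_LOCK_UNLOCKED initialisers are
 * assumed to be provided by <asm/spinlock_types.h>.  Kept under #if 0 so
 * it is never compiled.
 */
#if 0
static arch_spinlock_t example_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static arch_rwlock_t example_rwlock = __ARCH_RW_LOCK_UNLOCKED;
static int example_data;

static void example_usage(void)
{
	unsigned long flags;

	/* Plain spinlock: this example disables interrupts itself, then the
	 * lock word is acquired with LDCW inside arch_spin_lock(). */
	local_irq_save(flags);
	arch_spin_lock(&example_lock);
	example_data++;
	arch_spin_unlock(&example_lock);
	local_irq_restore(flags);

	/* Reader side: bumps rw->counter under the internal spinlock, so it
	 * can run concurrently with other readers but not with a writer. */
	arch_read_lock(&example_rwlock);
	(void)example_data;
	arch_read_unlock(&example_rwlock);

	/* Writer side: keeps the internal spinlock held and sets the counter
	 * to -1, excluding both readers and other writers until unlock. */
	arch_write_lock(&example_rwlock);
	example_data = 0;
	arch_write_unlock(&example_rwlock);
}
#endif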