#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))

/*
 * The ColdFire parts cannot do some immediate to memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define ASM_DI	"d"
#else
#define ASM_DI	"di"
#endif

static inline void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
}

static inline void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : ASM_DI (i));
}

static inline void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

static inline void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}

static inline int atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

/* Returns true if the result of the decrement is negative. */
static inline int atomic_dec_and_test_lt(atomic_t *v)
{
	char c;
	__asm__ __volatile__(
		"subql #1,%1; slt %0"
		: "=d" (c), "=m" (*v)
		: "m" (*v));
	return c != 0;
}

static inline int atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

/*
 * The value-returning operations: with CONFIG_RMW_INSNS the CAS
 * instruction is available and a casl retry loop is used; without it
 * we fall back to masking local interrupts around a plain
 * read-modify-write sequence.
 */
#ifdef CONFIG_RMW_INSNS

static inline int atomic_add_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
		"1:	movel %2,%1\n"
		"	addl %3,%1\n"
		"	casl %2,%1,%0\n"
		"	jne 1b"
		: "+m" (*v), "=&d" (t), "=&d" (tmp)
		: "g" (i), "2" (atomic_read(v)));
	return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
		"1:	movel %2,%1\n"
		"	subl %3,%1\n"
		"	casl %2,%1,%0\n"
		"	jne 1b"
		: "+m" (*v), "=&d" (t), "=&d" (tmp)
		: "g" (i), "2" (atomic_read(v)));
	return t;
}

#define atomic_cmpxchg(v, o, n)	((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))

#else /* !CONFIG_RMW_INSNS */

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t += i;
	atomic_set(v, t);
	local_irq_restore(flags);

	return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t -= i;
	atomic_set(v, t);
	local_irq_restore(flags);

	return t;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	if (prev == old)
		atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

static inline int atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}
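/*
 * Note: the interrupt-masking sequences above are atomic only because
 * all m68k Linux systems are uniprocessor (as stated at the top of
 * this file): with local interrupts disabled, nothing else can touch
 * the counter inside the read-modify-write window.
 */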
#endif /* !CONFIG_RMW_INSNS */

#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

/* Returns true if the result of the addition is negative. */
static inline int atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
}

static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
}

/*
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

#endif /* __ARCH_M68K_ATOMIC__ */
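/*
 * Usage sketch (illustrative only; "obj_get_unless_zero" is a
 * hypothetical helper, not part of this header): a lookup path that
 * takes a reference only while the count is still non-zero.
 * __atomic_add_unless() returns the value it found and performs the
 * add only when that value differs from the "unless" argument, so a
 * non-zero return here means the reference was successfully taken.
 *
 *	static inline int obj_get_unless_zero(atomic_t *refcount)
 *	{
 *		return __atomic_add_unless(refcount, 1, 0) != 0;
 *	}
 */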