/*
 * PowerPC atomic bit operations.
 *
 * Merged version by David Gibson <david@gibson.dropbear.id.au>.
 * Based on ppc64 versions by: Dave Engebretsen, Todd Inglett, Don
 * Reed, Pat McCarthy, Peter Bergner, Anton Blanchard.  They
 * originally took it from the ppc32 code.
 *
 * Within a word, bits are numbered LSB first.  Lots of places make
 * this assumption by directly testing bits with (val & (1<<nr)).
 * This can cause confusion for large (> 1 word) bitmaps on a
 * big-endian system because, unlike little endian, the number of each
 * bit depends on the word size.
 *
 * The bitop functions are defined to work on unsigned longs, so for a
 * ppc64 system the bits end up numbered:
 *   |63..............0|127............64|191...........128|255...........192|
 * and on ppc32:
 *   |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
 *
 * There are a few little-endian macros used mostly for filesystem
 * bitmaps; these work on similar bit array layouts, but are
 * byte-oriented:
 *   |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
 *
 * The main difference is that bits 3-5 (64b) or 3-4 (32b) in the bit
 * number field need to be reversed compared to the big-endian bit
 * fields.  This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _ASM_POWERPC_BITOPS_H
#define _ASM_POWERPC_BITOPS_H

#ifdef __KERNEL__

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/asm-compat.h>
#include <asm/synch.h>

/* PPC bit number conversion */
#define PPC_BITLSHIFT(be)	(BITS_PER_LONG - 1 - (be))
#define PPC_BIT(bit)		(1UL << PPC_BITLSHIFT(bit))
#define PPC_BITMASK(bs, be)	((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))

#include <asm/barrier.h>

/* Macro for generating the ***_bits() functions */
#define DEFINE_BITOP(fn, op, prefix)			\
static __inline__ void fn(unsigned long mask,		\
		volatile unsigned long *_p)		\
{							\
	unsigned long old;				\
	unsigned long *p = (unsigned long *)_p;		\
	__asm__ __volatile__ (				\
	prefix						\
"1:"	PPC_LLARX(%0,0,%3,0) "\n"			\
	stringify_in_c(op) "%0,%0,%2\n"			\
	PPC405_ERR77(0,%3)				\
	PPC_STLCX "%0,0,%3\n"				\
	"bne- 1b\n"					\
	: "=&r" (old), "+m" (*p)			\
	: "r" (mask), "r" (p)				\
	: "cc", "memory");				\
}

DEFINE_BITOP(set_bits, or, "")
DEFINE_BITOP(clear_bits, andc, "")
DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER)
DEFINE_BITOP(change_bits, xor, "")

static __inline__ void set_bit(int nr, volatile unsigned long *addr)
{
	set_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
}

static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
{
	clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
}

static __inline__ void clear_bit_unlock(int nr, volatile unsigned long *addr)
{
	clear_bits_unlock(BIT_MASK(nr), addr + BIT_WORD(nr));
}

static __inline__ void change_bit(int nr, volatile unsigned long *addr)
{
	change_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
}
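
/*
 * For illustration only: the wrappers above split a bit number into a
 * word index and an in-word mask via the generic BIT_WORD()/BIT_MASK()
 * helpers.  Assuming a 64-bit kernel (BITS_PER_LONG == 64),
 *
 *	set_bit(70, addr)
 *
 * is equivalent to set_bits(1UL << 6, addr + 1), i.e. it atomically
 * ORs bit 6 into the second unsigned long of the bitmap.
 */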

/* Like DEFINE_BITOP(), with changes to the arguments to 'op' and the output
 * operands.
 */
#define DEFINE_TESTOP(fn, op, prefix, postfix, eh)	\
static __inline__ unsigned long fn(			\
		unsigned long mask,			\
		volatile unsigned long *_p)		\
{							\
	unsigned long old, t;				\
	unsigned long *p = (unsigned long *)_p;		\
	__asm__ __volatile__ (				\
	prefix						\
"1:"	PPC_LLARX(%0,0,%3,eh) "\n"			\
	stringify_in_c(op) "%1,%0,%2\n"			\
	PPC405_ERR77(0,%3)				\
	PPC_STLCX "%1,0,%3\n"				\
	"bne- 1b\n"					\
	postfix						\
	: "=&r" (old), "=&r" (t)			\
	: "r" (mask), "r" (p)				\
	: "cc", "memory");				\
	return (old & mask);				\
}

DEFINE_TESTOP(test_and_set_bits, or, PPC_ATOMIC_ENTRY_BARRIER,
	      PPC_ATOMIC_EXIT_BARRIER, 0)
DEFINE_TESTOP(test_and_set_bits_lock, or, "",
	      PPC_ACQUIRE_BARRIER, 1)
DEFINE_TESTOP(test_and_clear_bits, andc, PPC_ATOMIC_ENTRY_BARRIER,
	      PPC_ATOMIC_EXIT_BARRIER, 0)
DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
	      PPC_ATOMIC_EXIT_BARRIER, 0)

static __inline__ int test_and_set_bit(unsigned long nr,
				       volatile unsigned long *addr)
{
	return test_and_set_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
}

static __inline__ int test_and_set_bit_lock(unsigned long nr,
					    volatile unsigned long *addr)
{
	return test_and_set_bits_lock(BIT_MASK(nr),
				      addr + BIT_WORD(nr)) != 0;
}

static __inline__ int test_and_clear_bit(unsigned long nr,
					 volatile unsigned long *addr)
{
	return test_and_clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
}

static __inline__ int test_and_change_bit(unsigned long nr,
					  volatile unsigned long *addr)
{
	return test_and_change_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
}

#include <asm-generic/bitops/non-atomic.h>

static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" ::: "memory");
	__clear_bit(nr, addr);
}

/*
 * Return the zero-based bit position (LE, not IBM bit numbering) of
 * the most significant 1-bit in a double word.
 */
static __inline__ __attribute__((const))
int __ilog2(unsigned long x)
{
	int lz;

	asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (x));
	return BITS_PER_LONG - 1 - lz;
}

static inline __attribute__((const))
int __ilog2_u32(u32 n)
{
	int bit;
	asm ("cntlzw %0,%1" : "=r" (bit) : "r" (n));
	return 31 - bit;
}

#ifdef __powerpc64__
static inline __attribute__((const))
int __ilog2_u64(u64 n)
{
	int bit;
	asm ("cntlzd %0,%1" : "=r" (bit) : "r" (n));
	return 63 - bit;
}
#endif

/*
 * Determines the bit position of the least significant 0 bit in the
 * specified double word.  The returned bit position is zero-based,
 * counting from the right (63/31 down to 0).
 */
static __inline__ unsigned long ffz(unsigned long x)
{
	/* no zero exists anywhere in the 8 byte area */
	if ((x = ~x) == 0)
		return BITS_PER_LONG;

	/*
	 * Calculate the bit position of the least significant '1' bit in x
	 * (since x has been changed, this is actually the least significant
	 * '0' bit in the original x).  Note: (x & -x) gives us a mask that
	 * is the least significant (RIGHT-most) 1-bit of the value in x.
	 */
	return __ilog2(x & -x);
}

static __inline__ int __ffs(unsigned long x)
{
	return __ilog2(x & -x);
}
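
/*
 * A few worked examples of the conventions above (illustrative only):
 *	ffz(0xffUL)  == 8		(bit 8 is the lowest clear bit)
 *	ffz(~0UL)    == BITS_PER_LONG	(no clear bit exists)
 *	__ffs(0x18)  == 3		(bit 3 is the lowest set bit)
 * As with the generic version, __ffs(0) is undefined.
 */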

/*
 * ffs: find first bit set.  This is defined the same way as the libc
 * and compiler builtin ffs routines, and therefore differs in spirit
 * from ffz() above (man ffs).
 */
static __inline__ int ffs(int x)
{
	unsigned long i = (unsigned long)x;
	return __ilog2(i & -i) + 1;
}

/*
 * fls: find last (most-significant) bit set.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static __inline__ int fls(unsigned int x)
{
	int lz;

	asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
	return 32 - lz;
}

static __inline__ unsigned long __fls(unsigned long x)
{
	return __ilog2(x);
}

/*
 * 64-bit can do this using one cntlzd (count leading zeroes doubleword)
 * instruction; for 32-bit we use the generic version, which does two
 * 32-bit fls calls.
 */
#ifdef __powerpc64__
static __inline__ int fls64(__u64 x)
{
	int lz;

	asm ("cntlzd %0,%1" : "=r" (lz) : "r" (x));
	return 64 - lz;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif /* __powerpc64__ */

#ifdef CONFIG_PPC64
unsigned int __arch_hweight8(unsigned int w);
unsigned int __arch_hweight16(unsigned int w);
unsigned int __arch_hweight32(unsigned int w);
unsigned long __arch_hweight64(__u64 w);
#include <asm-generic/bitops/const_hweight.h>
#else
#include <asm-generic/bitops/hweight.h>
#endif

#include <asm-generic/bitops/find.h>

/* Little-endian versions */
#include <asm-generic/bitops/le.h>

/* Bitmap functions for the ext2 filesystem */

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#include <asm-generic/bitops/sched.h>

#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_BITOPS_H */
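
/*
 * A few worked examples of the find-bit conventions above (illustrative
 * only, not part of the interface):
 *	ffs(0x10)	== 5	(position of lowest set bit, plus one; ffs(0) == 0)
 *	__ffs(0x10)	== 4	(zero-based; undefined for 0)
 *	fls(0x10)	== 5	(position of highest set bit, plus one; fls(0) == 0)
 *	fls64(1ULL << 40) == 41
 */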