/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#ifdef __KERNEL__

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

/*
 * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
 * The Kconfig glue ensures that in SMP, this is only set if the container
 * SoC/platform has cross-core coherent LLOCK/SCOND
 */
#if defined(CONFIG_ARC_HAS_LLSC)

static inline void set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	bset    %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b		\n"
	: "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");
}

static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	bclr    %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b		\n"
	: "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");
}

static inline void change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	bxor    %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b		\n"
	: "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");
}

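/*
 * Rough C-level sketch of what each LLOCK/SCOND loop above amounts to
 * (illustrative only; the real primitives are the inline asm sequences).
 * store_conditional() is a hypothetical stand-in for SCOND, which stores
 * only if nothing else wrote *m since the LLOCK, and fails otherwise:
 *
 *	do {
 *		temp = *m;			// LLOCK: load, start reservation
 *		temp |= 1UL << nr;		// BSET (BCLR/BXOR for the others)
 *	} while (!store_conditional(m, temp));	// SCOND + BNZ 1b: retry on failure
 */
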
/*
 * Semantically:
 *    Test the bit
 *    if clear
 *        set it and return 0 (old value)
 *    else
 *        return 1 (old value).
 *
 * Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
 * and the old value of the bit is returned
 */
static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old, temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%2]	\n"
	"	bset    %1, %0, %3	\n"
	"	scond   %1, [%2]	\n"
	"	bnz     1b		\n"
	: "=&r"(old), "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");

	return (old & (1 << nr)) != 0;
}

static inline int
test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int old, temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%2]	\n"
	"	bclr    %1, %0, %3	\n"
	"	scond   %1, [%2]	\n"
	"	bnz     1b		\n"
	: "=&r"(old), "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");

	return (old & (1 << nr)) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int old, temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%2]	\n"
	"	bxor    %1, %0, %3	\n"
	"	scond   %1, [%2]	\n"
	"	bnz     1b		\n"
	: "=&r"(old), "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");

	return (old & (1 << nr)) != 0;
}

#else	/* !CONFIG_ARC_HAS_LLSC */

#include <asm/smp.h>

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 *
 * There's "significant" micro-optimization in writing our own variants of
 * bitops (over generic variants)
 *
 * (1) The generic APIs have "signed" @nr while we have it "unsigned"
 *     This avoids extra code being generated for pointer arithmetic, since
 *     the compiler is "not sure" that the index is not negative
 * (2) Utilize the fact that ARCompact bit fiddling insns (BSET/BCLR/ASL) etc
 *     only consider the bottom 5 bits of @nr, so there is NO need to mask
 *     them off.
 *     (GCC Quirk: however for constant @nr we still need to do the masking
 *      at compile time)
 */

static inline void set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	temp = *m;
	*m = temp | (1UL << nr);

	bitops_unlock(flags);
}

static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	temp = *m;
	*m = temp & ~(1UL << nr);

	bitops_unlock(flags);
}

static inline void change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	temp = *m;
	*m = temp ^ (1UL << nr);

	bitops_unlock(flags);
}

static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	old = *m;
	*m = old | (1 << nr);

	bitops_unlock(flags);

	return (old & (1 << nr)) != 0;
}

static inline int
test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	old = *m;
	*m = old & ~(1 << nr);

	bitops_unlock(flags);

	return (old & (1 << nr)) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	old = *m;
	*m = old ^ (1 << nr);

	bitops_unlock(flags);

	return (old & (1 << nr)) != 0;
}

#endif /* CONFIG_ARC_HAS_LLSC */

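/*
 * Typical caller-side usage of the atomic variants above (an illustrative
 * sketch only; claim_channel(), release_channel() and chan_in_use are
 * made-up names, not part of this header). Both the LLSC and the lock-based
 * implementations provide the same semantics to callers:
 *
 *	static DECLARE_BITMAP(chan_in_use, 32);
 *
 *	static int claim_channel(void)
 *	{
 *		int i;
 *
 *		for (i = 0; i < 32; i++)
 *			if (!test_and_set_bit(i, chan_in_use))
 *				return i;	// old bit was 0: channel is ours
 *		return -1;			// all channels busy
 *	}
 *
 *	static void release_channel(int i)
 *	{
 *		clear_bit(i, chan_in_use);
 *	}
 */
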
/***************************************
 * Non atomic variants
 **************************************/

static inline void __set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	temp = *m;
	*m = temp | (1UL << nr);
}

static inline void __clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	temp = *m;
	*m = temp & ~(1UL << nr);
}

static inline void __change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	temp = *m;
	*m = temp ^ (1UL << nr);
}

static inline int
__test_and_set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	old = *m;
	*m = old | (1 << nr);

	return (old & (1 << nr)) != 0;
}

static inline int
__test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	old = *m;
	*m = old & ~(1 << nr);

	return (old & (1 << nr)) != 0;
}

static inline int
__test_and_change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	old = *m;
	*m = old ^ (1 << nr);

	return (old & (1 << nr)) != 0;
}

/*
 * This routine doesn't need to be atomic.
 */
static inline int
__constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & 31)) &
		(((const volatile unsigned int *)addr)[nr >> 5])) != 0;
}

static inline int
__test_bit(unsigned int nr, const volatile unsigned long *addr)
{
	unsigned long mask;

	addr += nr >> 5;

	/* ARC700 only considers 5 bits in bit-fiddling insn */
	mask = 1 << nr;

	return ((mask & *addr) != 0);
}

#define test_bit(nr, addr)	(__builtin_constant_p(nr) ? \
				 __constant_test_bit((nr), (addr)) : \
				 __test_bit((nr), (addr)))

/*
 * Count the number of zeros, starting from MSB
 * Helper for fls( ) friends
 * This is a pure count, so (1-32) or (0-31) doesn't apply
 * It could be 0 to 32, based on num of 0's in there
 * clz(0x8000_0000) = 0, clz(0xFFFF_FFFF) = 0, clz(0) = 32, clz(1) = 31
 */
static inline __attribute__ ((const)) int clz(unsigned int x)
{
	unsigned int res;

	__asm__ __volatile__(
	"	norm.f  %0, %1		\n"
	"	mov.n   %0, 0		\n"
	"	add.p   %0, %0, 1	\n"
	: "=r"(res)
	: "r"(x)
	: "cc");

	return res;
}

static inline int constant_fls(int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

/*
 * fls = Find Last Set in word
 * @result: [1-32]
 * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
 */
static inline __attribute__ ((const)) int fls(unsigned long x)
{
	if (__builtin_constant_p(x))
		return constant_fls(x);

	return 32 - clz(x);
}

/*
 * __fls: Similar to fls, but zero based (0-31)
 */
static inline __attribute__ ((const)) int __fls(unsigned long x)
{
	if (!x)
		return 0;
	else
		return fls(x) - 1;
}

/*
 * ffs = Find First Set in word (LSB to MSB)
 * @result: [1-32], 0 if all 0's
 */
#define ffs(x)	({ unsigned long __t = (x); fls(__t & -__t); })

/*
 * __ffs: Similar to ffs, but zero based (0-31)
 */
static inline __attribute__ ((const)) int __ffs(unsigned long word)
{
	if (!word)
		return word;

	return ffs(word) - 1;
}

/*
 * ffz = Find First Zero in word.
 * @return: [0-31], 32 if all 1's
 */
#define ffz(x)	__ffs(~(x))

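/*
 * Worked example of how the bit-search helpers above relate to each other
 * (the values follow from the definitions in this file; x = 0x00a0 is just
 * an arbitrary sample word):
 *
 *	x = 0x00a0 (bits 5 and 7 set)
 *	fls(x)   = 8	// last (most significant) set bit, 1-based
 *	__fls(x) = 7	// same bit, 0-based
 *	ffs(x)   = 6	// first (least significant) set bit, 1-based
 *	__ffs(x) = 5	// same bit, 0-based
 *	ffz(x)   = 0	// bit 0 is the first zero
 *	clz(x)   = 24	// leading zeros in the 32-bit word
 */
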
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/lock.h>

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* !__ASSEMBLY__ */

#endif /* __KERNEL__ */

#endif