/*
 *    S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>

/*
 * 32 bit bitops format:
 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
 * bit 32 is the LSB of *(addr+4). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 *
 * 64 bit bitops format:
 * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
 * bit 64 is the LSB of *(addr+8). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
 *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
 *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
 *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 */
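
/*
 * Worked example of the 64 bit format above (for illustration only):
 * set_bit(1, flags) touches the same bit as "flags[0] |= 1UL << 1".
 * Because of the big endian byte order that bit is stored in the last
 * byte of the first 8-byte word, i.e. at byte offset 7 from flags.
 */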

/* bitmap tables from arch/s390/kernel/bitmap.c */
extern const char _oi_bitmap[];
extern const char _ni_bitmap[];
extern const char _zb_findmap[];
extern const char _sb_findmap[];

#ifndef CONFIG_64BIT

#define __BITOPS_ALIGN		3
#define __BITOPS_WORDSIZE	32
#define __BITOPS_OR		"or"
#define __BITOPS_AND		"nr"
#define __BITOPS_XOR		"xr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	asm volatile(							\
		"	l	%0,%2\n"				\
		"0:	lr	%1,%0\n"				\
		__op_string "	%1,%3\n"				\
		"	cs	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (__old), "=&d" (__new),				\
		  "=Q" (*(unsigned long *) __addr)			\
		: "d" (__val), "Q" (*(unsigned long *) __addr)		\
		: "cc");

#else /* CONFIG_64BIT */

#define __BITOPS_ALIGN		7
#define __BITOPS_WORDSIZE	64
#define __BITOPS_OR		"ogr"
#define __BITOPS_AND		"ngr"
#define __BITOPS_XOR		"xgr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	asm volatile(							\
		"	lg	%0,%2\n"				\
		"0:	lgr	%1,%0\n"				\
		__op_string "	%1,%3\n"				\
		"	csg	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (__old), "=&d" (__new),				\
		  "=Q" (*(unsigned long *) __addr)			\
		: "d" (__val), "Q" (*(unsigned long *) __addr)		\
		: "cc");

#endif /* CONFIG_64BIT */

#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
#define __BITOPS_BARRIER() asm volatile("" : : : "memory")
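
/*
 * Illustration of what __BITOPS_LOOP does (pseudo code only, the real
 * implementation is the CS/CSG based asm above):
 *
 *	old = *addr;
 *	do {
 *		new = old <op> val;
 *	} while (compare_and_swap(addr, &old, new) failed);
 *
 * <op> is OR, AND or XOR; on failure CS/CSG reloads old with the current
 * memory contents, so the loop retries until the word is updated atomically.
 */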

#ifdef CONFIG_SMP
/*
 * SMP safe set_bit routine based on compare and swap (CS)
 */
static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
}

/*
 * SMP safe clear_bit routine based on compare and swap (CS)
 */
static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
}

/*
 * SMP safe change_bit routine based on compare and swap (CS)
 */
static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
}

/*
 * SMP safe test_and_set_bit routine based on compare and swap (CS)
 */
static inline int
test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}

/*
 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
 */
static inline int
test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND/test mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
	__BITOPS_BARRIER();
	return (old ^ new) != 0;
}

/*
 * SMP safe test_and_change_bit routine based on compare and swap (CS)
 */
static inline int
test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}
#endif /* CONFIG_SMP */
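
/*
 * The non-SMP routines below address the single byte that contains the bit.
 * With the bit ordering described at the top of this file that byte is at
 * offset ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3): the XOR inverts the byte
 * index within the word, so that e.g. on a 64 bit kernel bits 0-7 are found
 * in byte 7 of the word (its least significant byte in big endian order)
 * and bits 56-63 in byte 0.
 */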

/*
 * fast, non-SMP set_bit routine
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile(
		"	oc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
}

static inline void
__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	*(unsigned char *) addr |= 1 << (nr & 7);
}

#define set_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_set_bit((nr),(addr)) : \
 __set_bit((nr),(addr)) )

/*
 * fast, non-SMP clear_bit routine
 */
static inline void
__clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile(
		"	nc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc");
}

static inline void
__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	*(unsigned char *) addr &= ~(1 << (nr & 7));
}

#define clear_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_clear_bit((nr),(addr)) : \
 __clear_bit((nr),(addr)) )

/*
 * fast, non-SMP change_bit routine
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile(
		"	xc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
}

static inline void
__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	*(unsigned char *) addr ^= 1 << (nr & 7);
}

#define change_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_change_bit((nr),(addr)) : \
 __change_bit((nr),(addr)) )

/*
 * fast, non-SMP test_and_set_bit routine
 */
static inline int
test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		"	oc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_set_bit(X,Y)		test_and_set_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_clear_bit routine
 */
static inline int
test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		"	nc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_clear_bit(X,Y)	test_and_clear_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_change_bit routine
 */
static inline int
test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		"	xc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_change_bit(X,Y)	test_and_change_bit_simple(X,Y)

#ifdef CONFIG_SMP
#define set_bit             set_bit_cs
#define clear_bit           clear_bit_cs
#define change_bit          change_bit_cs
#define test_and_set_bit    test_and_set_bit_cs
#define test_and_clear_bit  test_and_clear_bit_cs
#define test_and_change_bit test_and_change_bit_cs
#else
#define set_bit             set_bit_simple
#define clear_bit           clear_bit_simple
#define change_bit          change_bit_simple
#define test_and_set_bit    test_and_set_bit_simple
#define test_and_clear_bit  test_and_clear_bit_simple
#define test_and_change_bit test_and_change_bit_simple
#endif


/*
 * This routine doesn't need to be atomic.
 */

static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(volatile unsigned char *) addr;
	return (ch >> (nr & 7)) & 1;
}

static inline int
__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
	return (((volatile char *) addr)
		[(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7))) != 0;
}

#define test_bit(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)) )

/*
 * Optimized find bit helper functions.
 */

/**
 * __ffz_word_loop - find byte offset of first long != -1UL
 * @addr: pointer to array of unsigned long
 * @size: size of the array in bits
 */
static inline unsigned long __ffz_word_loop(const unsigned long *addr,
					    unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long bytes = 0;

	asm volatile(
#ifndef CONFIG_64BIT
		"	ahi	%1,-1\n"
		"	sra	%1,5\n"
		"	jz	1f\n"
		"0:	c	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,4(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#else
		"	aghi	%1,-1\n"
		"	srag	%1,%1,6\n"
		"	jz	1f\n"
		"0:	cg	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,8(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#endif
		: "+&a" (bytes), "+&d" (size)
		: "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr)
		: "cc");
	return bytes;
}

/**
 * __ffs_word_loop - find byte offset of first long != 0UL
 * @addr: pointer to array of unsigned long
 * @size: size of the array in bits
 */
static inline unsigned long __ffs_word_loop(const unsigned long *addr,
					    unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long bytes = 0;

	asm volatile(
#ifndef CONFIG_64BIT
		"	ahi	%1,-1\n"
		"	sra	%1,5\n"
		"	jz	1f\n"
		"0:	c	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,4(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#else
		"	aghi	%1,-1\n"
		"	srag	%1,%1,6\n"
		"	jz	1f\n"
		"0:	cg	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,8(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#endif
		: "+&a" (bytes), "+&a" (size)
		: "d" (0UL), "a" (addr), "m" (*(addrtype *) addr)
		: "cc");
	return bytes;
}

/**
 * __ffz_word - add number of the first unset bit
 * @nr: base value the bit number is added to
 * @word: the word that is searched for unset bits
 */
static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
{
#ifdef CONFIG_64BIT
	if ((word & 0xffffffff) == 0xffffffff) {
		word >>= 32;
		nr += 32;
	}
#endif
	if ((word & 0xffff) == 0xffff) {
		word >>= 16;
		nr += 16;
	}
	if ((word & 0xff) == 0xff) {
		word >>= 8;
		nr += 8;
	}
	return nr + _zb_findmap[(unsigned char) word];
}

/**
 * __ffs_word - add number of the first set bit
 * @nr: base value the bit number is added to
 * @word: the word that is searched for set bits
 */
static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
{
#ifdef CONFIG_64BIT
	if ((word & 0xffffffff) == 0) {
		word >>= 32;
		nr += 32;
	}
#endif
	if ((word & 0xffff) == 0) {
		word >>= 16;
		nr += 16;
	}
	if ((word & 0xff) == 0) {
		word >>= 8;
		nr += 8;
	}
	return nr + _sb_findmap[(unsigned char) word];
}
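
/*
 * Worked examples (assuming the _zb_findmap/_sb_findmap tables map a byte
 * value to the number of its first zero/set bit): __ffz_word(0, 0xffUL)
 * returns 8, and __ffs_word(0, 0x1000UL) returns 12.
 */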

/**
 * __load_ulong_be - load big endian unsigned long
 * @p: pointer to array of unsigned long
 * @offset: byte offset of source value in the array
 */
static inline unsigned long __load_ulong_be(const unsigned long *p,
					    unsigned long offset)
{
	p = (unsigned long *)((unsigned long) p + offset);
	return *p;
}

/**
 * __load_ulong_le - load little endian unsigned long
 * @p: pointer to array of unsigned long
 * @offset: byte offset of source value in the array
 */
static inline unsigned long __load_ulong_le(const unsigned long *p,
					    unsigned long offset)
{
	unsigned long word;

	p = (unsigned long *)((unsigned long) p + offset);
#ifndef CONFIG_64BIT
	asm volatile(
		"	ic	%0,%O1(%R1)\n"
		"	icm	%0,2,%O1+1(%R1)\n"
		"	icm	%0,4,%O1+2(%R1)\n"
		"	icm	%0,8,%O1+3(%R1)"
		: "=&d" (word) : "Q" (*p) : "cc");
#else
	asm volatile(
		"	lrvg	%0,%1"
		: "=d" (word) : "m" (*p));
#endif
	return word;
}

/*
 * The various find bit functions.
 */

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	return __ffz_word(0, word);
}

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __ffs_word(0, word);
}

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int x)
{
	if (!x)
		return 0;
	return __ffs_word(1, x);
}

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned long find_first_zero_bit(const unsigned long *addr,
						unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffz_word_loop(addr, size);
	bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_zero_bit find_first_zero_bit

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first set bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned long find_first_bit(const unsigned long *addr,
					   unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(addr, size);
	bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_bit find_first_bit

/*
 * Big endian variant which starts bit counting from left using
 * the flogr (find leftmost one) instruction.
 */
static inline unsigned long __flo_word(unsigned long nr, unsigned long val)
{
	register unsigned long bit asm("2") = val;
	register unsigned long out asm("3");

	asm volatile (
		"	.insn	rre,0xb9830000,%[bit],%[bit]\n"
		: [bit] "+d" (bit), [out] "=d" (out) : : "cc");
	return nr + bit;
}

/*
 * 64 bit special left bitops format:
 * order in memory:
 *    00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f
 *    10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f
 *    20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f
 *    30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f
 * after that follows the next long with bit numbers
 *    40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f
 *    50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f
 *    60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f
 *    70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f
 * The reason for this bit ordering is the fact that
 * the hardware sets bits in a bitmap starting at bit 0
 * and we don't want to scan the bitmap from the 'wrong
 * end'.
 */
static inline unsigned long find_first_bit_left(const unsigned long *addr,
						unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(addr, size);
	bits = __flo_word(bytes * 8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}

static inline int find_next_bit_left(const unsigned long *addr,
				     unsigned long size,
				     unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/* mask out bits numbered below the offset (the MSBs) */
		set = __flo_word(0, *p & (~0UL >> bit));
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_bit_left(p, size);
}

#define for_each_set_bit_left(bit, addr, size)				\
	for ((bit) = find_first_bit_left((addr), (size));		\
	     (bit) < (size);						\
	     (bit) = find_next_bit_left((addr), (size), (bit) + 1))

/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_left_cont(bit, addr, size)			\
	for ((bit) = find_next_bit_left((addr), (size), (bit));	\
	     (bit) < (size);						\
	     (bit) = find_next_bit_left((addr), (size), (bit) + 1))

/**
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bit number to start searching at
 * @size: The maximum size to search
 */
static inline int find_next_zero_bit(const unsigned long *addr,
				     unsigned long size,
				     unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * __ffz_word returns __BITOPS_WORDSIZE
		 * if no zero bit is present in the word.
		 */
		set = __ffz_word(bit, *p >> bit);
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_zero_bit(p, size);
}
#define find_next_zero_bit find_next_zero_bit

/**
 * find_next_bit - find the first set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bit number to start searching at
 * @size: The maximum size to search
 */
static inline int find_next_bit(const unsigned long *addr,
				unsigned long size,
				unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * __ffs_word returns __BITOPS_WORDSIZE
		 * if no one bit is present in the word.
		 */
		set = __ffs_word(0, *p & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_bit(p, size);
}
#define find_next_bit find_next_bit
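
/*
 * Typical use of the find functions is to iterate over a bitmap, e.g.
 * (illustration only, handle_bit() stands for the per-bit work):
 *
 *	for (bit = find_first_bit(bitmap, size);
 *	     bit < size;
 *	     bit = find_next_bit(bitmap, size, bit + 1))
 *		handle_bit(bit);
 *
 * which is what the generic for_each_set_bit() macro expands to.
 */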

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is cleared.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	return find_first_bit(b, 140);
}

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

/*
 * ATTENTION: intel byte ordering convention for ext2 and minix !!
 * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
 * bit 32 is the LSB of (addr+4).
 * That combined with the little endian byte order of Intel gives the
 * following bit order in memory:
 *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
 *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
 */

static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffz_word_loop(vaddr, size);
	bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_zero_bit_le find_first_zero_bit_le

static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
					unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * s390 version of ffz returns __BITOPS_WORDSIZE
		 * if no zero bit is present in the word.
		 */
		set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_zero_bit_le(p, size);
}
#define find_next_zero_bit_le find_next_zero_bit_le

static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(vaddr, size);
	bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_bit_le find_first_bit_le

static inline int find_next_bit_le(void *vaddr, unsigned long size,
				   unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * s390 version of __ffs_word returns __BITOPS_WORDSIZE
		 * if no one bit is present in the word.
		 */
		set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_bit_le(p, size);
}
#define find_next_bit_le find_next_bit_le

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* _S390_BITOPS_H */