// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
{
        unsigned depth = sb->depth;

        sb->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
        if (!sb->alloc_hint)
                return -ENOMEM;

        if (depth && !sb->round_robin) {
                int i;

                for_each_possible_cpu(i)
                        *per_cpu_ptr(sb->alloc_hint, i) = prandom_u32() % depth;
        }
        return 0;
}

static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb,
                                                    unsigned int depth)
{
        unsigned hint;

        hint = this_cpu_read(*sb->alloc_hint);
        if (unlikely(hint >= depth)) {
                hint = depth ? prandom_u32() % depth : 0;
                this_cpu_write(*sb->alloc_hint, hint);
        }

        return hint;
}

static inline void update_alloc_hint_after_get(struct sbitmap *sb,
                                               unsigned int depth,
                                               unsigned int hint,
                                               unsigned int nr)
{
        if (nr == -1) {
                /* If the map is full, a hint won't do us much good. */
                this_cpu_write(*sb->alloc_hint, 0);
        } else if (nr == hint || unlikely(sb->round_robin)) {
                /* Only update the hint if we used it. */
                hint = nr + 1;
                if (hint >= depth - 1)
                        hint = 0;
                this_cpu_write(*sb->alloc_hint, hint);
        }
}

/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
{
        unsigned long mask;

        if (!READ_ONCE(map->cleared))
                return false;

        /*
         * First get a stable cleared mask, setting the old mask to 0.
         */
        mask = xchg(&map->cleared, 0);

        /*
         * Now clear the masked bits in our free word
         */
        atomic_long_andnot(mask, (atomic_long_t *)&map->word);
        BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
        return true;
}

int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
                      gfp_t flags, int node, bool round_robin,
                      bool alloc_hint)
{
        unsigned int bits_per_word;
        unsigned int i;

        if (shift < 0)
                shift = sbitmap_calculate_shift(depth);

        bits_per_word = 1U << shift;
        if (bits_per_word > BITS_PER_LONG)
                return -EINVAL;

        sb->shift = shift;
        sb->depth = depth;
        sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
        sb->round_robin = round_robin;

        if (depth == 0) {
                sb->map = NULL;
                return 0;
        }

        if (alloc_hint) {
                if (init_alloc_hint(sb, flags))
                        return -ENOMEM;
        } else {
                sb->alloc_hint = NULL;
        }

        sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
        if (!sb->map) {
                free_percpu(sb->alloc_hint);
                return -ENOMEM;
        }

        for (i = 0; i < sb->map_nr; i++) {
                sb->map[i].depth = min(depth, bits_per_word);
                depth -= sb->map[i].depth;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);

void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
        unsigned int bits_per_word = 1U << sb->shift;
        unsigned int i;

        for (i = 0; i < sb->map_nr; i++)
                sbitmap_deferred_clear(&sb->map[i]);

        sb->depth = depth;
        sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

        for (i = 0; i < sb->map_nr; i++) {
                sb->map[i].depth = min(depth, bits_per_word);
                depth -= sb->map[i].depth;
        }
}
EXPORT_SYMBOL_GPL(sbitmap_resize);
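/*
 * Find and claim a free bit in @word: the search starts at @hint and, if
 * @wrap is true and @hint is non-zero, wraps back to bit 0 once before
 * giving up. Returns the bit number on success, or -1 if no free bit below
 * @depth could be locked.
 */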
static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
                              unsigned int hint, bool wrap)
{
        int nr;

        /* don't wrap if starting from 0 */
        wrap = wrap && hint;

        while (1) {
                nr = find_next_zero_bit(word, depth, hint);
                if (unlikely(nr >= depth)) {
                        /*
                         * We started with an offset, and we didn't reset the
                         * offset to 0 in a failure case, so start from 0 to
                         * exhaust the map.
                         */
                        if (hint && wrap) {
                                hint = 0;
                                continue;
                        }
                        return -1;
                }

                if (!test_and_set_bit_lock(nr, word))
                        break;

                hint = nr + 1;
                if (hint >= depth - 1)
                        hint = 0;
        }

        return nr;
}

static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
                                     unsigned int alloc_hint)
{
        struct sbitmap_word *map = &sb->map[index];
        int nr;

        do {
                nr = __sbitmap_get_word(&map->word, map->depth, alloc_hint,
                                        !sb->round_robin);
                if (nr != -1)
                        break;
                if (!sbitmap_deferred_clear(map))
                        break;
        } while (1);

        return nr;
}

static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
{
        unsigned int i, index;
        int nr = -1;

        index = SB_NR_TO_INDEX(sb, alloc_hint);

        /*
         * Unless we're doing round robin tag allocation, just use the
         * alloc_hint to find the right word index. No point in looping
         * twice in find_next_zero_bit() for that case.
         */
        if (sb->round_robin)
                alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
        else
                alloc_hint = 0;

        for (i = 0; i < sb->map_nr; i++) {
                nr = sbitmap_find_bit_in_index(sb, index, alloc_hint);
                if (nr != -1) {
                        nr += index << sb->shift;
                        break;
                }

                /* Jump to next index. */
                alloc_hint = 0;
                if (++index >= sb->map_nr)
                        index = 0;
        }

        return nr;
}

int sbitmap_get(struct sbitmap *sb)
{
        int nr;
        unsigned int hint, depth;

        if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
                return -1;

        depth = READ_ONCE(sb->depth);
        hint = update_alloc_hint_before_get(sb, depth);
        nr = __sbitmap_get(sb, hint);
        update_alloc_hint_after_get(sb, depth, hint, nr);

        return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);
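/*
 * Like __sbitmap_get(), but each word is only searched up to
 * min(word depth, @shallow_depth) bits, bounding how deep into any one
 * word this allocation is allowed to go.
 */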
static int __sbitmap_get_shallow(struct sbitmap *sb,
                                 unsigned int alloc_hint,
                                 unsigned long shallow_depth)
{
        unsigned int i, index;
        int nr = -1;

        index = SB_NR_TO_INDEX(sb, alloc_hint);

        for (i = 0; i < sb->map_nr; i++) {
again:
                nr = __sbitmap_get_word(&sb->map[index].word,
                                        min(sb->map[index].depth, shallow_depth),
                                        SB_NR_TO_BIT(sb, alloc_hint), true);
                if (nr != -1) {
                        nr += index << sb->shift;
                        break;
                }

                if (sbitmap_deferred_clear(&sb->map[index]))
                        goto again;

                /* Jump to next index. */
                index++;
                alloc_hint = index << sb->shift;

                if (index >= sb->map_nr) {
                        index = 0;
                        alloc_hint = 0;
                }
        }

        return nr;
}

int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
{
        int nr;
        unsigned int hint, depth;

        if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
                return -1;

        depth = READ_ONCE(sb->depth);
        hint = update_alloc_hint_before_get(sb, depth);
        nr = __sbitmap_get_shallow(sb, hint, shallow_depth);
        update_alloc_hint_after_get(sb, depth, hint, nr);

        return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);

bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
        unsigned int i;

        for (i = 0; i < sb->map_nr; i++) {
                if (sb->map[i].word & ~sb->map[i].cleared)
                        return true;
        }
        return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
        unsigned int i, weight = 0;

        for (i = 0; i < sb->map_nr; i++) {
                const struct sbitmap_word *word = &sb->map[i];

                if (set)
                        weight += bitmap_weight(&word->word, word->depth);
                else
                        weight += bitmap_weight(&word->cleared, word->depth);
        }
        return weight;
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
        return __sbitmap_weight(sb, false);
}

unsigned int sbitmap_weight(const struct sbitmap *sb)
{
        return __sbitmap_weight(sb, true) - sbitmap_cleared(sb);
}
EXPORT_SYMBOL_GPL(sbitmap_weight);

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
        seq_printf(m, "depth=%u\n", sb->depth);
        seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
        seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
        seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
        seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
        if ((offset & 0xf) == 0) {
                if (offset != 0)
                        seq_putc(m, '\n');
                seq_printf(m, "%08x:", offset);
        }
        if ((offset & 0x1) == 0)
                seq_putc(m, ' ');
        seq_printf(m, "%02x", byte);
}

void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
        u8 byte = 0;
        unsigned int byte_bits = 0;
        unsigned int offset = 0;
        int i;

        for (i = 0; i < sb->map_nr; i++) {
                unsigned long word = READ_ONCE(sb->map[i].word);
                unsigned long cleared = READ_ONCE(sb->map[i].cleared);
                unsigned int word_bits = READ_ONCE(sb->map[i].depth);

                word &= ~cleared;

                while (word_bits > 0) {
                        unsigned int bits = min(8 - byte_bits, word_bits);

                        byte |= (word & (BIT(bits) - 1)) << byte_bits;
                        byte_bits += bits;
                        if (byte_bits == 8) {
                                emit_byte(m, offset, byte);
                                byte = 0;
                                byte_bits = 0;
                                offset++;
                        }
                        word >>= bits;
                        word_bits -= bits;
                }
        }
        if (byte_bits) {
                emit_byte(m, offset, byte);
                offset++;
        }
        if (offset)
                seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);
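/*
 * Worked example for the calculation below (values assumed for
 * illustration): with depth = 128, shift = 6 (64 bits per word) and
 * min_shallow_depth left at UINT_MAX, shallow_depth is 64 and the summed
 * depth stays 128; with SBQ_WAIT_QUEUES == 8 and SBQ_WAKE_BATCH == 8,
 * wake_batch = clamp(128 / 8, 1, 8) = 8.
 */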
static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
                                        unsigned int depth)
{
        unsigned int wake_batch;
        unsigned int shallow_depth;

        /*
         * For each batch, we wake up one queue. We need to make sure that our
         * batch size is small enough that the full depth of the bitmap,
         * potentially limited by a shallow depth, is enough to wake up all of
         * the queues.
         *
         * Each full word of the bitmap has bits_per_word bits, and there might
         * be a partial word. There are depth / bits_per_word full words and
         * depth % bits_per_word bits left over. In bitwise arithmetic:
         *
         * bits_per_word = 1 << shift
         * depth / bits_per_word = depth >> shift
         * depth % bits_per_word = depth & ((1 << shift) - 1)
         *
         * Each word can be limited to sbq->min_shallow_depth bits.
         */
        shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
        depth = ((depth >> sbq->sb.shift) * shallow_depth +
                 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
        wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
                             SBQ_WAKE_BATCH);

        return wake_batch;
}

int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
                            int shift, bool round_robin, gfp_t flags, int node)
{
        int ret;
        int i;

        ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node,
                                round_robin, true);
        if (ret)
                return ret;

        sbq->min_shallow_depth = UINT_MAX;
        sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
        atomic_set(&sbq->wake_index, 0);
        atomic_set(&sbq->ws_active, 0);

        sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
        if (!sbq->ws) {
                sbitmap_free(&sbq->sb);
                return -ENOMEM;
        }

        for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
                init_waitqueue_head(&sbq->ws[i].wait);
                atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);

static inline void __sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
                                                     unsigned int wake_batch)
{
        int i;

        if (sbq->wake_batch != wake_batch) {
                WRITE_ONCE(sbq->wake_batch, wake_batch);
                /*
                 * Pairs with the memory barrier in sbitmap_queue_wake_up()
                 * to ensure that the batch size is updated before the wait
                 * counts.
                 */
                smp_mb();
                for (i = 0; i < SBQ_WAIT_QUEUES; i++)
                        atomic_set(&sbq->ws[i].wait_cnt, 1);
        }
}

static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
                                            unsigned int depth)
{
        unsigned int wake_batch;

        wake_batch = sbq_calc_wake_batch(sbq, depth);
        __sbitmap_queue_update_wake_batch(sbq, wake_batch);
}
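/*
 * Recalculate the wake batch from the number of active users: each user is
 * assumed to get an even share of the depth, and the batch is sized from
 * that share rather than from the full depth.
 */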
void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
                                          unsigned int users)
{
        unsigned int wake_batch;
        unsigned int min_batch;
        unsigned int depth = (sbq->sb.depth + users - 1) / users;

        min_batch = sbq->sb.depth >= (4 * SBQ_WAIT_QUEUES) ? 4 : 1;

        wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES,
                               min_batch, SBQ_WAKE_BATCH);
        __sbitmap_queue_update_wake_batch(sbq, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch);

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
        sbitmap_queue_update_wake_batch(sbq, depth);
        sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
        return sbitmap_get(&sbq->sb);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
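/*
 * Try to grab up to @nr_tags consecutive free bits from a single word with
 * one cmpxchg. On success, *offset is set to the first acquired bit and the
 * return value is a mask, relative to *offset, with one bit set per tag that
 * was actually taken. Returns 0 if no bits could be acquired in a batch.
 */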
unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
                                        unsigned int *offset)
{
        struct sbitmap *sb = &sbq->sb;
        unsigned int hint, depth;
        unsigned long index, nr;
        int i;

        if (unlikely(sb->round_robin))
                return 0;

        depth = READ_ONCE(sb->depth);
        hint = update_alloc_hint_before_get(sb, depth);

        index = SB_NR_TO_INDEX(sb, hint);

        for (i = 0; i < sb->map_nr; i++) {
                struct sbitmap_word *map = &sb->map[index];
                unsigned long get_mask;

                sbitmap_deferred_clear(map);
                if (map->word == (1UL << (map->depth - 1)) - 1)
                        continue;

                nr = find_first_zero_bit(&map->word, map->depth);
                if (nr + nr_tags <= map->depth) {
                        atomic_long_t *ptr = (atomic_long_t *) &map->word;
                        int map_tags = min_t(int, nr_tags, map->depth);
                        unsigned long val, ret;

                        get_mask = ((1UL << map_tags) - 1) << nr;
                        do {
                                val = READ_ONCE(map->word);
                                ret = atomic_long_cmpxchg(ptr, val, get_mask | val);
                        } while (ret != val);
                        get_mask = (get_mask & ~ret) >> nr;
                        if (get_mask) {
                                *offset = nr + (index << sb->shift);
                                update_alloc_hint_after_get(sb, depth, hint,
                                                        *offset + map_tags - 1);
                                return get_mask;
                        }
                }
                /* Jump to next index. */
                if (++index >= sb->map_nr)
                        index = 0;
        }

        return 0;
}

int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
                                unsigned int shallow_depth)
{
        WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

        return sbitmap_get_shallow(&sbq->sb, shallow_depth);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
                                     unsigned int min_shallow_depth)
{
        sbq->min_shallow_depth = min_shallow_depth;
        sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);

static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
        int i, wake_index;

        if (!atomic_read(&sbq->ws_active))
                return NULL;

        wake_index = atomic_read(&sbq->wake_index);
        for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
                struct sbq_wait_state *ws = &sbq->ws[wake_index];

                if (waitqueue_active(&ws->wait)) {
                        if (wake_index != atomic_read(&sbq->wake_index))
                                atomic_set(&sbq->wake_index, wake_index);
                        return ws;
                }

                wake_index = sbq_index_inc(wake_index);
        }

        return NULL;
}
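/*
 * Decrement the wait count of the next active wait queue and, once it drops
 * to zero or below, reset it to the wake batch and wake up that many
 * waiters. Returns true if the caller should retry on another wait queue
 * because a concurrent waker won the wait_cnt reset race.
 */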
633 */ 634 ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch); 635 if (ret == wait_cnt) { 636 sbq_index_atomic_inc(&sbq->wake_index); 637 wake_up_nr(&ws->wait, wake_batch); 638 return false; 639 } 640 641 return true; 642 } 643 644 return false; 645 } 646 647 void sbitmap_queue_wake_up(struct sbitmap_queue *sbq) 648 { 649 while (__sbq_wake_up(sbq)) 650 ; 651 } 652 EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up); 653 654 static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag) 655 { 656 if (likely(!sb->round_robin && tag < sb->depth)) 657 data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag); 658 } 659 660 void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset, 661 int *tags, int nr_tags) 662 { 663 struct sbitmap *sb = &sbq->sb; 664 unsigned long *addr = NULL; 665 unsigned long mask = 0; 666 int i; 667 668 smp_mb__before_atomic(); 669 for (i = 0; i < nr_tags; i++) { 670 const int tag = tags[i] - offset; 671 unsigned long *this_addr; 672 673 /* since we're clearing a batch, skip the deferred map */ 674 this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word; 675 if (!addr) { 676 addr = this_addr; 677 } else if (addr != this_addr) { 678 atomic_long_andnot(mask, (atomic_long_t *) addr); 679 mask = 0; 680 addr = this_addr; 681 } 682 mask |= (1UL << SB_NR_TO_BIT(sb, tag)); 683 } 684 685 if (mask) 686 atomic_long_andnot(mask, (atomic_long_t *) addr); 687 688 smp_mb__after_atomic(); 689 sbitmap_queue_wake_up(sbq); 690 sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(), 691 tags[nr_tags - 1] - offset); 692 } 693 694 void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, 695 unsigned int cpu) 696 { 697 /* 698 * Once the clear bit is set, the bit may be allocated out. 699 * 700 * Orders READ/WRITE on the associated instance(such as request 701 * of blk_mq) by this bit for avoiding race with re-allocation, 702 * and its pair is the memory barrier implied in __sbitmap_get_word. 703 * 704 * One invariant is that the clear bit has to be zero when the bit 705 * is in use. 706 */ 707 smp_mb__before_atomic(); 708 sbitmap_deferred_clear_bit(&sbq->sb, nr); 709 710 /* 711 * Pairs with the memory barrier in set_current_state() to ensure the 712 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker 713 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the 714 * waiter. See the comment on waitqueue_active(). 715 */ 716 smp_mb__after_atomic(); 717 sbitmap_queue_wake_up(sbq); 718 sbitmap_update_cpu_hint(&sbq->sb, cpu, nr); 719 } 720 EXPORT_SYMBOL_GPL(sbitmap_queue_clear); 721 722 void sbitmap_queue_wake_all(struct sbitmap_queue *sbq) 723 { 724 int i, wake_index; 725 726 /* 727 * Pairs with the memory barrier in set_current_state() like in 728 * sbitmap_queue_wake_up(). 
729 */ 730 smp_mb(); 731 wake_index = atomic_read(&sbq->wake_index); 732 for (i = 0; i < SBQ_WAIT_QUEUES; i++) { 733 struct sbq_wait_state *ws = &sbq->ws[wake_index]; 734 735 if (waitqueue_active(&ws->wait)) 736 wake_up(&ws->wait); 737 738 wake_index = sbq_index_inc(wake_index); 739 } 740 } 741 EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all); 742 743 void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m) 744 { 745 bool first; 746 int i; 747 748 sbitmap_show(&sbq->sb, m); 749 750 seq_puts(m, "alloc_hint={"); 751 first = true; 752 for_each_possible_cpu(i) { 753 if (!first) 754 seq_puts(m, ", "); 755 first = false; 756 seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i)); 757 } 758 seq_puts(m, "}\n"); 759 760 seq_printf(m, "wake_batch=%u\n", sbq->wake_batch); 761 seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index)); 762 seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active)); 763 764 seq_puts(m, "ws={\n"); 765 for (i = 0; i < SBQ_WAIT_QUEUES; i++) { 766 struct sbq_wait_state *ws = &sbq->ws[i]; 767 768 seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n", 769 atomic_read(&ws->wait_cnt), 770 waitqueue_active(&ws->wait) ? "active" : "inactive"); 771 } 772 seq_puts(m, "}\n"); 773 774 seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin); 775 seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth); 776 } 777 EXPORT_SYMBOL_GPL(sbitmap_queue_show); 778 779 void sbitmap_add_wait_queue(struct sbitmap_queue *sbq, 780 struct sbq_wait_state *ws, 781 struct sbq_wait *sbq_wait) 782 { 783 if (!sbq_wait->sbq) { 784 sbq_wait->sbq = sbq; 785 atomic_inc(&sbq->ws_active); 786 add_wait_queue(&ws->wait, &sbq_wait->wait); 787 } 788 } 789 EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue); 790 791 void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait) 792 { 793 list_del_init(&sbq_wait->wait.entry); 794 if (sbq_wait->sbq) { 795 atomic_dec(&sbq_wait->sbq->ws_active); 796 sbq_wait->sbq = NULL; 797 } 798 } 799 EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue); 800 801 void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq, 802 struct sbq_wait_state *ws, 803 struct sbq_wait *sbq_wait, int state) 804 { 805 if (!sbq_wait->sbq) { 806 atomic_inc(&sbq->ws_active); 807 sbq_wait->sbq = sbq; 808 } 809 prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state); 810 } 811 EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait); 812 813 void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws, 814 struct sbq_wait *sbq_wait) 815 { 816 finish_wait(&ws->wait, &sbq_wait->wait); 817 if (sbq_wait->sbq) { 818 atomic_dec(&sbq->ws_active); 819 sbq_wait->sbq = NULL; 820 } 821 } 822 EXPORT_SYMBOL_GPL(sbitmap_finish_wait); 823