// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
{
	unsigned depth = sb->depth;

	sb->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sb->alloc_hint)
		return -ENOMEM;

	if (depth && !sb->round_robin) {
		int i;

		for_each_possible_cpu(i)
			*per_cpu_ptr(sb->alloc_hint, i) = prandom_u32() % depth;
	}
	return 0;
}

static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb,
						    unsigned int depth)
{
	unsigned hint;

	hint = this_cpu_read(*sb->alloc_hint);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}

	return hint;
}

static inline void update_alloc_hint_after_get(struct sbitmap *sb,
					       unsigned int depth,
					       unsigned int hint,
					       unsigned int nr)
{
	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sb->alloc_hint, 0);
	} else if (nr == hint || unlikely(sb->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}
}

/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
{
	unsigned long mask;

	if (!READ_ONCE(map->cleared))
		return false;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	mask = xchg(&map->cleared, 0);

	/*
	 * Now clear the masked bits in our free word
	 */
	atomic_long_andnot(mask, (atomic_long_t *)&map->word);
	BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
	return true;
}

int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node, bool round_robin,
		      bool alloc_hint)
{
	unsigned int bits_per_word;

	if (shift < 0)
		shift = sbitmap_calculate_shift(depth);

	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
	sb->round_robin = round_robin;

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	if (alloc_hint) {
		if (init_alloc_hint(sb, flags))
			return -ENOMEM;
	} else {
		sb->alloc_hint = NULL;
	}

	sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
	if (!sb->map) {
		free_percpu(sb->alloc_hint);
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);

void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(&sb->map[i]);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
}
EXPORT_SYMBOL_GPL(sbitmap_resize);

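/*
 * Find and claim a free bit within a single word. @hint is the bit to start
 * searching from; if @wrap is true and the search starting at a non-zero
 * hint fails, the search restarts from bit 0 so the whole word is covered.
 * Returns the claimed bit number, or -1 if no free bit was found.
 */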
static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	int nr;

	/* don't wrap if starting from 0 */
	wrap = wrap && hint;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (hint && wrap) {
				hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}

static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
				     unsigned int alloc_hint)
{
	struct sbitmap_word *map = &sb->map[index];
	int nr;

	do {
		nr = __sbitmap_get_word(&map->word, __map_depth(sb, index),
					alloc_hint, !sb->round_robin);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(map))
			break;
	} while (1);

	return nr;
}

static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (sb->round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}

int sbitmap_get(struct sbitmap *sb)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get(sb, hint);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);

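/*
 * Like __sbitmap_get(), but each word is only searched up to @shallow_depth
 * bits, which limits how many bits of each word can be handed out.
 */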
static int __sbitmap_get_shallow(struct sbitmap *sb,
				 unsigned int alloc_hint,
				 unsigned long shallow_depth)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
again:
		nr = __sbitmap_get_word(&sb->map[index].word,
					min_t(unsigned int,
					      __map_depth(sb, index),
					      shallow_depth),
					SB_NR_TO_BIT(sb, alloc_hint), true);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		if (sbitmap_deferred_clear(&sb->map[index]))
			goto again;

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}

int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get_shallow(sb, hint, shallow_depth);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);

bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];
		unsigned int word_depth = __map_depth(sb, i);

		if (set)
			weight += bitmap_weight(&word->word, word_depth);
		else
			weight += bitmap_weight(&word->cleared, word_depth);
	}
	return weight;
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}

unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true) - sbitmap_cleared(sb);
}
EXPORT_SYMBOL_GPL(sbitmap_weight);

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}

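/*
 * Dump the in-use bits (word & ~cleared) as a hex dump, 16 bytes per line,
 * packing the per-word bitmaps into a contiguous stream of bytes via
 * emit_byte() above.
 */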
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned long cleared = READ_ONCE(sb->map[i].cleared);
		unsigned int word_bits = __map_depth(sb, i);

		word &= ~cleared;

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);

static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}

int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node,
				round_robin, true);
	if (ret)
		return ret;

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);

static inline void __sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
						     unsigned int wake_batch)
{
	int i;

	if (sbq->wake_batch != wake_batch) {
		WRITE_ONCE(sbq->wake_batch, wake_batch);
		/*
		 * Pairs with the memory barrier in sbitmap_queue_wake_up()
		 * to ensure that the batch size is updated before the wait
		 * counts.
		 */
		smp_mb();
		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
			atomic_set(&sbq->ws[i].wait_cnt, 1);
	}
}

static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch;

	wake_batch = sbq_calc_wake_batch(sbq, depth);
	__sbitmap_queue_update_wake_batch(sbq, wake_batch);
}

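/*
 * Recalculate the wake batch as if the depth were split evenly between
 * @users active users of the queue, with a minimum batch of 4 when the
 * bitmap is deep enough to allow it.
 */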
void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
					  unsigned int users)
{
	unsigned int wake_batch;
	unsigned int min_batch;
	unsigned int depth = (sbq->sb.depth + users - 1) / users;

	min_batch = sbq->sb.depth >= (4 * SBQ_WAIT_QUEUES) ? 4 : 1;

	wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES,
			       min_batch, SBQ_WAKE_BATCH);
	__sbitmap_queue_update_wake_batch(sbq, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch);

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	return sbitmap_get(&sbq->sb);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);

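/*
 * Try to claim @nr_tags consecutive free bits from a single word with one
 * cmpxchg. On success, *offset is set to the first claimed bit and a mask
 * of the claimed bits (relative to *offset) is returned; 0 means no batch
 * could be claimed.
 */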
unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
					unsigned int *offset)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned int hint, depth;
	unsigned long index, nr;
	int i;

	if (unlikely(sb->round_robin))
		return 0;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);

	index = SB_NR_TO_INDEX(sb, hint);

	for (i = 0; i < sb->map_nr; i++) {
		struct sbitmap_word *map = &sb->map[index];
		unsigned long get_mask;
		unsigned int map_depth = __map_depth(sb, index);

		sbitmap_deferred_clear(map);
		if (map->word == (1UL << (map_depth - 1)) - 1)
			goto next;

		nr = find_first_zero_bit(&map->word, map_depth);
		if (nr + nr_tags <= map_depth) {
			atomic_long_t *ptr = (atomic_long_t *) &map->word;
			unsigned long val, ret;

			get_mask = ((1UL << nr_tags) - 1) << nr;
			do {
				val = READ_ONCE(map->word);
				if ((val & ~get_mask) != val)
					goto next;
				ret = atomic_long_cmpxchg(ptr, val, get_mask | val);
			} while (ret != val);
			get_mask = (get_mask & ~ret) >> nr;
			if (get_mask) {
				*offset = nr + (index << sb->shift);
				update_alloc_hint_after_get(sb, depth, hint,
							*offset + nr_tags - 1);
				return get_mask;
			}
		}
next:
		/* Jump to next index. */
		if (++index >= sb->map_nr)
			index = 0;
	}

	return 0;
}

int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
			      unsigned int shallow_depth)
{
	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	return sbitmap_get_shallow(&sbq->sb, shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_get_shallow);

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);

static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	if (!atomic_read(&sbq->ws_active))
		return NULL;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			if (wake_index != atomic_read(&sbq->wake_index))
				atomic_set(&sbq->wake_index, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}

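/*
 * Charge @nr freed bits against the current wait queue's batch counter and
 * wake a batch of waiters once the counter is used up. Returns true if the
 * caller should retry against another wait queue.
 */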
static bool __sbq_wake_up(struct sbitmap_queue *sbq, int nr)
{
	struct sbq_wait_state *ws;
	int wake_batch, wait_cnt, cur;

	ws = sbq_wake_ptr(sbq);
	if (!ws || !nr)
		return false;

	wake_batch = READ_ONCE(sbq->wake_batch);
	cur = atomic_read(&ws->wait_cnt);
	do {
		if (cur <= 0)
			return true;
		wait_cnt = cur - nr;
	} while (!atomic_try_cmpxchg(&ws->wait_cnt, &cur, wait_cnt));

	/*
	 * For concurrent callers of this, callers should call this function
	 * again to wakeup a new batch on a different 'ws'.
	 */
	if (!waitqueue_active(&ws->wait))
		return true;

	if (wait_cnt > 0)
		return false;

	/*
	 * Wake up first in case that concurrent callers decrease wait_cnt
	 * while waitqueue is empty.
	 */
	wake_up_nr(&ws->wait, max(wake_batch, nr));

	/*
	 * Pairs with the memory barrier in sbitmap_queue_resize() to
	 * ensure that we see the batch size update before the wait
	 * count is reset.
	 *
	 * Also pairs with the implicit barrier between decrementing wait_cnt
	 * and checking for waitqueue_active() to make sure waitqueue_active()
	 * sees result of the wakeup if atomic_dec_return() has seen the result
	 * of atomic_set().
	 */
	smp_mb__before_atomic();

	/*
	 * Increase wake_index before updating wait_cnt, otherwise concurrent
	 * callers can see valid wait_cnt in old waitqueue, which can cause
	 * invalid wakeup on the old waitqueue.
	 */
	sbq_index_atomic_inc(&sbq->wake_index);
	atomic_set(&ws->wait_cnt, wake_batch);

	return false;
}

void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
{
	while (__sbq_wake_up(sbq, nr))
		;
}

static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
{
	if (likely(!sb->round_robin && tag < sb->depth))
		data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag);
}

void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
			       int *tags, int nr_tags)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned long *addr = NULL;
	unsigned long mask = 0;
	int i;

	smp_mb__before_atomic();
	for (i = 0; i < nr_tags; i++) {
		const int tag = tags[i] - offset;
		unsigned long *this_addr;

		/* since we're clearing a batch, skip the deferred map */
		this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
		if (!addr) {
			addr = this_addr;
		} else if (addr != this_addr) {
			atomic_long_andnot(mask, (atomic_long_t *) addr);
			mask = 0;
			addr = this_addr;
		}
		mask |= (1UL << SB_NR_TO_BIT(sb, tag));
	}

	if (mask)
		atomic_long_andnot(mask, (atomic_long_t *) addr);

	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq, nr_tags);
	sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
				tags[nr_tags - 1] - offset);
}

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	/*
	 * Once the clear bit is set, the bit may be allocated out.
	 *
	 * Orders READ/WRITE on the associated instance(such as request
	 * of blk_mq) by this bit for avoiding race with re-allocation,
	 * and its pair is the memory barrier implied in __sbitmap_get_word.
	 *
	 * One invariant is that the clear bit has to be zero when the bit
	 * is in use.
	 */
	smp_mb__before_atomic();
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq, 1);
	sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);

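/*
 * Wake up everything waiting on any of the wait queues, regardless of the
 * per-queue batch counters.
 */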
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
			   atomic_read(&ws->wait_cnt),
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);

void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait)
{
	if (!sbq_wait->sbq) {
		sbq_wait->sbq = sbq;
		atomic_inc(&sbq->ws_active);
		add_wait_queue(&ws->wait, &sbq_wait->wait);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);

void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
	list_del_init(&sbq_wait->wait.entry);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq_wait->sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->sbq) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->sbq = sbq;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);