// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
{
	unsigned depth = sb->depth;

	sb->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sb->alloc_hint)
		return -ENOMEM;

	if (depth && !sb->round_robin) {
		int i;

		for_each_possible_cpu(i)
			*per_cpu_ptr(sb->alloc_hint, i) = get_random_u32_below(depth);
	}
	return 0;
}

static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb,
						    unsigned int depth)
{
	unsigned hint;

	hint = this_cpu_read(*sb->alloc_hint);
	if (unlikely(hint >= depth)) {
		hint = depth ? get_random_u32_below(depth) : 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}

	return hint;
}

static inline void update_alloc_hint_after_get(struct sbitmap *sb,
					       unsigned int depth,
					       unsigned int hint,
					       unsigned int nr)
{
	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sb->alloc_hint, 0);
	} else if (nr == hint || unlikely(sb->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}
}

/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
{
	unsigned long mask;

	if (!READ_ONCE(map->cleared))
		return false;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	mask = xchg(&map->cleared, 0);

	/*
	 * Now clear the masked bits in our free word
	 */
	atomic_long_andnot(mask, (atomic_long_t *)&map->word);
	BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
	return true;
}

int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node, bool round_robin,
		      bool alloc_hint)
{
	unsigned int bits_per_word;

	if (shift < 0)
		shift = sbitmap_calculate_shift(depth);

	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
	sb->round_robin = round_robin;

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	if (alloc_hint) {
		if (init_alloc_hint(sb, flags))
			return -ENOMEM;
	} else {
		sb->alloc_hint = NULL;
	}

	sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
	if (!sb->map) {
		free_percpu(sb->alloc_hint);
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);

void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(&sb->map[i]);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
}
EXPORT_SYMBOL_GPL(sbitmap_resize);
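/*
 * Illustrative example of the sizing arithmetic used by sbitmap_init_node()
 * and sbitmap_resize() above (the numbers are arbitrary): with depth = 200
 * and shift = 6, bits_per_word = 1 << 6 = 64 and
 * map_nr = DIV_ROUND_UP(200, 64) = 4, i.e. three full words plus one partial
 * word covering the remaining 200 % 64 = 8 bits.
 */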
static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	int nr;

	/* don't wrap if starting from 0 */
	wrap = wrap && hint;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (hint && wrap) {
				hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}

static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
				     unsigned int alloc_hint)
{
	struct sbitmap_word *map = &sb->map[index];
	int nr;

	do {
		nr = __sbitmap_get_word(&map->word, __map_depth(sb, index),
					alloc_hint, !sb->round_robin);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(map))
			break;
	} while (1);

	return nr;
}

static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (sb->round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}

int sbitmap_get(struct sbitmap *sb)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get(sb, hint);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);

static int __sbitmap_get_shallow(struct sbitmap *sb,
				 unsigned int alloc_hint,
				 unsigned long shallow_depth)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);
	alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
again:
		nr = __sbitmap_get_word(&sb->map[index].word,
					min_t(unsigned int,
					      __map_depth(sb, index),
					      shallow_depth),
					alloc_hint, true);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		if (sbitmap_deferred_clear(&sb->map[index]))
			goto again;

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}
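/*
 * Illustrative note on the shallow path above (the numbers are arbitrary):
 * the search depth passed to __sbitmap_get_word() is capped at shallow_depth
 * per word, so with 64-bit words and shallow_depth = 4 only bits 0-3 of each
 * word are candidates, i.e. at most 4 * map_nr bits can be allocated this way.
 */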
int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get_shallow(sb, hint, shallow_depth);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);

bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];
		unsigned int word_depth = __map_depth(sb, i);

		if (set)
			weight += bitmap_weight(&word->word, word_depth);
		else
			weight += bitmap_weight(&word->cleared, word_depth);
	}
	return weight;
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}

unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true) - sbitmap_cleared(sb);
}
EXPORT_SYMBOL_GPL(sbitmap_weight);

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}

void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned long cleared = READ_ONCE(sb->map[i].cleared);
		unsigned int word_bits = __map_depth(sb, i);

		word &= ~cleared;

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);
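/*
 * Illustrative example of the dump format produced above (the byte values
 * are made up): set-but-not-cleared bits are emitted one byte at a time,
 * least significant bits first, sixteen bytes per line and grouped in pairs,
 * with each line prefixed by its byte offset, e.g.
 *
 *	00000000: 3f00 0000 0000 0000 ff01 0000 0000 0000
 */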
static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}
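/*
 * Worked example for sbq_calc_wake_batch() above (the numbers are picked
 * only for illustration): with shift = 6 (64-bit words), depth = 256 and
 * min_shallow_depth = 16, shallow_depth = min(64, 16) = 16 and the usable
 * depth becomes (256 >> 6) * 16 + min(256 & 63, 16) = 4 * 16 + 0 = 64.
 * That depth is then divided by SBQ_WAIT_QUEUES and clamped to the
 * [1, SBQ_WAKE_BATCH] range to give the final wake_batch.
 */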
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node,
				round_robin, true);
	if (ret)
		return ret;

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);
	atomic_set(&sbq->completion_cnt, 0);
	atomic_set(&sbq->wakeup_cnt, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++)
		init_waitqueue_head(&sbq->ws[i].wait);

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);

static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch;

	wake_batch = sbq_calc_wake_batch(sbq, depth);
	if (sbq->wake_batch != wake_batch)
		WRITE_ONCE(sbq->wake_batch, wake_batch);
}

void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
					  unsigned int users)
{
	unsigned int wake_batch;
	unsigned int min_batch;
	unsigned int depth = (sbq->sb.depth + users - 1) / users;

	min_batch = sbq->sb.depth >= (4 * SBQ_WAIT_QUEUES) ? 4 : 1;

	wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES,
			       min_batch, SBQ_WAKE_BATCH);

	WRITE_ONCE(sbq->wake_batch, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch);

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	return sbitmap_get(&sbq->sb);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);

unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
					unsigned int *offset)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned int hint, depth;
	unsigned long index, nr;
	int i;

	if (unlikely(sb->round_robin))
		return 0;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);

	index = SB_NR_TO_INDEX(sb, hint);

	for (i = 0; i < sb->map_nr; i++) {
		struct sbitmap_word *map = &sb->map[index];
		unsigned long get_mask;
		unsigned int map_depth = __map_depth(sb, index);

		sbitmap_deferred_clear(map);
		if (map->word == (1UL << (map_depth - 1)) - 1)
			goto next;

		nr = find_first_zero_bit(&map->word, map_depth);
		if (nr + nr_tags <= map_depth) {
			atomic_long_t *ptr = (atomic_long_t *) &map->word;
			unsigned long val;

			get_mask = ((1UL << nr_tags) - 1) << nr;
			val = READ_ONCE(map->word);
			do {
				if ((val & ~get_mask) != val)
					goto next;
			} while (!atomic_long_try_cmpxchg(ptr, &val,
							  get_mask | val));
			get_mask = (get_mask & ~val) >> nr;
			if (get_mask) {
				*offset = nr + (index << sb->shift);
				update_alloc_hint_after_get(sb, depth, hint,
							*offset + nr_tags - 1);
				return get_mask;
			}
		}
next:
		/* Jump to next index. */
		if (++index >= sb->map_nr)
			index = 0;
	}

	return 0;
}
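/*
 * Illustrative example for the batch grab in __sbitmap_queue_get_batch()
 * above (the values are arbitrary): with nr_tags = 3 and the first zero bit
 * at nr = 5, get_mask = ((1UL << 3) - 1) << 5 = 0xe0. The cmpxchg loop only
 * succeeds while none of those three bits are set in the word, and the
 * returned mask is shifted back down so that bit 0 corresponds to the tag
 * reported in *offset.
 */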
int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
			      unsigned int shallow_depth)
{
	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	return sbitmap_get_shallow(&sbq->sb, shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_get_shallow);

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);

static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
{
	int i, wake_index;

	if (!atomic_read(&sbq->ws_active))
		return;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		/*
		 * Advance the index before checking the current queue.
		 * It improves fairness by ensuring the queue doesn't
		 * need to be fully emptied before trying to wake up
		 * from the next one.
		 */
		wake_index = sbq_index_inc(wake_index);

		/*
		 * It is sufficient to wake up at least one waiter to
		 * guarantee forward progress.
		 */
		if (waitqueue_active(&ws->wait) &&
		    wake_up_nr(&ws->wait, nr))
			break;
	}

	if (wake_index != atomic_read(&sbq->wake_index))
		atomic_set(&sbq->wake_index, wake_index);
}

void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
{
	unsigned int wake_batch = READ_ONCE(sbq->wake_batch);
	unsigned int wakeups;

	if (!atomic_read(&sbq->ws_active))
		return;

	atomic_add(nr, &sbq->completion_cnt);
	wakeups = atomic_read(&sbq->wakeup_cnt);

	do {
		if (atomic_read(&sbq->completion_cnt) - wakeups < wake_batch)
			return;
	} while (!atomic_try_cmpxchg(&sbq->wakeup_cnt,
				     &wakeups, wakeups + wake_batch));

	__sbitmap_queue_wake_up(sbq, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);

static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
{
	if (likely(!sb->round_robin && tag < sb->depth))
		data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag);
}

void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
			       int *tags, int nr_tags)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned long *addr = NULL;
	unsigned long mask = 0;
	int i;

	smp_mb__before_atomic();
	for (i = 0; i < nr_tags; i++) {
		const int tag = tags[i] - offset;
		unsigned long *this_addr;

		/* since we're clearing a batch, skip the deferred map */
		this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
		if (!addr) {
			addr = this_addr;
		} else if (addr != this_addr) {
			atomic_long_andnot(mask, (atomic_long_t *) addr);
			mask = 0;
			addr = this_addr;
		}
		mask |= (1UL << SB_NR_TO_BIT(sb, tag));
	}

	if (mask)
		atomic_long_andnot(mask, (atomic_long_t *) addr);

	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq, nr_tags);
	sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
					tags[nr_tags - 1] - offset);
}

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	/*
	 * Once the cleared bit is set, the bit may be reallocated.
	 *
	 * This barrier orders reads and writes on the associated instance
	 * (such as a blk_mq request) against this bit, to avoid racing with
	 * re-allocation; its pair is the memory barrier implied in
	 * __sbitmap_get_word().
	 *
	 * One invariant is that the cleared bit must be zero while the bit
	 * is in use.
	 */
	smp_mb__before_atomic();
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq, 1);
	sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
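/*
 * Illustrative note on the batched wakeup accounting in
 * sbitmap_queue_wake_up() above: completions accumulate in completion_cnt,
 * and waiters are only woken once completion_cnt runs at least wake_batch
 * ahead of wakeup_cnt; wakeup_cnt is then advanced by wake_batch and a single
 * wait queue receives up to wake_batch wakeups. With wake_batch = 8 (an
 * arbitrary example), eight completions are coalesced into one wakeup pass.
 */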
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait=%s},\n",
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);

void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait)
{
	if (!sbq_wait->sbq) {
		sbq_wait->sbq = sbq;
		atomic_inc(&sbq->ws_active);
		add_wait_queue(&ws->wait, &sbq_wait->wait);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);

void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
	list_del_init(&sbq_wait->wait.entry);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq_wait->sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->sbq) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->sbq = sbq;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);
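/*
 * Usage sketch (illustrative only; error handling and the waiting path are
 * omitted, and "my_tags" and "depth" are made-up names). A typical
 * allocate/free cycle of an sbitmap_queue looks roughly like:
 *
 *	struct sbitmap_queue my_tags;
 *	int tag;
 *
 *	sbitmap_queue_init_node(&my_tags, depth, -1, false, GFP_KERNEL,
 *				NUMA_NO_NODE);
 *	tag = __sbitmap_queue_get(&my_tags);	// -1 if no bit was free
 *	if (tag >= 0)
 *		sbitmap_queue_clear(&my_tags, tag, raw_smp_processor_id());
 *	sbitmap_queue_free(&my_tags);
 */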