// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
{
	unsigned long mask, val;
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&sb->map[index].swap_lock, flags);

	if (!sb->map[index].cleared)
		goto out_unlock;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	do {
		mask = sb->map[index].cleared;
	} while (cmpxchg(&sb->map[index].cleared, mask, 0) != mask);

	/*
	 * Now clear the masked bits in our free word
	 */
	do {
		val = sb->map[index].word;
	} while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val);

	ret = true;
out_unlock:
	spin_unlock_irqrestore(&sb->map[index].swap_lock, flags);
	return ret;
}

int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node)
{
	unsigned int bits_per_word;
	unsigned int i;

	if (shift < 0) {
		shift = ilog2(BITS_PER_LONG);
		/*
		 * If the bitmap is small, shrink the number of bits per word so
		 * we spread over a few cachelines, at least. If less than 4
		 * bits, just forget about it, it's not going to work optimally
		 * anyway.
		 */
		if (depth >= 4) {
			while ((4U << shift) > depth)
				shift--;
		}
	}
	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
	if (!sb->map)
		return -ENOMEM;

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
		spin_lock_init(&sb->map[i].swap_lock);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);
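
/*
 * Illustrative example (added comment, not part of the original source):
 * on a 64-bit kernel with shift < 0 and depth = 16, sbitmap_init_node()
 * starts from shift = ilog2(64) = 6 and shrinks it until 4 << shift is no
 * larger than depth, ending at shift = 2. That gives bits_per_word = 4 and
 * map_nr = DIV_ROUND_UP(16, 4) = 4, so the 16 bits are spread across four
 * sbitmap_word entries (and hence several cachelines) rather than being
 * packed into a single word.
 */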

void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(sb, i);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_resize);

static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	unsigned int orig_hint = hint;
	int nr;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (orig_hint && hint && wrap) {
				hint = orig_hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}

static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
				     unsigned int alloc_hint, bool round_robin)
{
	int nr;

	do {
		nr = __sbitmap_get_word(&sb->map[index].word,
					sb->map[index].depth, alloc_hint,
					!round_robin);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(sb, index))
			break;
	} while (1);

	return nr;
}

int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint,
					       round_robin);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);
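
/*
 * Note (added for clarity, not in the original source): the value returned
 * by sbitmap_get() encodes both the word index and the bit within that word
 * as nr = (index << sb->shift) + bit, and the SB_NR_TO_INDEX()/SB_NR_TO_BIT()
 * macros used above perform the reverse split. For example, with shift = 6
 * (64 bits per word), nr = 131 refers to bit 3 of word 2.
 */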

int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
			unsigned long shallow_depth)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
again:
		nr = __sbitmap_get_word(&sb->map[index].word,
					min(sb->map[index].depth, shallow_depth),
					SB_NR_TO_BIT(sb, alloc_hint), true);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		if (sbitmap_deferred_clear(sb, index))
			goto again;

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);

bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

bool sbitmap_any_bit_clear(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];
		unsigned long mask = word->word & ~word->cleared;
		unsigned long ret;

		ret = find_first_zero_bit(&mask, word->depth);
		if (ret < word->depth)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);

static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];

		if (set)
			weight += bitmap_weight(&word->word, word->depth);
		else
			weight += bitmap_weight(&word->cleared, word->depth);
	}
	return weight;
}

static unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true);
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}

void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned int word_bits = READ_ONCE(sb->map[i].depth);

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);
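
/*
 * For reference (added comment, not in the original source): the dump
 * produced above is a conventional hex dump. emit_byte() starts a new
 * "%08x:" offset label every 16 bytes and inserts a space every two bytes,
 * and each word is emitted least-significant byte first, so a 64-bit word
 * with only its low four bits set would appear as a line like:
 *
 *	00000000: 0f00 0000 0000 0000
 */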

static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}
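
/*
 * Worked example (added for clarity, not in the original source), assuming
 * the SBQ_WAIT_QUEUES = 8 and SBQ_WAKE_BATCH = 8 definitions from
 * include/linux/sbitmap.h: with depth = 256, shift = 6 and the default
 * min_shallow_depth = UINT_MAX, shallow_depth = 64, the usable depth stays
 * at 256 and wake_batch = clamp(256 / 8, 1, 8) = 8. If a user then sets
 * min_shallow_depth = 4, only 4 bits per word count, the usable depth drops
 * to (256 >> 6) * 4 = 16, and wake_batch becomes clamp(16 / 8, 1, 8) = 2.
 */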

int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
	if (ret)
		return ret;

	sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sbq->alloc_hint) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	if (depth && !round_robin) {
		for_each_possible_cpu(i)
			*per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
	}

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		free_percpu(sbq->alloc_hint);
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}

	sbq->round_robin = round_robin;
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);

static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
	int i;

	if (sbq->wake_batch != wake_batch) {
		WRITE_ONCE(sbq->wake_batch, wake_batch);
		/*
		 * Pairs with the memory barrier in sbitmap_queue_wake_up()
		 * to ensure that the batch size is updated before the wait
		 * counts.
		 */
		smp_mb();
		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
			atomic_set(&sbq->ws[i].wait_cnt, 1);
	}
}

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	unsigned int hint, depth;
	int nr;

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);

int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth)
{
	unsigned int hint, depth;
	int nr;

	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);

static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	if (!atomic_read(&sbq->ws_active))
		return NULL;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			int o = atomic_read(&sbq->wake_index);

			if (wake_index != o)
				atomic_cmpxchg(&sbq->wake_index, o, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}

static bool __sbq_wake_up(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	unsigned int wake_batch;
	int wait_cnt;

	ws = sbq_wake_ptr(sbq);
	if (!ws)
		return false;

	wait_cnt = atomic_dec_return(&ws->wait_cnt);
	if (wait_cnt <= 0) {
		int ret;

		wake_batch = READ_ONCE(sbq->wake_batch);

		/*
		 * Pairs with the memory barrier in sbitmap_queue_resize() to
		 * ensure that we see the batch size update before the wait
		 * count is reset.
		 */
		smp_mb__before_atomic();

		/*
		 * For concurrent callers of this, the one that failed the
		 * atomic_cmpxchg() race should call this function again
		 * to wake up a new batch on a different 'ws'.
		 */
		ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
		if (ret == wait_cnt) {
			sbq_index_atomic_inc(&sbq->wake_index);
			wake_up_nr(&ws->wait, wake_batch);
			return false;
		}

		return true;
	}

	return false;
}

void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
{
	while (__sbq_wake_up(sbq))
		;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	/*
	 * Once the clear bit is set, the bit may be re-allocated.
	 *
	 * Order the READ/WRITE accesses to the associated instance (such as a
	 * blk-mq request) before setting this bit, to avoid racing with
	 * re-allocation. The pairing barrier is the one implied in
	 * __sbitmap_get_word().
	 *
	 * One invariant is that the clear bit has to be zero when the bit
	 * is in use.
	 */
	smp_mb__before_atomic();
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);

	if (likely(!sbq->round_robin && nr < sbq->sb.depth))
		*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);

void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
			   atomic_read(&ws->wait_cnt),
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);

void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait)
{
	if (!sbq_wait->sbq) {
		sbq_wait->sbq = sbq;
		atomic_inc(&sbq->ws_active);
	}
	add_wait_queue(&ws->wait, &sbq_wait->wait);
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);

void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
	list_del_init(&sbq_wait->wait.entry);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq_wait->sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->sbq) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->sbq = sbq;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);
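
/*
 * Illustrative usage sketch (added comment, not part of the original file).
 * It is loosely modelled on how blk-mq consumes this API; wait-queue
 * selection and error handling are simplified, and DEFINE_SBQ_WAIT() and
 * sbq_wait_ptr() are assumed to be the helpers from include/linux/sbitmap.h:
 *
 *	DEFINE_SBQ_WAIT(wait);
 *	atomic_t wait_index = ATOMIC_INIT(0);
 *	struct sbq_wait_state *ws;
 *	int nr;
 *
 *	nr = __sbitmap_queue_get(sbq);
 *	if (nr < 0) {
 *		do {
 *			ws = sbq_wait_ptr(sbq, &wait_index);
 *			sbitmap_prepare_to_wait(sbq, ws, &wait,
 *						TASK_UNINTERRUPTIBLE);
 *			nr = __sbitmap_queue_get(sbq);
 *			if (nr >= 0)
 *				break;
 *			io_schedule();
 *			sbitmap_finish_wait(sbq, ws, &wait);
 *			nr = __sbitmap_queue_get(sbq);
 *		} while (nr < 0);
 *		sbitmap_finish_wait(sbq, ws, &wait);
 *	}
 *
 *	// ... use the allocated bit 'nr' as a tag ...
 *
 *	sbitmap_queue_clear(sbq, nr, smp_processor_id());
 */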