/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
{
	unsigned long mask, val;
	unsigned long __maybe_unused flags;
	bool ret = false;

	/* Silence bogus lockdep warning */
#if defined(CONFIG_LOCKDEP)
	local_irq_save(flags);
#endif
	spin_lock(&sb->map[index].swap_lock);

	if (!sb->map[index].cleared)
		goto out_unlock;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	do {
		mask = sb->map[index].cleared;
	} while (cmpxchg(&sb->map[index].cleared, mask, 0) != mask);

	/*
	 * Now clear the masked bits in our free word
	 */
	do {
		val = sb->map[index].word;
	} while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val);

	ret = true;
out_unlock:
	spin_unlock(&sb->map[index].swap_lock);
#if defined(CONFIG_LOCKDEP)
	local_irq_restore(flags);
#endif
	return ret;
}

int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node)
{
	unsigned int bits_per_word;
	unsigned int i;

	if (shift < 0) {
		shift = ilog2(BITS_PER_LONG);
		/*
		 * If the bitmap is small, shrink the number of bits per word so
		 * we spread over a few cachelines, at least. If less than 4
		 * bits, just forget about it, it's not going to work optimally
		 * anyway.
		 */
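		/*
		 * For example, assuming BITS_PER_LONG == 64: with depth == 8,
		 * shift starts at ilog2(64) == 6 and the loop below lowers it
		 * until (4 << shift) <= depth, i.e. shift == 1. That gives two
		 * bits per word and DIV_ROUND_UP(8, 2) == 4 words, so the bits
		 * are spread across several cachelines instead of being packed
		 * into a single word.
		 */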
		if (depth >= 4) {
			while ((4U << shift) > depth)
				shift--;
		}
	}
	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
	if (!sb->map)
		return -ENOMEM;

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
		spin_lock_init(&sb->map[i].swap_lock);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);

void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(sb, i);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_resize);

static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	unsigned int orig_hint = hint;
	int nr;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (orig_hint && hint && wrap) {
				hint = orig_hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}

static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
				     unsigned int alloc_hint, bool round_robin)
{
	int nr;

	do {
		nr = __sbitmap_get_word(&sb->map[index].word,
					sb->map[index].depth, alloc_hint,
					!round_robin);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(sb, index))
			break;
	} while (1);

	return nr;
}

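/*
 * The caller's alloc_hint is a bit number in the whole map: SB_NR_TO_INDEX()
 * selects the word and SB_NR_TO_BIT() the bit within it. With shift == 6, for
 * example, a hint of 70 selects word 70 >> 6 == 1, and in round-robin mode the
 * search also starts at bit 70 & 63 == 6 of that word. The loop below visits
 * each word at most once, wrapping from the last word back to word 0.
 */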
int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint,
					       round_robin);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);

int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
			unsigned long shallow_depth)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
again:
		nr = __sbitmap_get_word(&sb->map[index].word,
					min(sb->map[index].depth, shallow_depth),
					SB_NR_TO_BIT(sb, alloc_hint), true);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		if (sbitmap_deferred_clear(sb, index))
			goto again;

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);

bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

bool sbitmap_any_bit_clear(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];
		unsigned long mask = word->word & ~word->cleared;
		unsigned long ret;

		ret = find_first_zero_bit(&mask, word->depth);
		if (ret < word->depth)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);

static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];

		if (set)
			weight += bitmap_weight(&word->word, word->depth);
		else
			weight += bitmap_weight(&word->cleared, word->depth);
	}
	return weight;
}

static unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true);
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}

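/*
 * Dump the map as a hex dump built from emit_byte() above: sixteen bytes per
 * line, prefixed with the byte offset and grouped two bytes at a time. For
 * example, a map of depth 128 with only bits 0-9 set would show as
 *
 *	00000000: ff03 0000 0000 0000 0000 0000 0000 0000
 *
 * Bits from partial words are packed together, so the dump covers sb->depth
 * bits rather than map_nr whole words.
 */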
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned int word_bits = READ_ONCE(sb->map[i].depth);

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);

static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
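	/*
	 * Worked example, assuming SBQ_WAIT_QUEUES == 8 and SBQ_WAKE_BATCH == 8:
	 * with shift == 6, depth == 256 and no shallow limit, the effective
	 * depth stays 256 and wake_batch clamps to 8. With min_shallow_depth
	 * == 4, only 4 bits of each word are usable, the effective depth drops
	 * to 16 and wake_batch becomes 16 / SBQ_WAIT_QUEUES == 2, so one full
	 * round of frees can still wake every wait queue.
	 */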
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}

int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
	if (ret)
		return ret;

	sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sbq->alloc_hint) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	if (depth && !round_robin) {
		for_each_possible_cpu(i)
			*per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
	}

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		free_percpu(sbq->alloc_hint);
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}

	sbq->round_robin = round_robin;
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);

static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
	int i;

	if (sbq->wake_batch != wake_batch) {
		WRITE_ONCE(sbq->wake_batch, wake_batch);
		/*
		 * Pairs with the memory barrier in sbitmap_queue_wake_up()
		 * to ensure that the batch size is updated before the wait
		 * counts.
		 */
		smp_mb__before_atomic();
		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
			atomic_set(&sbq->ws[i].wait_cnt, 1);
	}
}

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	unsigned int hint, depth;
	int nr;

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);

int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth)
{
	unsigned int hint, depth;
	int nr;

	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);

static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	if (!atomic_read(&sbq->ws_active))
		return NULL;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			int o = atomic_read(&sbq->wake_index);

			if (wake_index != o)
				atomic_cmpxchg(&sbq->wake_index, o, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}

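/*
 * Charge one freed bit against the wait queue that sbq_wake_ptr() picked. Once
 * a full batch has been counted, re-arm wait_cnt to wake_batch, advance
 * wake_index and wake up to wake_batch waiters. Returns true when the cmpxchg
 * that re-arms the counter was lost to a concurrent caller, in which case the
 * caller should retry against the next wait queue.
 */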
static bool __sbq_wake_up(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	unsigned int wake_batch;
	int wait_cnt;

	ws = sbq_wake_ptr(sbq);
	if (!ws)
		return false;

	wait_cnt = atomic_dec_return(&ws->wait_cnt);
	if (wait_cnt <= 0) {
		int ret;

		wake_batch = READ_ONCE(sbq->wake_batch);

		/*
		 * Pairs with the memory barrier in sbitmap_queue_resize() to
		 * ensure that we see the batch size update before the wait
		 * count is reset.
		 */
		smp_mb__before_atomic();

		/*
		 * For concurrent callers of this, the one that failed the
		 * atomic_cmpxchg() race should call this function again
		 * to wake up a new batch on a different 'ws'.
		 */
		ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
		if (ret == wait_cnt) {
			sbq_index_atomic_inc(&sbq->wake_index);
			wake_up_nr(&ws->wait, wake_batch);
			return false;
		}

		return true;
	}

	return false;
}

void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
{
	while (__sbq_wake_up(sbq))
		;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);

	if (likely(!sbq->round_robin && nr < sbq->sb.depth))
		*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);

void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
			   atomic_read(&ws->wait_cnt),
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);

void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait)
{
	if (!sbq_wait->sbq) {
		sbq_wait->sbq = sbq;
		atomic_inc(&sbq->ws_active);
	}
	add_wait_queue(&ws->wait, &sbq_wait->wait);
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);

void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
	list_del_init(&sbq_wait->wait.entry);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq_wait->sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->sbq) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->sbq = sbq;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);