/*
 * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
 * over multiple cachelines to avoid ping-pong between multiple submitters
 * or submitter and completer. Uses rolling wakeups to avoid falling off
 * the scaling cliff when we run out of tags and have to start putting
 * submitters to sleep.
 *
 * Uses active queue tracking to support fairer distribution of tags
 * between multiple submitters when a shared tag map is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
{
        int i;

        for (i = 0; i < bt->map_nr; i++) {
                struct blk_align_bitmap *bm = &bt->map[i];
                int ret;

                ret = find_first_zero_bit(&bm->word, bm->depth);
                if (ret < bm->depth)
                        return true;
        }

        return false;
}

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
        if (!tags)
                return true;

        return bt_has_free_tags(&tags->bitmap_tags);
}

static inline void bt_index_inc(unsigned int *index)
{
        *index = (*index + 1) & (BT_WAIT_QUEUES - 1);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
            !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                atomic_inc(&hctx->tags->active_queues);

        return true;
}

/*
 * Wake up all waiters potentially sleeping on normal (non-reserved) tags.
 */
static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
{
        struct blk_mq_bitmap_tags *bt;
        int i, wake_index;

        bt = &tags->bitmap_tags;
        wake_index = bt->wake_index;
        for (i = 0; i < BT_WAIT_QUEUES; i++) {
                struct bt_wait_state *bs = &bt->bs[wake_index];

                if (waitqueue_active(&bs->wait))
                        wake_up(&bs->wait);

                bt_index_inc(&wake_index);
        }
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_tags *tags = hctx->tags;

        if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return;

        atomic_dec(&tags->active_queues);

        blk_mq_tag_wakeup_all(tags);
}
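/*
 * Illustrative note (an assumption-based sketch, not original documentation):
 * waiters are spread over BT_WAIT_QUEUES separate wait queues, and
 * bt_index_inc() advances through them with the "& (BT_WAIT_QUEUES - 1)"
 * mask, which only works because BT_WAIT_QUEUES (defined in blk-mq-tag.h,
 * typically 8 in this era) is a power of two. With a value of 8 the index
 * simply walks 0, 1, ..., 7 and wraps back to 0, so blk_mq_tag_wakeup_all()
 * above visits every wait queue exactly once starting from wake_index.
 */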
/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
                                  struct blk_mq_bitmap_tags *bt)
{
        unsigned int depth, users;

        if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
                return true;
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return true;

        /*
         * Don't try dividing an ant
         */
        if (bt->depth == 1)
                return true;

        users = atomic_read(&hctx->tags->active_queues);
        if (!users)
                return true;

        /*
         * Allow at least some tags
         */
        depth = max((bt->depth + users - 1) / users, 4U);
        return atomic_read(&hctx->nr_active) < depth;
}
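/*
 * Worked example of the fair-share limit above (illustrative numbers only):
 * with a bitmap depth of 128 and three active shared users, the per-hctx
 * allowance is max((128 + 3 - 1) / 3, 4U) = 43, so a given hctx may have at
 * most 43 tags in flight before further allocations fail or go to sleep.
 */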
static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
{
        int tag, org_last_tag, end;

        org_last_tag = last_tag;
        end = bm->depth;
        do {
restart:
                tag = find_next_zero_bit(&bm->word, end, last_tag);
                if (unlikely(tag >= end)) {
                        /*
                         * We started with an offset, start from 0 to
                         * exhaust the map.
                         */
                        if (org_last_tag && last_tag) {
                                end = last_tag;
                                last_tag = 0;
                                goto restart;
                        }
                        return -1;
                }
                last_tag = tag + 1;
        } while (test_and_set_bit_lock(tag, &bm->word));

        return tag;
}

/*
 * Straightforward bitmap tag implementation, where each bit is a tag
 * (cleared == free, and set == busy). The small twist is using per-cpu
 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
 * contexts. This enables us to drastically limit the space searched,
 * without dirtying an extra shared cacheline like we would if we stored
 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
 * of that, each word of tags is in a separate cacheline. This means that
 * multiple users will tend to stick to different cachelines, at least
 * until the map is exhausted.
 */
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
                    unsigned int *tag_cache)
{
        unsigned int last_tag, org_last_tag;
        int index, i, tag;

        if (!hctx_may_queue(hctx, bt))
                return -1;

        last_tag = org_last_tag = *tag_cache;
        index = TAG_TO_INDEX(bt, last_tag);

        for (i = 0; i < bt->map_nr; i++) {
                tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
                if (tag != -1) {
                        tag += (index << bt->bits_per_word);
                        goto done;
                }

                last_tag = 0;
                if (++index >= bt->map_nr)
                        index = 0;
        }

        *tag_cache = 0;
        return -1;

        /*
         * Only update the cache from the allocation path, if we ended
         * up using the specific cached tag.
         */
done:
        if (tag == org_last_tag) {
                last_tag = tag + 1;
                if (last_tag >= bt->depth - 1)
                        last_tag = 0;

                *tag_cache = last_tag;
        }

        return tag;
}

static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
                                         struct blk_mq_hw_ctx *hctx)
{
        struct bt_wait_state *bs;

        if (!hctx)
                return &bt->bs[0];

        bs = &bt->bs[hctx->wait_index];
        bt_index_inc(&hctx->wait_index);
        return bs;
}

static int bt_get(struct blk_mq_alloc_data *data,
                  struct blk_mq_bitmap_tags *bt,
                  struct blk_mq_hw_ctx *hctx,
                  unsigned int *last_tag)
{
        struct bt_wait_state *bs;
        DEFINE_WAIT(wait);
        int tag;

        tag = __bt_get(hctx, bt, last_tag);
        if (tag != -1)
                return tag;

        if (!(data->gfp & __GFP_WAIT))
                return -1;

        bs = bt_wait_ptr(bt, hctx);
        do {
                bool was_empty;

                was_empty = list_empty(&wait.task_list);
                prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);

                tag = __bt_get(hctx, bt, last_tag);
                if (tag != -1)
                        break;

                if (was_empty)
                        atomic_set(&bs->wait_cnt, bt->wake_cnt);

                blk_mq_put_ctx(data->ctx);

                io_schedule();

                data->ctx = blk_mq_get_ctx(data->q);
                data->hctx = data->q->mq_ops->map_queue(data->q,
                                data->ctx->cpu);
                if (data->reserved) {
                        bt = &data->hctx->tags->breserved_tags;
                } else {
                        last_tag = &data->ctx->last_tag;
                        hctx = data->hctx;
                        bt = &hctx->tags->bitmap_tags;
                }
                finish_wait(&bs->wait, &wait);
                bs = bt_wait_ptr(bt, hctx);
        } while (1);

        finish_wait(&bs->wait, &wait);
        return tag;
}

static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
        int tag;

        tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
                     &data->ctx->last_tag);
        if (tag >= 0)
                return tag + data->hctx->tags->nr_reserved_tags;

        return BLK_MQ_TAG_FAIL;
}

static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
        int tag, zero = 0;

        if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
                WARN_ON_ONCE(1);
                return BLK_MQ_TAG_FAIL;
        }

        tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero);
        if (tag < 0)
                return BLK_MQ_TAG_FAIL;

        return tag;
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
        if (!data->reserved)
                return __blk_mq_get_tag(data);

        return __blk_mq_get_reserved_tag(data);
}

static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
{
        int i, wake_index;

        wake_index = bt->wake_index;
        for (i = 0; i < BT_WAIT_QUEUES; i++) {
                struct bt_wait_state *bs = &bt->bs[wake_index];

                if (waitqueue_active(&bs->wait)) {
                        if (wake_index != bt->wake_index)
                                bt->wake_index = wake_index;

                        return bs;
                }

                bt_index_inc(&wake_index);
        }

        return NULL;
}

static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
{
        const int index = TAG_TO_INDEX(bt, tag);
        struct bt_wait_state *bs;

        /*
         * The unlock memory barrier needs to order access to the request
         * in the free path against clearing the tag bit.
         */
        clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word);

        bs = bt_wake_ptr(bt);
        if (bs && atomic_dec_and_test(&bs->wait_cnt)) {
                atomic_set(&bs->wait_cnt, bt->wake_cnt);
                bt_index_inc(&bt->wake_index);
                wake_up(&bs->wait);
        }
}
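/*
 * Illustrative sketch of the rolling wakeups mentioned in the file header
 * (example values assumed, see bt_update_count() below): with wake_cnt == 8,
 * bt_clear_tag() only issues a wakeup on roughly every 8th freed tag; the
 * counter is then re-armed and wake_index advances, so the next batch of
 * frees targets the following wait queue rather than waking every sleeper
 * on every completion.
 */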
static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
        BUG_ON(tag >= tags->nr_tags);

        bt_clear_tag(&tags->bitmap_tags, tag);
}

static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
                                      unsigned int tag)
{
        BUG_ON(tag >= tags->nr_reserved_tags);

        bt_clear_tag(&tags->breserved_tags, tag);
}

void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
                    unsigned int *last_tag)
{
        struct blk_mq_tags *tags = hctx->tags;

        if (tag >= tags->nr_reserved_tags) {
                const int real_tag = tag - tags->nr_reserved_tags;

                __blk_mq_put_tag(tags, real_tag);
                *last_tag = real_tag;
        } else
                __blk_mq_put_reserved_tag(tags, tag);
}

static void bt_for_each_free(struct blk_mq_bitmap_tags *bt,
                             unsigned long *free_map, unsigned int off)
{
        int i;

        for (i = 0; i < bt->map_nr; i++) {
                struct blk_align_bitmap *bm = &bt->map[i];
                int bit = 0;

                do {
                        bit = find_next_zero_bit(&bm->word, bm->depth, bit);
                        if (bit >= bm->depth)
                                break;

                        __set_bit(bit + off, free_map);
                        bit++;
                } while (1);

                off += (1 << bt->bits_per_word);
        }
}

void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
                          void (*fn)(void *, unsigned long *), void *data)
{
        unsigned long *tag_map;
        size_t map_size;

        map_size = ALIGN(tags->nr_tags, BITS_PER_LONG) / BITS_PER_LONG;
        tag_map = kzalloc(map_size * sizeof(unsigned long), GFP_ATOMIC);
        if (!tag_map)
                return;

        bt_for_each_free(&tags->bitmap_tags, tag_map, tags->nr_reserved_tags);
        if (tags->nr_reserved_tags)
                bt_for_each_free(&tags->breserved_tags, tag_map, 0);

        fn(data, tag_map);
        kfree(tag_map);
}
EXPORT_SYMBOL(blk_mq_tag_busy_iter);

static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
        unsigned int i, used;

        for (i = 0, used = 0; i < bt->map_nr; i++) {
                struct blk_align_bitmap *bm = &bt->map[i];

                used += bitmap_weight(&bm->word, bm->depth);
        }

        return bt->depth - used;
}

static void bt_update_count(struct blk_mq_bitmap_tags *bt,
                            unsigned int depth)
{
        unsigned int tags_per_word = 1U << bt->bits_per_word;
        unsigned int map_depth = depth;

        if (depth) {
                int i;

                for (i = 0; i < bt->map_nr; i++) {
                        bt->map[i].depth = min(map_depth, tags_per_word);
                        map_depth -= bt->map[i].depth;
                }
        }

        bt->wake_cnt = BT_WAIT_BATCH;
        if (bt->wake_cnt > depth / 4)
                bt->wake_cnt = max(1U, depth / 4);

        bt->depth = depth;
}
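/*
 * Worked example of the wake_cnt sizing above (illustrative, assuming
 * BT_WAIT_BATCH is 8 as defined in blk-mq-tag.h of this era): a depth of 64
 * keeps wake_cnt at the BT_WAIT_BATCH cap of 8, a depth of 16 shrinks it to
 * 16 / 4 = 4, and a depth of 2 falls back to the minimum of 1 so that a
 * waiter is still woken for every freed tag.
 */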
static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
                    int node, bool reserved)
{
        int i;

        bt->bits_per_word = ilog2(BITS_PER_LONG);

        /*
         * Depth can be zero for reserved tags, that's not a failure
         * condition.
         */
        if (depth) {
                unsigned int nr, tags_per_word;

                tags_per_word = (1 << bt->bits_per_word);

                /*
                 * If the tag space is small, shrink the number of tags
                 * per word so we spread over a few cachelines, at least.
                 * If less than 4 tags, just forget about it, it's not
                 * going to work optimally anyway.
                 */
                if (depth >= 4) {
                        while (tags_per_word * 4 > depth) {
                                bt->bits_per_word--;
                                tags_per_word = (1 << bt->bits_per_word);
                        }
                }

                nr = ALIGN(depth, tags_per_word) / tags_per_word;
                bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
                                       GFP_KERNEL, node);
                if (!bt->map)
                        return -ENOMEM;

                bt->map_nr = nr;
        }

        bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
        if (!bt->bs) {
                kfree(bt->map);
                return -ENOMEM;
        }

        for (i = 0; i < BT_WAIT_QUEUES; i++)
                init_waitqueue_head(&bt->bs[i].wait);

        bt_update_count(bt, depth);
        return 0;
}

static void bt_free(struct blk_mq_bitmap_tags *bt)
{
        kfree(bt->map);
        kfree(bt->bs);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
                                                   int node)
{
        unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

        if (bt_alloc(&tags->bitmap_tags, depth, node, false))
                goto enomem;
        if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
                goto enomem;

        return tags;
enomem:
        bt_free(&tags->bitmap_tags);
        kfree(tags);
        return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
                                     unsigned int reserved_tags, int node)
{
        struct blk_mq_tags *tags;

        if (total_tags > BLK_MQ_TAG_MAX) {
                pr_err("blk-mq: tag depth too large\n");
                return NULL;
        }

        tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
        if (!tags)
                return NULL;

        tags->nr_tags = total_tags;
        tags->nr_reserved_tags = reserved_tags;

        return blk_mq_init_bitmap_tags(tags, node);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
        bt_free(&tags->bitmap_tags);
        bt_free(&tags->breserved_tags);
        kfree(tags);
}

void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
        unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

        *tag = prandom_u32() % depth;
}

int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
        tdepth -= tags->nr_reserved_tags;
        if (tdepth > tags->nr_tags)
                return -EINVAL;

        /*
         * We don't need to (and can't) update reserved tags here; they
         * remain static and should never need resizing.
         */
        bt_update_count(&tags->bitmap_tags, tdepth);
        blk_mq_tag_wakeup_all(tags);
        return 0;
}

ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
        char *orig_page = page;
        unsigned int free, res;

        if (!tags)
                return 0;

        page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
                        "bits_per_word=%u\n",
                        tags->nr_tags, tags->nr_reserved_tags,
                        tags->bitmap_tags.bits_per_word);

        free = bt_unused_tags(&tags->bitmap_tags);
        res = bt_unused_tags(&tags->breserved_tags);

        page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
        page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));

        return page - orig_page;
}
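/*
 * Hypothetical sample of the sysfs output above (illustrative values only:
 * a 64-bit kernel, 128 total tags of which one is reserved, no I/O in
 * flight, so bt_alloc() shrinks the word size to 16 tags per word):
 *
 *      nr_tags=128, reserved_tags=1, bits_per_word=4
 *      nr_free=127, nr_reserved=1
 *      active_queues=0
 */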