// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/list_sort.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

void blk_mq_sched_assign_ioc(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc;
	struct io_cq *icq;

	/*
	 * May not have an IO context if it's a passthrough request
	 */
	ioc = current->io_context;
	if (!ioc)
		return;

	spin_lock_irq(&q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(&q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}
	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}

/*
 * Mark a hardware queue as needing a restart. For shared queues, maintain
 * a count of how many hardware queues are marked for restart.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);

void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	/*
	 * Order clearing SCHED_RESTART against the list_empty_careful()
	 * check of hctx->dispatch in blk_mq_run_hw_queue(). Its pair is the
	 * barrier in blk_mq_dispatch_rq_list(). Without it, the dispatch
	 * code might not see SCHED_RESTART while a request newly added to
	 * hctx->dispatch is missed by the check in blk_mq_run_hw_queue().
	 */
	smp_mb();

	blk_mq_run_hw_queue(hctx, true);
}

/*
 * Sort requests by their hardware queue so that requests sharing an hctx
 * end up adjacent in the list and can be dispatched as one batch.
 */
static int sched_rq_cmp(void *priv, const struct list_head *a,
			const struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return rqa->mq_hctx > rqb->mq_hctx;
}

/*
 * Dispatch the leading run of requests that share the first entry's hctx,
 * leaving the remainder (belonging to other hardware queues) on @rq_list.
 */
static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
{
	struct blk_mq_hw_ctx *hctx =
		list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
	struct request *rq;
	LIST_HEAD(hctx_list);
	unsigned int count = 0;

	list_for_each_entry(rq, rq_list, queuelist) {
		if (rq->mq_hctx != hctx) {
			list_cut_before(&hctx_list, rq_list, &rq->queuelist);
			goto dispatch;
		}
		count++;
	}
	list_splice_tail_init(rq_list, &hctx_list);

dispatch:
	return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
}

#define BLK_MQ_BUDGET_DELAY	3		/* ms units */

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
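/*
 * In addition to the -EAGAIN case above, __blk_mq_do_dispatch_sched()
 * returns 1 when at least one request was dispatched and 0 when there was
 * nothing (more) to dispatch; blk_mq_do_dispatch_sched() keeps calling it
 * until it stops returning 1.
 */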
static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	bool multi_hctxs = false, run_queue = false;
	bool dispatched = false, busy = false;
	unsigned int max_dispatch;
	LIST_HEAD(rq_list);
	int count = 0;

	if (hctx->dispatch_busy)
		max_dispatch = 1;
	else
		max_dispatch = hctx->queue->nr_requests;

	do {
		struct request *rq;
		int budget_token;

		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
			break;

		if (!list_empty_careful(&hctx->dispatch)) {
			busy = true;
			break;
		}

		budget_token = blk_mq_get_dispatch_budget(q);
		if (budget_token < 0)
			break;

		rq = e->type->ops.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q, budget_token);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */
			run_queue = true;
			break;
		}

		blk_mq_set_rq_budget_token(rq, budget_token);

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add_tail(&rq->queuelist, &rq_list);
		count++;
		if (rq->mq_hctx != hctx)
			multi_hctxs = true;

		/*
		 * If we cannot get tag for the request, stop dequeueing
		 * requests from the IO scheduler. We are unlikely to be able
		 * to submit them anyway and it creates false impression for
		 * scheduling heuristics that the device can take more IO.
		 */
		if (!blk_mq_get_driver_tag(rq))
			break;
	} while (count < max_dispatch);

	if (!count) {
		if (run_queue)
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
	} else if (multi_hctxs) {
		/*
		 * Requests from different hctx may be dequeued from some
		 * schedulers, such as bfq and deadline.
		 *
		 * Sort the requests in the list according to their hctx,
		 * dispatch batching requests from same hctx at a time.
		 */
		list_sort(NULL, &rq_list, sched_rq_cmp);
		do {
			dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
		} while (!list_empty(&rq_list));
	} else {
		dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
	}

	if (busy)
		return -EAGAIN;
	return !!dispatched;
}

static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	int ret;

	do {
		ret = __blk_mq_do_dispatch_sched(hctx);
	} while (ret == 1);

	return ret;
}

static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned short idx = ctx->index_hw[hctx->type];

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
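/*
 * Dispatch from the software queues mapped to this hctx, one request at a
 * time and round-robin across the ctxs (resuming from hctx->dispatch_from),
 * so that a busy device does not pull more requests out of the sw queues
 * than it can actually take.
 */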
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
	int ret = 0;
	struct request *rq;

	do {
		int budget_token;

		if (!list_empty_careful(&hctx->dispatch)) {
			ret = -EAGAIN;
			break;
		}

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		budget_token = blk_mq_get_dispatch_budget(q);
		if (budget_token < 0)
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q, budget_token);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
			break;
		}

		blk_mq_set_rq_budget_token(rq, budget_token);

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));

	WRITE_ONCE(hctx->dispatch_from, ctx);
	return ret;
}

static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	const bool has_sched = q->elevator;
	int ret = 0;
	LIST_HEAD(rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them first for
	 * more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests, if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
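	/*
	 * In short: leftover ->dispatch entries go out first, and the
	 * scheduler (or the sw queues) is only asked for more if those all
	 * made it to the driver; without a scheduler, a busy device is fed
	 * one request at a time while an idle one gets the sw queues flushed
	 * in one go.
	 */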
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) {
			if (has_sched)
				ret = blk_mq_do_dispatch_sched(hctx);
			else
				ret = blk_mq_do_dispatch_ctx(hctx);
		}
	} else if (has_sched) {
		ret = blk_mq_do_dispatch_sched(hctx);
	} else if (hctx->dispatch_busy) {
		/* dequeue request one by one from sw queue if queue is busy */
		ret = blk_mq_do_dispatch_ctx(hctx);
	} else {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
	}

	return ret;
}

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	hctx->run++;

	/*
	 * A return of -EAGAIN is an indication that hctx->dispatch is not
	 * empty and we must run again in order to avoid starving flushes.
	 */
	if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
		if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
			blk_mq_run_hw_queue(hctx, true);
	}
}

bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
	bool ret = false;
	enum hctx_type type;

	if (e && e->type->ops.bio_merge)
		return e->type->ops.bio_merge(q, bio, nr_segs);

	ctx = blk_mq_get_ctx(q);
	hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
	type = hctx->type;
	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
	    list_empty_careful(&ctx->rq_lists[type]))
		return false;

	/* default per sw-queue merge */
	spin_lock(&ctx->lock);
	/*
	 * Reverse check our software queue for entries that we could
	 * potentially merge with. Currently includes a hand-wavy stop
	 * count of 8, to not spend too much time checking for merges.
	 */
	if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs))
		ret = true;

	spin_unlock(&ctx->lock);
	return ret;
}

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
				   struct list_head *free)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	/*
	 * Dispatch flush and passthrough requests directly.
	 *
	 * A passthrough request has to be added to hctx->dispatch directly:
	 * the device may be in a state where it can't handle FS requests,
	 * so BLK_STS_RESOURCE is returned for them and they pile up on
	 * hctx->dispatch, while a passthrough request may be exactly what is
	 * needed to get the device out of that state. If the passthrough
	 * request were added to the scheduler queue instead, there would be
	 * no chance to dispatch it, since requests in hctx->dispatch are
	 * prioritized.
	 */
	if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
		return true;

	return false;
}

void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG));

	if (blk_mq_sched_bypass_insert(hctx, rq)) {
		/*
		 * Firstly, normal IO requests are inserted into the scheduler
		 * queue or sw queue, while flush requests are added directly
		 * to the dispatch queue (hctx->dispatch). Since there is at
		 * most one in-flight flush request per hw queue, it doesn't
		 * matter whether the flush request goes to the tail or the
		 * front of the dispatch queue.
		 *
		 * Secondly, with NCQ the flush request is a non-NCQ command,
		 * and queueing it fails while any normal IO request (NCQ
		 * command) is in flight. Adding the flush rq to the front of
		 * hctx->dispatch can add a little latency to that flush rq
		 * because of S_SCHED_RESTART, compared with adding it to the
		 * tail, but the chance of flush merging increases and fewer
		 * flush requests are issued to the controller. It was
		 * observed that ~10% of the time is saved in blktests
		 * block/004 on a disk behind an AHCI/NCQ controller when
		 * adding the flush rq to the front of hctx->dispatch.
		 *
		 * So simply queue flush rqs to the front of hctx->dispatch,
		 * so that flush-intensive workloads benefit on NCQ hardware.
		 */
		at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
		blk_mq_request_bypass_insert(rq, at_head, false);
		goto run;
	}

	if (e) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct elevator_queue *e;
	struct request_queue *q = hctx->queue;

	/*
	 * blk_mq_sched_insert_requests() is called from flush plug
	 * context only, so hold a usage counter to prevent the queue
	 * from being released.
	 */
	percpu_ref_get(&q->q_usage_counter);

	e = hctx->queue->elevator;
	if (e) {
		e->type->ops.insert_requests(hctx, list, false);
	} else {
		/*
		 * Try to issue requests directly if the hw queue isn't
		 * busy in the 'none' scheduler case; this may save us an
		 * extra enqueue & dequeue to the sw queue.
		 */
		if (!hctx->dispatch_busy && !e && !run_queue_async) {
			blk_mq_try_issue_list_directly(hctx, list);
			if (list_empty(list))
				goto out;
		}
		blk_mq_insert_requests(hctx, ctx, list);
	}

	blk_mq_run_hw_queue(hctx, run_queue_async);
out:
	percpu_ref_put(&q->q_usage_counter);
}

static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
					  struct blk_mq_hw_ctx *hctx,
					  unsigned int hctx_idx)
{
	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		hctx->sched_tags = q->sched_shared_tags;
		return 0;
	}

	hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
						    q->nr_requests);

	if (!hctx->sched_tags)
		return -ENOMEM;
	return 0;
}

static void blk_mq_exit_sched_shared_tags(struct request_queue *queue)
{
	blk_mq_free_rq_map(queue->sched_shared_tags);
	queue->sched_shared_tags = NULL;
}

/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags) {
			if (!blk_mq_is_shared_tags(flags))
				blk_mq_free_rq_map(hctx->sched_tags);
			hctx->sched_tags = NULL;
		}
	}

	if (blk_mq_is_shared_tags(flags))
		blk_mq_exit_sched_shared_tags(q);
}

static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
{
	struct blk_mq_tag_set *set = queue->tag_set;

	/*
	 * Set initial depth at max so that we don't need to reallocate for
	 * updating nr_requests.
	 */
	queue->sched_shared_tags = blk_mq_alloc_map_and_rqs(set,
						BLK_MQ_NO_HCTX_IDX,
						MAX_SCHED_RQ);
	if (!queue->sched_shared_tags)
		return -ENOMEM;

	blk_mq_tag_update_sched_shared_tags(queue);

	return 0;
}

int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	unsigned int i, flags = q->tag_set->flags;
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	int ret;

	if (!e) {
		q->elevator = NULL;
		q->nr_requests = q->tag_set->queue_depth;
		return 0;
	}

	/*
	 * Default to twice the smaller of the hw queue depth and 128, since
	 * we don't split into sync/async like the old code did. Additionally,
	 * this is a per-hw-queue depth.
	 */
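	/*
	 * For example, a hw queue depth of 32 gives nr_requests = 64, while
	 * anything at or above BLKDEV_DEFAULT_RQ (128) is capped at 256.
	 */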
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_DEFAULT_RQ);

	if (blk_mq_is_shared_tags(flags)) {
		ret = blk_mq_init_sched_shared_tags(q);
		if (ret)
			return ret;
	}

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);
		if (ret)
			goto err_free_map_and_rqs;
	}

	ret = e->ops.init_sched(q, e);
	if (ret)
		goto err_free_map_and_rqs;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.init_hctx) {
			ret = e->ops.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_sched_free_rqs(q);
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err_free_map_and_rqs:
	blk_mq_sched_free_rqs(q);
	blk_mq_sched_tags_teardown(q, flags);

	q->elevator = NULL;
	return ret;
}

/*
 * Called from either blk_cleanup_queue() or elevator_switch(); the tagset
 * is required for freeing requests.
 */
void blk_mq_sched_free_rqs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		blk_mq_free_rqs(q->tag_set, q->sched_shared_tags,
				BLK_MQ_NO_HCTX_IDX);
	} else {
		queue_for_each_hw_ctx(q, hctx, i) {
			if (hctx->sched_tags)
				blk_mq_free_rqs(q->tag_set,
						hctx->sched_tags, i);
		}
	}
}

void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	unsigned int flags = 0;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.exit_hctx && hctx->sched_data) {
			e->type->ops.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
		flags = hctx->flags;
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.exit_sched)
		e->type->ops.exit_sched(e);
	blk_mq_sched_tags_teardown(q, flags);
	q->elevator = NULL;
}