// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/list_sort.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

/*
 * Mark a hardware queue as needing a restart. For shared queues, maintain
 * a count of how many hardware queues are marked for restart.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
        if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                return;

        set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);

void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
        clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

        /*
         * Order clearing SCHED_RESTART and list_empty_careful(&hctx->dispatch)
         * in blk_mq_run_hw_queue(). Its pair is the barrier in
         * blk_mq_dispatch_rq_list(). Without it, the dispatch code may fail
         * to see SCHED_RESTART while a request newly added to hctx->dispatch
         * is also missed by the check in blk_mq_run_hw_queue().
         */
        smp_mb();

        blk_mq_run_hw_queue(hctx, true);
}
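
/*
 * list_sort() comparator: group requests by the hw queue they map to so
 * that blk_mq_dispatch_hctx_list() can dispatch them batch by batch.
 */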
static int sched_rq_cmp(void *priv, const struct list_head *a,
                        const struct list_head *b)
{
        struct request *rqa = container_of(a, struct request, queuelist);
        struct request *rqb = container_of(b, struct request, queuelist);

        return rqa->mq_hctx > rqb->mq_hctx;
}
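
/*
 * Dispatch the leading run of requests on @rq_list that share the first
 * request's hw queue; anything mapping to a different hctx is left on
 * @rq_list for the caller. Returns the result of blk_mq_dispatch_rq_list()
 * for that batch.
 */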
static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
{
        struct blk_mq_hw_ctx *hctx =
                list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
        struct request *rq;
        LIST_HEAD(hctx_list);
        unsigned int count = 0;

        list_for_each_entry(rq, rq_list, queuelist) {
                if (rq->mq_hctx != hctx) {
                        list_cut_before(&hctx_list, rq_list, &rq->queuelist);
                        goto dispatch;
                }
                count++;
        }
        list_splice_tail_init(rq_list, &hctx_list);

dispatch:
        return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
}

#define BLK_MQ_BUDGET_DELAY     3               /* ms units */

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct elevator_queue *e = q->elevator;
        bool multi_hctxs = false, run_queue = false;
        bool dispatched = false, busy = false;
        unsigned int max_dispatch;
        LIST_HEAD(rq_list);
        int count = 0;

        if (hctx->dispatch_busy)
                max_dispatch = 1;
        else
                max_dispatch = hctx->queue->nr_requests;

        do {
                struct request *rq;
                int budget_token;

                if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
                        break;

                if (!list_empty_careful(&hctx->dispatch)) {
                        busy = true;
                        break;
                }

                budget_token = blk_mq_get_dispatch_budget(q);
                if (budget_token < 0)
                        break;

                rq = e->type->ops.dispatch_request(hctx);
                if (!rq) {
                        blk_mq_put_dispatch_budget(q, budget_token);
                        /*
                         * We're releasing without dispatching. Holding the
                         * budget could have blocked any "hctx"s with the
                         * same queue and if we didn't dispatch then there's
                         * no guarantee anyone will kick the queue. Kick it
                         * ourselves.
                         */
                        run_queue = true;
                        break;
                }

                blk_mq_set_rq_budget_token(rq, budget_token);

                /*
                 * Now this rq owns the budget which has to be released
                 * if this rq won't be queued to driver via .queue_rq()
                 * in blk_mq_dispatch_rq_list().
                 */
                list_add_tail(&rq->queuelist, &rq_list);
                count++;
                if (rq->mq_hctx != hctx)
                        multi_hctxs = true;

                /*
                 * If we cannot get a tag for the request, stop dequeueing
                 * requests from the IO scheduler. We are unlikely to be able
                 * to submit them anyway and it creates a false impression for
                 * scheduling heuristics that the device can take more IO.
                 */
                if (!blk_mq_get_driver_tag(rq))
                        break;
        } while (count < max_dispatch);

        if (!count) {
                if (run_queue)
                        blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
        } else if (multi_hctxs) {
                /*
                 * Requests from different hctx may be dequeued from some
                 * schedulers, such as bfq and deadline.
                 *
                 * Sort the requests in the list according to their hctx,
                 * then dispatch batches of requests from the same hctx at
                 * a time.
                 */
                list_sort(NULL, &rq_list, sched_rq_cmp);
                do {
                        dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
                } while (!list_empty(&rq_list));
        } else {
                dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
        }

        if (busy)
                return -EAGAIN;
        return !!dispatched;
}
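
/*
 * Keep asking the elevator for more requests: loop for as long as
 * __blk_mq_do_dispatch_sched() reports progress (1), and hand its final
 * 0 or -EAGAIN back to the caller.
 */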
static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
        int ret;

        do {
                ret = __blk_mq_do_dispatch_sched(hctx);
        } while (ret == 1);

        return ret;
}
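
/*
 * Return the software queue that follows @ctx in @hctx's ctx map, wrapping
 * around to the first one; used for round-robin dispatch below.
 */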
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
                                          struct blk_mq_ctx *ctx)
{
        unsigned short idx = ctx->index_hw[hctx->type];

        if (++idx == hctx->nr_ctx)
                idx = 0;

        return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        LIST_HEAD(rq_list);
        struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
        int ret = 0;
        struct request *rq;

        do {
                int budget_token;

                if (!list_empty_careful(&hctx->dispatch)) {
                        ret = -EAGAIN;
                        break;
                }

                if (!sbitmap_any_bit_set(&hctx->ctx_map))
                        break;

                budget_token = blk_mq_get_dispatch_budget(q);
                if (budget_token < 0)
                        break;

                rq = blk_mq_dequeue_from_ctx(hctx, ctx);
                if (!rq) {
                        blk_mq_put_dispatch_budget(q, budget_token);
                        /*
                         * We're releasing without dispatching. Holding the
                         * budget could have blocked any "hctx"s with the
                         * same queue and if we didn't dispatch then there's
                         * no guarantee anyone will kick the queue. Kick it
                         * ourselves.
                         */
                        blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
                        break;
                }

                blk_mq_set_rq_budget_token(rq, budget_token);

                /*
                 * Now this rq owns the budget which has to be released
                 * if this rq won't be queued to driver via .queue_rq()
                 * in blk_mq_dispatch_rq_list().
                 */
                list_add(&rq->queuelist, &rq_list);

                /* round robin for fair dispatch */
                ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

        } while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));

        WRITE_ONCE(hctx->dispatch_from, ctx);
        return ret;
}
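
/*
 * Pull requests off hctx->dispatch first, then ask the elevator or the
 * software queues for more. Returns -EAGAIN when hctx->dispatch turned out
 * to be non-empty, so the caller knows the queue has to be run again.
 */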
static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        const bool has_sched = q->elevator;
        int ret = 0;
        LIST_HEAD(rq_list);

        /*
         * If we have previous entries on our dispatch list, grab them first for
         * more fair dispatch.
         */
        if (!list_empty_careful(&hctx->dispatch)) {
                spin_lock(&hctx->lock);
                if (!list_empty(&hctx->dispatch))
                        list_splice_init(&hctx->dispatch, &rq_list);
                spin_unlock(&hctx->lock);
        }

        /*
         * Only ask the scheduler for requests, if we didn't have residual
         * requests from the dispatch list. This is to avoid the case where
         * we only ever dispatch a fraction of the requests available because
         * of low device queue depth. Once we pull requests out of the IO
         * scheduler, we can no longer merge or sort them. So it's best to
         * leave them there for as long as we can. Mark the hw queue as
         * needing a restart in that case.
         *
         * We want to dispatch from the scheduler if there was nothing
         * on the dispatch list or we were able to dispatch from the
         * dispatch list.
         */
        if (!list_empty(&rq_list)) {
                blk_mq_sched_mark_restart_hctx(hctx);
                if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) {
                        if (has_sched)
                                ret = blk_mq_do_dispatch_sched(hctx);
                        else
                                ret = blk_mq_do_dispatch_ctx(hctx);
                }
        } else if (has_sched) {
                ret = blk_mq_do_dispatch_sched(hctx);
        } else if (hctx->dispatch_busy) {
                /* dequeue request one by one from sw queue if queue is busy */
                ret = blk_mq_do_dispatch_ctx(hctx);
        } else {
                blk_mq_flush_busy_ctxs(hctx, &rq_list);
                blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
        }

        return ret;
}

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;

        /* RCU or SRCU read lock is needed before checking quiesced flag */
        if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
                return;

        hctx->run++;

        /*
         * A return of -EAGAIN is an indication that hctx->dispatch is not
         * empty and we must run again in order to avoid starving flushes.
         */
        if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
                if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
                        blk_mq_run_hw_queue(hctx, true);
        }
}
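
/*
 * Try to merge @bio into an already queued request, either through the
 * elevator's ->bio_merge() hook or, without one, against recently queued
 * requests in the current CPU's software queue. Returns true on merge.
 */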
bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs)
{
        struct elevator_queue *e = q->elevator;
        struct blk_mq_ctx *ctx;
        struct blk_mq_hw_ctx *hctx;
        bool ret = false;
        enum hctx_type type;

        if (e && e->type->ops.bio_merge) {
                ret = e->type->ops.bio_merge(q, bio, nr_segs);
                goto out_put;
        }

        ctx = blk_mq_get_ctx(q);
        hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
        type = hctx->type;
        if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
            list_empty_careful(&ctx->rq_lists[type]))
                goto out_put;

        /* default per sw-queue merge */
        spin_lock(&ctx->lock);
        /*
         * Reverse check our software queue for entries that we could
         * potentially merge with. Currently includes a hand-wavy stop
         * count of 8, to not spend too much time checking for merges.
         */
        if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs))
                ret = true;

        spin_unlock(&ctx->lock);
out_put:
        return ret;
}

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
                                   struct list_head *free)
{
        return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
                                       struct request *rq)
{
        /*
         * Dispatch flush and passthrough rq directly.
         *
         * A passthrough request has to be added to hctx->dispatch directly:
         * the device may be in a state where it can't handle FS requests, so
         * STS_RESOURCE is always returned and the FS request ends up on
         * hctx->dispatch. However, a passthrough request may be needed at
         * that time to fix the problem. If the passthrough request were
         * added to the scheduler queue, there would be no chance to dispatch
         * it, given that we prioritize requests on hctx->dispatch.
         */
        if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
                return true;

        return false;
}

void blk_mq_sched_insert_request(struct request *rq, bool at_head,
                                 bool run_queue, bool async)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

        WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG));

        if (blk_mq_sched_bypass_insert(hctx, rq)) {
                /*
                 * First, a normal IO request is inserted into the scheduler
                 * queue or a sw queue, while a flush request is added to the
                 * dispatch queue (hctx->dispatch) directly. There is at most
                 * one in-flight flush request per hw queue, so it doesn't
                 * matter whether the flush request is added to the tail or
                 * the front of the dispatch queue.
                 *
                 * Second, in the case of NCQ, a flush request is a non-NCQ
                 * command, and queueing it will fail while any normal IO
                 * request (NCQ command) is in flight. When adding the flush
                 * rq to the front of hctx->dispatch, it is easier to
                 * introduce extra latency for the flush rq because of
                 * S_SCHED_RESTART compared with adding it to the tail of the
                 * dispatch queue; the chance of flush merging is then
                 * increased, and fewer flush requests are issued to the
                 * controller. It is observed that ~10% time is saved in
                 * blktests block/004 on a disk attached to an AHCI/NCQ drive
                 * when adding the flush rq to the front of hctx->dispatch.
                 *
                 * So simply queue the flush rq to the front of hctx->dispatch
                 * so that intensive flush workloads can benefit in case of
                 * NCQ hardware.
                 */
                at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
                blk_mq_request_bypass_insert(rq, at_head, false);
                goto run;
        }

        if (e) {
                LIST_HEAD(list);

                list_add(&rq->queuelist, &list);
                e->type->ops.insert_requests(hctx, &list, at_head);
        } else {
                spin_lock(&ctx->lock);
                __blk_mq_insert_request(hctx, rq, at_head);
                spin_unlock(&ctx->lock);
        }

run:
        if (run_queue)
                blk_mq_run_hw_queue(hctx, async);
}
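
/*
 * Insert a list of requests coming from a plug flush. With an elevator the
 * whole list is handed to ->insert_requests(); without one, requests may be
 * issued directly to the driver when the hw queue isn't busy, falling back
 * to the software queues otherwise.
 */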
void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
                                  struct blk_mq_ctx *ctx,
                                  struct list_head *list, bool run_queue_async)
{
        struct elevator_queue *e;
        struct request_queue *q = hctx->queue;

        /*
         * blk_mq_sched_insert_requests() is called from flush plug
         * context only, and holds one usage counter to prevent the queue
         * from being released.
         */
        percpu_ref_get(&q->q_usage_counter);

        e = hctx->queue->elevator;
        if (e) {
                e->type->ops.insert_requests(hctx, list, false);
        } else {
                /*
                 * Try to issue requests directly if the hw queue isn't busy
                 * in case of the 'none' scheduler; this may save us an extra
                 * enqueue & dequeue to the sw queue.
                 */
                if (!hctx->dispatch_busy && !run_queue_async) {
                        blk_mq_run_dispatch_ops(hctx->queue,
                                blk_mq_try_issue_list_directly(hctx, list));
                        if (list_empty(list))
                                goto out;
                }
                blk_mq_insert_requests(hctx, ctx, list);
        }

        blk_mq_run_hw_queue(hctx, run_queue_async);
out:
        percpu_ref_put(&q->q_usage_counter);
}
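
/*
 * Allocate the scheduler tag map (and its requests) for one hw queue. When
 * the tag set uses shared tags, every hctx simply points at the queue-wide
 * sched_shared_tags set up in blk_mq_init_sched_shared_tags().
 */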
static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
                                          struct blk_mq_hw_ctx *hctx,
                                          unsigned int hctx_idx)
{
        if (blk_mq_is_shared_tags(q->tag_set->flags)) {
                hctx->sched_tags = q->sched_shared_tags;
                return 0;
        }

        hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
                                                    q->nr_requests);

        if (!hctx->sched_tags)
                return -ENOMEM;
        return 0;
}

static void blk_mq_exit_sched_shared_tags(struct request_queue *queue)
{
        blk_mq_free_rq_map(queue->sched_shared_tags);
        queue->sched_shared_tags = NULL;
}

/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;

        queue_for_each_hw_ctx(q, hctx, i) {
                if (hctx->sched_tags) {
                        if (!blk_mq_is_shared_tags(flags))
                                blk_mq_free_rq_map(hctx->sched_tags);
                        hctx->sched_tags = NULL;
                }
        }

        if (blk_mq_is_shared_tags(flags))
                blk_mq_exit_sched_shared_tags(q);
}

static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
{
        struct blk_mq_tag_set *set = queue->tag_set;

        /*
         * Set initial depth at max so that we don't need to reallocate for
         * updating nr_requests.
         */
        queue->sched_shared_tags = blk_mq_alloc_map_and_rqs(set,
                                                BLK_MQ_NO_HCTX_IDX,
                                                MAX_SCHED_RQ);
        if (!queue->sched_shared_tags)
                return -ENOMEM;

        blk_mq_tag_update_sched_shared_tags(queue);

        return 0;
}
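
/*
 * Attach elevator @e to @q: size the scheduler tag maps, allocate them (per
 * hw queue, or once when tags are shared), then run the elevator's init
 * hooks for the queue and for each hw queue.
 */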
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
        unsigned int flags = q->tag_set->flags;
        struct blk_mq_hw_ctx *hctx;
        struct elevator_queue *eq;
        unsigned long i;
        int ret;

        if (!e) {
                q->elevator = NULL;
                q->nr_requests = q->tag_set->queue_depth;
                return 0;
        }

        /*
         * Default to double the smaller of the hw queue_depth and 128,
         * since we don't split into sync/async like the old code did.
         * Additionally, this is a per-hw-queue depth.
         */
        q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
                                   BLKDEV_DEFAULT_RQ);

        if (blk_mq_is_shared_tags(flags)) {
                ret = blk_mq_init_sched_shared_tags(q);
                if (ret)
                        return ret;
        }

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);
                if (ret)
                        goto err_free_map_and_rqs;
        }

        ret = e->ops.init_sched(q, e);
        if (ret)
                goto err_free_map_and_rqs;

        blk_mq_debugfs_register_sched(q);

        queue_for_each_hw_ctx(q, hctx, i) {
                if (e->ops.init_hctx) {
                        ret = e->ops.init_hctx(hctx, i);
                        if (ret) {
                                eq = q->elevator;
                                blk_mq_sched_free_rqs(q);
                                blk_mq_exit_sched(q, eq);
                                kobject_put(&eq->kobj);
                                return ret;
                        }
                }
                blk_mq_debugfs_register_sched_hctx(q, hctx);
        }

        return 0;

err_free_map_and_rqs:
        blk_mq_sched_free_rqs(q);
        blk_mq_sched_tags_teardown(q, flags);

        q->elevator = NULL;
        return ret;
}

/*
 * called in either blk_queue_cleanup or elevator_switch, tagset
 * is required for freeing requests
 */
void blk_mq_sched_free_rqs(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;

        if (blk_mq_is_shared_tags(q->tag_set->flags)) {
                blk_mq_free_rqs(q->tag_set, q->sched_shared_tags,
                                BLK_MQ_NO_HCTX_IDX);
        } else {
                queue_for_each_hw_ctx(q, hctx, i) {
                        if (hctx->sched_tags)
                                blk_mq_free_rqs(q->tag_set,
                                                hctx->sched_tags, i);
                }
        }
}
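
/*
 * Tear down the elevator attached to @q: run its per-hctx and per-queue
 * exit hooks, then release the scheduler tag maps.
 */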
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;
        unsigned int flags = 0;

        queue_for_each_hw_ctx(q, hctx, i) {
                blk_mq_debugfs_unregister_sched_hctx(hctx);
                if (e->type->ops.exit_hctx && hctx->sched_data) {
                        e->type->ops.exit_hctx(hctx, i);
                        hctx->sched_data = NULL;
                }
                flags = hctx->flags;
        }
        blk_mq_debugfs_unregister_sched(q);
        if (e->type->ops.exit_sched)
                e->type->ops.exit_sched(e);
        blk_mq_sched_tags_teardown(q, flags);
        q->elevator = NULL;
}