// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/list_sort.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

void blk_mq_sched_assign_ioc(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc;
	struct io_cq *icq;

	/*
	 * May not have an IO context if it's a passthrough request
	 */
	ioc = current->io_context;
	if (!ioc)
		return;

	spin_lock_irq(&q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(&q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}
	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}

/*
 * Mark a hardware queue as needing a restart. For shared queues, maintain
 * a count of how many hardware queues are marked for restart.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);

void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;
	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	/*
	 * Order clearing SCHED_RESTART against the list_empty_careful()
	 * check of hctx->dispatch in blk_mq_run_hw_queue(). This pairs with
	 * the barrier in blk_mq_dispatch_rq_list(); without it, the dispatch
	 * side might not see SCHED_RESTART while a request newly added to
	 * hctx->dispatch is missed by the check in blk_mq_run_hw_queue().
	 */
	smp_mb();

	blk_mq_run_hw_queue(hctx, true);
}

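/*
 * list_sort() comparator: order requests by hw queue so that requests
 * mapping to the same hctx end up adjacent and can be dispatched as one
 * batch by blk_mq_dispatch_hctx_list() below.
 */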
static int sched_rq_cmp(void *priv, const struct list_head *a,
			const struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return rqa->mq_hctx > rqb->mq_hctx;
}

static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
{
	struct blk_mq_hw_ctx *hctx =
		list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
	struct request *rq;
	LIST_HEAD(hctx_list);
	unsigned int count = 0;

	list_for_each_entry(rq, rq_list, queuelist) {
		if (rq->mq_hctx != hctx) {
			list_cut_before(&hctx_list, rq_list, &rq->queuelist);
			goto dispatch;
		}
		count++;
	}
	list_splice_tail_init(rq_list, &hctx_list);

dispatch:
	return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
}

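/*
 * Delay (in milliseconds) before re-running hw queues when a dispatch
 * budget was acquired but no request could be dispatched; see the
 * "releasing without dispatching" comments below.
 */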
#define BLK_MQ_BUDGET_DELAY	3		/* ms units */

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again.  This is necessary to avoid starving flushes.
 */
static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	bool multi_hctxs = false, run_queue = false;
	bool dispatched = false, busy = false;
	unsigned int max_dispatch;
	LIST_HEAD(rq_list);
	int count = 0;

	if (hctx->dispatch_busy)
		max_dispatch = 1;
	else
		max_dispatch = hctx->queue->nr_requests;

	do {
		struct request *rq;
		int budget_token;

		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
			break;

		if (!list_empty_careful(&hctx->dispatch)) {
			busy = true;
			break;
		}

		budget_token = blk_mq_get_dispatch_budget(q);
		if (budget_token < 0)
			break;

		rq = e->type->ops.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q, budget_token);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue.  Kick it
			 * ourselves.
			 */
			run_queue = true;
			break;
		}

		blk_mq_set_rq_budget_token(rq, budget_token);

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add_tail(&rq->queuelist, &rq_list);
		count++;
		if (rq->mq_hctx != hctx)
			multi_hctxs = true;

		/*
		 * If we cannot get a tag for the request, stop dequeueing
		 * requests from the IO scheduler. We are unlikely to be able
		 * to submit them anyway, and it creates a false impression
		 * for the scheduling heuristics that the device can take
		 * more IO.
		 */
		if (!blk_mq_get_driver_tag(rq))
			break;
	} while (count < max_dispatch);

	if (!count) {
		if (run_queue)
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
	} else if (multi_hctxs) {
		/*
		 * Requests from different hctxs may be dequeued from some
		 * schedulers, such as bfq and deadline.
		 *
		 * Sort the requests in the list according to their hctx,
		 * then dispatch them in batches, one hctx at a time.
		 */
		list_sort(NULL, &rq_list, sched_rq_cmp);
		do {
			dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
		} while (!list_empty(&rq_list));
	} else {
		dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
	}

	if (busy)
		return -EAGAIN;
	return !!dispatched;
}

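/*
 * Keep calling __blk_mq_do_dispatch_sched() for as long as it reports
 * progress (a return of 1). The final return value is either 0 (nothing
 * left to dispatch) or -EAGAIN (hctx->dispatch was found non-empty).
 */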
static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	int ret;

	do {
		ret = __blk_mq_do_dispatch_sched(hctx);
	} while (ret == 1);

	return ret;
}

static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned short idx = ctx->index_hw[hctx->type];

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again.  This is necessary to avoid starving flushes.
 */
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
	int ret = 0;
	struct request *rq;

	do {
		int budget_token;

		if (!list_empty_careful(&hctx->dispatch)) {
			ret = -EAGAIN;
			break;
		}

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		budget_token = blk_mq_get_dispatch_budget(q);
		if (budget_token < 0)
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q, budget_token);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue.  Kick it
			 * ourselves.
			 */
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
			break;
		}

		blk_mq_set_rq_budget_token(rq, budget_token);

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));

	WRITE_ONCE(hctx->dispatch_from, ctx);
	return ret;
}

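/*
 * Dispatch requests for one hw queue: requests previously requeued on
 * hctx->dispatch are flushed first, and only if that makes progress (or
 * the list was empty) are new requests pulled from the elevator or the
 * per-cpu software queues. Returns -EAGAIN if the hw queue has to be run
 * again to avoid starving flushes.
 */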
static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	const bool has_sched = q->elevator;
	int ret = 0;
	LIST_HEAD(rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them first
	 * for more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) {
			if (has_sched)
				ret = blk_mq_do_dispatch_sched(hctx);
			else
				ret = blk_mq_do_dispatch_ctx(hctx);
		}
	} else if (has_sched) {
		ret = blk_mq_do_dispatch_sched(hctx);
	} else if (hctx->dispatch_busy) {
		/* dequeue request one by one from sw queue if queue is busy */
		ret = blk_mq_do_dispatch_ctx(hctx);
	} else {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
	}

	return ret;
}

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	hctx->run++;

	/*
	 * A return of -EAGAIN is an indication that hctx->dispatch is not
	 * empty and we must run again in order to avoid starving flushes.
	 */
	if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
		if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
			blk_mq_run_hw_queue(hctx, true);
	}
}

bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
	bool ret = false;
	enum hctx_type type;

	if (e && e->type->ops.bio_merge)
		return e->type->ops.bio_merge(q, bio, nr_segs);

	ctx = blk_mq_get_ctx(q);
	hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
	type = hctx->type;
	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
	    list_empty_careful(&ctx->rq_lists[type]))
		return false;

	/* default per sw-queue merge */
	spin_lock(&ctx->lock);
	/*
	 * Reverse check our software queue for entries that we could
	 * potentially merge with. Currently includes a hand-wavy stop
	 * count of 8, to not spend too much time checking for merges.
	 */
	if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) {
		ctx->rq_merged++;
		ret = true;
	}

	spin_unlock(&ctx->lock);

	return ret;
}

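/*
 * Try to merge @rq into a request that is already queued in the elevator;
 * any request made redundant by the merge is put on @free for the caller
 * to release.
 */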
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
				   struct list_head *free)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	/*
	 * Dispatch flush and passthrough requests directly.
	 *
	 * A passthrough request has to be added to hctx->dispatch directly:
	 * the device may be in a state in which it can't handle FS requests,
	 * so STS_RESOURCE is always returned and the FS requests end up on
	 * hctx->dispatch, while a passthrough request may be required at
	 * that very time to fix the problem. If the passthrough request were
	 * added to the scheduler queue, there would be no chance to dispatch
	 * it, given that we prioritize requests in hctx->dispatch.
	 */
	if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
		return true;

	return false;
}

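/*
 * Insert a single request. Flush-sequence and passthrough requests bypass
 * the scheduler and go straight to hctx->dispatch; everything else is
 * handed to the elevator, or to the software queue when running without a
 * scheduler. The hw queue is then kicked if @run_queue is set.
 */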
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG));

	if (blk_mq_sched_bypass_insert(hctx, rq)) {
		/*
		 * Firstly, normal IO requests are inserted to the scheduler
		 * queue or sw queue, while flush requests are added to the
		 * dispatch queue (hctx->dispatch) directly, and there is at
		 * most one in-flight flush request per hw queue, so it
		 * doesn't matter whether the flush request is added to the
		 * tail or the front of the dispatch queue.
		 *
		 * Secondly, in case of NCQ, a flush request is a non-NCQ
		 * command, and queueing it will fail while any normal IO
		 * request (NCQ command) is in flight. Adding the flush rq to
		 * the front of hctx->dispatch can add some latency to that
		 * flush rq because of S_SCHED_RESTART, compared with adding
		 * it to the tail; that in turn increases the chance of flush
		 * merging, so fewer flush requests are issued to the
		 * controller. It is observed that ~10% of time is saved in
		 * blktests block/004 on a disk attached via AHCI/NCQ when
		 * adding the flush rq to the front of hctx->dispatch.
		 *
		 * Simply queue the flush rq to the front of hctx->dispatch so
		 * that intensive flush workloads can benefit in case of NCQ
		 * HW.
		 */
		at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
		blk_mq_request_bypass_insert(rq, at_head, false);
		goto run;
	}

	if (e) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct elevator_queue *e;
	struct request_queue *q = hctx->queue;

	/*
	 * blk_mq_sched_insert_requests() is called from flush plug
	 * context only, and holds one usage counter to prevent the queue
	 * from being released.
	 */
	percpu_ref_get(&q->q_usage_counter);

	e = hctx->queue->elevator;
	if (e) {
		e->type->ops.insert_requests(hctx, list, false);
	} else {
		/*
		 * Try to issue requests directly if the hw queue isn't busy
		 * in case of the 'none' scheduler; this may save us an extra
		 * enqueue & dequeue to the sw queue.
		 */
		if (!hctx->dispatch_busy && !e && !run_queue_async) {
			blk_mq_try_issue_list_directly(hctx, list);
			if (list_empty(list))
				goto out;
		}
		blk_mq_insert_requests(hctx, ctx, list);
	}

	blk_mq_run_hw_queue(hctx, run_queue_async);
out:
	percpu_ref_put(&q->q_usage_counter);
}

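/*
 * Allocate the scheduler tag map for one hw queue, along with the static
 * requests backing it, sized according to q->nr_requests.
 */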
static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
					  struct blk_mq_hw_ctx *hctx,
					  unsigned int hctx_idx)
{
	struct blk_mq_tag_set *set = q->tag_set;
	int ret;

	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
					       set->reserved_tags, set->flags);
	if (!hctx->sched_tags)
		return -ENOMEM;

	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
	if (ret) {
		blk_mq_free_rq_map(hctx->sched_tags, set->flags);
		hctx->sched_tags = NULL;
	}

	return ret;
}

/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags) {
			blk_mq_free_rq_map(hctx->sched_tags, hctx->flags);
			hctx->sched_tags = NULL;
		}
	}
}

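/*
 * With a shared sbitmap tag set, the scheduler tags of all hw queues point
 * at a single queue-wide bitmap pair: allocate it here and resize it to
 * the current nr_requests.
 */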
static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue)
{
	struct blk_mq_tag_set *set = queue->tag_set;
	int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags);
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	/*
	 * Set the initial depth to the maximum so that we don't need to
	 * reallocate when updating nr_requests.
	 */
	ret = blk_mq_init_bitmaps(&queue->sched_bitmap_tags,
				  &queue->sched_breserved_tags,
				  MAX_SCHED_RQ, set->reserved_tags,
				  set->numa_node, alloc_policy);
	if (ret)
		return ret;

	queue_for_each_hw_ctx(queue, hctx, i) {
		hctx->sched_tags->bitmap_tags =
			&queue->sched_bitmap_tags;
		hctx->sched_tags->breserved_tags =
			&queue->sched_breserved_tags;
	}

	sbitmap_queue_resize(&queue->sched_bitmap_tags,
			     queue->nr_requests - set->reserved_tags);

	return 0;
}

static void blk_mq_exit_sched_shared_sbitmap(struct request_queue *queue)
{
	sbitmap_queue_free(&queue->sched_bitmap_tags);
	sbitmap_queue_free(&queue->sched_breserved_tags);
}

int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned int i;
	int ret;

	if (!e) {
		q->elevator = NULL;
		q->nr_requests = q->tag_set->queue_depth;
		return 0;
	}

	/*
	 * Default to double of the smaller of the hw queue_depth and 128,
	 * since we don't split into sync/async like the old code did.
	 * Additionally, this is a per-hw queue depth.
	 */
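	/*
	 * e.g. a hw queue_depth of 64 gives 128 scheduler requests; deeper
	 * queues are capped at 2 * BLKDEV_DEFAULT_RQ (256).
	 */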
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_DEFAULT_RQ);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);
		if (ret)
			goto err_free_map_and_rqs;
	}

	if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) {
		ret = blk_mq_init_sched_shared_sbitmap(q);
		if (ret)
			goto err_free_map_and_rqs;
	}

	ret = e->ops.init_sched(q, e);
	if (ret)
		goto err_free_sbitmap;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.init_hctx) {
			ret = e->ops.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_sched_free_rqs(q);
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err_free_sbitmap:
	if (blk_mq_is_sbitmap_shared(q->tag_set->flags))
		blk_mq_exit_sched_shared_sbitmap(q);
err_free_map_and_rqs:
	blk_mq_sched_free_rqs(q);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
	return ret;
}

/*
 * called in either blk_queue_cleanup or elevator_switch, tagset
 * is required for freeing requests
 */
void blk_mq_sched_free_rqs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags)
			blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
	}
}

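/*
 * Tear the scheduler down in roughly the reverse order of
 * blk_mq_init_sched(): per-hctx scheduler data and debugfs entries first,
 * then the elevator's queue-wide data, the scheduler tag maps and, if one
 * was used, the shared scheduler sbitmap.
 */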
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	unsigned int flags = 0;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.exit_hctx && hctx->sched_data) {
			e->type->ops.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
		flags = hctx->flags;
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.exit_sched)
		e->type->ops.exit_sched(e);
	blk_mq_sched_tags_teardown(q);
	if (blk_mq_is_sbitmap_shared(flags))
		blk_mq_exit_sched_shared_sbitmap(q);
	q->elevator = NULL;
}