// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/list_sort.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *))
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (exit && hctx->sched_data)
			exit(hctx);
		kfree(hctx->sched_data);
		hctx->sched_data = NULL;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);

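/*
 * Look up (or create) the io_cq of the current task for this queue and
 * attach it to @rq->elv.icq, taking a reference on the io_context.
 * Passthrough requests may have no io_context, in which case nothing is done.
 */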
void blk_mq_sched_assign_ioc(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc;
	struct io_cq *icq;

	/*
	 * May not have an IO context if it's a passthrough request
	 */
	ioc = current->io_context;
	if (!ioc)
		return;

	spin_lock_irq(&q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(&q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}
	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}

/*
 * Mark a hardware queue as needing a restart. For shared queues, maintain
 * a count of how many hardware queues are marked for restart.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);

void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;
	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	blk_mq_run_hw_queue(hctx, true);
}

static int sched_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return rqa->mq_hctx > rqb->mq_hctx;
}

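/*
 * Cut the leading run of requests that map to the same hw queue off
 * @rq_list and hand them to blk_mq_dispatch_rq_list() as one batch.
 */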
static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
{
	struct blk_mq_hw_ctx *hctx =
		list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
	struct request *rq;
	LIST_HEAD(hctx_list);
	unsigned int count = 0;

	list_for_each_entry(rq, rq_list, queuelist) {
		if (rq->mq_hctx != hctx) {
			list_cut_before(&hctx_list, rq_list, &rq->queuelist);
			goto dispatch;
		}
		count++;
	}
	list_splice_tail_init(rq_list, &hctx_list);

dispatch:
	return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
}

#define BLK_MQ_BUDGET_DELAY	3		/* ms units */

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	bool multi_hctxs = false, run_queue = false;
	bool dispatched = false, busy = false;
	unsigned int max_dispatch;
	LIST_HEAD(rq_list);
	int count = 0;

	if (hctx->dispatch_busy)
		max_dispatch = 1;
	else
		max_dispatch = hctx->queue->nr_requests;

	do {
		struct request *rq;

		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
			break;

		if (!list_empty_careful(&hctx->dispatch)) {
			busy = true;
			break;
		}

		if (!blk_mq_get_dispatch_budget(q))
			break;

		rq = e->type->ops.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */
			run_queue = true;
			break;
		}

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add_tail(&rq->queuelist, &rq_list);
		if (rq->mq_hctx != hctx)
			multi_hctxs = true;
	} while (++count < max_dispatch);

	if (!count) {
		if (run_queue)
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
	} else if (multi_hctxs) {
		/*
		 * Requests from different hctx may be dequeued from some
		 * schedulers, such as bfq and deadline.
		 *
		 * Sort the requests in the list according to their hctx,
		 * dispatch batching requests from same hctx at a time.
		 */
		list_sort(NULL, &rq_list, sched_rq_cmp);
		do {
			dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
		} while (!list_empty(&rq_list));
	} else {
		dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
	}

	if (busy)
		return -EAGAIN;
	return !!dispatched;
}

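/*
 * Keep pulling batches from the scheduler while __blk_mq_do_dispatch_sched()
 * reports progress (returns 1); stop on 0 or -EAGAIN and pass that back.
 */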
static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	int ret;

	do {
		ret = __blk_mq_do_dispatch_sched(hctx);
	} while (ret == 1);

	return ret;
}

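/*
 * Return the software queue that follows @ctx in this hw queue's mapping,
 * wrapping around to the first one; used for round-robin dispatch below.
 */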
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned short idx = ctx->index_hw[hctx->type];

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
	int ret = 0;
	struct request *rq;

	do {
		if (!list_empty_careful(&hctx->dispatch)) {
			ret = -EAGAIN;
			break;
		}

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		if (!blk_mq_get_dispatch_budget(q))
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
			break;
		}

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));

	WRITE_ONCE(hctx->dispatch_from, ctx);
	return ret;
}

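/*
 * Dispatch leftover requests from hctx->dispatch first, then pull from the
 * scheduler or the software queues as appropriate. Returns -EAGAIN when
 * hctx->dispatch was found non-empty, so the caller knows to retry.
 */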
static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	const bool has_sched_dispatch = e && e->type->ops.dispatch_request;
	int ret = 0;
	LIST_HEAD(rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them first for
	 * more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests, if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) {
			if (has_sched_dispatch)
				ret = blk_mq_do_dispatch_sched(hctx);
			else
				ret = blk_mq_do_dispatch_ctx(hctx);
		}
	} else if (has_sched_dispatch) {
		ret = blk_mq_do_dispatch_sched(hctx);
	} else if (hctx->dispatch_busy) {
		/* dequeue request one by one from sw queue if queue is busy */
		ret = blk_mq_do_dispatch_ctx(hctx);
	} else {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
	}

	return ret;
}

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	hctx->run++;

	/*
	 * A return of -EAGAIN is an indication that hctx->dispatch is not
	 * empty and we must run again in order to avoid starving flushes.
	 */
	if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
		if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
			blk_mq_run_hw_queue(hctx, true);
	}
}

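/*
 * Ask the elevator whether @bio can be merged into an existing request and,
 * if so, perform the merge. When the bio merge in turn lets two requests be
 * combined, the request that became redundant is returned via
 * @merged_request so the caller can free it.
 */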
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_back_merge(rq, bio, nr_segs))
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_front_merge(rq, bio, nr_segs))
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);

/*
 * Iterate list of requests and see if we can merge this bio with any
 * of them.
 */
bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
			   struct bio *bio, unsigned int nr_segs)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, list, queuelist) {
		bool merged = false;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_back_merge(rq, bio,
						nr_segs);
			break;
		case ELEVATOR_FRONT_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_front_merge(rq, bio,
						nr_segs);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			continue;
		}

		return merged;
	}

	return false;
}
EXPORT_SYMBOL_GPL(blk_mq_bio_list_merge);

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_hw_ctx *hctx,
				 struct blk_mq_ctx *ctx, struct bio *bio,
				 unsigned int nr_segs)
{
	enum hctx_type type = hctx->type;

	lockdep_assert_held(&ctx->lock);

	if (blk_mq_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) {
		ctx->rq_merged++;
		return true;
	}

	return false;
}

bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
	bool ret = false;
	enum hctx_type type;

	if (e && e->type->ops.bio_merge)
		return e->type->ops.bio_merge(hctx, bio, nr_segs);

	type = hctx->type;
	if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
			!list_empty_careful(&ctx->rq_lists[type])) {
		/* default per sw-queue merge */
		spin_lock(&ctx->lock);
		ret = blk_mq_attempt_merge(q, hctx, ctx, bio, nr_segs);
		spin_unlock(&ctx->lock);
	}

	return ret;
}

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

void blk_mq_sched_request_inserted(struct request *rq)
{
	trace_block_rq_insert(rq->q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);

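/*
 * Decide whether @rq must skip the I/O scheduler and go straight to
 * hctx->dispatch (flush-sequence and passthrough requests do).
 */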
static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       bool has_sched,
				       struct request *rq)
{
	/*
	 * Dispatch flush and passthrough requests directly.
	 *
	 * A passthrough request has to be added to hctx->dispatch directly:
	 * the device may be in a state where it can't handle FS requests,
	 * so BLK_STS_RESOURCE is always returned and the FS request ends up
	 * on hctx->dispatch, while a passthrough request may be exactly what
	 * is needed to fix that state. If the passthrough request were added
	 * to the scheduler queue, there would be no chance to dispatch it,
	 * since requests in hctx->dispatch are prioritized.
	 */
	if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
		return true;

	if (has_sched)
		rq->rq_flags |= RQF_SORTED;

	return false;
}

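/*
 * Insert a single request, routing it to the flush machinery, directly to
 * hctx->dispatch, to the elevator, or to the software queue as appropriate,
 * and optionally run the hardware queue afterwards.
 */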
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	/* flush rq in flush machinery needs to be dispatched directly */
	if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
		blk_insert_flush(rq);
		goto run;
	}

	WARN_ON(e && (rq->tag != -1));

	if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
		/*
		 * Firstly normal IO request is inserted to scheduler queue or
		 * sw queue, meantime we add flush request to dispatch queue(
		 * hctx->dispatch) directly and there is at most one in-flight
		 * flush request for each hw queue, so it doesn't matter to add
		 * flush request to tail or front of the dispatch queue.
		 *
		 * Secondly in case of NCQ, flush request belongs to non-NCQ
		 * command, and queueing it will fail when there is any
		 * in-flight normal IO request(NCQ command). When adding flush
		 * rq to the front of hctx->dispatch, it is easier to introduce
		 * extra time to flush rq's latency because of S_SCHED_RESTART
		 * compared with adding to the tail of dispatch queue, then
		 * chance of flush merge is increased, and less flush requests
		 * will be issued to controller. It is observed that ~10% time
		 * is saved in blktests block/004 on disk attached to AHCI/NCQ
		 * drive when adding flush rq to the front of hctx->dispatch.
		 *
		 * Simply queue flush rq to the front of hctx->dispatch so that
		 * intensive flush workloads can benefit in case of NCQ HW.
		 */
		at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
		blk_mq_request_bypass_insert(rq, at_head, false);
		goto run;
	}

	if (e && e->type->ops.insert_requests) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

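/*
 * Insert a list of requests (coming from plug flushing) through the
 * elevator's ->insert_requests() or into the software queues, then run the
 * hw queue.
 */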
void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct elevator_queue *e;
	struct request_queue *q = hctx->queue;

	/*
	 * blk_mq_sched_insert_requests() is called from flush plug
	 * context only, and holds one usage counter to prevent the queue
	 * from being released.
	 */
	percpu_ref_get(&q->q_usage_counter);

	e = hctx->queue->elevator;
	if (e && e->type->ops.insert_requests)
		e->type->ops.insert_requests(hctx, list, false);
	else {
		/*
		 * try to issue requests directly if the hw queue isn't
		 * busy in case of 'none' scheduler, and this way may save
		 * us one extra enqueue & dequeue to sw queue.
		 */
		if (!hctx->dispatch_busy && !e && !run_queue_async) {
			blk_mq_try_issue_list_directly(hctx, list);
			if (list_empty(list))
				goto out;
		}
		blk_mq_insert_requests(hctx, ctx, list);
	}

	blk_mq_run_hw_queue(hctx, run_queue_async);
out:
	percpu_ref_put(&q->q_usage_counter);
}

static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	if (hctx->sched_tags) {
		blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
		blk_mq_free_rq_map(hctx->sched_tags);
		hctx->sched_tags = NULL;
	}
}

static int blk_mq_sched_alloc_tags(struct request_queue *q,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	struct blk_mq_tag_set *set = q->tag_set;
	int ret;

	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
					       set->reserved_tags);
	if (!hctx->sched_tags)
		return -ENOMEM;

	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
	if (ret)
		blk_mq_sched_free_tags(set, hctx, hctx_idx);

	return ret;
}

/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags) {
			blk_mq_free_rq_map(hctx->sched_tags);
			hctx->sched_tags = NULL;
		}
	}
}

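/*
 * Attach elevator @e to @q: size q->nr_requests, allocate the per-hctx
 * scheduler tags, then call the elevator's init_sched() and per-hctx
 * init_hctx() hooks.
 */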
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned int i;
	int ret;

	if (!e) {
		q->elevator = NULL;
		q->nr_requests = q->tag_set->queue_depth;
		return 0;
	}

	/*
	 * Default to double of smaller one between hw queue_depth and 128,
	 * since we don't split into sync/async like the old code did.
	 * Additionally, this is a per-hw queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_MAX_RQ);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_tags(q, hctx, i);
		if (ret)
			goto err;
	}

	ret = e->ops.init_sched(q, e);
	if (ret)
		goto err;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.init_hctx) {
			ret = e->ops.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_sched_free_requests(q);
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err:
	blk_mq_sched_free_requests(q);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
	return ret;
}

/*
 * called in either blk_queue_cleanup or elevator_switch, tagset
 * is required for freeing requests
 */
void blk_mq_sched_free_requests(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags)
			blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
	}
}

void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.exit_hctx && hctx->sched_data) {
			e->type->ops.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.exit_sched)
		e->type->ops.exit_sched(e);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
}