13dcf60bcSChristoph Hellwig // SPDX-License-Identifier: GPL-2.0 2bd166ef1SJens Axboe /* 3bd166ef1SJens Axboe * blk-mq scheduling framework 4bd166ef1SJens Axboe * 5bd166ef1SJens Axboe * Copyright (C) 2016 Jens Axboe 6bd166ef1SJens Axboe */ 7bd166ef1SJens Axboe #include <linux/kernel.h> 8bd166ef1SJens Axboe #include <linux/module.h> 9bd166ef1SJens Axboe #include <linux/blk-mq.h> 10bd166ef1SJens Axboe 11bd166ef1SJens Axboe #include <trace/events/block.h> 12bd166ef1SJens Axboe 13bd166ef1SJens Axboe #include "blk.h" 14bd166ef1SJens Axboe #include "blk-mq.h" 15d332ce09SOmar Sandoval #include "blk-mq-debugfs.h" 16bd166ef1SJens Axboe #include "blk-mq-sched.h" 17bd166ef1SJens Axboe #include "blk-mq-tag.h" 18bd166ef1SJens Axboe #include "blk-wbt.h" 19bd166ef1SJens Axboe 20bd166ef1SJens Axboe void blk_mq_sched_free_hctx_data(struct request_queue *q, 21bd166ef1SJens Axboe void (*exit)(struct blk_mq_hw_ctx *)) 22bd166ef1SJens Axboe { 23bd166ef1SJens Axboe struct blk_mq_hw_ctx *hctx; 24bd166ef1SJens Axboe int i; 25bd166ef1SJens Axboe 26bd166ef1SJens Axboe queue_for_each_hw_ctx(q, hctx, i) { 27bd166ef1SJens Axboe if (exit && hctx->sched_data) 28bd166ef1SJens Axboe exit(hctx); 29bd166ef1SJens Axboe kfree(hctx->sched_data); 30bd166ef1SJens Axboe hctx->sched_data = NULL; 31bd166ef1SJens Axboe } 32bd166ef1SJens Axboe } 33bd166ef1SJens Axboe EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data); 34bd166ef1SJens Axboe 35e2b3fa5aSDamien Le Moal void blk_mq_sched_assign_ioc(struct request *rq) 36bd166ef1SJens Axboe { 3744e8c2bfSChristoph Hellwig struct request_queue *q = rq->q; 380c62bff1SJens Axboe struct io_context *ioc; 39bd166ef1SJens Axboe struct io_cq *icq; 40bd166ef1SJens Axboe 410c62bff1SJens Axboe /* 420c62bff1SJens Axboe * May not have an IO context if it's a passthrough request 430c62bff1SJens Axboe */ 440c62bff1SJens Axboe ioc = current->io_context; 450c62bff1SJens Axboe if (!ioc) 460c62bff1SJens Axboe return; 470c62bff1SJens Axboe 480d945c1fSChristoph Hellwig spin_lock_irq(&q->queue_lock); 49bd166ef1SJens Axboe icq = ioc_lookup_icq(ioc, q); 500d945c1fSChristoph Hellwig spin_unlock_irq(&q->queue_lock); 51bd166ef1SJens Axboe 52bd166ef1SJens Axboe if (!icq) { 53bd166ef1SJens Axboe icq = ioc_create_icq(ioc, q, GFP_ATOMIC); 54bd166ef1SJens Axboe if (!icq) 55bd166ef1SJens Axboe return; 56bd166ef1SJens Axboe } 57ea511e3cSChristoph Hellwig get_io_context(icq->ioc); 5844e8c2bfSChristoph Hellwig rq->elv.icq = icq; 59bd166ef1SJens Axboe } 60bd166ef1SJens Axboe 618e8320c9SJens Axboe /* 628e8320c9SJens Axboe * Mark a hardware queue as needing a restart. For shared queues, maintain 638e8320c9SJens Axboe * a count of how many hardware queues are marked for restart. 648e8320c9SJens Axboe */ 657211aef8SDamien Le Moal void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) 668e8320c9SJens Axboe { 678e8320c9SJens Axboe if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) 688e8320c9SJens Axboe return; 698e8320c9SJens Axboe 708e8320c9SJens Axboe set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); 718e8320c9SJens Axboe } 727211aef8SDamien Le Moal EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx); 738e8320c9SJens Axboe 7497889f9aSMing Lei void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) 758e8320c9SJens Axboe { 768e8320c9SJens Axboe if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) 7797889f9aSMing Lei return; 788e8320c9SJens Axboe clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); 798e8320c9SJens Axboe 8097889f9aSMing Lei blk_mq_run_hw_queue(hctx, true); 818e8320c9SJens Axboe } 828e8320c9SJens Axboe 83a0823421SDouglas Anderson #define BLK_MQ_BUDGET_DELAY 3 /* ms units */ 84a0823421SDouglas Anderson 851f460b63SMing Lei /* 861f460b63SMing Lei * Only SCSI implements .get_budget and .put_budget, and SCSI restarts 871f460b63SMing Lei * its queue by itself in its completion handler, so we don't need to 881f460b63SMing Lei * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE. 891f460b63SMing Lei */ 901f460b63SMing Lei static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) 91caf8eb0dSMing Lei { 92caf8eb0dSMing Lei struct request_queue *q = hctx->queue; 93caf8eb0dSMing Lei struct elevator_queue *e = q->elevator; 94caf8eb0dSMing Lei LIST_HEAD(rq_list); 95caf8eb0dSMing Lei 96caf8eb0dSMing Lei do { 97de148297SMing Lei struct request *rq; 98caf8eb0dSMing Lei 99f9cd4bfeSJens Axboe if (e->type->ops.has_work && !e->type->ops.has_work(hctx)) 100caf8eb0dSMing Lei break; 101de148297SMing Lei 10288022d72SMing Lei if (!blk_mq_get_dispatch_budget(hctx)) 1031f460b63SMing Lei break; 104de148297SMing Lei 105f9cd4bfeSJens Axboe rq = e->type->ops.dispatch_request(hctx); 106de148297SMing Lei if (!rq) { 107de148297SMing Lei blk_mq_put_dispatch_budget(hctx); 108a0823421SDouglas Anderson /* 109a0823421SDouglas Anderson * We're releasing without dispatching. Holding the 110a0823421SDouglas Anderson * budget could have blocked any "hctx"s with the 111a0823421SDouglas Anderson * same queue and if we didn't dispatch then there's 112a0823421SDouglas Anderson * no guarantee anyone will kick the queue. Kick it 113a0823421SDouglas Anderson * ourselves. 114a0823421SDouglas Anderson */ 115a0823421SDouglas Anderson blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY); 116de148297SMing Lei break; 117caf8eb0dSMing Lei } 118caf8eb0dSMing Lei 119de148297SMing Lei /* 120de148297SMing Lei * Now this rq owns the budget which has to be released 121de148297SMing Lei * if this rq won't be queued to driver via .queue_rq() 122de148297SMing Lei * in blk_mq_dispatch_rq_list(). 123de148297SMing Lei */ 124de148297SMing Lei list_add(&rq->queuelist, &rq_list); 125de148297SMing Lei } while (blk_mq_dispatch_rq_list(q, &rq_list, true)); 126de148297SMing Lei } 127de148297SMing Lei 128b347689fSMing Lei static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx, 129b347689fSMing Lei struct blk_mq_ctx *ctx) 130b347689fSMing Lei { 131f31967f0SJens Axboe unsigned short idx = ctx->index_hw[hctx->type]; 132b347689fSMing Lei 133b347689fSMing Lei if (++idx == hctx->nr_ctx) 134b347689fSMing Lei idx = 0; 135b347689fSMing Lei 136b347689fSMing Lei return hctx->ctxs[idx]; 137b347689fSMing Lei } 138b347689fSMing Lei 1391f460b63SMing Lei /* 1401f460b63SMing Lei * Only SCSI implements .get_budget and .put_budget, and SCSI restarts 1411f460b63SMing Lei * its queue by itself in its completion handler, so we don't need to 1421f460b63SMing Lei * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE. 1431f460b63SMing Lei */ 1441f460b63SMing Lei static void blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx) 145b347689fSMing Lei { 146b347689fSMing Lei struct request_queue *q = hctx->queue; 147b347689fSMing Lei LIST_HEAD(rq_list); 148b347689fSMing Lei struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from); 149b347689fSMing Lei 150b347689fSMing Lei do { 151b347689fSMing Lei struct request *rq; 152b347689fSMing Lei 153b347689fSMing Lei if (!sbitmap_any_bit_set(&hctx->ctx_map)) 154b347689fSMing Lei break; 155b347689fSMing Lei 15688022d72SMing Lei if (!blk_mq_get_dispatch_budget(hctx)) 1571f460b63SMing Lei break; 158b347689fSMing Lei 159b347689fSMing Lei rq = blk_mq_dequeue_from_ctx(hctx, ctx); 160b347689fSMing Lei if (!rq) { 161b347689fSMing Lei blk_mq_put_dispatch_budget(hctx); 162a0823421SDouglas Anderson /* 163a0823421SDouglas Anderson * We're releasing without dispatching. Holding the 164a0823421SDouglas Anderson * budget could have blocked any "hctx"s with the 165a0823421SDouglas Anderson * same queue and if we didn't dispatch then there's 166a0823421SDouglas Anderson * no guarantee anyone will kick the queue. Kick it 167a0823421SDouglas Anderson * ourselves. 168a0823421SDouglas Anderson */ 169a0823421SDouglas Anderson blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY); 170b347689fSMing Lei break; 171b347689fSMing Lei } 172b347689fSMing Lei 173b347689fSMing Lei /* 174b347689fSMing Lei * Now this rq owns the budget which has to be released 175b347689fSMing Lei * if this rq won't be queued to driver via .queue_rq() 176b347689fSMing Lei * in blk_mq_dispatch_rq_list(). 177b347689fSMing Lei */ 178b347689fSMing Lei list_add(&rq->queuelist, &rq_list); 179b347689fSMing Lei 180b347689fSMing Lei /* round robin for fair dispatch */ 181b347689fSMing Lei ctx = blk_mq_next_ctx(hctx, rq->mq_ctx); 182b347689fSMing Lei 183b347689fSMing Lei } while (blk_mq_dispatch_rq_list(q, &rq_list, true)); 184b347689fSMing Lei 185b347689fSMing Lei WRITE_ONCE(hctx->dispatch_from, ctx); 186b347689fSMing Lei } 187b347689fSMing Lei 1881f460b63SMing Lei void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) 189bd166ef1SJens Axboe { 19081380ca1SOmar Sandoval struct request_queue *q = hctx->queue; 19181380ca1SOmar Sandoval struct elevator_queue *e = q->elevator; 192f9cd4bfeSJens Axboe const bool has_sched_dispatch = e && e->type->ops.dispatch_request; 193bd166ef1SJens Axboe LIST_HEAD(rq_list); 194bd166ef1SJens Axboe 195f4560ffeSMing Lei /* RCU or SRCU read lock is needed before checking quiesced flag */ 196f4560ffeSMing Lei if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) 1971f460b63SMing Lei return; 198bd166ef1SJens Axboe 199bd166ef1SJens Axboe hctx->run++; 200bd166ef1SJens Axboe 201bd166ef1SJens Axboe /* 202bd166ef1SJens Axboe * If we have previous entries on our dispatch list, grab them first for 203bd166ef1SJens Axboe * more fair dispatch. 204bd166ef1SJens Axboe */ 205bd166ef1SJens Axboe if (!list_empty_careful(&hctx->dispatch)) { 206bd166ef1SJens Axboe spin_lock(&hctx->lock); 207bd166ef1SJens Axboe if (!list_empty(&hctx->dispatch)) 208bd166ef1SJens Axboe list_splice_init(&hctx->dispatch, &rq_list); 209bd166ef1SJens Axboe spin_unlock(&hctx->lock); 210bd166ef1SJens Axboe } 211bd166ef1SJens Axboe 212bd166ef1SJens Axboe /* 213bd166ef1SJens Axboe * Only ask the scheduler for requests, if we didn't have residual 214bd166ef1SJens Axboe * requests from the dispatch list. This is to avoid the case where 215bd166ef1SJens Axboe * we only ever dispatch a fraction of the requests available because 216bd166ef1SJens Axboe * of low device queue depth. Once we pull requests out of the IO 217bd166ef1SJens Axboe * scheduler, we can no longer merge or sort them. So it's best to 218bd166ef1SJens Axboe * leave them there for as long as we can. Mark the hw queue as 219bd166ef1SJens Axboe * needing a restart in that case. 220caf8eb0dSMing Lei * 2215e3d02bbSMing Lei * We want to dispatch from the scheduler if there was nothing 2225e3d02bbSMing Lei * on the dispatch list or we were able to dispatch from the 2235e3d02bbSMing Lei * dispatch list. 22464765a75SJens Axboe */ 225caf8eb0dSMing Lei if (!list_empty(&rq_list)) { 226caf8eb0dSMing Lei blk_mq_sched_mark_restart_hctx(hctx); 227b347689fSMing Lei if (blk_mq_dispatch_rq_list(q, &rq_list, false)) { 228b347689fSMing Lei if (has_sched_dispatch) 2291f460b63SMing Lei blk_mq_do_dispatch_sched(hctx); 230b347689fSMing Lei else 2311f460b63SMing Lei blk_mq_do_dispatch_ctx(hctx); 232b347689fSMing Lei } 233caf8eb0dSMing Lei } else if (has_sched_dispatch) { 2341f460b63SMing Lei blk_mq_do_dispatch_sched(hctx); 2356e768717SMing Lei } else if (hctx->dispatch_busy) { 2366e768717SMing Lei /* dequeue request one by one from sw queue if queue is busy */ 2371f460b63SMing Lei blk_mq_do_dispatch_ctx(hctx); 238caf8eb0dSMing Lei } else { 239caf8eb0dSMing Lei blk_mq_flush_busy_ctxs(hctx, &rq_list); 240de148297SMing Lei blk_mq_dispatch_rq_list(q, &rq_list, false); 241c13660a0SJens Axboe } 242bd166ef1SJens Axboe } 243bd166ef1SJens Axboe 244e4d750c9SJens Axboe bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, 24514ccb66bSChristoph Hellwig unsigned int nr_segs, struct request **merged_request) 246bd166ef1SJens Axboe { 247bd166ef1SJens Axboe struct request *rq; 248bd166ef1SJens Axboe 24934fe7c05SChristoph Hellwig switch (elv_merge(q, &rq, bio)) { 25034fe7c05SChristoph Hellwig case ELEVATOR_BACK_MERGE: 251bd166ef1SJens Axboe if (!blk_mq_sched_allow_merge(q, rq, bio)) 252bd166ef1SJens Axboe return false; 25314ccb66bSChristoph Hellwig if (!bio_attempt_back_merge(rq, bio, nr_segs)) 25434fe7c05SChristoph Hellwig return false; 255e4d750c9SJens Axboe *merged_request = attempt_back_merge(q, rq); 256e4d750c9SJens Axboe if (!*merged_request) 25734fe7c05SChristoph Hellwig elv_merged_request(q, rq, ELEVATOR_BACK_MERGE); 258bd166ef1SJens Axboe return true; 25934fe7c05SChristoph Hellwig case ELEVATOR_FRONT_MERGE: 260bd166ef1SJens Axboe if (!blk_mq_sched_allow_merge(q, rq, bio)) 261bd166ef1SJens Axboe return false; 26214ccb66bSChristoph Hellwig if (!bio_attempt_front_merge(rq, bio, nr_segs)) 26334fe7c05SChristoph Hellwig return false; 264e4d750c9SJens Axboe *merged_request = attempt_front_merge(q, rq); 265e4d750c9SJens Axboe if (!*merged_request) 26634fe7c05SChristoph Hellwig elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE); 267bd166ef1SJens Axboe return true; 268bea99a50SKeith Busch case ELEVATOR_DISCARD_MERGE: 269bea99a50SKeith Busch return bio_attempt_discard_merge(q, rq, bio); 27034fe7c05SChristoph Hellwig default: 271bd166ef1SJens Axboe return false; 272bd166ef1SJens Axboe } 27334fe7c05SChristoph Hellwig } 274bd166ef1SJens Axboe EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge); 275bd166ef1SJens Axboe 2769bddeb2aSMing Lei /* 2779c558734SJens Axboe * Iterate list of requests and see if we can merge this bio with any 2789c558734SJens Axboe * of them. 2799bddeb2aSMing Lei */ 2809c558734SJens Axboe bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list, 28114ccb66bSChristoph Hellwig struct bio *bio, unsigned int nr_segs) 2829bddeb2aSMing Lei { 2839bddeb2aSMing Lei struct request *rq; 2849bddeb2aSMing Lei int checked = 8; 2859bddeb2aSMing Lei 2869c558734SJens Axboe list_for_each_entry_reverse(rq, list, queuelist) { 2879bddeb2aSMing Lei bool merged = false; 2889bddeb2aSMing Lei 2899bddeb2aSMing Lei if (!checked--) 2909bddeb2aSMing Lei break; 2919bddeb2aSMing Lei 2929bddeb2aSMing Lei if (!blk_rq_merge_ok(rq, bio)) 2939bddeb2aSMing Lei continue; 2949bddeb2aSMing Lei 2959bddeb2aSMing Lei switch (blk_try_merge(rq, bio)) { 2969bddeb2aSMing Lei case ELEVATOR_BACK_MERGE: 2979bddeb2aSMing Lei if (blk_mq_sched_allow_merge(q, rq, bio)) 29814ccb66bSChristoph Hellwig merged = bio_attempt_back_merge(rq, bio, 29914ccb66bSChristoph Hellwig nr_segs); 3009bddeb2aSMing Lei break; 3019bddeb2aSMing Lei case ELEVATOR_FRONT_MERGE: 3029bddeb2aSMing Lei if (blk_mq_sched_allow_merge(q, rq, bio)) 30314ccb66bSChristoph Hellwig merged = bio_attempt_front_merge(rq, bio, 30414ccb66bSChristoph Hellwig nr_segs); 3059bddeb2aSMing Lei break; 3069bddeb2aSMing Lei case ELEVATOR_DISCARD_MERGE: 3079bddeb2aSMing Lei merged = bio_attempt_discard_merge(q, rq, bio); 3089bddeb2aSMing Lei break; 3099bddeb2aSMing Lei default: 3109bddeb2aSMing Lei continue; 3119bddeb2aSMing Lei } 3129bddeb2aSMing Lei 3139bddeb2aSMing Lei return merged; 3149bddeb2aSMing Lei } 3159bddeb2aSMing Lei 3169bddeb2aSMing Lei return false; 3179bddeb2aSMing Lei } 3189c558734SJens Axboe EXPORT_SYMBOL_GPL(blk_mq_bio_list_merge); 3199c558734SJens Axboe 3209c558734SJens Axboe /* 3219c558734SJens Axboe * Reverse check our software queue for entries that we could potentially 3229c558734SJens Axboe * merge with. Currently includes a hand-wavy stop count of 8, to not spend 3239c558734SJens Axboe * too much time checking for merges. 3249c558734SJens Axboe */ 3259c558734SJens Axboe static bool blk_mq_attempt_merge(struct request_queue *q, 326c16d6b5aSMing Lei struct blk_mq_hw_ctx *hctx, 32714ccb66bSChristoph Hellwig struct blk_mq_ctx *ctx, struct bio *bio, 32814ccb66bSChristoph Hellwig unsigned int nr_segs) 3299c558734SJens Axboe { 330c16d6b5aSMing Lei enum hctx_type type = hctx->type; 331c16d6b5aSMing Lei 3329c558734SJens Axboe lockdep_assert_held(&ctx->lock); 3339c558734SJens Axboe 33414ccb66bSChristoph Hellwig if (blk_mq_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) { 3359c558734SJens Axboe ctx->rq_merged++; 3369c558734SJens Axboe return true; 3379c558734SJens Axboe } 3389c558734SJens Axboe 3399c558734SJens Axboe return false; 3409c558734SJens Axboe } 3419bddeb2aSMing Lei 34214ccb66bSChristoph Hellwig bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, 34314ccb66bSChristoph Hellwig unsigned int nr_segs) 344bd166ef1SJens Axboe { 345bd166ef1SJens Axboe struct elevator_queue *e = q->elevator; 346bd166ef1SJens Axboe struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); 3478ccdf4a3SJianchao Wang struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx); 3489bddeb2aSMing Lei bool ret = false; 349c16d6b5aSMing Lei enum hctx_type type; 350bd166ef1SJens Axboe 351c05f4220SBart Van Assche if (e && e->type->ops.bio_merge) 35214ccb66bSChristoph Hellwig return e->type->ops.bio_merge(hctx, bio, nr_segs); 353bd166ef1SJens Axboe 354c16d6b5aSMing Lei type = hctx->type; 355b04f50abSMing Lei if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) && 356c16d6b5aSMing Lei !list_empty_careful(&ctx->rq_lists[type])) { 3579bddeb2aSMing Lei /* default per sw-queue merge */ 3589bddeb2aSMing Lei spin_lock(&ctx->lock); 35914ccb66bSChristoph Hellwig ret = blk_mq_attempt_merge(q, hctx, ctx, bio, nr_segs); 3609bddeb2aSMing Lei spin_unlock(&ctx->lock); 3619bddeb2aSMing Lei } 3629bddeb2aSMing Lei 3639bddeb2aSMing Lei return ret; 364bd166ef1SJens Axboe } 365bd166ef1SJens Axboe 366bd166ef1SJens Axboe bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq) 367bd166ef1SJens Axboe { 368bd166ef1SJens Axboe return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq); 369bd166ef1SJens Axboe } 370bd166ef1SJens Axboe EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge); 371bd166ef1SJens Axboe 372bd166ef1SJens Axboe void blk_mq_sched_request_inserted(struct request *rq) 373bd166ef1SJens Axboe { 374bd166ef1SJens Axboe trace_block_rq_insert(rq->q, rq); 375bd166ef1SJens Axboe } 376bd166ef1SJens Axboe EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted); 377bd166ef1SJens Axboe 3780cacba6cSOmar Sandoval static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, 379a6a252e6SMing Lei bool has_sched, 3800cacba6cSOmar Sandoval struct request *rq) 381bd166ef1SJens Axboe { 38201e99aecSMing Lei /* 38301e99aecSMing Lei * dispatch flush and passthrough rq directly 38401e99aecSMing Lei * 38501e99aecSMing Lei * passthrough request has to be added to hctx->dispatch directly. 38601e99aecSMing Lei * For some reason, device may be in one situation which can't 38701e99aecSMing Lei * handle FS request, so STS_RESOURCE is always returned and the 38801e99aecSMing Lei * FS request will be added to hctx->dispatch. However passthrough 38901e99aecSMing Lei * request may be required at that time for fixing the problem. If 39001e99aecSMing Lei * passthrough request is added to scheduler queue, there isn't any 39101e99aecSMing Lei * chance to dispatch it given we prioritize requests in hctx->dispatch. 39201e99aecSMing Lei */ 39301e99aecSMing Lei if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq)) 394bd166ef1SJens Axboe return true; 395bd166ef1SJens Axboe 396923218f6SMing Lei if (has_sched) 397a6a252e6SMing Lei rq->rq_flags |= RQF_SORTED; 398a6a252e6SMing Lei 399a6a252e6SMing Lei return false; 400a6a252e6SMing Lei } 401a6a252e6SMing Lei 402bd6737f1SJens Axboe void blk_mq_sched_insert_request(struct request *rq, bool at_head, 4039e97d295SMike Snitzer bool run_queue, bool async) 404bd6737f1SJens Axboe { 405bd6737f1SJens Axboe struct request_queue *q = rq->q; 406bd6737f1SJens Axboe struct elevator_queue *e = q->elevator; 407bd6737f1SJens Axboe struct blk_mq_ctx *ctx = rq->mq_ctx; 408ea4f995eSJens Axboe struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 409bd6737f1SJens Axboe 410a6a252e6SMing Lei /* flush rq in flush machinery need to be dispatched directly */ 411a6a252e6SMing Lei if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) { 412923218f6SMing Lei blk_insert_flush(rq); 413923218f6SMing Lei goto run; 414bd6737f1SJens Axboe } 415bd6737f1SJens Axboe 416923218f6SMing Lei WARN_ON(e && (rq->tag != -1)); 417923218f6SMing Lei 41801e99aecSMing Lei if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) { 419cc3200eaSMing Lei /* 420cc3200eaSMing Lei * Firstly normal IO request is inserted to scheduler queue or 421cc3200eaSMing Lei * sw queue, meantime we add flush request to dispatch queue( 422cc3200eaSMing Lei * hctx->dispatch) directly and there is at most one in-flight 423cc3200eaSMing Lei * flush request for each hw queue, so it doesn't matter to add 424cc3200eaSMing Lei * flush request to tail or front of the dispatch queue. 425cc3200eaSMing Lei * 426cc3200eaSMing Lei * Secondly in case of NCQ, flush request belongs to non-NCQ 427cc3200eaSMing Lei * command, and queueing it will fail when there is any 428cc3200eaSMing Lei * in-flight normal IO request(NCQ command). When adding flush 429cc3200eaSMing Lei * rq to the front of hctx->dispatch, it is easier to introduce 430cc3200eaSMing Lei * extra time to flush rq's latency because of S_SCHED_RESTART 431cc3200eaSMing Lei * compared with adding to the tail of dispatch queue, then 432cc3200eaSMing Lei * chance of flush merge is increased, and less flush requests 433cc3200eaSMing Lei * will be issued to controller. It is observed that ~10% time 434cc3200eaSMing Lei * is saved in blktests block/004 on disk attached to AHCI/NCQ 435cc3200eaSMing Lei * drive when adding flush rq to the front of hctx->dispatch. 436cc3200eaSMing Lei * 437cc3200eaSMing Lei * Simply queue flush rq to the front of hctx->dispatch so that 438cc3200eaSMing Lei * intensive flush workloads can benefit in case of NCQ HW. 439cc3200eaSMing Lei */ 440cc3200eaSMing Lei at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head; 44101e99aecSMing Lei blk_mq_request_bypass_insert(rq, at_head, false); 4420cacba6cSOmar Sandoval goto run; 44301e99aecSMing Lei } 4440cacba6cSOmar Sandoval 445f9cd4bfeSJens Axboe if (e && e->type->ops.insert_requests) { 446bd6737f1SJens Axboe LIST_HEAD(list); 447bd6737f1SJens Axboe 448bd6737f1SJens Axboe list_add(&rq->queuelist, &list); 449f9cd4bfeSJens Axboe e->type->ops.insert_requests(hctx, &list, at_head); 450bd6737f1SJens Axboe } else { 451bd6737f1SJens Axboe spin_lock(&ctx->lock); 452bd6737f1SJens Axboe __blk_mq_insert_request(hctx, rq, at_head); 453bd6737f1SJens Axboe spin_unlock(&ctx->lock); 454bd6737f1SJens Axboe } 455bd6737f1SJens Axboe 4560cacba6cSOmar Sandoval run: 457bd6737f1SJens Axboe if (run_queue) 458bd6737f1SJens Axboe blk_mq_run_hw_queue(hctx, async); 459bd6737f1SJens Axboe } 460bd6737f1SJens Axboe 46167cae4c9SJens Axboe void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx, 462bd6737f1SJens Axboe struct blk_mq_ctx *ctx, 463bd6737f1SJens Axboe struct list_head *list, bool run_queue_async) 464bd6737f1SJens Axboe { 465f9afca4dSJens Axboe struct elevator_queue *e; 466e87eb301SMing Lei struct request_queue *q = hctx->queue; 467e87eb301SMing Lei 468e87eb301SMing Lei /* 469e87eb301SMing Lei * blk_mq_sched_insert_requests() is called from flush plug 470e87eb301SMing Lei * context only, and hold one usage counter to prevent queue 471e87eb301SMing Lei * from being released. 472e87eb301SMing Lei */ 473e87eb301SMing Lei percpu_ref_get(&q->q_usage_counter); 474f9afca4dSJens Axboe 475f9afca4dSJens Axboe e = hctx->queue->elevator; 476f9cd4bfeSJens Axboe if (e && e->type->ops.insert_requests) 477f9cd4bfeSJens Axboe e->type->ops.insert_requests(hctx, list, false); 4786ce3dd6eSMing Lei else { 4796ce3dd6eSMing Lei /* 4806ce3dd6eSMing Lei * try to issue requests directly if the hw queue isn't 4816ce3dd6eSMing Lei * busy in case of 'none' scheduler, and this way may save 4826ce3dd6eSMing Lei * us one extra enqueue & dequeue to sw queue. 4836ce3dd6eSMing Lei */ 484fd9c40f6SBart Van Assche if (!hctx->dispatch_busy && !e && !run_queue_async) { 4856ce3dd6eSMing Lei blk_mq_try_issue_list_directly(hctx, list); 486fd9c40f6SBart Van Assche if (list_empty(list)) 487e87eb301SMing Lei goto out; 488fd9c40f6SBart Van Assche } 489bd6737f1SJens Axboe blk_mq_insert_requests(hctx, ctx, list); 4906ce3dd6eSMing Lei } 491bd6737f1SJens Axboe 492bd6737f1SJens Axboe blk_mq_run_hw_queue(hctx, run_queue_async); 493e87eb301SMing Lei out: 494e87eb301SMing Lei percpu_ref_put(&q->q_usage_counter); 495bd6737f1SJens Axboe } 496bd6737f1SJens Axboe 497bd166ef1SJens Axboe static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set, 498bd166ef1SJens Axboe struct blk_mq_hw_ctx *hctx, 499bd166ef1SJens Axboe unsigned int hctx_idx) 500bd166ef1SJens Axboe { 501bd166ef1SJens Axboe if (hctx->sched_tags) { 502bd166ef1SJens Axboe blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx); 503bd166ef1SJens Axboe blk_mq_free_rq_map(hctx->sched_tags); 504bd166ef1SJens Axboe hctx->sched_tags = NULL; 505bd166ef1SJens Axboe } 506bd166ef1SJens Axboe } 507bd166ef1SJens Axboe 5086917ff0bSOmar Sandoval static int blk_mq_sched_alloc_tags(struct request_queue *q, 5096917ff0bSOmar Sandoval struct blk_mq_hw_ctx *hctx, 5106917ff0bSOmar Sandoval unsigned int hctx_idx) 511bd166ef1SJens Axboe { 512bd166ef1SJens Axboe struct blk_mq_tag_set *set = q->tag_set; 5136917ff0bSOmar Sandoval int ret; 514bd166ef1SJens Axboe 5156917ff0bSOmar Sandoval hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests, 5166917ff0bSOmar Sandoval set->reserved_tags); 517bd166ef1SJens Axboe if (!hctx->sched_tags) 5186917ff0bSOmar Sandoval return -ENOMEM; 5196917ff0bSOmar Sandoval 5206917ff0bSOmar Sandoval ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests); 5216917ff0bSOmar Sandoval if (ret) 5226917ff0bSOmar Sandoval blk_mq_sched_free_tags(set, hctx, hctx_idx); 523bd166ef1SJens Axboe 524bd166ef1SJens Axboe return ret; 525bd166ef1SJens Axboe } 526bd166ef1SJens Axboe 527c3e22192SMing Lei /* called in queue's release handler, tagset has gone away */ 52854d5329dSOmar Sandoval static void blk_mq_sched_tags_teardown(struct request_queue *q) 529bd166ef1SJens Axboe { 530bd166ef1SJens Axboe struct blk_mq_hw_ctx *hctx; 531bd166ef1SJens Axboe int i; 532bd166ef1SJens Axboe 533c3e22192SMing Lei queue_for_each_hw_ctx(q, hctx, i) { 534c3e22192SMing Lei if (hctx->sched_tags) { 535c3e22192SMing Lei blk_mq_free_rq_map(hctx->sched_tags); 536c3e22192SMing Lei hctx->sched_tags = NULL; 537c3e22192SMing Lei } 538c3e22192SMing Lei } 539bd166ef1SJens Axboe } 540d3484991SJens Axboe 5416917ff0bSOmar Sandoval int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) 5426917ff0bSOmar Sandoval { 5436917ff0bSOmar Sandoval struct blk_mq_hw_ctx *hctx; 544ee056f98SOmar Sandoval struct elevator_queue *eq; 5456917ff0bSOmar Sandoval unsigned int i; 5466917ff0bSOmar Sandoval int ret; 5476917ff0bSOmar Sandoval 5486917ff0bSOmar Sandoval if (!e) { 5496917ff0bSOmar Sandoval q->elevator = NULL; 55032a50fabSMing Lei q->nr_requests = q->tag_set->queue_depth; 5516917ff0bSOmar Sandoval return 0; 5526917ff0bSOmar Sandoval } 5536917ff0bSOmar Sandoval 5546917ff0bSOmar Sandoval /* 55532825c45SMing Lei * Default to double of smaller one between hw queue_depth and 128, 55632825c45SMing Lei * since we don't split into sync/async like the old code did. 55732825c45SMing Lei * Additionally, this is a per-hw queue depth. 5586917ff0bSOmar Sandoval */ 55932825c45SMing Lei q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth, 56032825c45SMing Lei BLKDEV_MAX_RQ); 5616917ff0bSOmar Sandoval 5626917ff0bSOmar Sandoval queue_for_each_hw_ctx(q, hctx, i) { 5636917ff0bSOmar Sandoval ret = blk_mq_sched_alloc_tags(q, hctx, i); 5646917ff0bSOmar Sandoval if (ret) 5656917ff0bSOmar Sandoval goto err; 5666917ff0bSOmar Sandoval } 5676917ff0bSOmar Sandoval 568f9cd4bfeSJens Axboe ret = e->ops.init_sched(q, e); 5696917ff0bSOmar Sandoval if (ret) 5706917ff0bSOmar Sandoval goto err; 5716917ff0bSOmar Sandoval 572d332ce09SOmar Sandoval blk_mq_debugfs_register_sched(q); 573d332ce09SOmar Sandoval 574ee056f98SOmar Sandoval queue_for_each_hw_ctx(q, hctx, i) { 575f9cd4bfeSJens Axboe if (e->ops.init_hctx) { 576f9cd4bfeSJens Axboe ret = e->ops.init_hctx(hctx, i); 577ee056f98SOmar Sandoval if (ret) { 578ee056f98SOmar Sandoval eq = q->elevator; 579c3e22192SMing Lei blk_mq_sched_free_requests(q); 580ee056f98SOmar Sandoval blk_mq_exit_sched(q, eq); 581ee056f98SOmar Sandoval kobject_put(&eq->kobj); 582ee056f98SOmar Sandoval return ret; 583ee056f98SOmar Sandoval } 584ee056f98SOmar Sandoval } 585d332ce09SOmar Sandoval blk_mq_debugfs_register_sched_hctx(q, hctx); 586ee056f98SOmar Sandoval } 587ee056f98SOmar Sandoval 5886917ff0bSOmar Sandoval return 0; 5896917ff0bSOmar Sandoval 5906917ff0bSOmar Sandoval err: 591c3e22192SMing Lei blk_mq_sched_free_requests(q); 59254d5329dSOmar Sandoval blk_mq_sched_tags_teardown(q); 59354d5329dSOmar Sandoval q->elevator = NULL; 5946917ff0bSOmar Sandoval return ret; 5956917ff0bSOmar Sandoval } 5966917ff0bSOmar Sandoval 597c3e22192SMing Lei /* 598c3e22192SMing Lei * called in either blk_queue_cleanup or elevator_switch, tagset 599c3e22192SMing Lei * is required for freeing requests 600c3e22192SMing Lei */ 601c3e22192SMing Lei void blk_mq_sched_free_requests(struct request_queue *q) 602c3e22192SMing Lei { 603c3e22192SMing Lei struct blk_mq_hw_ctx *hctx; 604c3e22192SMing Lei int i; 605c3e22192SMing Lei 606c3e22192SMing Lei queue_for_each_hw_ctx(q, hctx, i) { 607c3e22192SMing Lei if (hctx->sched_tags) 608c3e22192SMing Lei blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i); 609c3e22192SMing Lei } 610c3e22192SMing Lei } 611c3e22192SMing Lei 61254d5329dSOmar Sandoval void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e) 61354d5329dSOmar Sandoval { 614ee056f98SOmar Sandoval struct blk_mq_hw_ctx *hctx; 615ee056f98SOmar Sandoval unsigned int i; 616ee056f98SOmar Sandoval 617ee056f98SOmar Sandoval queue_for_each_hw_ctx(q, hctx, i) { 618d332ce09SOmar Sandoval blk_mq_debugfs_unregister_sched_hctx(hctx); 619f9cd4bfeSJens Axboe if (e->type->ops.exit_hctx && hctx->sched_data) { 620f9cd4bfeSJens Axboe e->type->ops.exit_hctx(hctx, i); 621ee056f98SOmar Sandoval hctx->sched_data = NULL; 622ee056f98SOmar Sandoval } 623ee056f98SOmar Sandoval } 624d332ce09SOmar Sandoval blk_mq_debugfs_unregister_sched(q); 625f9cd4bfeSJens Axboe if (e->type->ops.exit_sched) 626f9cd4bfeSJens Axboe e->type->ops.exit_sched(e); 62754d5329dSOmar Sandoval blk_mq_sched_tags_teardown(q); 62854d5329dSOmar Sandoval q->elevator = NULL; 62954d5329dSOmar Sandoval } 630