// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *))
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (exit && hctx->sched_data)
			exit(hctx);
		kfree(hctx->sched_data);
		hctx->sched_data = NULL;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);

void blk_mq_sched_assign_ioc(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc;
	struct io_cq *icq;

	/*
	 * May not have an IO context if it's a passthrough request
	 */
	ioc = current->io_context;
	if (!ioc)
		return;

	spin_lock_irq(&q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(&q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}
	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}

/*
 * Mark a hardware queue as needing a restart. For shared queues, maintain
 * a count of how many hardware queues are marked for restart.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);

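/*
 * Re-run a hardware queue that was previously marked as needing a restart:
 * clear BLK_MQ_S_SCHED_RESTART and kick the queue asynchronously.
 */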
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;
	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	blk_mq_run_hw_queue(hctx, true);
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 */
static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	LIST_HEAD(rq_list);

	do {
		struct request *rq;

		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
			break;

		if (!blk_mq_get_dispatch_budget(hctx))
			break;

		rq = e->type->ops.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(hctx);
			break;
		}

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);
	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));
}

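/*
 * Return the software queue that follows @ctx on @hctx, wrapping around at
 * hctx->nr_ctx. Used to rotate the round-robin dispatch starting point.
 */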
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned short idx = ctx->index_hw[hctx->type];

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 */
static void blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);

	do {
		struct request *rq;

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		if (!blk_mq_get_dispatch_budget(hctx))
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(hctx);
			break;
		}

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));

	WRITE_ONCE(hctx->dispatch_from, ctx);
}

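/*
 * Main dispatch entry point for a hardware queue: requests left over on
 * hctx->dispatch are retried first, then further work is pulled from the
 * I/O scheduler (if one is attached) or from the software queues.
 */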
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	const bool has_sched_dispatch = e && e->type->ops.dispatch_request;
	LIST_HEAD(rq_list);

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	hctx->run++;

	/*
	 * If we have previous entries on our dispatch list, grab them first
	 * for more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (blk_mq_dispatch_rq_list(q, &rq_list, false)) {
			if (has_sched_dispatch)
				blk_mq_do_dispatch_sched(hctx);
			else
				blk_mq_do_dispatch_ctx(hctx);
		}
	} else if (has_sched_dispatch) {
		blk_mq_do_dispatch_sched(hctx);
	} else if (hctx->dispatch_busy) {
		/* dequeue request one by one from sw queue if queue is busy */
		blk_mq_do_dispatch_ctx(hctx);
	} else {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(q, &rq_list, false);
	}
}

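/*
 * Ask the elevator whether @bio can be merged into an existing request. If
 * the merge also makes a neighbouring request redundant, that request is
 * returned in @merged_request so the caller can free it.
 */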
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
			    struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_back_merge(q, rq, bio))
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_front_merge(q, rq, bio))
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);

/*
 * Iterate list of requests and see if we can merge this bio with any
 * of them.
 */
bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
			   struct bio *bio)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, list, queuelist) {
		bool merged = false;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_back_merge(q, rq, bio);
			break;
		case ELEVATOR_FRONT_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_front_merge(q, rq, bio);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			continue;
		}

		return merged;
	}

	return false;
}
EXPORT_SYMBOL_GPL(blk_mq_bio_list_merge);

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_hw_ctx *hctx,
				 struct blk_mq_ctx *ctx, struct bio *bio)
{
	enum hctx_type type = hctx->type;

	lockdep_assert_held(&ctx->lock);

	if (blk_mq_bio_list_merge(q, &ctx->rq_lists[type], bio)) {
		ctx->rq_merged++;
		return true;
	}

	return false;
}

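/*
 * Bio merge entry point: hand the bio to the elevator's ->bio_merge() if a
 * scheduler is attached, otherwise fall back to the default per-software-queue
 * merge.
 */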
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
	bool ret = false;
	enum hctx_type type;

	if (e && e->type->ops.bio_merge) {
		blk_mq_put_ctx(ctx);
		return e->type->ops.bio_merge(hctx, bio);
	}

	type = hctx->type;
	if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
			!list_empty_careful(&ctx->rq_lists[type])) {
		/* default per sw-queue merge */
		spin_lock(&ctx->lock);
		ret = blk_mq_attempt_merge(q, hctx, ctx, bio);
		spin_unlock(&ctx->lock);
	}

	blk_mq_put_ctx(ctx);
	return ret;
}

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

void blk_mq_sched_request_inserted(struct request *rq)
{
	trace_block_rq_insert(rq->q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);

static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       bool has_sched,
				       struct request *rq)
{
	/* dispatch flush rq directly */
	if (rq->rq_flags & RQF_FLUSH_SEQ) {
		spin_lock(&hctx->lock);
		list_add(&rq->queuelist, &hctx->dispatch);
		spin_unlock(&hctx->lock);
		return true;
	}

	if (has_sched)
		rq->rq_flags |= RQF_SORTED;

	return false;
}

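/*
 * Insert a single request, either through the elevator's ->insert_requests()
 * hook or directly into the software queue. Flush requests bypass the
 * scheduler: they are handed to the flush machinery or placed straight on
 * the dispatch list.
 */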
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	/* a flush rq in the flush machinery needs to be dispatched directly */
	if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
		blk_insert_flush(rq);
		goto run;
	}

	WARN_ON(e && (rq->tag != -1));

	if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
		goto run;

	if (e && e->type->ops.insert_requests) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

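/*
 * Insert a list of requests coming from a flushed plug; they all belong to
 * the software queue @ctx on the hardware queue @hctx.
 */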
void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct elevator_queue *e;
	struct request_queue *q = hctx->queue;

	/*
	 * blk_mq_sched_insert_requests() is called from flush plug
	 * context only, and holds one usage counter to prevent the queue
	 * from being released.
	 */
	percpu_ref_get(&q->q_usage_counter);

	e = hctx->queue->elevator;
	if (e && e->type->ops.insert_requests)
		e->type->ops.insert_requests(hctx, list, false);
	else {
		/*
		 * Try to issue requests directly if the hw queue isn't busy
		 * in the 'none' scheduler case; this may save us one extra
		 * enqueue & dequeue to the sw queue.
		 */
		if (!hctx->dispatch_busy && !e && !run_queue_async) {
			blk_mq_try_issue_list_directly(hctx, list);
			if (list_empty(list))
				goto out;
		}
		blk_mq_insert_requests(hctx, ctx, list);
	}

	blk_mq_run_hw_queue(hctx, run_queue_async);
out:
	percpu_ref_put(&q->q_usage_counter);
}

static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	if (hctx->sched_tags) {
		blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
		blk_mq_free_rq_map(hctx->sched_tags);
		hctx->sched_tags = NULL;
	}
}

static int blk_mq_sched_alloc_tags(struct request_queue *q,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	struct blk_mq_tag_set *set = q->tag_set;
	int ret;

	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
					       set->reserved_tags);
	if (!hctx->sched_tags)
		return -ENOMEM;

	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
	if (ret)
		blk_mq_sched_free_tags(set, hctx, hctx_idx);

	return ret;
}

static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_sched_free_tags(set, hctx, i);
}

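/*
 * Attach an I/O scheduler to the queue: size the per-hctx scheduler tag maps,
 * run the elevator's init_sched()/init_hctx() hooks and register the debugfs
 * attributes. With no scheduler, the queue simply uses the tag set's depth.
 */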
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned int i;
	int ret;

	if (!e) {
		q->elevator = NULL;
		q->nr_requests = q->tag_set->queue_depth;
		return 0;
	}

	/*
	 * Default to double the smaller of the hardware queue depth and 128,
	 * since we don't split into sync/async like the old code did.
	 * Additionally, this is a per-hw queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_MAX_RQ);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_tags(q, hctx, i);
		if (ret)
			goto err;
	}

	ret = e->ops.init_sched(q, e);
	if (ret)
		goto err;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.init_hctx) {
			ret = e->ops.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err:
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
	return ret;
}

void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.exit_hctx && hctx->sched_data) {
			e->type->ops.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.exit_sched)
		e->type->ops.exit_sched(e);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
}