/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *))
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (exit && hctx->sched_data)
			exit(hctx);
		kfree(hctx->sched_data);
		hctx->sched_data = NULL;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);

void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc = rq_ioc(bio);
	struct io_cq *icq;

	spin_lock_irq(q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}
	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}

/*
 * Mark a hardware queue as needing a restart. For shared queues, maintain
 * a count of how many hardware queues are marked for restart.
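 *
 * The count lets blk_mq_sched_restart() return early when no hardware queue
 * sharing the tag set has been marked.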
 */
static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
		struct request_queue *q = hctx->queue;

		if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
			atomic_inc(&q->shared_hctx_restart);
	} else
		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return false;

	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
		struct request_queue *q = hctx->queue;

		if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
			atomic_dec(&q->shared_hctx_restart);
	} else
		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	return blk_mq_run_hw_queue(hctx, true);
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart the queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 */
static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	LIST_HEAD(rq_list);

	do {
		struct request *rq;

		if (e->type->ops.mq.has_work &&
				!e->type->ops.mq.has_work(hctx))
			break;

		if (!blk_mq_get_dispatch_budget(hctx))
			break;

		rq = e->type->ops.mq.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(hctx);
			break;
		}

		/*
		 * Now this rq owns the budget, which has to be released
		 * if this rq won't be queued to the driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);
	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));
}

static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned idx = ctx->index_hw;

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart the queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 */
static void blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);

	do {
		struct request *rq;

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		if (!blk_mq_get_dispatch_budget(hctx))
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(hctx);
			break;
		}

		/*
		 * Now this rq owns the budget, which has to be released
		 * if this rq won't be queued to the driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));

	WRITE_ONCE(hctx->dispatch_from, ctx);
}

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
	LIST_HEAD(rq_list);

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	hctx->run++;

	/*
	 * If we have previous entries on our dispatch list, grab them first for
	 * more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (blk_mq_dispatch_rq_list(q, &rq_list, false)) {
			if (has_sched_dispatch)
				blk_mq_do_dispatch_sched(hctx);
			else
				blk_mq_do_dispatch_ctx(hctx);
		}
	} else if (has_sched_dispatch) {
		blk_mq_do_dispatch_sched(hctx);
	} else if (q->mq_ops->get_budget) {
		/*
		 * If we need to get a budget before queueing a request, we
		 * dequeue requests one by one from the sw queue to avoid
		 * messing up I/O merging when dispatch runs out of resource.
		 *
		 * TODO: get more budgets and dequeue more requests at
		 * a time.
		 */
		blk_mq_do_dispatch_ctx(hctx);
	} else {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(q, &rq_list, false);
	}
}

bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
			    struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_back_merge(q, rq, bio))
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_front_merge(q, rq, bio))
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);

/*
 * Iterate the list of requests and see if we can merge this bio with any
 * of them.
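 *
 * Only the last eight entries on the list are examined, to bound the time
 * spent looking for a merge.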
 */
bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
			   struct bio *bio)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, list, queuelist) {
		bool merged = false;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_back_merge(q, rq, bio);
			break;
		case ELEVATOR_FRONT_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_front_merge(q, rq, bio);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			continue;
		}

		return merged;
	}

	return false;
}
EXPORT_SYMBOL_GPL(blk_mq_bio_list_merge);

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
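 *
 * The caller must hold ctx->lock.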
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_ctx *ctx, struct bio *bio)
{
	lockdep_assert_held(&ctx->lock);

	if (blk_mq_bio_list_merge(q, &ctx->rq_list, bio)) {
		ctx->rq_merged++;
		return true;
	}

	return false;
}

bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	bool ret = false;

	if (e && e->type->ops.mq.bio_merge) {
		blk_mq_put_ctx(ctx);
		return e->type->ops.mq.bio_merge(hctx, bio);
	}

	if (hctx->flags & BLK_MQ_F_SHOULD_MERGE) {
		/* default per sw-queue merge */
		spin_lock(&ctx->lock);
		ret = blk_mq_attempt_merge(q, ctx, bio);
		spin_unlock(&ctx->lock);
	}

	blk_mq_put_ctx(ctx);
	return ret;
}

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

void blk_mq_sched_request_inserted(struct request *rq)
{
	trace_block_rq_insert(rq->q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);

static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       bool has_sched,
				       struct request *rq)
{
	/* dispatch flush rq directly */
	if (rq->rq_flags & RQF_FLUSH_SEQ) {
		spin_lock(&hctx->lock);
		list_add(&rq->queuelist, &hctx->dispatch);
		spin_unlock(&hctx->lock);
		return true;
	}

	if (has_sched)
		rq->rq_flags |= RQF_SORTED;

	return false;
}

/**
 * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
 * @pos: loop cursor.
 * @skip: the list element that will not be examined. Iteration starts at
 *	  @skip->next.
 * @head: head of the list to examine. This list must have at least one
 *	  element, namely @skip.
 * @member: name of the list_head structure within typeof(*pos).
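 *
 * Iteration wraps past @head and stops when it reaches @skip again, so every
 * element on the list other than @skip is visited exactly once.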
 */
#define list_for_each_entry_rcu_rr(pos, skip, head, member)		\
	for ((pos) = (skip);						\
	     (pos = (pos)->member.next != (head) ? list_entry_rcu(	\
			(pos)->member.next, typeof(*pos), member) :	\
			list_entry_rcu((pos)->member.next->next, typeof(*pos), member)), \
	     (pos) != (skip); )

/*
 * Called after a driver tag has been freed to check whether a hctx needs to
 * be restarted. Restarts @hctx if its tag set is not shared. Restarts hardware
 * queues in a round-robin fashion if the tag set of @hctx is shared with other
 * hardware queues.
 */
void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
{
	struct blk_mq_tags *const tags = hctx->tags;
	struct blk_mq_tag_set *const set = hctx->queue->tag_set;
	struct request_queue *const queue = hctx->queue, *q;
	struct blk_mq_hw_ctx *hctx2;
	unsigned int i, j;

	if (set->flags & BLK_MQ_F_TAG_SHARED) {
		/*
		 * If this is 0, then we know that no hardware queues
		 * have RESTART marked. We're done.
		 */
		if (!atomic_read(&queue->shared_hctx_restart))
			return;

		rcu_read_lock();
		list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
					   tag_set_list) {
			queue_for_each_hw_ctx(q, hctx2, i)
				if (hctx2->tags == tags &&
				    blk_mq_sched_restart_hctx(hctx2))
					goto done;
		}
		j = hctx->queue_num + 1;
		for (i = 0; i < queue->nr_hw_queues; i++, j++) {
			if (j == queue->nr_hw_queues)
				j = 0;
			hctx2 = queue->queue_hw_ctx[j];
			if (hctx2->tags == tags &&
			    blk_mq_sched_restart_hctx(hctx2))
				break;
		}
done:
		rcu_read_unlock();
	} else {
		blk_mq_sched_restart_hctx(hctx);
	}
}

void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

	/* a flush rq in the flush machinery needs to be dispatched directly */
	if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
		blk_insert_flush(rq);
		goto run;
	}

	WARN_ON(e && (rq->tag != -1));

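	/*
	 * Requests in a flush sequence bypass the scheduler and go straight
	 * to the hctx dispatch list.
	 */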
	if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
		goto run;

	if (e && e->type->ops.mq.insert_requests) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.mq.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

void blk_mq_sched_insert_requests(struct request_queue *q,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	struct elevator_queue *e = hctx->queue->elevator;

	if (e && e->type->ops.mq.insert_requests)
		e->type->ops.mq.insert_requests(hctx, list, false);
	else
		blk_mq_insert_requests(hctx, ctx, list);

	blk_mq_run_hw_queue(hctx, run_queue_async);
}

static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	if (hctx->sched_tags) {
		blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
		blk_mq_free_rq_map(hctx->sched_tags);
		hctx->sched_tags = NULL;
	}
}

static int blk_mq_sched_alloc_tags(struct request_queue *q,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	struct blk_mq_tag_set *set = q->tag_set;
	int ret;

	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
					       set->reserved_tags);
	if (!hctx->sched_tags)
		return -ENOMEM;

	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
	if (ret)
		blk_mq_sched_free_tags(set, hctx, hctx_idx);

	return ret;
}

static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_sched_free_tags(set, hctx, i);
}

int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			   unsigned int hctx_idx)
{
	struct elevator_queue *e = q->elevator;
	int ret;

	if (!e)
		return 0;

	ret = blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
	if (ret)
		return ret;

	if (e->type->ops.mq.init_hctx) {
		ret = e->type->ops.mq.init_hctx(hctx, hctx_idx);
		if (ret) {
			blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
			return ret;
		}
	}

	blk_mq_debugfs_register_sched_hctx(q, hctx);

	return 0;
}

void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			    unsigned int hctx_idx)
{
	struct elevator_queue *e = q->elevator;

	if (!e)
		return;

	blk_mq_debugfs_unregister_sched_hctx(hctx);

	if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
		e->type->ops.mq.exit_hctx(hctx, hctx_idx);
		hctx->sched_data = NULL;
	}

	blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
}

int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned int i;
	int ret;

	if (!e) {
		q->elevator = NULL;
		return 0;
	}

	/*
	 * Default to twice the smaller of the hardware queue_depth and 128,
	 * since we don't split into sync/async like the old code did.
	 * Additionally, this is a per-hw queue depth.
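	 *
	 * For example, a device with a hardware queue depth of 32 gets 64
	 * scheduler tags per hardware queue.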
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_MAX_RQ);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_tags(q, hctx, i);
		if (ret)
			goto err;
	}

	ret = e->ops.mq.init_sched(q, e);
	if (ret)
		goto err;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.mq.init_hctx) {
			ret = e->ops.mq.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err:
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
	return ret;
}

void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
			e->type->ops.mq.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.mq.exit_sched)
		e->type->ops.mq.exit_sched(e);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
}

int blk_mq_sched_init(struct request_queue *q)
{
	int ret;

	mutex_lock(&q->sysfs_lock);
	ret = elevator_init(q, NULL);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}