/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *))
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (exit && hctx->sched_data)
			exit(hctx);
		kfree(hctx->sched_data);
		hctx->sched_data = NULL;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);

static void __blk_mq_sched_assign_ioc(struct request_queue *q,
				      struct request *rq,
				      struct bio *bio,
				      struct io_context *ioc)
{
	struct io_cq *icq;

	spin_lock_irq(q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}

	rq->elv.icq = icq;
	if (!blk_mq_sched_get_rq_priv(q, rq, bio)) {
		rq->rq_flags |= RQF_ELVPRIV;
		get_io_context(icq->ioc);
		return;
	}

	rq->elv.icq = NULL;
}

static void blk_mq_sched_assign_ioc(struct request_queue *q,
				    struct request *rq, struct bio *bio)
{
	struct io_context *ioc;

	ioc = rq_ioc(bio);
	if (ioc)
		__blk_mq_sched_assign_ioc(q, rq, bio, ioc);
}
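
/*
 * SCHED_RESTART lifecycle: blk_mq_sched_mark_restart_hctx() sets
 * BLK_MQ_S_SCHED_RESTART when dispatch has to leave requests behind, and
 * blk_mq_sched_restart() clears the bit and re-runs the queue once a
 * driver tag has been freed. For shared tag sets, q->shared_hctx_restart
 * additionally counts how many hardware queues are marked, so the restart
 * scan can be skipped entirely when the count is zero.
 */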

/*
 * Mark a hardware queue as needing a restart. For shared queues, maintain
 * a count of how many hardware queues are marked for restart.
 */
static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
		struct request_queue *q = hctx->queue;

		if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
			atomic_inc(&q->shared_hctx_restart);
	} else
		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return false;

	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
		struct request_queue *q = hctx->queue;

		if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
			atomic_dec(&q->shared_hctx_restart);
	} else
		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	if (blk_mq_hctx_has_pending(hctx)) {
		blk_mq_run_hw_queue(hctx, true);
		return true;
	}

	return false;
}
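
/*
 * Allocate a request for @bio, pulling from the scheduler tags (via
 * BLK_MQ_REQ_INTERNAL) when an elevator is attached. Flush requests never
 * come from the scheduler. On success, non-flush requests may also get an
 * io_context assigned; on failure, the queue reference taken by
 * blk_queue_enter_live() is dropped again.
 */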
struct request *blk_mq_sched_get_request(struct request_queue *q,
					 struct bio *bio,
					 unsigned int op,
					 struct blk_mq_alloc_data *data)
{
	struct elevator_queue *e = q->elevator;
	struct request *rq;

	blk_queue_enter_live(q);
	data->q = q;
	if (likely(!data->ctx))
		data->ctx = blk_mq_get_ctx(q);
	if (likely(!data->hctx))
		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);

	if (e) {
		data->flags |= BLK_MQ_REQ_INTERNAL;

		/*
		 * Flush requests are special and go directly to the
		 * dispatch list.
		 */
		if (!op_is_flush(op) && e->type->ops.mq.get_request) {
			rq = e->type->ops.mq.get_request(q, op, data);
			if (rq)
				rq->rq_flags |= RQF_QUEUED;
		} else
			rq = __blk_mq_alloc_request(data, op);
	} else {
		rq = __blk_mq_alloc_request(data, op);
	}

	if (rq) {
		if (!op_is_flush(op)) {
			rq->elv.icq = NULL;
			if (e && e->type->icq_cache)
				blk_mq_sched_assign_ioc(q, rq, bio);
		}
		data->hctx->queued++;
		return rq;
	}

	blk_queue_exit(q);
	return NULL;
}

void blk_mq_sched_put_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (rq->rq_flags & RQF_ELVPRIV) {
		blk_mq_sched_put_rq_priv(rq->q, rq);
		if (rq->elv.icq) {
			put_io_context(rq->elv.icq->ioc);
			rq->elv.icq = NULL;
		}
	}

	if ((rq->rq_flags & RQF_QUEUED) && e && e->type->ops.mq.put_request)
		e->type->ops.mq.put_request(rq);
	else
		blk_mq_finish_request(rq);
}
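
/*
 * Dispatch requests to the driver for this hardware queue. Requests that
 * were previously punted to hctx->dispatch are drained first; only if that
 * list was empty do we pull new work, either from the software queues (no
 * scheduler dispatch hook) or from the scheduler one request at a time,
 * stopping as soon as the driver can't accept more.
 */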
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
	bool did_work = false;
	LIST_HEAD(rq_list);

	if (unlikely(blk_mq_hctx_stopped(hctx)))
		return;

	hctx->run++;

	/*
	 * If we have previous entries on our dispatch list, grab them first for
	 * more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests, if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		did_work = blk_mq_dispatch_rq_list(q, &rq_list);
	} else if (!has_sched_dispatch) {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(q, &rq_list);
	}

	/*
	 * We want to dispatch from the scheduler if we had no work left
	 * on the dispatch list, OR if we did have work but weren't able
	 * to make progress.
	 */
	if (!did_work && has_sched_dispatch) {
		do {
			struct request *rq;

			rq = e->type->ops.mq.dispatch_request(hctx);
			if (!rq)
				break;
			list_add(&rq->queuelist, &rq_list);
		} while (blk_mq_dispatch_rq_list(q, &rq_list));
	}
}
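
/*
 * Try to merge @bio into an existing request. On a successful back or
 * front merge, also attempt to merge the two now-adjacent requests; if
 * that succeeds, *merged_request is set to the request that was merged
 * away so the caller can free it.
 */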
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
			    struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_back_merge(q, rq, bio))
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_front_merge(q, rq, bio))
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);

bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.mq.bio_merge) {
		struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
		struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

		blk_mq_put_ctx(ctx);
		return e->type->ops.mq.bio_merge(hctx, bio);
	}

	return false;
}

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

void blk_mq_sched_request_inserted(struct request *rq)
{
	trace_block_rq_insert(rq->q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);

static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	if (rq->tag == -1) {
		rq->rq_flags |= RQF_SORTED;
		return false;
	}

	/*
	 * If we already have a real request tag, send directly to
	 * the dispatch list.
	 */
	spin_lock(&hctx->lock);
	list_add(&rq->queuelist, &hctx->dispatch);
	spin_unlock(&hctx->lock);
	return true;
}

/**
 * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
 * @pos:    loop cursor.
 * @skip:   the list element that will not be examined. Iteration starts at
 *          @skip->next.
 * @head:   head of the list to examine. This list must have at least one
 *          element, namely @skip.
 * @member: name of the list_head structure within typeof(*pos).
 */
#define list_for_each_entry_rcu_rr(pos, skip, head, member)		\
	for ((pos) = (skip);						\
	     (pos = (pos)->member.next != (head) ? list_entry_rcu(	\
			(pos)->member.next, typeof(*pos), member) :	\
		  list_entry_rcu((pos)->member.next->next, typeof(*pos), member)), \
	     (pos) != (skip); )
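
/*
 * Note that iteration starts at @skip->next and wraps around the list,
 * stepping over @head so the list head itself is never returned. For a
 * list head -> a -> b -> c with @skip == b, the loop visits c, then a,
 * then terminates when it arrives back at b.
 */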

/*
 * Called after a driver tag has been freed to check whether a hctx needs to
 * be restarted. Restarts @hctx if its tag set is not shared. Restarts hardware
 * queues in a round-robin fashion if the tag set of @hctx is shared with other
 * hardware queues.
 */
void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
{
	struct blk_mq_tags *const tags = hctx->tags;
	struct blk_mq_tag_set *const set = hctx->queue->tag_set;
	struct request_queue *const queue = hctx->queue, *q;
	struct blk_mq_hw_ctx *hctx2;
	unsigned int i, j;

	if (set->flags & BLK_MQ_F_TAG_SHARED) {
		/*
		 * If this is 0, then we know that no hardware queues
		 * have RESTART marked. We're done.
		 */
		if (!atomic_read(&queue->shared_hctx_restart))
			return;

		rcu_read_lock();
		list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
					   tag_set_list) {
			queue_for_each_hw_ctx(q, hctx2, i)
				if (hctx2->tags == tags &&
				    blk_mq_sched_restart_hctx(hctx2))
					goto done;
		}
		j = hctx->queue_num + 1;
		for (i = 0; i < queue->nr_hw_queues; i++, j++) {
			if (j == queue->nr_hw_queues)
				j = 0;
			hctx2 = queue->queue_hw_ctx[j];
			if (hctx2->tags == tags &&
			    blk_mq_sched_restart_hctx(hctx2))
				break;
		}
done:
		rcu_read_unlock();
	} else {
		blk_mq_sched_restart_hctx(hctx);
	}
}

/*
 * Add flush/fua to the queue. If we fail getting a driver tag, then
 * punt to the requeue list. Requeue will re-invoke us from a context
 * that's safe to block from.
 */
static void blk_mq_sched_insert_flush(struct blk_mq_hw_ctx *hctx,
				      struct request *rq, bool can_block)
{
	if (blk_mq_get_driver_tag(rq, &hctx, can_block)) {
		blk_insert_flush(rq);
		blk_mq_run_hw_queue(hctx, true);
	} else
		blk_mq_add_to_requeue_list(rq, false, true);
}
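
/*
 * Insert a single request. Flushes that don't yet own a driver tag are
 * routed through blk_mq_sched_insert_flush(), requests that already own
 * a driver tag bypass the scheduler straight to the dispatch list, and
 * everything else goes to the elevator (or the software queue if no
 * elevator is attached).
 */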
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async, bool can_block)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

	if (rq->tag == -1 && op_is_flush(rq->cmd_flags)) {
		blk_mq_sched_insert_flush(hctx, rq, can_block);
		return;
	}

	if (e && blk_mq_sched_bypass_insert(hctx, rq))
		goto run;

	if (e && e->type->ops.mq.insert_requests) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.mq.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}
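
/*
 * Insert a list of requests, as issued from a plug. Any request that
 * unexpectedly carries a driver tag already is pulled out and bypassed
 * to the dispatch list; the rest is handed to the elevator (or the
 * software queue) in one go before the hardware queue is run.
 */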
void blk_mq_sched_insert_requests(struct request_queue *q,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	struct elevator_queue *e = hctx->queue->elevator;

	if (e) {
		struct request *rq, *next;

		/*
		 * We bypass requests that already have a driver tag assigned,
		 * which should only be flushes. Flushes are only ever inserted
		 * as single requests, so we shouldn't ever hit the
		 * WARN_ON_ONCE() below (but let's handle it just in case).
		 */
		list_for_each_entry_safe(rq, next, list, queuelist) {
			if (WARN_ON_ONCE(rq->tag != -1)) {
				list_del_init(&rq->queuelist);
				blk_mq_sched_bypass_insert(hctx, rq);
			}
		}
	}

	if (e && e->type->ops.mq.insert_requests)
		e->type->ops.mq.insert_requests(hctx, list, false);
	else
		blk_mq_insert_requests(hctx, ctx, list);

	blk_mq_run_hw_queue(hctx, run_queue_async);
}

static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	if (hctx->sched_tags) {
		blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
		blk_mq_free_rq_map(hctx->sched_tags);
		hctx->sched_tags = NULL;
	}
}

static int blk_mq_sched_alloc_tags(struct request_queue *q,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	struct blk_mq_tag_set *set = q->tag_set;
	int ret;

	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
					       set->reserved_tags);
	if (!hctx->sched_tags)
		return -ENOMEM;

	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
	if (ret)
		blk_mq_sched_free_tags(set, hctx, hctx_idx);

	return ret;
}

static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_sched_free_tags(set, hctx, i);
}
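
/*
 * Per-hctx scheduler setup/teardown, used for hardware queues that come
 * and go while an elevator is attached: allocate the scheduler tags, give
 * the elevator a chance to set up its per-hctx data, and register the
 * debugfs attributes. The exit path below undoes this in reverse order.
 */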
int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			   unsigned int hctx_idx)
{
	struct elevator_queue *e = q->elevator;
	int ret;

	if (!e)
		return 0;

	ret = blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
	if (ret)
		return ret;

	if (e->type->ops.mq.init_hctx) {
		ret = e->type->ops.mq.init_hctx(hctx, hctx_idx);
		if (ret) {
			blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
			return ret;
		}
	}

	blk_mq_debugfs_register_sched_hctx(q, hctx);

	return 0;
}

void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			    unsigned int hctx_idx)
{
	struct elevator_queue *e = q->elevator;

	if (!e)
		return;

	blk_mq_debugfs_unregister_sched_hctx(hctx);

	if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
		e->type->ops.mq.exit_hctx(hctx, hctx_idx);
		hctx->sched_data = NULL;
	}

	blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
}
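
/*
 * Attach scheduler @e to @q: allocate scheduler tags for every hardware
 * queue, let the elevator initialize itself and each hctx, and register
 * the debugfs attributes. A failure in per-hctx init unwinds through
 * blk_mq_exit_sched(); earlier failures just tear the tags back down.
 */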
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned int i;
	int ret;

	if (!e) {
		q->elevator = NULL;
		return 0;
	}

	/*
	 * Default to 256, since we don't split into sync/async like the
	 * old code did. Additionally, this is a per-hw queue depth.
	 */
	q->nr_requests = 2 * BLKDEV_MAX_RQ;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_tags(q, hctx, i);
		if (ret)
			goto err;
	}

	ret = e->ops.mq.init_sched(q, e);
	if (ret)
		goto err;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.mq.init_hctx) {
			ret = e->ops.mq.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err:
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
	return ret;
}

void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
			e->type->ops.mq.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.mq.exit_sched)
		e->type->ops.mq.exit_sched(e);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
}

int blk_mq_sched_init(struct request_queue *q)
{
	int ret;

	mutex_lock(&q->sysfs_lock);
	ret = elevator_init(q, NULL);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}
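
/*
 * Illustrative only: a minimal sketch of an mq elevator_type, showing how
 * the hooks invoked by this file fit together. "example_fifo" and all of
 * its functions are made up for illustration; the ops signatures are
 * inferred from the call sites above. Guarded by #if 0 so it is never
 * built.
 */
#if 0
struct example_fifo_data {
	spinlock_t lock;
	struct list_head list;		/* FIFO of pending requests */
};

static int example_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct elevator_queue *eq;
	struct example_fifo_data *fd;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;
	fd = kzalloc_node(sizeof(*fd), GFP_KERNEL, q->node);
	if (!fd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	spin_lock_init(&fd->lock);
	INIT_LIST_HEAD(&fd->list);
	eq->elevator_data = fd;
	q->elevator = eq;
	return 0;
}

static void example_exit_sched(struct elevator_queue *e)
{
	kfree(e->elevator_data);
}

/* Called via e->type->ops.mq.insert_requests() in the insert paths above. */
static void example_insert_requests(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list, bool at_head)
{
	struct example_fifo_data *fd = hctx->queue->elevator->elevator_data;

	spin_lock(&fd->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_sched_request_inserted(rq);
		if (at_head)
			list_add(&rq->queuelist, &fd->list);
		else
			list_add_tail(&rq->queuelist, &fd->list);
	}
	spin_unlock(&fd->lock);
}

/*
 * Called repeatedly by blk_mq_sched_dispatch_requests() above; returning
 * NULL stops the dispatch loop.
 */
static struct request *example_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct example_fifo_data *fd = hctx->queue->elevator->elevator_data;
	struct request *rq;

	spin_lock(&fd->lock);
	rq = list_first_entry_or_null(&fd->list, struct request, queuelist);
	if (rq)
		list_del_init(&rq->queuelist);
	spin_unlock(&fd->lock);
	return rq;
}

static bool example_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct example_fifo_data *fd = hctx->queue->elevator->elevator_data;

	return !list_empty_careful(&fd->list);
}

static struct elevator_type example_fifo = {
	.uses_mq	= true,
	.ops.mq		= {
		.init_sched		= example_init_sched,
		.exit_sched		= example_exit_sched,
		.insert_requests	= example_insert_requests,
		.dispatch_request	= example_dispatch_request,
		.has_work		= example_has_work,
	},
	.elevator_name	= "example-fifo",
	.elevator_owner	= THIS_MODULE,
};
/* Registration would be: elv_register(&example_fifo); */
#endif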