/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *))
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (exit && hctx->sched_data)
			exit(hctx);
		kfree(hctx->sched_data);
		hctx->sched_data = NULL;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);

int blk_mq_sched_init_hctx_data(struct request_queue *q, size_t size,
				int (*init)(struct blk_mq_hw_ctx *),
				void (*exit)(struct blk_mq_hw_ctx *))
{
	struct blk_mq_hw_ctx *hctx;
	int ret;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		hctx->sched_data = kmalloc_node(size, GFP_KERNEL, hctx->numa_node);
		if (!hctx->sched_data) {
			ret = -ENOMEM;
			goto error;
		}

		if (init) {
			ret = init(hctx);
			if (ret) {
				/*
				 * We don't want to give exit() a partially
				 * initialized sched_data. init() must clean up
				 * if it fails.
				 */
				kfree(hctx->sched_data);
				hctx->sched_data = NULL;
				goto error;
			}
		}
	}

	return 0;
error:
	blk_mq_sched_free_hctx_data(q, exit);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_sched_init_hctx_data);
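
/*
 * Look up (or allocate) the io_cq linking @ioc to this queue, attach it to
 * @rq and let the scheduler set up its per-request private data. On success
 * the request is marked RQF_ELVPRIV and holds a reference on the io_context;
 * on failure rq->elv.icq is cleared again.
 */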
static void __blk_mq_sched_assign_ioc(struct request_queue *q,
				      struct request *rq,
				      struct bio *bio,
				      struct io_context *ioc)
{
	struct io_cq *icq;

	spin_lock_irq(q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}

	rq->elv.icq = icq;
	if (!blk_mq_sched_get_rq_priv(q, rq, bio)) {
		rq->rq_flags |= RQF_ELVPRIV;
		get_io_context(icq->ioc);
		return;
	}

	rq->elv.icq = NULL;
}

static void blk_mq_sched_assign_ioc(struct request_queue *q,
				    struct request *rq, struct bio *bio)
{
	struct io_context *ioc;

	ioc = rq_ioc(bio);
	if (ioc)
		__blk_mq_sched_assign_ioc(q, rq, bio, ioc);
}
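
/*
 * Allocate a request for @bio. With an I/O scheduler attached the request
 * is allocated internally (BLK_MQ_REQ_INTERNAL) and, for non-flush
 * requests, through the scheduler's get_request hook when one is provided;
 * flush requests and queues without a scheduler use the regular allocation
 * path. The queue usage reference taken here is dropped again if no request
 * could be allocated.
 */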
struct request *blk_mq_sched_get_request(struct request_queue *q,
					 struct bio *bio,
					 unsigned int op,
					 struct blk_mq_alloc_data *data)
{
	struct elevator_queue *e = q->elevator;
	struct request *rq;

	blk_queue_enter_live(q);
	data->q = q;
	if (likely(!data->ctx))
		data->ctx = blk_mq_get_ctx(q);
	if (likely(!data->hctx))
		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);

	if (e) {
		data->flags |= BLK_MQ_REQ_INTERNAL;

		/*
		 * Flush requests are special and go directly to the
		 * dispatch list.
		 */
		if (!op_is_flush(op) && e->type->ops.mq.get_request) {
			rq = e->type->ops.mq.get_request(q, op, data);
			if (rq)
				rq->rq_flags |= RQF_QUEUED;
		} else
			rq = __blk_mq_alloc_request(data, op);
	} else {
		rq = __blk_mq_alloc_request(data, op);
	}

	if (rq) {
		if (!op_is_flush(op)) {
			rq->elv.icq = NULL;
			if (e && e->type->icq_cache)
				blk_mq_sched_assign_ioc(q, rq, bio);
		}
		data->hctx->queued++;
		return rq;
	}

	blk_queue_exit(q);
	return NULL;
}

void blk_mq_sched_put_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (rq->rq_flags & RQF_ELVPRIV) {
		blk_mq_sched_put_rq_priv(rq->q, rq);
		if (rq->elv.icq) {
			put_io_context(rq->elv.icq->ioc);
			rq->elv.icq = NULL;
		}
	}

	if ((rq->rq_flags & RQF_QUEUED) && e && e->type->ops.mq.put_request)
		e->type->ops.mq.put_request(rq);
	else
		blk_mq_finish_request(rq);
}
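
/*
 * Feed requests to the driver for this hardware queue: entries left over on
 * hctx->dispatch are retried first for fairness; otherwise requests are
 * pulled from the scheduler's dispatch_request hook or, without a scheduler,
 * flushed out of the software queues, and passed to blk_mq_dispatch_rq_list().
 */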
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
	bool did_work = false;
	LIST_HEAD(rq_list);

	if (unlikely(blk_mq_hctx_stopped(hctx)))
		return;

	hctx->run++;

	/*
	 * If we have previous entries on our dispatch list, grab them first for
	 * more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests, if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		did_work = blk_mq_dispatch_rq_list(q, &rq_list);
	} else if (!has_sched_dispatch) {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(q, &rq_list);
	}

	/*
	 * We want to dispatch from the scheduler if we had no work left
	 * on the dispatch list, OR if we did have work but weren't able
	 * to make progress.
	 */
	if (!did_work && has_sched_dispatch) {
		do {
			struct request *rq;

			rq = e->type->ops.mq.dispatch_request(hctx);
			if (!rq)
				break;
			list_add(&rq->queuelist, &rq_list);
		} while (blk_mq_dispatch_rq_list(q, &rq_list));
	}
}

void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
				   struct list_head *rq_list,
				   struct request *(*get_rq)(struct blk_mq_hw_ctx *))
{
	do {
		struct request *rq;

		rq = get_rq(hctx);
		if (!rq)
			break;

		list_add_tail(&rq->queuelist, rq_list);
	} while (1);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_move_to_dispatch);

bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
			    struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_back_merge(q, rq, bio))
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_front_merge(q, rq, bio))
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
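
/*
 * Let the I/O scheduler try to merge @bio into a request it already holds
 * for the software queue mapped to the current CPU. Returns true if the bio
 * was merged and needs no further handling.
 */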
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.mq.bio_merge) {
		struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
		struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

		blk_mq_put_ctx(ctx);
		return e->type->ops.mq.bio_merge(hctx, bio);
	}

	return false;
}

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

void blk_mq_sched_request_inserted(struct request *rq)
{
	trace_block_rq_insert(rq->q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);

static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	if (rq->tag == -1) {
		rq->rq_flags |= RQF_SORTED;
		return false;
	}

	/*
	 * If we already have a real request tag, send directly to
	 * the dispatch list.
	 */
	spin_lock(&hctx->lock);
	list_add(&rq->queuelist, &hctx->dispatch);
	spin_unlock(&hctx->lock);
	return true;
}

static void blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
		if (blk_mq_hctx_has_pending(hctx))
			blk_mq_run_hw_queue(hctx, true);
	}
}

void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	unsigned int i;

	if (test_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) {
		if (test_and_clear_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) {
			queue_for_each_hw_ctx(q, hctx, i)
				blk_mq_sched_restart_hctx(hctx);
		}
	} else {
		blk_mq_sched_restart_hctx(hctx);
	}
}

/*
 * Add flush/fua to the queue. If we fail getting a driver tag, then
 * punt to the requeue list. Requeue will re-invoke us from a context
 * that's safe to block from.
 */
static void blk_mq_sched_insert_flush(struct blk_mq_hw_ctx *hctx,
				      struct request *rq, bool can_block)
{
	if (blk_mq_get_driver_tag(rq, &hctx, can_block)) {
		blk_insert_flush(rq);
		blk_mq_run_hw_queue(hctx, true);
	} else
		blk_mq_add_to_requeue_list(rq, false, true);
}

void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async, bool can_block)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

	if (rq->tag == -1 && op_is_flush(rq->cmd_flags)) {
		blk_mq_sched_insert_flush(hctx, rq, can_block);
		return;
	}

	if (e && blk_mq_sched_bypass_insert(hctx, rq))
		goto run;

	if (e && e->type->ops.mq.insert_requests) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.mq.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}
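
/*
 * Insert a list of requests (e.g. from a plug) that all belong to the
 * software queue @ctx. With a scheduler attached the requests are handed to
 * its insert_requests hook; requests that unexpectedly already carry a
 * driver tag are sent straight to the dispatch list instead.
 */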
void blk_mq_sched_insert_requests(struct request_queue *q,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	struct elevator_queue *e = hctx->queue->elevator;

	if (e) {
		struct request *rq, *next;

		/*
		 * We bypass requests that already have a driver tag assigned,
		 * which should only be flushes. Flushes are only ever inserted
		 * as single requests, so we shouldn't ever hit the
		 * WARN_ON_ONCE() below (but let's handle it just in case).
		 */
		list_for_each_entry_safe(rq, next, list, queuelist) {
			if (WARN_ON_ONCE(rq->tag != -1)) {
				list_del_init(&rq->queuelist);
				blk_mq_sched_bypass_insert(hctx, rq);
			}
		}
	}

	if (e && e->type->ops.mq.insert_requests)
		e->type->ops.mq.insert_requests(hctx, list, false);
	else
		blk_mq_insert_requests(hctx, ctx, list);

	blk_mq_run_hw_queue(hctx, run_queue_async);
}

static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	if (hctx->sched_tags) {
		blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
		blk_mq_free_rq_map(hctx->sched_tags);
		hctx->sched_tags = NULL;
	}
}

static int blk_mq_sched_alloc_tags(struct request_queue *q,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	struct blk_mq_tag_set *set = q->tag_set;
	int ret;

	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
					       set->reserved_tags);
	if (!hctx->sched_tags)
		return -ENOMEM;

	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
	if (ret)
		blk_mq_sched_free_tags(set, hctx, hctx_idx);

	return ret;
}

void blk_mq_sched_teardown(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_sched_free_tags(set, hctx, i);
}
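
/*
 * Attach the I/O scheduler @e to the queue: allocate a scheduler tag set
 * for every hardware queue and call the elevator's init_sched hook. On any
 * failure the scheduler tags are torn down again.
 */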
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	int ret;

	if (!e) {
		q->elevator = NULL;
		return 0;
	}

	/*
	 * Default to 256, since we don't split into sync/async like the
	 * old code did. Additionally, this is a per-hw queue depth.
	 */
	q->nr_requests = 2 * BLKDEV_MAX_RQ;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_tags(q, hctx, i);
		if (ret)
			goto err;
	}

	ret = e->ops.mq.init_sched(q, e);
	if (ret)
		goto err;

	return 0;

err:
	blk_mq_sched_teardown(q);
	return ret;
}

int blk_mq_sched_init(struct request_queue *q)
{
	int ret;

	mutex_lock(&q->sysfs_lock);
	ret = elevator_init(q, NULL);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}