/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *))
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (exit && hctx->sched_data)
			exit(hctx);
		kfree(hctx->sched_data);
		hctx->sched_data = NULL;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);

void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc = rq_ioc(bio);
	struct io_cq *icq;

	spin_lock_irq(q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}
	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}

/*
 * Mark a hardware queue as needing a restart.
 */
static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;
	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	blk_mq_run_hw_queue(hctx, true);
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart the queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 */
static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	LIST_HEAD(rq_list);

	do {
		struct request *rq;

		if (e->type->ops.mq.has_work &&
		    !e->type->ops.mq.has_work(hctx))
			break;

		if (!blk_mq_get_dispatch_budget(hctx))
			break;

		rq = e->type->ops.mq.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(hctx);
			break;
		}

		/*
		 * Now this rq owns the budget, which has to be released
		 * if this rq won't be queued to the driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);
	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));
}
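
/*
 * The loop above is driven entirely by the elevator's ->has_work() and
 * ->dispatch_request() hooks. A minimal implementation of that pair could
 * look like the sketch below (illustrative only: the "sketch_data"
 * structure and all sketch_* names are made up, but the pattern matches
 * what mq-deadline and kyber do):
 *
 *	static bool sketch_has_work(struct blk_mq_hw_ctx *hctx)
 *	{
 *		struct sketch_data *sd = hctx->queue->elevator->elevator_data;
 *
 *		return !list_empty_careful(&sd->fifo);
 *	}
 *
 *	static struct request *sketch_dispatch_request(struct blk_mq_hw_ctx *hctx)
 *	{
 *		struct sketch_data *sd = hctx->queue->elevator->elevator_data;
 *		struct request *rq = NULL;
 *
 *		spin_lock(&sd->lock);
 *		if (!list_empty(&sd->fifo)) {
 *			rq = list_first_entry(&sd->fifo, struct request,
 *					      queuelist);
 *			list_del_init(&rq->queuelist);
 *		}
 *		spin_unlock(&sd->lock);
 *		return rq;
 *	}
 *
 * Returning NULL terminates the dispatch loop, which then gives the unused
 * budget back via blk_mq_put_dispatch_budget().
 */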

static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned idx = ctx->index_hw;

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart the queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 */
static void blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);

	do {
		struct request *rq;

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		if (!blk_mq_get_dispatch_budget(hctx))
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(hctx);
			break;
		}

		/*
		 * Now this rq owns the budget, which has to be released
		 * if this rq won't be queued to the driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));

	WRITE_ONCE(hctx->dispatch_from, ctx);
}
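
/*
 * Feed requests to the driver in priority order: leftover requests parked
 * on hctx->dispatch go first; then, if the elevator implements
 * ->dispatch_request(), it is drained via blk_mq_do_dispatch_sched();
 * otherwise the per-CPU software queues are flushed, one request at a time
 * through blk_mq_do_dispatch_ctx() when the device is busy.
 */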

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
	LIST_HEAD(rq_list);

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	hctx->run++;

	/*
	 * If we have previous entries on our dispatch list, grab them first
	 * for fairer dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (blk_mq_dispatch_rq_list(q, &rq_list, false)) {
			if (has_sched_dispatch)
				blk_mq_do_dispatch_sched(hctx);
			else
				blk_mq_do_dispatch_ctx(hctx);
		}
	} else if (has_sched_dispatch) {
		blk_mq_do_dispatch_sched(hctx);
	} else if (hctx->dispatch_busy) {
		/* dequeue requests one by one from the sw queue if the queue is busy */
		blk_mq_do_dispatch_ctx(hctx);
	} else {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(q, &rq_list, false);
	}
}

bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
			    struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_back_merge(q, rq, bio))
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_front_merge(q, rq, bio))
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
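
/*
 * A scheduler typically calls blk_mq_sched_try_merge() from its
 * ->bio_merge() hook, under its own lock, and frees the request that a
 * successful back/front merge may have made redundant. A sketch modeled
 * loosely on mq-deadline (the "sketch_data" structure and its lock are
 * illustrative):
 *
 *	static bool sketch_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
 *	{
 *		struct request_queue *q = hctx->queue;
 *		struct sketch_data *sd = q->elevator->elevator_data;
 *		struct request *free = NULL;
 *		bool ret;
 *
 *		spin_lock(&sd->lock);
 *		ret = blk_mq_sched_try_merge(q, bio, &free);
 *		spin_unlock(&sd->lock);
 *
 *		if (free)
 *			blk_mq_free_request(free);
 *
 *		return ret;
 *	}
 */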

/*
 * Iterate the list of requests and see if we can merge this bio with any
 * of them.
 */
bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
			   struct bio *bio)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, list, queuelist) {
		bool merged = false;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_back_merge(q, rq, bio);
			break;
		case ELEVATOR_FRONT_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_front_merge(q, rq, bio);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			continue;
		}

		return merged;
	}

	return false;
}
EXPORT_SYMBOL_GPL(blk_mq_bio_list_merge);

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_ctx *ctx, struct bio *bio)
{
	lockdep_assert_held(&ctx->lock);

	if (blk_mq_bio_list_merge(q, &ctx->rq_list, bio)) {
		ctx->rq_merged++;
		return true;
	}

	return false;
}

bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	bool ret = false;

	if (e && e->type->ops.mq.bio_merge) {
		blk_mq_put_ctx(ctx);
		return e->type->ops.mq.bio_merge(hctx, bio);
	}

	if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
	    !list_empty_careful(&ctx->rq_list)) {
		/* default per sw-queue merge */
		spin_lock(&ctx->lock);
		ret = blk_mq_attempt_merge(q, ctx, bio);
		spin_unlock(&ctx->lock);
	}

	blk_mq_put_ctx(ctx);
	return ret;
}

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

void blk_mq_sched_request_inserted(struct request *rq)
{
	trace_block_rq_insert(rq->q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);

static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       bool has_sched,
				       struct request *rq)
{
	/* dispatch flush rq directly */
	if (rq->rq_flags & RQF_FLUSH_SEQ) {
		spin_lock(&hctx->lock);
		list_add(&rq->queuelist, &hctx->dispatch);
		spin_unlock(&hctx->lock);
		return true;
	}

	if (has_sched)
		rq->rq_flags |= RQF_SORTED;

	return false;
}

void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

	/* a flush rq in the flush machinery needs to be dispatched directly */
	if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
		blk_insert_flush(rq);
		goto run;
	}

	WARN_ON(e && (rq->tag != -1));

	if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
		goto run;

	if (e && e->type->ops.mq.insert_requests) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.mq.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}
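
/*
 * On the scheduler side, ->insert_requests() is expected to consume the
 * list, attempt insert merges, and emit the insert tracepoint via
 * blk_mq_sched_request_inserted(). A sketch in the style of mq-deadline
 * ("sketch_data", its lock and fifo are illustrative; at_head handling is
 * simplified):
 *
 *	static void sketch_insert_requests(struct blk_mq_hw_ctx *hctx,
 *					   struct list_head *list, bool at_head)
 *	{
 *		struct request_queue *q = hctx->queue;
 *		struct sketch_data *sd = q->elevator->elevator_data;
 *
 *		spin_lock(&sd->lock);
 *		while (!list_empty(list)) {
 *			struct request *rq;
 *
 *			rq = list_first_entry(list, struct request, queuelist);
 *			list_del_init(&rq->queuelist);
 *			if (blk_mq_sched_try_insert_merge(q, rq))
 *				continue;
 *			blk_mq_sched_request_inserted(rq);
 *			if (at_head)
 *				list_add(&rq->queuelist, &sd->fifo);
 *			else
 *				list_add_tail(&rq->queuelist, &sd->fifo);
 *		}
 *		spin_unlock(&sd->lock);
 *	}
 */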

void blk_mq_sched_insert_requests(struct request_queue *q,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	struct elevator_queue *e = hctx->queue->elevator;

	if (e && e->type->ops.mq.insert_requests)
		e->type->ops.mq.insert_requests(hctx, list, false);
	else {
		/*
		 * With the 'none' scheduler, try to issue requests directly
		 * if the hw queue isn't busy; this may save us one extra
		 * enqueue & dequeue to the sw queue.
		 */
		if (!hctx->dispatch_busy && !e && !run_queue_async) {
			blk_mq_try_issue_list_directly(hctx, list);
			if (list_empty(list))
				return;
		}
		blk_mq_insert_requests(hctx, ctx, list);
	}

	blk_mq_run_hw_queue(hctx, run_queue_async);
}

static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	if (hctx->sched_tags) {
		blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
		blk_mq_free_rq_map(hctx->sched_tags);
		hctx->sched_tags = NULL;
	}
}

static int blk_mq_sched_alloc_tags(struct request_queue *q,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	struct blk_mq_tag_set *set = q->tag_set;
	int ret;

	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
					       set->reserved_tags);
	if (!hctx->sched_tags)
		return -ENOMEM;

	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
	if (ret)
		blk_mq_sched_free_tags(set, hctx, hctx_idx);

	return ret;
}

static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_sched_free_tags(set, hctx, i);
}

int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned int i;
	int ret;

	if (!e) {
		q->elevator = NULL;
		q->nr_requests = q->tag_set->queue_depth;
		return 0;
	}

	/*
	 * Default to twice the smaller of the hw queue_depth and 128,
	 * since we don't split into sync/async like the old code did.
	 * Additionally, this is a per-hw queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_MAX_RQ);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_tags(q, hctx, i);
		if (ret)
			goto err;
	}

	ret = e->ops.mq.init_sched(q, e);
	if (ret)
		goto err;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.mq.init_hctx) {
			ret = e->ops.mq.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err:
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
	return ret;
}

void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
			e->type->ops.mq.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.mq.exit_sched)
		e->type->ops.mq.exit_sched(e);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
}
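
/*
 * blk_mq_init_sched() and the hooks above are driven by the elevator's ops
 * table. A scheduler of this era wires them up roughly as below (a sketch
 * in the style of mq-deadline; all sketch_* names are illustrative):
 *
 *	static struct elevator_type sketch_sched = {
 *		.ops.mq = {
 *			.init_sched		= sketch_init_sched,
 *			.exit_sched		= sketch_exit_sched,
 *			.insert_requests	= sketch_insert_requests,
 *			.dispatch_request	= sketch_dispatch_request,
 *			.has_work		= sketch_has_work,
 *			.bio_merge		= sketch_bio_merge,
 *		},
 *		.uses_mq	= true,
 *		.elevator_name	= "sketch",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 * The module init function would then call elv_register(&sketch_sched).
 */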