1 /* 2 * Block multiqueue core code 3 * 4 * Copyright (C) 2013-2014 Jens Axboe 5 * Copyright (C) 2013-2014 Christoph Hellwig 6 */ 7 #include <linux/kernel.h> 8 #include <linux/module.h> 9 #include <linux/backing-dev.h> 10 #include <linux/bio.h> 11 #include <linux/blkdev.h> 12 #include <linux/kmemleak.h> 13 #include <linux/mm.h> 14 #include <linux/init.h> 15 #include <linux/slab.h> 16 #include <linux/workqueue.h> 17 #include <linux/smp.h> 18 #include <linux/llist.h> 19 #include <linux/list_sort.h> 20 #include <linux/cpu.h> 21 #include <linux/cache.h> 22 #include <linux/sched/sysctl.h> 23 #include <linux/sched/topology.h> 24 #include <linux/sched/signal.h> 25 #include <linux/delay.h> 26 #include <linux/crash_dump.h> 27 #include <linux/prefetch.h> 28 29 #include <trace/events/block.h> 30 31 #include <linux/blk-mq.h> 32 #include "blk.h" 33 #include "blk-mq.h" 34 #include "blk-mq-debugfs.h" 35 #include "blk-mq-tag.h" 36 #include "blk-stat.h" 37 #include "blk-wbt.h" 38 #include "blk-mq-sched.h" 39 40 static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie); 41 static void blk_mq_poll_stats_start(struct request_queue *q); 42 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb); 43 44 static int blk_mq_poll_stats_bkt(const struct request *rq) 45 { 46 int ddir, bytes, bucket; 47 48 ddir = rq_data_dir(rq); 49 bytes = blk_rq_bytes(rq); 50 51 bucket = ddir + 2*(ilog2(bytes) - 9); 52 53 if (bucket < 0) 54 return -1; 55 else if (bucket >= BLK_MQ_POLL_STATS_BKTS) 56 return ddir + BLK_MQ_POLL_STATS_BKTS - 2; 57 58 return bucket; 59 } 60 61 /* 62 * Check if any of the ctx's have pending work in this hardware queue 63 */ 64 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) 65 { 66 return !list_empty_careful(&hctx->dispatch) || 67 sbitmap_any_bit_set(&hctx->ctx_map) || 68 blk_mq_sched_has_work(hctx); 69 } 70 71 /* 72 * Mark this ctx as having pending work in this hardware queue 73 */ 74 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, 75 struct blk_mq_ctx *ctx) 76 { 77 if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw)) 78 sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw); 79 } 80 81 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, 82 struct blk_mq_ctx *ctx) 83 { 84 sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw); 85 } 86 87 struct mq_inflight { 88 struct hd_struct *part; 89 unsigned int *inflight; 90 }; 91 92 static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx, 93 struct request *rq, void *priv, 94 bool reserved) 95 { 96 struct mq_inflight *mi = priv; 97 98 if (blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) { 99 /* 100 * index[0] counts the specific partition that was asked 101 * for. index[1] counts the ones that are active on the 102 * whole device, so increment that if mi->part is indeed 103 * a partition, and not a whole device. 
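		 *
		 * For example, if the caller asks about partition sda1
		 * (an illustrative name), index[0] ends up counting only
		 * the requests issued to sda1, while index[1] counts every
		 * in-flight request on the whole sda device.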
104 */ 105 if (rq->part == mi->part) 106 mi->inflight[0]++; 107 if (mi->part->partno) 108 mi->inflight[1]++; 109 } 110 } 111 112 void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part, 113 unsigned int inflight[2]) 114 { 115 struct mq_inflight mi = { .part = part, .inflight = inflight, }; 116 117 inflight[0] = inflight[1] = 0; 118 blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi); 119 } 120 121 void blk_freeze_queue_start(struct request_queue *q) 122 { 123 int freeze_depth; 124 125 freeze_depth = atomic_inc_return(&q->mq_freeze_depth); 126 if (freeze_depth == 1) { 127 percpu_ref_kill(&q->q_usage_counter); 128 if (q->mq_ops) 129 blk_mq_run_hw_queues(q, false); 130 } 131 } 132 EXPORT_SYMBOL_GPL(blk_freeze_queue_start); 133 134 void blk_mq_freeze_queue_wait(struct request_queue *q) 135 { 136 wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter)); 137 } 138 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait); 139 140 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q, 141 unsigned long timeout) 142 { 143 return wait_event_timeout(q->mq_freeze_wq, 144 percpu_ref_is_zero(&q->q_usage_counter), 145 timeout); 146 } 147 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout); 148 149 /* 150 * Guarantee no request is in use, so we can change any data structure of 151 * the queue afterward. 152 */ 153 void blk_freeze_queue(struct request_queue *q) 154 { 155 /* 156 * In the !blk_mq case we are only calling this to kill the 157 * q_usage_counter, otherwise this increases the freeze depth 158 * and waits for it to return to zero. For this reason there is 159 * no blk_unfreeze_queue(), and blk_freeze_queue() is not 160 * exported to drivers as the only user for unfreeze is blk_mq. 161 */ 162 blk_freeze_queue_start(q); 163 if (!q->mq_ops) 164 blk_drain_queue(q); 165 blk_mq_freeze_queue_wait(q); 166 } 167 168 void blk_mq_freeze_queue(struct request_queue *q) 169 { 170 /* 171 * ...just an alias to keep freeze and unfreeze actions balanced 172 * in the blk_mq_* namespace 173 */ 174 blk_freeze_queue(q); 175 } 176 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue); 177 178 void blk_mq_unfreeze_queue(struct request_queue *q) 179 { 180 int freeze_depth; 181 182 freeze_depth = atomic_dec_return(&q->mq_freeze_depth); 183 WARN_ON_ONCE(freeze_depth < 0); 184 if (!freeze_depth) { 185 percpu_ref_reinit(&q->q_usage_counter); 186 wake_up_all(&q->mq_freeze_wq); 187 } 188 } 189 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue); 190 191 /* 192 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the 193 * mpt3sas driver such that this function can be removed. 194 */ 195 void blk_mq_quiesce_queue_nowait(struct request_queue *q) 196 { 197 blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q); 198 } 199 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait); 200 201 /** 202 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished 203 * @q: request queue. 204 * 205 * Note: this function does not prevent that the struct request end_io() 206 * callback function is invoked. Once this function is returned, we make 207 * sure no dispatch can happen until the queue is unquiesced via 208 * blk_mq_unquiesce_queue(). 
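 *
 * A rough usage sketch (illustrative only, not taken from a particular
 * driver): a caller that must not race with ->queue_rq() while it
 * reconfigures state might do
 *
 *	blk_mq_quiesce_queue(q);
 *	... update driver or queue state here ...
 *	blk_mq_unquiesce_queue(q);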
209 */ 210 void blk_mq_quiesce_queue(struct request_queue *q) 211 { 212 struct blk_mq_hw_ctx *hctx; 213 unsigned int i; 214 bool rcu = false; 215 216 blk_mq_quiesce_queue_nowait(q); 217 218 queue_for_each_hw_ctx(q, hctx, i) { 219 if (hctx->flags & BLK_MQ_F_BLOCKING) 220 synchronize_srcu(hctx->srcu); 221 else 222 rcu = true; 223 } 224 if (rcu) 225 synchronize_rcu(); 226 } 227 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue); 228 229 /* 230 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue() 231 * @q: request queue. 232 * 233 * This function recovers queue into the state before quiescing 234 * which is done by blk_mq_quiesce_queue. 235 */ 236 void blk_mq_unquiesce_queue(struct request_queue *q) 237 { 238 blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q); 239 240 /* dispatch requests which are inserted during quiescing */ 241 blk_mq_run_hw_queues(q, true); 242 } 243 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue); 244 245 void blk_mq_wake_waiters(struct request_queue *q) 246 { 247 struct blk_mq_hw_ctx *hctx; 248 unsigned int i; 249 250 queue_for_each_hw_ctx(q, hctx, i) 251 if (blk_mq_hw_queue_mapped(hctx)) 252 blk_mq_tag_wakeup_all(hctx->tags, true); 253 } 254 255 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) 256 { 257 return blk_mq_has_free_tags(hctx->tags); 258 } 259 EXPORT_SYMBOL(blk_mq_can_queue); 260 261 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, 262 unsigned int tag, unsigned int op) 263 { 264 struct blk_mq_tags *tags = blk_mq_tags_from_data(data); 265 struct request *rq = tags->static_rqs[tag]; 266 req_flags_t rq_flags = 0; 267 268 if (data->flags & BLK_MQ_REQ_INTERNAL) { 269 rq->tag = -1; 270 rq->internal_tag = tag; 271 } else { 272 if (blk_mq_tag_busy(data->hctx)) { 273 rq_flags = RQF_MQ_INFLIGHT; 274 atomic_inc(&data->hctx->nr_active); 275 } 276 rq->tag = tag; 277 rq->internal_tag = -1; 278 data->hctx->tags->rqs[rq->tag] = rq; 279 } 280 281 /* csd/requeue_work/fifo_time is initialized before use */ 282 rq->q = data->q; 283 rq->mq_ctx = data->ctx; 284 rq->rq_flags = rq_flags; 285 rq->cpu = -1; 286 rq->cmd_flags = op; 287 if (data->flags & BLK_MQ_REQ_PREEMPT) 288 rq->rq_flags |= RQF_PREEMPT; 289 if (blk_queue_io_stat(data->q)) 290 rq->rq_flags |= RQF_IO_STAT; 291 INIT_LIST_HEAD(&rq->queuelist); 292 INIT_HLIST_NODE(&rq->hash); 293 RB_CLEAR_NODE(&rq->rb_node); 294 rq->rq_disk = NULL; 295 rq->part = NULL; 296 rq->start_time = jiffies; 297 rq->nr_phys_segments = 0; 298 #if defined(CONFIG_BLK_DEV_INTEGRITY) 299 rq->nr_integrity_segments = 0; 300 #endif 301 rq->special = NULL; 302 /* tag was already set */ 303 rq->extra_len = 0; 304 rq->__deadline = 0; 305 306 INIT_LIST_HEAD(&rq->timeout_list); 307 rq->timeout = 0; 308 309 rq->end_io = NULL; 310 rq->end_io_data = NULL; 311 rq->next_rq = NULL; 312 313 #ifdef CONFIG_BLK_CGROUP 314 rq->rl = NULL; 315 set_start_time_ns(rq); 316 rq->io_start_time_ns = 0; 317 #endif 318 319 data->ctx->rq_dispatched[op_is_sync(op)]++; 320 return rq; 321 } 322 323 static struct request *blk_mq_get_request(struct request_queue *q, 324 struct bio *bio, unsigned int op, 325 struct blk_mq_alloc_data *data) 326 { 327 struct elevator_queue *e = q->elevator; 328 struct request *rq; 329 unsigned int tag; 330 bool put_ctx_on_error = false; 331 332 blk_queue_enter_live(q); 333 data->q = q; 334 if (likely(!data->ctx)) { 335 data->ctx = blk_mq_get_ctx(q); 336 put_ctx_on_error = true; 337 } 338 if (likely(!data->hctx)) 339 data->hctx = blk_mq_map_queue(q, data->ctx->cpu); 340 if (op & REQ_NOWAIT) 341 data->flags |= BLK_MQ_REQ_NOWAIT; 342 343 if (e) 
{ 344 data->flags |= BLK_MQ_REQ_INTERNAL; 345 346 /* 347 * Flush requests are special and go directly to the 348 * dispatch list. 349 */ 350 if (!op_is_flush(op) && e->type->ops.mq.limit_depth) 351 e->type->ops.mq.limit_depth(op, data); 352 } 353 354 tag = blk_mq_get_tag(data); 355 if (tag == BLK_MQ_TAG_FAIL) { 356 if (put_ctx_on_error) { 357 blk_mq_put_ctx(data->ctx); 358 data->ctx = NULL; 359 } 360 blk_queue_exit(q); 361 return NULL; 362 } 363 364 rq = blk_mq_rq_ctx_init(data, tag, op); 365 if (!op_is_flush(op)) { 366 rq->elv.icq = NULL; 367 if (e && e->type->ops.mq.prepare_request) { 368 if (e->type->icq_cache && rq_ioc(bio)) 369 blk_mq_sched_assign_ioc(rq, bio); 370 371 e->type->ops.mq.prepare_request(rq, bio); 372 rq->rq_flags |= RQF_ELVPRIV; 373 } 374 } 375 data->hctx->queued++; 376 return rq; 377 } 378 379 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, 380 blk_mq_req_flags_t flags) 381 { 382 struct blk_mq_alloc_data alloc_data = { .flags = flags }; 383 struct request *rq; 384 int ret; 385 386 ret = blk_queue_enter(q, flags); 387 if (ret) 388 return ERR_PTR(ret); 389 390 rq = blk_mq_get_request(q, NULL, op, &alloc_data); 391 blk_queue_exit(q); 392 393 if (!rq) 394 return ERR_PTR(-EWOULDBLOCK); 395 396 blk_mq_put_ctx(alloc_data.ctx); 397 398 rq->__data_len = 0; 399 rq->__sector = (sector_t) -1; 400 rq->bio = rq->biotail = NULL; 401 return rq; 402 } 403 EXPORT_SYMBOL(blk_mq_alloc_request); 404 405 struct request *blk_mq_alloc_request_hctx(struct request_queue *q, 406 unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx) 407 { 408 struct blk_mq_alloc_data alloc_data = { .flags = flags }; 409 struct request *rq; 410 unsigned int cpu; 411 int ret; 412 413 /* 414 * If the tag allocator sleeps we could get an allocation for a 415 * different hardware context. No need to complicate the low level 416 * allocator for this for the rare use case of a command tied to 417 * a specific queue. 418 */ 419 if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT))) 420 return ERR_PTR(-EINVAL); 421 422 if (hctx_idx >= q->nr_hw_queues) 423 return ERR_PTR(-EIO); 424 425 ret = blk_queue_enter(q, flags); 426 if (ret) 427 return ERR_PTR(ret); 428 429 /* 430 * Check if the hardware context is actually mapped to anything. 431 * If not tell the caller that it should skip this queue. 
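	 *
	 * A minimal caller-side sketch (illustrative; REQ_OP_DRV_OUT and
	 * the loop variable i are just example choices):
	 *
	 *	rq = blk_mq_alloc_request_hctx(q, REQ_OP_DRV_OUT,
	 *				       BLK_MQ_REQ_NOWAIT, i);
	 *	if (IS_ERR(rq) && PTR_ERR(rq) == -EXDEV)
	 *		continue;	... skip this unmapped hw queue ...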
432 */ 433 alloc_data.hctx = q->queue_hw_ctx[hctx_idx]; 434 if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) { 435 blk_queue_exit(q); 436 return ERR_PTR(-EXDEV); 437 } 438 cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask); 439 alloc_data.ctx = __blk_mq_get_ctx(q, cpu); 440 441 rq = blk_mq_get_request(q, NULL, op, &alloc_data); 442 blk_queue_exit(q); 443 444 if (!rq) 445 return ERR_PTR(-EWOULDBLOCK); 446 447 return rq; 448 } 449 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx); 450 451 void blk_mq_free_request(struct request *rq) 452 { 453 struct request_queue *q = rq->q; 454 struct elevator_queue *e = q->elevator; 455 struct blk_mq_ctx *ctx = rq->mq_ctx; 456 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu); 457 const int sched_tag = rq->internal_tag; 458 459 if (rq->rq_flags & RQF_ELVPRIV) { 460 if (e && e->type->ops.mq.finish_request) 461 e->type->ops.mq.finish_request(rq); 462 if (rq->elv.icq) { 463 put_io_context(rq->elv.icq->ioc); 464 rq->elv.icq = NULL; 465 } 466 } 467 468 ctx->rq_completed[rq_is_sync(rq)]++; 469 if (rq->rq_flags & RQF_MQ_INFLIGHT) 470 atomic_dec(&hctx->nr_active); 471 472 if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq))) 473 laptop_io_completion(q->backing_dev_info); 474 475 wbt_done(q->rq_wb, &rq->issue_stat); 476 477 if (blk_rq_rl(rq)) 478 blk_put_rl(blk_rq_rl(rq)); 479 480 blk_mq_rq_update_state(rq, MQ_RQ_IDLE); 481 if (rq->tag != -1) 482 blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag); 483 if (sched_tag != -1) 484 blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag); 485 blk_mq_sched_restart(hctx); 486 blk_queue_exit(q); 487 } 488 EXPORT_SYMBOL_GPL(blk_mq_free_request); 489 490 inline void __blk_mq_end_request(struct request *rq, blk_status_t error) 491 { 492 blk_account_io_done(rq); 493 494 if (rq->end_io) { 495 wbt_done(rq->q->rq_wb, &rq->issue_stat); 496 rq->end_io(rq, error); 497 } else { 498 if (unlikely(blk_bidi_rq(rq))) 499 blk_mq_free_request(rq->next_rq); 500 blk_mq_free_request(rq); 501 } 502 } 503 EXPORT_SYMBOL(__blk_mq_end_request); 504 505 void blk_mq_end_request(struct request *rq, blk_status_t error) 506 { 507 if (blk_update_request(rq, error, blk_rq_bytes(rq))) 508 BUG(); 509 __blk_mq_end_request(rq, error); 510 } 511 EXPORT_SYMBOL(blk_mq_end_request); 512 513 static void __blk_mq_complete_request_remote(void *data) 514 { 515 struct request *rq = data; 516 517 rq->q->softirq_done_fn(rq); 518 } 519 520 static void __blk_mq_complete_request(struct request *rq) 521 { 522 struct blk_mq_ctx *ctx = rq->mq_ctx; 523 bool shared = false; 524 int cpu; 525 526 WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT); 527 blk_mq_rq_update_state(rq, MQ_RQ_COMPLETE); 528 529 if (rq->internal_tag != -1) 530 blk_mq_sched_completed_request(rq); 531 if (rq->rq_flags & RQF_STATS) { 532 blk_mq_poll_stats_start(rq->q); 533 blk_stat_add(rq); 534 } 535 536 if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) { 537 rq->q->softirq_done_fn(rq); 538 return; 539 } 540 541 cpu = get_cpu(); 542 if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags)) 543 shared = cpus_share_cache(cpu, ctx->cpu); 544 545 if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) { 546 rq->csd.func = __blk_mq_complete_request_remote; 547 rq->csd.info = rq; 548 rq->csd.flags = 0; 549 smp_call_function_single_async(ctx->cpu, &rq->csd); 550 } else { 551 rq->q->softirq_done_fn(rq); 552 } 553 put_cpu(); 554 } 555 556 static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx) 557 __releases(hctx->srcu) 558 { 559 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) 560 
		rcu_read_unlock();
	else
		srcu_read_unlock(hctx->srcu, srcu_idx);
}

static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
	__acquires(hctx->srcu)
{
	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
		/* shut up gcc false positive */
		*srcu_idx = 0;
		rcu_read_lock();
	} else
		*srcu_idx = srcu_read_lock(hctx->srcu);
}

static void blk_mq_rq_update_aborted_gstate(struct request *rq, u64 gstate)
{
	unsigned long flags;

	/*
	 * blk_mq_rq_aborted_gstate() is used from the completion path and
	 * can thus be called from irq context.  u64_stats_fetch in the
	 * middle of update on the same CPU leads to lockup.  Disable irq
	 * while updating.
	 */
	local_irq_save(flags);
	u64_stats_update_begin(&rq->aborted_gstate_sync);
	rq->aborted_gstate = gstate;
	u64_stats_update_end(&rq->aborted_gstate_sync);
	local_irq_restore(flags);
}

static u64 blk_mq_rq_aborted_gstate(struct request *rq)
{
	unsigned int start;
	u64 aborted_gstate;

	do {
		start = u64_stats_fetch_begin(&rq->aborted_gstate_sync);
		aborted_gstate = rq->aborted_gstate;
	} while (u64_stats_fetch_retry(&rq->aborted_gstate_sync, start));

	return aborted_gstate;
}

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
	int srcu_idx;

	if (unlikely(blk_should_fake_timeout(q)))
		return;

	/*
	 * If @rq->aborted_gstate equals the current instance, timeout is
	 * claiming @rq and we lost.  This is synchronized through
	 * hctx_lock().  See blk_mq_timeout_work() for details.
	 *
	 * The completion path never blocks, so we could use RCU here
	 * directly instead of hctx_lock(), which can be either RCU or
	 * SRCU.  However, that would complicate paths which want to
	 * synchronize against us.  Let's stay in sync with the issue path
	 * so that hctx_lock() covers both issue and completion paths.
	 */
	hctx_lock(hctx, &srcu_idx);
	if (blk_mq_rq_aborted_gstate(rq) != rq->gstate)
		__blk_mq_complete_request(rq);
	hctx_unlock(hctx, srcu_idx);
}
EXPORT_SYMBOL(blk_mq_complete_request);

int blk_mq_request_started(struct request *rq)
{
	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}
EXPORT_SYMBOL_GPL(blk_mq_request_started);

void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_sched_started_request(rq);

	trace_block_rq_issue(q, rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		blk_stat_set_issue(&rq->issue_stat, blk_rq_sectors(rq));
		rq->rq_flags |= RQF_STATS;
		wbt_issue(q->rq_wb, &rq->issue_stat);
	}

	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);

	/*
	 * Mark @rq in-flight which also advances the generation number,
	 * and register for timeout.  Protect with a seqcount to allow the
	 * timeout path to read both @rq->gstate and @rq->deadline
	 * coherently.
	 *
	 * This is the only place where a request is marked in-flight.
If 670 * the timeout path reads an in-flight @rq->gstate, the 671 * @rq->deadline it reads together under @rq->gstate_seq is 672 * guaranteed to be the matching one. 673 */ 674 preempt_disable(); 675 write_seqcount_begin(&rq->gstate_seq); 676 677 blk_mq_rq_update_state(rq, MQ_RQ_IN_FLIGHT); 678 blk_add_timer(rq); 679 680 write_seqcount_end(&rq->gstate_seq); 681 preempt_enable(); 682 683 if (q->dma_drain_size && blk_rq_bytes(rq)) { 684 /* 685 * Make sure space for the drain appears. We know we can do 686 * this because max_hw_segments has been adjusted to be one 687 * fewer than the device can handle. 688 */ 689 rq->nr_phys_segments++; 690 } 691 } 692 EXPORT_SYMBOL(blk_mq_start_request); 693 694 /* 695 * When we reach here because queue is busy, it's safe to change the state 696 * to IDLE without checking @rq->aborted_gstate because we should still be 697 * holding the RCU read lock and thus protected against timeout. 698 */ 699 static void __blk_mq_requeue_request(struct request *rq) 700 { 701 struct request_queue *q = rq->q; 702 703 blk_mq_put_driver_tag(rq); 704 705 trace_block_rq_requeue(q, rq); 706 wbt_requeue(q->rq_wb, &rq->issue_stat); 707 708 if (blk_mq_rq_state(rq) != MQ_RQ_IDLE) { 709 blk_mq_rq_update_state(rq, MQ_RQ_IDLE); 710 if (q->dma_drain_size && blk_rq_bytes(rq)) 711 rq->nr_phys_segments--; 712 } 713 } 714 715 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list) 716 { 717 __blk_mq_requeue_request(rq); 718 719 /* this request will be re-inserted to io scheduler queue */ 720 blk_mq_sched_requeue_request(rq); 721 722 BUG_ON(blk_queued_rq(rq)); 723 blk_mq_add_to_requeue_list(rq, true, kick_requeue_list); 724 } 725 EXPORT_SYMBOL(blk_mq_requeue_request); 726 727 static void blk_mq_requeue_work(struct work_struct *work) 728 { 729 struct request_queue *q = 730 container_of(work, struct request_queue, requeue_work.work); 731 LIST_HEAD(rq_list); 732 struct request *rq, *next; 733 734 spin_lock_irq(&q->requeue_lock); 735 list_splice_init(&q->requeue_list, &rq_list); 736 spin_unlock_irq(&q->requeue_lock); 737 738 list_for_each_entry_safe(rq, next, &rq_list, queuelist) { 739 if (!(rq->rq_flags & RQF_SOFTBARRIER)) 740 continue; 741 742 rq->rq_flags &= ~RQF_SOFTBARRIER; 743 list_del_init(&rq->queuelist); 744 blk_mq_sched_insert_request(rq, true, false, false); 745 } 746 747 while (!list_empty(&rq_list)) { 748 rq = list_entry(rq_list.next, struct request, queuelist); 749 list_del_init(&rq->queuelist); 750 blk_mq_sched_insert_request(rq, false, false, false); 751 } 752 753 blk_mq_run_hw_queues(q, false); 754 } 755 756 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head, 757 bool kick_requeue_list) 758 { 759 struct request_queue *q = rq->q; 760 unsigned long flags; 761 762 /* 763 * We abuse this flag that is otherwise used by the I/O scheduler to 764 * request head insertion from the workqueue. 
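	 *
	 * blk_mq_requeue_work() relies on this flag: it first re-inserts
	 * all RQF_SOFTBARRIER requests at the head of the scheduler queue
	 * and only then inserts the remaining entries at the tail.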
765 */ 766 BUG_ON(rq->rq_flags & RQF_SOFTBARRIER); 767 768 spin_lock_irqsave(&q->requeue_lock, flags); 769 if (at_head) { 770 rq->rq_flags |= RQF_SOFTBARRIER; 771 list_add(&rq->queuelist, &q->requeue_list); 772 } else { 773 list_add_tail(&rq->queuelist, &q->requeue_list); 774 } 775 spin_unlock_irqrestore(&q->requeue_lock, flags); 776 777 if (kick_requeue_list) 778 blk_mq_kick_requeue_list(q); 779 } 780 EXPORT_SYMBOL(blk_mq_add_to_requeue_list); 781 782 void blk_mq_kick_requeue_list(struct request_queue *q) 783 { 784 kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0); 785 } 786 EXPORT_SYMBOL(blk_mq_kick_requeue_list); 787 788 void blk_mq_delay_kick_requeue_list(struct request_queue *q, 789 unsigned long msecs) 790 { 791 kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 792 msecs_to_jiffies(msecs)); 793 } 794 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list); 795 796 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag) 797 { 798 if (tag < tags->nr_tags) { 799 prefetch(tags->rqs[tag]); 800 return tags->rqs[tag]; 801 } 802 803 return NULL; 804 } 805 EXPORT_SYMBOL(blk_mq_tag_to_rq); 806 807 struct blk_mq_timeout_data { 808 unsigned long next; 809 unsigned int next_set; 810 unsigned int nr_expired; 811 }; 812 813 static void blk_mq_rq_timed_out(struct request *req, bool reserved) 814 { 815 const struct blk_mq_ops *ops = req->q->mq_ops; 816 enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER; 817 818 req->rq_flags |= RQF_MQ_TIMEOUT_EXPIRED; 819 820 if (ops->timeout) 821 ret = ops->timeout(req, reserved); 822 823 switch (ret) { 824 case BLK_EH_HANDLED: 825 __blk_mq_complete_request(req); 826 break; 827 case BLK_EH_RESET_TIMER: 828 /* 829 * As nothing prevents from completion happening while 830 * ->aborted_gstate is set, this may lead to ignored 831 * completions and further spurious timeouts. 832 */ 833 blk_mq_rq_update_aborted_gstate(req, 0); 834 blk_add_timer(req); 835 break; 836 case BLK_EH_NOT_HANDLED: 837 break; 838 default: 839 printk(KERN_ERR "block: bad eh return: %d\n", ret); 840 break; 841 } 842 } 843 844 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, 845 struct request *rq, void *priv, bool reserved) 846 { 847 struct blk_mq_timeout_data *data = priv; 848 unsigned long gstate, deadline; 849 int start; 850 851 might_sleep(); 852 853 if (rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED) 854 return; 855 856 /* read coherent snapshots of @rq->state_gen and @rq->deadline */ 857 while (true) { 858 start = read_seqcount_begin(&rq->gstate_seq); 859 gstate = READ_ONCE(rq->gstate); 860 deadline = blk_rq_deadline(rq); 861 if (!read_seqcount_retry(&rq->gstate_seq, start)) 862 break; 863 cond_resched(); 864 } 865 866 /* if in-flight && overdue, mark for abortion */ 867 if ((gstate & MQ_RQ_STATE_MASK) == MQ_RQ_IN_FLIGHT && 868 time_after_eq(jiffies, deadline)) { 869 blk_mq_rq_update_aborted_gstate(rq, gstate); 870 data->nr_expired++; 871 hctx->nr_expired++; 872 } else if (!data->next_set || time_after(data->next, deadline)) { 873 data->next = deadline; 874 data->next_set = 1; 875 } 876 } 877 878 static void blk_mq_terminate_expired(struct blk_mq_hw_ctx *hctx, 879 struct request *rq, void *priv, bool reserved) 880 { 881 /* 882 * We marked @rq->aborted_gstate and waited for RCU. If there were 883 * completions that we lost to, they would have finished and 884 * updated @rq->gstate by now; otherwise, the completion path is 885 * now guaranteed to see @rq->aborted_gstate and yield. If 886 * @rq->aborted_gstate still matches @rq->gstate, @rq is ours. 
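	 *
	 * Put differently: losing the race shows up as @rq->gstate having
	 * advanced past the snapshot taken in blk_mq_check_expired(), in
	 * which case the request is left alone.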
887 */ 888 if (!(rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED) && 889 READ_ONCE(rq->gstate) == rq->aborted_gstate) 890 blk_mq_rq_timed_out(rq, reserved); 891 } 892 893 static void blk_mq_timeout_work(struct work_struct *work) 894 { 895 struct request_queue *q = 896 container_of(work, struct request_queue, timeout_work); 897 struct blk_mq_timeout_data data = { 898 .next = 0, 899 .next_set = 0, 900 .nr_expired = 0, 901 }; 902 struct blk_mq_hw_ctx *hctx; 903 int i; 904 905 /* A deadlock might occur if a request is stuck requiring a 906 * timeout at the same time a queue freeze is waiting 907 * completion, since the timeout code would not be able to 908 * acquire the queue reference here. 909 * 910 * That's why we don't use blk_queue_enter here; instead, we use 911 * percpu_ref_tryget directly, because we need to be able to 912 * obtain a reference even in the short window between the queue 913 * starting to freeze, by dropping the first reference in 914 * blk_freeze_queue_start, and the moment the last request is 915 * consumed, marked by the instant q_usage_counter reaches 916 * zero. 917 */ 918 if (!percpu_ref_tryget(&q->q_usage_counter)) 919 return; 920 921 /* scan for the expired ones and set their ->aborted_gstate */ 922 blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data); 923 924 if (data.nr_expired) { 925 bool has_rcu = false; 926 927 /* 928 * Wait till everyone sees ->aborted_gstate. The 929 * sequential waits for SRCUs aren't ideal. If this ever 930 * becomes a problem, we can add per-hw_ctx rcu_head and 931 * wait in parallel. 932 */ 933 queue_for_each_hw_ctx(q, hctx, i) { 934 if (!hctx->nr_expired) 935 continue; 936 937 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) 938 has_rcu = true; 939 else 940 synchronize_srcu(hctx->srcu); 941 942 hctx->nr_expired = 0; 943 } 944 if (has_rcu) 945 synchronize_rcu(); 946 947 /* terminate the ones we won */ 948 blk_mq_queue_tag_busy_iter(q, blk_mq_terminate_expired, NULL); 949 } 950 951 if (data.next_set) { 952 data.next = blk_rq_timeout(round_jiffies_up(data.next)); 953 mod_timer(&q->timeout, data.next); 954 } else { 955 /* 956 * Request timeouts are handled as a forward rolling timer. If 957 * we end up here it means that no requests are pending and 958 * also that no request has been pending for a while. Mark 959 * each hctx as idle. 
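		 *
		 * (Going idle also lets users of a shared tag set
		 * recompute their fair share of tags, since an idle hctx
		 * no longer counts as an active queue.)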
960 */ 961 queue_for_each_hw_ctx(q, hctx, i) { 962 /* the hctx may be unmapped, so check it here */ 963 if (blk_mq_hw_queue_mapped(hctx)) 964 blk_mq_tag_idle(hctx); 965 } 966 } 967 blk_queue_exit(q); 968 } 969 970 struct flush_busy_ctx_data { 971 struct blk_mq_hw_ctx *hctx; 972 struct list_head *list; 973 }; 974 975 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data) 976 { 977 struct flush_busy_ctx_data *flush_data = data; 978 struct blk_mq_hw_ctx *hctx = flush_data->hctx; 979 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; 980 981 spin_lock(&ctx->lock); 982 list_splice_tail_init(&ctx->rq_list, flush_data->list); 983 sbitmap_clear_bit(sb, bitnr); 984 spin_unlock(&ctx->lock); 985 return true; 986 } 987 988 /* 989 * Process software queues that have been marked busy, splicing them 990 * to the for-dispatch 991 */ 992 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list) 993 { 994 struct flush_busy_ctx_data data = { 995 .hctx = hctx, 996 .list = list, 997 }; 998 999 sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data); 1000 } 1001 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs); 1002 1003 struct dispatch_rq_data { 1004 struct blk_mq_hw_ctx *hctx; 1005 struct request *rq; 1006 }; 1007 1008 static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr, 1009 void *data) 1010 { 1011 struct dispatch_rq_data *dispatch_data = data; 1012 struct blk_mq_hw_ctx *hctx = dispatch_data->hctx; 1013 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; 1014 1015 spin_lock(&ctx->lock); 1016 if (unlikely(!list_empty(&ctx->rq_list))) { 1017 dispatch_data->rq = list_entry_rq(ctx->rq_list.next); 1018 list_del_init(&dispatch_data->rq->queuelist); 1019 if (list_empty(&ctx->rq_list)) 1020 sbitmap_clear_bit(sb, bitnr); 1021 } 1022 spin_unlock(&ctx->lock); 1023 1024 return !dispatch_data->rq; 1025 } 1026 1027 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, 1028 struct blk_mq_ctx *start) 1029 { 1030 unsigned off = start ? start->index_hw : 0; 1031 struct dispatch_rq_data data = { 1032 .hctx = hctx, 1033 .rq = NULL, 1034 }; 1035 1036 __sbitmap_for_each_set(&hctx->ctx_map, off, 1037 dispatch_rq_from_ctx, &data); 1038 1039 return data.rq; 1040 } 1041 1042 static inline unsigned int queued_to_index(unsigned int queued) 1043 { 1044 if (!queued) 1045 return 0; 1046 1047 return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1); 1048 } 1049 1050 bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx, 1051 bool wait) 1052 { 1053 struct blk_mq_alloc_data data = { 1054 .q = rq->q, 1055 .hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), 1056 .flags = wait ? 
0 : BLK_MQ_REQ_NOWAIT, 1057 }; 1058 1059 might_sleep_if(wait); 1060 1061 if (rq->tag != -1) 1062 goto done; 1063 1064 if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag)) 1065 data.flags |= BLK_MQ_REQ_RESERVED; 1066 1067 rq->tag = blk_mq_get_tag(&data); 1068 if (rq->tag >= 0) { 1069 if (blk_mq_tag_busy(data.hctx)) { 1070 rq->rq_flags |= RQF_MQ_INFLIGHT; 1071 atomic_inc(&data.hctx->nr_active); 1072 } 1073 data.hctx->tags->rqs[rq->tag] = rq; 1074 } 1075 1076 done: 1077 if (hctx) 1078 *hctx = data.hctx; 1079 return rq->tag != -1; 1080 } 1081 1082 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, 1083 int flags, void *key) 1084 { 1085 struct blk_mq_hw_ctx *hctx; 1086 1087 hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait); 1088 1089 list_del_init(&wait->entry); 1090 blk_mq_run_hw_queue(hctx, true); 1091 return 1; 1092 } 1093 1094 /* 1095 * Mark us waiting for a tag. For shared tags, this involves hooking us into 1096 * the tag wakeups. For non-shared tags, we can simply mark us needing a 1097 * restart. For both cases, take care to check the condition again after 1098 * marking us as waiting. 1099 */ 1100 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx, 1101 struct request *rq) 1102 { 1103 struct blk_mq_hw_ctx *this_hctx = *hctx; 1104 struct sbq_wait_state *ws; 1105 wait_queue_entry_t *wait; 1106 bool ret; 1107 1108 if (!(this_hctx->flags & BLK_MQ_F_TAG_SHARED)) { 1109 if (!test_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state)) 1110 set_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state); 1111 1112 /* 1113 * It's possible that a tag was freed in the window between the 1114 * allocation failure and adding the hardware queue to the wait 1115 * queue. 1116 * 1117 * Don't clear RESTART here, someone else could have set it. 1118 * At most this will cost an extra queue run. 1119 */ 1120 return blk_mq_get_driver_tag(rq, hctx, false); 1121 } 1122 1123 wait = &this_hctx->dispatch_wait; 1124 if (!list_empty_careful(&wait->entry)) 1125 return false; 1126 1127 spin_lock(&this_hctx->lock); 1128 if (!list_empty(&wait->entry)) { 1129 spin_unlock(&this_hctx->lock); 1130 return false; 1131 } 1132 1133 ws = bt_wait_ptr(&this_hctx->tags->bitmap_tags, this_hctx); 1134 add_wait_queue(&ws->wait, wait); 1135 1136 /* 1137 * It's possible that a tag was freed in the window between the 1138 * allocation failure and adding the hardware queue to the wait 1139 * queue. 1140 */ 1141 ret = blk_mq_get_driver_tag(rq, hctx, false); 1142 if (!ret) { 1143 spin_unlock(&this_hctx->lock); 1144 return false; 1145 } 1146 1147 /* 1148 * We got a tag, remove ourselves from the wait queue to ensure 1149 * someone else gets the wakeup. 1150 */ 1151 spin_lock_irq(&ws->wait.lock); 1152 list_del_init(&wait->entry); 1153 spin_unlock_irq(&ws->wait.lock); 1154 spin_unlock(&this_hctx->lock); 1155 1156 return true; 1157 } 1158 1159 #define BLK_MQ_RESOURCE_DELAY 3 /* ms units */ 1160 1161 bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, 1162 bool got_budget) 1163 { 1164 struct blk_mq_hw_ctx *hctx; 1165 struct request *rq, *nxt; 1166 bool no_tag = false; 1167 int errors, queued; 1168 blk_status_t ret = BLK_STS_OK; 1169 1170 if (list_empty(list)) 1171 return false; 1172 1173 WARN_ON(!list_is_singular(list) && got_budget); 1174 1175 /* 1176 * Now process all the entries, sending them to the driver. 
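	 *
	 * In outline, for each request: obtain a dispatch budget (unless
	 * the caller already holds one), then a driver tag (arming the tag
	 * waitqueue on failure), then call ->queue_rq().  A return of
	 * BLK_STS_RESOURCE or BLK_STS_DEV_RESOURCE ends the loop and the
	 * leftover requests are requeued below.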
1177 */ 1178 errors = queued = 0; 1179 do { 1180 struct blk_mq_queue_data bd; 1181 1182 rq = list_first_entry(list, struct request, queuelist); 1183 1184 hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); 1185 if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) 1186 break; 1187 1188 if (!blk_mq_get_driver_tag(rq, NULL, false)) { 1189 /* 1190 * The initial allocation attempt failed, so we need to 1191 * rerun the hardware queue when a tag is freed. The 1192 * waitqueue takes care of that. If the queue is run 1193 * before we add this entry back on the dispatch list, 1194 * we'll re-run it below. 1195 */ 1196 if (!blk_mq_mark_tag_wait(&hctx, rq)) { 1197 blk_mq_put_dispatch_budget(hctx); 1198 /* 1199 * For non-shared tags, the RESTART check 1200 * will suffice. 1201 */ 1202 if (hctx->flags & BLK_MQ_F_TAG_SHARED) 1203 no_tag = true; 1204 break; 1205 } 1206 } 1207 1208 list_del_init(&rq->queuelist); 1209 1210 bd.rq = rq; 1211 1212 /* 1213 * Flag last if we have no more requests, or if we have more 1214 * but can't assign a driver tag to it. 1215 */ 1216 if (list_empty(list)) 1217 bd.last = true; 1218 else { 1219 nxt = list_first_entry(list, struct request, queuelist); 1220 bd.last = !blk_mq_get_driver_tag(nxt, NULL, false); 1221 } 1222 1223 ret = q->mq_ops->queue_rq(hctx, &bd); 1224 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) { 1225 /* 1226 * If an I/O scheduler has been configured and we got a 1227 * driver tag for the next request already, free it 1228 * again. 1229 */ 1230 if (!list_empty(list)) { 1231 nxt = list_first_entry(list, struct request, queuelist); 1232 blk_mq_put_driver_tag(nxt); 1233 } 1234 list_add(&rq->queuelist, list); 1235 __blk_mq_requeue_request(rq); 1236 break; 1237 } 1238 1239 if (unlikely(ret != BLK_STS_OK)) { 1240 errors++; 1241 blk_mq_end_request(rq, BLK_STS_IOERR); 1242 continue; 1243 } 1244 1245 queued++; 1246 } while (!list_empty(list)); 1247 1248 hctx->dispatched[queued_to_index(queued)]++; 1249 1250 /* 1251 * Any items that need requeuing? Stuff them into hctx->dispatch, 1252 * that is where we will continue on next queue run. 1253 */ 1254 if (!list_empty(list)) { 1255 bool needs_restart; 1256 1257 spin_lock(&hctx->lock); 1258 list_splice_init(list, &hctx->dispatch); 1259 spin_unlock(&hctx->lock); 1260 1261 /* 1262 * If SCHED_RESTART was set by the caller of this function and 1263 * it is no longer set that means that it was cleared by another 1264 * thread and hence that a queue rerun is needed. 1265 * 1266 * If 'no_tag' is set, that means that we failed getting 1267 * a driver tag with an I/O scheduler attached. If our dispatch 1268 * waitqueue is no longer active, ensure that we run the queue 1269 * AFTER adding our entries back to the list. 1270 * 1271 * If no I/O scheduler has been configured it is possible that 1272 * the hardware queue got stopped and restarted before requests 1273 * were pushed back onto the dispatch list. Rerun the queue to 1274 * avoid starvation. Notes: 1275 * - blk_mq_run_hw_queue() checks whether or not a queue has 1276 * been stopped before rerunning a queue. 1277 * - Some but not all block drivers stop a queue before 1278 * returning BLK_STS_RESOURCE. Two exceptions are scsi-mq 1279 * and dm-rq. 1280 * 1281 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART 1282 * bit is set, run queue after a delay to avoid IO stalls 1283 * that could otherwise occur if the queue is idle. 
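		 *
		 * The delay used is BLK_MQ_RESOURCE_DELAY (3 ms).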
		 */
		needs_restart = blk_mq_sched_needs_restart(hctx);
		if (!needs_restart ||
		    (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
			blk_mq_run_hw_queue(hctx, true);
		else if (needs_restart && (ret == BLK_STS_RESOURCE))
			blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
	}

	return (queued + errors) != 0;
}

static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	int srcu_idx;

	/*
	 * We should be running this queue from one of the CPUs that
	 * are mapped to it.
	 *
	 * There are at least two related races now between setting
	 * hctx->next_cpu from blk_mq_hctx_next_cpu() and running
	 * __blk_mq_run_hw_queue():
	 *
	 * - hctx->next_cpu is found offline in blk_mq_hctx_next_cpu(),
	 *   but later it becomes online; in that case this warning is
	 *   harmless
	 *
	 * - hctx->next_cpu is found online in blk_mq_hctx_next_cpu(),
	 *   but later it becomes offline; then the warning can't be
	 *   triggered, and we depend on the blk-mq timeout handler to
	 *   handle requests dispatched to this hctx
	 */
	if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
		cpu_online(hctx->next_cpu)) {
		printk(KERN_WARNING "run queue from wrong CPU %d, hctx %s\n",
			raw_smp_processor_id(),
			cpumask_empty(hctx->cpumask) ? "inactive": "active");
		dump_stack();
	}

	/*
	 * We can't run the queue inline with interrupts disabled.  Ensure
	 * that we catch bad users of this early.
	 */
	WARN_ON_ONCE(in_interrupt());

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	hctx_lock(hctx, &srcu_idx);
	blk_mq_sched_dispatch_requests(hctx);
	hctx_unlock(hctx, srcu_idx);
}

static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
{
	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(hctx->cpumask);
	return cpu;
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	bool tried = false;
	int next_cpu = hctx->next_cpu;

	if (hctx->queue->nr_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	if (--hctx->next_cpu_batch <= 0) {
select_cpu:
		next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
				cpu_online_mask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = blk_mq_first_mapped_cpu(hctx);
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}

	/*
	 * Do an unbound schedule if we can't find an online CPU for this
	 * hctx, which should only happen in the CPU DEAD handling path.
	 */
	if (!cpu_online(next_cpu)) {
		if (!tried) {
			tried = true;
			goto select_cpu;
		}

		/*
		 * Make sure to re-select the CPU next time once CPUs
		 * in hctx->cpumask become online again.
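		 *
		 * (Hence next_cpu_batch is set to 1 below, so the very
		 * next run re-evaluates the online mask instead of
		 * sticking with the unbound fallback.)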
1383 */ 1384 hctx->next_cpu = next_cpu; 1385 hctx->next_cpu_batch = 1; 1386 return WORK_CPU_UNBOUND; 1387 } 1388 1389 hctx->next_cpu = next_cpu; 1390 return next_cpu; 1391 } 1392 1393 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, 1394 unsigned long msecs) 1395 { 1396 if (unlikely(blk_mq_hctx_stopped(hctx))) 1397 return; 1398 1399 if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) { 1400 int cpu = get_cpu(); 1401 if (cpumask_test_cpu(cpu, hctx->cpumask)) { 1402 __blk_mq_run_hw_queue(hctx); 1403 put_cpu(); 1404 return; 1405 } 1406 1407 put_cpu(); 1408 } 1409 1410 kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work, 1411 msecs_to_jiffies(msecs)); 1412 } 1413 1414 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) 1415 { 1416 __blk_mq_delay_run_hw_queue(hctx, true, msecs); 1417 } 1418 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue); 1419 1420 bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) 1421 { 1422 int srcu_idx; 1423 bool need_run; 1424 1425 /* 1426 * When queue is quiesced, we may be switching io scheduler, or 1427 * updating nr_hw_queues, or other things, and we can't run queue 1428 * any more, even __blk_mq_hctx_has_pending() can't be called safely. 1429 * 1430 * And queue will be rerun in blk_mq_unquiesce_queue() if it is 1431 * quiesced. 1432 */ 1433 hctx_lock(hctx, &srcu_idx); 1434 need_run = !blk_queue_quiesced(hctx->queue) && 1435 blk_mq_hctx_has_pending(hctx); 1436 hctx_unlock(hctx, srcu_idx); 1437 1438 if (need_run) { 1439 __blk_mq_delay_run_hw_queue(hctx, async, 0); 1440 return true; 1441 } 1442 1443 return false; 1444 } 1445 EXPORT_SYMBOL(blk_mq_run_hw_queue); 1446 1447 void blk_mq_run_hw_queues(struct request_queue *q, bool async) 1448 { 1449 struct blk_mq_hw_ctx *hctx; 1450 int i; 1451 1452 queue_for_each_hw_ctx(q, hctx, i) { 1453 if (blk_mq_hctx_stopped(hctx)) 1454 continue; 1455 1456 blk_mq_run_hw_queue(hctx, async); 1457 } 1458 } 1459 EXPORT_SYMBOL(blk_mq_run_hw_queues); 1460 1461 /** 1462 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped 1463 * @q: request queue. 1464 * 1465 * The caller is responsible for serializing this function against 1466 * blk_mq_{start,stop}_hw_queue(). 1467 */ 1468 bool blk_mq_queue_stopped(struct request_queue *q) 1469 { 1470 struct blk_mq_hw_ctx *hctx; 1471 int i; 1472 1473 queue_for_each_hw_ctx(q, hctx, i) 1474 if (blk_mq_hctx_stopped(hctx)) 1475 return true; 1476 1477 return false; 1478 } 1479 EXPORT_SYMBOL(blk_mq_queue_stopped); 1480 1481 /* 1482 * This function is often used for pausing .queue_rq() by driver when 1483 * there isn't enough resource or some conditions aren't satisfied, and 1484 * BLK_STS_RESOURCE is usually returned. 1485 * 1486 * We do not guarantee that dispatch can be drained or blocked 1487 * after blk_mq_stop_hw_queue() returns. Please use 1488 * blk_mq_quiesce_queue() for that requirement. 1489 */ 1490 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) 1491 { 1492 cancel_delayed_work(&hctx->run_work); 1493 1494 set_bit(BLK_MQ_S_STOPPED, &hctx->state); 1495 } 1496 EXPORT_SYMBOL(blk_mq_stop_hw_queue); 1497 1498 /* 1499 * This function is often used for pausing .queue_rq() by driver when 1500 * there isn't enough resource or some conditions aren't satisfied, and 1501 * BLK_STS_RESOURCE is usually returned. 1502 * 1503 * We do not guarantee that dispatch can be drained or blocked 1504 * after blk_mq_stop_hw_queues() returns. Please use 1505 * blk_mq_quiesce_queue() for that requirement. 
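 *
 * A rough driver-side sketch (illustrative only): pause dispatch while a
 * resource is exhausted and restart once it frees up:
 *
 *	blk_mq_stop_hw_queues(q);
 *	... later, e.g. from a completion or event handler ...
 *	blk_mq_start_stopped_hw_queues(q, true);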
1506 */ 1507 void blk_mq_stop_hw_queues(struct request_queue *q) 1508 { 1509 struct blk_mq_hw_ctx *hctx; 1510 int i; 1511 1512 queue_for_each_hw_ctx(q, hctx, i) 1513 blk_mq_stop_hw_queue(hctx); 1514 } 1515 EXPORT_SYMBOL(blk_mq_stop_hw_queues); 1516 1517 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) 1518 { 1519 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 1520 1521 blk_mq_run_hw_queue(hctx, false); 1522 } 1523 EXPORT_SYMBOL(blk_mq_start_hw_queue); 1524 1525 void blk_mq_start_hw_queues(struct request_queue *q) 1526 { 1527 struct blk_mq_hw_ctx *hctx; 1528 int i; 1529 1530 queue_for_each_hw_ctx(q, hctx, i) 1531 blk_mq_start_hw_queue(hctx); 1532 } 1533 EXPORT_SYMBOL(blk_mq_start_hw_queues); 1534 1535 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) 1536 { 1537 if (!blk_mq_hctx_stopped(hctx)) 1538 return; 1539 1540 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 1541 blk_mq_run_hw_queue(hctx, async); 1542 } 1543 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue); 1544 1545 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) 1546 { 1547 struct blk_mq_hw_ctx *hctx; 1548 int i; 1549 1550 queue_for_each_hw_ctx(q, hctx, i) 1551 blk_mq_start_stopped_hw_queue(hctx, async); 1552 } 1553 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues); 1554 1555 static void blk_mq_run_work_fn(struct work_struct *work) 1556 { 1557 struct blk_mq_hw_ctx *hctx; 1558 1559 hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work); 1560 1561 /* 1562 * If we are stopped, don't run the queue. 1563 */ 1564 if (test_bit(BLK_MQ_S_STOPPED, &hctx->state)) 1565 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 1566 1567 __blk_mq_run_hw_queue(hctx); 1568 } 1569 1570 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx, 1571 struct request *rq, 1572 bool at_head) 1573 { 1574 struct blk_mq_ctx *ctx = rq->mq_ctx; 1575 1576 lockdep_assert_held(&ctx->lock); 1577 1578 trace_block_rq_insert(hctx->queue, rq); 1579 1580 if (at_head) 1581 list_add(&rq->queuelist, &ctx->rq_list); 1582 else 1583 list_add_tail(&rq->queuelist, &ctx->rq_list); 1584 } 1585 1586 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, 1587 bool at_head) 1588 { 1589 struct blk_mq_ctx *ctx = rq->mq_ctx; 1590 1591 lockdep_assert_held(&ctx->lock); 1592 1593 __blk_mq_insert_req_list(hctx, rq, at_head); 1594 blk_mq_hctx_mark_pending(hctx, ctx); 1595 } 1596 1597 /* 1598 * Should only be used carefully, when the caller knows we want to 1599 * bypass a potential IO scheduler on the target device. 
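 *
 * The request is placed directly on hctx->dispatch, so it will never be
 * merged with or reordered against other requests by an elevator.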
1600 */ 1601 void blk_mq_request_bypass_insert(struct request *rq, bool run_queue) 1602 { 1603 struct blk_mq_ctx *ctx = rq->mq_ctx; 1604 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu); 1605 1606 spin_lock(&hctx->lock); 1607 list_add_tail(&rq->queuelist, &hctx->dispatch); 1608 spin_unlock(&hctx->lock); 1609 1610 if (run_queue) 1611 blk_mq_run_hw_queue(hctx, false); 1612 } 1613 1614 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, 1615 struct list_head *list) 1616 1617 { 1618 /* 1619 * preemption doesn't flush plug list, so it's possible ctx->cpu is 1620 * offline now 1621 */ 1622 spin_lock(&ctx->lock); 1623 while (!list_empty(list)) { 1624 struct request *rq; 1625 1626 rq = list_first_entry(list, struct request, queuelist); 1627 BUG_ON(rq->mq_ctx != ctx); 1628 list_del_init(&rq->queuelist); 1629 __blk_mq_insert_req_list(hctx, rq, false); 1630 } 1631 blk_mq_hctx_mark_pending(hctx, ctx); 1632 spin_unlock(&ctx->lock); 1633 } 1634 1635 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b) 1636 { 1637 struct request *rqa = container_of(a, struct request, queuelist); 1638 struct request *rqb = container_of(b, struct request, queuelist); 1639 1640 return !(rqa->mq_ctx < rqb->mq_ctx || 1641 (rqa->mq_ctx == rqb->mq_ctx && 1642 blk_rq_pos(rqa) < blk_rq_pos(rqb))); 1643 } 1644 1645 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) 1646 { 1647 struct blk_mq_ctx *this_ctx; 1648 struct request_queue *this_q; 1649 struct request *rq; 1650 LIST_HEAD(list); 1651 LIST_HEAD(ctx_list); 1652 unsigned int depth; 1653 1654 list_splice_init(&plug->mq_list, &list); 1655 1656 list_sort(NULL, &list, plug_ctx_cmp); 1657 1658 this_q = NULL; 1659 this_ctx = NULL; 1660 depth = 0; 1661 1662 while (!list_empty(&list)) { 1663 rq = list_entry_rq(list.next); 1664 list_del_init(&rq->queuelist); 1665 BUG_ON(!rq->q); 1666 if (rq->mq_ctx != this_ctx) { 1667 if (this_ctx) { 1668 trace_block_unplug(this_q, depth, from_schedule); 1669 blk_mq_sched_insert_requests(this_q, this_ctx, 1670 &ctx_list, 1671 from_schedule); 1672 } 1673 1674 this_ctx = rq->mq_ctx; 1675 this_q = rq->q; 1676 depth = 0; 1677 } 1678 1679 depth++; 1680 list_add_tail(&rq->queuelist, &ctx_list); 1681 } 1682 1683 /* 1684 * If 'this_ctx' is set, we know we have entries to complete 1685 * on 'ctx_list'. Do those. 
	 */
	if (this_ctx) {
		trace_block_unplug(this_q, depth, from_schedule);
		blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
						from_schedule);
	}
}

static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
	blk_init_request_from_bio(rq, bio);

	blk_rq_set_rl(rq, blk_get_rl(rq->q, bio));

	blk_account_io_start(rq, true);
}

static inline void blk_mq_queue_io(struct blk_mq_hw_ctx *hctx,
				   struct blk_mq_ctx *ctx,
				   struct request *rq)
{
	spin_lock(&ctx->lock);
	__blk_mq_insert_request(hctx, rq, false);
	spin_unlock(&ctx->lock);
}

static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	if (rq->tag != -1)
		return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);

	return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
}

static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
					    struct request *rq,
					    blk_qc_t *cookie)
{
	struct request_queue *q = rq->q;
	struct blk_mq_queue_data bd = {
		.rq = rq,
		.last = true,
	};
	blk_qc_t new_cookie;
	blk_status_t ret;

	new_cookie = request_to_qc_t(hctx, rq);

	/*
	 * If the queue is OK, we are done.  On an error the caller may
	 * kill the request.  For any other (busy) status, just add it
	 * back to our list as we previously would have done.
	 */
	ret = q->mq_ops->queue_rq(hctx, &bd);
	switch (ret) {
	case BLK_STS_OK:
		*cookie = new_cookie;
		break;
	case BLK_STS_RESOURCE:
	case BLK_STS_DEV_RESOURCE:
		__blk_mq_requeue_request(rq);
		break;
	default:
		*cookie = BLK_QC_T_NONE;
		break;
	}

	return ret;
}

static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
						struct request *rq,
						blk_qc_t *cookie,
						bool bypass_insert)
{
	struct request_queue *q = rq->q;
	bool run_queue = true;

	/*
	 * An RCU or SRCU read lock is needed before checking the quiesced
	 * flag.
	 *
	 * When the queue is stopped or quiesced, ignore 'bypass_insert'
	 * from blk_mq_request_issue_directly(), and return BLK_STS_OK to
	 * the caller to avoid the driver trying to dispatch again.
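	 *
	 * Otherwise: an attached elevator (unless bypassing), a failed
	 * dispatch budget or a failed driver tag all fall back to the
	 * insert path below, which returns BLK_STS_RESOURCE when
	 * 'bypass_insert' is set so that the caller can retry later.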
1770 */ 1771 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) { 1772 run_queue = false; 1773 bypass_insert = false; 1774 goto insert; 1775 } 1776 1777 if (q->elevator && !bypass_insert) 1778 goto insert; 1779 1780 if (!blk_mq_get_dispatch_budget(hctx)) 1781 goto insert; 1782 1783 if (!blk_mq_get_driver_tag(rq, NULL, false)) { 1784 blk_mq_put_dispatch_budget(hctx); 1785 goto insert; 1786 } 1787 1788 return __blk_mq_issue_directly(hctx, rq, cookie); 1789 insert: 1790 if (bypass_insert) 1791 return BLK_STS_RESOURCE; 1792 1793 blk_mq_sched_insert_request(rq, false, run_queue, false); 1794 return BLK_STS_OK; 1795 } 1796 1797 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, 1798 struct request *rq, blk_qc_t *cookie) 1799 { 1800 blk_status_t ret; 1801 int srcu_idx; 1802 1803 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); 1804 1805 hctx_lock(hctx, &srcu_idx); 1806 1807 ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false); 1808 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) 1809 blk_mq_sched_insert_request(rq, false, true, false); 1810 else if (ret != BLK_STS_OK) 1811 blk_mq_end_request(rq, ret); 1812 1813 hctx_unlock(hctx, srcu_idx); 1814 } 1815 1816 blk_status_t blk_mq_request_issue_directly(struct request *rq) 1817 { 1818 blk_status_t ret; 1819 int srcu_idx; 1820 blk_qc_t unused_cookie; 1821 struct blk_mq_ctx *ctx = rq->mq_ctx; 1822 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu); 1823 1824 hctx_lock(hctx, &srcu_idx); 1825 ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true); 1826 hctx_unlock(hctx, srcu_idx); 1827 1828 return ret; 1829 } 1830 1831 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) 1832 { 1833 const int is_sync = op_is_sync(bio->bi_opf); 1834 const int is_flush_fua = op_is_flush(bio->bi_opf); 1835 struct blk_mq_alloc_data data = { .flags = 0 }; 1836 struct request *rq; 1837 unsigned int request_count = 0; 1838 struct blk_plug *plug; 1839 struct request *same_queue_rq = NULL; 1840 blk_qc_t cookie; 1841 unsigned int wb_acct; 1842 1843 blk_queue_bounce(q, &bio); 1844 1845 blk_queue_split(q, &bio); 1846 1847 if (!bio_integrity_prep(bio)) 1848 return BLK_QC_T_NONE; 1849 1850 if (!is_flush_fua && !blk_queue_nomerges(q) && 1851 blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq)) 1852 return BLK_QC_T_NONE; 1853 1854 if (blk_mq_sched_bio_merge(q, bio)) 1855 return BLK_QC_T_NONE; 1856 1857 wb_acct = wbt_wait(q->rq_wb, bio, NULL); 1858 1859 trace_block_getrq(q, bio, bio->bi_opf); 1860 1861 rq = blk_mq_get_request(q, bio, bio->bi_opf, &data); 1862 if (unlikely(!rq)) { 1863 __wbt_done(q->rq_wb, wb_acct); 1864 if (bio->bi_opf & REQ_NOWAIT) 1865 bio_wouldblock_error(bio); 1866 return BLK_QC_T_NONE; 1867 } 1868 1869 wbt_track(&rq->issue_stat, wb_acct); 1870 1871 cookie = request_to_qc_t(data.hctx, rq); 1872 1873 plug = current->plug; 1874 if (unlikely(is_flush_fua)) { 1875 blk_mq_put_ctx(data.ctx); 1876 blk_mq_bio_to_request(rq, bio); 1877 1878 /* bypass scheduler for flush rq */ 1879 blk_insert_flush(rq); 1880 blk_mq_run_hw_queue(data.hctx, true); 1881 } else if (plug && q->nr_hw_queues == 1) { 1882 struct request *last = NULL; 1883 1884 blk_mq_put_ctx(data.ctx); 1885 blk_mq_bio_to_request(rq, bio); 1886 1887 /* 1888 * @request_count may become stale because of schedule 1889 * out, so check the list again. 
1890 */ 1891 if (list_empty(&plug->mq_list)) 1892 request_count = 0; 1893 else if (blk_queue_nomerges(q)) 1894 request_count = blk_plug_queued_count(q); 1895 1896 if (!request_count) 1897 trace_block_plug(q); 1898 else 1899 last = list_entry_rq(plug->mq_list.prev); 1900 1901 if (request_count >= BLK_MAX_REQUEST_COUNT || (last && 1902 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) { 1903 blk_flush_plug_list(plug, false); 1904 trace_block_plug(q); 1905 } 1906 1907 list_add_tail(&rq->queuelist, &plug->mq_list); 1908 } else if (plug && !blk_queue_nomerges(q)) { 1909 blk_mq_bio_to_request(rq, bio); 1910 1911 /* 1912 * We do limited plugging. If the bio can be merged, do that. 1913 * Otherwise the existing request in the plug list will be 1914 * issued. So the plug list will have one request at most 1915 * The plug list might get flushed before this. If that happens, 1916 * the plug list is empty, and same_queue_rq is invalid. 1917 */ 1918 if (list_empty(&plug->mq_list)) 1919 same_queue_rq = NULL; 1920 if (same_queue_rq) 1921 list_del_init(&same_queue_rq->queuelist); 1922 list_add_tail(&rq->queuelist, &plug->mq_list); 1923 1924 blk_mq_put_ctx(data.ctx); 1925 1926 if (same_queue_rq) { 1927 data.hctx = blk_mq_map_queue(q, 1928 same_queue_rq->mq_ctx->cpu); 1929 blk_mq_try_issue_directly(data.hctx, same_queue_rq, 1930 &cookie); 1931 } 1932 } else if (q->nr_hw_queues > 1 && is_sync) { 1933 blk_mq_put_ctx(data.ctx); 1934 blk_mq_bio_to_request(rq, bio); 1935 blk_mq_try_issue_directly(data.hctx, rq, &cookie); 1936 } else if (q->elevator) { 1937 blk_mq_put_ctx(data.ctx); 1938 blk_mq_bio_to_request(rq, bio); 1939 blk_mq_sched_insert_request(rq, false, true, true); 1940 } else { 1941 blk_mq_put_ctx(data.ctx); 1942 blk_mq_bio_to_request(rq, bio); 1943 blk_mq_queue_io(data.hctx, data.ctx, rq); 1944 blk_mq_run_hw_queue(data.hctx, true); 1945 } 1946 1947 return cookie; 1948 } 1949 1950 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, 1951 unsigned int hctx_idx) 1952 { 1953 struct page *page; 1954 1955 if (tags->rqs && set->ops->exit_request) { 1956 int i; 1957 1958 for (i = 0; i < tags->nr_tags; i++) { 1959 struct request *rq = tags->static_rqs[i]; 1960 1961 if (!rq) 1962 continue; 1963 set->ops->exit_request(set, rq, hctx_idx); 1964 tags->static_rqs[i] = NULL; 1965 } 1966 } 1967 1968 while (!list_empty(&tags->page_list)) { 1969 page = list_first_entry(&tags->page_list, struct page, lru); 1970 list_del_init(&page->lru); 1971 /* 1972 * Remove kmemleak object previously allocated in 1973 * blk_mq_init_rq_map(). 
1974 */ 1975 kmemleak_free(page_address(page)); 1976 __free_pages(page, page->private); 1977 } 1978 } 1979 1980 void blk_mq_free_rq_map(struct blk_mq_tags *tags) 1981 { 1982 kfree(tags->rqs); 1983 tags->rqs = NULL; 1984 kfree(tags->static_rqs); 1985 tags->static_rqs = NULL; 1986 1987 blk_mq_free_tags(tags); 1988 } 1989 1990 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, 1991 unsigned int hctx_idx, 1992 unsigned int nr_tags, 1993 unsigned int reserved_tags) 1994 { 1995 struct blk_mq_tags *tags; 1996 int node; 1997 1998 node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx); 1999 if (node == NUMA_NO_NODE) 2000 node = set->numa_node; 2001 2002 tags = blk_mq_init_tags(nr_tags, reserved_tags, node, 2003 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags)); 2004 if (!tags) 2005 return NULL; 2006 2007 tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *), 2008 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, 2009 node); 2010 if (!tags->rqs) { 2011 blk_mq_free_tags(tags); 2012 return NULL; 2013 } 2014 2015 tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *), 2016 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, 2017 node); 2018 if (!tags->static_rqs) { 2019 kfree(tags->rqs); 2020 blk_mq_free_tags(tags); 2021 return NULL; 2022 } 2023 2024 return tags; 2025 } 2026 2027 static size_t order_to_size(unsigned int order) 2028 { 2029 return (size_t)PAGE_SIZE << order; 2030 } 2031 2032 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, 2033 unsigned int hctx_idx, int node) 2034 { 2035 int ret; 2036 2037 if (set->ops->init_request) { 2038 ret = set->ops->init_request(set, rq, hctx_idx, node); 2039 if (ret) 2040 return ret; 2041 } 2042 2043 seqcount_init(&rq->gstate_seq); 2044 u64_stats_init(&rq->aborted_gstate_sync); 2045 return 0; 2046 } 2047 2048 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, 2049 unsigned int hctx_idx, unsigned int depth) 2050 { 2051 unsigned int i, j, entries_per_page, max_order = 4; 2052 size_t rq_size, left; 2053 int node; 2054 2055 node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx); 2056 if (node == NUMA_NO_NODE) 2057 node = set->numa_node; 2058 2059 INIT_LIST_HEAD(&tags->page_list); 2060 2061 /* 2062 * rq_size is the size of the request plus driver payload, rounded 2063 * to the cacheline size 2064 */ 2065 rq_size = round_up(sizeof(struct request) + set->cmd_size, 2066 cache_line_size()); 2067 left = rq_size * depth; 2068 2069 for (i = 0; i < depth; ) { 2070 int this_order = max_order; 2071 struct page *page; 2072 int to_do; 2073 void *p; 2074 2075 while (this_order && left < order_to_size(this_order - 1)) 2076 this_order--; 2077 2078 do { 2079 page = alloc_pages_node(node, 2080 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO, 2081 this_order); 2082 if (page) 2083 break; 2084 if (!this_order--) 2085 break; 2086 if (order_to_size(this_order) < rq_size) 2087 break; 2088 } while (1); 2089 2090 if (!page) 2091 goto fail; 2092 2093 page->private = this_order; 2094 list_add_tail(&page->lru, &tags->page_list); 2095 2096 p = page_address(page); 2097 /* 2098 * Allow kmemleak to scan these pages as they contain pointers 2099 * to additional allocations like via ops->init_request(). 

/*
 * 'cpu' is going away. splice any existing rq_list entries from this
 * software queue to the hw queue dispatch list, and ensure that it
 * gets run.
 */
static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	LIST_HEAD(tmp);

	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
	ctx = __blk_mq_get_ctx(hctx->queue, cpu);

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_list)) {
		list_splice_init(&ctx->rq_list, &tmp);
		blk_mq_hctx_clear_pending(hctx, ctx);
	}
	spin_unlock(&ctx->lock);

	if (list_empty(&tmp))
		return 0;

	spin_lock(&hctx->lock);
	list_splice_tail_init(&tmp, &hctx->dispatch);
	spin_unlock(&hctx->lock);

	blk_mq_run_hw_queue(hctx, true);
	return 0;
}

static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
{
	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
					    &hctx->cpuhp_dead);
}

/* hctx->ctxs will be freed in queue's release handler */
static void blk_mq_exit_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	blk_mq_debugfs_unregister_hctx(hctx);

	if (blk_mq_hw_queue_mapped(hctx))
		blk_mq_tag_idle(hctx);

	if (set->ops->exit_request)
		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);

	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);

	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);

	if (hctx->flags & BLK_MQ_F_BLOCKING)
		cleanup_srcu_struct(hctx->srcu);

	blk_mq_remove_cpuhp(hctx);
	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
}

static void blk_mq_exit_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set, int nr_queue)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (i == nr_queue)
			break;
		blk_mq_exit_hctx(q, set, hctx, i);
	}
}

static int blk_mq_init_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
{
	int node;

	node = hctx->numa_node;
	if (node == NUMA_NO_NODE)
		node = hctx->numa_node = set->numa_node;

	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
	spin_lock_init(&hctx->lock);
	INIT_LIST_HEAD(&hctx->dispatch);
	hctx->queue = q;
	hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;

	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);

	hctx->tags = set->tags[hctx_idx];

	/*
	 * Allocate space for all possible cpus to avoid allocation at
	 * runtime
	 */
	hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
					GFP_KERNEL, node);
	if (!hctx->ctxs)
		goto unregister_cpu_notifier;

	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
			      node))
		goto free_ctxs;

	hctx->nr_ctx = 0;

	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
	INIT_LIST_HEAD(&hctx->dispatch_wait.entry);

	if (set->ops->init_hctx &&
	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
		goto free_bitmap;

	if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
		goto exit_hctx;

	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
	if (!hctx->fq)
		goto sched_exit_hctx;

	if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
		goto free_fq;

	if (hctx->flags & BLK_MQ_F_BLOCKING)
		init_srcu_struct(hctx->srcu);

	blk_mq_debugfs_register_hctx(q, hctx);

	return 0;

 free_fq:
	kfree(hctx->fq);
 sched_exit_hctx:
	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
 exit_hctx:
	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);
 free_bitmap:
	sbitmap_free(&hctx->ctx_map);
 free_ctxs:
	kfree(hctx->ctxs);
 unregister_cpu_notifier:
	blk_mq_remove_cpuhp(hctx);
	return -1;
}
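
/*
 * Usage note (an illustrative sketch): the srcu init above is only reached
 * when a driver declares that its ->queue_rq() may sleep by setting
 * BLK_MQ_F_BLOCKING in its tag set flags, e.g.
 *
 *	set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
 *
 * blk-mq then sizes the hctx with a trailing srcu_struct (see
 * blk_mq_hw_ctx_size() below) and protects dispatch with SRCU instead of
 * plain RCU, so queue quiescing still works for such drivers.
 */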

static void blk_mq_init_cpu_queues(struct request_queue *q,
				   unsigned int nr_hw_queues)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
		struct blk_mq_hw_ctx *hctx;

		__ctx->cpu = i;
		spin_lock_init(&__ctx->lock);
		INIT_LIST_HEAD(&__ctx->rq_list);
		__ctx->queue = q;

		/*
		 * Set local node, IFF we have more than one hw queue. If
		 * not, we remain on the home node of the device
		 */
		hctx = blk_mq_map_queue(q, i);
		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
			hctx->numa_node = local_memory_node(cpu_to_node(i));
	}
}

static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
{
	int ret = 0;

	set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
					set->queue_depth, set->reserved_tags);
	if (!set->tags[hctx_idx])
		return false;

	ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
			       set->queue_depth);
	if (!ret)
		return true;

	blk_mq_free_rq_map(set->tags[hctx_idx]);
	set->tags[hctx_idx] = NULL;
	return false;
}

static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
					 unsigned int hctx_idx)
{
	if (set->tags[hctx_idx]) {
		blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
		blk_mq_free_rq_map(set->tags[hctx_idx]);
		set->tags[hctx_idx] = NULL;
	}
}

static void blk_mq_map_swqueue(struct request_queue *q)
{
	unsigned int i;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	struct blk_mq_tag_set *set = q->tag_set;

	/*
	 * Avoid others reading incomplete hctx->cpumask through sysfs
	 */
	mutex_lock(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i) {
		cpumask_clear(hctx->cpumask);
		hctx->nr_ctx = 0;
	}

	/*
	 * Map software to hardware queues.
	 */
	for_each_possible_cpu(i) {
		ctx = per_cpu_ptr(q->queue_ctx, i);
		hctx = blk_mq_map_queue(q, i);

		cpumask_set_cpu(i, hctx->cpumask);
		ctx->index_hw = hctx->nr_ctx;
		hctx->ctxs[hctx->nr_ctx++] = ctx;
	}

	mutex_unlock(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i) {
		/* every hctx should get mapped by at least one CPU */
		WARN_ON(!hctx->nr_ctx);

		hctx->tags = set->tags[i];
		WARN_ON(!hctx->tags);

		/*
		 * Set the map size to the number of mapped software queues.
		 * This is more accurate and more efficient than looping
		 * over all possibly mapped software queues.
		 */
		sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);

		/*
		 * Initialize batch roundrobin counts
		 */
		hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}
}
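
/*
 * Illustrative mapping (assumed topology, not computed from real hardware):
 * with 4 possible CPUs and 2 hardware queues, suppose the queue map ends up
 * as mq_map[] = { 0, 0, 1, 1 }. After the loop above:
 *
 *	hctx0: cpumask 0-1, nr_ctx = 2, ctxs = { ctx of CPU0, ctx of CPU1 }
 *	hctx1: cpumask 2-3, nr_ctx = 2, ctxs = { ctx of CPU2, ctx of CPU3 }
 *
 * Each ctx->index_hw is its slot in that per-hctx array (so CPU3's ctx has
 * index_hw == 1), which is the bit blk_mq_hctx_mark_pending() sets in
 * hctx->ctx_map. The exact CPU spread depends on blk_mq_map_queues() or the
 * driver's ->map_queues().
 */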

/*
 * Caller needs to ensure that we're either frozen/quiesced, or that
 * the queue isn't live yet.
 */
static void queue_set_hctx_shared(struct request_queue *q, bool shared)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (shared) {
			if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
				atomic_inc(&q->shared_hctx_restart);
			hctx->flags |= BLK_MQ_F_TAG_SHARED;
		} else {
			if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
				atomic_dec(&q->shared_hctx_restart);
			hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
		}
	}
}

static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
					bool shared)
{
	struct request_queue *q;

	lockdep_assert_held(&set->tag_list_lock);

	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_freeze_queue(q);
		queue_set_hctx_shared(q, shared);
		blk_mq_unfreeze_queue(q);
	}
}

static void blk_mq_del_queue_tag_set(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	mutex_lock(&set->tag_list_lock);
	list_del_rcu(&q->tag_set_list);
	INIT_LIST_HEAD(&q->tag_set_list);
	if (list_is_singular(&set->tag_list)) {
		/* just transitioned to unshared */
		set->flags &= ~BLK_MQ_F_TAG_SHARED;
		/* update existing queue */
		blk_mq_update_tag_set_depth(set, false);
	}
	mutex_unlock(&set->tag_list_lock);

	synchronize_rcu();
}

static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
				     struct request_queue *q)
{
	q->tag_set = set;

	mutex_lock(&set->tag_list_lock);

	/*
	 * Check to see if we're transitioning to shared (from 1 to 2 queues).
	 */
	if (!list_empty(&set->tag_list) &&
	    !(set->flags & BLK_MQ_F_TAG_SHARED)) {
		set->flags |= BLK_MQ_F_TAG_SHARED;
		/* update existing queue */
		blk_mq_update_tag_set_depth(set, true);
	}
	if (set->flags & BLK_MQ_F_TAG_SHARED)
		queue_set_hctx_shared(q, true);
	list_add_tail_rcu(&q->tag_set_list, &set->tag_list);

	mutex_unlock(&set->tag_list_lock);
}
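
/*
 * Illustrative scenario (hypothetical device): a controller exposing several
 * logical units can reuse one tag set for all of them by calling
 * blk_mq_init_queue() once per unit with the same set:
 *
 *	q0 = blk_mq_init_queue(&ctrl->tag_set);
 *	q1 = blk_mq_init_queue(&ctrl->tag_set);
 *
 * The second call takes the branch above: BLK_MQ_F_TAG_SHARED is set on the
 * tag set and, via blk_mq_update_tag_set_depth(), on the hctxs of every queue
 * already on the list, so tag allocation starts being balanced across the
 * active queues. ctrl is a made-up name for the driver's per-controller data.
 */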

/*
 * It is the actual release handler for mq, but we do it from
 * request queue's release handler for avoiding use-after-free
 * and headache because q->mq_kobj shouldn't have been introduced,
 * but we can't group ctx/kctx kobj without it.
 */
void blk_mq_release(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	/* hctx kobj stays in hctx */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx)
			continue;
		kobject_put(&hctx->kobj);
	}

	q->mq_map = NULL;

	kfree(q->queue_hw_ctx);

	/*
	 * release .mq_kobj and sw queue's kobject now because
	 * both share lifetime with request queue.
	 */
	blk_mq_sysfs_deinit(q);

	free_percpu(q->queue_ctx);
}

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
{
	struct request_queue *uninit_q, *q;

	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node, NULL);
	if (!uninit_q)
		return ERR_PTR(-ENOMEM);

	q = blk_mq_init_allocated_queue(set, uninit_q);
	if (IS_ERR(q))
		blk_cleanup_queue(uninit_q);

	return q;
}
EXPORT_SYMBOL(blk_mq_init_queue);
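
/*
 * Illustrative call order (hypothetical driver): blk_mq_init_queue() is the
 * usual entry point once a tag set exists, and teardown runs in the reverse
 * order:
 *
 *	ret = blk_mq_alloc_tag_set(&mydrv->tag_set);
 *	if (ret)
 *		return ret;
 *	q = blk_mq_init_queue(&mydrv->tag_set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(&mydrv->tag_set);
 *		return PTR_ERR(q);
 *	}
 *	...
 *	blk_cleanup_queue(q);
 *	blk_mq_free_tag_set(&mydrv->tag_set);
 *
 * mydrv is a made-up per-device structure.
 */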

static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
{
	int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);

	BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
			   __alignof__(struct blk_mq_hw_ctx)) !=
		     sizeof(struct blk_mq_hw_ctx));

	if (tag_set->flags & BLK_MQ_F_BLOCKING)
		hw_ctx_size += sizeof(struct srcu_struct);

	return hw_ctx_size;
}

static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
				   struct request_queue *q)
{
	int i, j;
	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;

	blk_mq_sysfs_unregister(q);

	/* protect against switching io scheduler */
	mutex_lock(&q->sysfs_lock);
	for (i = 0; i < set->nr_hw_queues; i++) {
		int node;

		if (hctxs[i])
			continue;

		node = blk_mq_hw_queue_to_node(q->mq_map, i);
		hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
					GFP_KERNEL, node);
		if (!hctxs[i])
			break;

		if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
					     node)) {
			kfree(hctxs[i]);
			hctxs[i] = NULL;
			break;
		}

		atomic_set(&hctxs[i]->nr_active, 0);
		hctxs[i]->numa_node = node;
		hctxs[i]->queue_num = i;

		if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
			free_cpumask_var(hctxs[i]->cpumask);
			kfree(hctxs[i]);
			hctxs[i] = NULL;
			break;
		}
		blk_mq_hctx_kobj_init(hctxs[i]);
	}
	for (j = i; j < q->nr_hw_queues; j++) {
		struct blk_mq_hw_ctx *hctx = hctxs[j];

		if (hctx) {
			if (hctx->tags)
				blk_mq_free_map_and_requests(set, j);
			blk_mq_exit_hctx(q, set, hctx, j);
			kobject_put(&hctx->kobj);
			hctxs[j] = NULL;
		}
	}
	q->nr_hw_queues = i;
	mutex_unlock(&q->sysfs_lock);
	blk_mq_sysfs_register(q);
}

struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q)
{
	/* mark the queue as mq asap */
	q->mq_ops = set->ops;

	q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
					     blk_mq_poll_stats_bkt,
					     BLK_MQ_POLL_STATS_BKTS, q);
	if (!q->poll_cb)
		goto err_exit;

	q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
	if (!q->queue_ctx)
		goto err_exit;

	/* init q->mq_kobj and sw queues' kobjects */
	blk_mq_sysfs_init(q);

	q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
				       GFP_KERNEL, set->numa_node);
	if (!q->queue_hw_ctx)
		goto err_percpu;

	q->mq_map = set->mq_map;

	blk_mq_realloc_hw_ctxs(set, q);
	if (!q->nr_hw_queues)
		goto err_hctxs;

	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);

	q->nr_queues = nr_cpu_ids;

	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;

	if (!(set->flags & BLK_MQ_F_SG_MERGE))
		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);

	q->sg_reserved_size = INT_MAX;

	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
	INIT_LIST_HEAD(&q->requeue_list);
	spin_lock_init(&q->requeue_lock);

	blk_queue_make_request(q, blk_mq_make_request);
	if (q->mq_ops->poll)
		q->poll_fn = blk_mq_poll;

	/*
	 * Do this after blk_queue_make_request() overrides it...
	 */
	q->nr_requests = set->queue_depth;

	/*
	 * Default to classic polling
	 */
	q->poll_nsec = -1;

	if (set->ops->complete)
		blk_queue_softirq_done(q, set->ops->complete);

	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
	blk_mq_add_queue_tag_set(set, q);
	blk_mq_map_swqueue(q);

	if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
		int ret;

		ret = blk_mq_sched_init(q);
		if (ret)
			return ERR_PTR(ret);
	}

	return q;

err_hctxs:
	kfree(q->queue_hw_ctx);
err_percpu:
	free_percpu(q->queue_ctx);
err_exit:
	q->mq_ops = NULL;
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(blk_mq_init_allocated_queue);

void blk_mq_free_queue(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	blk_mq_del_queue_tag_set(q);
	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
}

/* Basically redo blk_mq_init_queue with queue frozen */
static void blk_mq_queue_reinit(struct request_queue *q)
{
	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));

	blk_mq_debugfs_unregister_hctxs(q);
	blk_mq_sysfs_unregister(q);

	/*
	 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
	 * we should change hctx numa_node according to the new topology (this
	 * involves freeing and re-allocating memory, worth doing?)
	 */
	blk_mq_map_swqueue(q);

	blk_mq_sysfs_register(q);
	blk_mq_debugfs_register_hctxs(q);
}

static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	int i;

	for (i = 0; i < set->nr_hw_queues; i++)
		if (!__blk_mq_alloc_rq_map(set, i))
			goto out_unwind;

	return 0;

out_unwind:
	while (--i >= 0)
		blk_mq_free_rq_map(set->tags[i]);

	return -ENOMEM;
}

/*
 * Allocate the request maps associated with this tag_set. Note that this
 * may reduce the depth asked for, if memory is tight. set->queue_depth
 * will be updated to reflect the allocated depth.
 */
static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	unsigned int depth;
	int err;

	depth = set->queue_depth;
	do {
		err = __blk_mq_alloc_rq_maps(set);
		if (!err)
			break;

		set->queue_depth >>= 1;
		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
			err = -ENOMEM;
			break;
		}
	} while (set->queue_depth);

	if (!set->queue_depth || err) {
		pr_err("blk-mq: failed to allocate request map\n");
		return -ENOMEM;
	}

	if (depth != set->queue_depth)
		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
			depth, set->queue_depth);

	return 0;
}
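
/*
 * Worked example with assumed numbers: a set that asks for queue_depth = 256
 * but keeps failing __blk_mq_alloc_rq_maps() under memory pressure is retried
 * at 128 and then 64; if 64 succeeds, set->queue_depth ends up as 64 and the
 * pr_info() above reports "reduced tag depth (256 -> 64)". The retry stops
 * once the depth would drop below set->reserved_tags + BLK_MQ_TAG_MIN, at
 * which point -ENOMEM is returned instead.
 */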

static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
{
	if (set->ops->map_queues) {
		int cpu;
		/*
		 * transport .map_queues is usually done in the following
		 * way:
		 *
		 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
		 * 	mask = get_cpu_mask(queue)
		 * 	for_each_cpu(cpu, mask)
		 * 		set->mq_map[cpu] = queue;
		 * }
		 *
		 * When we need to remap, the table has to be cleared for
		 * killing stale mapping since one CPU may not be mapped
		 * to any hw queue.
		 */
		for_each_possible_cpu(cpu)
			set->mq_map[cpu] = 0;

		return set->ops->map_queues(set);
	} else
		return blk_mq_map_queues(set);
}

/*
 * Alloc a tag set to be associated with one or more request queues.
 * May fail with EINVAL for various error conditions. May adjust the
 * requested depth down, if it is too large. In that case, the set
 * value will be stored in set->queue_depth.
 */
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
	int ret;

	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);

	if (!set->nr_hw_queues)
		return -EINVAL;
	if (!set->queue_depth)
		return -EINVAL;
	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
		return -EINVAL;

	if (!set->ops->queue_rq)
		return -EINVAL;

	if (!set->ops->get_budget ^ !set->ops->put_budget)
		return -EINVAL;

	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
		pr_info("blk-mq: reduced tag depth to %u\n",
			BLK_MQ_MAX_DEPTH);
		set->queue_depth = BLK_MQ_MAX_DEPTH;
	}

	/*
	 * If a crashdump is active, then we are potentially in a very
	 * memory constrained environment. Limit us to 1 queue and
	 * 64 tags to prevent using too much memory.
	 */
	if (is_kdump_kernel()) {
		set->nr_hw_queues = 1;
		set->queue_depth = min(64U, set->queue_depth);
	}
	/*
	 * There is no use for more h/w queues than cpus.
	 */
	if (set->nr_hw_queues > nr_cpu_ids)
		set->nr_hw_queues = nr_cpu_ids;

	set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
				 GFP_KERNEL, set->numa_node);
	if (!set->tags)
		return -ENOMEM;

	ret = -ENOMEM;
	set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
				   GFP_KERNEL, set->numa_node);
	if (!set->mq_map)
		goto out_free_tags;

	ret = blk_mq_update_queue_map(set);
	if (ret)
		goto out_free_mq_map;

	ret = blk_mq_alloc_rq_maps(set);
	if (ret)
		goto out_free_mq_map;

	mutex_init(&set->tag_list_lock);
	INIT_LIST_HEAD(&set->tag_list);

	return 0;

out_free_mq_map:
	kfree(set->mq_map);
	set->mq_map = NULL;
out_free_tags:
	kfree(set->tags);
	set->tags = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_mq_alloc_tag_set);
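
/*
 * Illustrative sketch (hypothetical driver): the checks above spell out what
 * must be filled in before the call. For a simple single-queue device that
 * is roughly:
 *
 *	set->ops		= &mydrv_mq_ops;	(must provide .queue_rq)
 *	set->nr_hw_queues	= 1;
 *	set->queue_depth	= 128;
 *	set->reserved_tags	= 0;
 *	set->cmd_size		= sizeof(struct mydrv_cmd);
 *	set->numa_node		= NUMA_NO_NODE;
 *	set->flags		= BLK_MQ_F_SHOULD_MERGE;
 *	ret = blk_mq_alloc_tag_set(set);
 *
 * mydrv_mq_ops and struct mydrv_cmd are made-up names; cmd_size is the
 * per-request driver payload that blk_mq_alloc_rqs() appends to each
 * struct request.
 */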

void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
	int i;

	for (i = 0; i < nr_cpu_ids; i++)
		blk_mq_free_map_and_requests(set, i);

	kfree(set->mq_map);
	set->mq_map = NULL;

	kfree(set->tags);
	set->tags = NULL;
}
EXPORT_SYMBOL(blk_mq_free_tag_set);

int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i, ret;

	if (!set)
		return -EINVAL;

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	ret = 0;
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->tags)
			continue;
		/*
		 * If we're using an MQ scheduler, just update the scheduler
		 * queue depth. This is similar to what the old code would do.
		 */
		if (!hctx->sched_tags) {
			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
						      false);
		} else {
			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
						      nr, true);
		}
		if (ret)
			break;
	}

	if (!ret)
		q->nr_requests = nr;

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return ret;
}

static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
					 int nr_hw_queues)
{
	struct request_queue *q;

	lockdep_assert_held(&set->tag_list_lock);

	if (nr_hw_queues > nr_cpu_ids)
		nr_hw_queues = nr_cpu_ids;
	if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
		return;

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_freeze_queue(q);

	set->nr_hw_queues = nr_hw_queues;
	blk_mq_update_queue_map(set);
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_realloc_hw_ctxs(set, q);
		blk_mq_queue_reinit(q);
	}

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_unfreeze_queue(q);
}

void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
{
	mutex_lock(&set->tag_list_lock);
	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
	mutex_unlock(&set->tag_list_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
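
/*
 * Context note (a sketch, assuming the standard sysfs wiring): the two resize
 * paths above are reached quite differently. blk_mq_update_nr_requests() ends
 * up servicing writes to /sys/block/<dev>/queue/nr_requests, while
 * blk_mq_update_nr_hw_queues() is called directly by drivers after they
 * renegotiate their interrupt/queue count, e.g.
 *
 *	blk_mq_update_nr_hw_queues(&mydrv->tag_set, new_nr_queues);
 *
 * with mydrv and new_nr_queues being made-up names. Both paths freeze the
 * affected queues before touching the tag maps.
 */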

/* Enable polling stats and return whether they were already enabled. */
static bool blk_poll_stats_enable(struct request_queue *q)
{
	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
	    blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q))
		return true;
	blk_stat_add_callback(q, q->poll_cb);
	return false;
}

static void blk_mq_poll_stats_start(struct request_queue *q)
{
	/*
	 * We don't arm the callback if polling stats are not enabled or the
	 * callback is already active.
	 */
	if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
	    blk_stat_is_active(q->poll_cb))
		return;

	blk_stat_activate_msecs(q->poll_cb, 100);
}

static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
{
	struct request_queue *q = cb->data;
	int bucket;

	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
		if (cb->stat[bucket].nr_samples)
			q->poll_stat[bucket] = cb->stat[bucket];
	}
}

static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
				       struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	unsigned long ret = 0;
	int bucket;

	/*
	 * If stats collection isn't on, don't sleep but turn it on for
	 * future users
	 */
	if (!blk_poll_stats_enable(q))
		return 0;

	/*
	 * As an optimistic guess, use half of the mean service time
	 * for this type of request. We can (and should) make this smarter.
	 * For instance, if the completion latencies are tight, we can
	 * get closer than just half the mean. This is especially
	 * important on devices where the completion latencies are longer
	 * than ~10 usec. We do use the stats for the relevant IO size
	 * if available which does lead to better estimates.
	 */
	bucket = blk_mq_poll_stats_bkt(rq);
	if (bucket < 0)
		return ret;

	if (q->poll_stat[bucket].nr_samples)
		ret = (q->poll_stat[bucket].mean + 1) / 2;

	return ret;
}
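
/*
 * Worked example with assumed numbers: if the recorded mean completion time
 * for this request's bucket is 8000 ns, the estimate returned above is
 * (8000 + 1) / 2 = 4000 ns, i.e. the hybrid path below sleeps for roughly
 * half the expected service time before it starts busy polling. Writing to
 * /sys/block/<dev>/queue/io_poll_delay overrides this via q->poll_nsec:
 * -1 disables the hybrid sleep, 0 keeps the half-of-mean estimate, and a
 * positive value is used as the sleep time in nanoseconds.
 */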

static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
				     struct blk_mq_hw_ctx *hctx,
				     struct request *rq)
{
	struct hrtimer_sleeper hs;
	enum hrtimer_mode mode;
	unsigned int nsecs;
	ktime_t kt;

	if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
		return false;

	/*
	 * poll_nsec can be:
	 *
	 * -1:	don't ever hybrid sleep
	 *  0:	use half of prev avg
	 * >0:	use this specific value
	 */
	if (q->poll_nsec == -1)
		return false;
	else if (q->poll_nsec > 0)
		nsecs = q->poll_nsec;
	else
		nsecs = blk_mq_poll_nsecs(q, hctx, rq);

	if (!nsecs)
		return false;

	rq->rq_flags |= RQF_MQ_POLL_SLEPT;

	/*
	 * This will be replaced with the stats tracking code, using
	 * 'avg_completion_time / 2' as the pre-sleep target.
	 */
	kt = nsecs;

	mode = HRTIMER_MODE_REL;
	hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires(&hs.timer, kt);

	hrtimer_init_sleeper(&hs, current);
	do {
		if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		hrtimer_start_expires(&hs.timer, mode);
		if (hs.task)
			io_schedule();
		hrtimer_cancel(&hs.timer);
		mode = HRTIMER_MODE_ABS;
	} while (hs.task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);
	destroy_hrtimer_on_stack(&hs.timer);
	return true;
}

static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct request_queue *q = hctx->queue;
	long state;

	/*
	 * If we sleep, have the caller restart the poll loop to reset
	 * the state. Like for the other success return cases, the
	 * caller is responsible for checking if the IO completed. If
	 * the IO isn't complete, we'll get called again and will go
	 * straight to the busy poll loop.
	 */
	if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
		return true;

	hctx->poll_considered++;

	state = current->state;
	while (!need_resched()) {
		int ret;

		hctx->poll_invoked++;

		ret = q->mq_ops->poll(hctx, rq->tag);
		if (ret > 0) {
			hctx->poll_success++;
			set_current_state(TASK_RUNNING);
			return true;
		}

		if (signal_pending_state(state, current))
			set_current_state(TASK_RUNNING);

		if (current->state == TASK_RUNNING)
			return true;
		if (ret < 0)
			break;
		cpu_relax();
	}

	__set_current_state(TASK_RUNNING);
	return false;
}

static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
{
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return false;

	hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
	if (!blk_qc_t_is_internal(cookie))
		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
	else {
		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
		/*
		 * With scheduling, if the request has completed, we'll
		 * get a NULL return here, as we clear the sched tag when
		 * that happens. The request still remains valid, like always,
		 * so we should be safe with just the NULL check.
		 */
		if (!rq)
			return false;
	}

	return __blk_mq_poll(hctx, rq);
}

static int __init blk_mq_init(void)
{
	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
				blk_mq_hctx_notify_dead);
	return 0;
}
subsys_initcall(blk_mq_init);