/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"
#include "blk-wbt.h"
#include "blk-mq-sched.h"

static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);

static int blk_mq_poll_stats_bkt(const struct request *rq)
{
	int ddir, bytes, bucket;

	ddir = rq_data_dir(rq);
	bytes = blk_rq_bytes(rq);

	bucket = ddir + 2*(ilog2(bytes) - 9);

	if (bucket < 0)
		return -1;
	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

	return bucket;
}

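/*
 * Worked example of the bucketing above (illustrative only): a 4096-byte
 * read has ddir == READ (0) and ilog2(4096) == 12, so it lands in bucket
 * 0 + 2 * (12 - 9) == 6, while a 4096-byte write lands in bucket 7.
 * Requests smaller than 512 bytes yield a negative bucket and are not
 * accounted.
 */
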
/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return !list_empty_careful(&hctx->dispatch) ||
		sbitmap_any_bit_set(&hctx->ctx_map) ||
			blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
		sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
}

struct mq_inflight {
	struct hd_struct *part;
	unsigned int *inflight;
};

static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
				  struct request *rq, void *priv,
				  bool reserved)
{
	struct mq_inflight *mi = priv;

	if (test_bit(REQ_ATOM_STARTED, &rq->atomic_flags) &&
	    !test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags)) {
		/*
		 * index[0] counts the specific partition that was asked
		 * for. index[1] counts the ones that are active on the
		 * whole device, so increment that if mi->part is indeed
		 * a partition, and not a whole device.
		 */
		if (rq->part == mi->part)
			mi->inflight[0]++;
		if (mi->part->partno)
			mi->inflight[1]++;
	}
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part, .inflight = inflight, };

	inflight[0] = inflight[1] = 0;
	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
}

void blk_freeze_queue_start(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
	if (freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		if (q->mq_ops)
			blk_mq_run_hw_queues(q, false);
	}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
				  percpu_ref_is_zero(&q->q_usage_counter),
				  timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero. For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
	blk_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
	WARN_ON_ONCE(freeze_depth < 0);
	if (!freeze_depth) {
		percpu_ref_reinit(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_set(QUEUE_FLAG_QUIESCED, q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked. Once this function returns, no dispatch
 * can happen until the queue is unquiesced via blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	bool rcu = false;

	blk_mq_quiesce_queue_nowait(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->flags & BLK_MQ_F_BLOCKING)
			synchronize_srcu(hctx->queue_rq_srcu);
		else
			rcu = true;
	}
	if (rcu)
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function restores the queue to the state it was in before
 * blk_mq_quiesce_queue() was called.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* dispatch requests which are inserted during quiescing */
	blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);

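/*
 * Illustrative quiesce sketch (hypothetical driver code, not part of this
 * file): a driver that must guarantee no new ->queue_rq() invocations
 * while it reprograms its hardware would typically do
 *
 *	blk_mq_quiesce_queue(q);
 *	foo_reconfigure_controller(foo);
 *	blk_mq_unquiesce_queue(q);
 *
 * Completions for requests that were dispatched before the quiesce can
 * still arrive in between. foo_reconfigure_controller() is a made-up
 * helper.
 */
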
void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
	return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
		unsigned int tag, unsigned int op)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct request *rq = tags->static_rqs[tag];

	rq->rq_flags = 0;

	if (data->flags & BLK_MQ_REQ_INTERNAL) {
		rq->tag = -1;
		rq->internal_tag = tag;
	} else {
		if (blk_mq_tag_busy(data->hctx)) {
			rq->rq_flags = RQF_MQ_INFLIGHT;
			atomic_inc(&data->hctx->nr_active);
		}
		rq->tag = tag;
		rq->internal_tag = -1;
		data->hctx->tags->rqs[rq->tag] = rq;
	}

	INIT_LIST_HEAD(&rq->queuelist);
	/* csd/requeue_work/fifo_time is initialized before use */
	rq->q = data->q;
	rq->mq_ctx = data->ctx;
	rq->cmd_flags = op;
	if (data->flags & BLK_MQ_REQ_PREEMPT)
		rq->rq_flags |= RQF_PREEMPT;
	if (blk_queue_io_stat(data->q))
		rq->rq_flags |= RQF_IO_STAT;
	/* do not touch atomic flags, it needs atomic ops against the timer */
	rq->cpu = -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->rq_disk = NULL;
	rq->part = NULL;
	rq->start_time = jiffies;
#ifdef CONFIG_BLK_CGROUP
	rq->rl = NULL;
	set_start_time_ns(rq);
	rq->io_start_time_ns = 0;
#endif
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->special = NULL;
	/* tag was already set */
	rq->extra_len = 0;

	INIT_LIST_HEAD(&rq->timeout_list);
	rq->timeout = 0;

	rq->end_io = NULL;
	rq->end_io_data = NULL;
	rq->next_rq = NULL;

	data->ctx->rq_dispatched[op_is_sync(op)]++;
	return rq;
}

static struct request *blk_mq_get_request(struct request_queue *q,
		struct bio *bio, unsigned int op,
		struct blk_mq_alloc_data *data)
{
	struct elevator_queue *e = q->elevator;
	struct request *rq;
	unsigned int tag;
	bool put_ctx_on_error = false;

	blk_queue_enter_live(q);
	data->q = q;
	if (likely(!data->ctx)) {
		data->ctx = blk_mq_get_ctx(q);
		put_ctx_on_error = true;
	}
	if (likely(!data->hctx))
		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
	if (op & REQ_NOWAIT)
		data->flags |= BLK_MQ_REQ_NOWAIT;

	if (e) {
		data->flags |= BLK_MQ_REQ_INTERNAL;

		/*
		 * Flush requests are special and go directly to the
		 * dispatch list.
		 */
		if (!op_is_flush(op) && e->type->ops.mq.limit_depth)
			e->type->ops.mq.limit_depth(op, data);
	}

	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_TAG_FAIL) {
		if (put_ctx_on_error) {
			blk_mq_put_ctx(data->ctx);
			data->ctx = NULL;
		}
		blk_queue_exit(q);
		return NULL;
	}

	rq = blk_mq_rq_ctx_init(data, tag, op);
	if (!op_is_flush(op)) {
		rq->elv.icq = NULL;
		if (e && e->type->ops.mq.prepare_request) {
			if (e->type->icq_cache && rq_ioc(bio))
				blk_mq_sched_assign_ioc(rq, bio);

			e->type->ops.mq.prepare_request(rq, bio);
			rq->rq_flags |= RQF_ELVPRIV;
		}
	}
	data->hctx->queued++;
	return rq;
}

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags };
	struct request *rq;
	int ret;

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	blk_mq_put_ctx(alloc_data.ctx);

	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);

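/*
 * Illustrative allocation sketch (hypothetical caller, not part of this
 * file): a passthrough path that must not block on a frozen queue could
 * use
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 * and later release the tag with blk_mq_free_request(). REQ_OP_DRV_IN is
 * just one example of a driver-private operation.
 */
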
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
	unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags };
	struct request *rq;
	unsigned int cpu;
	int ret;

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context. No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not tell the caller that it should skip this queue.
	 */
	alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
	if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
		blk_queue_exit(q);
		return ERR_PTR(-EXDEV);
	}
	cpu = cpumask_first(alloc_data.hctx->cpumask);
	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);

	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	return rq;
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

void blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	const int sched_tag = rq->internal_tag;

	if (rq->rq_flags & RQF_ELVPRIV) {
		if (e && e->type->ops.mq.finish_request)
			e->type->ops.mq.finish_request(rq);
		if (rq->elv.icq) {
			put_io_context(rq->elv.icq->ioc);
			rq->elv.icq = NULL;
		}
	}

	ctx->rq_completed[rq_is_sync(rq)]++;
	if (rq->rq_flags & RQF_MQ_INFLIGHT)
		atomic_dec(&hctx->nr_active);

	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
		laptop_io_completion(q->backing_dev_info);

	wbt_done(q->rq_wb, &rq->issue_stat);

	if (blk_rq_rl(rq))
		blk_put_rl(blk_rq_rl(rq));

	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
	if (rq->tag != -1)
		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
	if (sched_tag != -1)
		blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
	blk_mq_sched_restart(hctx);
	blk_queue_exit(q);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
	blk_account_io_done(rq);

	if (rq->end_io) {
		wbt_done(rq->q->rq_wb, &rq->issue_stat);
		rq->end_io(rq, error);
	} else {
		if (unlikely(blk_bidi_rq(rq)))
			blk_mq_free_request(rq->next_rq);
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

static void __blk_mq_complete_request_remote(void *data)
{
	struct request *rq = data;

	rq->q->softirq_done_fn(rq);
}

static void __blk_mq_complete_request(struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	bool shared = false;
	int cpu;

	if (rq->internal_tag != -1)
		blk_mq_sched_completed_request(rq);
	if (rq->rq_flags & RQF_STATS) {
		blk_mq_poll_stats_start(rq->q);
		blk_stat_add(rq);
	}

	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
		rq->q->softirq_done_fn(rq);
		return;
	}

	cpu = get_cpu();
	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
		shared = cpus_share_cache(cpu, ctx->cpu);

	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
		rq->csd.func = __blk_mq_complete_request_remote;
		rq->csd.info = rq;
		rq->csd.flags = 0;
		smp_call_function_single_async(ctx->cpu, &rq->csd);
	} else {
		rq->q->softirq_done_fn(rq);
	}
	put_cpu();
}

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (unlikely(blk_should_fake_timeout(q)))
		return;
	if (!blk_mark_rq_complete(rq))
		__blk_mq_complete_request(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

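/*
 * Illustrative completion sketch (hypothetical driver): an interrupt
 * handler that has looked up the tag from a hardware completion entry
 * usually does no more than
 *
 *	rq = blk_mq_tag_to_rq(hctx->tags, tag);
 *	blk_mq_complete_request(rq);
 *
 * and lets softirq_done_fn do the per-request work, possibly on the
 * submitting CPU depending on QUEUE_FLAG_SAME_COMP above.
 */
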
int blk_mq_request_started(struct request *rq)
{
	return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
}
EXPORT_SYMBOL_GPL(blk_mq_request_started);

void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_sched_started_request(rq);

	trace_block_rq_issue(q, rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		blk_stat_set_issue(&rq->issue_stat, blk_rq_sectors(rq));
		rq->rq_flags |= RQF_STATS;
		wbt_issue(q->rq_wb, &rq->issue_stat);
	}

	blk_add_timer(rq);

	WARN_ON_ONCE(test_bit(REQ_ATOM_STARTED, &rq->atomic_flags));

	/*
	 * Mark us as started and clear complete. Complete might have been
	 * set if requeue raced with timeout, which then marked it as
	 * complete. So be sure to clear complete again when we start
	 * the request, otherwise we'll ignore the completion event.
	 *
	 * Ensure that ->deadline is visible before we set STARTED, such that
	 * blk_mq_check_expired() is guaranteed to observe our ->deadline when
	 * it observes STARTED.
	 */
	smp_wmb();
	set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags)) {
		/*
		 * Coherence order guarantees these consecutive stores to a
		 * single variable propagate in the specified order. Thus the
		 * clear_bit() is ordered _after_ the set bit. See
		 * blk_mq_check_expired().
		 *
		 * (the bits must be part of the same byte for this to be
		 * true).
		 */
		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
	}

	if (q->dma_drain_size && blk_rq_bytes(rq)) {
		/*
		 * Make sure space for the drain appears. We know we can do
		 * this because max_hw_segments has been adjusted to be one
		 * fewer than the device can handle.
		 */
		rq->nr_phys_segments++;
	}
}
EXPORT_SYMBOL(blk_mq_start_request);

/*
 * When we reach here because queue is busy, REQ_ATOM_COMPLETE
 * flag isn't set yet, so there may be race with timeout handler,
 * but given rq->deadline is just set in .queue_rq() under
 * this situation, the race won't be possible in reality because
 * rq->timeout should be set as big enough to cover the window
 * between blk_mq_start_request() called from .queue_rq() and
 * clearing REQ_ATOM_STARTED here.
 */
static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_put_driver_tag(rq);

	trace_block_rq_requeue(q, rq);
	wbt_requeue(q->rq_wb, &rq->issue_stat);
	blk_mq_sched_requeue_request(rq);

	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
		if (q->dma_drain_size && blk_rq_bytes(rq))
			rq->nr_phys_segments--;
	}
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
	__blk_mq_requeue_request(rq);

	BUG_ON(blk_queued_rq(rq));
	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;

	spin_lock_irq(&q->requeue_lock);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irq(&q->requeue_lock);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->rq_flags & RQF_SOFTBARRIER))
			continue;

		rq->rq_flags &= ~RQF_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, true, false, false, true);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, false, false, false, true);
	}

	blk_mq_run_hw_queues(q, false);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	if (kick_requeue_list)
		blk_mq_kick_requeue_list(q);
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_schedule_delayed_work(&q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
				    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
	if (tag < tags->nr_tags) {
		prefetch(tags->rqs[tag]);
		return tags->rqs[tag];
	}

	return NULL;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

struct blk_mq_timeout_data {
	unsigned long next;
	unsigned int next_set;
};

void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
	const struct blk_mq_ops *ops = req->q->mq_ops;
	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;

	/*
	 * We know that complete is set at this point. If STARTED isn't set
	 * anymore, then the request isn't active and the "timeout" should
	 * just be ignored. This can happen due to the bitflag ordering.
	 * Timeout first checks if STARTED is set, and if it is, assumes
	 * the request is active. But if we race with completion, then
	 * both flags will get cleared. So check here again, and ignore
	 * a timeout event with a request that isn't active.
	 */
	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
		return;

	if (ops->timeout)
		ret = ops->timeout(req, reserved);

	switch (ret) {
	case BLK_EH_HANDLED:
		__blk_mq_complete_request(req);
		break;
	case BLK_EH_RESET_TIMER:
		blk_add_timer(req);
		blk_clear_rq_complete(req);
		break;
	case BLK_EH_NOT_HANDLED:
		break;
	default:
		printk(KERN_ERR "block: bad eh return: %d\n", ret);
		break;
	}
}

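/*
 * Illustrative ->timeout sketch (hypothetical driver, made-up foo_*
 * helpers): a driver that can abort a stuck command, or else escalate to
 * a controller reset, might implement the callback as
 *
 *	static enum blk_eh_timer_return foo_timeout(struct request *rq,
 *						    bool reserved)
 *	{
 *		if (foo_abort_cmd(rq) == 0)
 *			return BLK_EH_HANDLED;
 *		foo_schedule_reset(rq->q->queuedata);
 *		return BLK_EH_RESET_TIMER;
 *	}
 *
 * BLK_EH_HANDLED makes blk_mq_rq_timed_out() complete the request above;
 * BLK_EH_RESET_TIMER re-arms the timer and clears the COMPLETE flag.
 */
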
static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
		struct request *rq, void *priv, bool reserved)
{
	struct blk_mq_timeout_data *data = priv;
	unsigned long deadline;

	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
		return;

	/*
	 * Ensures that if we see STARTED we must also see our
	 * up-to-date deadline, see blk_mq_start_request().
	 */
	smp_rmb();

	deadline = READ_ONCE(rq->deadline);

	/*
	 * The rq being checked may have been freed and reallocated
	 * out already here, we avoid this race by checking rq->deadline
	 * and REQ_ATOM_COMPLETE flag together:
	 *
	 * - if rq->deadline is observed as new value because of
	 *   reusing, the rq won't be timed out because of timing.
	 * - if rq->deadline is observed as previous value,
	 *   REQ_ATOM_COMPLETE flag won't be cleared in reuse path
	 *   because we put a barrier between setting rq->deadline
	 *   and clearing the flag in blk_mq_start_request(), so
	 *   this rq won't be timed out too.
	 */
	if (time_after_eq(jiffies, deadline)) {
		if (!blk_mark_rq_complete(rq)) {
			/*
			 * Again coherence order ensures that consecutive reads
			 * from the same variable must be in that order. This
			 * ensures that if we see COMPLETE clear, we must then
			 * see STARTED set and we'll ignore this timeout.
			 *
			 * (There's also the MB implied by the test_and_clear())
			 */
			blk_mq_rq_timed_out(rq, reserved);
		}
	} else if (!data->next_set || time_after(data->next, deadline)) {
		data->next = deadline;
		data->next_set = 1;
	}
}

static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	struct blk_mq_timeout_data data = {
		.next		= 0,
		.next_set	= 0,
	};
	int i;

	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);

	if (data.next_set) {
		data.next = blk_rq_timeout(round_jiffies_up(data.next));
		mod_timer(&q->timeout, data.next);
	} else {
		struct blk_mq_hw_ctx *hctx;

		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
	}
	blk_queue_exit(q);
}

struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];

	sbitmap_clear_bit(sb, bitnr);
	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_list, flush_data->list);
	spin_unlock(&ctx->lock);
	return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct flush_busy_ctx_data data = {
		.hctx = hctx,
		.list = list,
	};

	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);

struct dispatch_rq_data {
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
};

static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
		void *data)
{
	struct dispatch_rq_data *dispatch_data = data;
	struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];

	spin_lock(&ctx->lock);
	if (unlikely(!list_empty(&ctx->rq_list))) {
		dispatch_data->rq = list_entry_rq(ctx->rq_list.next);
		list_del_init(&dispatch_data->rq->queuelist);
		if (list_empty(&ctx->rq_list))
			sbitmap_clear_bit(sb, bitnr);
	}
	spin_unlock(&ctx->lock);

	return !dispatch_data->rq;
}

struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start)
{
	unsigned off = start ? start->index_hw : 0;
	struct dispatch_rq_data data = {
		.hctx = hctx,
		.rq   = NULL,
	};

	__sbitmap_for_each_set(&hctx->ctx_map, off,
			       dispatch_rq_from_ctx, &data);

	return data.rq;
}

static inline unsigned int queued_to_index(unsigned int queued)
{
	if (!queued)
		return 0;

	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}

bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
			   bool wait)
{
	struct blk_mq_alloc_data data = {
		.q = rq->q,
		.hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
		.flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
	};

	might_sleep_if(wait);

	if (rq->tag != -1)
		goto done;

	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
		data.flags |= BLK_MQ_REQ_RESERVED;

	rq->tag = blk_mq_get_tag(&data);
	if (rq->tag >= 0) {
		if (blk_mq_tag_busy(data.hctx)) {
			rq->rq_flags |= RQF_MQ_INFLIGHT;
			atomic_inc(&data.hctx->nr_active);
		}
		data.hctx->tags->rqs[rq->tag] = rq;
	}

done:
	if (hctx)
		*hctx = data.hctx;
	return rq->tag != -1;
}

static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
				int flags, void *key)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

	list_del_init(&wait->entry);
	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

/*
 * Mark us waiting for a tag. For shared tags, this involves hooking us into
 * the tag wakeups. For non-shared tags, we can simply mark us needing a
 * restart. For both cases, take care to check the condition again after
 * marking us as waiting.
 */
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
				 struct request *rq)
{
	struct blk_mq_hw_ctx *this_hctx = *hctx;
	bool shared_tags = (this_hctx->flags & BLK_MQ_F_TAG_SHARED) != 0;
	struct sbq_wait_state *ws;
	wait_queue_entry_t *wait;
	bool ret;

	if (!shared_tags) {
		if (!test_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state))
			set_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state);
	} else {
		wait = &this_hctx->dispatch_wait;
		if (!list_empty_careful(&wait->entry))
			return false;

		spin_lock(&this_hctx->lock);
		if (!list_empty(&wait->entry)) {
			spin_unlock(&this_hctx->lock);
			return false;
		}

		ws = bt_wait_ptr(&this_hctx->tags->bitmap_tags, this_hctx);
		add_wait_queue(&ws->wait, wait);
	}

	/*
	 * It's possible that a tag was freed in the window between the
	 * allocation failure and adding the hardware queue to the wait
	 * queue.
	 */
	ret = blk_mq_get_driver_tag(rq, hctx, false);

	if (!shared_tags) {
		/*
		 * Don't clear RESTART here, someone else could have set it.
		 * At most this will cost an extra queue run.
		 */
		return ret;
	} else {
		if (!ret) {
			spin_unlock(&this_hctx->lock);
			return false;
		}

		/*
		 * We got a tag, remove ourselves from the wait queue to ensure
		 * someone else gets the wakeup.
		 */
		spin_lock_irq(&ws->wait.lock);
		list_del_init(&wait->entry);
		spin_unlock_irq(&ws->wait.lock);
		spin_unlock(&this_hctx->lock);
		return true;
	}
}

bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
			     bool got_budget)
{
	struct blk_mq_hw_ctx *hctx;
	struct request *rq, *nxt;
	bool no_tag = false;
	int errors, queued;

	if (list_empty(list))
		return false;

	WARN_ON(!list_is_singular(list) && got_budget);

	/*
	 * Now process all the entries, sending them to the driver.
	 */
	errors = queued = 0;
	do {
		struct blk_mq_queue_data bd;
		blk_status_t ret;

		rq = list_first_entry(list, struct request, queuelist);
		if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
			/*
			 * The initial allocation attempt failed, so we need to
			 * rerun the hardware queue when a tag is freed. The
			 * waitqueue takes care of that. If the queue is run
			 * before we add this entry back on the dispatch list,
			 * we'll re-run it below.
			 */
			if (!blk_mq_mark_tag_wait(&hctx, rq)) {
				if (got_budget)
					blk_mq_put_dispatch_budget(hctx);
				/*
				 * For non-shared tags, the RESTART check
				 * will suffice.
				 */
				if (hctx->flags & BLK_MQ_F_TAG_SHARED)
					no_tag = true;
				break;
			}
		}

		if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) {
			blk_mq_put_driver_tag(rq);
			break;
		}

		list_del_init(&rq->queuelist);

		bd.rq = rq;

		/*
		 * Flag last if we have no more requests, or if we have more
		 * but can't assign a driver tag to it.
		 */
		if (list_empty(list))
			bd.last = true;
		else {
			nxt = list_first_entry(list, struct request, queuelist);
			bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
		}

		ret = q->mq_ops->queue_rq(hctx, &bd);
		if (ret == BLK_STS_RESOURCE) {
			/*
			 * If an I/O scheduler has been configured and we got a
			 * driver tag for the next request already, free it
			 * again.
			 */
			if (!list_empty(list)) {
				nxt = list_first_entry(list, struct request, queuelist);
				blk_mq_put_driver_tag(nxt);
			}
			list_add(&rq->queuelist, list);
			__blk_mq_requeue_request(rq);
			break;
		}

		if (unlikely(ret != BLK_STS_OK)) {
			errors++;
			blk_mq_end_request(rq, BLK_STS_IOERR);
			continue;
		}

		queued++;
	} while (!list_empty(list));

	hctx->dispatched[queued_to_index(queued)]++;

	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
	if (!list_empty(list)) {
		spin_lock(&hctx->lock);
		list_splice_init(list, &hctx->dispatch);
		spin_unlock(&hctx->lock);

		/*
		 * If SCHED_RESTART was set by the caller of this function and
		 * it is no longer set that means that it was cleared by another
		 * thread and hence that a queue rerun is needed.
		 *
		 * If 'no_tag' is set, that means that we failed getting
		 * a driver tag with an I/O scheduler attached. If our dispatch
		 * waitqueue is no longer active, ensure that we run the queue
		 * AFTER adding our entries back to the list.
		 *
		 * If no I/O scheduler has been configured it is possible that
		 * the hardware queue got stopped and restarted before requests
		 * were pushed back onto the dispatch list. Rerun the queue to
		 * avoid starvation. Notes:
		 * - blk_mq_run_hw_queue() checks whether or not a queue has
		 *   been stopped before rerunning a queue.
		 * - Some but not all block drivers stop a queue before
		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
		 *   and dm-rq.
		 */
		if (!blk_mq_sched_needs_restart(hctx) ||
		    (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
			blk_mq_run_hw_queue(hctx, true);
	}

	return (queued + errors) != 0;
}

static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	int srcu_idx;

	/*
	 * We should be running this queue from one of the CPUs that
	 * are mapped to it.
	 */
	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
		cpu_online(hctx->next_cpu));

	/*
	 * We can't run the queue inline with ints disabled. Ensure that
	 * we catch bad users of this early.
	 */
	WARN_ON_ONCE(in_interrupt());

	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
		rcu_read_lock();
		blk_mq_sched_dispatch_requests(hctx);
		rcu_read_unlock();
	} else {
		might_sleep();

		srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
		blk_mq_sched_dispatch_requests(hctx);
		srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
	}
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->queue->nr_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	if (--hctx->next_cpu_batch <= 0) {
		int next_cpu;

		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = cpumask_first(hctx->cpumask);

		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}

	return hctx->next_cpu;
}

static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
					unsigned long msecs)
{
	if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
		return;

	if (unlikely(blk_mq_hctx_stopped(hctx)))
		return;

	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
		int cpu = get_cpu();
		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
			__blk_mq_run_hw_queue(hctx);
			put_cpu();
			return;
		}

		put_cpu();
	}

	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
					 &hctx->run_work,
					 msecs_to_jiffies(msecs));
}

void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);

bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	if (blk_mq_hctx_has_pending(hctx)) {
		__blk_mq_delay_run_hw_queue(hctx, async, 0);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_mq_run_hw_queue);

void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_hctx_stopped(hctx))
			continue;

		blk_mq_run_hw_queue(hctx, async);
	}
}
EXPORT_SYMBOL(blk_mq_run_hw_queues);

/**
 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
 * @q: request queue.
 *
 * The caller is responsible for serializing this function against
 * blk_mq_{start,stop}_hw_queue().
 */
bool blk_mq_queue_stopped(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hctx_stopped(hctx))
			return true;

	return false;
}
EXPORT_SYMBOL(blk_mq_queue_stopped);

/*
 * This function is often used by drivers to pause .queue_rq() when there
 * aren't enough resources or some condition isn't satisfied, in which
 * case BLK_STS_RESOURCE is usually returned.
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queue() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	cancel_delayed_work(&hctx->run_work);

	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);

/*
 * This function is often used by drivers to pause .queue_rq() when there
 * aren't enough resources or some condition isn't satisfied, in which
 * case BLK_STS_RESOURCE is usually returned.
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queues() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
void blk_mq_stop_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_stop_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);

void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);

	blk_mq_run_hw_queue(hctx, false);
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);

void blk_mq_start_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);

void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	if (!blk_mq_hctx_stopped(hctx))
		return;

	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
	blk_mq_run_hw_queue(hctx, async);
}
EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);

void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_stopped_hw_queue(hctx, async);
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);

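/*
 * Illustrative back-pressure sketch (hypothetical driver, made-up
 * foo_hw_has_room() helper): a ->queue_rq() implementation that runs out
 * of hardware slots would typically do
 *
 *	if (!foo_hw_has_room(hctx->driver_data)) {
 *		blk_mq_stop_hw_queue(hctx);
 *		return BLK_STS_RESOURCE;
 *	}
 *
 * and then call blk_mq_start_stopped_hw_queues(q, true) from its
 * completion path once slots are available again.
 */
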
static void blk_mq_run_work_fn(struct work_struct *work)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);

	/*
	 * If we are stopped, don't run the queue. The exception is if
	 * BLK_MQ_S_START_ON_RUN is set. For that case, we auto-clear
	 * the STOPPED bit and run it.
	 */
	if (test_bit(BLK_MQ_S_STOPPED, &hctx->state)) {
		if (!test_bit(BLK_MQ_S_START_ON_RUN, &hctx->state))
			return;

		clear_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
	}

	__blk_mq_run_hw_queue(hctx);
}


void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
		return;

	/*
	 * Stop the hw queue, then modify currently delayed work.
	 * This should prevent us from running the queue prematurely.
	 * Mark the queue as auto-clearing STOPPED when it runs.
	 */
	blk_mq_stop_hw_queue(hctx);
	set_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
				    &hctx->run_work,
				    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_queue);

static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
					    struct request *rq,
					    bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	lockdep_assert_held(&ctx->lock);

	trace_block_rq_insert(hctx->queue, rq);

	if (at_head)
		list_add(&rq->queuelist, &ctx->rq_list);
	else
		list_add_tail(&rq->queuelist, &ctx->rq_list);
}

void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	lockdep_assert_held(&ctx->lock);

	__blk_mq_insert_req_list(hctx, rq, at_head);
	blk_mq_hctx_mark_pending(hctx, ctx);
}

/*
 * Should only be used carefully, when the caller knows we want to
 * bypass a potential IO scheduler on the target device.
 */
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);

	spin_lock(&hctx->lock);
	list_add_tail(&rq->queuelist, &hctx->dispatch);
	spin_unlock(&hctx->lock);

	if (run_queue)
		blk_mq_run_hw_queue(hctx, false);
}

void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list)

{
	/*
	 * preemption doesn't flush plug list, so it's possible ctx->cpu is
	 * offline now
	 */
	spin_lock(&ctx->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		BUG_ON(rq->mq_ctx != ctx);
		list_del_init(&rq->queuelist);
		__blk_mq_insert_req_list(hctx, rq, false);
	}
	blk_mq_hctx_mark_pending(hctx, ctx);
	spin_unlock(&ctx->lock);
}

static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return !(rqa->mq_ctx < rqb->mq_ctx ||
		 (rqa->mq_ctx == rqb->mq_ctx &&
		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	struct blk_mq_ctx *this_ctx;
	struct request_queue *this_q;
	struct request *rq;
	LIST_HEAD(list);
	LIST_HEAD(ctx_list);
	unsigned int depth;

	list_splice_init(&plug->mq_list, &list);

	list_sort(NULL, &list, plug_ctx_cmp);

	this_q = NULL;
	this_ctx = NULL;
	depth = 0;

	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
		list_del_init(&rq->queuelist);
		BUG_ON(!rq->q);
		if (rq->mq_ctx != this_ctx) {
			if (this_ctx) {
				trace_block_unplug(this_q, depth, from_schedule);
				blk_mq_sched_insert_requests(this_q, this_ctx,
							     &ctx_list,
							     from_schedule);
			}

			this_ctx = rq->mq_ctx;
			this_q = rq->q;
			depth = 0;
		}

		depth++;
		list_add_tail(&rq->queuelist, &ctx_list);
	}

	/*
	 * If 'this_ctx' is set, we know we have entries to complete
	 * on 'ctx_list'. Do those.
	 */
	if (this_ctx) {
		trace_block_unplug(this_q, depth, from_schedule);
		blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
					     from_schedule);
	}
}

static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
	blk_init_request_from_bio(rq, bio);

	blk_rq_set_rl(rq, blk_get_rl(rq->q, bio));

	blk_account_io_start(rq, true);
}

static inline void blk_mq_queue_io(struct blk_mq_hw_ctx *hctx,
				   struct blk_mq_ctx *ctx,
				   struct request *rq)
{
	spin_lock(&ctx->lock);
	__blk_mq_insert_request(hctx, rq, false);
	spin_unlock(&ctx->lock);
}

static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	if (rq->tag != -1)
		return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);

	return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
}

static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
					struct request *rq,
					blk_qc_t *cookie, bool may_sleep)
{
	struct request_queue *q = rq->q;
	struct blk_mq_queue_data bd = {
		.rq = rq,
		.last = true,
	};
	blk_qc_t new_cookie;
	blk_status_t ret;
	bool run_queue = true;

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
		run_queue = false;
		goto insert;
	}

	if (q->elevator)
		goto insert;

	if (!blk_mq_get_driver_tag(rq, NULL, false))
		goto insert;

	if (!blk_mq_get_dispatch_budget(hctx)) {
		blk_mq_put_driver_tag(rq);
		goto insert;
	}

	new_cookie = request_to_qc_t(hctx, rq);

	/*
	 * For OK queue, we are done. For error, kill it. Any other
	 * error (busy), just add it to our list as we previously
	 * would have done.
	 */
	ret = q->mq_ops->queue_rq(hctx, &bd);
	switch (ret) {
	case BLK_STS_OK:
		*cookie = new_cookie;
		return;
	case BLK_STS_RESOURCE:
		__blk_mq_requeue_request(rq);
		goto insert;
	default:
		*cookie = BLK_QC_T_NONE;
		blk_mq_end_request(rq, ret);
		return;
	}

insert:
	blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
}

static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
		struct request *rq, blk_qc_t *cookie)
{
	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
		rcu_read_lock();
		__blk_mq_try_issue_directly(hctx, rq, cookie, false);
		rcu_read_unlock();
	} else {
		unsigned int srcu_idx;

		might_sleep();

		srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
		__blk_mq_try_issue_directly(hctx, rq, cookie, true);
		srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
	}
}

static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
	const int is_sync = op_is_sync(bio->bi_opf);
	const int is_flush_fua = op_is_flush(bio->bi_opf);
	struct blk_mq_alloc_data data = { .flags = 0 };
	struct request *rq;
	unsigned int request_count = 0;
	struct blk_plug *plug;
	struct request *same_queue_rq = NULL;
	blk_qc_t cookie;
	unsigned int wb_acct;

	blk_queue_bounce(q, &bio);

	blk_queue_split(q, &bio);

	if (!bio_integrity_prep(bio))
		return BLK_QC_T_NONE;

	if (!is_flush_fua && !blk_queue_nomerges(q) &&
	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
		return BLK_QC_T_NONE;

	if (blk_mq_sched_bio_merge(q, bio))
		return BLK_QC_T_NONE;

	wb_acct = wbt_wait(q->rq_wb, bio, NULL);

	trace_block_getrq(q, bio, bio->bi_opf);

	rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
	if (unlikely(!rq)) {
		__wbt_done(q->rq_wb, wb_acct);
		if (bio->bi_opf & REQ_NOWAIT)
			bio_wouldblock_error(bio);
		return BLK_QC_T_NONE;
	}

	wbt_track(&rq->issue_stat, wb_acct);

	cookie = request_to_qc_t(data.hctx, rq);

	plug = current->plug;
	if (unlikely(is_flush_fua)) {
		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);

		/* bypass scheduler for flush rq */
		blk_insert_flush(rq);
		blk_mq_run_hw_queue(data.hctx, true);
	} else if (plug && q->nr_hw_queues == 1) {
		struct request *last = NULL;

		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);

		/*
		 * @request_count may become stale because of schedule
		 * out, so check the list again.
		 */
		if (list_empty(&plug->mq_list))
			request_count = 0;
		else if (blk_queue_nomerges(q))
			request_count = blk_plug_queued_count(q);

		if (!request_count)
			trace_block_plug(q);
		else
			last = list_entry_rq(plug->mq_list.prev);

		if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
			blk_flush_plug_list(plug, false);
			trace_block_plug(q);
		}

		list_add_tail(&rq->queuelist, &plug->mq_list);
	} else if (plug && !blk_queue_nomerges(q)) {
		blk_mq_bio_to_request(rq, bio);

		/*
		 * We do limited plugging. If the bio can be merged, do that.
		 * Otherwise the existing request in the plug list will be
		 * issued. So the plug list will have one request at most.
		 * The plug list might get flushed before this. If that happens,
		 * the plug list is empty, and same_queue_rq is invalid.
		 */
		if (list_empty(&plug->mq_list))
			same_queue_rq = NULL;
		if (same_queue_rq)
			list_del_init(&same_queue_rq->queuelist);
		list_add_tail(&rq->queuelist, &plug->mq_list);

		blk_mq_put_ctx(data.ctx);

		if (same_queue_rq) {
			data.hctx = blk_mq_map_queue(q,
					same_queue_rq->mq_ctx->cpu);
			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
					&cookie);
		}
	} else if (q->nr_hw_queues > 1 && is_sync) {
		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);
		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
	} else if (q->elevator) {
		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);
		blk_mq_sched_insert_request(rq, false, true, true, true);
	} else {
		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);
		blk_mq_queue_io(data.hctx, data.ctx, rq);
		blk_mq_run_hw_queue(data.hctx, true);
	}

	return cookie;
}

void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx)
{
	struct page *page;

	if (tags->rqs && set->ops->exit_request) {
		int i;

		for (i = 0; i < tags->nr_tags; i++) {
			struct request *rq = tags->static_rqs[i];

			if (!rq)
				continue;
			set->ops->exit_request(set, rq, hctx_idx);
			tags->static_rqs[i] = NULL;
		}
	}

	while (!list_empty(&tags->page_list)) {
		page = list_first_entry(&tags->page_list, struct page, lru);
		list_del_init(&page->lru);
		/*
		 * Remove kmemleak object previously allocated in
		 * blk_mq_alloc_rqs().
		 */
		kmemleak_free(page_address(page));
		__free_pages(page, page->private);
	}
}

void blk_mq_free_rq_map(struct blk_mq_tags *tags)
{
	kfree(tags->rqs);
	tags->rqs = NULL;
	kfree(tags->static_rqs);
	tags->static_rqs = NULL;

	blk_mq_free_tags(tags);
}

struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags)
{
	struct blk_mq_tags *tags;
	int node;

	node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
	if (node == NUMA_NO_NODE)
		node = set->numa_node;

	tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
	if (!tags)
		return NULL;

	tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
				 node);
	if (!tags->rqs) {
		blk_mq_free_tags(tags);
		return NULL;
	}

	tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
					GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
					node);
	if (!tags->static_rqs) {
		kfree(tags->rqs);
		blk_mq_free_tags(tags);
		return NULL;
	}

	return tags;
}

static size_t order_to_size(unsigned int order)
{
	return (size_t)PAGE_SIZE << order;
}

int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth)
{
	unsigned int i, j, entries_per_page, max_order = 4;
	size_t rq_size, left;
	int node;

	node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
	if (node == NUMA_NO_NODE)
		node = set->numa_node;

	INIT_LIST_HEAD(&tags->page_list);

	/*
	 * rq_size is the size of the request plus driver payload, rounded
	 * to the cacheline size
	 */
	rq_size = round_up(sizeof(struct request) + set->cmd_size,
			   cache_line_size());
	left = rq_size * depth;

	for (i = 0; i < depth; ) {
		int this_order = max_order;
		struct page *page;
		int to_do;
		void *p;

		while (this_order && left < order_to_size(this_order - 1))
			this_order--;

		do {
			page = alloc_pages_node(node,
				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
				this_order);
			if (page)
				break;
			if (!this_order--)
				break;
			if (order_to_size(this_order) < rq_size)
				break;
		} while (1);

		if (!page)
			goto fail;

		page->private = this_order;
		list_add_tail(&page->lru, &tags->page_list);

		p = page_address(page);
		/*
		 * Allow kmemleak to scan these pages as they contain pointers
		 * to additional allocations like via ops->init_request().
		 */
		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
		entries_per_page = order_to_size(this_order) / rq_size;
		to_do = min(entries_per_page, depth - i);
		left -= to_do * rq_size;
		for (j = 0; j < to_do; j++) {
			struct request *rq = p;

			tags->static_rqs[i] = rq;
			if (set->ops->init_request) {
				if (set->ops->init_request(set, rq, hctx_idx,
							   node)) {
					tags->static_rqs[i] = NULL;
					goto fail;
				}
			}

			p += rq_size;
			i++;
		}
	}
	return 0;

fail:
	blk_mq_free_rqs(set, tags, hctx_idx);
	return -ENOMEM;
}

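/*
 * Illustrative sizing of the allocation above (assumed numbers): with
 * 64-byte cache lines and a driver cmd_size of 192, rq_size is
 * sizeof(struct request) + 192 rounded up to a multiple of 64. A
 * max_order allocation of 4 (64KB with 4K pages) then holds
 * 65536 / rq_size requests, and the loop falls back to smaller orders
 * when such an allocation cannot be satisfied.
 */
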
/*
 * 'cpu' is going away. splice any existing rq_list entries from this
 * software queue to the hw queue dispatch list, and ensure that it
 * gets run.
 */
static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	LIST_HEAD(tmp);

	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
	ctx = __blk_mq_get_ctx(hctx->queue, cpu);

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_list)) {
		list_splice_init(&ctx->rq_list, &tmp);
		blk_mq_hctx_clear_pending(hctx, ctx);
	}
	spin_unlock(&ctx->lock);

	if (list_empty(&tmp))
		return 0;

	spin_lock(&hctx->lock);
	list_splice_tail_init(&tmp, &hctx->dispatch);
	spin_unlock(&hctx->lock);

	blk_mq_run_hw_queue(hctx, true);
	return 0;
}

static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
{
	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
					    &hctx->cpuhp_dead);
}

/* hctx->ctxs will be freed in queue's release handler */
static void blk_mq_exit_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	blk_mq_debugfs_unregister_hctx(hctx);

	blk_mq_tag_idle(hctx);

	if (set->ops->exit_request)
		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);

	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);

	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);

	if (hctx->flags & BLK_MQ_F_BLOCKING)
		cleanup_srcu_struct(hctx->queue_rq_srcu);

	blk_mq_remove_cpuhp(hctx);
	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
}

static void blk_mq_exit_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set, int nr_queue)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (i == nr_queue)
			break;
		blk_mq_exit_hctx(q, set, hctx, i);
	}
}

static int blk_mq_init_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
{
	int node;

	node = hctx->numa_node;
	if (node == NUMA_NO_NODE)
		node = hctx->numa_node = set->numa_node;

	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
	spin_lock_init(&hctx->lock);
	INIT_LIST_HEAD(&hctx->dispatch);
	hctx->queue = q;
	hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;

	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);

	hctx->tags = set->tags[hctx_idx];

	/*
	 * Allocate space for all possible cpus to avoid allocation at
	 * runtime
	 */
	hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
					GFP_KERNEL, node);
	if (!hctx->ctxs)
		goto unregister_cpu_notifier;

	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
			      node))
		goto free_ctxs;

	hctx->nr_ctx = 0;

	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
	INIT_LIST_HEAD(&hctx->dispatch_wait.entry);

	if (set->ops->init_hctx &&
	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
		goto free_bitmap;

	if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
		goto exit_hctx;

	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
	if (!hctx->fq)
		goto sched_exit_hctx;
2074 2075 if (set->ops->init_request && 2076 set->ops->init_request(set, hctx->fq->flush_rq, hctx_idx, 2077 node)) 2078 goto free_fq; 2079 2080 if (hctx->flags & BLK_MQ_F_BLOCKING) 2081 init_srcu_struct(hctx->queue_rq_srcu); 2082 2083 blk_mq_debugfs_register_hctx(q, hctx); 2084 2085 return 0; 2086 2087 free_fq: 2088 kfree(hctx->fq); 2089 sched_exit_hctx: 2090 blk_mq_sched_exit_hctx(q, hctx, hctx_idx); 2091 exit_hctx: 2092 if (set->ops->exit_hctx) 2093 set->ops->exit_hctx(hctx, hctx_idx); 2094 free_bitmap: 2095 sbitmap_free(&hctx->ctx_map); 2096 free_ctxs: 2097 kfree(hctx->ctxs); 2098 unregister_cpu_notifier: 2099 blk_mq_remove_cpuhp(hctx); 2100 return -1; 2101 } 2102 2103 static void blk_mq_init_cpu_queues(struct request_queue *q, 2104 unsigned int nr_hw_queues) 2105 { 2106 unsigned int i; 2107 2108 for_each_possible_cpu(i) { 2109 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); 2110 struct blk_mq_hw_ctx *hctx; 2111 2112 __ctx->cpu = i; 2113 spin_lock_init(&__ctx->lock); 2114 INIT_LIST_HEAD(&__ctx->rq_list); 2115 __ctx->queue = q; 2116 2117 /* If the cpu isn't present, the cpu is mapped to first hctx */ 2118 if (!cpu_present(i)) 2119 continue; 2120 2121 hctx = blk_mq_map_queue(q, i); 2122 2123 /* 2124 * Set local node, IFF we have more than one hw queue. If 2125 * not, we remain on the home node of the device 2126 */ 2127 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE) 2128 hctx->numa_node = local_memory_node(cpu_to_node(i)); 2129 } 2130 } 2131 2132 static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx) 2133 { 2134 int ret = 0; 2135 2136 set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx, 2137 set->queue_depth, set->reserved_tags); 2138 if (!set->tags[hctx_idx]) 2139 return false; 2140 2141 ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx, 2142 set->queue_depth); 2143 if (!ret) 2144 return true; 2145 2146 blk_mq_free_rq_map(set->tags[hctx_idx]); 2147 set->tags[hctx_idx] = NULL; 2148 return false; 2149 } 2150 2151 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set, 2152 unsigned int hctx_idx) 2153 { 2154 if (set->tags[hctx_idx]) { 2155 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx); 2156 blk_mq_free_rq_map(set->tags[hctx_idx]); 2157 set->tags[hctx_idx] = NULL; 2158 } 2159 } 2160 2161 static void blk_mq_map_swqueue(struct request_queue *q) 2162 { 2163 unsigned int i, hctx_idx; 2164 struct blk_mq_hw_ctx *hctx; 2165 struct blk_mq_ctx *ctx; 2166 struct blk_mq_tag_set *set = q->tag_set; 2167 2168 /* 2169 * Avoid others reading incomplete hctx->cpumask through sysfs 2170 */ 2171 mutex_lock(&q->sysfs_lock); 2172 2173 queue_for_each_hw_ctx(q, hctx, i) { 2174 cpumask_clear(hctx->cpumask); 2175 hctx->nr_ctx = 0; 2176 } 2177 2178 /* 2179 * Map software to hardware queues. 2180 * 2181 * If the cpu isn't present, the cpu is mapped to first hctx. 2182 */ 2183 for_each_present_cpu(i) { 2184 hctx_idx = q->mq_map[i]; 2185 /* unmapped hw queue can be remapped after CPU topo changed */ 2186 if (!set->tags[hctx_idx] && 2187 !__blk_mq_alloc_rq_map(set, hctx_idx)) { 2188 /* 2189 * If tags initialization fails for some hctx, 2190 * that hctx won't be brought online.
In this 2191 * case, remap the current ctx to hctx[0] which 2192 * is guaranteed to always have tags allocated 2193 */ 2194 q->mq_map[i] = 0; 2195 } 2196 2197 ctx = per_cpu_ptr(q->queue_ctx, i); 2198 hctx = blk_mq_map_queue(q, i); 2199 2200 cpumask_set_cpu(i, hctx->cpumask); 2201 ctx->index_hw = hctx->nr_ctx; 2202 hctx->ctxs[hctx->nr_ctx++] = ctx; 2203 } 2204 2205 mutex_unlock(&q->sysfs_lock); 2206 2207 queue_for_each_hw_ctx(q, hctx, i) { 2208 /* 2209 * If no software queues are mapped to this hardware queue, 2210 * disable it and free the request entries. 2211 */ 2212 if (!hctx->nr_ctx) { 2213 /* Never unmap queue 0. We need it as a 2214 * fallback in case a new remap fails 2215 * allocation 2216 */ 2217 if (i && set->tags[i]) 2218 blk_mq_free_map_and_requests(set, i); 2219 2220 hctx->tags = NULL; 2221 continue; 2222 } 2223 2224 hctx->tags = set->tags[i]; 2225 WARN_ON(!hctx->tags); 2226 2227 /* 2228 * Set the map size to the number of mapped software queues. 2229 * This is more accurate and more efficient than looping 2230 * over all possibly mapped software queues. 2231 */ 2232 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx); 2233 2234 /* 2235 * Initialize batch round-robin counts 2236 */ 2237 hctx->next_cpu = cpumask_first(hctx->cpumask); 2238 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; 2239 } 2240 } 2241 2242 /* 2243 * Caller needs to ensure that we're either frozen/quiesced, or that 2244 * the queue isn't live yet. 2245 */ 2246 static void queue_set_hctx_shared(struct request_queue *q, bool shared) 2247 { 2248 struct blk_mq_hw_ctx *hctx; 2249 int i; 2250 2251 queue_for_each_hw_ctx(q, hctx, i) { 2252 if (shared) { 2253 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) 2254 atomic_inc(&q->shared_hctx_restart); 2255 hctx->flags |= BLK_MQ_F_TAG_SHARED; 2256 } else { 2257 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) 2258 atomic_dec(&q->shared_hctx_restart); 2259 hctx->flags &= ~BLK_MQ_F_TAG_SHARED; 2260 } 2261 } 2262 } 2263 2264 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, 2265 bool shared) 2266 { 2267 struct request_queue *q; 2268 2269 lockdep_assert_held(&set->tag_list_lock); 2270 2271 list_for_each_entry(q, &set->tag_list, tag_set_list) { 2272 blk_mq_freeze_queue(q); 2273 queue_set_hctx_shared(q, shared); 2274 blk_mq_unfreeze_queue(q); 2275 } 2276 } 2277 2278 static void blk_mq_del_queue_tag_set(struct request_queue *q) 2279 { 2280 struct blk_mq_tag_set *set = q->tag_set; 2281 2282 mutex_lock(&set->tag_list_lock); 2283 list_del_rcu(&q->tag_set_list); 2284 INIT_LIST_HEAD(&q->tag_set_list); 2285 if (list_is_singular(&set->tag_list)) { 2286 /* just transitioned to unshared */ 2287 set->flags &= ~BLK_MQ_F_TAG_SHARED; 2288 /* update existing queue */ 2289 blk_mq_update_tag_set_depth(set, false); 2290 } 2291 mutex_unlock(&set->tag_list_lock); 2292 2293 synchronize_rcu(); 2294 } 2295 2296 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, 2297 struct request_queue *q) 2298 { 2299 q->tag_set = set; 2300 2301 mutex_lock(&set->tag_list_lock); 2302 2303 /* 2304 * Check to see if we're transitioning to shared (from 1 to 2 queues).
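 * If so, every queue already in the set must be frozen and switched to shared tags as well before the new queue is added.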
2305 */ 2306 if (!list_empty(&set->tag_list) && 2307 !(set->flags & BLK_MQ_F_TAG_SHARED)) { 2308 set->flags |= BLK_MQ_F_TAG_SHARED; 2309 /* update existing queue */ 2310 blk_mq_update_tag_set_depth(set, true); 2311 } 2312 if (set->flags & BLK_MQ_F_TAG_SHARED) 2313 queue_set_hctx_shared(q, true); 2314 list_add_tail_rcu(&q->tag_set_list, &set->tag_list); 2315 2316 mutex_unlock(&set->tag_list_lock); 2317 } 2318 2319 /* 2320 * It is the actual release handler for mq, but we do it from 2321 * request queue's release handler for avoiding use-after-free 2322 * and headache because q->mq_kobj shouldn't have been introduced, 2323 * but we can't group ctx/kctx kobj without it. 2324 */ 2325 void blk_mq_release(struct request_queue *q) 2326 { 2327 struct blk_mq_hw_ctx *hctx; 2328 unsigned int i; 2329 2330 /* hctx kobj stays in hctx */ 2331 queue_for_each_hw_ctx(q, hctx, i) { 2332 if (!hctx) 2333 continue; 2334 kobject_put(&hctx->kobj); 2335 } 2336 2337 q->mq_map = NULL; 2338 2339 kfree(q->queue_hw_ctx); 2340 2341 /* 2342 * release .mq_kobj and sw queue's kobject now because 2343 * both share lifetime with request queue. 2344 */ 2345 blk_mq_sysfs_deinit(q); 2346 2347 free_percpu(q->queue_ctx); 2348 } 2349 2350 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) 2351 { 2352 struct request_queue *uninit_q, *q; 2353 2354 uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node); 2355 if (!uninit_q) 2356 return ERR_PTR(-ENOMEM); 2357 2358 q = blk_mq_init_allocated_queue(set, uninit_q); 2359 if (IS_ERR(q)) 2360 blk_cleanup_queue(uninit_q); 2361 2362 return q; 2363 } 2364 EXPORT_SYMBOL(blk_mq_init_queue); 2365 2366 static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set) 2367 { 2368 int hw_ctx_size = sizeof(struct blk_mq_hw_ctx); 2369 2370 BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, queue_rq_srcu), 2371 __alignof__(struct blk_mq_hw_ctx)) != 2372 sizeof(struct blk_mq_hw_ctx)); 2373 2374 if (tag_set->flags & BLK_MQ_F_BLOCKING) 2375 hw_ctx_size += sizeof(struct srcu_struct); 2376 2377 return hw_ctx_size; 2378 } 2379 2380 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, 2381 struct request_queue *q) 2382 { 2383 int i, j; 2384 struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx; 2385 2386 blk_mq_sysfs_unregister(q); 2387 for (i = 0; i < set->nr_hw_queues; i++) { 2388 int node; 2389 2390 if (hctxs[i]) 2391 continue; 2392 2393 node = blk_mq_hw_queue_to_node(q->mq_map, i); 2394 hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set), 2395 GFP_KERNEL, node); 2396 if (!hctxs[i]) 2397 break; 2398 2399 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL, 2400 node)) { 2401 kfree(hctxs[i]); 2402 hctxs[i] = NULL; 2403 break; 2404 } 2405 2406 atomic_set(&hctxs[i]->nr_active, 0); 2407 hctxs[i]->numa_node = node; 2408 hctxs[i]->queue_num = i; 2409 2410 if (blk_mq_init_hctx(q, set, hctxs[i], i)) { 2411 free_cpumask_var(hctxs[i]->cpumask); 2412 kfree(hctxs[i]); 2413 hctxs[i] = NULL; 2414 break; 2415 } 2416 blk_mq_hctx_kobj_init(hctxs[i]); 2417 } 2418 for (j = i; j < q->nr_hw_queues; j++) { 2419 struct blk_mq_hw_ctx *hctx = hctxs[j]; 2420 2421 if (hctx) { 2422 if (hctx->tags) 2423 blk_mq_free_map_and_requests(set, j); 2424 blk_mq_exit_hctx(q, set, hctx, j); 2425 kobject_put(&hctx->kobj); 2426 hctxs[j] = NULL; 2427 2428 } 2429 } 2430 q->nr_hw_queues = i; 2431 blk_mq_sysfs_register(q); 2432 } 2433 2434 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, 2435 struct request_queue *q) 2436 { 2437 /* mark the queue as mq asap */ 2438 q->mq_ops = set->ops; 2439 2440 
q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn, 2441 blk_mq_poll_stats_bkt, 2442 BLK_MQ_POLL_STATS_BKTS, q); 2443 if (!q->poll_cb) 2444 goto err_exit; 2445 2446 q->queue_ctx = alloc_percpu(struct blk_mq_ctx); 2447 if (!q->queue_ctx) 2448 goto err_exit; 2449 2450 /* init q->mq_kobj and sw queues' kobjects */ 2451 blk_mq_sysfs_init(q); 2452 2453 q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)), 2454 GFP_KERNEL, set->numa_node); 2455 if (!q->queue_hw_ctx) 2456 goto err_percpu; 2457 2458 q->mq_map = set->mq_map; 2459 2460 blk_mq_realloc_hw_ctxs(set, q); 2461 if (!q->nr_hw_queues) 2462 goto err_hctxs; 2463 2464 INIT_WORK(&q->timeout_work, blk_mq_timeout_work); 2465 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ); 2466 2467 q->nr_queues = nr_cpu_ids; 2468 2469 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; 2470 2471 if (!(set->flags & BLK_MQ_F_SG_MERGE)) 2472 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE; 2473 2474 q->sg_reserved_size = INT_MAX; 2475 2476 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work); 2477 INIT_LIST_HEAD(&q->requeue_list); 2478 spin_lock_init(&q->requeue_lock); 2479 2480 blk_queue_make_request(q, blk_mq_make_request); 2481 if (q->mq_ops->poll) 2482 q->poll_fn = blk_mq_poll; 2483 2484 /* 2485 * Do this after blk_queue_make_request() overrides it... 2486 */ 2487 q->nr_requests = set->queue_depth; 2488 2489 /* 2490 * Default to classic polling 2491 */ 2492 q->poll_nsec = -1; 2493 2494 if (set->ops->complete) 2495 blk_queue_softirq_done(q, set->ops->complete); 2496 2497 blk_mq_init_cpu_queues(q, set->nr_hw_queues); 2498 blk_mq_add_queue_tag_set(set, q); 2499 blk_mq_map_swqueue(q); 2500 2501 if (!(set->flags & BLK_MQ_F_NO_SCHED)) { 2502 int ret; 2503 2504 ret = blk_mq_sched_init(q); 2505 if (ret) 2506 return ERR_PTR(ret); 2507 } 2508 2509 return q; 2510 2511 err_hctxs: 2512 kfree(q->queue_hw_ctx); 2513 err_percpu: 2514 free_percpu(q->queue_ctx); 2515 err_exit: 2516 q->mq_ops = NULL; 2517 return ERR_PTR(-ENOMEM); 2518 } 2519 EXPORT_SYMBOL(blk_mq_init_allocated_queue); 2520 2521 void blk_mq_free_queue(struct request_queue *q) 2522 { 2523 struct blk_mq_tag_set *set = q->tag_set; 2524 2525 blk_mq_del_queue_tag_set(q); 2526 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); 2527 } 2528 2529 /* Basically redo blk_mq_init_queue with queue frozen */ 2530 static void blk_mq_queue_reinit(struct request_queue *q) 2531 { 2532 WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth)); 2533 2534 blk_mq_debugfs_unregister_hctxs(q); 2535 blk_mq_sysfs_unregister(q); 2536 2537 /* 2538 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe 2539 * we should change hctx numa_node according to the new topology (this 2540 * involves freeing and re-allocating memory, worth doing?) 2541 */ 2542 blk_mq_map_swqueue(q); 2543 2544 blk_mq_sysfs_register(q); 2545 blk_mq_debugfs_register_hctxs(q); 2546 } 2547 2548 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) 2549 { 2550 int i; 2551 2552 for (i = 0; i < set->nr_hw_queues; i++) 2553 if (!__blk_mq_alloc_rq_map(set, i)) 2554 goto out_unwind; 2555 2556 return 0; 2557 2558 out_unwind: 2559 while (--i >= 0) 2560 blk_mq_free_rq_map(set->tags[i]); 2561 2562 return -ENOMEM; 2563 } 2564 2565 /* 2566 * Allocate the request maps associated with this tag_set. Note that this 2567 * may reduce the depth asked for, if memory is tight. set->queue_depth 2568 * will be updated to reflect the allocated depth. 
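 * (Each failed attempt halves set->queue_depth; we give up once it drops below set->reserved_tags + BLK_MQ_TAG_MIN.)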
2569 */ 2570 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) 2571 { 2572 unsigned int depth; 2573 int err; 2574 2575 depth = set->queue_depth; 2576 do { 2577 err = __blk_mq_alloc_rq_maps(set); 2578 if (!err) 2579 break; 2580 2581 set->queue_depth >>= 1; 2582 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) { 2583 err = -ENOMEM; 2584 break; 2585 } 2586 } while (set->queue_depth); 2587 2588 if (!set->queue_depth || err) { 2589 pr_err("blk-mq: failed to allocate request map\n"); 2590 return -ENOMEM; 2591 } 2592 2593 if (depth != set->queue_depth) 2594 pr_info("blk-mq: reduced tag depth (%u -> %u)\n", 2595 depth, set->queue_depth); 2596 2597 return 0; 2598 } 2599 2600 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set) 2601 { 2602 if (set->ops->map_queues) 2603 return set->ops->map_queues(set); 2604 else 2605 return blk_mq_map_queues(set); 2606 } 2607 2608 /* 2609 * Alloc a tag set to be associated with one or more request queues. 2610 * May fail with EINVAL for various error conditions. May adjust the 2611 * requested depth down, if it is too large. In that case, the set 2612 * value will be stored in set->queue_depth. 2613 */ 2614 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) 2615 { 2616 int ret; 2617 2618 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS); 2619 2620 if (!set->nr_hw_queues) 2621 return -EINVAL; 2622 if (!set->queue_depth) 2623 return -EINVAL; 2624 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) 2625 return -EINVAL; 2626 2627 if (!set->ops->queue_rq) 2628 return -EINVAL; 2629 2630 if (!set->ops->get_budget ^ !set->ops->put_budget) 2631 return -EINVAL; 2632 2633 if (set->queue_depth > BLK_MQ_MAX_DEPTH) { 2634 pr_info("blk-mq: reduced tag depth to %u\n", 2635 BLK_MQ_MAX_DEPTH); 2636 set->queue_depth = BLK_MQ_MAX_DEPTH; 2637 } 2638 2639 /* 2640 * If a crashdump is active, then we are potentially in a very 2641 * memory constrained environment. Limit us to 1 queue and 2642 * 64 tags to prevent using too much memory. 2643 */ 2644 if (is_kdump_kernel()) { 2645 set->nr_hw_queues = 1; 2646 set->queue_depth = min(64U, set->queue_depth); 2647 } 2648 /* 2649 * There is no use for more h/w queues than cpus.
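 * Cap nr_hw_queues at nr_cpu_ids so that every hardware queue has at least one CPU that can map to it.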
2650 */ 2651 if (set->nr_hw_queues > nr_cpu_ids) 2652 set->nr_hw_queues = nr_cpu_ids; 2653 2654 set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *), 2655 GFP_KERNEL, set->numa_node); 2656 if (!set->tags) 2657 return -ENOMEM; 2658 2659 ret = -ENOMEM; 2660 set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids, 2661 GFP_KERNEL, set->numa_node); 2662 if (!set->mq_map) 2663 goto out_free_tags; 2664 2665 ret = blk_mq_update_queue_map(set); 2666 if (ret) 2667 goto out_free_mq_map; 2668 2669 ret = blk_mq_alloc_rq_maps(set); 2670 if (ret) 2671 goto out_free_mq_map; 2672 2673 mutex_init(&set->tag_list_lock); 2674 INIT_LIST_HEAD(&set->tag_list); 2675 2676 return 0; 2677 2678 out_free_mq_map: 2679 kfree(set->mq_map); 2680 set->mq_map = NULL; 2681 out_free_tags: 2682 kfree(set->tags); 2683 set->tags = NULL; 2684 return ret; 2685 } 2686 EXPORT_SYMBOL(blk_mq_alloc_tag_set); 2687 2688 void blk_mq_free_tag_set(struct blk_mq_tag_set *set) 2689 { 2690 int i; 2691 2692 for (i = 0; i < nr_cpu_ids; i++) 2693 blk_mq_free_map_and_requests(set, i); 2694 2695 kfree(set->mq_map); 2696 set->mq_map = NULL; 2697 2698 kfree(set->tags); 2699 set->tags = NULL; 2700 } 2701 EXPORT_SYMBOL(blk_mq_free_tag_set); 2702 2703 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) 2704 { 2705 struct blk_mq_tag_set *set = q->tag_set; 2706 struct blk_mq_hw_ctx *hctx; 2707 int i, ret; 2708 2709 if (!set) 2710 return -EINVAL; 2711 2712 blk_mq_freeze_queue(q); 2713 2714 ret = 0; 2715 queue_for_each_hw_ctx(q, hctx, i) { 2716 if (!hctx->tags) 2717 continue; 2718 /* 2719 * If we're using an MQ scheduler, just update the scheduler 2720 * queue depth. This is similar to what the old code would do. 2721 */ 2722 if (!hctx->sched_tags) { 2723 ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr, 2724 false); 2725 } else { 2726 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags, 2727 nr, true); 2728 } 2729 if (ret) 2730 break; 2731 } 2732 2733 if (!ret) 2734 q->nr_requests = nr; 2735 2736 blk_mq_unfreeze_queue(q); 2737 2738 return ret; 2739 } 2740 2741 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, 2742 int nr_hw_queues) 2743 { 2744 struct request_queue *q; 2745 2746 lockdep_assert_held(&set->tag_list_lock); 2747 2748 if (nr_hw_queues > nr_cpu_ids) 2749 nr_hw_queues = nr_cpu_ids; 2750 if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues) 2751 return; 2752 2753 list_for_each_entry(q, &set->tag_list, tag_set_list) 2754 blk_mq_freeze_queue(q); 2755 2756 set->nr_hw_queues = nr_hw_queues; 2757 blk_mq_update_queue_map(set); 2758 list_for_each_entry(q, &set->tag_list, tag_set_list) { 2759 blk_mq_realloc_hw_ctxs(set, q); 2760 blk_mq_queue_reinit(q); 2761 } 2762 2763 list_for_each_entry(q, &set->tag_list, tag_set_list) 2764 blk_mq_unfreeze_queue(q); 2765 } 2766 2767 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) 2768 { 2769 mutex_lock(&set->tag_list_lock); 2770 __blk_mq_update_nr_hw_queues(set, nr_hw_queues); 2771 mutex_unlock(&set->tag_list_lock); 2772 } 2773 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues); 2774 2775 /* Enable polling stats and return whether they were already enabled. 
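 * The first caller to enable them also registers q->poll_cb so that completion samples start being collected.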
*/ 2776 static bool blk_poll_stats_enable(struct request_queue *q) 2777 { 2778 if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) || 2779 test_and_set_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags)) 2780 return true; 2781 blk_stat_add_callback(q, q->poll_cb); 2782 return false; 2783 } 2784 2785 static void blk_mq_poll_stats_start(struct request_queue *q) 2786 { 2787 /* 2788 * We don't arm the callback if polling stats are not enabled or the 2789 * callback is already active. 2790 */ 2791 if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) || 2792 blk_stat_is_active(q->poll_cb)) 2793 return; 2794 2795 blk_stat_activate_msecs(q->poll_cb, 100); 2796 } 2797 2798 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb) 2799 { 2800 struct request_queue *q = cb->data; 2801 int bucket; 2802 2803 for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) { 2804 if (cb->stat[bucket].nr_samples) 2805 q->poll_stat[bucket] = cb->stat[bucket]; 2806 } 2807 } 2808 2809 static unsigned long blk_mq_poll_nsecs(struct request_queue *q, 2810 struct blk_mq_hw_ctx *hctx, 2811 struct request *rq) 2812 { 2813 unsigned long ret = 0; 2814 int bucket; 2815 2816 /* 2817 * If stats collection isn't on, don't sleep but turn it on for 2818 * future users 2819 */ 2820 if (!blk_poll_stats_enable(q)) 2821 return 0; 2822 2823 /* 2824 * As an optimistic guess, use half of the mean service time 2825 * for this type of request. We can (and should) make this smarter. 2826 * For instance, if the completion latencies are tight, we can 2827 * get closer than just half the mean. This is especially 2828 * important on devices where the completion latencies are longer 2829 * than ~10 usec. We do use the stats for the relevant IO size 2830 * if available which does lead to better estimates. 2831 */ 2832 bucket = blk_mq_poll_stats_bkt(rq); 2833 if (bucket < 0) 2834 return ret; 2835 2836 if (q->poll_stat[bucket].nr_samples) 2837 ret = (q->poll_stat[bucket].mean + 1) / 2; 2838 2839 return ret; 2840 } 2841 2842 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q, 2843 struct blk_mq_hw_ctx *hctx, 2844 struct request *rq) 2845 { 2846 struct hrtimer_sleeper hs; 2847 enum hrtimer_mode mode; 2848 unsigned int nsecs; 2849 ktime_t kt; 2850 2851 if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags)) 2852 return false; 2853 2854 /* 2855 * poll_nsec can be: 2856 * 2857 * -1: don't ever hybrid sleep 2858 * 0: use half of prev avg 2859 * >0: use this specific value 2860 */ 2861 if (q->poll_nsec == -1) 2862 return false; 2863 else if (q->poll_nsec > 0) 2864 nsecs = q->poll_nsec; 2865 else 2866 nsecs = blk_mq_poll_nsecs(q, hctx, rq); 2867 2868 if (!nsecs) 2869 return false; 2870 2871 set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags); 2872 2873 /* 2874 * This will be replaced with the stats tracking code, using 2875 * 'avg_completion_time / 2' as the pre-sleep target. 
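 * (When q->poll_nsec is 0, blk_mq_poll_nsecs() above already derives that estimate from the collected completion stats.)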
2876 */ 2877 kt = nsecs; 2878 2879 mode = HRTIMER_MODE_REL; 2880 hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode); 2881 hrtimer_set_expires(&hs.timer, kt); 2882 2883 hrtimer_init_sleeper(&hs, current); 2884 do { 2885 if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags)) 2886 break; 2887 set_current_state(TASK_UNINTERRUPTIBLE); 2888 hrtimer_start_expires(&hs.timer, mode); 2889 if (hs.task) 2890 io_schedule(); 2891 hrtimer_cancel(&hs.timer); 2892 mode = HRTIMER_MODE_ABS; 2893 } while (hs.task && !signal_pending(current)); 2894 2895 __set_current_state(TASK_RUNNING); 2896 destroy_hrtimer_on_stack(&hs.timer); 2897 return true; 2898 } 2899 2900 static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq) 2901 { 2902 struct request_queue *q = hctx->queue; 2903 long state; 2904 2905 /* 2906 * If we sleep, have the caller restart the poll loop to reset 2907 * the state. Like for the other success return cases, the 2908 * caller is responsible for checking if the IO completed. If 2909 * the IO isn't complete, we'll get called again and will go 2910 * straight to the busy poll loop. 2911 */ 2912 if (blk_mq_poll_hybrid_sleep(q, hctx, rq)) 2913 return true; 2914 2915 hctx->poll_considered++; 2916 2917 state = current->state; 2918 while (!need_resched()) { 2919 int ret; 2920 2921 hctx->poll_invoked++; 2922 2923 ret = q->mq_ops->poll(hctx, rq->tag); 2924 if (ret > 0) { 2925 hctx->poll_success++; 2926 set_current_state(TASK_RUNNING); 2927 return true; 2928 } 2929 2930 if (signal_pending_state(state, current)) 2931 set_current_state(TASK_RUNNING); 2932 2933 if (current->state == TASK_RUNNING) 2934 return true; 2935 if (ret < 0) 2936 break; 2937 cpu_relax(); 2938 } 2939 2940 return false; 2941 } 2942 2943 static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie) 2944 { 2945 struct blk_mq_hw_ctx *hctx; 2946 struct request *rq; 2947 2948 if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) 2949 return false; 2950 2951 hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)]; 2952 if (!blk_qc_t_is_internal(cookie)) 2953 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie)); 2954 else { 2955 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie)); 2956 /* 2957 * With scheduling, if the request has completed, we'll 2958 * get a NULL return here, as we clear the sched tag when 2959 * that happens. The request still remains valid, like always, 2960 * so we should be safe with just the NULL check. 2961 */ 2962 if (!rq) 2963 return false; 2964 } 2965 2966 return __blk_mq_poll(hctx, rq); 2967 } 2968 2969 static int __init blk_mq_init(void) 2970 { 2971 /* 2972 * See comment in block/blk.h rq_atomic_flags enum 2973 */ 2974 BUILD_BUG_ON((REQ_ATOM_STARTED / BITS_PER_BYTE) != 2975 (REQ_ATOM_COMPLETE / BITS_PER_BYTE)); 2976 2977 cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL, 2978 blk_mq_hctx_notify_dead); 2979 return 0; 2980 } 2981 subsys_initcall(blk_mq_init); 2982
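/*
 * For reference, the setup path exported above (blk_mq_alloc_tag_set() +
 * blk_mq_init_queue()) is typically consumed by a driver roughly as in the
 * sketch below. This is an illustrative outline only, not part of blk-mq;
 * my_queue_rq, my_mq_ops, my_tag_set, my_driver_setup and struct my_cmd are
 * hypothetical driver-side names.
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		struct my_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);	// cmd_size payload
 *
 *		blk_mq_start_request(bd->rq);
 *		// ...issue cmd to hardware; complete later via
 *		// blk_mq_complete_request()...
 *		return BLK_STS_OK;
 *	}
 *
 *	static const struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *	};
 *
 *	static struct blk_mq_tag_set my_tag_set;
 *
 *	static int my_driver_setup(void)
 *	{
 *		struct request_queue *q;
 *		int ret;
 *
 *		my_tag_set.ops		= &my_mq_ops;
 *		my_tag_set.nr_hw_queues	= 1;
 *		my_tag_set.queue_depth	= 64;
 *		my_tag_set.numa_node	= NUMA_NO_NODE;
 *		my_tag_set.cmd_size	= sizeof(struct my_cmd);
 *		my_tag_set.flags	= BLK_MQ_F_SHOULD_MERGE;
 *
 *		ret = blk_mq_alloc_tag_set(&my_tag_set);
 *		if (ret)
 *			return ret;
 *
 *		q = blk_mq_init_queue(&my_tag_set);
 *		if (IS_ERR(q)) {
 *			blk_mq_free_tag_set(&my_tag_set);
 *			return PTR_ERR(q);
 *		}
 *		// ...attach q to a gendisk; on teardown call blk_cleanup_queue()
 *		// followed by blk_mq_free_tag_set()...
 *		return 0;
 *	}
 */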