1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Block multiqueue core code 4 * 5 * Copyright (C) 2013-2014 Jens Axboe 6 * Copyright (C) 2013-2014 Christoph Hellwig 7 */ 8 #include <linux/kernel.h> 9 #include <linux/module.h> 10 #include <linux/backing-dev.h> 11 #include <linux/bio.h> 12 #include <linux/blkdev.h> 13 #include <linux/kmemleak.h> 14 #include <linux/mm.h> 15 #include <linux/init.h> 16 #include <linux/slab.h> 17 #include <linux/workqueue.h> 18 #include <linux/smp.h> 19 #include <linux/llist.h> 20 #include <linux/list_sort.h> 21 #include <linux/cpu.h> 22 #include <linux/cache.h> 23 #include <linux/sched/sysctl.h> 24 #include <linux/sched/topology.h> 25 #include <linux/sched/signal.h> 26 #include <linux/delay.h> 27 #include <linux/crash_dump.h> 28 #include <linux/prefetch.h> 29 #include <linux/blk-crypto.h> 30 31 #include <trace/events/block.h> 32 33 #include <linux/blk-mq.h> 34 #include <linux/t10-pi.h> 35 #include "blk.h" 36 #include "blk-mq.h" 37 #include "blk-mq-debugfs.h" 38 #include "blk-mq-tag.h" 39 #include "blk-pm.h" 40 #include "blk-stat.h" 41 #include "blk-mq-sched.h" 42 #include "blk-rq-qos.h" 43 44 static DEFINE_PER_CPU(struct list_head, blk_cpu_done); 45 46 static void blk_mq_poll_stats_start(struct request_queue *q); 47 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb); 48 49 static int blk_mq_poll_stats_bkt(const struct request *rq) 50 { 51 int ddir, sectors, bucket; 52 53 ddir = rq_data_dir(rq); 54 sectors = blk_rq_stats_sectors(rq); 55 56 bucket = ddir + 2 * ilog2(sectors); 57 58 if (bucket < 0) 59 return -1; 60 else if (bucket >= BLK_MQ_POLL_STATS_BKTS) 61 return ddir + BLK_MQ_POLL_STATS_BKTS - 2; 62 63 return bucket; 64 } 65 66 /* 67 * Check if any of the ctx, dispatch list or elevator 68 * have pending work in this hardware queue. 
69 */ 70 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) 71 { 72 return !list_empty_careful(&hctx->dispatch) || 73 sbitmap_any_bit_set(&hctx->ctx_map) || 74 blk_mq_sched_has_work(hctx); 75 } 76 77 /* 78 * Mark this ctx as having pending work in this hardware queue 79 */ 80 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, 81 struct blk_mq_ctx *ctx) 82 { 83 const int bit = ctx->index_hw[hctx->type]; 84 85 if (!sbitmap_test_bit(&hctx->ctx_map, bit)) 86 sbitmap_set_bit(&hctx->ctx_map, bit); 87 } 88 89 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, 90 struct blk_mq_ctx *ctx) 91 { 92 const int bit = ctx->index_hw[hctx->type]; 93 94 sbitmap_clear_bit(&hctx->ctx_map, bit); 95 } 96 97 struct mq_inflight { 98 struct hd_struct *part; 99 unsigned int inflight[2]; 100 }; 101 102 static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx, 103 struct request *rq, void *priv, 104 bool reserved) 105 { 106 struct mq_inflight *mi = priv; 107 108 if (rq->part == mi->part) 109 mi->inflight[rq_data_dir(rq)]++; 110 111 return true; 112 } 113 114 unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part) 115 { 116 struct mq_inflight mi = { .part = part }; 117 118 blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi); 119 120 return mi.inflight[0] + mi.inflight[1]; 121 } 122 123 void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part, 124 unsigned int inflight[2]) 125 { 126 struct mq_inflight mi = { .part = part }; 127 128 blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi); 129 inflight[0] = mi.inflight[0]; 130 inflight[1] = mi.inflight[1]; 131 } 132 133 void blk_freeze_queue_start(struct request_queue *q) 134 { 135 mutex_lock(&q->mq_freeze_lock); 136 if (++q->mq_freeze_depth == 1) { 137 percpu_ref_kill(&q->q_usage_counter); 138 mutex_unlock(&q->mq_freeze_lock); 139 if (queue_is_mq(q)) 140 blk_mq_run_hw_queues(q, false); 141 } else { 142 mutex_unlock(&q->mq_freeze_lock); 143 } 144 } 145 EXPORT_SYMBOL_GPL(blk_freeze_queue_start); 146 147 void blk_mq_freeze_queue_wait(struct request_queue *q) 148 { 149 wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter)); 150 } 151 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait); 152 153 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q, 154 unsigned long timeout) 155 { 156 return wait_event_timeout(q->mq_freeze_wq, 157 percpu_ref_is_zero(&q->q_usage_counter), 158 timeout); 159 } 160 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout); 161 162 /* 163 * Guarantee no request is in use, so we can change any data structure of 164 * the queue afterward. 165 */ 166 void blk_freeze_queue(struct request_queue *q) 167 { 168 /* 169 * In the !blk_mq case we are only calling this to kill the 170 * q_usage_counter, otherwise this increases the freeze depth 171 * and waits for it to return to zero. For this reason there is 172 * no blk_unfreeze_queue(), and blk_freeze_queue() is not 173 * exported to drivers as the only user for unfreeze is blk_mq. 
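	 *
	 * Note that blk_freeze_queue_start() and blk_mq_unfreeze_queue()
	 * balance q->mq_freeze_depth, so q_usage_counter is only killed on
	 * the first freeze and only resurrected once the depth drops back
	 * to zero.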
174 */ 175 blk_freeze_queue_start(q); 176 blk_mq_freeze_queue_wait(q); 177 } 178 179 void blk_mq_freeze_queue(struct request_queue *q) 180 { 181 /* 182 * ...just an alias to keep freeze and unfreeze actions balanced 183 * in the blk_mq_* namespace 184 */ 185 blk_freeze_queue(q); 186 } 187 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue); 188 189 void blk_mq_unfreeze_queue(struct request_queue *q) 190 { 191 mutex_lock(&q->mq_freeze_lock); 192 q->mq_freeze_depth--; 193 WARN_ON_ONCE(q->mq_freeze_depth < 0); 194 if (!q->mq_freeze_depth) { 195 percpu_ref_resurrect(&q->q_usage_counter); 196 wake_up_all(&q->mq_freeze_wq); 197 } 198 mutex_unlock(&q->mq_freeze_lock); 199 } 200 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue); 201 202 /* 203 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the 204 * mpt3sas driver such that this function can be removed. 205 */ 206 void blk_mq_quiesce_queue_nowait(struct request_queue *q) 207 { 208 blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q); 209 } 210 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait); 211 212 /** 213 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished 214 * @q: request queue. 215 * 216 * Note: this function does not prevent that the struct request end_io() 217 * callback function is invoked. Once this function is returned, we make 218 * sure no dispatch can happen until the queue is unquiesced via 219 * blk_mq_unquiesce_queue(). 220 */ 221 void blk_mq_quiesce_queue(struct request_queue *q) 222 { 223 struct blk_mq_hw_ctx *hctx; 224 unsigned int i; 225 bool rcu = false; 226 227 blk_mq_quiesce_queue_nowait(q); 228 229 queue_for_each_hw_ctx(q, hctx, i) { 230 if (hctx->flags & BLK_MQ_F_BLOCKING) 231 synchronize_srcu(hctx->srcu); 232 else 233 rcu = true; 234 } 235 if (rcu) 236 synchronize_rcu(); 237 } 238 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue); 239 240 /* 241 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue() 242 * @q: request queue. 243 * 244 * This function recovers queue into the state before quiescing 245 * which is done by blk_mq_quiesce_queue. 246 */ 247 void blk_mq_unquiesce_queue(struct request_queue *q) 248 { 249 blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q); 250 251 /* dispatch requests which are inserted during quiescing */ 252 blk_mq_run_hw_queues(q, true); 253 } 254 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue); 255 256 void blk_mq_wake_waiters(struct request_queue *q) 257 { 258 struct blk_mq_hw_ctx *hctx; 259 unsigned int i; 260 261 queue_for_each_hw_ctx(q, hctx, i) 262 if (blk_mq_hw_queue_mapped(hctx)) 263 blk_mq_tag_wakeup_all(hctx->tags, true); 264 } 265 266 /* 267 * Only need start/end time stamping if we have iostat or 268 * blk stats enabled, or using an IO scheduler. 
269 */ 270 static inline bool blk_mq_need_time_stamp(struct request *rq) 271 { 272 return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS)) || rq->q->elevator; 273 } 274 275 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, 276 unsigned int tag, u64 alloc_time_ns) 277 { 278 struct blk_mq_tags *tags = blk_mq_tags_from_data(data); 279 struct request *rq = tags->static_rqs[tag]; 280 281 if (data->q->elevator) { 282 rq->tag = BLK_MQ_NO_TAG; 283 rq->internal_tag = tag; 284 } else { 285 rq->tag = tag; 286 rq->internal_tag = BLK_MQ_NO_TAG; 287 } 288 289 /* csd/requeue_work/fifo_time is initialized before use */ 290 rq->q = data->q; 291 rq->mq_ctx = data->ctx; 292 rq->mq_hctx = data->hctx; 293 rq->rq_flags = 0; 294 rq->cmd_flags = data->cmd_flags; 295 if (data->flags & BLK_MQ_REQ_PREEMPT) 296 rq->rq_flags |= RQF_PREEMPT; 297 if (blk_queue_io_stat(data->q)) 298 rq->rq_flags |= RQF_IO_STAT; 299 INIT_LIST_HEAD(&rq->queuelist); 300 INIT_HLIST_NODE(&rq->hash); 301 RB_CLEAR_NODE(&rq->rb_node); 302 rq->rq_disk = NULL; 303 rq->part = NULL; 304 #ifdef CONFIG_BLK_RQ_ALLOC_TIME 305 rq->alloc_time_ns = alloc_time_ns; 306 #endif 307 if (blk_mq_need_time_stamp(rq)) 308 rq->start_time_ns = ktime_get_ns(); 309 else 310 rq->start_time_ns = 0; 311 rq->io_start_time_ns = 0; 312 rq->stats_sectors = 0; 313 rq->nr_phys_segments = 0; 314 #if defined(CONFIG_BLK_DEV_INTEGRITY) 315 rq->nr_integrity_segments = 0; 316 #endif 317 blk_crypto_rq_set_defaults(rq); 318 /* tag was already set */ 319 WRITE_ONCE(rq->deadline, 0); 320 321 rq->timeout = 0; 322 323 rq->end_io = NULL; 324 rq->end_io_data = NULL; 325 326 data->ctx->rq_dispatched[op_is_sync(data->cmd_flags)]++; 327 refcount_set(&rq->ref, 1); 328 329 if (!op_is_flush(data->cmd_flags)) { 330 struct elevator_queue *e = data->q->elevator; 331 332 rq->elv.icq = NULL; 333 if (e && e->type->ops.prepare_request) { 334 if (e->type->icq_cache) 335 blk_mq_sched_assign_ioc(rq); 336 337 e->type->ops.prepare_request(rq); 338 rq->rq_flags |= RQF_ELVPRIV; 339 } 340 } 341 342 data->hctx->queued++; 343 return rq; 344 } 345 346 static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data) 347 { 348 struct request_queue *q = data->q; 349 struct elevator_queue *e = q->elevator; 350 u64 alloc_time_ns = 0; 351 unsigned int tag; 352 353 /* alloc_time includes depth and tag waits */ 354 if (blk_queue_rq_alloc_time(q)) 355 alloc_time_ns = ktime_get_ns(); 356 357 if (data->cmd_flags & REQ_NOWAIT) 358 data->flags |= BLK_MQ_REQ_NOWAIT; 359 360 if (e) { 361 /* 362 * Flush requests are special and go directly to the 363 * dispatch list. Don't include reserved tags in the 364 * limiting, as it isn't useful. 365 */ 366 if (!op_is_flush(data->cmd_flags) && 367 e->type->ops.limit_depth && 368 !(data->flags & BLK_MQ_REQ_RESERVED)) 369 e->type->ops.limit_depth(data->cmd_flags, data); 370 } 371 372 retry: 373 data->ctx = blk_mq_get_ctx(q); 374 data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx); 375 if (!e) 376 blk_mq_tag_busy(data->hctx); 377 378 /* 379 * Waiting allocations only fail because of an inactive hctx. In that 380 * case just retry the hctx assignment and tag allocation as CPU hotplug 381 * should have migrated us to an online CPU by now. 
382 */ 383 tag = blk_mq_get_tag(data); 384 if (tag == BLK_MQ_NO_TAG) { 385 if (data->flags & BLK_MQ_REQ_NOWAIT) 386 return NULL; 387 388 /* 389 * Give up the CPU and sleep for a random short time to ensure 390 * that thread using a realtime scheduling class are migrated 391 * off the CPU, and thus off the hctx that is going away. 392 */ 393 msleep(3); 394 goto retry; 395 } 396 return blk_mq_rq_ctx_init(data, tag, alloc_time_ns); 397 } 398 399 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, 400 blk_mq_req_flags_t flags) 401 { 402 struct blk_mq_alloc_data data = { 403 .q = q, 404 .flags = flags, 405 .cmd_flags = op, 406 }; 407 struct request *rq; 408 int ret; 409 410 ret = blk_queue_enter(q, flags); 411 if (ret) 412 return ERR_PTR(ret); 413 414 rq = __blk_mq_alloc_request(&data); 415 if (!rq) 416 goto out_queue_exit; 417 rq->__data_len = 0; 418 rq->__sector = (sector_t) -1; 419 rq->bio = rq->biotail = NULL; 420 return rq; 421 out_queue_exit: 422 blk_queue_exit(q); 423 return ERR_PTR(-EWOULDBLOCK); 424 } 425 EXPORT_SYMBOL(blk_mq_alloc_request); 426 427 struct request *blk_mq_alloc_request_hctx(struct request_queue *q, 428 unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx) 429 { 430 struct blk_mq_alloc_data data = { 431 .q = q, 432 .flags = flags, 433 .cmd_flags = op, 434 }; 435 u64 alloc_time_ns = 0; 436 unsigned int cpu; 437 unsigned int tag; 438 int ret; 439 440 /* alloc_time includes depth and tag waits */ 441 if (blk_queue_rq_alloc_time(q)) 442 alloc_time_ns = ktime_get_ns(); 443 444 /* 445 * If the tag allocator sleeps we could get an allocation for a 446 * different hardware context. No need to complicate the low level 447 * allocator for this for the rare use case of a command tied to 448 * a specific queue. 449 */ 450 if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED)))) 451 return ERR_PTR(-EINVAL); 452 453 if (hctx_idx >= q->nr_hw_queues) 454 return ERR_PTR(-EIO); 455 456 ret = blk_queue_enter(q, flags); 457 if (ret) 458 return ERR_PTR(ret); 459 460 /* 461 * Check if the hardware context is actually mapped to anything. 462 * If not tell the caller that it should skip this queue. 
463 */ 464 ret = -EXDEV; 465 data.hctx = q->queue_hw_ctx[hctx_idx]; 466 if (!blk_mq_hw_queue_mapped(data.hctx)) 467 goto out_queue_exit; 468 cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask); 469 data.ctx = __blk_mq_get_ctx(q, cpu); 470 471 if (!q->elevator) 472 blk_mq_tag_busy(data.hctx); 473 474 ret = -EWOULDBLOCK; 475 tag = blk_mq_get_tag(&data); 476 if (tag == BLK_MQ_NO_TAG) 477 goto out_queue_exit; 478 return blk_mq_rq_ctx_init(&data, tag, alloc_time_ns); 479 480 out_queue_exit: 481 blk_queue_exit(q); 482 return ERR_PTR(ret); 483 } 484 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx); 485 486 static void __blk_mq_free_request(struct request *rq) 487 { 488 struct request_queue *q = rq->q; 489 struct blk_mq_ctx *ctx = rq->mq_ctx; 490 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 491 const int sched_tag = rq->internal_tag; 492 493 blk_crypto_free_request(rq); 494 blk_pm_mark_last_busy(rq); 495 rq->mq_hctx = NULL; 496 if (rq->tag != BLK_MQ_NO_TAG) 497 blk_mq_put_tag(hctx->tags, ctx, rq->tag); 498 if (sched_tag != BLK_MQ_NO_TAG) 499 blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag); 500 blk_mq_sched_restart(hctx); 501 blk_queue_exit(q); 502 } 503 504 void blk_mq_free_request(struct request *rq) 505 { 506 struct request_queue *q = rq->q; 507 struct elevator_queue *e = q->elevator; 508 struct blk_mq_ctx *ctx = rq->mq_ctx; 509 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 510 511 if (rq->rq_flags & RQF_ELVPRIV) { 512 if (e && e->type->ops.finish_request) 513 e->type->ops.finish_request(rq); 514 if (rq->elv.icq) { 515 put_io_context(rq->elv.icq->ioc); 516 rq->elv.icq = NULL; 517 } 518 } 519 520 ctx->rq_completed[rq_is_sync(rq)]++; 521 if (rq->rq_flags & RQF_MQ_INFLIGHT) 522 atomic_dec(&hctx->nr_active); 523 524 if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq))) 525 laptop_io_completion(q->backing_dev_info); 526 527 rq_qos_done(q, rq); 528 529 WRITE_ONCE(rq->state, MQ_RQ_IDLE); 530 if (refcount_dec_and_test(&rq->ref)) 531 __blk_mq_free_request(rq); 532 } 533 EXPORT_SYMBOL_GPL(blk_mq_free_request); 534 535 inline void __blk_mq_end_request(struct request *rq, blk_status_t error) 536 { 537 u64 now = 0; 538 539 if (blk_mq_need_time_stamp(rq)) 540 now = ktime_get_ns(); 541 542 if (rq->rq_flags & RQF_STATS) { 543 blk_mq_poll_stats_start(rq->q); 544 blk_stat_add(rq, now); 545 } 546 547 blk_mq_sched_completed_request(rq, now); 548 549 blk_account_io_done(rq, now); 550 551 if (rq->end_io) { 552 rq_qos_done(rq->q, rq); 553 rq->end_io(rq, error); 554 } else { 555 blk_mq_free_request(rq); 556 } 557 } 558 EXPORT_SYMBOL(__blk_mq_end_request); 559 560 void blk_mq_end_request(struct request *rq, blk_status_t error) 561 { 562 if (blk_update_request(rq, error, blk_rq_bytes(rq))) 563 BUG(); 564 __blk_mq_end_request(rq, error); 565 } 566 EXPORT_SYMBOL(blk_mq_end_request); 567 568 /* 569 * Softirq action handler - move entries to local list and loop over them 570 * while passing them to the queue registered handler. 
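 *
 * Requests are queued on the per-CPU blk_cpu_done list by
 * blk_mq_trigger_softirq(); if a CPU goes offline, its pending entries are
 * spliced over to the current CPU by blk_softirq_cpu_dead().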
 */
static __latent_entropy void blk_done_softirq(struct softirq_action *h)
{
	struct list_head *cpu_list, local_list;

	local_irq_disable();
	cpu_list = this_cpu_ptr(&blk_cpu_done);
	list_replace_init(cpu_list, &local_list);
	local_irq_enable();

	while (!list_empty(&local_list)) {
		struct request *rq;

		rq = list_entry(local_list.next, struct request, ipi_list);
		list_del_init(&rq->ipi_list);
		rq->q->mq_ops->complete(rq);
	}
}

static void blk_mq_trigger_softirq(struct request *rq)
{
	struct list_head *list;
	unsigned long flags;

	local_irq_save(flags);
	list = this_cpu_ptr(&blk_cpu_done);
	list_add_tail(&rq->ipi_list, list);

	/*
	 * If the list only contains our just added request, signal a raise of
	 * the softirq. If there are already entries there, someone already
	 * raised the irq but it hasn't run yet.
	 */
	if (list->next == &rq->ipi_list)
		raise_softirq_irqoff(BLOCK_SOFTIRQ);
	local_irq_restore(flags);
}

static int blk_softirq_cpu_dead(unsigned int cpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	local_irq_disable();
	list_splice_init(&per_cpu(blk_cpu_done, cpu),
			 this_cpu_ptr(&blk_cpu_done));
	raise_softirq_irqoff(BLOCK_SOFTIRQ);
	local_irq_enable();

	return 0;
}


static void __blk_mq_complete_request_remote(void *data)
{
	struct request *rq = data;

	/*
	 * Most single queue controllers have only one irq vector for handling
	 * I/O completion, and its affinity is set to all possible CPUs. On
	 * most architectures this means the irq is handled on one specific
	 * CPU.
	 *
	 * So complete I/O requests in softirq context for single queue
	 * devices to avoid degrading I/O performance due to irqsoff latency.
	 */
	if (rq->q->nr_hw_queues == 1)
		blk_mq_trigger_softirq(rq);
	else
		rq->q->mq_ops->complete(rq);
}

static inline bool blk_mq_complete_need_ipi(struct request *rq)
{
	int cpu = raw_smp_processor_id();

	if (!IS_ENABLED(CONFIG_SMP) ||
	    !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
		return false;

	/* same CPU or cache domain? Complete locally */
	if (cpu == rq->mq_ctx->cpu ||
	    (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
	     cpus_share_cache(cpu, rq->mq_ctx->cpu)))
		return false;

	/* don't try to IPI to an offline CPU */
	return cpu_online(rq->mq_ctx->cpu);
}

bool blk_mq_complete_request_remote(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);

	/*
	 * For a polled request, always complete locally, it's pointless
	 * to redirect the completion.
	 */
	if (rq->cmd_flags & REQ_HIPRI)
		return false;

	if (blk_mq_complete_need_ipi(rq)) {
		rq->csd.func = __blk_mq_complete_request_remote;
		rq->csd.info = rq;
		rq->csd.flags = 0;
		smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);
	} else {
		if (rq->q->nr_hw_queues > 1)
			return false;
		blk_mq_trigger_softirq(rq);
	}

	return true;
}
EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq: the request being processed
 *
 * Description:
 *	Complete a request by scheduling the ->complete() operation.
694 **/ 695 void blk_mq_complete_request(struct request *rq) 696 { 697 if (!blk_mq_complete_request_remote(rq)) 698 rq->q->mq_ops->complete(rq); 699 } 700 EXPORT_SYMBOL(blk_mq_complete_request); 701 702 static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx) 703 __releases(hctx->srcu) 704 { 705 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) 706 rcu_read_unlock(); 707 else 708 srcu_read_unlock(hctx->srcu, srcu_idx); 709 } 710 711 static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx) 712 __acquires(hctx->srcu) 713 { 714 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) { 715 /* shut up gcc false positive */ 716 *srcu_idx = 0; 717 rcu_read_lock(); 718 } else 719 *srcu_idx = srcu_read_lock(hctx->srcu); 720 } 721 722 /** 723 * blk_mq_start_request - Start processing a request 724 * @rq: Pointer to request to be started 725 * 726 * Function used by device drivers to notify the block layer that a request 727 * is going to be processed now, so blk layer can do proper initializations 728 * such as starting the timeout timer. 729 */ 730 void blk_mq_start_request(struct request *rq) 731 { 732 struct request_queue *q = rq->q; 733 734 trace_block_rq_issue(q, rq); 735 736 if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) { 737 rq->io_start_time_ns = ktime_get_ns(); 738 rq->stats_sectors = blk_rq_sectors(rq); 739 rq->rq_flags |= RQF_STATS; 740 rq_qos_issue(q, rq); 741 } 742 743 WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE); 744 745 blk_add_timer(rq); 746 WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT); 747 748 #ifdef CONFIG_BLK_DEV_INTEGRITY 749 if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE) 750 q->integrity.profile->prepare_fn(rq); 751 #endif 752 } 753 EXPORT_SYMBOL(blk_mq_start_request); 754 755 static void __blk_mq_requeue_request(struct request *rq) 756 { 757 struct request_queue *q = rq->q; 758 759 blk_mq_put_driver_tag(rq); 760 761 trace_block_rq_requeue(q, rq); 762 rq_qos_requeue(q, rq); 763 764 if (blk_mq_request_started(rq)) { 765 WRITE_ONCE(rq->state, MQ_RQ_IDLE); 766 rq->rq_flags &= ~RQF_TIMED_OUT; 767 } 768 } 769 770 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list) 771 { 772 __blk_mq_requeue_request(rq); 773 774 /* this request will be re-inserted to io scheduler queue */ 775 blk_mq_sched_requeue_request(rq); 776 777 BUG_ON(!list_empty(&rq->queuelist)); 778 blk_mq_add_to_requeue_list(rq, true, kick_requeue_list); 779 } 780 EXPORT_SYMBOL(blk_mq_requeue_request); 781 782 static void blk_mq_requeue_work(struct work_struct *work) 783 { 784 struct request_queue *q = 785 container_of(work, struct request_queue, requeue_work.work); 786 LIST_HEAD(rq_list); 787 struct request *rq, *next; 788 789 spin_lock_irq(&q->requeue_lock); 790 list_splice_init(&q->requeue_list, &rq_list); 791 spin_unlock_irq(&q->requeue_lock); 792 793 list_for_each_entry_safe(rq, next, &rq_list, queuelist) { 794 if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP))) 795 continue; 796 797 rq->rq_flags &= ~RQF_SOFTBARRIER; 798 list_del_init(&rq->queuelist); 799 /* 800 * If RQF_DONTPREP, rq has contained some driver specific 801 * data, so insert it to hctx dispatch list to avoid any 802 * merge. 
803 */ 804 if (rq->rq_flags & RQF_DONTPREP) 805 blk_mq_request_bypass_insert(rq, false, false); 806 else 807 blk_mq_sched_insert_request(rq, true, false, false); 808 } 809 810 while (!list_empty(&rq_list)) { 811 rq = list_entry(rq_list.next, struct request, queuelist); 812 list_del_init(&rq->queuelist); 813 blk_mq_sched_insert_request(rq, false, false, false); 814 } 815 816 blk_mq_run_hw_queues(q, false); 817 } 818 819 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head, 820 bool kick_requeue_list) 821 { 822 struct request_queue *q = rq->q; 823 unsigned long flags; 824 825 /* 826 * We abuse this flag that is otherwise used by the I/O scheduler to 827 * request head insertion from the workqueue. 828 */ 829 BUG_ON(rq->rq_flags & RQF_SOFTBARRIER); 830 831 spin_lock_irqsave(&q->requeue_lock, flags); 832 if (at_head) { 833 rq->rq_flags |= RQF_SOFTBARRIER; 834 list_add(&rq->queuelist, &q->requeue_list); 835 } else { 836 list_add_tail(&rq->queuelist, &q->requeue_list); 837 } 838 spin_unlock_irqrestore(&q->requeue_lock, flags); 839 840 if (kick_requeue_list) 841 blk_mq_kick_requeue_list(q); 842 } 843 844 void blk_mq_kick_requeue_list(struct request_queue *q) 845 { 846 kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0); 847 } 848 EXPORT_SYMBOL(blk_mq_kick_requeue_list); 849 850 void blk_mq_delay_kick_requeue_list(struct request_queue *q, 851 unsigned long msecs) 852 { 853 kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 854 msecs_to_jiffies(msecs)); 855 } 856 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list); 857 858 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag) 859 { 860 if (tag < tags->nr_tags) { 861 prefetch(tags->rqs[tag]); 862 return tags->rqs[tag]; 863 } 864 865 return NULL; 866 } 867 EXPORT_SYMBOL(blk_mq_tag_to_rq); 868 869 static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq, 870 void *priv, bool reserved) 871 { 872 /* 873 * If we find a request that isn't idle and the queue matches, 874 * we know the queue is busy. Return false to stop the iteration. 
	 */
	if (blk_mq_request_started(rq) && rq->q == hctx->queue) {
		bool *busy = priv;

		*busy = true;
		return false;
	}

	return true;
}

bool blk_mq_queue_inflight(struct request_queue *q)
{
	bool busy = false;

	blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
	return busy;
}
EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);

static void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
	req->rq_flags |= RQF_TIMED_OUT;
	if (req->q->mq_ops->timeout) {
		enum blk_eh_timer_return ret;

		ret = req->q->mq_ops->timeout(req, reserved);
		if (ret == BLK_EH_DONE)
			return;
		WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
	}

	blk_add_timer(req);
}

static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
{
	unsigned long deadline;

	if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
		return false;
	if (rq->rq_flags & RQF_TIMED_OUT)
		return false;

	deadline = READ_ONCE(rq->deadline);
	if (time_after_eq(jiffies, deadline))
		return true;

	if (*next == 0)
		*next = deadline;
	else if (time_after(*next, deadline))
		*next = deadline;
	return false;
}

static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
		struct request *rq, void *priv, bool reserved)
{
	unsigned long *next = priv;

	/*
	 * Just do a quick check if it is expired before locking the request
	 * in, so we're not unnecessarily synchronizing across CPUs.
	 */
	if (!blk_mq_req_expired(rq, next))
		return true;

	/*
	 * We have reason to believe the request may be expired. Take a
	 * reference on the request to lock this request lifetime into its
	 * currently allocated context to prevent it from being reallocated in
	 * the event the completion by-passes this timeout handler.
	 *
	 * If the reference was already released, then the driver beat the
	 * timeout handler to posting a natural completion.
	 */
	if (!refcount_inc_not_zero(&rq->ref))
		return true;

	/*
	 * The request is now locked and cannot be reallocated underneath the
	 * timeout handler's processing. Re-verify this exact request is truly
	 * expired; if it is not expired, then the request was completed and
	 * reallocated as a new request.
	 */
	if (blk_mq_req_expired(rq, next))
		blk_mq_rq_timed_out(rq, reserved);

	if (is_flush_rq(rq, hctx))
		rq->end_io(rq, 0);
	else if (refcount_dec_and_test(&rq->ref))
		__blk_mq_free_request(rq);

	return true;
}

static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	unsigned long next = 0;
	struct blk_mq_hw_ctx *hctx;
	int i;

	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting for
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
991 */ 992 if (!percpu_ref_tryget(&q->q_usage_counter)) 993 return; 994 995 blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next); 996 997 if (next != 0) { 998 mod_timer(&q->timeout, next); 999 } else { 1000 /* 1001 * Request timeouts are handled as a forward rolling timer. If 1002 * we end up here it means that no requests are pending and 1003 * also that no request has been pending for a while. Mark 1004 * each hctx as idle. 1005 */ 1006 queue_for_each_hw_ctx(q, hctx, i) { 1007 /* the hctx may be unmapped, so check it here */ 1008 if (blk_mq_hw_queue_mapped(hctx)) 1009 blk_mq_tag_idle(hctx); 1010 } 1011 } 1012 blk_queue_exit(q); 1013 } 1014 1015 struct flush_busy_ctx_data { 1016 struct blk_mq_hw_ctx *hctx; 1017 struct list_head *list; 1018 }; 1019 1020 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data) 1021 { 1022 struct flush_busy_ctx_data *flush_data = data; 1023 struct blk_mq_hw_ctx *hctx = flush_data->hctx; 1024 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; 1025 enum hctx_type type = hctx->type; 1026 1027 spin_lock(&ctx->lock); 1028 list_splice_tail_init(&ctx->rq_lists[type], flush_data->list); 1029 sbitmap_clear_bit(sb, bitnr); 1030 spin_unlock(&ctx->lock); 1031 return true; 1032 } 1033 1034 /* 1035 * Process software queues that have been marked busy, splicing them 1036 * to the for-dispatch 1037 */ 1038 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list) 1039 { 1040 struct flush_busy_ctx_data data = { 1041 .hctx = hctx, 1042 .list = list, 1043 }; 1044 1045 sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data); 1046 } 1047 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs); 1048 1049 struct dispatch_rq_data { 1050 struct blk_mq_hw_ctx *hctx; 1051 struct request *rq; 1052 }; 1053 1054 static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr, 1055 void *data) 1056 { 1057 struct dispatch_rq_data *dispatch_data = data; 1058 struct blk_mq_hw_ctx *hctx = dispatch_data->hctx; 1059 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; 1060 enum hctx_type type = hctx->type; 1061 1062 spin_lock(&ctx->lock); 1063 if (!list_empty(&ctx->rq_lists[type])) { 1064 dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next); 1065 list_del_init(&dispatch_data->rq->queuelist); 1066 if (list_empty(&ctx->rq_lists[type])) 1067 sbitmap_clear_bit(sb, bitnr); 1068 } 1069 spin_unlock(&ctx->lock); 1070 1071 return !dispatch_data->rq; 1072 } 1073 1074 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, 1075 struct blk_mq_ctx *start) 1076 { 1077 unsigned off = start ? 
start->index_hw[hctx->type] : 0; 1078 struct dispatch_rq_data data = { 1079 .hctx = hctx, 1080 .rq = NULL, 1081 }; 1082 1083 __sbitmap_for_each_set(&hctx->ctx_map, off, 1084 dispatch_rq_from_ctx, &data); 1085 1086 return data.rq; 1087 } 1088 1089 static inline unsigned int queued_to_index(unsigned int queued) 1090 { 1091 if (!queued) 1092 return 0; 1093 1094 return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1); 1095 } 1096 1097 static bool __blk_mq_get_driver_tag(struct request *rq) 1098 { 1099 struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags; 1100 unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags; 1101 int tag; 1102 1103 blk_mq_tag_busy(rq->mq_hctx); 1104 1105 if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) { 1106 bt = &rq->mq_hctx->tags->breserved_tags; 1107 tag_offset = 0; 1108 } 1109 1110 if (!hctx_may_queue(rq->mq_hctx, bt)) 1111 return false; 1112 tag = __sbitmap_queue_get(bt); 1113 if (tag == BLK_MQ_NO_TAG) 1114 return false; 1115 1116 rq->tag = tag + tag_offset; 1117 return true; 1118 } 1119 1120 static bool blk_mq_get_driver_tag(struct request *rq) 1121 { 1122 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 1123 1124 if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_get_driver_tag(rq)) 1125 return false; 1126 1127 if ((hctx->flags & BLK_MQ_F_TAG_SHARED) && 1128 !(rq->rq_flags & RQF_MQ_INFLIGHT)) { 1129 rq->rq_flags |= RQF_MQ_INFLIGHT; 1130 atomic_inc(&hctx->nr_active); 1131 } 1132 hctx->tags->rqs[rq->tag] = rq; 1133 return true; 1134 } 1135 1136 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, 1137 int flags, void *key) 1138 { 1139 struct blk_mq_hw_ctx *hctx; 1140 1141 hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait); 1142 1143 spin_lock(&hctx->dispatch_wait_lock); 1144 if (!list_empty(&wait->entry)) { 1145 struct sbitmap_queue *sbq; 1146 1147 list_del_init(&wait->entry); 1148 sbq = &hctx->tags->bitmap_tags; 1149 atomic_dec(&sbq->ws_active); 1150 } 1151 spin_unlock(&hctx->dispatch_wait_lock); 1152 1153 blk_mq_run_hw_queue(hctx, true); 1154 return 1; 1155 } 1156 1157 /* 1158 * Mark us waiting for a tag. For shared tags, this involves hooking us into 1159 * the tag wakeups. For non-shared tags, we can simply mark us needing a 1160 * restart. For both cases, take care to check the condition again after 1161 * marking us as waiting. 1162 */ 1163 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, 1164 struct request *rq) 1165 { 1166 struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags; 1167 struct wait_queue_head *wq; 1168 wait_queue_entry_t *wait; 1169 bool ret; 1170 1171 if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) { 1172 blk_mq_sched_mark_restart_hctx(hctx); 1173 1174 /* 1175 * It's possible that a tag was freed in the window between the 1176 * allocation failure and adding the hardware queue to the wait 1177 * queue. 1178 * 1179 * Don't clear RESTART here, someone else could have set it. 1180 * At most this will cost an extra queue run. 
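	 * If the retry below still fails, the caller leaves the request on
	 * the dispatch list and the RESTART machinery reruns the queue when
	 * another request completes and frees its tag.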
1181 */ 1182 return blk_mq_get_driver_tag(rq); 1183 } 1184 1185 wait = &hctx->dispatch_wait; 1186 if (!list_empty_careful(&wait->entry)) 1187 return false; 1188 1189 wq = &bt_wait_ptr(sbq, hctx)->wait; 1190 1191 spin_lock_irq(&wq->lock); 1192 spin_lock(&hctx->dispatch_wait_lock); 1193 if (!list_empty(&wait->entry)) { 1194 spin_unlock(&hctx->dispatch_wait_lock); 1195 spin_unlock_irq(&wq->lock); 1196 return false; 1197 } 1198 1199 atomic_inc(&sbq->ws_active); 1200 wait->flags &= ~WQ_FLAG_EXCLUSIVE; 1201 __add_wait_queue(wq, wait); 1202 1203 /* 1204 * It's possible that a tag was freed in the window between the 1205 * allocation failure and adding the hardware queue to the wait 1206 * queue. 1207 */ 1208 ret = blk_mq_get_driver_tag(rq); 1209 if (!ret) { 1210 spin_unlock(&hctx->dispatch_wait_lock); 1211 spin_unlock_irq(&wq->lock); 1212 return false; 1213 } 1214 1215 /* 1216 * We got a tag, remove ourselves from the wait queue to ensure 1217 * someone else gets the wakeup. 1218 */ 1219 list_del_init(&wait->entry); 1220 atomic_dec(&sbq->ws_active); 1221 spin_unlock(&hctx->dispatch_wait_lock); 1222 spin_unlock_irq(&wq->lock); 1223 1224 return true; 1225 } 1226 1227 #define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT 8 1228 #define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR 4 1229 /* 1230 * Update dispatch busy with the Exponential Weighted Moving Average(EWMA): 1231 * - EWMA is one simple way to compute running average value 1232 * - weight(7/8 and 1/8) is applied so that it can decrease exponentially 1233 * - take 4 as factor for avoiding to get too small(0) result, and this 1234 * factor doesn't matter because EWMA decreases exponentially 1235 */ 1236 static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy) 1237 { 1238 unsigned int ewma; 1239 1240 if (hctx->queue->elevator) 1241 return; 1242 1243 ewma = hctx->dispatch_busy; 1244 1245 if (!ewma && !busy) 1246 return; 1247 1248 ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1; 1249 if (busy) 1250 ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR; 1251 ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT; 1252 1253 hctx->dispatch_busy = ewma; 1254 } 1255 1256 #define BLK_MQ_RESOURCE_DELAY 3 /* ms units */ 1257 1258 static void blk_mq_handle_dev_resource(struct request *rq, 1259 struct list_head *list) 1260 { 1261 struct request *next = 1262 list_first_entry_or_null(list, struct request, queuelist); 1263 1264 /* 1265 * If an I/O scheduler has been configured and we got a driver tag for 1266 * the next request already, free it. 1267 */ 1268 if (next) 1269 blk_mq_put_driver_tag(next); 1270 1271 list_add(&rq->queuelist, list); 1272 __blk_mq_requeue_request(rq); 1273 } 1274 1275 static void blk_mq_handle_zone_resource(struct request *rq, 1276 struct list_head *zone_list) 1277 { 1278 /* 1279 * If we end up here it is because we cannot dispatch a request to a 1280 * specific zone due to LLD level zone-write locking or other zone 1281 * related resource not being available. In this case, set the request 1282 * aside in zone_list for retrying it later. 
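	 * The entries on zone_list are spliced back onto the dispatch list by
	 * blk_mq_dispatch_rq_list() once it has worked through the remaining
	 * requests.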
1283 */ 1284 list_add(&rq->queuelist, zone_list); 1285 __blk_mq_requeue_request(rq); 1286 } 1287 1288 enum prep_dispatch { 1289 PREP_DISPATCH_OK, 1290 PREP_DISPATCH_NO_TAG, 1291 PREP_DISPATCH_NO_BUDGET, 1292 }; 1293 1294 static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq, 1295 bool need_budget) 1296 { 1297 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 1298 1299 if (need_budget && !blk_mq_get_dispatch_budget(rq->q)) { 1300 blk_mq_put_driver_tag(rq); 1301 return PREP_DISPATCH_NO_BUDGET; 1302 } 1303 1304 if (!blk_mq_get_driver_tag(rq)) { 1305 /* 1306 * The initial allocation attempt failed, so we need to 1307 * rerun the hardware queue when a tag is freed. The 1308 * waitqueue takes care of that. If the queue is run 1309 * before we add this entry back on the dispatch list, 1310 * we'll re-run it below. 1311 */ 1312 if (!blk_mq_mark_tag_wait(hctx, rq)) { 1313 /* 1314 * All budgets not got from this function will be put 1315 * together during handling partial dispatch 1316 */ 1317 if (need_budget) 1318 blk_mq_put_dispatch_budget(rq->q); 1319 return PREP_DISPATCH_NO_TAG; 1320 } 1321 } 1322 1323 return PREP_DISPATCH_OK; 1324 } 1325 1326 /* release all allocated budgets before calling to blk_mq_dispatch_rq_list */ 1327 static void blk_mq_release_budgets(struct request_queue *q, 1328 unsigned int nr_budgets) 1329 { 1330 int i; 1331 1332 for (i = 0; i < nr_budgets; i++) 1333 blk_mq_put_dispatch_budget(q); 1334 } 1335 1336 /* 1337 * Returns true if we did some work AND can potentially do more. 1338 */ 1339 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list, 1340 unsigned int nr_budgets) 1341 { 1342 enum prep_dispatch prep; 1343 struct request_queue *q = hctx->queue; 1344 struct request *rq, *nxt; 1345 int errors, queued; 1346 blk_status_t ret = BLK_STS_OK; 1347 LIST_HEAD(zone_list); 1348 1349 if (list_empty(list)) 1350 return false; 1351 1352 /* 1353 * Now process all the entries, sending them to the driver. 1354 */ 1355 errors = queued = 0; 1356 do { 1357 struct blk_mq_queue_data bd; 1358 1359 rq = list_first_entry(list, struct request, queuelist); 1360 1361 WARN_ON_ONCE(hctx != rq->mq_hctx); 1362 prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets); 1363 if (prep != PREP_DISPATCH_OK) 1364 break; 1365 1366 list_del_init(&rq->queuelist); 1367 1368 bd.rq = rq; 1369 1370 /* 1371 * Flag last if we have no more requests, or if we have more 1372 * but can't assign a driver tag to it. 1373 */ 1374 if (list_empty(list)) 1375 bd.last = true; 1376 else { 1377 nxt = list_first_entry(list, struct request, queuelist); 1378 bd.last = !blk_mq_get_driver_tag(nxt); 1379 } 1380 1381 /* 1382 * once the request is queued to lld, no need to cover the 1383 * budget any more 1384 */ 1385 if (nr_budgets) 1386 nr_budgets--; 1387 ret = q->mq_ops->queue_rq(hctx, &bd); 1388 switch (ret) { 1389 case BLK_STS_OK: 1390 queued++; 1391 break; 1392 case BLK_STS_RESOURCE: 1393 case BLK_STS_DEV_RESOURCE: 1394 blk_mq_handle_dev_resource(rq, list); 1395 goto out; 1396 case BLK_STS_ZONE_RESOURCE: 1397 /* 1398 * Move the request to zone_list and keep going through 1399 * the dispatch list to find more requests the drive can 1400 * accept. 
1401 */ 1402 blk_mq_handle_zone_resource(rq, &zone_list); 1403 break; 1404 default: 1405 errors++; 1406 blk_mq_end_request(rq, BLK_STS_IOERR); 1407 } 1408 } while (!list_empty(list)); 1409 out: 1410 if (!list_empty(&zone_list)) 1411 list_splice_tail_init(&zone_list, list); 1412 1413 hctx->dispatched[queued_to_index(queued)]++; 1414 1415 /* 1416 * Any items that need requeuing? Stuff them into hctx->dispatch, 1417 * that is where we will continue on next queue run. 1418 */ 1419 if (!list_empty(list)) { 1420 bool needs_restart; 1421 /* For non-shared tags, the RESTART check will suffice */ 1422 bool no_tag = prep == PREP_DISPATCH_NO_TAG && 1423 (hctx->flags & BLK_MQ_F_TAG_SHARED); 1424 bool no_budget_avail = prep == PREP_DISPATCH_NO_BUDGET; 1425 1426 blk_mq_release_budgets(q, nr_budgets); 1427 1428 /* 1429 * If we didn't flush the entire list, we could have told 1430 * the driver there was more coming, but that turned out to 1431 * be a lie. 1432 */ 1433 if (q->mq_ops->commit_rqs && queued) 1434 q->mq_ops->commit_rqs(hctx); 1435 1436 spin_lock(&hctx->lock); 1437 list_splice_tail_init(list, &hctx->dispatch); 1438 spin_unlock(&hctx->lock); 1439 1440 /* 1441 * If SCHED_RESTART was set by the caller of this function and 1442 * it is no longer set that means that it was cleared by another 1443 * thread and hence that a queue rerun is needed. 1444 * 1445 * If 'no_tag' is set, that means that we failed getting 1446 * a driver tag with an I/O scheduler attached. If our dispatch 1447 * waitqueue is no longer active, ensure that we run the queue 1448 * AFTER adding our entries back to the list. 1449 * 1450 * If no I/O scheduler has been configured it is possible that 1451 * the hardware queue got stopped and restarted before requests 1452 * were pushed back onto the dispatch list. Rerun the queue to 1453 * avoid starvation. Notes: 1454 * - blk_mq_run_hw_queue() checks whether or not a queue has 1455 * been stopped before rerunning a queue. 1456 * - Some but not all block drivers stop a queue before 1457 * returning BLK_STS_RESOURCE. Two exceptions are scsi-mq 1458 * and dm-rq. 1459 * 1460 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART 1461 * bit is set, run queue after a delay to avoid IO stalls 1462 * that could otherwise occur if the queue is idle. We'll do 1463 * similar if we couldn't get budget and SCHED_RESTART is set. 1464 */ 1465 needs_restart = blk_mq_sched_needs_restart(hctx); 1466 if (!needs_restart || 1467 (no_tag && list_empty_careful(&hctx->dispatch_wait.entry))) 1468 blk_mq_run_hw_queue(hctx, true); 1469 else if (needs_restart && (ret == BLK_STS_RESOURCE || 1470 no_budget_avail)) 1471 blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY); 1472 1473 blk_mq_update_dispatch_busy(hctx, true); 1474 return false; 1475 } else 1476 blk_mq_update_dispatch_busy(hctx, false); 1477 1478 return (queued + errors) != 0; 1479 } 1480 1481 /** 1482 * __blk_mq_run_hw_queue - Run a hardware queue. 1483 * @hctx: Pointer to the hardware queue to run. 1484 * 1485 * Send pending requests to the hardware. 1486 */ 1487 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) 1488 { 1489 int srcu_idx; 1490 1491 /* 1492 * We should be running this queue from one of the CPUs that 1493 * are mapped to it. 
	 *
	 * There are at least two related races now between setting
	 * hctx->next_cpu from blk_mq_hctx_next_cpu() and running
	 * __blk_mq_run_hw_queue():
	 *
	 * - hctx->next_cpu is found offline in blk_mq_hctx_next_cpu(),
	 *   but later it becomes online, then this warning is harmless
	 *
	 * - hctx->next_cpu is found online in blk_mq_hctx_next_cpu(),
	 *   but later it becomes offline, then the warning can't be
	 *   triggered, and we depend on the blk-mq timeout handler to
	 *   handle dispatched requests to this hctx
	 */
	if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
	    cpu_online(hctx->next_cpu)) {
		printk(KERN_WARNING "run queue from wrong CPU %d, hctx %s\n",
			raw_smp_processor_id(),
			cpumask_empty(hctx->cpumask) ? "inactive": "active");
		dump_stack();
	}

	/*
	 * We can't run the queue inline with interrupts disabled. Ensure that
	 * we catch bad users of this early.
	 */
	WARN_ON_ONCE(in_interrupt());

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	hctx_lock(hctx, &srcu_idx);
	blk_mq_sched_dispatch_requests(hctx);
	hctx_unlock(hctx, srcu_idx);
}

static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
{
	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(hctx->cpumask);
	return cpu;
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	bool tried = false;
	int next_cpu = hctx->next_cpu;

	if (hctx->queue->nr_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	if (--hctx->next_cpu_batch <= 0) {
select_cpu:
		next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
				cpu_online_mask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = blk_mq_first_mapped_cpu(hctx);
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}

	/*
	 * Do an unbound schedule if we can't find an online CPU for this
	 * hctx, which should only happen in the CPU DEAD handling path.
	 */
	if (!cpu_online(next_cpu)) {
		if (!tried) {
			tried = true;
			goto select_cpu;
		}

		/*
		 * Make sure to re-select the CPU next time once CPUs in
		 * hctx->cpumask become online again.
		 */
		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = 1;
		return WORK_CPU_UNBOUND;
	}

	hctx->next_cpu = next_cpu;
	return next_cpu;
}

/**
 * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue.
 * @hctx: Pointer to the hardware queue to run.
 * @async: If we want to run the queue asynchronously.
 * @msecs: Milliseconds of delay to wait before running the queue.
 *
 * If !@async, try to run the queue now. Else, run the queue asynchronously and
 * with a delay of @msecs.
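 *
 * The synchronous path only runs the queue inline when the hardware queue
 * is non-blocking and the current CPU is in hctx->cpumask; otherwise the
 * run is punted to kblockd on a CPU chosen by blk_mq_hctx_next_cpu().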
 */
static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
					unsigned long msecs)
{
	if (unlikely(blk_mq_hctx_stopped(hctx)))
		return;

	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
		int cpu = get_cpu();
		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
			__blk_mq_run_hw_queue(hctx);
			put_cpu();
			return;
		}

		put_cpu();
	}

	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
				    msecs_to_jiffies(msecs));
}

/**
 * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
 * @hctx: Pointer to the hardware queue to run.
 * @msecs: Milliseconds of delay to wait before running the queue.
 *
 * Run a hardware queue asynchronously with a delay of @msecs.
 */
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);

/**
 * blk_mq_run_hw_queue - Start to run a hardware queue.
 * @hctx: Pointer to the hardware queue to run.
 * @async: If we want to run the queue asynchronously.
 *
 * Check if the request queue is not in a quiesced state and if there are
 * pending requests to be sent. If this is true, run the queue to send requests
 * to hardware.
 */
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	int srcu_idx;
	bool need_run;

	/*
	 * When the queue is quiesced, we may be switching io schedulers,
	 * updating nr_hw_queues, or doing other things that mean we can't
	 * run the queue; even blk_mq_hctx_has_pending() can't be called
	 * safely.
	 *
	 * The queue will be rerun in blk_mq_unquiesce_queue() if it is
	 * quiesced.
	 */
	hctx_lock(hctx, &srcu_idx);
	need_run = !blk_queue_quiesced(hctx->queue) &&
		blk_mq_hctx_has_pending(hctx);
	hctx_unlock(hctx, srcu_idx);

	if (need_run)
		__blk_mq_delay_run_hw_queue(hctx, async, 0);
}
EXPORT_SYMBOL(blk_mq_run_hw_queue);

/**
 * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
 * @q: Pointer to the request queue to run.
 * @async: If we want to run the queues asynchronously.
 */
void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_hctx_stopped(hctx))
			continue;

		blk_mq_run_hw_queue(hctx, async);
	}
}
EXPORT_SYMBOL(blk_mq_run_hw_queues);

/**
 * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously.
 * @q: Pointer to the request queue to run.
 * @msecs: Milliseconds of delay to wait before running the queues.
 */
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_hctx_stopped(hctx))
			continue;

		blk_mq_delay_run_hw_queue(hctx, msecs);
	}
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);

/**
 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
 * @q: request queue.
 *
 * The caller is responsible for serializing this function against
 * blk_mq_{start,stop}_hw_queue.
1703 */ 1704 bool blk_mq_queue_stopped(struct request_queue *q) 1705 { 1706 struct blk_mq_hw_ctx *hctx; 1707 int i; 1708 1709 queue_for_each_hw_ctx(q, hctx, i) 1710 if (blk_mq_hctx_stopped(hctx)) 1711 return true; 1712 1713 return false; 1714 } 1715 EXPORT_SYMBOL(blk_mq_queue_stopped); 1716 1717 /* 1718 * This function is often used for pausing .queue_rq() by driver when 1719 * there isn't enough resource or some conditions aren't satisfied, and 1720 * BLK_STS_RESOURCE is usually returned. 1721 * 1722 * We do not guarantee that dispatch can be drained or blocked 1723 * after blk_mq_stop_hw_queue() returns. Please use 1724 * blk_mq_quiesce_queue() for that requirement. 1725 */ 1726 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) 1727 { 1728 cancel_delayed_work(&hctx->run_work); 1729 1730 set_bit(BLK_MQ_S_STOPPED, &hctx->state); 1731 } 1732 EXPORT_SYMBOL(blk_mq_stop_hw_queue); 1733 1734 /* 1735 * This function is often used for pausing .queue_rq() by driver when 1736 * there isn't enough resource or some conditions aren't satisfied, and 1737 * BLK_STS_RESOURCE is usually returned. 1738 * 1739 * We do not guarantee that dispatch can be drained or blocked 1740 * after blk_mq_stop_hw_queues() returns. Please use 1741 * blk_mq_quiesce_queue() for that requirement. 1742 */ 1743 void blk_mq_stop_hw_queues(struct request_queue *q) 1744 { 1745 struct blk_mq_hw_ctx *hctx; 1746 int i; 1747 1748 queue_for_each_hw_ctx(q, hctx, i) 1749 blk_mq_stop_hw_queue(hctx); 1750 } 1751 EXPORT_SYMBOL(blk_mq_stop_hw_queues); 1752 1753 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) 1754 { 1755 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 1756 1757 blk_mq_run_hw_queue(hctx, false); 1758 } 1759 EXPORT_SYMBOL(blk_mq_start_hw_queue); 1760 1761 void blk_mq_start_hw_queues(struct request_queue *q) 1762 { 1763 struct blk_mq_hw_ctx *hctx; 1764 int i; 1765 1766 queue_for_each_hw_ctx(q, hctx, i) 1767 blk_mq_start_hw_queue(hctx); 1768 } 1769 EXPORT_SYMBOL(blk_mq_start_hw_queues); 1770 1771 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) 1772 { 1773 if (!blk_mq_hctx_stopped(hctx)) 1774 return; 1775 1776 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 1777 blk_mq_run_hw_queue(hctx, async); 1778 } 1779 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue); 1780 1781 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) 1782 { 1783 struct blk_mq_hw_ctx *hctx; 1784 int i; 1785 1786 queue_for_each_hw_ctx(q, hctx, i) 1787 blk_mq_start_stopped_hw_queue(hctx, async); 1788 } 1789 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues); 1790 1791 static void blk_mq_run_work_fn(struct work_struct *work) 1792 { 1793 struct blk_mq_hw_ctx *hctx; 1794 1795 hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work); 1796 1797 /* 1798 * If we are stopped, don't run the queue. 
1799 */ 1800 if (test_bit(BLK_MQ_S_STOPPED, &hctx->state)) 1801 return; 1802 1803 __blk_mq_run_hw_queue(hctx); 1804 } 1805 1806 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx, 1807 struct request *rq, 1808 bool at_head) 1809 { 1810 struct blk_mq_ctx *ctx = rq->mq_ctx; 1811 enum hctx_type type = hctx->type; 1812 1813 lockdep_assert_held(&ctx->lock); 1814 1815 trace_block_rq_insert(hctx->queue, rq); 1816 1817 if (at_head) 1818 list_add(&rq->queuelist, &ctx->rq_lists[type]); 1819 else 1820 list_add_tail(&rq->queuelist, &ctx->rq_lists[type]); 1821 } 1822 1823 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, 1824 bool at_head) 1825 { 1826 struct blk_mq_ctx *ctx = rq->mq_ctx; 1827 1828 lockdep_assert_held(&ctx->lock); 1829 1830 __blk_mq_insert_req_list(hctx, rq, at_head); 1831 blk_mq_hctx_mark_pending(hctx, ctx); 1832 } 1833 1834 /** 1835 * blk_mq_request_bypass_insert - Insert a request at dispatch list. 1836 * @rq: Pointer to request to be inserted. 1837 * @run_queue: If we should run the hardware queue after inserting the request. 1838 * 1839 * Should only be used carefully, when the caller knows we want to 1840 * bypass a potential IO scheduler on the target device. 1841 */ 1842 void blk_mq_request_bypass_insert(struct request *rq, bool at_head, 1843 bool run_queue) 1844 { 1845 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 1846 1847 spin_lock(&hctx->lock); 1848 if (at_head) 1849 list_add(&rq->queuelist, &hctx->dispatch); 1850 else 1851 list_add_tail(&rq->queuelist, &hctx->dispatch); 1852 spin_unlock(&hctx->lock); 1853 1854 if (run_queue) 1855 blk_mq_run_hw_queue(hctx, false); 1856 } 1857 1858 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, 1859 struct list_head *list) 1860 1861 { 1862 struct request *rq; 1863 enum hctx_type type = hctx->type; 1864 1865 /* 1866 * preemption doesn't flush plug list, so it's possible ctx->cpu is 1867 * offline now 1868 */ 1869 list_for_each_entry(rq, list, queuelist) { 1870 BUG_ON(rq->mq_ctx != ctx); 1871 trace_block_rq_insert(hctx->queue, rq); 1872 } 1873 1874 spin_lock(&ctx->lock); 1875 list_splice_tail_init(list, &ctx->rq_lists[type]); 1876 blk_mq_hctx_mark_pending(hctx, ctx); 1877 spin_unlock(&ctx->lock); 1878 } 1879 1880 static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b) 1881 { 1882 struct request *rqa = container_of(a, struct request, queuelist); 1883 struct request *rqb = container_of(b, struct request, queuelist); 1884 1885 if (rqa->mq_ctx != rqb->mq_ctx) 1886 return rqa->mq_ctx > rqb->mq_ctx; 1887 if (rqa->mq_hctx != rqb->mq_hctx) 1888 return rqa->mq_hctx > rqb->mq_hctx; 1889 1890 return blk_rq_pos(rqa) > blk_rq_pos(rqb); 1891 } 1892 1893 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) 1894 { 1895 LIST_HEAD(list); 1896 1897 if (list_empty(&plug->mq_list)) 1898 return; 1899 list_splice_init(&plug->mq_list, &list); 1900 1901 if (plug->rq_count > 2 && plug->multiple_queues) 1902 list_sort(NULL, &list, plug_rq_cmp); 1903 1904 plug->rq_count = 0; 1905 1906 do { 1907 struct list_head rq_list; 1908 struct request *rq, *head_rq = list_entry_rq(list.next); 1909 struct list_head *pos = &head_rq->queuelist; /* skip first */ 1910 struct blk_mq_hw_ctx *this_hctx = head_rq->mq_hctx; 1911 struct blk_mq_ctx *this_ctx = head_rq->mq_ctx; 1912 unsigned int depth = 1; 1913 1914 list_for_each_continue(pos, &list) { 1915 rq = list_entry_rq(pos); 1916 BUG_ON(!rq->q); 1917 if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx) 1918 break; 1919 
depth++; 1920 } 1921 1922 list_cut_before(&rq_list, &list, pos); 1923 trace_block_unplug(head_rq->q, depth, !from_schedule); 1924 blk_mq_sched_insert_requests(this_hctx, this_ctx, &rq_list, 1925 from_schedule); 1926 } while(!list_empty(&list)); 1927 } 1928 1929 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio, 1930 unsigned int nr_segs) 1931 { 1932 if (bio->bi_opf & REQ_RAHEAD) 1933 rq->cmd_flags |= REQ_FAILFAST_MASK; 1934 1935 rq->__sector = bio->bi_iter.bi_sector; 1936 rq->write_hint = bio->bi_write_hint; 1937 blk_rq_bio_prep(rq, bio, nr_segs); 1938 blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO); 1939 1940 blk_account_io_start(rq); 1941 } 1942 1943 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, 1944 struct request *rq, 1945 blk_qc_t *cookie, bool last) 1946 { 1947 struct request_queue *q = rq->q; 1948 struct blk_mq_queue_data bd = { 1949 .rq = rq, 1950 .last = last, 1951 }; 1952 blk_qc_t new_cookie; 1953 blk_status_t ret; 1954 1955 new_cookie = request_to_qc_t(hctx, rq); 1956 1957 /* 1958 * For OK queue, we are done. For error, caller may kill it. 1959 * Any other error (busy), just add it to our list as we 1960 * previously would have done. 1961 */ 1962 ret = q->mq_ops->queue_rq(hctx, &bd); 1963 switch (ret) { 1964 case BLK_STS_OK: 1965 blk_mq_update_dispatch_busy(hctx, false); 1966 *cookie = new_cookie; 1967 break; 1968 case BLK_STS_RESOURCE: 1969 case BLK_STS_DEV_RESOURCE: 1970 blk_mq_update_dispatch_busy(hctx, true); 1971 __blk_mq_requeue_request(rq); 1972 break; 1973 default: 1974 blk_mq_update_dispatch_busy(hctx, false); 1975 *cookie = BLK_QC_T_NONE; 1976 break; 1977 } 1978 1979 return ret; 1980 } 1981 1982 static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, 1983 struct request *rq, 1984 blk_qc_t *cookie, 1985 bool bypass_insert, bool last) 1986 { 1987 struct request_queue *q = rq->q; 1988 bool run_queue = true; 1989 1990 /* 1991 * RCU or SRCU read lock is needed before checking quiesced flag. 1992 * 1993 * When queue is stopped or quiesced, ignore 'bypass_insert' from 1994 * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller, 1995 * and avoid driver to try to dispatch again. 1996 */ 1997 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) { 1998 run_queue = false; 1999 bypass_insert = false; 2000 goto insert; 2001 } 2002 2003 if (q->elevator && !bypass_insert) 2004 goto insert; 2005 2006 if (!blk_mq_get_dispatch_budget(q)) 2007 goto insert; 2008 2009 if (!blk_mq_get_driver_tag(rq)) { 2010 blk_mq_put_dispatch_budget(q); 2011 goto insert; 2012 } 2013 2014 return __blk_mq_issue_directly(hctx, rq, cookie, last); 2015 insert: 2016 if (bypass_insert) 2017 return BLK_STS_RESOURCE; 2018 2019 blk_mq_request_bypass_insert(rq, false, run_queue); 2020 return BLK_STS_OK; 2021 } 2022 2023 /** 2024 * blk_mq_try_issue_directly - Try to send a request directly to device driver. 2025 * @hctx: Pointer of the associated hardware queue. 2026 * @rq: Pointer to request to be sent. 2027 * @cookie: Request queue cookie. 2028 * 2029 * If the device has enough resources to accept a new request now, send the 2030 * request directly to device driver. Else, insert at hctx->dispatch queue, so 2031 * we can try send it another time in the future. Requests inserted at this 2032 * queue have higher priority. 
2033 */ 2034 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, 2035 struct request *rq, blk_qc_t *cookie) 2036 { 2037 blk_status_t ret; 2038 int srcu_idx; 2039 2040 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); 2041 2042 hctx_lock(hctx, &srcu_idx); 2043 2044 ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true); 2045 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) 2046 blk_mq_request_bypass_insert(rq, false, true); 2047 else if (ret != BLK_STS_OK) 2048 blk_mq_end_request(rq, ret); 2049 2050 hctx_unlock(hctx, srcu_idx); 2051 } 2052 2053 blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last) 2054 { 2055 blk_status_t ret; 2056 int srcu_idx; 2057 blk_qc_t unused_cookie; 2058 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 2059 2060 hctx_lock(hctx, &srcu_idx); 2061 ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last); 2062 hctx_unlock(hctx, srcu_idx); 2063 2064 return ret; 2065 } 2066 2067 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, 2068 struct list_head *list) 2069 { 2070 int queued = 0; 2071 2072 while (!list_empty(list)) { 2073 blk_status_t ret; 2074 struct request *rq = list_first_entry(list, struct request, 2075 queuelist); 2076 2077 list_del_init(&rq->queuelist); 2078 ret = blk_mq_request_issue_directly(rq, list_empty(list)); 2079 if (ret != BLK_STS_OK) { 2080 if (ret == BLK_STS_RESOURCE || 2081 ret == BLK_STS_DEV_RESOURCE) { 2082 blk_mq_request_bypass_insert(rq, false, 2083 list_empty(list)); 2084 break; 2085 } 2086 blk_mq_end_request(rq, ret); 2087 } else 2088 queued++; 2089 } 2090 2091 /* 2092 * If we didn't flush the entire list, we could have told 2093 * the driver there was more coming, but that turned out to 2094 * be a lie. 2095 */ 2096 if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs && queued) 2097 hctx->queue->mq_ops->commit_rqs(hctx); 2098 } 2099 2100 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq) 2101 { 2102 list_add_tail(&rq->queuelist, &plug->mq_list); 2103 plug->rq_count++; 2104 if (!plug->multiple_queues && !list_is_singular(&plug->mq_list)) { 2105 struct request *tmp; 2106 2107 tmp = list_first_entry(&plug->mq_list, struct request, 2108 queuelist); 2109 if (tmp->q != rq->q) 2110 plug->multiple_queues = true; 2111 } 2112 } 2113 2114 /** 2115 * blk_mq_submit_bio - Create and send a request to block device. 2116 * @bio: Bio pointer. 2117 * 2118 * Builds up a request structure from @q and @bio and send to the device. The 2119 * request may not be queued directly to hardware if: 2120 * * This request can be merged with another one 2121 * * We want to place request at plug queue for possible future merging 2122 * * There is an IO scheduler active at this queue 2123 * 2124 * It will not queue the request if there is an error with the bio, or at the 2125 * request creation. 2126 * 2127 * Returns: Request queue cookie. 
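 *	   BLK_QC_T_NONE is returned when no request was queued; otherwise the
 *	   cookie can later be passed to blk_poll() on queues that support
 *	   polling.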
2128 */ 2129 blk_qc_t blk_mq_submit_bio(struct bio *bio) 2130 { 2131 struct request_queue *q = bio->bi_disk->queue; 2132 const int is_sync = op_is_sync(bio->bi_opf); 2133 const int is_flush_fua = op_is_flush(bio->bi_opf); 2134 struct blk_mq_alloc_data data = { 2135 .q = q, 2136 }; 2137 struct request *rq; 2138 struct blk_plug *plug; 2139 struct request *same_queue_rq = NULL; 2140 unsigned int nr_segs; 2141 blk_qc_t cookie; 2142 blk_status_t ret; 2143 2144 blk_queue_bounce(q, &bio); 2145 __blk_queue_split(&bio, &nr_segs); 2146 2147 if (!bio_integrity_prep(bio)) 2148 goto queue_exit; 2149 2150 if (!is_flush_fua && !blk_queue_nomerges(q) && 2151 blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq)) 2152 goto queue_exit; 2153 2154 if (blk_mq_sched_bio_merge(q, bio, nr_segs)) 2155 goto queue_exit; 2156 2157 rq_qos_throttle(q, bio); 2158 2159 data.cmd_flags = bio->bi_opf; 2160 rq = __blk_mq_alloc_request(&data); 2161 if (unlikely(!rq)) { 2162 rq_qos_cleanup(q, bio); 2163 if (bio->bi_opf & REQ_NOWAIT) 2164 bio_wouldblock_error(bio); 2165 goto queue_exit; 2166 } 2167 2168 trace_block_getrq(q, bio, bio->bi_opf); 2169 2170 rq_qos_track(q, rq, bio); 2171 2172 cookie = request_to_qc_t(data.hctx, rq); 2173 2174 blk_mq_bio_to_request(rq, bio, nr_segs); 2175 2176 ret = blk_crypto_init_request(rq); 2177 if (ret != BLK_STS_OK) { 2178 bio->bi_status = ret; 2179 bio_endio(bio); 2180 blk_mq_free_request(rq); 2181 return BLK_QC_T_NONE; 2182 } 2183 2184 plug = blk_mq_plug(q, bio); 2185 if (unlikely(is_flush_fua)) { 2186 /* Bypass scheduler for flush requests */ 2187 blk_insert_flush(rq); 2188 blk_mq_run_hw_queue(data.hctx, true); 2189 } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs || 2190 !blk_queue_nonrot(q))) { 2191 /* 2192 * Use plugging if we have a ->commit_rqs() hook as well, as 2193 * we know the driver uses bd->last in a smart fashion. 2194 * 2195 * Use normal plugging if this disk is slow HDD, as sequential 2196 * IO may benefit a lot from plug merging. 2197 */ 2198 unsigned int request_count = plug->rq_count; 2199 struct request *last = NULL; 2200 2201 if (!request_count) 2202 trace_block_plug(q); 2203 else 2204 last = list_entry_rq(plug->mq_list.prev); 2205 2206 if (request_count >= BLK_MAX_REQUEST_COUNT || (last && 2207 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) { 2208 blk_flush_plug_list(plug, false); 2209 trace_block_plug(q); 2210 } 2211 2212 blk_add_rq_to_plug(plug, rq); 2213 } else if (q->elevator) { 2214 /* Insert the request at the IO scheduler queue */ 2215 blk_mq_sched_insert_request(rq, false, true, true); 2216 } else if (plug && !blk_queue_nomerges(q)) { 2217 /* 2218 * We do limited plugging. If the bio can be merged, do that. 2219 * Otherwise the existing request in the plug list will be 2220 * issued. So the plug list will have one request at most 2221 * The plug list might get flushed before this. If that happens, 2222 * the plug list is empty, and same_queue_rq is invalid. 
2223 */ 2224 if (list_empty(&plug->mq_list)) 2225 same_queue_rq = NULL; 2226 if (same_queue_rq) { 2227 list_del_init(&same_queue_rq->queuelist); 2228 plug->rq_count--; 2229 } 2230 blk_add_rq_to_plug(plug, rq); 2231 trace_block_plug(q); 2232 2233 if (same_queue_rq) { 2234 data.hctx = same_queue_rq->mq_hctx; 2235 trace_block_unplug(q, 1, true); 2236 blk_mq_try_issue_directly(data.hctx, same_queue_rq, 2237 &cookie); 2238 } 2239 } else if ((q->nr_hw_queues > 1 && is_sync) || 2240 !data.hctx->dispatch_busy) { 2241 /* 2242 * There is no scheduler and we can try to send directly 2243 * to the hardware. 2244 */ 2245 blk_mq_try_issue_directly(data.hctx, rq, &cookie); 2246 } else { 2247 /* Default case. */ 2248 blk_mq_sched_insert_request(rq, false, true, true); 2249 } 2250 2251 return cookie; 2252 queue_exit: 2253 blk_queue_exit(q); 2254 return BLK_QC_T_NONE; 2255 } 2256 EXPORT_SYMBOL_GPL(blk_mq_submit_bio); /* only for request based dm */ 2257 2258 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, 2259 unsigned int hctx_idx) 2260 { 2261 struct page *page; 2262 2263 if (tags->rqs && set->ops->exit_request) { 2264 int i; 2265 2266 for (i = 0; i < tags->nr_tags; i++) { 2267 struct request *rq = tags->static_rqs[i]; 2268 2269 if (!rq) 2270 continue; 2271 set->ops->exit_request(set, rq, hctx_idx); 2272 tags->static_rqs[i] = NULL; 2273 } 2274 } 2275 2276 while (!list_empty(&tags->page_list)) { 2277 page = list_first_entry(&tags->page_list, struct page, lru); 2278 list_del_init(&page->lru); 2279 /* 2280 * Remove kmemleak object previously allocated in 2281 * blk_mq_alloc_rqs(). 2282 */ 2283 kmemleak_free(page_address(page)); 2284 __free_pages(page, page->private); 2285 } 2286 } 2287 2288 void blk_mq_free_rq_map(struct blk_mq_tags *tags) 2289 { 2290 kfree(tags->rqs); 2291 tags->rqs = NULL; 2292 kfree(tags->static_rqs); 2293 tags->static_rqs = NULL; 2294 2295 blk_mq_free_tags(tags); 2296 } 2297 2298 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, 2299 unsigned int hctx_idx, 2300 unsigned int nr_tags, 2301 unsigned int reserved_tags) 2302 { 2303 struct blk_mq_tags *tags; 2304 int node; 2305 2306 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx); 2307 if (node == NUMA_NO_NODE) 2308 node = set->numa_node; 2309 2310 tags = blk_mq_init_tags(nr_tags, reserved_tags, node, 2311 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags)); 2312 if (!tags) 2313 return NULL; 2314 2315 tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *), 2316 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, 2317 node); 2318 if (!tags->rqs) { 2319 blk_mq_free_tags(tags); 2320 return NULL; 2321 } 2322 2323 tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *), 2324 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, 2325 node); 2326 if (!tags->static_rqs) { 2327 kfree(tags->rqs); 2328 blk_mq_free_tags(tags); 2329 return NULL; 2330 } 2331 2332 return tags; 2333 } 2334 2335 static size_t order_to_size(unsigned int order) 2336 { 2337 return (size_t)PAGE_SIZE << order; 2338 } 2339 2340 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, 2341 unsigned int hctx_idx, int node) 2342 { 2343 int ret; 2344 2345 if (set->ops->init_request) { 2346 ret = set->ops->init_request(set, rq, hctx_idx, node); 2347 if (ret) 2348 return ret; 2349 } 2350 2351 WRITE_ONCE(rq->state, MQ_RQ_IDLE); 2352 return 0; 2353 } 2354 2355 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, 2356 unsigned int hctx_idx, unsigned int depth) 2357 { 2358 unsigned int i, j, 
entries_per_page, max_order = 4; 2359 size_t rq_size, left; 2360 int node; 2361 2362 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx); 2363 if (node == NUMA_NO_NODE) 2364 node = set->numa_node; 2365 2366 INIT_LIST_HEAD(&tags->page_list); 2367 2368 /* 2369 * rq_size is the size of the request plus driver payload, rounded 2370 * to the cacheline size 2371 */ 2372 rq_size = round_up(sizeof(struct request) + set->cmd_size, 2373 cache_line_size()); 2374 left = rq_size * depth; 2375 2376 for (i = 0; i < depth; ) { 2377 int this_order = max_order; 2378 struct page *page; 2379 int to_do; 2380 void *p; 2381 2382 while (this_order && left < order_to_size(this_order - 1)) 2383 this_order--; 2384 2385 do { 2386 page = alloc_pages_node(node, 2387 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO, 2388 this_order); 2389 if (page) 2390 break; 2391 if (!this_order--) 2392 break; 2393 if (order_to_size(this_order) < rq_size) 2394 break; 2395 } while (1); 2396 2397 if (!page) 2398 goto fail; 2399 2400 page->private = this_order; 2401 list_add_tail(&page->lru, &tags->page_list); 2402 2403 p = page_address(page); 2404 /* 2405 * Allow kmemleak to scan these pages as they contain pointers 2406 * to additional allocations like via ops->init_request(). 2407 */ 2408 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO); 2409 entries_per_page = order_to_size(this_order) / rq_size; 2410 to_do = min(entries_per_page, depth - i); 2411 left -= to_do * rq_size; 2412 for (j = 0; j < to_do; j++) { 2413 struct request *rq = p; 2414 2415 tags->static_rqs[i] = rq; 2416 if (blk_mq_init_request(set, rq, hctx_idx, node)) { 2417 tags->static_rqs[i] = NULL; 2418 goto fail; 2419 } 2420 2421 p += rq_size; 2422 i++; 2423 } 2424 } 2425 return 0; 2426 2427 fail: 2428 blk_mq_free_rqs(set, tags, hctx_idx); 2429 return -ENOMEM; 2430 } 2431 2432 struct rq_iter_data { 2433 struct blk_mq_hw_ctx *hctx; 2434 bool has_rq; 2435 }; 2436 2437 static bool blk_mq_has_request(struct request *rq, void *data, bool reserved) 2438 { 2439 struct rq_iter_data *iter_data = data; 2440 2441 if (rq->mq_hctx != iter_data->hctx) 2442 return true; 2443 iter_data->has_rq = true; 2444 return false; 2445 } 2446 2447 static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx) 2448 { 2449 struct blk_mq_tags *tags = hctx->sched_tags ? 2450 hctx->sched_tags : hctx->tags; 2451 struct rq_iter_data data = { 2452 .hctx = hctx, 2453 }; 2454 2455 blk_mq_all_tag_iter(tags, blk_mq_has_request, &data); 2456 return data.has_rq; 2457 } 2458 2459 static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu, 2460 struct blk_mq_hw_ctx *hctx) 2461 { 2462 if (cpumask_next_and(-1, hctx->cpumask, cpu_online_mask) != cpu) 2463 return false; 2464 if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids) 2465 return false; 2466 return true; 2467 } 2468 2469 static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node) 2470 { 2471 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, 2472 struct blk_mq_hw_ctx, cpuhp_online); 2473 2474 if (!cpumask_test_cpu(cpu, hctx->cpumask) || 2475 !blk_mq_last_cpu_in_hctx(cpu, hctx)) 2476 return 0; 2477 2478 /* 2479 * Prevent new request from being allocated on the current hctx. 2480 * 2481 * The smp_mb__after_atomic() Pairs with the implied barrier in 2482 * test_and_set_bit_lock in sbitmap_get(). Ensures the inactive flag is 2483 * seen once we return from the tag allocator. 
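 *
 * That way the tag allocator either sees BLK_MQ_S_INACTIVE and releases the
 * tag again, or the request it allocated is visible to the tag iteration in
 * blk_mq_hctx_has_requests() below.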
2484 */ 2485 set_bit(BLK_MQ_S_INACTIVE, &hctx->state); 2486 smp_mb__after_atomic(); 2487 2488 /* 2489 * Try to grab a reference to the queue and wait for any outstanding 2490 * requests. If we could not grab a reference the queue has been 2491 * frozen and there are no requests. 2492 */ 2493 if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) { 2494 while (blk_mq_hctx_has_requests(hctx)) 2495 msleep(5); 2496 percpu_ref_put(&hctx->queue->q_usage_counter); 2497 } 2498 2499 return 0; 2500 } 2501 2502 static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node) 2503 { 2504 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, 2505 struct blk_mq_hw_ctx, cpuhp_online); 2506 2507 if (cpumask_test_cpu(cpu, hctx->cpumask)) 2508 clear_bit(BLK_MQ_S_INACTIVE, &hctx->state); 2509 return 0; 2510 } 2511 2512 /* 2513 * 'cpu' is going away. splice any existing rq_list entries from this 2514 * software queue to the hw queue dispatch list, and ensure that it 2515 * gets run. 2516 */ 2517 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node) 2518 { 2519 struct blk_mq_hw_ctx *hctx; 2520 struct blk_mq_ctx *ctx; 2521 LIST_HEAD(tmp); 2522 enum hctx_type type; 2523 2524 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead); 2525 if (!cpumask_test_cpu(cpu, hctx->cpumask)) 2526 return 0; 2527 2528 ctx = __blk_mq_get_ctx(hctx->queue, cpu); 2529 type = hctx->type; 2530 2531 spin_lock(&ctx->lock); 2532 if (!list_empty(&ctx->rq_lists[type])) { 2533 list_splice_init(&ctx->rq_lists[type], &tmp); 2534 blk_mq_hctx_clear_pending(hctx, ctx); 2535 } 2536 spin_unlock(&ctx->lock); 2537 2538 if (list_empty(&tmp)) 2539 return 0; 2540 2541 spin_lock(&hctx->lock); 2542 list_splice_tail_init(&tmp, &hctx->dispatch); 2543 spin_unlock(&hctx->lock); 2544 2545 blk_mq_run_hw_queue(hctx, true); 2546 return 0; 2547 } 2548 2549 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx) 2550 { 2551 if (!(hctx->flags & BLK_MQ_F_STACKING)) 2552 cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, 2553 &hctx->cpuhp_online); 2554 cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD, 2555 &hctx->cpuhp_dead); 2556 } 2557 2558 /* hctx->ctxs will be freed in queue's release handler */ 2559 static void blk_mq_exit_hctx(struct request_queue *q, 2560 struct blk_mq_tag_set *set, 2561 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) 2562 { 2563 if (blk_mq_hw_queue_mapped(hctx)) 2564 blk_mq_tag_idle(hctx); 2565 2566 if (set->ops->exit_request) 2567 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx); 2568 2569 if (set->ops->exit_hctx) 2570 set->ops->exit_hctx(hctx, hctx_idx); 2571 2572 blk_mq_remove_cpuhp(hctx); 2573 2574 spin_lock(&q->unused_hctx_lock); 2575 list_add(&hctx->hctx_list, &q->unused_hctx_list); 2576 spin_unlock(&q->unused_hctx_lock); 2577 } 2578 2579 static void blk_mq_exit_hw_queues(struct request_queue *q, 2580 struct blk_mq_tag_set *set, int nr_queue) 2581 { 2582 struct blk_mq_hw_ctx *hctx; 2583 unsigned int i; 2584 2585 queue_for_each_hw_ctx(q, hctx, i) { 2586 if (i == nr_queue) 2587 break; 2588 blk_mq_debugfs_unregister_hctx(hctx); 2589 blk_mq_exit_hctx(q, set, hctx, i); 2590 } 2591 } 2592 2593 static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set) 2594 { 2595 int hw_ctx_size = sizeof(struct blk_mq_hw_ctx); 2596 2597 BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu), 2598 __alignof__(struct blk_mq_hw_ctx)) != 2599 sizeof(struct blk_mq_hw_ctx)); 2600 2601 if (tag_set->flags & BLK_MQ_F_BLOCKING) 2602 hw_ctx_size += sizeof(struct srcu_struct); 2603 2604 return 
hw_ctx_size; 2605 } 2606 2607 static int blk_mq_init_hctx(struct request_queue *q, 2608 struct blk_mq_tag_set *set, 2609 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) 2610 { 2611 hctx->queue_num = hctx_idx; 2612 2613 if (!(hctx->flags & BLK_MQ_F_STACKING)) 2614 cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, 2615 &hctx->cpuhp_online); 2616 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead); 2617 2618 hctx->tags = set->tags[hctx_idx]; 2619 2620 if (set->ops->init_hctx && 2621 set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) 2622 goto unregister_cpu_notifier; 2623 2624 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, 2625 hctx->numa_node)) 2626 goto exit_hctx; 2627 return 0; 2628 2629 exit_hctx: 2630 if (set->ops->exit_hctx) 2631 set->ops->exit_hctx(hctx, hctx_idx); 2632 unregister_cpu_notifier: 2633 blk_mq_remove_cpuhp(hctx); 2634 return -1; 2635 } 2636 2637 static struct blk_mq_hw_ctx * 2638 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set, 2639 int node) 2640 { 2641 struct blk_mq_hw_ctx *hctx; 2642 gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY; 2643 2644 hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node); 2645 if (!hctx) 2646 goto fail_alloc_hctx; 2647 2648 if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node)) 2649 goto free_hctx; 2650 2651 atomic_set(&hctx->nr_active, 0); 2652 if (node == NUMA_NO_NODE) 2653 node = set->numa_node; 2654 hctx->numa_node = node; 2655 2656 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); 2657 spin_lock_init(&hctx->lock); 2658 INIT_LIST_HEAD(&hctx->dispatch); 2659 hctx->queue = q; 2660 hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED; 2661 2662 INIT_LIST_HEAD(&hctx->hctx_list); 2663 2664 /* 2665 * Allocate space for all possible cpus to avoid allocation at 2666 * runtime 2667 */ 2668 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *), 2669 gfp, node); 2670 if (!hctx->ctxs) 2671 goto free_cpumask; 2672 2673 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), 2674 gfp, node)) 2675 goto free_ctxs; 2676 hctx->nr_ctx = 0; 2677 2678 spin_lock_init(&hctx->dispatch_wait_lock); 2679 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake); 2680 INIT_LIST_HEAD(&hctx->dispatch_wait.entry); 2681 2682 hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp); 2683 if (!hctx->fq) 2684 goto free_bitmap; 2685 2686 if (hctx->flags & BLK_MQ_F_BLOCKING) 2687 init_srcu_struct(hctx->srcu); 2688 blk_mq_hctx_kobj_init(hctx); 2689 2690 return hctx; 2691 2692 free_bitmap: 2693 sbitmap_free(&hctx->ctx_map); 2694 free_ctxs: 2695 kfree(hctx->ctxs); 2696 free_cpumask: 2697 free_cpumask_var(hctx->cpumask); 2698 free_hctx: 2699 kfree(hctx); 2700 fail_alloc_hctx: 2701 return NULL; 2702 } 2703 2704 static void blk_mq_init_cpu_queues(struct request_queue *q, 2705 unsigned int nr_hw_queues) 2706 { 2707 struct blk_mq_tag_set *set = q->tag_set; 2708 unsigned int i, j; 2709 2710 for_each_possible_cpu(i) { 2711 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); 2712 struct blk_mq_hw_ctx *hctx; 2713 int k; 2714 2715 __ctx->cpu = i; 2716 spin_lock_init(&__ctx->lock); 2717 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++) 2718 INIT_LIST_HEAD(&__ctx->rq_lists[k]); 2719 2720 __ctx->queue = q; 2721 2722 /* 2723 * Set local node, IFF we have more than one hw queue. 
If 2724 * not, we remain on the home node of the device 2725 */ 2726 for (j = 0; j < set->nr_maps; j++) { 2727 hctx = blk_mq_map_queue_type(q, j, i); 2728 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE) 2729 hctx->numa_node = local_memory_node(cpu_to_node(i)); 2730 } 2731 } 2732 } 2733 2734 static bool __blk_mq_alloc_map_and_request(struct blk_mq_tag_set *set, 2735 int hctx_idx) 2736 { 2737 int ret = 0; 2738 2739 set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx, 2740 set->queue_depth, set->reserved_tags); 2741 if (!set->tags[hctx_idx]) 2742 return false; 2743 2744 ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx, 2745 set->queue_depth); 2746 if (!ret) 2747 return true; 2748 2749 blk_mq_free_rq_map(set->tags[hctx_idx]); 2750 set->tags[hctx_idx] = NULL; 2751 return false; 2752 } 2753 2754 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set, 2755 unsigned int hctx_idx) 2756 { 2757 if (set->tags && set->tags[hctx_idx]) { 2758 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx); 2759 blk_mq_free_rq_map(set->tags[hctx_idx]); 2760 set->tags[hctx_idx] = NULL; 2761 } 2762 } 2763 2764 static void blk_mq_map_swqueue(struct request_queue *q) 2765 { 2766 unsigned int i, j, hctx_idx; 2767 struct blk_mq_hw_ctx *hctx; 2768 struct blk_mq_ctx *ctx; 2769 struct blk_mq_tag_set *set = q->tag_set; 2770 2771 queue_for_each_hw_ctx(q, hctx, i) { 2772 cpumask_clear(hctx->cpumask); 2773 hctx->nr_ctx = 0; 2774 hctx->dispatch_from = NULL; 2775 } 2776 2777 /* 2778 * Map software to hardware queues. 2779 * 2780 * If the cpu isn't present, the cpu is mapped to first hctx. 2781 */ 2782 for_each_possible_cpu(i) { 2783 2784 ctx = per_cpu_ptr(q->queue_ctx, i); 2785 for (j = 0; j < set->nr_maps; j++) { 2786 if (!set->map[j].nr_queues) { 2787 ctx->hctxs[j] = blk_mq_map_queue_type(q, 2788 HCTX_TYPE_DEFAULT, i); 2789 continue; 2790 } 2791 hctx_idx = set->map[j].mq_map[i]; 2792 /* unmapped hw queue can be remapped after CPU topo changed */ 2793 if (!set->tags[hctx_idx] && 2794 !__blk_mq_alloc_map_and_request(set, hctx_idx)) { 2795 /* 2796 * If tags initialization fail for some hctx, 2797 * that hctx won't be brought online. In this 2798 * case, remap the current ctx to hctx[0] which 2799 * is guaranteed to always have tags allocated 2800 */ 2801 set->map[j].mq_map[i] = 0; 2802 } 2803 2804 hctx = blk_mq_map_queue_type(q, j, i); 2805 ctx->hctxs[j] = hctx; 2806 /* 2807 * If the CPU is already set in the mask, then we've 2808 * mapped this one already. This can happen if 2809 * devices share queues across queue maps. 2810 */ 2811 if (cpumask_test_cpu(i, hctx->cpumask)) 2812 continue; 2813 2814 cpumask_set_cpu(i, hctx->cpumask); 2815 hctx->type = j; 2816 ctx->index_hw[hctx->type] = hctx->nr_ctx; 2817 hctx->ctxs[hctx->nr_ctx++] = ctx; 2818 2819 /* 2820 * If the nr_ctx type overflows, we have exceeded the 2821 * amount of sw queues we can support. 2822 */ 2823 BUG_ON(!hctx->nr_ctx); 2824 } 2825 2826 for (; j < HCTX_MAX_TYPES; j++) 2827 ctx->hctxs[j] = blk_mq_map_queue_type(q, 2828 HCTX_TYPE_DEFAULT, i); 2829 } 2830 2831 queue_for_each_hw_ctx(q, hctx, i) { 2832 /* 2833 * If no software queues are mapped to this hardware queue, 2834 * disable it and free the request entries. 2835 */ 2836 if (!hctx->nr_ctx) { 2837 /* Never unmap queue 0. 
We need it as a 2838 * fallback in case tag allocation for a newly 2839 * remapped hw queue fails. 2840 */ 2841 if (i && set->tags[i]) 2842 blk_mq_free_map_and_requests(set, i); 2843 2844 hctx->tags = NULL; 2845 continue; 2846 } 2847 2848 hctx->tags = set->tags[i]; 2849 WARN_ON(!hctx->tags); 2850 2851 /* 2852 * Set the map size to the number of mapped software queues. 2853 * This is more accurate and more efficient than looping 2854 * over all possibly mapped software queues. 2855 */ 2856 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx); 2857 2858 /* 2859 * Initialize batch round-robin counts 2860 */ 2861 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx); 2862 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; 2863 } 2864 } 2865 2866 /* 2867 * Caller needs to ensure that we're either frozen/quiesced, or that 2868 * the queue isn't live yet. 2869 */ 2870 static void queue_set_hctx_shared(struct request_queue *q, bool shared) 2871 { 2872 struct blk_mq_hw_ctx *hctx; 2873 int i; 2874 2875 queue_for_each_hw_ctx(q, hctx, i) { 2876 if (shared) 2877 hctx->flags |= BLK_MQ_F_TAG_SHARED; 2878 else 2879 hctx->flags &= ~BLK_MQ_F_TAG_SHARED; 2880 } 2881 } 2882 2883 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, 2884 bool shared) 2885 { 2886 struct request_queue *q; 2887 2888 lockdep_assert_held(&set->tag_list_lock); 2889 2890 list_for_each_entry(q, &set->tag_list, tag_set_list) { 2891 blk_mq_freeze_queue(q); 2892 queue_set_hctx_shared(q, shared); 2893 blk_mq_unfreeze_queue(q); 2894 } 2895 } 2896 2897 static void blk_mq_del_queue_tag_set(struct request_queue *q) 2898 { 2899 struct blk_mq_tag_set *set = q->tag_set; 2900 2901 mutex_lock(&set->tag_list_lock); 2902 list_del(&q->tag_set_list); 2903 if (list_is_singular(&set->tag_list)) { 2904 /* just transitioned to unshared */ 2905 set->flags &= ~BLK_MQ_F_TAG_SHARED; 2906 /* update existing queue */ 2907 blk_mq_update_tag_set_depth(set, false); 2908 } 2909 mutex_unlock(&set->tag_list_lock); 2910 INIT_LIST_HEAD(&q->tag_set_list); 2911 } 2912 2913 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, 2914 struct request_queue *q) 2915 { 2916 mutex_lock(&set->tag_list_lock); 2917 2918 /* 2919 * Check to see if we're transitioning to shared (from 1 to 2 queues).
2920 */ 2921 if (!list_empty(&set->tag_list) && 2922 !(set->flags & BLK_MQ_F_TAG_SHARED)) { 2923 set->flags |= BLK_MQ_F_TAG_SHARED; 2924 /* update existing queue */ 2925 blk_mq_update_tag_set_depth(set, true); 2926 } 2927 if (set->flags & BLK_MQ_F_TAG_SHARED) 2928 queue_set_hctx_shared(q, true); 2929 list_add_tail(&q->tag_set_list, &set->tag_list); 2930 2931 mutex_unlock(&set->tag_list_lock); 2932 } 2933 2934 /* All allocations will be freed in release handler of q->mq_kobj */ 2935 static int blk_mq_alloc_ctxs(struct request_queue *q) 2936 { 2937 struct blk_mq_ctxs *ctxs; 2938 int cpu; 2939 2940 ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL); 2941 if (!ctxs) 2942 return -ENOMEM; 2943 2944 ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx); 2945 if (!ctxs->queue_ctx) 2946 goto fail; 2947 2948 for_each_possible_cpu(cpu) { 2949 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu); 2950 ctx->ctxs = ctxs; 2951 } 2952 2953 q->mq_kobj = &ctxs->kobj; 2954 q->queue_ctx = ctxs->queue_ctx; 2955 2956 return 0; 2957 fail: 2958 kfree(ctxs); 2959 return -ENOMEM; 2960 } 2961 2962 /* 2963 * It is the actual release handler for mq, but we do it from 2964 * request queue's release handler for avoiding use-after-free 2965 * and headache because q->mq_kobj shouldn't have been introduced, 2966 * but we can't group ctx/kctx kobj without it. 2967 */ 2968 void blk_mq_release(struct request_queue *q) 2969 { 2970 struct blk_mq_hw_ctx *hctx, *next; 2971 int i; 2972 2973 queue_for_each_hw_ctx(q, hctx, i) 2974 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list)); 2975 2976 /* all hctx are in .unused_hctx_list now */ 2977 list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) { 2978 list_del_init(&hctx->hctx_list); 2979 kobject_put(&hctx->kobj); 2980 } 2981 2982 kfree(q->queue_hw_ctx); 2983 2984 /* 2985 * release .mq_kobj and sw queue's kobject now because 2986 * both share lifetime with request queue. 2987 */ 2988 blk_mq_sysfs_deinit(q); 2989 } 2990 2991 struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set, 2992 void *queuedata) 2993 { 2994 struct request_queue *uninit_q, *q; 2995 2996 uninit_q = blk_alloc_queue(set->numa_node); 2997 if (!uninit_q) 2998 return ERR_PTR(-ENOMEM); 2999 uninit_q->queuedata = queuedata; 3000 3001 /* 3002 * Initialize the queue without an elevator. device_add_disk() will do 3003 * the initialization. 3004 */ 3005 q = blk_mq_init_allocated_queue(set, uninit_q, false); 3006 if (IS_ERR(q)) 3007 blk_cleanup_queue(uninit_q); 3008 3009 return q; 3010 } 3011 EXPORT_SYMBOL_GPL(blk_mq_init_queue_data); 3012 3013 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) 3014 { 3015 return blk_mq_init_queue_data(set, NULL); 3016 } 3017 EXPORT_SYMBOL(blk_mq_init_queue); 3018 3019 /* 3020 * Helper for setting up a queue with mq ops, given queue depth, and 3021 * the passed in mq ops flags. 
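 *
 * Purely illustrative sketch of a caller (the names 'my_tag_set' and
 * 'my_mq_ops' are hypothetical, not taken from an in-tree driver):
 *
 *	q = blk_mq_init_sq_queue(&my_tag_set, &my_mq_ops, 128,
 *				 BLK_MQ_F_SHOULD_MERGE);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);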
3022 */ 3023 struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set, 3024 const struct blk_mq_ops *ops, 3025 unsigned int queue_depth, 3026 unsigned int set_flags) 3027 { 3028 struct request_queue *q; 3029 int ret; 3030 3031 memset(set, 0, sizeof(*set)); 3032 set->ops = ops; 3033 set->nr_hw_queues = 1; 3034 set->nr_maps = 1; 3035 set->queue_depth = queue_depth; 3036 set->numa_node = NUMA_NO_NODE; 3037 set->flags = set_flags; 3038 3039 ret = blk_mq_alloc_tag_set(set); 3040 if (ret) 3041 return ERR_PTR(ret); 3042 3043 q = blk_mq_init_queue(set); 3044 if (IS_ERR(q)) { 3045 blk_mq_free_tag_set(set); 3046 return q; 3047 } 3048 3049 return q; 3050 } 3051 EXPORT_SYMBOL(blk_mq_init_sq_queue); 3052 3053 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx( 3054 struct blk_mq_tag_set *set, struct request_queue *q, 3055 int hctx_idx, int node) 3056 { 3057 struct blk_mq_hw_ctx *hctx = NULL, *tmp; 3058 3059 /* reuse dead hctx first */ 3060 spin_lock(&q->unused_hctx_lock); 3061 list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) { 3062 if (tmp->numa_node == node) { 3063 hctx = tmp; 3064 break; 3065 } 3066 } 3067 if (hctx) 3068 list_del_init(&hctx->hctx_list); 3069 spin_unlock(&q->unused_hctx_lock); 3070 3071 if (!hctx) 3072 hctx = blk_mq_alloc_hctx(q, set, node); 3073 if (!hctx) 3074 goto fail; 3075 3076 if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) 3077 goto free_hctx; 3078 3079 return hctx; 3080 3081 free_hctx: 3082 kobject_put(&hctx->kobj); 3083 fail: 3084 return NULL; 3085 } 3086 3087 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, 3088 struct request_queue *q) 3089 { 3090 int i, j, end; 3091 struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx; 3092 3093 if (q->nr_hw_queues < set->nr_hw_queues) { 3094 struct blk_mq_hw_ctx **new_hctxs; 3095 3096 new_hctxs = kcalloc_node(set->nr_hw_queues, 3097 sizeof(*new_hctxs), GFP_KERNEL, 3098 set->numa_node); 3099 if (!new_hctxs) 3100 return; 3101 if (hctxs) 3102 memcpy(new_hctxs, hctxs, q->nr_hw_queues * 3103 sizeof(*hctxs)); 3104 q->queue_hw_ctx = new_hctxs; 3105 kfree(hctxs); 3106 hctxs = new_hctxs; 3107 } 3108 3109 /* protect against switching io scheduler */ 3110 mutex_lock(&q->sysfs_lock); 3111 for (i = 0; i < set->nr_hw_queues; i++) { 3112 int node; 3113 struct blk_mq_hw_ctx *hctx; 3114 3115 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], i); 3116 /* 3117 * If the hw queue has been mapped to another numa node, 3118 * we need to realloc the hctx. If allocation fails, fallback 3119 * to use the previous one. 3120 */ 3121 if (hctxs[i] && (hctxs[i]->numa_node == node)) 3122 continue; 3123 3124 hctx = blk_mq_alloc_and_init_hctx(set, q, i, node); 3125 if (hctx) { 3126 if (hctxs[i]) 3127 blk_mq_exit_hctx(q, set, hctxs[i], i); 3128 hctxs[i] = hctx; 3129 } else { 3130 if (hctxs[i]) 3131 pr_warn("Allocate new hctx on node %d fails,\ 3132 fallback to previous one on node %d\n", 3133 node, hctxs[i]->numa_node); 3134 else 3135 break; 3136 } 3137 } 3138 /* 3139 * Increasing nr_hw_queues fails. Free the newly allocated 3140 * hctxs and keep the previous q->nr_hw_queues. 
3141 */ 3142 if (i != set->nr_hw_queues) { 3143 j = q->nr_hw_queues; 3144 end = i; 3145 } else { 3146 j = i; 3147 end = q->nr_hw_queues; 3148 q->nr_hw_queues = set->nr_hw_queues; 3149 } 3150 3151 for (; j < end; j++) { 3152 struct blk_mq_hw_ctx *hctx = hctxs[j]; 3153 3154 if (hctx) { 3155 if (hctx->tags) 3156 blk_mq_free_map_and_requests(set, j); 3157 blk_mq_exit_hctx(q, set, hctx, j); 3158 hctxs[j] = NULL; 3159 } 3160 } 3161 mutex_unlock(&q->sysfs_lock); 3162 } 3163 3164 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, 3165 struct request_queue *q, 3166 bool elevator_init) 3167 { 3168 /* mark the queue as mq asap */ 3169 q->mq_ops = set->ops; 3170 3171 q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn, 3172 blk_mq_poll_stats_bkt, 3173 BLK_MQ_POLL_STATS_BKTS, q); 3174 if (!q->poll_cb) 3175 goto err_exit; 3176 3177 if (blk_mq_alloc_ctxs(q)) 3178 goto err_poll; 3179 3180 /* init q->mq_kobj and sw queues' kobjects */ 3181 blk_mq_sysfs_init(q); 3182 3183 INIT_LIST_HEAD(&q->unused_hctx_list); 3184 spin_lock_init(&q->unused_hctx_lock); 3185 3186 blk_mq_realloc_hw_ctxs(set, q); 3187 if (!q->nr_hw_queues) 3188 goto err_hctxs; 3189 3190 INIT_WORK(&q->timeout_work, blk_mq_timeout_work); 3191 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ); 3192 3193 q->tag_set = set; 3194 3195 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; 3196 if (set->nr_maps > HCTX_TYPE_POLL && 3197 set->map[HCTX_TYPE_POLL].nr_queues) 3198 blk_queue_flag_set(QUEUE_FLAG_POLL, q); 3199 3200 q->sg_reserved_size = INT_MAX; 3201 3202 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work); 3203 INIT_LIST_HEAD(&q->requeue_list); 3204 spin_lock_init(&q->requeue_lock); 3205 3206 q->nr_requests = set->queue_depth; 3207 3208 /* 3209 * Default to classic polling 3210 */ 3211 q->poll_nsec = BLK_MQ_POLL_CLASSIC; 3212 3213 blk_mq_init_cpu_queues(q, set->nr_hw_queues); 3214 blk_mq_add_queue_tag_set(set, q); 3215 blk_mq_map_swqueue(q); 3216 3217 if (elevator_init) 3218 elevator_init_mq(q); 3219 3220 return q; 3221 3222 err_hctxs: 3223 kfree(q->queue_hw_ctx); 3224 q->nr_hw_queues = 0; 3225 blk_mq_sysfs_deinit(q); 3226 err_poll: 3227 blk_stat_free_callback(q->poll_cb); 3228 q->poll_cb = NULL; 3229 err_exit: 3230 q->mq_ops = NULL; 3231 return ERR_PTR(-ENOMEM); 3232 } 3233 EXPORT_SYMBOL(blk_mq_init_allocated_queue); 3234 3235 /* tags can _not_ be used after returning from blk_mq_exit_queue */ 3236 void blk_mq_exit_queue(struct request_queue *q) 3237 { 3238 struct blk_mq_tag_set *set = q->tag_set; 3239 3240 blk_mq_del_queue_tag_set(q); 3241 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); 3242 } 3243 3244 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) 3245 { 3246 int i; 3247 3248 for (i = 0; i < set->nr_hw_queues; i++) 3249 if (!__blk_mq_alloc_map_and_request(set, i)) 3250 goto out_unwind; 3251 3252 return 0; 3253 3254 out_unwind: 3255 while (--i >= 0) 3256 blk_mq_free_map_and_requests(set, i); 3257 3258 return -ENOMEM; 3259 } 3260 3261 /* 3262 * Allocate the request maps associated with this tag_set. Note that this 3263 * may reduce the depth asked for, if memory is tight. set->queue_depth 3264 * will be updated to reflect the allocated depth. 
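 * The depth is halved after each failed attempt until allocation succeeds
 * or it would drop below set->reserved_tags + BLK_MQ_TAG_MIN, in which case
 * -ENOMEM is returned.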
3265 */ 3266 static int blk_mq_alloc_map_and_requests(struct blk_mq_tag_set *set) 3267 { 3268 unsigned int depth; 3269 int err; 3270 3271 depth = set->queue_depth; 3272 do { 3273 err = __blk_mq_alloc_rq_maps(set); 3274 if (!err) 3275 break; 3276 3277 set->queue_depth >>= 1; 3278 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) { 3279 err = -ENOMEM; 3280 break; 3281 } 3282 } while (set->queue_depth); 3283 3284 if (!set->queue_depth || err) { 3285 pr_err("blk-mq: failed to allocate request map\n"); 3286 return -ENOMEM; 3287 } 3288 3289 if (depth != set->queue_depth) 3290 pr_info("blk-mq: reduced tag depth (%u -> %u)\n", 3291 depth, set->queue_depth); 3292 3293 return 0; 3294 } 3295 3296 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set) 3297 { 3298 /* 3299 * blk_mq_map_queues() and multiple .map_queues() implementations 3300 * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the 3301 * number of hardware queues. 3302 */ 3303 if (set->nr_maps == 1) 3304 set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues; 3305 3306 if (set->ops->map_queues && !is_kdump_kernel()) { 3307 int i; 3308 3309 /* 3310 * transport .map_queues is usually done in the following 3311 * way: 3312 * 3313 * for (queue = 0; queue < set->nr_hw_queues; queue++) { 3314 * mask = get_cpu_mask(queue) 3315 * for_each_cpu(cpu, mask) 3316 * set->map[x].mq_map[cpu] = queue; 3317 * } 3318 * 3319 * When we need to remap, the table has to be cleared for 3320 * killing stale mapping since one CPU may not be mapped 3321 * to any hw queue. 3322 */ 3323 for (i = 0; i < set->nr_maps; i++) 3324 blk_mq_clear_mq_map(&set->map[i]); 3325 3326 return set->ops->map_queues(set); 3327 } else { 3328 BUG_ON(set->nr_maps > 1); 3329 return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); 3330 } 3331 } 3332 3333 static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set, 3334 int cur_nr_hw_queues, int new_nr_hw_queues) 3335 { 3336 struct blk_mq_tags **new_tags; 3337 3338 if (cur_nr_hw_queues >= new_nr_hw_queues) 3339 return 0; 3340 3341 new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *), 3342 GFP_KERNEL, set->numa_node); 3343 if (!new_tags) 3344 return -ENOMEM; 3345 3346 if (set->tags) 3347 memcpy(new_tags, set->tags, cur_nr_hw_queues * 3348 sizeof(*set->tags)); 3349 kfree(set->tags); 3350 set->tags = new_tags; 3351 set->nr_hw_queues = new_nr_hw_queues; 3352 3353 return 0; 3354 } 3355 3356 /* 3357 * Alloc a tag set to be associated with one or more request queues. 3358 * May fail with EINVAL for various error conditions. May adjust the 3359 * requested depth down, if it's too large. In that case, the set 3360 * value will be stored in set->queue_depth. 
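 *
 * Purely illustrative sketch of how a driver might fill in the set before
 * calling this function ('my_dev', 'my_mq_ops' and 'struct my_cmd' are
 * hypothetical names):
 *
 *	my_dev->tag_set.ops = &my_mq_ops;
 *	my_dev->tag_set.nr_hw_queues = 1;
 *	my_dev->tag_set.queue_depth = 128;
 *	my_dev->tag_set.numa_node = NUMA_NO_NODE;
 *	my_dev->tag_set.cmd_size = sizeof(struct my_cmd);
 *	my_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 *	err = blk_mq_alloc_tag_set(&my_dev->tag_set);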
3361 */ 3362 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) 3363 { 3364 int i, ret; 3365 3366 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS); 3367 3368 if (!set->nr_hw_queues) 3369 return -EINVAL; 3370 if (!set->queue_depth) 3371 return -EINVAL; 3372 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) 3373 return -EINVAL; 3374 3375 if (!set->ops->queue_rq) 3376 return -EINVAL; 3377 3378 if (!set->ops->get_budget ^ !set->ops->put_budget) 3379 return -EINVAL; 3380 3381 if (set->queue_depth > BLK_MQ_MAX_DEPTH) { 3382 pr_info("blk-mq: reduced tag depth to %u\n", 3383 BLK_MQ_MAX_DEPTH); 3384 set->queue_depth = BLK_MQ_MAX_DEPTH; 3385 } 3386 3387 if (!set->nr_maps) 3388 set->nr_maps = 1; 3389 else if (set->nr_maps > HCTX_MAX_TYPES) 3390 return -EINVAL; 3391 3392 /* 3393 * If a crashdump is active, then we are potentially in a very 3394 * memory constrained environment. Limit us to 1 queue and 3395 * 64 tags to prevent using too much memory. 3396 */ 3397 if (is_kdump_kernel()) { 3398 set->nr_hw_queues = 1; 3399 set->nr_maps = 1; 3400 set->queue_depth = min(64U, set->queue_depth); 3401 } 3402 /* 3403 * There is no use for more h/w queues than cpus if we just have 3404 * a single map 3405 */ 3406 if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids) 3407 set->nr_hw_queues = nr_cpu_ids; 3408 3409 if (blk_mq_realloc_tag_set_tags(set, 0, set->nr_hw_queues) < 0) 3410 return -ENOMEM; 3411 3412 ret = -ENOMEM; 3413 for (i = 0; i < set->nr_maps; i++) { 3414 set->map[i].mq_map = kcalloc_node(nr_cpu_ids, 3415 sizeof(set->map[i].mq_map[0]), 3416 GFP_KERNEL, set->numa_node); 3417 if (!set->map[i].mq_map) 3418 goto out_free_mq_map; 3419 set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues; 3420 } 3421 3422 ret = blk_mq_update_queue_map(set); 3423 if (ret) 3424 goto out_free_mq_map; 3425 3426 ret = blk_mq_alloc_map_and_requests(set); 3427 if (ret) 3428 goto out_free_mq_map; 3429 3430 mutex_init(&set->tag_list_lock); 3431 INIT_LIST_HEAD(&set->tag_list); 3432 3433 return 0; 3434 3435 out_free_mq_map: 3436 for (i = 0; i < set->nr_maps; i++) { 3437 kfree(set->map[i].mq_map); 3438 set->map[i].mq_map = NULL; 3439 } 3440 kfree(set->tags); 3441 set->tags = NULL; 3442 return ret; 3443 } 3444 EXPORT_SYMBOL(blk_mq_alloc_tag_set); 3445 3446 void blk_mq_free_tag_set(struct blk_mq_tag_set *set) 3447 { 3448 int i, j; 3449 3450 for (i = 0; i < set->nr_hw_queues; i++) 3451 blk_mq_free_map_and_requests(set, i); 3452 3453 for (j = 0; j < set->nr_maps; j++) { 3454 kfree(set->map[j].mq_map); 3455 set->map[j].mq_map = NULL; 3456 } 3457 3458 kfree(set->tags); 3459 set->tags = NULL; 3460 } 3461 EXPORT_SYMBOL(blk_mq_free_tag_set); 3462 3463 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) 3464 { 3465 struct blk_mq_tag_set *set = q->tag_set; 3466 struct blk_mq_hw_ctx *hctx; 3467 int i, ret; 3468 3469 if (!set) 3470 return -EINVAL; 3471 3472 if (q->nr_requests == nr) 3473 return 0; 3474 3475 blk_mq_freeze_queue(q); 3476 blk_mq_quiesce_queue(q); 3477 3478 ret = 0; 3479 queue_for_each_hw_ctx(q, hctx, i) { 3480 if (!hctx->tags) 3481 continue; 3482 /* 3483 * If we're using an MQ scheduler, just update the scheduler 3484 * queue depth. This is similar to what the old code would do. 
3485 */ 3486 if (!hctx->sched_tags) { 3487 ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr, 3488 false); 3489 } else { 3490 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags, 3491 nr, true); 3492 } 3493 if (ret) 3494 break; 3495 if (q->elevator && q->elevator->type->ops.depth_updated) 3496 q->elevator->type->ops.depth_updated(hctx); 3497 } 3498 3499 if (!ret) 3500 q->nr_requests = nr; 3501 3502 blk_mq_unquiesce_queue(q); 3503 blk_mq_unfreeze_queue(q); 3504 3505 return ret; 3506 } 3507 3508 /* 3509 * request_queue and elevator_type pair. 3510 * It is just used by __blk_mq_update_nr_hw_queues to cache 3511 * the elevator_type associated with a request_queue. 3512 */ 3513 struct blk_mq_qe_pair { 3514 struct list_head node; 3515 struct request_queue *q; 3516 struct elevator_type *type; 3517 }; 3518 3519 /* 3520 * Cache the elevator_type in the qe pair list and switch the 3521 * io scheduler to 'none'. 3522 */ 3523 static bool blk_mq_elv_switch_none(struct list_head *head, 3524 struct request_queue *q) 3525 { 3526 struct blk_mq_qe_pair *qe; 3527 3528 if (!q->elevator) 3529 return true; 3530 3531 qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY); 3532 if (!qe) 3533 return false; 3534 3535 INIT_LIST_HEAD(&qe->node); 3536 qe->q = q; 3537 qe->type = q->elevator->type; 3538 list_add(&qe->node, head); 3539 3540 mutex_lock(&q->sysfs_lock); 3541 /* 3542 * After elevator_switch_mq, the previous elevator_queue will be 3543 * released by elevator_release. The reference to the io scheduler 3544 * module taken by elevator_get will also be put. So take an extra 3545 * reference to the io scheduler module here to prevent it from being 3546 * removed while we are switched to 'none'. 3547 */ 3548 __module_get(qe->type->elevator_owner); 3549 elevator_switch_mq(q, NULL); 3550 mutex_unlock(&q->sysfs_lock); 3551 3552 return true; 3553 } 3554 3555 static void blk_mq_elv_switch_back(struct list_head *head, 3556 struct request_queue *q) 3557 { 3558 struct blk_mq_qe_pair *qe; 3559 struct elevator_type *t = NULL; 3560 3561 list_for_each_entry(qe, head, node) 3562 if (qe->q == q) { 3563 t = qe->type; 3564 break; 3565 } 3566 3567 if (!t) 3568 return; 3569 3570 list_del(&qe->node); 3571 kfree(qe); 3572 3573 mutex_lock(&q->sysfs_lock); 3574 elevator_switch_mq(q, t); 3575 mutex_unlock(&q->sysfs_lock); 3576 } 3577 3578 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, 3579 int nr_hw_queues) 3580 { 3581 struct request_queue *q; 3582 LIST_HEAD(head); 3583 int prev_nr_hw_queues; 3584 3585 lockdep_assert_held(&set->tag_list_lock); 3586 3587 if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids) 3588 nr_hw_queues = nr_cpu_ids; 3589 if (nr_hw_queues < 1) 3590 return; 3591 if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues) 3592 return; 3593 3594 list_for_each_entry(q, &set->tag_list, tag_set_list) 3595 blk_mq_freeze_queue(q); 3596 /* 3597 * Switch IO scheduler to 'none', cleaning up the data associated 3598 * with the previous scheduler. We will switch back once we are done 3599 * updating the new sw to hw queue mappings.
3600 */ 3601 list_for_each_entry(q, &set->tag_list, tag_set_list) 3602 if (!blk_mq_elv_switch_none(&head, q)) 3603 goto switch_back; 3604 3605 list_for_each_entry(q, &set->tag_list, tag_set_list) { 3606 blk_mq_debugfs_unregister_hctxs(q); 3607 blk_mq_sysfs_unregister(q); 3608 } 3609 3610 prev_nr_hw_queues = set->nr_hw_queues; 3611 if (blk_mq_realloc_tag_set_tags(set, set->nr_hw_queues, nr_hw_queues) < 3612 0) 3613 goto reregister; 3614 3615 set->nr_hw_queues = nr_hw_queues; 3616 fallback: 3617 blk_mq_update_queue_map(set); 3618 list_for_each_entry(q, &set->tag_list, tag_set_list) { 3619 blk_mq_realloc_hw_ctxs(set, q); 3620 if (q->nr_hw_queues != set->nr_hw_queues) { 3621 pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n", 3622 nr_hw_queues, prev_nr_hw_queues); 3623 set->nr_hw_queues = prev_nr_hw_queues; 3624 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); 3625 goto fallback; 3626 } 3627 blk_mq_map_swqueue(q); 3628 } 3629 3630 reregister: 3631 list_for_each_entry(q, &set->tag_list, tag_set_list) { 3632 blk_mq_sysfs_register(q); 3633 blk_mq_debugfs_register_hctxs(q); 3634 } 3635 3636 switch_back: 3637 list_for_each_entry(q, &set->tag_list, tag_set_list) 3638 blk_mq_elv_switch_back(&head, q); 3639 3640 list_for_each_entry(q, &set->tag_list, tag_set_list) 3641 blk_mq_unfreeze_queue(q); 3642 } 3643 3644 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) 3645 { 3646 mutex_lock(&set->tag_list_lock); 3647 __blk_mq_update_nr_hw_queues(set, nr_hw_queues); 3648 mutex_unlock(&set->tag_list_lock); 3649 } 3650 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues); 3651 3652 /* Enable polling stats and return whether they were already enabled. */ 3653 static bool blk_poll_stats_enable(struct request_queue *q) 3654 { 3655 if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) || 3656 blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q)) 3657 return true; 3658 blk_stat_add_callback(q, q->poll_cb); 3659 return false; 3660 } 3661 3662 static void blk_mq_poll_stats_start(struct request_queue *q) 3663 { 3664 /* 3665 * We don't arm the callback if polling stats are not enabled or the 3666 * callback is already active. 3667 */ 3668 if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) || 3669 blk_stat_is_active(q->poll_cb)) 3670 return; 3671 3672 blk_stat_activate_msecs(q->poll_cb, 100); 3673 } 3674 3675 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb) 3676 { 3677 struct request_queue *q = cb->data; 3678 int bucket; 3679 3680 for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) { 3681 if (cb->stat[bucket].nr_samples) 3682 q->poll_stat[bucket] = cb->stat[bucket]; 3683 } 3684 } 3685 3686 static unsigned long blk_mq_poll_nsecs(struct request_queue *q, 3687 struct request *rq) 3688 { 3689 unsigned long ret = 0; 3690 int bucket; 3691 3692 /* 3693 * If stats collection isn't on, don't sleep but turn it on for 3694 * future users 3695 */ 3696 if (!blk_poll_stats_enable(q)) 3697 return 0; 3698 3699 /* 3700 * As an optimistic guess, use half of the mean service time 3701 * for this type of request. We can (and should) make this smarter. 3702 * For instance, if the completion latencies are tight, we can 3703 * get closer than just half the mean. This is especially 3704 * important on devices where the completion latencies are longer 3705 * than ~10 usec. We do use the stats for the relevant IO size 3706 * if available which does lead to better estimates. 
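 *
 * For example, with a tracked mean completion time of 20 usec in the
 * matching bucket, the hybrid poll sleeps for roughly 10 usec before
 * switching to busy polling.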
3707 */ 3708 bucket = blk_mq_poll_stats_bkt(rq); 3709 if (bucket < 0) 3710 return ret; 3711 3712 if (q->poll_stat[bucket].nr_samples) 3713 ret = (q->poll_stat[bucket].mean + 1) / 2; 3714 3715 return ret; 3716 } 3717 3718 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q, 3719 struct request *rq) 3720 { 3721 struct hrtimer_sleeper hs; 3722 enum hrtimer_mode mode; 3723 unsigned int nsecs; 3724 ktime_t kt; 3725 3726 if (rq->rq_flags & RQF_MQ_POLL_SLEPT) 3727 return false; 3728 3729 /* 3730 * If we get here, hybrid polling is enabled. Hence poll_nsec can be: 3731 * 3732 * 0: use half of prev avg 3733 * >0: use this specific value 3734 */ 3735 if (q->poll_nsec > 0) 3736 nsecs = q->poll_nsec; 3737 else 3738 nsecs = blk_mq_poll_nsecs(q, rq); 3739 3740 if (!nsecs) 3741 return false; 3742 3743 rq->rq_flags |= RQF_MQ_POLL_SLEPT; 3744 3745 /* 3746 * This will be replaced with the stats tracking code, using 3747 * 'avg_completion_time / 2' as the pre-sleep target. 3748 */ 3749 kt = nsecs; 3750 3751 mode = HRTIMER_MODE_REL; 3752 hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode); 3753 hrtimer_set_expires(&hs.timer, kt); 3754 3755 do { 3756 if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE) 3757 break; 3758 set_current_state(TASK_UNINTERRUPTIBLE); 3759 hrtimer_sleeper_start_expires(&hs, mode); 3760 if (hs.task) 3761 io_schedule(); 3762 hrtimer_cancel(&hs.timer); 3763 mode = HRTIMER_MODE_ABS; 3764 } while (hs.task && !signal_pending(current)); 3765 3766 __set_current_state(TASK_RUNNING); 3767 destroy_hrtimer_on_stack(&hs.timer); 3768 return true; 3769 } 3770 3771 static bool blk_mq_poll_hybrid(struct request_queue *q, 3772 struct blk_mq_hw_ctx *hctx, blk_qc_t cookie) 3773 { 3774 struct request *rq; 3775 3776 if (q->poll_nsec == BLK_MQ_POLL_CLASSIC) 3777 return false; 3778 3779 if (!blk_qc_t_is_internal(cookie)) 3780 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie)); 3781 else { 3782 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie)); 3783 /* 3784 * With scheduling, if the request has completed, we'll 3785 * get a NULL return here, as we clear the sched tag when 3786 * that happens. The request still remains valid, like always, 3787 * so we should be safe with just the NULL check. 3788 */ 3789 if (!rq) 3790 return false; 3791 } 3792 3793 return blk_mq_poll_hybrid_sleep(q, rq); 3794 } 3795 3796 /** 3797 * blk_poll - poll for IO completions 3798 * @q: the queue 3799 * @cookie: cookie passed back at IO submission time 3800 * @spin: whether to spin for completions 3801 * 3802 * Description: 3803 * Poll for completions on the passed in queue. Returns number of 3804 * completed entries found. If @spin is true, then blk_poll will continue 3805 * looping until at least one completion is found, unless the task is 3806 * otherwise marked running (or we need to reschedule). 3807 */ 3808 int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin) 3809 { 3810 struct blk_mq_hw_ctx *hctx; 3811 long state; 3812 3813 if (!blk_qc_t_valid(cookie) || 3814 !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) 3815 return 0; 3816 3817 if (current->plug) 3818 blk_flush_plug_list(current->plug, false); 3819 3820 hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)]; 3821 3822 /* 3823 * If we sleep, have the caller restart the poll loop to reset 3824 * the state. Like for the other success return cases, the 3825 * caller is responsible for checking if the IO completed. If 3826 * the IO isn't complete, we'll get called again and will go 3827 * straight to the busy poll loop. 
3828 */ 3829 if (blk_mq_poll_hybrid(q, hctx, cookie)) 3830 return 1; 3831 3832 hctx->poll_considered++; 3833 3834 state = current->state; 3835 do { 3836 int ret; 3837 3838 hctx->poll_invoked++; 3839 3840 ret = q->mq_ops->poll(hctx); 3841 if (ret > 0) { 3842 hctx->poll_success++; 3843 __set_current_state(TASK_RUNNING); 3844 return ret; 3845 } 3846 3847 if (signal_pending_state(state, current)) 3848 __set_current_state(TASK_RUNNING); 3849 3850 if (current->state == TASK_RUNNING) 3851 return 1; 3852 if (ret < 0 || !spin) 3853 break; 3854 cpu_relax(); 3855 } while (!need_resched()); 3856 3857 __set_current_state(TASK_RUNNING); 3858 return 0; 3859 } 3860 EXPORT_SYMBOL_GPL(blk_poll); 3861 3862 unsigned int blk_mq_rq_cpu(struct request *rq) 3863 { 3864 return rq->mq_ctx->cpu; 3865 } 3866 EXPORT_SYMBOL(blk_mq_rq_cpu); 3867 3868 static int __init blk_mq_init(void) 3869 { 3870 int i; 3871 3872 for_each_possible_cpu(i) 3873 INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i)); 3874 open_softirq(BLOCK_SOFTIRQ, blk_done_softirq); 3875 3876 cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD, 3877 "block/softirq:dead", NULL, 3878 blk_softirq_cpu_dead); 3879 cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL, 3880 blk_mq_hctx_notify_dead); 3881 cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online", 3882 blk_mq_hctx_notify_online, 3883 blk_mq_hctx_notify_offline); 3884 return 0; 3885 } 3886 subsys_initcall(blk_mq_init); 3887