// SPDX-License-Identifier: GPL-2.0
/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>
#include <linux/blk-crypto.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include <linux/t10-pi.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-pm.h"
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

static DEFINE_PER_CPU(struct list_head, blk_cpu_done);

static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);

static int blk_mq_poll_stats_bkt(const struct request *rq)
{
	int ddir, sectors, bucket;

	ddir = rq_data_dir(rq);
	sectors = blk_rq_stats_sectors(rq);

	bucket = ddir + 2 * ilog2(sectors);

	if (bucket < 0)
		return -1;
	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

	return bucket;
}
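
/*
 * Worked example of the bucket mapping above (illustrative only): a read
 * (ddir == 0) of 32 sectors gives bucket = 0 + 2 * ilog2(32) = 10, while a
 * write (ddir == 1) of the same size lands in bucket 11. Requests large
 * enough to overflow the table clamp to the last bucket pair, assuming the
 * BLK_MQ_POLL_STATS_BKTS buckets are split evenly between the two
 * directions.
 */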
/*
 * Check if any of the ctx, dispatch list or elevator
 * have pending work in this hardware queue.
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return !list_empty_careful(&hctx->dispatch) ||
		sbitmap_any_bit_set(&hctx->ctx_map) ||
			blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	if (!sbitmap_test_bit(&hctx->ctx_map, bit))
		sbitmap_set_bit(&hctx->ctx_map, bit);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	sbitmap_clear_bit(&hctx->ctx_map, bit);
}

struct mq_inflight {
	struct block_device *part;
	unsigned int inflight[2];
};

static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
				  struct request *rq, void *priv,
				  bool reserved)
{
	struct mq_inflight *mi = priv;

	if ((!mi->part->bd_partno || rq->part == mi->part) &&
	    blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
		mi->inflight[rq_data_dir(rq)]++;

	return true;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part)
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);

	return mi.inflight[0] + mi.inflight[1];
}

void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
	inflight[0] = mi.inflight[0];
	inflight[1] = mi.inflight[1];
}

void blk_freeze_queue_start(struct request_queue *q)
{
	mutex_lock(&q->mq_freeze_lock);
	if (++q->mq_freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		mutex_unlock(&q->mq_freeze_lock);
		if (queue_is_mq(q))
			blk_mq_run_hw_queues(q, false);
	} else {
		mutex_unlock(&q->mq_freeze_lock);
	}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
					percpu_ref_is_zero(&q->q_usage_counter),
					timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero.  For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
	blk_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	mutex_lock(&q->mq_freeze_lock);
	q->mq_freeze_depth--;
	WARN_ON_ONCE(q->mq_freeze_depth < 0);
	if (!q->mq_freeze_depth) {
		percpu_ref_resurrect(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
	mutex_unlock(&q->mq_freeze_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
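
/*
 * Illustrative usage of the freeze API above (hypothetical caller, not
 * part of this file): a driver that needs to change queue data structures
 * with no requests in flight would do
 *
 *	blk_mq_freeze_queue(q);
 *	... modify queue/tag data structures ...
 *	blk_mq_unfreeze_queue(q);
 *
 * Freezing waits for q_usage_counter to drop to zero, so no request can be
 * allocated or in use while the queue is frozen.
 */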
/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback function from being invoked. Once this function returns, we
 * guarantee that no dispatch can happen until the queue is unquiesced via
 * blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	bool rcu = false;

	blk_mq_quiesce_queue_nowait(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->flags & BLK_MQ_F_BLOCKING)
			synchronize_srcu(hctx->srcu);
		else
			rcu = true;
	}
	if (rcu)
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function recovers the queue to the state it had before
 * blk_mq_quiesce_queue() was called.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
	blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);

	/* dispatch requests which are inserted during quiescing */
	blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
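
/*
 * Illustrative pairing of the two calls above (hypothetical caller, not
 * part of this file): a driver that must temporarily stop ->queue_rq()
 * from being invoked, e.g. around a controller reset, would do
 *
 *	blk_mq_quiesce_queue(q);
 *	... no new dispatches can happen here ...
 *	blk_mq_unquiesce_queue(q);
 *
 * Unlike freezing, quiescing only blocks dispatch; requests may still be
 * allocated and inserted while the queue is quiesced.
 */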
void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);
}

/*
 * Only need start/end time stamping if we have iostat or
 * blk stats enabled, or using an IO scheduler.
 */
static inline bool blk_mq_need_time_stamp(struct request *rq)
{
	return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS)) || rq->q->elevator;
}

static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
		unsigned int tag, u64 alloc_time_ns)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct request *rq = tags->static_rqs[tag];

	if (data->q->elevator) {
		rq->tag = BLK_MQ_NO_TAG;
		rq->internal_tag = tag;
	} else {
		rq->tag = tag;
		rq->internal_tag = BLK_MQ_NO_TAG;
	}

	/* csd/requeue_work/fifo_time is initialized before use */
	rq->q = data->q;
	rq->mq_ctx = data->ctx;
	rq->mq_hctx = data->hctx;
	rq->rq_flags = 0;
	rq->cmd_flags = data->cmd_flags;
	if (data->flags & BLK_MQ_REQ_PREEMPT)
		rq->rq_flags |= RQF_PREEMPT;
	if (blk_queue_io_stat(data->q))
		rq->rq_flags |= RQF_IO_STAT;
	INIT_LIST_HEAD(&rq->queuelist);
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->rq_disk = NULL;
	rq->part = NULL;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	rq->alloc_time_ns = alloc_time_ns;
#endif
	if (blk_mq_need_time_stamp(rq))
		rq->start_time_ns = ktime_get_ns();
	else
		rq->start_time_ns = 0;
	rq->io_start_time_ns = 0;
	rq->stats_sectors = 0;
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	blk_crypto_rq_set_defaults(rq);
	/* tag was already set */
	WRITE_ONCE(rq->deadline, 0);

	rq->timeout = 0;

	rq->end_io = NULL;
	rq->end_io_data = NULL;

	data->ctx->rq_dispatched[op_is_sync(data->cmd_flags)]++;
	refcount_set(&rq->ref, 1);

	if (!op_is_flush(data->cmd_flags)) {
		struct elevator_queue *e = data->q->elevator;

		rq->elv.icq = NULL;
		if (e && e->type->ops.prepare_request) {
			if (e->type->icq_cache)
				blk_mq_sched_assign_ioc(rq);

			e->type->ops.prepare_request(rq);
			rq->rq_flags |= RQF_ELVPRIV;
		}
	}

	data->hctx->queued++;
	return rq;
}

static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
{
	struct request_queue *q = data->q;
	struct elevator_queue *e = q->elevator;
	u64 alloc_time_ns = 0;
	unsigned int tag;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = ktime_get_ns();

	if (data->cmd_flags & REQ_NOWAIT)
		data->flags |= BLK_MQ_REQ_NOWAIT;

	if (e) {
		/*
		 * Flush requests are special and go directly to the
		 * dispatch list. Don't include reserved tags in the
		 * limiting, as it isn't useful.
		 */
		if (!op_is_flush(data->cmd_flags) &&
		    e->type->ops.limit_depth &&
		    !(data->flags & BLK_MQ_REQ_RESERVED))
			e->type->ops.limit_depth(data->cmd_flags, data);
	}

retry:
	data->ctx = blk_mq_get_ctx(q);
	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
	if (!e)
		blk_mq_tag_busy(data->hctx);

	/*
	 * Waiting allocations only fail because of an inactive hctx.  In that
	 * case just retry the hctx assignment and tag allocation as CPU hotplug
	 * should have migrated us to an online CPU by now.
	 */
	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_NO_TAG) {
		if (data->flags & BLK_MQ_REQ_NOWAIT)
			return NULL;

		/*
		 * Give up the CPU and sleep for a random short time to ensure
		 * that threads using a realtime scheduling class are migrated
		 * off the CPU, and thus off the hctx that is going away.
		 */
		msleep(3);
		goto retry;
	}
	return blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
}

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= op,
	};
	struct request *rq;
	int ret;

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	rq = __blk_mq_alloc_request(&data);
	if (!rq)
		goto out_queue_exit;
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(-EWOULDBLOCK);
}
EXPORT_SYMBOL(blk_mq_alloc_request);
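
/*
 * Illustrative usage (hypothetical caller, not part of this file):
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... set up and issue the passthrough request ...
 *	blk_mq_free_request(rq);
 *
 * Without BLK_MQ_REQ_NOWAIT the allocation may sleep for a tag; with it,
 * failure is reported as ERR_PTR(-EWOULDBLOCK) instead of blocking.
 */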
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
	unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= op,
	};
	u64 alloc_time_ns = 0;
	unsigned int cpu;
	unsigned int tag;
	int ret;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = ktime_get_ns();

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context.  No need to complicate the low level
	 * allocator for the rare use case of a command tied to a specific
	 * queue.
	 */
	if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED))))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not tell the caller that it should skip this queue.
	 */
	ret = -EXDEV;
	data.hctx = q->queue_hw_ctx[hctx_idx];
	if (!blk_mq_hw_queue_mapped(data.hctx))
		goto out_queue_exit;
	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
	data.ctx = __blk_mq_get_ctx(q, cpu);

	if (!q->elevator)
		blk_mq_tag_busy(data.hctx);

	ret = -EWOULDBLOCK;
	tag = blk_mq_get_tag(&data);
	if (tag == BLK_MQ_NO_TAG)
		goto out_queue_exit;
	return blk_mq_rq_ctx_init(&data, tag, alloc_time_ns);

out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

static void __blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	const int sched_tag = rq->internal_tag;

	blk_crypto_free_request(rq);
	blk_pm_mark_last_busy(rq);
	rq->mq_hctx = NULL;
	if (rq->tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->tags, ctx, rq->tag);
	if (sched_tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
	blk_mq_sched_restart(hctx);
	blk_queue_exit(q);
}

void blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if (rq->rq_flags & RQF_ELVPRIV) {
		if (e && e->type->ops.finish_request)
			e->type->ops.finish_request(rq);
		if (rq->elv.icq) {
			put_io_context(rq->elv.icq->ioc);
			rq->elv.icq = NULL;
		}
	}

	ctx->rq_completed[rq_is_sync(rq)]++;
	if (rq->rq_flags & RQF_MQ_INFLIGHT)
		__blk_mq_dec_active_requests(hctx);

	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
		laptop_io_completion(q->backing_dev_info);

	rq_qos_done(q, rq);

	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
	if (refcount_dec_and_test(&rq->ref))
		__blk_mq_free_request(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
	u64 now = 0;

	if (blk_mq_need_time_stamp(rq))
		now = ktime_get_ns();

	if (rq->rq_flags & RQF_STATS) {
		blk_mq_poll_stats_start(rq->q);
		blk_stat_add(rq, now);
	}

	blk_mq_sched_completed_request(rq, now);

	blk_account_io_done(rq, now);

	if (rq->end_io) {
		rq_qos_done(rq->q, rq);
		rq->end_io(rq, error);
	} else {
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);
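
/*
 * Illustrative completion flow (hypothetical driver, not part of this
 * file): a driver typically calls blk_mq_complete_request() from its
 * completion interrupt, and its ->complete() callback then finishes the
 * request:
 *
 *	static void foo_complete_rq(struct request *rq)
 *	{
 *		blk_mq_end_request(rq, foo_status_to_blk_status(rq));
 *	}
 *
 * blk_mq_end_request() updates and ends the request in one call; split
 * completions can use blk_update_request() and __blk_mq_end_request()
 * separately.
 */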
/*
 * Softirq action handler - move entries to local list and loop over them
 * while passing them to the queue registered handler.
 */
static __latent_entropy void blk_done_softirq(struct softirq_action *h)
{
	struct list_head *cpu_list, local_list;

	local_irq_disable();
	cpu_list = this_cpu_ptr(&blk_cpu_done);
	list_replace_init(cpu_list, &local_list);
	local_irq_enable();

	while (!list_empty(&local_list)) {
		struct request *rq;

		rq = list_entry(local_list.next, struct request, ipi_list);
		list_del_init(&rq->ipi_list);
		rq->q->mq_ops->complete(rq);
	}
}

static void blk_mq_trigger_softirq(struct request *rq)
{
	struct list_head *list;
	unsigned long flags;

	local_irq_save(flags);
	list = this_cpu_ptr(&blk_cpu_done);
	list_add_tail(&rq->ipi_list, list);

	/*
	 * If the list only contains our just added request, signal a raise of
	 * the softirq.  If there are already entries there, someone already
	 * raised the irq but it hasn't run yet.
	 */
	if (list->next == &rq->ipi_list)
		raise_softirq_irqoff(BLOCK_SOFTIRQ);
	local_irq_restore(flags);
}

static int blk_softirq_cpu_dead(unsigned int cpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	local_irq_disable();
	list_splice_init(&per_cpu(blk_cpu_done, cpu),
			 this_cpu_ptr(&blk_cpu_done));
	raise_softirq_irqoff(BLOCK_SOFTIRQ);
	local_irq_enable();

	return 0;
}


static void __blk_mq_complete_request_remote(void *data)
{
	struct request *rq = data;

	/*
	 * For most single-queue controllers there is only one irq vector
	 * for handling I/O completion, and the only irq's affinity is set
	 * to all possible CPUs.  On most architectures, this affinity means
	 * the irq is handled on one specific CPU.
	 *
	 * So complete I/O requests in softirq context in case of single queue
	 * devices to avoid degrading I/O performance due to irqsoff latency.
	 */
	if (rq->q->nr_hw_queues == 1)
		blk_mq_trigger_softirq(rq);
	else
		rq->q->mq_ops->complete(rq);
}

static inline bool blk_mq_complete_need_ipi(struct request *rq)
{
	int cpu = raw_smp_processor_id();

	if (!IS_ENABLED(CONFIG_SMP) ||
	    !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
		return false;
	/*
	 * With force threaded interrupts enabled, raising softirq from an SMP
	 * function call will always result in waking the ksoftirqd thread.
	 * This is probably worse than completing the request on a different
	 * cache domain.
	 */
	if (force_irqthreads)
		return false;

	/* same CPU or cache domain? Complete locally */
	if (cpu == rq->mq_ctx->cpu ||
	    (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
	     cpus_share_cache(cpu, rq->mq_ctx->cpu)))
		return false;

	/* don't try to IPI to an offline CPU */
	return cpu_online(rq->mq_ctx->cpu);
}
bool blk_mq_complete_request_remote(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);

	/*
	 * For a polled request, always complete locally, it's pointless
	 * to redirect the completion.
	 */
	if (rq->cmd_flags & REQ_HIPRI)
		return false;

	if (blk_mq_complete_need_ipi(rq)) {
		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
		smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);
	} else {
		if (rq->q->nr_hw_queues > 1)
			return false;
		blk_mq_trigger_softirq(rq);
	}

	return true;
}
EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Complete a request by scheduling the ->complete_rq operation.
 **/
void blk_mq_complete_request(struct request *rq)
{
	if (!blk_mq_complete_request_remote(rq))
		rq->q->mq_ops->complete(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
	__releases(hctx->srcu)
{
	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
		rcu_read_unlock();
	else
		srcu_read_unlock(hctx->srcu, srcu_idx);
}

static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
	__acquires(hctx->srcu)
{
	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
		/* shut up gcc false positive */
		*srcu_idx = 0;
		rcu_read_lock();
	} else
		*srcu_idx = srcu_read_lock(hctx->srcu);
}

/**
 * blk_mq_start_request - Start processing a request
 * @rq: Pointer to request to be started
 *
 * Function used by device drivers to notify the block layer that a request
 * is going to be processed now, so the block layer can do proper
 * initializations such as starting the timeout timer.
 */
void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_issue(rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		rq->io_start_time_ns = ktime_get_ns();
		rq->stats_sectors = blk_rq_sectors(rq);
		rq->rq_flags |= RQF_STATS;
		rq_qos_issue(q, rq);
	}

	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);

	blk_add_timer(rq);
	WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
		q->integrity.profile->prepare_fn(rq);
#endif
}
EXPORT_SYMBOL(blk_mq_start_request);

static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_put_driver_tag(rq);

	trace_block_rq_requeue(rq);
	rq_qos_requeue(q, rq);

	if (blk_mq_request_started(rq)) {
		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		rq->rq_flags &= ~RQF_TIMED_OUT;
	}
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
	__blk_mq_requeue_request(rq);

	/* this request will be re-inserted to io scheduler queue */
	blk_mq_sched_requeue_request(rq);

	BUG_ON(!list_empty(&rq->queuelist));
	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);
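
/*
 * Illustrative usage (hypothetical driver, not part of this file): a
 * driver that started a request but has to give it back, e.g. after a
 * transient transport error, can requeue it and kick the requeue list:
 *
 *	blk_mq_requeue_request(rq, true);
 *
 * Passing false instead defers the re-dispatch until someone calls
 * blk_mq_kick_requeue_list() or blk_mq_delay_kick_requeue_list().
 */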
static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;

	spin_lock_irq(&q->requeue_lock);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irq(&q->requeue_lock);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
			continue;

		rq->rq_flags &= ~RQF_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		/*
		 * If RQF_DONTPREP, rq has contained some driver specific
		 * data, so insert it to hctx dispatch list to avoid any
		 * merge.
		 */
		if (rq->rq_flags & RQF_DONTPREP)
			blk_mq_request_bypass_insert(rq, false, false);
		else
			blk_mq_sched_insert_request(rq, true, false, false);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, false, false, false);
	}

	blk_mq_run_hw_queues(q, false);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	if (kick_requeue_list)
		blk_mq_kick_requeue_list(q);
}

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
				    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
	if (tag < tags->nr_tags) {
		prefetch(tags->rqs[tag]);
		return tags->rqs[tag];
	}

	return NULL;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
			       void *priv, bool reserved)
{
	/*
	 * If we find a request that isn't idle and the queue matches,
	 * we know the queue is busy.  Return false to stop the iteration.
	 */
	if (blk_mq_request_started(rq) && rq->q == hctx->queue) {
		bool *busy = priv;

		*busy = true;
		return false;
	}

	return true;
}

bool blk_mq_queue_inflight(struct request_queue *q)
{
	bool busy = false;

	blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
	return busy;
}
EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);

static void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
	req->rq_flags |= RQF_TIMED_OUT;
	if (req->q->mq_ops->timeout) {
		enum blk_eh_timer_return ret;

		ret = req->q->mq_ops->timeout(req, reserved);
		if (ret == BLK_EH_DONE)
			return;
		WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
	}

	blk_add_timer(req);
}

static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
{
	unsigned long deadline;

	if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
		return false;
	if (rq->rq_flags & RQF_TIMED_OUT)
		return false;

	deadline = READ_ONCE(rq->deadline);
	if (time_after_eq(jiffies, deadline))
		return true;

	if (*next == 0)
		*next = deadline;
	else if (time_after(*next, deadline))
		*next = deadline;
	return false;
}

static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
		struct request *rq, void *priv, bool reserved)
{
	unsigned long *next = priv;

	/*
	 * Just do a quick check if it is expired before locking the request in
	 * so we're not unnecessarily synchronizing across CPUs.
	 */
	if (!blk_mq_req_expired(rq, next))
		return true;

	/*
	 * We have reason to believe the request may be expired. Take a
	 * reference on the request to lock this request lifetime into its
	 * currently allocated context to prevent it from being reallocated in
	 * the event the completion by-passes this timeout handler.
	 *
	 * If the reference was already released, then the driver beat the
	 * timeout handler to posting a natural completion.
	 */
	if (!refcount_inc_not_zero(&rq->ref))
		return true;

	/*
	 * The request is now locked and cannot be reallocated underneath the
	 * timeout handler's processing. Re-verify this exact request is truly
	 * expired; if it is not expired, then the request was completed and
	 * reallocated as a new request.
	 */
	if (blk_mq_req_expired(rq, next))
		blk_mq_rq_timed_out(rq, reserved);

	if (is_flush_rq(rq, hctx))
		rq->end_io(rq, 0);
	else if (refcount_dec_and_test(&rq->ref))
		__blk_mq_free_request(rq);

	return true;
}

static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	unsigned long next = 0;
	struct blk_mq_hw_ctx *hctx;
	int i;

	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);

	if (next != 0) {
		mod_timer(&q->timeout, next);
	} else {
		/*
		 * Request timeouts are handled as a forward rolling timer. If
		 * we end up here it means that no requests are pending and
		 * also that no request has been pending for a while. Mark
		 * each hctx as idle.
		 */
		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
	}
	blk_queue_exit(q);
}

struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
	enum hctx_type type = hctx->type;

	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
	sbitmap_clear_bit(sb, bitnr);
	spin_unlock(&ctx->lock);
	return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct flush_busy_ctx_data data = {
		.hctx = hctx,
		.list = list,
	};

	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);

struct dispatch_rq_data {
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
};

static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
		void *data)
{
	struct dispatch_rq_data *dispatch_data = data;
	struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
	enum hctx_type type = hctx->type;

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_lists[type])) {
		dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
		list_del_init(&dispatch_data->rq->queuelist);
		if (list_empty(&ctx->rq_lists[type]))
			sbitmap_clear_bit(sb, bitnr);
	}
	spin_unlock(&ctx->lock);

	return !dispatch_data->rq;
}

struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start)
{
	unsigned off = start ? start->index_hw[hctx->type] : 0;
	struct dispatch_rq_data data = {
		.hctx = hctx,
		.rq   = NULL,
	};

	__sbitmap_for_each_set(&hctx->ctx_map, off,
			       dispatch_rq_from_ctx, &data);

	return data.rq;
}

static inline unsigned int queued_to_index(unsigned int queued)
{
	if (!queued)
		return 0;

	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}

static bool __blk_mq_get_driver_tag(struct request *rq)
{
	struct sbitmap_queue *bt = rq->mq_hctx->tags->bitmap_tags;
	unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
	int tag;

	blk_mq_tag_busy(rq->mq_hctx);

	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
		bt = rq->mq_hctx->tags->breserved_tags;
		tag_offset = 0;
	} else {
		if (!hctx_may_queue(rq->mq_hctx, bt))
			return false;
	}

	tag = __sbitmap_queue_get(bt);
	if (tag == BLK_MQ_NO_TAG)
		return false;

	rq->tag = tag + tag_offset;
	return true;
}

static bool blk_mq_get_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_get_driver_tag(rq))
		return false;

	if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
			!(rq->rq_flags & RQF_MQ_INFLIGHT)) {
		rq->rq_flags |= RQF_MQ_INFLIGHT;
		__blk_mq_inc_active_requests(hctx);
	}
	hctx->tags->rqs[rq->tag] = rq;
	return true;
}

static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
				int flags, void *key)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

	spin_lock(&hctx->dispatch_wait_lock);
	if (!list_empty(&wait->entry)) {
		struct sbitmap_queue *sbq;

		list_del_init(&wait->entry);
		sbq = hctx->tags->bitmap_tags;
		atomic_dec(&sbq->ws_active);
	}
	spin_unlock(&hctx->dispatch_wait_lock);

	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

/*
 * Mark us waiting for a tag. For shared tags, this involves hooking us into
 * the tag wakeups. For non-shared tags, we can simply mark us needing a
 * restart. For both cases, take care to check the condition again after
 * marking us as waiting.
 */
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
				 struct request *rq)
{
	struct sbitmap_queue *sbq = hctx->tags->bitmap_tags;
	struct wait_queue_head *wq;
	wait_queue_entry_t *wait;
	bool ret;

	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
		blk_mq_sched_mark_restart_hctx(hctx);

		/*
		 * It's possible that a tag was freed in the window between the
		 * allocation failure and adding the hardware queue to the wait
		 * queue.
		 *
		 * Don't clear RESTART here, someone else could have set it.
		 * At most this will cost an extra queue run.
		 */
		return blk_mq_get_driver_tag(rq);
	}

	wait = &hctx->dispatch_wait;
	if (!list_empty_careful(&wait->entry))
		return false;

	wq = &bt_wait_ptr(sbq, hctx)->wait;

	spin_lock_irq(&wq->lock);
	spin_lock(&hctx->dispatch_wait_lock);
	if (!list_empty(&wait->entry)) {
		spin_unlock(&hctx->dispatch_wait_lock);
		spin_unlock_irq(&wq->lock);
		return false;
	}

	atomic_inc(&sbq->ws_active);
	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(wq, wait);

	/*
	 * It's possible that a tag was freed in the window between the
	 * allocation failure and adding the hardware queue to the wait
	 * queue.
	 */
	ret = blk_mq_get_driver_tag(rq);
	if (!ret) {
		spin_unlock(&hctx->dispatch_wait_lock);
		spin_unlock_irq(&wq->lock);
		return false;
	}

	/*
	 * We got a tag, remove ourselves from the wait queue to ensure
	 * someone else gets the wakeup.
	 */
	list_del_init(&wait->entry);
	atomic_dec(&sbq->ws_active);
	spin_unlock(&hctx->dispatch_wait_lock);
	spin_unlock_irq(&wq->lock);

	return true;
}

#define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT  8
#define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR  4
/*
 * Update dispatch busy with the Exponential Weighted Moving Average (EWMA):
 * - EWMA is one simple way to compute a running average value
 * - a weight (7/8 and 1/8) is applied so that it can decrease exponentially
 * - take 4 as the factor to avoid getting a too small (0) result; the exact
 *   factor doesn't matter much since the EWMA decreases exponentially
 */
static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
{
	unsigned int ewma;

	if (hctx->queue->elevator)
		return;

	ewma = hctx->dispatch_busy;

	if (!ewma && !busy)
		return;

	ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
	if (busy)
		ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
	ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;

	hctx->dispatch_busy = ewma;
}
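
/*
 * Worked example of the EWMA update above (illustrative only): starting
 * from dispatch_busy == 0, one busy sample gives
 * ewma = (0 * 7 + (1 << 4)) / 8 = 2; a second busy sample gives
 * (2 * 7 + 16) / 8 = 3, and repeated busy samples converge towards 16.
 * A non-busy sample decays the value instead: (16 * 7 + 0) / 8 = 14.
 */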

#define BLK_MQ_RESOURCE_DELAY	3		/* ms units */

static void blk_mq_handle_dev_resource(struct request *rq,
				       struct list_head *list)
{
	struct request *next =
		list_first_entry_or_null(list, struct request, queuelist);

	/*
	 * If an I/O scheduler has been configured and we got a driver tag for
	 * the next request already, free it.
	 */
	if (next)
		blk_mq_put_driver_tag(next);

	list_add(&rq->queuelist, list);
	__blk_mq_requeue_request(rq);
}

static void blk_mq_handle_zone_resource(struct request *rq,
					struct list_head *zone_list)
{
	/*
	 * If we end up here it is because we cannot dispatch a request to a
	 * specific zone due to LLD level zone-write locking or other zone
	 * related resource not being available. In this case, set the request
	 * aside in zone_list for retrying it later.
	 */
	list_add(&rq->queuelist, zone_list);
	__blk_mq_requeue_request(rq);
}

enum prep_dispatch {
	PREP_DISPATCH_OK,
	PREP_DISPATCH_NO_TAG,
	PREP_DISPATCH_NO_BUDGET,
};

static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
						  bool need_budget)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if (need_budget && !blk_mq_get_dispatch_budget(rq->q)) {
		blk_mq_put_driver_tag(rq);
		return PREP_DISPATCH_NO_BUDGET;
	}

	if (!blk_mq_get_driver_tag(rq)) {
		/*
		 * The initial allocation attempt failed, so we need to
		 * rerun the hardware queue when a tag is freed. The
		 * waitqueue takes care of that. If the queue is run
		 * before we add this entry back on the dispatch list,
		 * we'll re-run it below.
		 */
		if (!blk_mq_mark_tag_wait(hctx, rq)) {
			/*
			 * Budgets that were not obtained here are released
			 * together when handling the partial dispatch.
			 */
			if (need_budget)
				blk_mq_put_dispatch_budget(rq->q);
			return PREP_DISPATCH_NO_TAG;
		}
	}

	return PREP_DISPATCH_OK;
}

/* release all allocated budgets before calling to blk_mq_dispatch_rq_list */
static void blk_mq_release_budgets(struct request_queue *q,
		unsigned int nr_budgets)
{
	int i;

	for (i = 0; i < nr_budgets; i++)
		blk_mq_put_dispatch_budget(q);
}

/*
 * Returns true if we did some work AND can potentially do more.
 */
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
			     unsigned int nr_budgets)
{
	enum prep_dispatch prep;
	struct request_queue *q = hctx->queue;
	struct request *rq, *nxt;
	int errors, queued;
	blk_status_t ret = BLK_STS_OK;
	LIST_HEAD(zone_list);

	if (list_empty(list))
		return false;

	/*
	 * Now process all the entries, sending them to the driver.
	 */
	errors = queued = 0;
	do {
		struct blk_mq_queue_data bd;

		rq = list_first_entry(list, struct request, queuelist);

		WARN_ON_ONCE(hctx != rq->mq_hctx);
		prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
		if (prep != PREP_DISPATCH_OK)
			break;

		list_del_init(&rq->queuelist);

		bd.rq = rq;

		/*
		 * Flag last if we have no more requests, or if we have more
		 * but can't assign a driver tag to it.
		 */
		if (list_empty(list))
			bd.last = true;
		else {
			nxt = list_first_entry(list, struct request, queuelist);
			bd.last = !blk_mq_get_driver_tag(nxt);
		}

		/*
		 * Once the request is queued to the LLD, no need to cover the
		 * budget any more.
		 */
		if (nr_budgets)
			nr_budgets--;
		ret = q->mq_ops->queue_rq(hctx, &bd);
		switch (ret) {
		case BLK_STS_OK:
			queued++;
			break;
		case BLK_STS_RESOURCE:
		case BLK_STS_DEV_RESOURCE:
			blk_mq_handle_dev_resource(rq, list);
			goto out;
		case BLK_STS_ZONE_RESOURCE:
			/*
			 * Move the request to zone_list and keep going through
			 * the dispatch list to find more requests the drive can
			 * accept.
			 */
			blk_mq_handle_zone_resource(rq, &zone_list);
			break;
		default:
			errors++;
			blk_mq_end_request(rq, ret);
		}
	} while (!list_empty(list));
out:
	if (!list_empty(&zone_list))
		list_splice_tail_init(&zone_list, list);

	hctx->dispatched[queued_to_index(queued)]++;

	/* If we didn't flush the entire list, we could have told the driver
	 * there was more coming, but that turned out to be a lie.
	 */
	if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued)
		q->mq_ops->commit_rqs(hctx);
	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
	if (!list_empty(list)) {
		bool needs_restart;
		/* For non-shared tags, the RESTART check will suffice */
		bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
			(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
		bool no_budget_avail = prep == PREP_DISPATCH_NO_BUDGET;

		blk_mq_release_budgets(q, nr_budgets);

		spin_lock(&hctx->lock);
		list_splice_tail_init(list, &hctx->dispatch);
		spin_unlock(&hctx->lock);

		/*
		 * Order adding requests to hctx->dispatch and checking the
		 * SCHED_RESTART flag. The pair of this smp_mb() is the one
		 * in blk_mq_sched_restart(), so that the restart code path
		 * doesn't miss the requests newly added to hctx->dispatch
		 * while SCHED_RESTART is observed here.
		 */
		smp_mb();

		/*
		 * If SCHED_RESTART was set by the caller of this function and
		 * it is no longer set that means that it was cleared by another
		 * thread and hence that a queue rerun is needed.
		 *
		 * If 'no_tag' is set, that means that we failed getting
		 * a driver tag with an I/O scheduler attached. If our dispatch
		 * waitqueue is no longer active, ensure that we run the queue
		 * AFTER adding our entries back to the list.
		 *
		 * If no I/O scheduler has been configured it is possible that
		 * the hardware queue got stopped and restarted before requests
		 * were pushed back onto the dispatch list. Rerun the queue to
		 * avoid starvation. Notes:
		 * - blk_mq_run_hw_queue() checks whether or not a queue has
		 *   been stopped before rerunning a queue.
		 * - Some but not all block drivers stop a queue before
		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
		 *   and dm-rq.
		 *
		 * If the driver returns BLK_STS_RESOURCE and the SCHED_RESTART
		 * bit is set, run the queue after a delay to avoid IO stalls
		 * that could otherwise occur if the queue is idle.  We'll do
		 * similar if we couldn't get budget and SCHED_RESTART is set.
		 */
		needs_restart = blk_mq_sched_needs_restart(hctx);
		if (!needs_restart ||
		    (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
			blk_mq_run_hw_queue(hctx, true);
		else if (needs_restart && (ret == BLK_STS_RESOURCE ||
					   no_budget_avail))
			blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);

		blk_mq_update_dispatch_busy(hctx, true);
		return false;
	} else
		blk_mq_update_dispatch_busy(hctx, false);

	return (queued + errors) != 0;
}

/**
 * __blk_mq_run_hw_queue - Run a hardware queue.
 * @hctx: Pointer to the hardware queue to run.
 *
 * Send pending requests to the hardware.
 */
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	int srcu_idx;

	/*
	 * We can't run the queue inline with interrupts disabled. Ensure that
	 * we catch bad users of this early.
	 */
	WARN_ON_ONCE(in_interrupt());

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	hctx_lock(hctx, &srcu_idx);
	blk_mq_sched_dispatch_requests(hctx);
	hctx_unlock(hctx, srcu_idx);
}

static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
{
	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(hctx->cpumask);
	return cpu;
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	bool tried = false;
	int next_cpu = hctx->next_cpu;

	if (hctx->queue->nr_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	if (--hctx->next_cpu_batch <= 0) {
select_cpu:
		next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
				cpu_online_mask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = blk_mq_first_mapped_cpu(hctx);
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}

	/*
	 * Do unbound schedule if we can't find an online CPU for this hctx,
	 * and it should only happen in the path of handling CPU DEAD.
	 */
	if (!cpu_online(next_cpu)) {
		if (!tried) {
			tried = true;
			goto select_cpu;
		}

		/*
		 * Make sure to re-select CPU next time once after CPUs
		 * in hctx->cpumask become online again.
		 */
		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = 1;
		return WORK_CPU_UNBOUND;
	}

	hctx->next_cpu = next_cpu;
	return next_cpu;
}

/**
 * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue.
 * @hctx: Pointer to the hardware queue to run.
 * @async: If we want to run the queue asynchronously.
 * @msecs: Milliseconds of delay to wait before running the queue.
 *
 * If !@async, try to run the queue now. Else, run the queue asynchronously and
 * with a delay of @msecs.
 */
static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
					unsigned long msecs)
{
	if (unlikely(blk_mq_hctx_stopped(hctx)))
		return;

	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
		int cpu = get_cpu();
		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
			__blk_mq_run_hw_queue(hctx);
			put_cpu();
			return;
		}

		put_cpu();
	}

	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
				    msecs_to_jiffies(msecs));
}

/**
 * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
 * @hctx: Pointer to the hardware queue to run.
 * @msecs: Milliseconds of delay to wait before running the queue.
 *
 * Run a hardware queue asynchronously with a delay of @msecs.
 */
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);

/**
 * blk_mq_run_hw_queue - Start to run a hardware queue.
 * @hctx: Pointer to the hardware queue to run.
 * @async: If we want to run the queue asynchronously.
 *
 * Check if the request queue is not in a quiesced state and if there are
 * pending requests to be sent. If this is true, run the queue to send requests
 * to hardware.
 */
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	int srcu_idx;
	bool need_run;

	/*
	 * When the queue is quiesced, we may be switching io schedulers, or
	 * updating nr_hw_queues, or other things, and we can't run the queue
	 * any more; even blk_mq_hctx_has_pending() can't be called safely.
	 *
	 * The queue will be rerun in blk_mq_unquiesce_queue() if it is
	 * quiesced.
	 */
	hctx_lock(hctx, &srcu_idx);
	need_run = !blk_queue_quiesced(hctx->queue) &&
		blk_mq_hctx_has_pending(hctx);
	hctx_unlock(hctx, srcu_idx);

	if (need_run)
		__blk_mq_delay_run_hw_queue(hctx, async, 0);
}
EXPORT_SYMBOL(blk_mq_run_hw_queue);

/**
 * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
 * @q: Pointer to the request queue to run.
 * @async: If we want to run the queue asynchronously.
 */
void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_hctx_stopped(hctx))
			continue;

		blk_mq_run_hw_queue(hctx, async);
	}
}
EXPORT_SYMBOL(blk_mq_run_hw_queues);

/**
 * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously.
 * @q: Pointer to the request queue to run.
 * @msecs: Milliseconds of delay to wait before running the queues.
 */
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_hctx_stopped(hctx))
			continue;

		blk_mq_delay_run_hw_queue(hctx, msecs);
	}
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);

/**
 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
 * @q: request queue.
 *
 * The caller is responsible for serializing this function against
 * blk_mq_{start,stop}_hw_queue().
 */
bool blk_mq_queue_stopped(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hctx_stopped(hctx))
			return true;

	return false;
}
EXPORT_SYMBOL(blk_mq_queue_stopped);

/*
 * This function is often used by drivers to pause .queue_rq() when there
 * aren't enough resources available or some conditions aren't satisfied, in
 * which case BLK_STS_RESOURCE is usually returned.
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queue() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	cancel_delayed_work(&hctx->run_work);

	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);
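
/*
 * Illustrative driver-side pattern (hypothetical, not part of this file):
 *
 *	static blk_status_t foo_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					 const struct blk_mq_queue_data *bd)
 *	{
 *		if (foo_out_of_resources(hctx)) {
 *			blk_mq_stop_hw_queue(hctx);
 *			return BLK_STS_RESOURCE;
 *		}
 *		...
 *	}
 *
 * with blk_mq_start_stopped_hw_queues() called once the resources become
 * available again.
 */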
/*
 * This function is often used by drivers to pause .queue_rq() when there
 * aren't enough resources available or some conditions aren't satisfied, in
 * which case BLK_STS_RESOURCE is usually returned.
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queues() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
void blk_mq_stop_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_stop_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);

void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);

	blk_mq_run_hw_queue(hctx, false);
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);

void blk_mq_start_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);

void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	if (!blk_mq_hctx_stopped(hctx))
		return;

	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
	blk_mq_run_hw_queue(hctx, async);
}
EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);

void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_stopped_hw_queue(hctx, async);
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);

static void blk_mq_run_work_fn(struct work_struct *work)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);

	/*
	 * If we are stopped, don't run the queue.
	 */
	if (blk_mq_hctx_stopped(hctx))
		return;

	__blk_mq_run_hw_queue(hctx);
}

static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
					    struct request *rq,
					    bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	enum hctx_type type = hctx->type;

	lockdep_assert_held(&ctx->lock);

	trace_block_rq_insert(rq);

	if (at_head)
		list_add(&rq->queuelist, &ctx->rq_lists[type]);
	else
		list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
}

void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	lockdep_assert_held(&ctx->lock);

	__blk_mq_insert_req_list(hctx, rq, at_head);
	blk_mq_hctx_mark_pending(hctx, ctx);
}

/**
 * blk_mq_request_bypass_insert - Insert a request at dispatch list.
 * @rq: Pointer to request to be inserted.
 * @at_head: true if the request should be inserted at the head of the list.
 * @run_queue: If we should run the hardware queue after inserting the request.
 *
 * Should only be used carefully, when the caller knows we want to
 * bypass a potential IO scheduler on the target device.
 */
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
				  bool run_queue)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	spin_lock(&hctx->lock);
	if (at_head)
		list_add(&rq->queuelist, &hctx->dispatch);
	else
		list_add_tail(&rq->queuelist, &hctx->dispatch);
	spin_unlock(&hctx->lock);

	if (run_queue)
		blk_mq_run_hw_queue(hctx, false);
}

void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list)

{
	struct request *rq;
	enum hctx_type type = hctx->type;

	/*
	 * preemption doesn't flush plug list, so it's possible ctx->cpu is
	 * offline now
	 */
	list_for_each_entry(rq, list, queuelist) {
		BUG_ON(rq->mq_ctx != ctx);
		trace_block_rq_insert(rq);
	}

	spin_lock(&ctx->lock);
	list_splice_tail_init(list, &ctx->rq_lists[type]);
	blk_mq_hctx_mark_pending(hctx, ctx);
	spin_unlock(&ctx->lock);
}

static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	if (rqa->mq_ctx != rqb->mq_ctx)
		return rqa->mq_ctx > rqb->mq_ctx;
	if (rqa->mq_hctx != rqb->mq_hctx)
		return rqa->mq_hctx > rqb->mq_hctx;

	return blk_rq_pos(rqa) > blk_rq_pos(rqb);
}

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(list);

	if (list_empty(&plug->mq_list))
		return;
	list_splice_init(&plug->mq_list, &list);

	if (plug->rq_count > 2 && plug->multiple_queues)
		list_sort(NULL, &list, plug_rq_cmp);

	plug->rq_count = 0;

	do {
		struct list_head rq_list;
		struct request *rq, *head_rq = list_entry_rq(list.next);
		struct list_head *pos = &head_rq->queuelist; /* skip first */
		struct blk_mq_hw_ctx *this_hctx = head_rq->mq_hctx;
		struct blk_mq_ctx *this_ctx = head_rq->mq_ctx;
		unsigned int depth = 1;

		list_for_each_continue(pos, &list) {
			rq = list_entry_rq(pos);
			BUG_ON(!rq->q);
			if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx)
				break;
			depth++;
		}

		list_cut_before(&rq_list, &list, pos);
		trace_block_unplug(head_rq->q, depth, !from_schedule);
		blk_mq_sched_insert_requests(this_hctx, this_ctx, &rq_list,
						from_schedule);
	} while(!list_empty(&list));
}

static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
{
	int err;

	if (bio->bi_opf & REQ_RAHEAD)
		rq->cmd_flags |= REQ_FAILFAST_MASK;

	rq->__sector = bio->bi_iter.bi_sector;
	rq->write_hint = bio->bi_write_hint;
	blk_rq_bio_prep(rq, bio, nr_segs);

	/* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
	err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
	WARN_ON_ONCE(err);

	blk_account_io_start(rq);
}

static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
					    struct request *rq,
					    blk_qc_t *cookie, bool last)
{
	struct request_queue *q = rq->q;
	struct blk_mq_queue_data bd = {
		.rq = rq,
		.last = last,
	};
	blk_qc_t new_cookie;
	blk_status_t ret;

	new_cookie = request_to_qc_t(hctx, rq);

	/*
	 * For OK queue, we are done. For error, caller may kill it.
1955 * Any other error (busy), just add it to our list as we 1956 * previously would have done. 1957 */ 1958 ret = q->mq_ops->queue_rq(hctx, &bd); 1959 switch (ret) { 1960 case BLK_STS_OK: 1961 blk_mq_update_dispatch_busy(hctx, false); 1962 *cookie = new_cookie; 1963 break; 1964 case BLK_STS_RESOURCE: 1965 case BLK_STS_DEV_RESOURCE: 1966 blk_mq_update_dispatch_busy(hctx, true); 1967 __blk_mq_requeue_request(rq); 1968 break; 1969 default: 1970 blk_mq_update_dispatch_busy(hctx, false); 1971 *cookie = BLK_QC_T_NONE; 1972 break; 1973 } 1974 1975 return ret; 1976 } 1977 1978 static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, 1979 struct request *rq, 1980 blk_qc_t *cookie, 1981 bool bypass_insert, bool last) 1982 { 1983 struct request_queue *q = rq->q; 1984 bool run_queue = true; 1985 1986 /* 1987 * RCU or SRCU read lock is needed before checking quiesced flag. 1988 * 1989 * When the queue is stopped or quiesced, ignore 'bypass_insert' from 1990 * blk_mq_request_issue_directly() and return BLK_STS_OK to the caller, 1991 * so the driver does not try to dispatch again. 1992 */ 1993 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) { 1994 run_queue = false; 1995 bypass_insert = false; 1996 goto insert; 1997 } 1998 1999 if (q->elevator && !bypass_insert) 2000 goto insert; 2001 2002 if (!blk_mq_get_dispatch_budget(q)) 2003 goto insert; 2004 2005 if (!blk_mq_get_driver_tag(rq)) { 2006 blk_mq_put_dispatch_budget(q); 2007 goto insert; 2008 } 2009 2010 return __blk_mq_issue_directly(hctx, rq, cookie, last); 2011 insert: 2012 if (bypass_insert) 2013 return BLK_STS_RESOURCE; 2014 2015 blk_mq_sched_insert_request(rq, false, run_queue, false); 2016 2017 return BLK_STS_OK; 2018 } 2019 2020 /** 2021 * blk_mq_try_issue_directly - Try to send a request directly to device driver. 2022 * @hctx: Pointer to the associated hardware queue. 2023 * @rq: Pointer to request to be sent. 2024 * @cookie: Request queue cookie. 2025 * 2026 * If the device has enough resources to accept a new request now, send the 2027 * request directly to the device driver. Else, insert it into the 2028 * hctx->dispatch queue, so we can try sending it again in the future. Requests 2029 * inserted into this queue have higher priority.
2030 */ 2031 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, 2032 struct request *rq, blk_qc_t *cookie) 2033 { 2034 blk_status_t ret; 2035 int srcu_idx; 2036 2037 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); 2038 2039 hctx_lock(hctx, &srcu_idx); 2040 2041 ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true); 2042 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) 2043 blk_mq_request_bypass_insert(rq, false, true); 2044 else if (ret != BLK_STS_OK) 2045 blk_mq_end_request(rq, ret); 2046 2047 hctx_unlock(hctx, srcu_idx); 2048 } 2049 2050 blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last) 2051 { 2052 blk_status_t ret; 2053 int srcu_idx; 2054 blk_qc_t unused_cookie; 2055 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 2056 2057 hctx_lock(hctx, &srcu_idx); 2058 ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last); 2059 hctx_unlock(hctx, srcu_idx); 2060 2061 return ret; 2062 } 2063 2064 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, 2065 struct list_head *list) 2066 { 2067 int queued = 0; 2068 int errors = 0; 2069 2070 while (!list_empty(list)) { 2071 blk_status_t ret; 2072 struct request *rq = list_first_entry(list, struct request, 2073 queuelist); 2074 2075 list_del_init(&rq->queuelist); 2076 ret = blk_mq_request_issue_directly(rq, list_empty(list)); 2077 if (ret != BLK_STS_OK) { 2078 if (ret == BLK_STS_RESOURCE || 2079 ret == BLK_STS_DEV_RESOURCE) { 2080 blk_mq_request_bypass_insert(rq, false, 2081 list_empty(list)); 2082 break; 2083 } 2084 blk_mq_end_request(rq, ret); 2085 errors++; 2086 } else 2087 queued++; 2088 } 2089 2090 /* 2091 * If we didn't flush the entire list, we could have told 2092 * the driver there was more coming, but that turned out to 2093 * be a lie. 2094 */ 2095 if ((!list_empty(list) || errors) && 2096 hctx->queue->mq_ops->commit_rqs && queued) 2097 hctx->queue->mq_ops->commit_rqs(hctx); 2098 } 2099 2100 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq) 2101 { 2102 list_add_tail(&rq->queuelist, &plug->mq_list); 2103 plug->rq_count++; 2104 if (!plug->multiple_queues && !list_is_singular(&plug->mq_list)) { 2105 struct request *tmp; 2106 2107 tmp = list_first_entry(&plug->mq_list, struct request, 2108 queuelist); 2109 if (tmp->q != rq->q) 2110 plug->multiple_queues = true; 2111 } 2112 } 2113 2114 /** 2115 * blk_mq_submit_bio - Create and send a request to a block device. 2116 * @bio: Bio pointer. 2117 * 2118 * Builds up a request structure from @bio and sends it to the device. The 2119 * request may not be queued directly to hardware if: 2120 * * This request can be merged with another one 2121 * * We want to place the request on the plug queue for possible future merging 2122 * * There is an IO scheduler active on this queue 2123 * 2124 * It won't queue the request if there is an error with the bio or during 2125 * request creation. 2126 * 2127 * Returns: Request queue cookie.
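 *
 * Illustrative sketch of how the returned cookie is consumed
 * (assumption: a synchronous polled-IO caller; not code from this file):
 *
 *	blk_qc_t qc = submit_bio(bio);	// ends up in blk_mq_submit_bio()
 *	while (!READ_ONCE(done))
 *		blk_poll(q, qc, true);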
2128 */ 2129 blk_qc_t blk_mq_submit_bio(struct bio *bio) 2130 { 2131 struct request_queue *q = bio->bi_disk->queue; 2132 const int is_sync = op_is_sync(bio->bi_opf); 2133 const int is_flush_fua = op_is_flush(bio->bi_opf); 2134 struct blk_mq_alloc_data data = { 2135 .q = q, 2136 }; 2137 struct request *rq; 2138 struct blk_plug *plug; 2139 struct request *same_queue_rq = NULL; 2140 unsigned int nr_segs; 2141 blk_qc_t cookie; 2142 blk_status_t ret; 2143 bool hipri; 2144 2145 blk_queue_bounce(q, &bio); 2146 __blk_queue_split(&bio, &nr_segs); 2147 2148 if (!bio_integrity_prep(bio)) 2149 goto queue_exit; 2150 2151 if (!is_flush_fua && !blk_queue_nomerges(q) && 2152 blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq)) 2153 goto queue_exit; 2154 2155 if (blk_mq_sched_bio_merge(q, bio, nr_segs)) 2156 goto queue_exit; 2157 2158 rq_qos_throttle(q, bio); 2159 2160 hipri = bio->bi_opf & REQ_HIPRI; 2161 2162 data.cmd_flags = bio->bi_opf; 2163 rq = __blk_mq_alloc_request(&data); 2164 if (unlikely(!rq)) { 2165 rq_qos_cleanup(q, bio); 2166 if (bio->bi_opf & REQ_NOWAIT) 2167 bio_wouldblock_error(bio); 2168 goto queue_exit; 2169 } 2170 2171 trace_block_getrq(bio); 2172 2173 rq_qos_track(q, rq, bio); 2174 2175 cookie = request_to_qc_t(data.hctx, rq); 2176 2177 blk_mq_bio_to_request(rq, bio, nr_segs); 2178 2179 ret = blk_crypto_init_request(rq); 2180 if (ret != BLK_STS_OK) { 2181 bio->bi_status = ret; 2182 bio_endio(bio); 2183 blk_mq_free_request(rq); 2184 return BLK_QC_T_NONE; 2185 } 2186 2187 plug = blk_mq_plug(q, bio); 2188 if (unlikely(is_flush_fua)) { 2189 /* Bypass scheduler for flush requests */ 2190 blk_insert_flush(rq); 2191 blk_mq_run_hw_queue(data.hctx, true); 2192 } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs || 2193 !blk_queue_nonrot(q))) { 2194 /* 2195 * Use plugging if we have a ->commit_rqs() hook as well, as 2196 * we know the driver uses bd->last in a smart fashion. 2197 * 2198 * Use normal plugging if this disk is a slow HDD, as sequential 2199 * IO may benefit a lot from plug merging. 2200 */ 2201 unsigned int request_count = plug->rq_count; 2202 struct request *last = NULL; 2203 2204 if (!request_count) 2205 trace_block_plug(q); 2206 else 2207 last = list_entry_rq(plug->mq_list.prev); 2208 2209 if (request_count >= BLK_MAX_REQUEST_COUNT || (last && 2210 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) { 2211 blk_flush_plug_list(plug, false); 2212 trace_block_plug(q); 2213 } 2214 2215 blk_add_rq_to_plug(plug, rq); 2216 } else if (q->elevator) { 2217 /* Insert the request at the IO scheduler queue */ 2218 blk_mq_sched_insert_request(rq, false, true, true); 2219 } else if (plug && !blk_queue_nomerges(q)) { 2220 /* 2221 * We do limited plugging. If the bio can be merged, do that. 2222 * Otherwise the existing request in the plug list will be 2223 * issued. So the plug list will have one request at most. 2224 * The plug list might get flushed before this. If that happens, 2225 * the plug list is empty, and same_queue_rq is invalid.
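 * Hence same_queue_rq is only trusted below after re-checking that
 * the plug list is still non-empty.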
2226 */ 2227 if (list_empty(&plug->mq_list)) 2228 same_queue_rq = NULL; 2229 if (same_queue_rq) { 2230 list_del_init(&same_queue_rq->queuelist); 2231 plug->rq_count--; 2232 } 2233 blk_add_rq_to_plug(plug, rq); 2234 trace_block_plug(q); 2235 2236 if (same_queue_rq) { 2237 data.hctx = same_queue_rq->mq_hctx; 2238 trace_block_unplug(q, 1, true); 2239 blk_mq_try_issue_directly(data.hctx, same_queue_rq, 2240 &cookie); 2241 } 2242 } else if ((q->nr_hw_queues > 1 && is_sync) || 2243 !data.hctx->dispatch_busy) { 2244 /* 2245 * There is no scheduler and we can try to send directly 2246 * to the hardware. 2247 */ 2248 blk_mq_try_issue_directly(data.hctx, rq, &cookie); 2249 } else { 2250 /* Default case. */ 2251 blk_mq_sched_insert_request(rq, false, true, true); 2252 } 2253 2254 if (!hipri) 2255 return BLK_QC_T_NONE; 2256 return cookie; 2257 queue_exit: 2258 blk_queue_exit(q); 2259 return BLK_QC_T_NONE; 2260 } 2261 2262 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, 2263 unsigned int hctx_idx) 2264 { 2265 struct page *page; 2266 2267 if (tags->rqs && set->ops->exit_request) { 2268 int i; 2269 2270 for (i = 0; i < tags->nr_tags; i++) { 2271 struct request *rq = tags->static_rqs[i]; 2272 2273 if (!rq) 2274 continue; 2275 set->ops->exit_request(set, rq, hctx_idx); 2276 tags->static_rqs[i] = NULL; 2277 } 2278 } 2279 2280 while (!list_empty(&tags->page_list)) { 2281 page = list_first_entry(&tags->page_list, struct page, lru); 2282 list_del_init(&page->lru); 2283 /* 2284 * Remove kmemleak object previously allocated in 2285 * blk_mq_alloc_rqs(). 2286 */ 2287 kmemleak_free(page_address(page)); 2288 __free_pages(page, page->private); 2289 } 2290 } 2291 2292 void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags) 2293 { 2294 kfree(tags->rqs); 2295 tags->rqs = NULL; 2296 kfree(tags->static_rqs); 2297 tags->static_rqs = NULL; 2298 2299 blk_mq_free_tags(tags, flags); 2300 } 2301 2302 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, 2303 unsigned int hctx_idx, 2304 unsigned int nr_tags, 2305 unsigned int reserved_tags, 2306 unsigned int flags) 2307 { 2308 struct blk_mq_tags *tags; 2309 int node; 2310 2311 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx); 2312 if (node == NUMA_NO_NODE) 2313 node = set->numa_node; 2314 2315 tags = blk_mq_init_tags(nr_tags, reserved_tags, node, flags); 2316 if (!tags) 2317 return NULL; 2318 2319 tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *), 2320 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, 2321 node); 2322 if (!tags->rqs) { 2323 blk_mq_free_tags(tags, flags); 2324 return NULL; 2325 } 2326 2327 tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *), 2328 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, 2329 node); 2330 if (!tags->static_rqs) { 2331 kfree(tags->rqs); 2332 blk_mq_free_tags(tags, flags); 2333 return NULL; 2334 } 2335 2336 return tags; 2337 } 2338 2339 static size_t order_to_size(unsigned int order) 2340 { 2341 return (size_t)PAGE_SIZE << order; 2342 } 2343 2344 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, 2345 unsigned int hctx_idx, int node) 2346 { 2347 int ret; 2348 2349 if (set->ops->init_request) { 2350 ret = set->ops->init_request(set, rq, hctx_idx, node); 2351 if (ret) 2352 return ret; 2353 } 2354 2355 WRITE_ONCE(rq->state, MQ_RQ_IDLE); 2356 return 0; 2357 } 2358 2359 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, 2360 unsigned int hctx_idx, unsigned int depth) 2361 { 2362 unsigned int i, j, 
entries_per_page, max_order = 4; 2363 size_t rq_size, left; 2364 int node; 2365 2366 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx); 2367 if (node == NUMA_NO_NODE) 2368 node = set->numa_node; 2369 2370 INIT_LIST_HEAD(&tags->page_list); 2371 2372 /* 2373 * rq_size is the size of the request plus driver payload, rounded 2374 * to the cacheline size 2375 */ 2376 rq_size = round_up(sizeof(struct request) + set->cmd_size, 2377 cache_line_size()); 2378 left = rq_size * depth; 2379 2380 for (i = 0; i < depth; ) { 2381 int this_order = max_order; 2382 struct page *page; 2383 int to_do; 2384 void *p; 2385 2386 while (this_order && left < order_to_size(this_order - 1)) 2387 this_order--; 2388 2389 do { 2390 page = alloc_pages_node(node, 2391 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO, 2392 this_order); 2393 if (page) 2394 break; 2395 if (!this_order--) 2396 break; 2397 if (order_to_size(this_order) < rq_size) 2398 break; 2399 } while (1); 2400 2401 if (!page) 2402 goto fail; 2403 2404 page->private = this_order; 2405 list_add_tail(&page->lru, &tags->page_list); 2406 2407 p = page_address(page); 2408 /* 2409 * Allow kmemleak to scan these pages as they contain pointers 2410 * to additional allocations like via ops->init_request(). 2411 */ 2412 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO); 2413 entries_per_page = order_to_size(this_order) / rq_size; 2414 to_do = min(entries_per_page, depth - i); 2415 left -= to_do * rq_size; 2416 for (j = 0; j < to_do; j++) { 2417 struct request *rq = p; 2418 2419 tags->static_rqs[i] = rq; 2420 if (blk_mq_init_request(set, rq, hctx_idx, node)) { 2421 tags->static_rqs[i] = NULL; 2422 goto fail; 2423 } 2424 2425 p += rq_size; 2426 i++; 2427 } 2428 } 2429 return 0; 2430 2431 fail: 2432 blk_mq_free_rqs(set, tags, hctx_idx); 2433 return -ENOMEM; 2434 } 2435 2436 struct rq_iter_data { 2437 struct blk_mq_hw_ctx *hctx; 2438 bool has_rq; 2439 }; 2440 2441 static bool blk_mq_has_request(struct request *rq, void *data, bool reserved) 2442 { 2443 struct rq_iter_data *iter_data = data; 2444 2445 if (rq->mq_hctx != iter_data->hctx) 2446 return true; 2447 iter_data->has_rq = true; 2448 return false; 2449 } 2450 2451 static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx) 2452 { 2453 struct blk_mq_tags *tags = hctx->sched_tags ? 2454 hctx->sched_tags : hctx->tags; 2455 struct rq_iter_data data = { 2456 .hctx = hctx, 2457 }; 2458 2459 blk_mq_all_tag_iter(tags, blk_mq_has_request, &data); 2460 return data.has_rq; 2461 } 2462 2463 static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu, 2464 struct blk_mq_hw_ctx *hctx) 2465 { 2466 if (cpumask_next_and(-1, hctx->cpumask, cpu_online_mask) != cpu) 2467 return false; 2468 if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids) 2469 return false; 2470 return true; 2471 } 2472 2473 static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node) 2474 { 2475 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, 2476 struct blk_mq_hw_ctx, cpuhp_online); 2477 2478 if (!cpumask_test_cpu(cpu, hctx->cpumask) || 2479 !blk_mq_last_cpu_in_hctx(cpu, hctx)) 2480 return 0; 2481 2482 /* 2483 * Prevent new request from being allocated on the current hctx. 2484 * 2485 * The smp_mb__after_atomic() Pairs with the implied barrier in 2486 * test_and_set_bit_lock in sbitmap_get(). Ensures the inactive flag is 2487 * seen once we return from the tag allocator. 
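 *
 * Informally, the two sides order as follows (a sketch of the
 * argument, not code from either path):
 *
 *	cpu offline path		tag allocator
 *	set_bit(BLK_MQ_S_INACTIVE)	test_and_set_bit_lock()
 *	smp_mb__after_atomic()		<implied full barrier>
 *	iterate tags (drain below)	test_bit(BLK_MQ_S_INACTIVE)
 *
 * so either the allocator sees the inactive flag and backs out its
 * tag, or the drain loop below sees the allocated tag and waits.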
2488 */ 2489 set_bit(BLK_MQ_S_INACTIVE, &hctx->state); 2490 smp_mb__after_atomic(); 2491 2492 /* 2493 * Try to grab a reference to the queue and wait for any outstanding 2494 * requests. If we could not grab a reference the queue has been 2495 * frozen and there are no requests. 2496 */ 2497 if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) { 2498 while (blk_mq_hctx_has_requests(hctx)) 2499 msleep(5); 2500 percpu_ref_put(&hctx->queue->q_usage_counter); 2501 } 2502 2503 return 0; 2504 } 2505 2506 static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node) 2507 { 2508 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, 2509 struct blk_mq_hw_ctx, cpuhp_online); 2510 2511 if (cpumask_test_cpu(cpu, hctx->cpumask)) 2512 clear_bit(BLK_MQ_S_INACTIVE, &hctx->state); 2513 return 0; 2514 } 2515 2516 /* 2517 * 'cpu' is going away. splice any existing rq_list entries from this 2518 * software queue to the hw queue dispatch list, and ensure that it 2519 * gets run. 2520 */ 2521 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node) 2522 { 2523 struct blk_mq_hw_ctx *hctx; 2524 struct blk_mq_ctx *ctx; 2525 LIST_HEAD(tmp); 2526 enum hctx_type type; 2527 2528 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead); 2529 if (!cpumask_test_cpu(cpu, hctx->cpumask)) 2530 return 0; 2531 2532 ctx = __blk_mq_get_ctx(hctx->queue, cpu); 2533 type = hctx->type; 2534 2535 spin_lock(&ctx->lock); 2536 if (!list_empty(&ctx->rq_lists[type])) { 2537 list_splice_init(&ctx->rq_lists[type], &tmp); 2538 blk_mq_hctx_clear_pending(hctx, ctx); 2539 } 2540 spin_unlock(&ctx->lock); 2541 2542 if (list_empty(&tmp)) 2543 return 0; 2544 2545 spin_lock(&hctx->lock); 2546 list_splice_tail_init(&tmp, &hctx->dispatch); 2547 spin_unlock(&hctx->lock); 2548 2549 blk_mq_run_hw_queue(hctx, true); 2550 return 0; 2551 } 2552 2553 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx) 2554 { 2555 if (!(hctx->flags & BLK_MQ_F_STACKING)) 2556 cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, 2557 &hctx->cpuhp_online); 2558 cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD, 2559 &hctx->cpuhp_dead); 2560 } 2561 2562 /* hctx->ctxs will be freed in queue's release handler */ 2563 static void blk_mq_exit_hctx(struct request_queue *q, 2564 struct blk_mq_tag_set *set, 2565 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) 2566 { 2567 if (blk_mq_hw_queue_mapped(hctx)) 2568 blk_mq_tag_idle(hctx); 2569 2570 if (set->ops->exit_request) 2571 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx); 2572 2573 if (set->ops->exit_hctx) 2574 set->ops->exit_hctx(hctx, hctx_idx); 2575 2576 blk_mq_remove_cpuhp(hctx); 2577 2578 spin_lock(&q->unused_hctx_lock); 2579 list_add(&hctx->hctx_list, &q->unused_hctx_list); 2580 spin_unlock(&q->unused_hctx_lock); 2581 } 2582 2583 static void blk_mq_exit_hw_queues(struct request_queue *q, 2584 struct blk_mq_tag_set *set, int nr_queue) 2585 { 2586 struct blk_mq_hw_ctx *hctx; 2587 unsigned int i; 2588 2589 queue_for_each_hw_ctx(q, hctx, i) { 2590 if (i == nr_queue) 2591 break; 2592 blk_mq_debugfs_unregister_hctx(hctx); 2593 blk_mq_exit_hctx(q, set, hctx, i); 2594 } 2595 } 2596 2597 static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set) 2598 { 2599 int hw_ctx_size = sizeof(struct blk_mq_hw_ctx); 2600 2601 BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu), 2602 __alignof__(struct blk_mq_hw_ctx)) != 2603 sizeof(struct blk_mq_hw_ctx)); 2604 2605 if (tag_set->flags & BLK_MQ_F_BLOCKING) 2606 hw_ctx_size += sizeof(struct srcu_struct); 2607 2608 return 
hw_ctx_size; 2609 } 2610 2611 static int blk_mq_init_hctx(struct request_queue *q, 2612 struct blk_mq_tag_set *set, 2613 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) 2614 { 2615 hctx->queue_num = hctx_idx; 2616 2617 if (!(hctx->flags & BLK_MQ_F_STACKING)) 2618 cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, 2619 &hctx->cpuhp_online); 2620 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead); 2621 2622 hctx->tags = set->tags[hctx_idx]; 2623 2624 if (set->ops->init_hctx && 2625 set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) 2626 goto unregister_cpu_notifier; 2627 2628 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, 2629 hctx->numa_node)) 2630 goto exit_hctx; 2631 return 0; 2632 2633 exit_hctx: 2634 if (set->ops->exit_hctx) 2635 set->ops->exit_hctx(hctx, hctx_idx); 2636 unregister_cpu_notifier: 2637 blk_mq_remove_cpuhp(hctx); 2638 return -1; 2639 } 2640 2641 static struct blk_mq_hw_ctx * 2642 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set, 2643 int node) 2644 { 2645 struct blk_mq_hw_ctx *hctx; 2646 gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY; 2647 2648 hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node); 2649 if (!hctx) 2650 goto fail_alloc_hctx; 2651 2652 if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node)) 2653 goto free_hctx; 2654 2655 atomic_set(&hctx->nr_active, 0); 2656 atomic_set(&hctx->elevator_queued, 0); 2657 if (node == NUMA_NO_NODE) 2658 node = set->numa_node; 2659 hctx->numa_node = node; 2660 2661 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); 2662 spin_lock_init(&hctx->lock); 2663 INIT_LIST_HEAD(&hctx->dispatch); 2664 hctx->queue = q; 2665 hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED; 2666 2667 INIT_LIST_HEAD(&hctx->hctx_list); 2668 2669 /* 2670 * Allocate space for all possible cpus to avoid allocation at 2671 * runtime 2672 */ 2673 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *), 2674 gfp, node); 2675 if (!hctx->ctxs) 2676 goto free_cpumask; 2677 2678 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), 2679 gfp, node)) 2680 goto free_ctxs; 2681 hctx->nr_ctx = 0; 2682 2683 spin_lock_init(&hctx->dispatch_wait_lock); 2684 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake); 2685 INIT_LIST_HEAD(&hctx->dispatch_wait.entry); 2686 2687 hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp); 2688 if (!hctx->fq) 2689 goto free_bitmap; 2690 2691 if (hctx->flags & BLK_MQ_F_BLOCKING) 2692 init_srcu_struct(hctx->srcu); 2693 blk_mq_hctx_kobj_init(hctx); 2694 2695 return hctx; 2696 2697 free_bitmap: 2698 sbitmap_free(&hctx->ctx_map); 2699 free_ctxs: 2700 kfree(hctx->ctxs); 2701 free_cpumask: 2702 free_cpumask_var(hctx->cpumask); 2703 free_hctx: 2704 kfree(hctx); 2705 fail_alloc_hctx: 2706 return NULL; 2707 } 2708 2709 static void blk_mq_init_cpu_queues(struct request_queue *q, 2710 unsigned int nr_hw_queues) 2711 { 2712 struct blk_mq_tag_set *set = q->tag_set; 2713 unsigned int i, j; 2714 2715 for_each_possible_cpu(i) { 2716 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); 2717 struct blk_mq_hw_ctx *hctx; 2718 int k; 2719 2720 __ctx->cpu = i; 2721 spin_lock_init(&__ctx->lock); 2722 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++) 2723 INIT_LIST_HEAD(&__ctx->rq_lists[k]); 2724 2725 __ctx->queue = q; 2726 2727 /* 2728 * Set local node, IFF we have more than one hw queue. 
If 2729 * not, we remain on the home node of the device 2730 */ 2731 for (j = 0; j < set->nr_maps; j++) { 2732 hctx = blk_mq_map_queue_type(q, j, i); 2733 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE) 2734 hctx->numa_node = cpu_to_node(i); 2735 } 2736 } 2737 } 2738 2739 static bool __blk_mq_alloc_map_and_request(struct blk_mq_tag_set *set, 2740 int hctx_idx) 2741 { 2742 unsigned int flags = set->flags; 2743 int ret = 0; 2744 2745 set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx, 2746 set->queue_depth, set->reserved_tags, flags); 2747 if (!set->tags[hctx_idx]) 2748 return false; 2749 2750 ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx, 2751 set->queue_depth); 2752 if (!ret) 2753 return true; 2754 2755 blk_mq_free_rq_map(set->tags[hctx_idx], flags); 2756 set->tags[hctx_idx] = NULL; 2757 return false; 2758 } 2759 2760 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set, 2761 unsigned int hctx_idx) 2762 { 2763 unsigned int flags = set->flags; 2764 2765 if (set->tags && set->tags[hctx_idx]) { 2766 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx); 2767 blk_mq_free_rq_map(set->tags[hctx_idx], flags); 2768 set->tags[hctx_idx] = NULL; 2769 } 2770 } 2771 2772 static void blk_mq_map_swqueue(struct request_queue *q) 2773 { 2774 unsigned int i, j, hctx_idx; 2775 struct blk_mq_hw_ctx *hctx; 2776 struct blk_mq_ctx *ctx; 2777 struct blk_mq_tag_set *set = q->tag_set; 2778 2779 queue_for_each_hw_ctx(q, hctx, i) { 2780 cpumask_clear(hctx->cpumask); 2781 hctx->nr_ctx = 0; 2782 hctx->dispatch_from = NULL; 2783 } 2784 2785 /* 2786 * Map software to hardware queues. 2787 * 2788 * If the cpu isn't present, the cpu is mapped to the first hctx. 2789 */ 2790 for_each_possible_cpu(i) { 2791 2792 ctx = per_cpu_ptr(q->queue_ctx, i); 2793 for (j = 0; j < set->nr_maps; j++) { 2794 if (!set->map[j].nr_queues) { 2795 ctx->hctxs[j] = blk_mq_map_queue_type(q, 2796 HCTX_TYPE_DEFAULT, i); 2797 continue; 2798 } 2799 hctx_idx = set->map[j].mq_map[i]; 2800 /* an unmapped hw queue can be remapped after the CPU topology changes */ 2801 if (!set->tags[hctx_idx] && 2802 !__blk_mq_alloc_map_and_request(set, hctx_idx)) { 2803 /* 2804 * If tags initialization fails for some hctx, 2805 * that hctx won't be brought online. In this 2806 * case, remap the current ctx to hctx[0] which 2807 * is guaranteed to always have tags allocated. 2808 */ 2809 set->map[j].mq_map[i] = 0; 2810 } 2811 2812 hctx = blk_mq_map_queue_type(q, j, i); 2813 ctx->hctxs[j] = hctx; 2814 /* 2815 * If the CPU is already set in the mask, then we've 2816 * mapped this one already. This can happen if 2817 * devices share queues across queue maps. 2818 */ 2819 if (cpumask_test_cpu(i, hctx->cpumask)) 2820 continue; 2821 2822 cpumask_set_cpu(i, hctx->cpumask); 2823 hctx->type = j; 2824 ctx->index_hw[hctx->type] = hctx->nr_ctx; 2825 hctx->ctxs[hctx->nr_ctx++] = ctx; 2826 2827 /* 2828 * If the nr_ctx type overflows, we have exceeded the 2829 * number of sw queues we can support. 2830 */ 2831 BUG_ON(!hctx->nr_ctx); 2832 } 2833 2834 for (; j < HCTX_MAX_TYPES; j++) 2835 ctx->hctxs[j] = blk_mq_map_queue_type(q, 2836 HCTX_TYPE_DEFAULT, i); 2837 } 2838 2839 queue_for_each_hw_ctx(q, hctx, i) { 2840 /* 2841 * If no software queues are mapped to this hardware queue, 2842 * disable it and free the request entries. 2843 */ 2844 if (!hctx->nr_ctx) { 2845 /* Never unmap queue 0.
We need it as a 2846 * fallback in case a new remap fails its 2847 * allocation 2848 */ 2849 if (i && set->tags[i]) 2850 blk_mq_free_map_and_requests(set, i); 2851 2852 hctx->tags = NULL; 2853 continue; 2854 } 2855 2856 hctx->tags = set->tags[i]; 2857 WARN_ON(!hctx->tags); 2858 2859 /* 2860 * Set the map size to the number of mapped software queues. 2861 * This is more accurate and more efficient than looping 2862 * over all possibly mapped software queues. 2863 */ 2864 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx); 2865 2866 /* 2867 * Initialize batch round-robin counts 2868 */ 2869 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx); 2870 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; 2871 } 2872 } 2873 2874 /* 2875 * Caller needs to ensure that we're either frozen/quiesced, or that 2876 * the queue isn't live yet. 2877 */ 2878 static void queue_set_hctx_shared(struct request_queue *q, bool shared) 2879 { 2880 struct blk_mq_hw_ctx *hctx; 2881 int i; 2882 2883 queue_for_each_hw_ctx(q, hctx, i) { 2884 if (shared) 2885 hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; 2886 else 2887 hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; 2888 } 2889 } 2890 2891 static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set, 2892 bool shared) 2893 { 2894 struct request_queue *q; 2895 2896 lockdep_assert_held(&set->tag_list_lock); 2897 2898 list_for_each_entry(q, &set->tag_list, tag_set_list) { 2899 blk_mq_freeze_queue(q); 2900 queue_set_hctx_shared(q, shared); 2901 blk_mq_unfreeze_queue(q); 2902 } 2903 } 2904 2905 static void blk_mq_del_queue_tag_set(struct request_queue *q) 2906 { 2907 struct blk_mq_tag_set *set = q->tag_set; 2908 2909 mutex_lock(&set->tag_list_lock); 2910 list_del(&q->tag_set_list); 2911 if (list_is_singular(&set->tag_list)) { 2912 /* just transitioned to unshared */ 2913 set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; 2914 /* update existing queue */ 2915 blk_mq_update_tag_set_shared(set, false); 2916 } 2917 mutex_unlock(&set->tag_list_lock); 2918 INIT_LIST_HEAD(&q->tag_set_list); 2919 } 2920 2921 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, 2922 struct request_queue *q) 2923 { 2924 mutex_lock(&set->tag_list_lock); 2925 2926 /* 2927 * Check to see if we're transitioning to shared (from 1 to 2 queues).
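 * Note that blk_mq_update_tag_set_shared() above freezes each queue
 * around the flag flip, so no request is in flight while the hctx
 * BLK_MQ_F_TAG_QUEUE_SHARED state changes.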
2928 */ 2929 if (!list_empty(&set->tag_list) && 2930 !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) { 2931 set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; 2932 /* update existing queue */ 2933 blk_mq_update_tag_set_shared(set, true); 2934 } 2935 if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED) 2936 queue_set_hctx_shared(q, true); 2937 list_add_tail(&q->tag_set_list, &set->tag_list); 2938 2939 mutex_unlock(&set->tag_list_lock); 2940 } 2941 2942 /* All allocations will be freed in release handler of q->mq_kobj */ 2943 static int blk_mq_alloc_ctxs(struct request_queue *q) 2944 { 2945 struct blk_mq_ctxs *ctxs; 2946 int cpu; 2947 2948 ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL); 2949 if (!ctxs) 2950 return -ENOMEM; 2951 2952 ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx); 2953 if (!ctxs->queue_ctx) 2954 goto fail; 2955 2956 for_each_possible_cpu(cpu) { 2957 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu); 2958 ctx->ctxs = ctxs; 2959 } 2960 2961 q->mq_kobj = &ctxs->kobj; 2962 q->queue_ctx = ctxs->queue_ctx; 2963 2964 return 0; 2965 fail: 2966 kfree(ctxs); 2967 return -ENOMEM; 2968 } 2969 2970 /* 2971 * It is the actual release handler for mq, but we do it from 2972 * request queue's release handler for avoiding use-after-free 2973 * and headache because q->mq_kobj shouldn't have been introduced, 2974 * but we can't group ctx/kctx kobj without it. 2975 */ 2976 void blk_mq_release(struct request_queue *q) 2977 { 2978 struct blk_mq_hw_ctx *hctx, *next; 2979 int i; 2980 2981 queue_for_each_hw_ctx(q, hctx, i) 2982 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list)); 2983 2984 /* all hctx are in .unused_hctx_list now */ 2985 list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) { 2986 list_del_init(&hctx->hctx_list); 2987 kobject_put(&hctx->kobj); 2988 } 2989 2990 kfree(q->queue_hw_ctx); 2991 2992 /* 2993 * release .mq_kobj and sw queue's kobject now because 2994 * both share lifetime with request queue. 2995 */ 2996 blk_mq_sysfs_deinit(q); 2997 } 2998 2999 struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set, 3000 void *queuedata) 3001 { 3002 struct request_queue *uninit_q, *q; 3003 3004 uninit_q = blk_alloc_queue(set->numa_node); 3005 if (!uninit_q) 3006 return ERR_PTR(-ENOMEM); 3007 uninit_q->queuedata = queuedata; 3008 3009 /* 3010 * Initialize the queue without an elevator. device_add_disk() will do 3011 * the initialization. 3012 */ 3013 q = blk_mq_init_allocated_queue(set, uninit_q, false); 3014 if (IS_ERR(q)) 3015 blk_cleanup_queue(uninit_q); 3016 3017 return q; 3018 } 3019 EXPORT_SYMBOL_GPL(blk_mq_init_queue_data); 3020 3021 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) 3022 { 3023 return blk_mq_init_queue_data(set, NULL); 3024 } 3025 EXPORT_SYMBOL(blk_mq_init_queue); 3026 3027 /* 3028 * Helper for setting up a queue with mq ops, given queue depth, and 3029 * the passed in mq ops flags. 
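 *
 * Illustrative usage (a sketch: 'my_set' and 'my_mq_ops' are made-up
 * names, error handling elided):
 *
 *	static struct blk_mq_tag_set my_set;
 *
 *	q = blk_mq_init_sq_queue(&my_set, &my_mq_ops, 64,
 *				 BLK_MQ_F_SHOULD_MERGE);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);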
3030 */ 3031 struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set, 3032 const struct blk_mq_ops *ops, 3033 unsigned int queue_depth, 3034 unsigned int set_flags) 3035 { 3036 struct request_queue *q; 3037 int ret; 3038 3039 memset(set, 0, sizeof(*set)); 3040 set->ops = ops; 3041 set->nr_hw_queues = 1; 3042 set->nr_maps = 1; 3043 set->queue_depth = queue_depth; 3044 set->numa_node = NUMA_NO_NODE; 3045 set->flags = set_flags; 3046 3047 ret = blk_mq_alloc_tag_set(set); 3048 if (ret) 3049 return ERR_PTR(ret); 3050 3051 q = blk_mq_init_queue(set); 3052 if (IS_ERR(q)) { 3053 blk_mq_free_tag_set(set); 3054 return q; 3055 } 3056 3057 return q; 3058 } 3059 EXPORT_SYMBOL(blk_mq_init_sq_queue); 3060 3061 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx( 3062 struct blk_mq_tag_set *set, struct request_queue *q, 3063 int hctx_idx, int node) 3064 { 3065 struct blk_mq_hw_ctx *hctx = NULL, *tmp; 3066 3067 /* reuse dead hctx first */ 3068 spin_lock(&q->unused_hctx_lock); 3069 list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) { 3070 if (tmp->numa_node == node) { 3071 hctx = tmp; 3072 break; 3073 } 3074 } 3075 if (hctx) 3076 list_del_init(&hctx->hctx_list); 3077 spin_unlock(&q->unused_hctx_lock); 3078 3079 if (!hctx) 3080 hctx = blk_mq_alloc_hctx(q, set, node); 3081 if (!hctx) 3082 goto fail; 3083 3084 if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) 3085 goto free_hctx; 3086 3087 return hctx; 3088 3089 free_hctx: 3090 kobject_put(&hctx->kobj); 3091 fail: 3092 return NULL; 3093 } 3094 3095 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, 3096 struct request_queue *q) 3097 { 3098 int i, j, end; 3099 struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx; 3100 3101 if (q->nr_hw_queues < set->nr_hw_queues) { 3102 struct blk_mq_hw_ctx **new_hctxs; 3103 3104 new_hctxs = kcalloc_node(set->nr_hw_queues, 3105 sizeof(*new_hctxs), GFP_KERNEL, 3106 set->numa_node); 3107 if (!new_hctxs) 3108 return; 3109 if (hctxs) 3110 memcpy(new_hctxs, hctxs, q->nr_hw_queues * 3111 sizeof(*hctxs)); 3112 q->queue_hw_ctx = new_hctxs; 3113 kfree(hctxs); 3114 hctxs = new_hctxs; 3115 } 3116 3117 /* protect against switching io scheduler */ 3118 mutex_lock(&q->sysfs_lock); 3119 for (i = 0; i < set->nr_hw_queues; i++) { 3120 int node; 3121 struct blk_mq_hw_ctx *hctx; 3122 3123 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], i); 3124 /* 3125 * If the hw queue has been mapped to another numa node, 3126 * we need to realloc the hctx. If allocation fails, fallback 3127 * to use the previous one. 3128 */ 3129 if (hctxs[i] && (hctxs[i]->numa_node == node)) 3130 continue; 3131 3132 hctx = blk_mq_alloc_and_init_hctx(set, q, i, node); 3133 if (hctx) { 3134 if (hctxs[i]) 3135 blk_mq_exit_hctx(q, set, hctxs[i], i); 3136 hctxs[i] = hctx; 3137 } else { 3138 if (hctxs[i]) 3139 pr_warn("Allocate new hctx on node %d fails,\ 3140 fallback to previous one on node %d\n", 3141 node, hctxs[i]->numa_node); 3142 else 3143 break; 3144 } 3145 } 3146 /* 3147 * Increasing nr_hw_queues fails. Free the newly allocated 3148 * hctxs and keep the previous q->nr_hw_queues. 
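 * (That is, the loop above stopped early because
 * blk_mq_alloc_and_init_hctx() returned NULL for some index.)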
3149 */ 3150 if (i != set->nr_hw_queues) { 3151 j = q->nr_hw_queues; 3152 end = i; 3153 } else { 3154 j = i; 3155 end = q->nr_hw_queues; 3156 q->nr_hw_queues = set->nr_hw_queues; 3157 } 3158 3159 for (; j < end; j++) { 3160 struct blk_mq_hw_ctx *hctx = hctxs[j]; 3161 3162 if (hctx) { 3163 if (hctx->tags) 3164 blk_mq_free_map_and_requests(set, j); 3165 blk_mq_exit_hctx(q, set, hctx, j); 3166 hctxs[j] = NULL; 3167 } 3168 } 3169 mutex_unlock(&q->sysfs_lock); 3170 } 3171 3172 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, 3173 struct request_queue *q, 3174 bool elevator_init) 3175 { 3176 /* mark the queue as mq asap */ 3177 q->mq_ops = set->ops; 3178 3179 q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn, 3180 blk_mq_poll_stats_bkt, 3181 BLK_MQ_POLL_STATS_BKTS, q); 3182 if (!q->poll_cb) 3183 goto err_exit; 3184 3185 if (blk_mq_alloc_ctxs(q)) 3186 goto err_poll; 3187 3188 /* init q->mq_kobj and sw queues' kobjects */ 3189 blk_mq_sysfs_init(q); 3190 3191 INIT_LIST_HEAD(&q->unused_hctx_list); 3192 spin_lock_init(&q->unused_hctx_lock); 3193 3194 blk_mq_realloc_hw_ctxs(set, q); 3195 if (!q->nr_hw_queues) 3196 goto err_hctxs; 3197 3198 INIT_WORK(&q->timeout_work, blk_mq_timeout_work); 3199 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ); 3200 3201 q->tag_set = set; 3202 3203 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; 3204 if (set->nr_maps > HCTX_TYPE_POLL && 3205 set->map[HCTX_TYPE_POLL].nr_queues) 3206 blk_queue_flag_set(QUEUE_FLAG_POLL, q); 3207 3208 q->sg_reserved_size = INT_MAX; 3209 3210 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work); 3211 INIT_LIST_HEAD(&q->requeue_list); 3212 spin_lock_init(&q->requeue_lock); 3213 3214 q->nr_requests = set->queue_depth; 3215 3216 /* 3217 * Default to classic polling 3218 */ 3219 q->poll_nsec = BLK_MQ_POLL_CLASSIC; 3220 3221 blk_mq_init_cpu_queues(q, set->nr_hw_queues); 3222 blk_mq_add_queue_tag_set(set, q); 3223 blk_mq_map_swqueue(q); 3224 3225 if (elevator_init) 3226 elevator_init_mq(q); 3227 3228 return q; 3229 3230 err_hctxs: 3231 kfree(q->queue_hw_ctx); 3232 q->nr_hw_queues = 0; 3233 blk_mq_sysfs_deinit(q); 3234 err_poll: 3235 blk_stat_free_callback(q->poll_cb); 3236 q->poll_cb = NULL; 3237 err_exit: 3238 q->mq_ops = NULL; 3239 return ERR_PTR(-ENOMEM); 3240 } 3241 EXPORT_SYMBOL(blk_mq_init_allocated_queue); 3242 3243 /* tags can _not_ be used after returning from blk_mq_exit_queue */ 3244 void blk_mq_exit_queue(struct request_queue *q) 3245 { 3246 struct blk_mq_tag_set *set = q->tag_set; 3247 3248 blk_mq_del_queue_tag_set(q); 3249 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); 3250 } 3251 3252 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) 3253 { 3254 int i; 3255 3256 for (i = 0; i < set->nr_hw_queues; i++) { 3257 if (!__blk_mq_alloc_map_and_request(set, i)) 3258 goto out_unwind; 3259 cond_resched(); 3260 } 3261 3262 return 0; 3263 3264 out_unwind: 3265 while (--i >= 0) 3266 blk_mq_free_map_and_requests(set, i); 3267 3268 return -ENOMEM; 3269 } 3270 3271 /* 3272 * Allocate the request maps associated with this tag_set. Note that this 3273 * may reduce the depth asked for, if memory is tight. set->queue_depth 3274 * will be updated to reflect the allocated depth. 
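 * For example, a requested depth of 1024 that cannot be satisfied may
 * come back as 512 or 256: the loop below halves set->queue_depth on
 * each failure until allocation succeeds or the depth drops below
 * set->reserved_tags + BLK_MQ_TAG_MIN.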
3275 */ 3276 static int blk_mq_alloc_map_and_requests(struct blk_mq_tag_set *set) 3277 { 3278 unsigned int depth; 3279 int err; 3280 3281 depth = set->queue_depth; 3282 do { 3283 err = __blk_mq_alloc_rq_maps(set); 3284 if (!err) 3285 break; 3286 3287 set->queue_depth >>= 1; 3288 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) { 3289 err = -ENOMEM; 3290 break; 3291 } 3292 } while (set->queue_depth); 3293 3294 if (!set->queue_depth || err) { 3295 pr_err("blk-mq: failed to allocate request map\n"); 3296 return -ENOMEM; 3297 } 3298 3299 if (depth != set->queue_depth) 3300 pr_info("blk-mq: reduced tag depth (%u -> %u)\n", 3301 depth, set->queue_depth); 3302 3303 return 0; 3304 } 3305 3306 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set) 3307 { 3308 /* 3309 * blk_mq_map_queues() and multiple .map_queues() implementations 3310 * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the 3311 * number of hardware queues. 3312 */ 3313 if (set->nr_maps == 1) 3314 set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues; 3315 3316 if (set->ops->map_queues && !is_kdump_kernel()) { 3317 int i; 3318 3319 /* 3320 * transport .map_queues is usually done in the following 3321 * way: 3322 * 3323 * for (queue = 0; queue < set->nr_hw_queues; queue++) { 3324 * mask = get_cpu_mask(queue) 3325 * for_each_cpu(cpu, mask) 3326 * set->map[x].mq_map[cpu] = queue; 3327 * } 3328 * 3329 * When we need to remap, the table has to be cleared for 3330 * killing stale mapping since one CPU may not be mapped 3331 * to any hw queue. 3332 */ 3333 for (i = 0; i < set->nr_maps; i++) 3334 blk_mq_clear_mq_map(&set->map[i]); 3335 3336 return set->ops->map_queues(set); 3337 } else { 3338 BUG_ON(set->nr_maps > 1); 3339 return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); 3340 } 3341 } 3342 3343 static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set, 3344 int cur_nr_hw_queues, int new_nr_hw_queues) 3345 { 3346 struct blk_mq_tags **new_tags; 3347 3348 if (cur_nr_hw_queues >= new_nr_hw_queues) 3349 return 0; 3350 3351 new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *), 3352 GFP_KERNEL, set->numa_node); 3353 if (!new_tags) 3354 return -ENOMEM; 3355 3356 if (set->tags) 3357 memcpy(new_tags, set->tags, cur_nr_hw_queues * 3358 sizeof(*set->tags)); 3359 kfree(set->tags); 3360 set->tags = new_tags; 3361 set->nr_hw_queues = new_nr_hw_queues; 3362 3363 return 0; 3364 } 3365 3366 static int blk_mq_alloc_tag_set_tags(struct blk_mq_tag_set *set, 3367 int new_nr_hw_queues) 3368 { 3369 return blk_mq_realloc_tag_set_tags(set, 0, new_nr_hw_queues); 3370 } 3371 3372 /* 3373 * Alloc a tag set to be associated with one or more request queues. 3374 * May fail with EINVAL for various error conditions. May adjust the 3375 * requested depth down, if it's too large. In that case, the set 3376 * value will be stored in set->queue_depth. 
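 *
 * Illustrative driver-side setup (a sketch: names are made up, error
 * handling elided; the effective depth must be re-read from
 * set->queue_depth afterwards):
 *
 *	set->ops = &my_mq_ops;
 *	set->nr_hw_queues = 4;
 *	set->queue_depth = 128;
 *	set->numa_node = NUMA_NO_NODE;
 *	set->flags = BLK_MQ_F_SHOULD_MERGE;
 *	ret = blk_mq_alloc_tag_set(set);
 *	if (ret)
 *		return ret;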
3377 */ 3378 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) 3379 { 3380 int i, ret; 3381 3382 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS); 3383 3384 if (!set->nr_hw_queues) 3385 return -EINVAL; 3386 if (!set->queue_depth) 3387 return -EINVAL; 3388 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) 3389 return -EINVAL; 3390 3391 if (!set->ops->queue_rq) 3392 return -EINVAL; 3393 3394 if (!set->ops->get_budget ^ !set->ops->put_budget) 3395 return -EINVAL; 3396 3397 if (set->queue_depth > BLK_MQ_MAX_DEPTH) { 3398 pr_info("blk-mq: reduced tag depth to %u\n", 3399 BLK_MQ_MAX_DEPTH); 3400 set->queue_depth = BLK_MQ_MAX_DEPTH; 3401 } 3402 3403 if (!set->nr_maps) 3404 set->nr_maps = 1; 3405 else if (set->nr_maps > HCTX_MAX_TYPES) 3406 return -EINVAL; 3407 3408 /* 3409 * If a crashdump is active, then we are potentially in a very 3410 * memory constrained environment. Limit us to 1 queue and 3411 * 64 tags to prevent using too much memory. 3412 */ 3413 if (is_kdump_kernel()) { 3414 set->nr_hw_queues = 1; 3415 set->nr_maps = 1; 3416 set->queue_depth = min(64U, set->queue_depth); 3417 } 3418 /* 3419 * There is no use for more h/w queues than cpus if we just have 3420 * a single map 3421 */ 3422 if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids) 3423 set->nr_hw_queues = nr_cpu_ids; 3424 3425 if (blk_mq_alloc_tag_set_tags(set, set->nr_hw_queues) < 0) 3426 return -ENOMEM; 3427 3428 ret = -ENOMEM; 3429 for (i = 0; i < set->nr_maps; i++) { 3430 set->map[i].mq_map = kcalloc_node(nr_cpu_ids, 3431 sizeof(set->map[i].mq_map[0]), 3432 GFP_KERNEL, set->numa_node); 3433 if (!set->map[i].mq_map) 3434 goto out_free_mq_map; 3435 set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues; 3436 } 3437 3438 ret = blk_mq_update_queue_map(set); 3439 if (ret) 3440 goto out_free_mq_map; 3441 3442 ret = blk_mq_alloc_map_and_requests(set); 3443 if (ret) 3444 goto out_free_mq_map; 3445 3446 if (blk_mq_is_sbitmap_shared(set->flags)) { 3447 atomic_set(&set->active_queues_shared_sbitmap, 0); 3448 3449 if (blk_mq_init_shared_sbitmap(set, set->flags)) { 3450 ret = -ENOMEM; 3451 goto out_free_mq_rq_maps; 3452 } 3453 } 3454 3455 mutex_init(&set->tag_list_lock); 3456 INIT_LIST_HEAD(&set->tag_list); 3457 3458 return 0; 3459 3460 out_free_mq_rq_maps: 3461 for (i = 0; i < set->nr_hw_queues; i++) 3462 blk_mq_free_map_and_requests(set, i); 3463 out_free_mq_map: 3464 for (i = 0; i < set->nr_maps; i++) { 3465 kfree(set->map[i].mq_map); 3466 set->map[i].mq_map = NULL; 3467 } 3468 kfree(set->tags); 3469 set->tags = NULL; 3470 return ret; 3471 } 3472 EXPORT_SYMBOL(blk_mq_alloc_tag_set); 3473 3474 void blk_mq_free_tag_set(struct blk_mq_tag_set *set) 3475 { 3476 int i, j; 3477 3478 for (i = 0; i < set->nr_hw_queues; i++) 3479 blk_mq_free_map_and_requests(set, i); 3480 3481 if (blk_mq_is_sbitmap_shared(set->flags)) 3482 blk_mq_exit_shared_sbitmap(set); 3483 3484 for (j = 0; j < set->nr_maps; j++) { 3485 kfree(set->map[j].mq_map); 3486 set->map[j].mq_map = NULL; 3487 } 3488 3489 kfree(set->tags); 3490 set->tags = NULL; 3491 } 3492 EXPORT_SYMBOL(blk_mq_free_tag_set); 3493 3494 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) 3495 { 3496 struct blk_mq_tag_set *set = q->tag_set; 3497 struct blk_mq_hw_ctx *hctx; 3498 int i, ret; 3499 3500 if (!set) 3501 return -EINVAL; 3502 3503 if (q->nr_requests == nr) 3504 return 0; 3505 3506 blk_mq_freeze_queue(q); 3507 blk_mq_quiesce_queue(q); 3508 3509 ret = 0; 3510 queue_for_each_hw_ctx(q, hctx, i) { 3511 if (!hctx->tags) 3512 continue; 3513 /* 
3514 * If we're using an MQ scheduler, just update the scheduler 3515 * queue depth. This is similar to what the old code would do. 3516 */ 3517 if (!hctx->sched_tags) { 3518 ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr, 3519 false); 3520 if (!ret && blk_mq_is_sbitmap_shared(set->flags)) 3521 blk_mq_tag_resize_shared_sbitmap(set, nr); 3522 } else { 3523 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags, 3524 nr, true); 3525 } 3526 if (ret) 3527 break; 3528 if (q->elevator && q->elevator->type->ops.depth_updated) 3529 q->elevator->type->ops.depth_updated(hctx); 3530 } 3531 3532 if (!ret) 3533 q->nr_requests = nr; 3534 3535 blk_mq_unquiesce_queue(q); 3536 blk_mq_unfreeze_queue(q); 3537 3538 return ret; 3539 } 3540 3541 /* 3542 * request_queue and elevator_type pair. 3543 * It is just used by __blk_mq_update_nr_hw_queues to cache 3544 * the elevator_type associated with a request_queue. 3545 */ 3546 struct blk_mq_qe_pair { 3547 struct list_head node; 3548 struct request_queue *q; 3549 struct elevator_type *type; 3550 }; 3551 3552 /* 3553 * Cache the elevator_type in the qe pair list and switch the 3554 * io scheduler to 'none'. 3555 */ 3556 static bool blk_mq_elv_switch_none(struct list_head *head, 3557 struct request_queue *q) 3558 { 3559 struct blk_mq_qe_pair *qe; 3560 3561 if (!q->elevator) 3562 return true; 3563 3564 qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY); 3565 if (!qe) 3566 return false; 3567 3568 INIT_LIST_HEAD(&qe->node); 3569 qe->q = q; 3570 qe->type = q->elevator->type; 3571 list_add(&qe->node, head); 3572 3573 mutex_lock(&q->sysfs_lock); 3574 /* 3575 * After elevator_switch_mq, the previous elevator_queue will be 3576 * released by elevator_release. The reference to the io scheduler 3577 * module taken by elevator_get will also be put. So we need to take 3578 * a reference to the io scheduler module here to prevent it from 3579 * being removed. 3580 */ 3581 __module_get(qe->type->elevator_owner); 3582 elevator_switch_mq(q, NULL); 3583 mutex_unlock(&q->sysfs_lock); 3584 3585 return true; 3586 } 3587 3588 static void blk_mq_elv_switch_back(struct list_head *head, 3589 struct request_queue *q) 3590 { 3591 struct blk_mq_qe_pair *qe; 3592 struct elevator_type *t = NULL; 3593 3594 list_for_each_entry(qe, head, node) 3595 if (qe->q == q) { 3596 t = qe->type; 3597 break; 3598 } 3599 3600 if (!t) 3601 return; 3602 3603 list_del(&qe->node); 3604 kfree(qe); 3605 3606 mutex_lock(&q->sysfs_lock); 3607 elevator_switch_mq(q, t); 3608 mutex_unlock(&q->sysfs_lock); 3609 } 3610 3611 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, 3612 int nr_hw_queues) 3613 { 3614 struct request_queue *q; 3615 LIST_HEAD(head); 3616 int prev_nr_hw_queues; 3617 3618 lockdep_assert_held(&set->tag_list_lock); 3619 3620 if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids) 3621 nr_hw_queues = nr_cpu_ids; 3622 if (nr_hw_queues < 1) 3623 return; 3624 if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues) 3625 return; 3626 3627 list_for_each_entry(q, &set->tag_list, tag_set_list) 3628 blk_mq_freeze_queue(q); 3629 /* 3630 * Switch IO scheduler to 'none', cleaning up the data associated 3631 * with the previous scheduler. We will switch back once we are done 3632 * updating the new sw to hw queue mappings.
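 * (The rationale: per-hctx scheduler data is sized for the old
 * nr_hw_queues and cannot safely be carried across
 * blk_mq_realloc_hw_ctxs(), so tear it down here and restore it once
 * the new mapping is in place.)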
3633 */ 3634 list_for_each_entry(q, &set->tag_list, tag_set_list) 3635 if (!blk_mq_elv_switch_none(&head, q)) 3636 goto switch_back; 3637 3638 list_for_each_entry(q, &set->tag_list, tag_set_list) { 3639 blk_mq_debugfs_unregister_hctxs(q); 3640 blk_mq_sysfs_unregister(q); 3641 } 3642 3643 prev_nr_hw_queues = set->nr_hw_queues; 3644 if (blk_mq_realloc_tag_set_tags(set, set->nr_hw_queues, nr_hw_queues) < 3645 0) 3646 goto reregister; 3647 3648 set->nr_hw_queues = nr_hw_queues; 3649 fallback: 3650 blk_mq_update_queue_map(set); 3651 list_for_each_entry(q, &set->tag_list, tag_set_list) { 3652 blk_mq_realloc_hw_ctxs(set, q); 3653 if (q->nr_hw_queues != set->nr_hw_queues) { 3654 pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n", 3655 nr_hw_queues, prev_nr_hw_queues); 3656 set->nr_hw_queues = prev_nr_hw_queues; 3657 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); 3658 goto fallback; 3659 } 3660 blk_mq_map_swqueue(q); 3661 } 3662 3663 reregister: 3664 list_for_each_entry(q, &set->tag_list, tag_set_list) { 3665 blk_mq_sysfs_register(q); 3666 blk_mq_debugfs_register_hctxs(q); 3667 } 3668 3669 switch_back: 3670 list_for_each_entry(q, &set->tag_list, tag_set_list) 3671 blk_mq_elv_switch_back(&head, q); 3672 3673 list_for_each_entry(q, &set->tag_list, tag_set_list) 3674 blk_mq_unfreeze_queue(q); 3675 } 3676 3677 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) 3678 { 3679 mutex_lock(&set->tag_list_lock); 3680 __blk_mq_update_nr_hw_queues(set, nr_hw_queues); 3681 mutex_unlock(&set->tag_list_lock); 3682 } 3683 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues); 3684 3685 /* Enable polling stats and return whether they were already enabled. */ 3686 static bool blk_poll_stats_enable(struct request_queue *q) 3687 { 3688 if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) || 3689 blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q)) 3690 return true; 3691 blk_stat_add_callback(q, q->poll_cb); 3692 return false; 3693 } 3694 3695 static void blk_mq_poll_stats_start(struct request_queue *q) 3696 { 3697 /* 3698 * We don't arm the callback if polling stats are not enabled or the 3699 * callback is already active. 3700 */ 3701 if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) || 3702 blk_stat_is_active(q->poll_cb)) 3703 return; 3704 3705 blk_stat_activate_msecs(q->poll_cb, 100); 3706 } 3707 3708 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb) 3709 { 3710 struct request_queue *q = cb->data; 3711 int bucket; 3712 3713 for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) { 3714 if (cb->stat[bucket].nr_samples) 3715 q->poll_stat[bucket] = cb->stat[bucket]; 3716 } 3717 } 3718 3719 static unsigned long blk_mq_poll_nsecs(struct request_queue *q, 3720 struct request *rq) 3721 { 3722 unsigned long ret = 0; 3723 int bucket; 3724 3725 /* 3726 * If stats collection isn't on, don't sleep but turn it on for 3727 * future users 3728 */ 3729 if (!blk_poll_stats_enable(q)) 3730 return 0; 3731 3732 /* 3733 * As an optimistic guess, use half of the mean service time 3734 * for this type of request. We can (and should) make this smarter. 3735 * For instance, if the completion latencies are tight, we can 3736 * get closer than just half the mean. This is especially 3737 * important on devices where the completion latencies are longer 3738 * than ~10 usec. We do use the stats for the relevant IO size 3739 * if available which does lead to better estimates. 
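 *
 * For example, with a mean completion time of 8 usec in the matching
 * bucket, the computation below returns (8 + 1) / 2 = 4 usec as the
 * pre-spin sleep time.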
3740 */ 3741 bucket = blk_mq_poll_stats_bkt(rq); 3742 if (bucket < 0) 3743 return ret; 3744 3745 if (q->poll_stat[bucket].nr_samples) 3746 ret = (q->poll_stat[bucket].mean + 1) / 2; 3747 3748 return ret; 3749 } 3750 3751 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q, 3752 struct request *rq) 3753 { 3754 struct hrtimer_sleeper hs; 3755 enum hrtimer_mode mode; 3756 unsigned int nsecs; 3757 ktime_t kt; 3758 3759 if (rq->rq_flags & RQF_MQ_POLL_SLEPT) 3760 return false; 3761 3762 /* 3763 * If we get here, hybrid polling is enabled. Hence poll_nsec can be: 3764 * 3765 * 0: use half of prev avg 3766 * >0: use this specific value 3767 */ 3768 if (q->poll_nsec > 0) 3769 nsecs = q->poll_nsec; 3770 else 3771 nsecs = blk_mq_poll_nsecs(q, rq); 3772 3773 if (!nsecs) 3774 return false; 3775 3776 rq->rq_flags |= RQF_MQ_POLL_SLEPT; 3777 3778 /* 3779 * This will be replaced with the stats tracking code, using 3780 * 'avg_completion_time / 2' as the pre-sleep target. 3781 */ 3782 kt = nsecs; 3783 3784 mode = HRTIMER_MODE_REL; 3785 hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode); 3786 hrtimer_set_expires(&hs.timer, kt); 3787 3788 do { 3789 if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE) 3790 break; 3791 set_current_state(TASK_UNINTERRUPTIBLE); 3792 hrtimer_sleeper_start_expires(&hs, mode); 3793 if (hs.task) 3794 io_schedule(); 3795 hrtimer_cancel(&hs.timer); 3796 mode = HRTIMER_MODE_ABS; 3797 } while (hs.task && !signal_pending(current)); 3798 3799 __set_current_state(TASK_RUNNING); 3800 destroy_hrtimer_on_stack(&hs.timer); 3801 return true; 3802 } 3803 3804 static bool blk_mq_poll_hybrid(struct request_queue *q, 3805 struct blk_mq_hw_ctx *hctx, blk_qc_t cookie) 3806 { 3807 struct request *rq; 3808 3809 if (q->poll_nsec == BLK_MQ_POLL_CLASSIC) 3810 return false; 3811 3812 if (!blk_qc_t_is_internal(cookie)) 3813 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie)); 3814 else { 3815 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie)); 3816 /* 3817 * With scheduling, if the request has completed, we'll 3818 * get a NULL return here, as we clear the sched tag when 3819 * that happens. The request still remains valid, like always, 3820 * so we should be safe with just the NULL check. 3821 */ 3822 if (!rq) 3823 return false; 3824 } 3825 3826 return blk_mq_poll_hybrid_sleep(q, rq); 3827 } 3828 3829 /** 3830 * blk_poll - poll for IO completions 3831 * @q: the queue 3832 * @cookie: cookie passed back at IO submission time 3833 * @spin: whether to spin for completions 3834 * 3835 * Description: 3836 * Poll for completions on the passed in queue. Returns number of 3837 * completed entries found. If @spin is true, then blk_poll will continue 3838 * looping until at least one completion is found, unless the task is 3839 * otherwise marked running (or we need to reschedule). 3840 */ 3841 int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin) 3842 { 3843 struct blk_mq_hw_ctx *hctx; 3844 long state; 3845 3846 if (!blk_qc_t_valid(cookie) || 3847 !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) 3848 return 0; 3849 3850 if (current->plug) 3851 blk_flush_plug_list(current->plug, false); 3852 3853 hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)]; 3854 3855 /* 3856 * If we sleep, have the caller restart the poll loop to reset 3857 * the state. Like for the other success return cases, the 3858 * caller is responsible for checking if the IO completed. If 3859 * the IO isn't complete, we'll get called again and will go 3860 * straight to the busy poll loop. 
If specified not to spin, 3861 * we also should not sleep. 3862 */ 3863 if (spin && blk_mq_poll_hybrid(q, hctx, cookie)) 3864 return 1; 3865 3866 hctx->poll_considered++; 3867 3868 state = current->state; 3869 do { 3870 int ret; 3871 3872 hctx->poll_invoked++; 3873 3874 ret = q->mq_ops->poll(hctx); 3875 if (ret > 0) { 3876 hctx->poll_success++; 3877 __set_current_state(TASK_RUNNING); 3878 return ret; 3879 } 3880 3881 if (signal_pending_state(state, current)) 3882 __set_current_state(TASK_RUNNING); 3883 3884 if (current->state == TASK_RUNNING) 3885 return 1; 3886 if (ret < 0 || !spin) 3887 break; 3888 cpu_relax(); 3889 } while (!need_resched()); 3890 3891 __set_current_state(TASK_RUNNING); 3892 return 0; 3893 } 3894 EXPORT_SYMBOL_GPL(blk_poll); 3895 3896 unsigned int blk_mq_rq_cpu(struct request *rq) 3897 { 3898 return rq->mq_ctx->cpu; 3899 } 3900 EXPORT_SYMBOL(blk_mq_rq_cpu); 3901 3902 static int __init blk_mq_init(void) 3903 { 3904 int i; 3905 3906 for_each_possible_cpu(i) 3907 INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i)); 3908 open_softirq(BLOCK_SOFTIRQ, blk_done_softirq); 3909 3910 cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD, 3911 "block/softirq:dead", NULL, 3912 blk_softirq_cpu_dead); 3913 cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL, 3914 blk_mq_hctx_notify_dead); 3915 cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online", 3916 blk_mq_hctx_notify_online, 3917 blk_mq_hctx_notify_offline); 3918 return 0; 3919 } 3920 subsys_initcall(blk_mq_init); 3921