1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Block multiqueue core code 4 * 5 * Copyright (C) 2013-2014 Jens Axboe 6 * Copyright (C) 2013-2014 Christoph Hellwig 7 */ 8 #include <linux/kernel.h> 9 #include <linux/module.h> 10 #include <linux/backing-dev.h> 11 #include <linux/bio.h> 12 #include <linux/blkdev.h> 13 #include <linux/kmemleak.h> 14 #include <linux/mm.h> 15 #include <linux/init.h> 16 #include <linux/slab.h> 17 #include <linux/workqueue.h> 18 #include <linux/smp.h> 19 #include <linux/llist.h> 20 #include <linux/list_sort.h> 21 #include <linux/cpu.h> 22 #include <linux/cache.h> 23 #include <linux/sched/sysctl.h> 24 #include <linux/sched/topology.h> 25 #include <linux/sched/signal.h> 26 #include <linux/delay.h> 27 #include <linux/crash_dump.h> 28 #include <linux/prefetch.h> 29 30 #include <trace/events/block.h> 31 32 #include <linux/blk-mq.h> 33 #include <linux/t10-pi.h> 34 #include "blk.h" 35 #include "blk-mq.h" 36 #include "blk-mq-debugfs.h" 37 #include "blk-mq-tag.h" 38 #include "blk-pm.h" 39 #include "blk-stat.h" 40 #include "blk-mq-sched.h" 41 #include "blk-rq-qos.h" 42 43 static void blk_mq_poll_stats_start(struct request_queue *q); 44 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb); 45 46 static int blk_mq_poll_stats_bkt(const struct request *rq) 47 { 48 int ddir, sectors, bucket; 49 50 ddir = rq_data_dir(rq); 51 sectors = blk_rq_stats_sectors(rq); 52 53 bucket = ddir + 2 * ilog2(sectors); 54 55 if (bucket < 0) 56 return -1; 57 else if (bucket >= BLK_MQ_POLL_STATS_BKTS) 58 return ddir + BLK_MQ_POLL_STATS_BKTS - 2; 59 60 return bucket; 61 } 62 63 /* 64 * Check if any of the ctx, dispatch list or elevator 65 * have pending work in this hardware queue. 66 */ 67 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) 68 { 69 return !list_empty_careful(&hctx->dispatch) || 70 sbitmap_any_bit_set(&hctx->ctx_map) || 71 blk_mq_sched_has_work(hctx); 72 } 73 74 /* 75 * Mark this ctx as having pending work in this hardware queue 76 */ 77 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, 78 struct blk_mq_ctx *ctx) 79 { 80 const int bit = ctx->index_hw[hctx->type]; 81 82 if (!sbitmap_test_bit(&hctx->ctx_map, bit)) 83 sbitmap_set_bit(&hctx->ctx_map, bit); 84 } 85 86 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, 87 struct blk_mq_ctx *ctx) 88 { 89 const int bit = ctx->index_hw[hctx->type]; 90 91 sbitmap_clear_bit(&hctx->ctx_map, bit); 92 } 93 94 struct mq_inflight { 95 struct hd_struct *part; 96 unsigned int inflight[2]; 97 }; 98 99 static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx, 100 struct request *rq, void *priv, 101 bool reserved) 102 { 103 struct mq_inflight *mi = priv; 104 105 if (rq->part == mi->part) 106 mi->inflight[rq_data_dir(rq)]++; 107 108 return true; 109 } 110 111 unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part) 112 { 113 struct mq_inflight mi = { .part = part }; 114 115 blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi); 116 117 return mi.inflight[0] + mi.inflight[1]; 118 } 119 120 void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part, 121 unsigned int inflight[2]) 122 { 123 struct mq_inflight mi = { .part = part }; 124 125 blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi); 126 inflight[0] = mi.inflight[0]; 127 inflight[1] = mi.inflight[1]; 128 } 129 130 void blk_freeze_queue_start(struct request_queue *q) 131 { 132 mutex_lock(&q->mq_freeze_lock); 133 if (++q->mq_freeze_depth == 1) { 134 percpu_ref_kill(&q->q_usage_counter); 135 
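		/*
		 * Killing the percpu ref flips q_usage_counter into atomic
		 * mode: new blk_queue_enter() callers now block (or fail for
		 * BLK_MQ_REQ_NOWAIT) until the queue is unfrozen, while
		 * requests already in flight keep their references and are
		 * drained by blk_mq_freeze_queue_wait().
		 */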
mutex_unlock(&q->mq_freeze_lock); 136 if (queue_is_mq(q)) 137 blk_mq_run_hw_queues(q, false); 138 } else { 139 mutex_unlock(&q->mq_freeze_lock); 140 } 141 } 142 EXPORT_SYMBOL_GPL(blk_freeze_queue_start); 143 144 void blk_mq_freeze_queue_wait(struct request_queue *q) 145 { 146 wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter)); 147 } 148 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait); 149 150 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q, 151 unsigned long timeout) 152 { 153 return wait_event_timeout(q->mq_freeze_wq, 154 percpu_ref_is_zero(&q->q_usage_counter), 155 timeout); 156 } 157 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout); 158 159 /* 160 * Guarantee no request is in use, so we can change any data structure of 161 * the queue afterward. 162 */ 163 void blk_freeze_queue(struct request_queue *q) 164 { 165 /* 166 * In the !blk_mq case we are only calling this to kill the 167 * q_usage_counter, otherwise this increases the freeze depth 168 * and waits for it to return to zero. For this reason there is 169 * no blk_unfreeze_queue(), and blk_freeze_queue() is not 170 * exported to drivers as the only user for unfreeze is blk_mq. 171 */ 172 blk_freeze_queue_start(q); 173 blk_mq_freeze_queue_wait(q); 174 } 175 176 void blk_mq_freeze_queue(struct request_queue *q) 177 { 178 /* 179 * ...just an alias to keep freeze and unfreeze actions balanced 180 * in the blk_mq_* namespace 181 */ 182 blk_freeze_queue(q); 183 } 184 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue); 185 186 void blk_mq_unfreeze_queue(struct request_queue *q) 187 { 188 mutex_lock(&q->mq_freeze_lock); 189 q->mq_freeze_depth--; 190 WARN_ON_ONCE(q->mq_freeze_depth < 0); 191 if (!q->mq_freeze_depth) { 192 percpu_ref_resurrect(&q->q_usage_counter); 193 wake_up_all(&q->mq_freeze_wq); 194 } 195 mutex_unlock(&q->mq_freeze_lock); 196 } 197 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue); 198 199 /* 200 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the 201 * mpt3sas driver such that this function can be removed. 202 */ 203 void blk_mq_quiesce_queue_nowait(struct request_queue *q) 204 { 205 blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q); 206 } 207 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait); 208 209 /** 210 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished 211 * @q: request queue. 212 * 213 * Note: this function does not prevent that the struct request end_io() 214 * callback function is invoked. Once this function is returned, we make 215 * sure no dispatch can happen until the queue is unquiesced via 216 * blk_mq_unquiesce_queue(). 217 */ 218 void blk_mq_quiesce_queue(struct request_queue *q) 219 { 220 struct blk_mq_hw_ctx *hctx; 221 unsigned int i; 222 bool rcu = false; 223 224 blk_mq_quiesce_queue_nowait(q); 225 226 queue_for_each_hw_ctx(q, hctx, i) { 227 if (hctx->flags & BLK_MQ_F_BLOCKING) 228 synchronize_srcu(hctx->srcu); 229 else 230 rcu = true; 231 } 232 if (rcu) 233 synchronize_rcu(); 234 } 235 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue); 236 237 /* 238 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue() 239 * @q: request queue. 240 * 241 * This function recovers queue into the state before quiescing 242 * which is done by blk_mq_quiesce_queue. 
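 *
 * A typical, purely illustrative pairing in a driver:
 *
 *	blk_mq_quiesce_queue(q);
 *	reconfigure_device(q);		(hypothetical driver-private step)
 *	blk_mq_unquiesce_queue(q);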
243 */ 244 void blk_mq_unquiesce_queue(struct request_queue *q) 245 { 246 blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q); 247 248 /* dispatch requests which are inserted during quiescing */ 249 blk_mq_run_hw_queues(q, true); 250 } 251 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue); 252 253 void blk_mq_wake_waiters(struct request_queue *q) 254 { 255 struct blk_mq_hw_ctx *hctx; 256 unsigned int i; 257 258 queue_for_each_hw_ctx(q, hctx, i) 259 if (blk_mq_hw_queue_mapped(hctx)) 260 blk_mq_tag_wakeup_all(hctx->tags, true); 261 } 262 263 /* 264 * Only need start/end time stamping if we have iostat or 265 * blk stats enabled, or using an IO scheduler. 266 */ 267 static inline bool blk_mq_need_time_stamp(struct request *rq) 268 { 269 return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS)) || rq->q->elevator; 270 } 271 272 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, 273 unsigned int tag, unsigned int op, u64 alloc_time_ns) 274 { 275 struct blk_mq_tags *tags = blk_mq_tags_from_data(data); 276 struct request *rq = tags->static_rqs[tag]; 277 req_flags_t rq_flags = 0; 278 279 if (data->flags & BLK_MQ_REQ_INTERNAL) { 280 rq->tag = -1; 281 rq->internal_tag = tag; 282 } else { 283 if (data->hctx->flags & BLK_MQ_F_TAG_SHARED) { 284 rq_flags = RQF_MQ_INFLIGHT; 285 atomic_inc(&data->hctx->nr_active); 286 } 287 rq->tag = tag; 288 rq->internal_tag = -1; 289 data->hctx->tags->rqs[rq->tag] = rq; 290 } 291 292 /* csd/requeue_work/fifo_time is initialized before use */ 293 rq->q = data->q; 294 rq->mq_ctx = data->ctx; 295 rq->mq_hctx = data->hctx; 296 rq->rq_flags = rq_flags; 297 rq->cmd_flags = op; 298 if (data->flags & BLK_MQ_REQ_PREEMPT) 299 rq->rq_flags |= RQF_PREEMPT; 300 if (blk_queue_io_stat(data->q)) 301 rq->rq_flags |= RQF_IO_STAT; 302 INIT_LIST_HEAD(&rq->queuelist); 303 INIT_HLIST_NODE(&rq->hash); 304 RB_CLEAR_NODE(&rq->rb_node); 305 rq->rq_disk = NULL; 306 rq->part = NULL; 307 #ifdef CONFIG_BLK_RQ_ALLOC_TIME 308 rq->alloc_time_ns = alloc_time_ns; 309 #endif 310 if (blk_mq_need_time_stamp(rq)) 311 rq->start_time_ns = ktime_get_ns(); 312 else 313 rq->start_time_ns = 0; 314 rq->io_start_time_ns = 0; 315 rq->stats_sectors = 0; 316 rq->nr_phys_segments = 0; 317 #if defined(CONFIG_BLK_DEV_INTEGRITY) 318 rq->nr_integrity_segments = 0; 319 #endif 320 /* tag was already set */ 321 rq->extra_len = 0; 322 WRITE_ONCE(rq->deadline, 0); 323 324 rq->timeout = 0; 325 326 rq->end_io = NULL; 327 rq->end_io_data = NULL; 328 329 data->ctx->rq_dispatched[op_is_sync(op)]++; 330 refcount_set(&rq->ref, 1); 331 return rq; 332 } 333 334 static struct request *blk_mq_get_request(struct request_queue *q, 335 struct bio *bio, 336 struct blk_mq_alloc_data *data) 337 { 338 struct elevator_queue *e = q->elevator; 339 struct request *rq; 340 unsigned int tag; 341 bool clear_ctx_on_error = false; 342 u64 alloc_time_ns = 0; 343 344 blk_queue_enter_live(q); 345 346 /* alloc_time includes depth and tag waits */ 347 if (blk_queue_rq_alloc_time(q)) 348 alloc_time_ns = ktime_get_ns(); 349 350 data->q = q; 351 if (likely(!data->ctx)) { 352 data->ctx = blk_mq_get_ctx(q); 353 clear_ctx_on_error = true; 354 } 355 if (likely(!data->hctx)) 356 data->hctx = blk_mq_map_queue(q, data->cmd_flags, 357 data->ctx); 358 if (data->cmd_flags & REQ_NOWAIT) 359 data->flags |= BLK_MQ_REQ_NOWAIT; 360 361 if (e) { 362 data->flags |= BLK_MQ_REQ_INTERNAL; 363 364 /* 365 * Flush requests are special and go directly to the 366 * dispatch list. Don't include reserved tags in the 367 * limiting, as it isn't useful. 
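		 *
		 * (When present, the elevator's ->limit_depth() hook, e.g. in
		 *  Kyber or BFQ, may lower data->shallow_depth here to
		 *  throttle async requests.)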
368 */ 369 if (!op_is_flush(data->cmd_flags) && 370 e->type->ops.limit_depth && 371 !(data->flags & BLK_MQ_REQ_RESERVED)) 372 e->type->ops.limit_depth(data->cmd_flags, data); 373 } else { 374 blk_mq_tag_busy(data->hctx); 375 } 376 377 tag = blk_mq_get_tag(data); 378 if (tag == BLK_MQ_TAG_FAIL) { 379 if (clear_ctx_on_error) 380 data->ctx = NULL; 381 blk_queue_exit(q); 382 return NULL; 383 } 384 385 rq = blk_mq_rq_ctx_init(data, tag, data->cmd_flags, alloc_time_ns); 386 if (!op_is_flush(data->cmd_flags)) { 387 rq->elv.icq = NULL; 388 if (e && e->type->ops.prepare_request) { 389 if (e->type->icq_cache) 390 blk_mq_sched_assign_ioc(rq); 391 392 e->type->ops.prepare_request(rq, bio); 393 rq->rq_flags |= RQF_ELVPRIV; 394 } 395 } 396 data->hctx->queued++; 397 return rq; 398 } 399 400 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, 401 blk_mq_req_flags_t flags) 402 { 403 struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op }; 404 struct request *rq; 405 int ret; 406 407 ret = blk_queue_enter(q, flags); 408 if (ret) 409 return ERR_PTR(ret); 410 411 rq = blk_mq_get_request(q, NULL, &alloc_data); 412 blk_queue_exit(q); 413 414 if (!rq) 415 return ERR_PTR(-EWOULDBLOCK); 416 417 rq->__data_len = 0; 418 rq->__sector = (sector_t) -1; 419 rq->bio = rq->biotail = NULL; 420 return rq; 421 } 422 EXPORT_SYMBOL(blk_mq_alloc_request); 423 424 struct request *blk_mq_alloc_request_hctx(struct request_queue *q, 425 unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx) 426 { 427 struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op }; 428 struct request *rq; 429 unsigned int cpu; 430 int ret; 431 432 /* 433 * If the tag allocator sleeps we could get an allocation for a 434 * different hardware context. No need to complicate the low level 435 * allocator for this for the rare use case of a command tied to 436 * a specific queue. 437 */ 438 if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT))) 439 return ERR_PTR(-EINVAL); 440 441 if (hctx_idx >= q->nr_hw_queues) 442 return ERR_PTR(-EIO); 443 444 ret = blk_queue_enter(q, flags); 445 if (ret) 446 return ERR_PTR(ret); 447 448 /* 449 * Check if the hardware context is actually mapped to anything. 450 * If not tell the caller that it should skip this queue. 
451 */ 452 alloc_data.hctx = q->queue_hw_ctx[hctx_idx]; 453 if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) { 454 blk_queue_exit(q); 455 return ERR_PTR(-EXDEV); 456 } 457 cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask); 458 alloc_data.ctx = __blk_mq_get_ctx(q, cpu); 459 460 rq = blk_mq_get_request(q, NULL, &alloc_data); 461 blk_queue_exit(q); 462 463 if (!rq) 464 return ERR_PTR(-EWOULDBLOCK); 465 466 return rq; 467 } 468 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx); 469 470 static void __blk_mq_free_request(struct request *rq) 471 { 472 struct request_queue *q = rq->q; 473 struct blk_mq_ctx *ctx = rq->mq_ctx; 474 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 475 const int sched_tag = rq->internal_tag; 476 477 blk_pm_mark_last_busy(rq); 478 rq->mq_hctx = NULL; 479 if (rq->tag != -1) 480 blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag); 481 if (sched_tag != -1) 482 blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag); 483 blk_mq_sched_restart(hctx); 484 blk_queue_exit(q); 485 } 486 487 void blk_mq_free_request(struct request *rq) 488 { 489 struct request_queue *q = rq->q; 490 struct elevator_queue *e = q->elevator; 491 struct blk_mq_ctx *ctx = rq->mq_ctx; 492 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 493 494 if (rq->rq_flags & RQF_ELVPRIV) { 495 if (e && e->type->ops.finish_request) 496 e->type->ops.finish_request(rq); 497 if (rq->elv.icq) { 498 put_io_context(rq->elv.icq->ioc); 499 rq->elv.icq = NULL; 500 } 501 } 502 503 ctx->rq_completed[rq_is_sync(rq)]++; 504 if (rq->rq_flags & RQF_MQ_INFLIGHT) 505 atomic_dec(&hctx->nr_active); 506 507 if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq))) 508 laptop_io_completion(q->backing_dev_info); 509 510 rq_qos_done(q, rq); 511 512 WRITE_ONCE(rq->state, MQ_RQ_IDLE); 513 if (refcount_dec_and_test(&rq->ref)) 514 __blk_mq_free_request(rq); 515 } 516 EXPORT_SYMBOL_GPL(blk_mq_free_request); 517 518 inline void __blk_mq_end_request(struct request *rq, blk_status_t error) 519 { 520 u64 now = 0; 521 522 if (blk_mq_need_time_stamp(rq)) 523 now = ktime_get_ns(); 524 525 if (rq->rq_flags & RQF_STATS) { 526 blk_mq_poll_stats_start(rq->q); 527 blk_stat_add(rq, now); 528 } 529 530 if (rq->internal_tag != -1) 531 blk_mq_sched_completed_request(rq, now); 532 533 blk_account_io_done(rq, now); 534 535 if (rq->end_io) { 536 rq_qos_done(rq->q, rq); 537 rq->end_io(rq, error); 538 } else { 539 blk_mq_free_request(rq); 540 } 541 } 542 EXPORT_SYMBOL(__blk_mq_end_request); 543 544 void blk_mq_end_request(struct request *rq, blk_status_t error) 545 { 546 if (blk_update_request(rq, error, blk_rq_bytes(rq))) 547 BUG(); 548 __blk_mq_end_request(rq, error); 549 } 550 EXPORT_SYMBOL(blk_mq_end_request); 551 552 static void __blk_mq_complete_request_remote(void *data) 553 { 554 struct request *rq = data; 555 struct request_queue *q = rq->q; 556 557 q->mq_ops->complete(rq); 558 } 559 560 static void __blk_mq_complete_request(struct request *rq) 561 { 562 struct blk_mq_ctx *ctx = rq->mq_ctx; 563 struct request_queue *q = rq->q; 564 bool shared = false; 565 int cpu; 566 567 WRITE_ONCE(rq->state, MQ_RQ_COMPLETE); 568 /* 569 * Most of single queue controllers, there is only one irq vector 570 * for handling IO completion, and the only irq's affinity is set 571 * as all possible CPUs. On most of ARCHs, this affinity means the 572 * irq is handled on one specific CPU. 573 * 574 * So complete IO reqeust in softirq context in case of single queue 575 * for not degrading IO performance by irqsoff latency. 
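	 *
	 * (__blk_complete_request() below hands the completion to
	 *  BLOCK_SOFTIRQ, honoring QUEUE_FLAG_SAME_COMP/SAME_FORCE when it
	 *  picks the CPU that ends up running ->complete().)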
 */
	if (q->nr_hw_queues == 1) {
		__blk_complete_request(rq);
		return;
	}

	/*
	 * For a polled request, always complete locally, it's pointless
	 * to redirect the completion.
	 */
	if ((rq->cmd_flags & REQ_HIPRI) ||
	    !test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) {
		q->mq_ops->complete(rq);
		return;
	}

	cpu = get_cpu();
	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
		shared = cpus_share_cache(cpu, ctx->cpu);

	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
		rq->csd.func = __blk_mq_complete_request_remote;
		rq->csd.info = rq;
		rq->csd.flags = 0;
		smp_call_function_single_async(ctx->cpu, &rq->csd);
	} else {
		q->mq_ops->complete(rq);
	}
	put_cpu();
}

static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
	__releases(hctx->srcu)
{
	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
		rcu_read_unlock();
	else
		srcu_read_unlock(hctx->srcu, srcu_idx);
}

static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
	__acquires(hctx->srcu)
{
	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
		/* shut up gcc false positive */
		*srcu_idx = 0;
		rcu_read_lock();
	} else
		*srcu_idx = srcu_read_lock(hctx->srcu);
}

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq: the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through an IPI handler.
 **/
bool blk_mq_complete_request(struct request *rq)
{
	if (unlikely(blk_should_fake_timeout(rq->q)))
		return false;
	__blk_mq_complete_request(rq);
	return true;
}
EXPORT_SYMBOL(blk_mq_complete_request);

/**
 * blk_mq_start_request - Start processing a request
 * @rq: Pointer to request to be started
 *
 * Function used by device drivers to notify the block layer that a request
 * is going to be processed now, so the block layer can do proper
 * initializations such as starting the timeout timer.
 */
void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_issue(q, rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		rq->io_start_time_ns = ktime_get_ns();
		rq->stats_sectors = blk_rq_sectors(rq);
		rq->rq_flags |= RQF_STATS;
		rq_qos_issue(q, rq);
	}

	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);

	blk_add_timer(rq);
	WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);

	if (q->dma_drain_size && blk_rq_bytes(rq)) {
		/*
		 * Make sure space for the drain appears. We know we can do
		 * this because max_hw_segments has been adjusted to be one
		 * fewer than the device can handle.
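		 *
		 * (The drain segment itself, q->dma_drain_buffer, is appended
		 *  by blk_rq_map_sg() when the driver maps this request.)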
675 */ 676 rq->nr_phys_segments++; 677 } 678 679 #ifdef CONFIG_BLK_DEV_INTEGRITY 680 if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE) 681 q->integrity.profile->prepare_fn(rq); 682 #endif 683 } 684 EXPORT_SYMBOL(blk_mq_start_request); 685 686 static void __blk_mq_requeue_request(struct request *rq) 687 { 688 struct request_queue *q = rq->q; 689 690 blk_mq_put_driver_tag(rq); 691 692 trace_block_rq_requeue(q, rq); 693 rq_qos_requeue(q, rq); 694 695 if (blk_mq_request_started(rq)) { 696 WRITE_ONCE(rq->state, MQ_RQ_IDLE); 697 rq->rq_flags &= ~RQF_TIMED_OUT; 698 if (q->dma_drain_size && blk_rq_bytes(rq)) 699 rq->nr_phys_segments--; 700 } 701 } 702 703 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list) 704 { 705 __blk_mq_requeue_request(rq); 706 707 /* this request will be re-inserted to io scheduler queue */ 708 blk_mq_sched_requeue_request(rq); 709 710 BUG_ON(!list_empty(&rq->queuelist)); 711 blk_mq_add_to_requeue_list(rq, true, kick_requeue_list); 712 } 713 EXPORT_SYMBOL(blk_mq_requeue_request); 714 715 static void blk_mq_requeue_work(struct work_struct *work) 716 { 717 struct request_queue *q = 718 container_of(work, struct request_queue, requeue_work.work); 719 LIST_HEAD(rq_list); 720 struct request *rq, *next; 721 722 spin_lock_irq(&q->requeue_lock); 723 list_splice_init(&q->requeue_list, &rq_list); 724 spin_unlock_irq(&q->requeue_lock); 725 726 list_for_each_entry_safe(rq, next, &rq_list, queuelist) { 727 if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP))) 728 continue; 729 730 rq->rq_flags &= ~RQF_SOFTBARRIER; 731 list_del_init(&rq->queuelist); 732 /* 733 * If RQF_DONTPREP, rq has contained some driver specific 734 * data, so insert it to hctx dispatch list to avoid any 735 * merge. 736 */ 737 if (rq->rq_flags & RQF_DONTPREP) 738 blk_mq_request_bypass_insert(rq, false); 739 else 740 blk_mq_sched_insert_request(rq, true, false, false); 741 } 742 743 while (!list_empty(&rq_list)) { 744 rq = list_entry(rq_list.next, struct request, queuelist); 745 list_del_init(&rq->queuelist); 746 blk_mq_sched_insert_request(rq, false, false, false); 747 } 748 749 blk_mq_run_hw_queues(q, false); 750 } 751 752 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head, 753 bool kick_requeue_list) 754 { 755 struct request_queue *q = rq->q; 756 unsigned long flags; 757 758 /* 759 * We abuse this flag that is otherwise used by the I/O scheduler to 760 * request head insertion from the workqueue. 
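	 *
	 * (blk_mq_requeue_work() treats RQF_SOFTBARRIER purely as "insert
	 *  this request at the head when the requeue list is flushed".)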
761 */ 762 BUG_ON(rq->rq_flags & RQF_SOFTBARRIER); 763 764 spin_lock_irqsave(&q->requeue_lock, flags); 765 if (at_head) { 766 rq->rq_flags |= RQF_SOFTBARRIER; 767 list_add(&rq->queuelist, &q->requeue_list); 768 } else { 769 list_add_tail(&rq->queuelist, &q->requeue_list); 770 } 771 spin_unlock_irqrestore(&q->requeue_lock, flags); 772 773 if (kick_requeue_list) 774 blk_mq_kick_requeue_list(q); 775 } 776 777 void blk_mq_kick_requeue_list(struct request_queue *q) 778 { 779 kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0); 780 } 781 EXPORT_SYMBOL(blk_mq_kick_requeue_list); 782 783 void blk_mq_delay_kick_requeue_list(struct request_queue *q, 784 unsigned long msecs) 785 { 786 kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 787 msecs_to_jiffies(msecs)); 788 } 789 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list); 790 791 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag) 792 { 793 if (tag < tags->nr_tags) { 794 prefetch(tags->rqs[tag]); 795 return tags->rqs[tag]; 796 } 797 798 return NULL; 799 } 800 EXPORT_SYMBOL(blk_mq_tag_to_rq); 801 802 static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq, 803 void *priv, bool reserved) 804 { 805 /* 806 * If we find a request that is inflight and the queue matches, 807 * we know the queue is busy. Return false to stop the iteration. 808 */ 809 if (rq->state == MQ_RQ_IN_FLIGHT && rq->q == hctx->queue) { 810 bool *busy = priv; 811 812 *busy = true; 813 return false; 814 } 815 816 return true; 817 } 818 819 bool blk_mq_queue_inflight(struct request_queue *q) 820 { 821 bool busy = false; 822 823 blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy); 824 return busy; 825 } 826 EXPORT_SYMBOL_GPL(blk_mq_queue_inflight); 827 828 static void blk_mq_rq_timed_out(struct request *req, bool reserved) 829 { 830 req->rq_flags |= RQF_TIMED_OUT; 831 if (req->q->mq_ops->timeout) { 832 enum blk_eh_timer_return ret; 833 834 ret = req->q->mq_ops->timeout(req, reserved); 835 if (ret == BLK_EH_DONE) 836 return; 837 WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER); 838 } 839 840 blk_add_timer(req); 841 } 842 843 static bool blk_mq_req_expired(struct request *rq, unsigned long *next) 844 { 845 unsigned long deadline; 846 847 if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT) 848 return false; 849 if (rq->rq_flags & RQF_TIMED_OUT) 850 return false; 851 852 deadline = READ_ONCE(rq->deadline); 853 if (time_after_eq(jiffies, deadline)) 854 return true; 855 856 if (*next == 0) 857 *next = deadline; 858 else if (time_after(*next, deadline)) 859 *next = deadline; 860 return false; 861 } 862 863 static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, 864 struct request *rq, void *priv, bool reserved) 865 { 866 unsigned long *next = priv; 867 868 /* 869 * Just do a quick check if it is expired before locking the request in 870 * so we're not unnecessarilly synchronizing across CPUs. 871 */ 872 if (!blk_mq_req_expired(rq, next)) 873 return true; 874 875 /* 876 * We have reason to believe the request may be expired. Take a 877 * reference on the request to lock this request lifetime into its 878 * currently allocated context to prevent it from being reallocated in 879 * the event the completion by-passes this timeout handler. 880 * 881 * If the reference was already released, then the driver beat the 882 * timeout handler to posting a natural completion. 
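	 *
	 * (If refcount_inc_not_zero() fails below, rq->ref already dropped
	 *  to zero, i.e. the request completed and was freed, so there is
	 *  nothing left to time out.)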
883 */ 884 if (!refcount_inc_not_zero(&rq->ref)) 885 return true; 886 887 /* 888 * The request is now locked and cannot be reallocated underneath the 889 * timeout handler's processing. Re-verify this exact request is truly 890 * expired; if it is not expired, then the request was completed and 891 * reallocated as a new request. 892 */ 893 if (blk_mq_req_expired(rq, next)) 894 blk_mq_rq_timed_out(rq, reserved); 895 896 if (is_flush_rq(rq, hctx)) 897 rq->end_io(rq, 0); 898 else if (refcount_dec_and_test(&rq->ref)) 899 __blk_mq_free_request(rq); 900 901 return true; 902 } 903 904 static void blk_mq_timeout_work(struct work_struct *work) 905 { 906 struct request_queue *q = 907 container_of(work, struct request_queue, timeout_work); 908 unsigned long next = 0; 909 struct blk_mq_hw_ctx *hctx; 910 int i; 911 912 /* A deadlock might occur if a request is stuck requiring a 913 * timeout at the same time a queue freeze is waiting 914 * completion, since the timeout code would not be able to 915 * acquire the queue reference here. 916 * 917 * That's why we don't use blk_queue_enter here; instead, we use 918 * percpu_ref_tryget directly, because we need to be able to 919 * obtain a reference even in the short window between the queue 920 * starting to freeze, by dropping the first reference in 921 * blk_freeze_queue_start, and the moment the last request is 922 * consumed, marked by the instant q_usage_counter reaches 923 * zero. 924 */ 925 if (!percpu_ref_tryget(&q->q_usage_counter)) 926 return; 927 928 blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next); 929 930 if (next != 0) { 931 mod_timer(&q->timeout, next); 932 } else { 933 /* 934 * Request timeouts are handled as a forward rolling timer. If 935 * we end up here it means that no requests are pending and 936 * also that no request has been pending for a while. Mark 937 * each hctx as idle. 
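		 *
		 * (Idling a hctx clears BLK_MQ_S_TAG_ACTIVE, so it stops
		 *  counting towards the fair sharing of a shared tag set.)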
938 */ 939 queue_for_each_hw_ctx(q, hctx, i) { 940 /* the hctx may be unmapped, so check it here */ 941 if (blk_mq_hw_queue_mapped(hctx)) 942 blk_mq_tag_idle(hctx); 943 } 944 } 945 blk_queue_exit(q); 946 } 947 948 struct flush_busy_ctx_data { 949 struct blk_mq_hw_ctx *hctx; 950 struct list_head *list; 951 }; 952 953 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data) 954 { 955 struct flush_busy_ctx_data *flush_data = data; 956 struct blk_mq_hw_ctx *hctx = flush_data->hctx; 957 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; 958 enum hctx_type type = hctx->type; 959 960 spin_lock(&ctx->lock); 961 list_splice_tail_init(&ctx->rq_lists[type], flush_data->list); 962 sbitmap_clear_bit(sb, bitnr); 963 spin_unlock(&ctx->lock); 964 return true; 965 } 966 967 /* 968 * Process software queues that have been marked busy, splicing them 969 * to the for-dispatch 970 */ 971 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list) 972 { 973 struct flush_busy_ctx_data data = { 974 .hctx = hctx, 975 .list = list, 976 }; 977 978 sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data); 979 } 980 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs); 981 982 struct dispatch_rq_data { 983 struct blk_mq_hw_ctx *hctx; 984 struct request *rq; 985 }; 986 987 static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr, 988 void *data) 989 { 990 struct dispatch_rq_data *dispatch_data = data; 991 struct blk_mq_hw_ctx *hctx = dispatch_data->hctx; 992 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; 993 enum hctx_type type = hctx->type; 994 995 spin_lock(&ctx->lock); 996 if (!list_empty(&ctx->rq_lists[type])) { 997 dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next); 998 list_del_init(&dispatch_data->rq->queuelist); 999 if (list_empty(&ctx->rq_lists[type])) 1000 sbitmap_clear_bit(sb, bitnr); 1001 } 1002 spin_unlock(&ctx->lock); 1003 1004 return !dispatch_data->rq; 1005 } 1006 1007 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, 1008 struct blk_mq_ctx *start) 1009 { 1010 unsigned off = start ? 
start->index_hw[hctx->type] : 0; 1011 struct dispatch_rq_data data = { 1012 .hctx = hctx, 1013 .rq = NULL, 1014 }; 1015 1016 __sbitmap_for_each_set(&hctx->ctx_map, off, 1017 dispatch_rq_from_ctx, &data); 1018 1019 return data.rq; 1020 } 1021 1022 static inline unsigned int queued_to_index(unsigned int queued) 1023 { 1024 if (!queued) 1025 return 0; 1026 1027 return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1); 1028 } 1029 1030 bool blk_mq_get_driver_tag(struct request *rq) 1031 { 1032 struct blk_mq_alloc_data data = { 1033 .q = rq->q, 1034 .hctx = rq->mq_hctx, 1035 .flags = BLK_MQ_REQ_NOWAIT, 1036 .cmd_flags = rq->cmd_flags, 1037 }; 1038 bool shared; 1039 1040 if (rq->tag != -1) 1041 return true; 1042 1043 if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag)) 1044 data.flags |= BLK_MQ_REQ_RESERVED; 1045 1046 shared = blk_mq_tag_busy(data.hctx); 1047 rq->tag = blk_mq_get_tag(&data); 1048 if (rq->tag >= 0) { 1049 if (shared) { 1050 rq->rq_flags |= RQF_MQ_INFLIGHT; 1051 atomic_inc(&data.hctx->nr_active); 1052 } 1053 data.hctx->tags->rqs[rq->tag] = rq; 1054 } 1055 1056 return rq->tag != -1; 1057 } 1058 1059 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, 1060 int flags, void *key) 1061 { 1062 struct blk_mq_hw_ctx *hctx; 1063 1064 hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait); 1065 1066 spin_lock(&hctx->dispatch_wait_lock); 1067 if (!list_empty(&wait->entry)) { 1068 struct sbitmap_queue *sbq; 1069 1070 list_del_init(&wait->entry); 1071 sbq = &hctx->tags->bitmap_tags; 1072 atomic_dec(&sbq->ws_active); 1073 } 1074 spin_unlock(&hctx->dispatch_wait_lock); 1075 1076 blk_mq_run_hw_queue(hctx, true); 1077 return 1; 1078 } 1079 1080 /* 1081 * Mark us waiting for a tag. For shared tags, this involves hooking us into 1082 * the tag wakeups. For non-shared tags, we can simply mark us needing a 1083 * restart. For both cases, take care to check the condition again after 1084 * marking us as waiting. 1085 */ 1086 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, 1087 struct request *rq) 1088 { 1089 struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags; 1090 struct wait_queue_head *wq; 1091 wait_queue_entry_t *wait; 1092 bool ret; 1093 1094 if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) { 1095 blk_mq_sched_mark_restart_hctx(hctx); 1096 1097 /* 1098 * It's possible that a tag was freed in the window between the 1099 * allocation failure and adding the hardware queue to the wait 1100 * queue. 1101 * 1102 * Don't clear RESTART here, someone else could have set it. 1103 * At most this will cost an extra queue run. 1104 */ 1105 return blk_mq_get_driver_tag(rq); 1106 } 1107 1108 wait = &hctx->dispatch_wait; 1109 if (!list_empty_careful(&wait->entry)) 1110 return false; 1111 1112 wq = &bt_wait_ptr(sbq, hctx)->wait; 1113 1114 spin_lock_irq(&wq->lock); 1115 spin_lock(&hctx->dispatch_wait_lock); 1116 if (!list_empty(&wait->entry)) { 1117 spin_unlock(&hctx->dispatch_wait_lock); 1118 spin_unlock_irq(&wq->lock); 1119 return false; 1120 } 1121 1122 atomic_inc(&sbq->ws_active); 1123 wait->flags &= ~WQ_FLAG_EXCLUSIVE; 1124 __add_wait_queue(wq, wait); 1125 1126 /* 1127 * It's possible that a tag was freed in the window between the 1128 * allocation failure and adding the hardware queue to the wait 1129 * queue. 
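	 *
	 * (That is why blk_mq_get_driver_tag() is retried below while both
	 *  the wait queue lock and dispatch_wait_lock are still held.)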
1130 */ 1131 ret = blk_mq_get_driver_tag(rq); 1132 if (!ret) { 1133 spin_unlock(&hctx->dispatch_wait_lock); 1134 spin_unlock_irq(&wq->lock); 1135 return false; 1136 } 1137 1138 /* 1139 * We got a tag, remove ourselves from the wait queue to ensure 1140 * someone else gets the wakeup. 1141 */ 1142 list_del_init(&wait->entry); 1143 atomic_dec(&sbq->ws_active); 1144 spin_unlock(&hctx->dispatch_wait_lock); 1145 spin_unlock_irq(&wq->lock); 1146 1147 return true; 1148 } 1149 1150 #define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT 8 1151 #define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR 4 1152 /* 1153 * Update dispatch busy with the Exponential Weighted Moving Average(EWMA): 1154 * - EWMA is one simple way to compute running average value 1155 * - weight(7/8 and 1/8) is applied so that it can decrease exponentially 1156 * - take 4 as factor for avoiding to get too small(0) result, and this 1157 * factor doesn't matter because EWMA decreases exponentially 1158 */ 1159 static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy) 1160 { 1161 unsigned int ewma; 1162 1163 if (hctx->queue->elevator) 1164 return; 1165 1166 ewma = hctx->dispatch_busy; 1167 1168 if (!ewma && !busy) 1169 return; 1170 1171 ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1; 1172 if (busy) 1173 ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR; 1174 ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT; 1175 1176 hctx->dispatch_busy = ewma; 1177 } 1178 1179 #define BLK_MQ_RESOURCE_DELAY 3 /* ms units */ 1180 1181 /* 1182 * Returns true if we did some work AND can potentially do more. 1183 */ 1184 bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, 1185 bool got_budget) 1186 { 1187 struct blk_mq_hw_ctx *hctx; 1188 struct request *rq, *nxt; 1189 bool no_tag = false; 1190 int errors, queued; 1191 blk_status_t ret = BLK_STS_OK; 1192 1193 if (list_empty(list)) 1194 return false; 1195 1196 WARN_ON(!list_is_singular(list) && got_budget); 1197 1198 /* 1199 * Now process all the entries, sending them to the driver. 1200 */ 1201 errors = queued = 0; 1202 do { 1203 struct blk_mq_queue_data bd; 1204 1205 rq = list_first_entry(list, struct request, queuelist); 1206 1207 hctx = rq->mq_hctx; 1208 if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) 1209 break; 1210 1211 if (!blk_mq_get_driver_tag(rq)) { 1212 /* 1213 * The initial allocation attempt failed, so we need to 1214 * rerun the hardware queue when a tag is freed. The 1215 * waitqueue takes care of that. If the queue is run 1216 * before we add this entry back on the dispatch list, 1217 * we'll re-run it below. 1218 */ 1219 if (!blk_mq_mark_tag_wait(hctx, rq)) { 1220 blk_mq_put_dispatch_budget(hctx); 1221 /* 1222 * For non-shared tags, the RESTART check 1223 * will suffice. 1224 */ 1225 if (hctx->flags & BLK_MQ_F_TAG_SHARED) 1226 no_tag = true; 1227 break; 1228 } 1229 } 1230 1231 list_del_init(&rq->queuelist); 1232 1233 bd.rq = rq; 1234 1235 /* 1236 * Flag last if we have no more requests, or if we have more 1237 * but can't assign a driver tag to it. 1238 */ 1239 if (list_empty(list)) 1240 bd.last = true; 1241 else { 1242 nxt = list_first_entry(list, struct request, queuelist); 1243 bd.last = !blk_mq_get_driver_tag(nxt); 1244 } 1245 1246 ret = q->mq_ops->queue_rq(hctx, &bd); 1247 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) { 1248 /* 1249 * If an I/O scheduler has been configured and we got a 1250 * driver tag for the next request already, free it 1251 * again. 
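			 *
			 * (That tag was taken speculatively when bd.last was
			 *  computed above; releasing it lets other contexts
			 *  make progress while this queue backs off.)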
1252 */ 1253 if (!list_empty(list)) { 1254 nxt = list_first_entry(list, struct request, queuelist); 1255 blk_mq_put_driver_tag(nxt); 1256 } 1257 list_add(&rq->queuelist, list); 1258 __blk_mq_requeue_request(rq); 1259 break; 1260 } 1261 1262 if (unlikely(ret != BLK_STS_OK)) { 1263 errors++; 1264 blk_mq_end_request(rq, BLK_STS_IOERR); 1265 continue; 1266 } 1267 1268 queued++; 1269 } while (!list_empty(list)); 1270 1271 hctx->dispatched[queued_to_index(queued)]++; 1272 1273 /* 1274 * Any items that need requeuing? Stuff them into hctx->dispatch, 1275 * that is where we will continue on next queue run. 1276 */ 1277 if (!list_empty(list)) { 1278 bool needs_restart; 1279 1280 /* 1281 * If we didn't flush the entire list, we could have told 1282 * the driver there was more coming, but that turned out to 1283 * be a lie. 1284 */ 1285 if (q->mq_ops->commit_rqs) 1286 q->mq_ops->commit_rqs(hctx); 1287 1288 spin_lock(&hctx->lock); 1289 list_splice_init(list, &hctx->dispatch); 1290 spin_unlock(&hctx->lock); 1291 1292 /* 1293 * If SCHED_RESTART was set by the caller of this function and 1294 * it is no longer set that means that it was cleared by another 1295 * thread and hence that a queue rerun is needed. 1296 * 1297 * If 'no_tag' is set, that means that we failed getting 1298 * a driver tag with an I/O scheduler attached. If our dispatch 1299 * waitqueue is no longer active, ensure that we run the queue 1300 * AFTER adding our entries back to the list. 1301 * 1302 * If no I/O scheduler has been configured it is possible that 1303 * the hardware queue got stopped and restarted before requests 1304 * were pushed back onto the dispatch list. Rerun the queue to 1305 * avoid starvation. Notes: 1306 * - blk_mq_run_hw_queue() checks whether or not a queue has 1307 * been stopped before rerunning a queue. 1308 * - Some but not all block drivers stop a queue before 1309 * returning BLK_STS_RESOURCE. Two exceptions are scsi-mq 1310 * and dm-rq. 1311 * 1312 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART 1313 * bit is set, run queue after a delay to avoid IO stalls 1314 * that could otherwise occur if the queue is idle. 1315 */ 1316 needs_restart = blk_mq_sched_needs_restart(hctx); 1317 if (!needs_restart || 1318 (no_tag && list_empty_careful(&hctx->dispatch_wait.entry))) 1319 blk_mq_run_hw_queue(hctx, true); 1320 else if (needs_restart && (ret == BLK_STS_RESOURCE)) 1321 blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY); 1322 1323 blk_mq_update_dispatch_busy(hctx, true); 1324 return false; 1325 } else 1326 blk_mq_update_dispatch_busy(hctx, false); 1327 1328 /* 1329 * If the host/device is unable to accept more work, inform the 1330 * caller of that. 1331 */ 1332 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) 1333 return false; 1334 1335 return (queued + errors) != 0; 1336 } 1337 1338 /** 1339 * __blk_mq_run_hw_queue - Run a hardware queue. 1340 * @hctx: Pointer to the hardware queue to run. 1341 * 1342 * Send pending requests to the hardware. 1343 */ 1344 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) 1345 { 1346 int srcu_idx; 1347 1348 /* 1349 * We should be running this queue from one of the CPUs that 1350 * are mapped to it. 
 *
 * There are at least two related races now between setting
 * hctx->next_cpu from blk_mq_hctx_next_cpu() and running
 * __blk_mq_run_hw_queue():
 *
 * - hctx->next_cpu is found offline in blk_mq_hctx_next_cpu(),
 *   but later it becomes online, then this warning is harmless
 *
 * - hctx->next_cpu is found online in blk_mq_hctx_next_cpu(),
 *   but later it becomes offline, then the warning can't be
 *   triggered, and we depend on the blk-mq timeout handler to
 *   handle requests dispatched to this hctx
 */
	if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
	    cpu_online(hctx->next_cpu)) {
		printk(KERN_WARNING "run queue from wrong CPU %d, hctx %s\n",
			raw_smp_processor_id(),
			cpumask_empty(hctx->cpumask) ? "inactive": "active");
		dump_stack();
	}

	/*
	 * We can't run the queue inline with interrupts disabled. Ensure
	 * that we catch bad users of this early.
	 */
	WARN_ON_ONCE(in_interrupt());

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	hctx_lock(hctx, &srcu_idx);
	blk_mq_sched_dispatch_requests(hctx);
	hctx_unlock(hctx, srcu_idx);
}

static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
{
	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(hctx->cpumask);
	return cpu;
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	bool tried = false;
	int next_cpu = hctx->next_cpu;

	if (hctx->queue->nr_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	if (--hctx->next_cpu_batch <= 0) {
select_cpu:
		next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
				cpu_online_mask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = blk_mq_first_mapped_cpu(hctx);
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}

	/*
	 * Do unbound scheduling if we can't find an online CPU for this
	 * hctx; this should only happen in the CPU hotplug (CPU DEAD) path.
	 */
	if (!cpu_online(next_cpu)) {
		if (!tried) {
			tried = true;
			goto select_cpu;
		}

		/*
		 * Make sure to re-select a CPU next time once CPUs in
		 * hctx->cpumask become online again.
		 */
		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = 1;
		return WORK_CPU_UNBOUND;
	}

	hctx->next_cpu = next_cpu;
	return next_cpu;
}

/**
 * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue.
 * @hctx: Pointer to the hardware queue to run.
 * @async: If we want to run the queue asynchronously.
 * @msecs: Milliseconds of delay to wait before running the queue.
 *
 * If !@async, try to run the queue now. Else, run the queue asynchronously and
 * with a delay of @msecs.
 */
static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
					unsigned long msecs)
{
	if (unlikely(blk_mq_hctx_stopped(hctx)))
		return;

	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
		int cpu = get_cpu();
		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
			__blk_mq_run_hw_queue(hctx);
			put_cpu();
			return;
		}

		put_cpu();
	}

	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
				    msecs_to_jiffies(msecs));
}

/**
 * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
 * @hctx: Pointer to the hardware queue to run.
 * @msecs: Milliseconds of delay to wait before running the queue.
 *
 * Run a hardware queue asynchronously with a delay of @msecs.
 */
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);

/**
 * blk_mq_run_hw_queue - Start to run a hardware queue.
 * @hctx: Pointer to the hardware queue to run.
 * @async: If we want to run the queue asynchronously.
 *
 * Check if the request queue is not in a quiesced state and if there are
 * pending requests to be sent. If this is true, run the queue to send requests
 * to hardware.
 */
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	int srcu_idx;
	bool need_run;

	/*
	 * When the queue is quiesced, we may be switching I/O schedulers,
	 * updating nr_hw_queues, or similar, and must not run the queue;
	 * even __blk_mq_hctx_has_pending() can't be called safely then.
	 *
	 * The queue will be rerun by blk_mq_unquiesce_queue() if it is
	 * quiesced.
	 */
	hctx_lock(hctx, &srcu_idx);
	need_run = !blk_queue_quiesced(hctx->queue) &&
		blk_mq_hctx_has_pending(hctx);
	hctx_unlock(hctx, srcu_idx);

	if (need_run)
		__blk_mq_delay_run_hw_queue(hctx, async, 0);
}
EXPORT_SYMBOL(blk_mq_run_hw_queue);

/**
 * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
 * @q: Pointer to the request queue to run.
 * @async: If we want to run the queue asynchronously.
 */
void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_hctx_stopped(hctx))
			continue;

		blk_mq_run_hw_queue(hctx, async);
	}
}
EXPORT_SYMBOL(blk_mq_run_hw_queues);

/**
 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
 * @q: request queue.
 *
 * The caller is responsible for serializing this function against
 * blk_mq_{start,stop}_hw_queue().
 */
bool blk_mq_queue_stopped(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hctx_stopped(hctx))
			return true;

	return false;
}
EXPORT_SYMBOL(blk_mq_queue_stopped);

/*
 * Drivers often use this function to pause .queue_rq() when there aren't
 * enough resources or some other condition isn't satisfied; BLK_STS_RESOURCE
 * is usually returned in that case.
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queue() returns.
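 * A stopped hctx only has its run_work cancelled and BLK_MQ_S_STOPPED
 * set; a dispatch that is already in progress may still reach
 * ->queue_rq().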
Please use 1562 * blk_mq_quiesce_queue() for that requirement. 1563 */ 1564 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) 1565 { 1566 cancel_delayed_work(&hctx->run_work); 1567 1568 set_bit(BLK_MQ_S_STOPPED, &hctx->state); 1569 } 1570 EXPORT_SYMBOL(blk_mq_stop_hw_queue); 1571 1572 /* 1573 * This function is often used for pausing .queue_rq() by driver when 1574 * there isn't enough resource or some conditions aren't satisfied, and 1575 * BLK_STS_RESOURCE is usually returned. 1576 * 1577 * We do not guarantee that dispatch can be drained or blocked 1578 * after blk_mq_stop_hw_queues() returns. Please use 1579 * blk_mq_quiesce_queue() for that requirement. 1580 */ 1581 void blk_mq_stop_hw_queues(struct request_queue *q) 1582 { 1583 struct blk_mq_hw_ctx *hctx; 1584 int i; 1585 1586 queue_for_each_hw_ctx(q, hctx, i) 1587 blk_mq_stop_hw_queue(hctx); 1588 } 1589 EXPORT_SYMBOL(blk_mq_stop_hw_queues); 1590 1591 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) 1592 { 1593 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 1594 1595 blk_mq_run_hw_queue(hctx, false); 1596 } 1597 EXPORT_SYMBOL(blk_mq_start_hw_queue); 1598 1599 void blk_mq_start_hw_queues(struct request_queue *q) 1600 { 1601 struct blk_mq_hw_ctx *hctx; 1602 int i; 1603 1604 queue_for_each_hw_ctx(q, hctx, i) 1605 blk_mq_start_hw_queue(hctx); 1606 } 1607 EXPORT_SYMBOL(blk_mq_start_hw_queues); 1608 1609 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) 1610 { 1611 if (!blk_mq_hctx_stopped(hctx)) 1612 return; 1613 1614 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 1615 blk_mq_run_hw_queue(hctx, async); 1616 } 1617 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue); 1618 1619 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) 1620 { 1621 struct blk_mq_hw_ctx *hctx; 1622 int i; 1623 1624 queue_for_each_hw_ctx(q, hctx, i) 1625 blk_mq_start_stopped_hw_queue(hctx, async); 1626 } 1627 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues); 1628 1629 static void blk_mq_run_work_fn(struct work_struct *work) 1630 { 1631 struct blk_mq_hw_ctx *hctx; 1632 1633 hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work); 1634 1635 /* 1636 * If we are stopped, don't run the queue. 1637 */ 1638 if (test_bit(BLK_MQ_S_STOPPED, &hctx->state)) 1639 return; 1640 1641 __blk_mq_run_hw_queue(hctx); 1642 } 1643 1644 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx, 1645 struct request *rq, 1646 bool at_head) 1647 { 1648 struct blk_mq_ctx *ctx = rq->mq_ctx; 1649 enum hctx_type type = hctx->type; 1650 1651 lockdep_assert_held(&ctx->lock); 1652 1653 trace_block_rq_insert(hctx->queue, rq); 1654 1655 if (at_head) 1656 list_add(&rq->queuelist, &ctx->rq_lists[type]); 1657 else 1658 list_add_tail(&rq->queuelist, &ctx->rq_lists[type]); 1659 } 1660 1661 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, 1662 bool at_head) 1663 { 1664 struct blk_mq_ctx *ctx = rq->mq_ctx; 1665 1666 lockdep_assert_held(&ctx->lock); 1667 1668 __blk_mq_insert_req_list(hctx, rq, at_head); 1669 blk_mq_hctx_mark_pending(hctx, ctx); 1670 } 1671 1672 /** 1673 * blk_mq_request_bypass_insert - Insert a request at dispatch list. 1674 * @rq: Pointer to request to be inserted. 1675 * @run_queue: If we should run the hardware queue after inserting the request. 1676 * 1677 * Should only be used carefully, when the caller knows we want to 1678 * bypass a potential IO scheduler on the target device. 
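 *
 * (Requests placed on hctx->dispatch are picked up ahead of the
 *  scheduler's own queue on the next queue run.)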
1679 */ 1680 void blk_mq_request_bypass_insert(struct request *rq, bool run_queue) 1681 { 1682 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 1683 1684 spin_lock(&hctx->lock); 1685 list_add_tail(&rq->queuelist, &hctx->dispatch); 1686 spin_unlock(&hctx->lock); 1687 1688 if (run_queue) 1689 blk_mq_run_hw_queue(hctx, false); 1690 } 1691 1692 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, 1693 struct list_head *list) 1694 1695 { 1696 struct request *rq; 1697 enum hctx_type type = hctx->type; 1698 1699 /* 1700 * preemption doesn't flush plug list, so it's possible ctx->cpu is 1701 * offline now 1702 */ 1703 list_for_each_entry(rq, list, queuelist) { 1704 BUG_ON(rq->mq_ctx != ctx); 1705 trace_block_rq_insert(hctx->queue, rq); 1706 } 1707 1708 spin_lock(&ctx->lock); 1709 list_splice_tail_init(list, &ctx->rq_lists[type]); 1710 blk_mq_hctx_mark_pending(hctx, ctx); 1711 spin_unlock(&ctx->lock); 1712 } 1713 1714 static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b) 1715 { 1716 struct request *rqa = container_of(a, struct request, queuelist); 1717 struct request *rqb = container_of(b, struct request, queuelist); 1718 1719 if (rqa->mq_ctx != rqb->mq_ctx) 1720 return rqa->mq_ctx > rqb->mq_ctx; 1721 if (rqa->mq_hctx != rqb->mq_hctx) 1722 return rqa->mq_hctx > rqb->mq_hctx; 1723 1724 return blk_rq_pos(rqa) > blk_rq_pos(rqb); 1725 } 1726 1727 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) 1728 { 1729 LIST_HEAD(list); 1730 1731 if (list_empty(&plug->mq_list)) 1732 return; 1733 list_splice_init(&plug->mq_list, &list); 1734 1735 if (plug->rq_count > 2 && plug->multiple_queues) 1736 list_sort(NULL, &list, plug_rq_cmp); 1737 1738 plug->rq_count = 0; 1739 1740 do { 1741 struct list_head rq_list; 1742 struct request *rq, *head_rq = list_entry_rq(list.next); 1743 struct list_head *pos = &head_rq->queuelist; /* skip first */ 1744 struct blk_mq_hw_ctx *this_hctx = head_rq->mq_hctx; 1745 struct blk_mq_ctx *this_ctx = head_rq->mq_ctx; 1746 unsigned int depth = 1; 1747 1748 list_for_each_continue(pos, &list) { 1749 rq = list_entry_rq(pos); 1750 BUG_ON(!rq->q); 1751 if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx) 1752 break; 1753 depth++; 1754 } 1755 1756 list_cut_before(&rq_list, &list, pos); 1757 trace_block_unplug(head_rq->q, depth, !from_schedule); 1758 blk_mq_sched_insert_requests(this_hctx, this_ctx, &rq_list, 1759 from_schedule); 1760 } while(!list_empty(&list)); 1761 } 1762 1763 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio, 1764 unsigned int nr_segs) 1765 { 1766 if (bio->bi_opf & REQ_RAHEAD) 1767 rq->cmd_flags |= REQ_FAILFAST_MASK; 1768 1769 rq->__sector = bio->bi_iter.bi_sector; 1770 rq->write_hint = bio->bi_write_hint; 1771 blk_rq_bio_prep(rq, bio, nr_segs); 1772 1773 blk_account_io_start(rq, true); 1774 } 1775 1776 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, 1777 struct request *rq, 1778 blk_qc_t *cookie, bool last) 1779 { 1780 struct request_queue *q = rq->q; 1781 struct blk_mq_queue_data bd = { 1782 .rq = rq, 1783 .last = last, 1784 }; 1785 blk_qc_t new_cookie; 1786 blk_status_t ret; 1787 1788 new_cookie = request_to_qc_t(hctx, rq); 1789 1790 /* 1791 * For OK queue, we are done. For error, caller may kill it. 1792 * Any other error (busy), just add it to our list as we 1793 * previously would have done. 
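	 *
	 * (Concretely: BLK_STS_OK means dispatched; BLK_STS_RESOURCE and
	 *  BLK_STS_DEV_RESOURCE mean "requeue and retry later"; any other
	 *  status is a hard error that the caller ends the request with.)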
1794 */ 1795 ret = q->mq_ops->queue_rq(hctx, &bd); 1796 switch (ret) { 1797 case BLK_STS_OK: 1798 blk_mq_update_dispatch_busy(hctx, false); 1799 *cookie = new_cookie; 1800 break; 1801 case BLK_STS_RESOURCE: 1802 case BLK_STS_DEV_RESOURCE: 1803 blk_mq_update_dispatch_busy(hctx, true); 1804 __blk_mq_requeue_request(rq); 1805 break; 1806 default: 1807 blk_mq_update_dispatch_busy(hctx, false); 1808 *cookie = BLK_QC_T_NONE; 1809 break; 1810 } 1811 1812 return ret; 1813 } 1814 1815 static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, 1816 struct request *rq, 1817 blk_qc_t *cookie, 1818 bool bypass_insert, bool last) 1819 { 1820 struct request_queue *q = rq->q; 1821 bool run_queue = true; 1822 1823 /* 1824 * RCU or SRCU read lock is needed before checking quiesced flag. 1825 * 1826 * When queue is stopped or quiesced, ignore 'bypass_insert' from 1827 * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller, 1828 * and avoid driver to try to dispatch again. 1829 */ 1830 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) { 1831 run_queue = false; 1832 bypass_insert = false; 1833 goto insert; 1834 } 1835 1836 if (q->elevator && !bypass_insert) 1837 goto insert; 1838 1839 if (!blk_mq_get_dispatch_budget(hctx)) 1840 goto insert; 1841 1842 if (!blk_mq_get_driver_tag(rq)) { 1843 blk_mq_put_dispatch_budget(hctx); 1844 goto insert; 1845 } 1846 1847 return __blk_mq_issue_directly(hctx, rq, cookie, last); 1848 insert: 1849 if (bypass_insert) 1850 return BLK_STS_RESOURCE; 1851 1852 blk_mq_request_bypass_insert(rq, run_queue); 1853 return BLK_STS_OK; 1854 } 1855 1856 /** 1857 * blk_mq_try_issue_directly - Try to send a request directly to device driver. 1858 * @hctx: Pointer of the associated hardware queue. 1859 * @rq: Pointer to request to be sent. 1860 * @cookie: Request queue cookie. 1861 * 1862 * If the device has enough resources to accept a new request now, send the 1863 * request directly to device driver. Else, insert at hctx->dispatch queue, so 1864 * we can try send it another time in the future. Requests inserted at this 1865 * queue have higher priority. 
1866 */ 1867 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, 1868 struct request *rq, blk_qc_t *cookie) 1869 { 1870 blk_status_t ret; 1871 int srcu_idx; 1872 1873 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); 1874 1875 hctx_lock(hctx, &srcu_idx); 1876 1877 ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true); 1878 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) 1879 blk_mq_request_bypass_insert(rq, true); 1880 else if (ret != BLK_STS_OK) 1881 blk_mq_end_request(rq, ret); 1882 1883 hctx_unlock(hctx, srcu_idx); 1884 } 1885 1886 blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last) 1887 { 1888 blk_status_t ret; 1889 int srcu_idx; 1890 blk_qc_t unused_cookie; 1891 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 1892 1893 hctx_lock(hctx, &srcu_idx); 1894 ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last); 1895 hctx_unlock(hctx, srcu_idx); 1896 1897 return ret; 1898 } 1899 1900 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, 1901 struct list_head *list) 1902 { 1903 while (!list_empty(list)) { 1904 blk_status_t ret; 1905 struct request *rq = list_first_entry(list, struct request, 1906 queuelist); 1907 1908 list_del_init(&rq->queuelist); 1909 ret = blk_mq_request_issue_directly(rq, list_empty(list)); 1910 if (ret != BLK_STS_OK) { 1911 if (ret == BLK_STS_RESOURCE || 1912 ret == BLK_STS_DEV_RESOURCE) { 1913 blk_mq_request_bypass_insert(rq, 1914 list_empty(list)); 1915 break; 1916 } 1917 blk_mq_end_request(rq, ret); 1918 } 1919 } 1920 1921 /* 1922 * If we didn't flush the entire list, we could have told 1923 * the driver there was more coming, but that turned out to 1924 * be a lie. 1925 */ 1926 if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs) 1927 hctx->queue->mq_ops->commit_rqs(hctx); 1928 } 1929 1930 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq) 1931 { 1932 list_add_tail(&rq->queuelist, &plug->mq_list); 1933 plug->rq_count++; 1934 if (!plug->multiple_queues && !list_is_singular(&plug->mq_list)) { 1935 struct request *tmp; 1936 1937 tmp = list_first_entry(&plug->mq_list, struct request, 1938 queuelist); 1939 if (tmp->q != rq->q) 1940 plug->multiple_queues = true; 1941 } 1942 } 1943 1944 /** 1945 * blk_mq_make_request - Create and send a request to block device. 1946 * @q: Request queue pointer. 1947 * @bio: Bio pointer. 1948 * 1949 * Builds up a request structure from @q and @bio and send to the device. The 1950 * request may not be queued directly to hardware if: 1951 * * This request can be merged with another one 1952 * * We want to place request at plug queue for possible future merging 1953 * * There is an IO scheduler active at this queue 1954 * 1955 * It will not queue the request if there is an error with the bio, or at the 1956 * request creation. 1957 * 1958 * Returns: Request queue cookie. 
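 * The cookie can later be passed to blk_poll() when polling for the
 * completion of a REQ_HIPRI request.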
1959 */ 1960 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) 1961 { 1962 const int is_sync = op_is_sync(bio->bi_opf); 1963 const int is_flush_fua = op_is_flush(bio->bi_opf); 1964 struct blk_mq_alloc_data data = { .flags = 0}; 1965 struct request *rq; 1966 struct blk_plug *plug; 1967 struct request *same_queue_rq = NULL; 1968 unsigned int nr_segs; 1969 blk_qc_t cookie; 1970 1971 blk_queue_bounce(q, &bio); 1972 __blk_queue_split(q, &bio, &nr_segs); 1973 1974 if (!bio_integrity_prep(bio)) 1975 return BLK_QC_T_NONE; 1976 1977 if (!is_flush_fua && !blk_queue_nomerges(q) && 1978 blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq)) 1979 return BLK_QC_T_NONE; 1980 1981 if (blk_mq_sched_bio_merge(q, bio, nr_segs)) 1982 return BLK_QC_T_NONE; 1983 1984 rq_qos_throttle(q, bio); 1985 1986 data.cmd_flags = bio->bi_opf; 1987 rq = blk_mq_get_request(q, bio, &data); 1988 if (unlikely(!rq)) { 1989 rq_qos_cleanup(q, bio); 1990 if (bio->bi_opf & REQ_NOWAIT) 1991 bio_wouldblock_error(bio); 1992 return BLK_QC_T_NONE; 1993 } 1994 1995 trace_block_getrq(q, bio, bio->bi_opf); 1996 1997 rq_qos_track(q, rq, bio); 1998 1999 cookie = request_to_qc_t(data.hctx, rq); 2000 2001 blk_mq_bio_to_request(rq, bio, nr_segs); 2002 2003 plug = blk_mq_plug(q, bio); 2004 if (unlikely(is_flush_fua)) { 2005 /* Bypass scheduler for flush requests */ 2006 blk_insert_flush(rq); 2007 blk_mq_run_hw_queue(data.hctx, true); 2008 } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs || 2009 !blk_queue_nonrot(q))) { 2010 /* 2011 * Use plugging if we have a ->commit_rqs() hook as well, as 2012 * we know the driver uses bd->last in a smart fashion. 2013 * 2014 * Use normal plugging if this disk is slow HDD, as sequential 2015 * IO may benefit a lot from plug merging. 2016 */ 2017 unsigned int request_count = plug->rq_count; 2018 struct request *last = NULL; 2019 2020 if (!request_count) 2021 trace_block_plug(q); 2022 else 2023 last = list_entry_rq(plug->mq_list.prev); 2024 2025 if (request_count >= BLK_MAX_REQUEST_COUNT || (last && 2026 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) { 2027 blk_flush_plug_list(plug, false); 2028 trace_block_plug(q); 2029 } 2030 2031 blk_add_rq_to_plug(plug, rq); 2032 } else if (q->elevator) { 2033 /* Insert the request at the IO scheduler queue */ 2034 blk_mq_sched_insert_request(rq, false, true, true); 2035 } else if (plug && !blk_queue_nomerges(q)) { 2036 /* 2037 * We do limited plugging. If the bio can be merged, do that. 2038 * Otherwise the existing request in the plug list will be 2039 * issued. So the plug list will have one request at most 2040 * The plug list might get flushed before this. If that happens, 2041 * the plug list is empty, and same_queue_rq is invalid. 2042 */ 2043 if (list_empty(&plug->mq_list)) 2044 same_queue_rq = NULL; 2045 if (same_queue_rq) { 2046 list_del_init(&same_queue_rq->queuelist); 2047 plug->rq_count--; 2048 } 2049 blk_add_rq_to_plug(plug, rq); 2050 trace_block_plug(q); 2051 2052 if (same_queue_rq) { 2053 data.hctx = same_queue_rq->mq_hctx; 2054 trace_block_unplug(q, 1, true); 2055 blk_mq_try_issue_directly(data.hctx, same_queue_rq, 2056 &cookie); 2057 } 2058 } else if ((q->nr_hw_queues > 1 && is_sync) || 2059 !data.hctx->dispatch_busy) { 2060 /* 2061 * There is no scheduler and we can try to send directly 2062 * to the hardware. 2063 */ 2064 blk_mq_try_issue_directly(data.hctx, rq, &cookie); 2065 } else { 2066 /* Default case. 
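Insert the request into the software queue and kick an asynchronous run of the hardware queue.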
*/ 2067 blk_mq_sched_insert_request(rq, false, true, true); 2068 } 2069 2070 return cookie; 2071 } 2072 2073 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, 2074 unsigned int hctx_idx) 2075 { 2076 struct page *page; 2077 2078 if (tags->rqs && set->ops->exit_request) { 2079 int i; 2080 2081 for (i = 0; i < tags->nr_tags; i++) { 2082 struct request *rq = tags->static_rqs[i]; 2083 2084 if (!rq) 2085 continue; 2086 set->ops->exit_request(set, rq, hctx_idx); 2087 tags->static_rqs[i] = NULL; 2088 } 2089 } 2090 2091 while (!list_empty(&tags->page_list)) { 2092 page = list_first_entry(&tags->page_list, struct page, lru); 2093 list_del_init(&page->lru); 2094 /* 2095 * Remove kmemleak object previously allocated in 2096 * blk_mq_alloc_rqs(). 2097 */ 2098 kmemleak_free(page_address(page)); 2099 __free_pages(page, page->private); 2100 } 2101 } 2102 2103 void blk_mq_free_rq_map(struct blk_mq_tags *tags) 2104 { 2105 kfree(tags->rqs); 2106 tags->rqs = NULL; 2107 kfree(tags->static_rqs); 2108 tags->static_rqs = NULL; 2109 2110 blk_mq_free_tags(tags); 2111 } 2112 2113 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, 2114 unsigned int hctx_idx, 2115 unsigned int nr_tags, 2116 unsigned int reserved_tags) 2117 { 2118 struct blk_mq_tags *tags; 2119 int node; 2120 2121 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx); 2122 if (node == NUMA_NO_NODE) 2123 node = set->numa_node; 2124 2125 tags = blk_mq_init_tags(nr_tags, reserved_tags, node, 2126 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags)); 2127 if (!tags) 2128 return NULL; 2129 2130 tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *), 2131 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, 2132 node); 2133 if (!tags->rqs) { 2134 blk_mq_free_tags(tags); 2135 return NULL; 2136 } 2137 2138 tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *), 2139 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, 2140 node); 2141 if (!tags->static_rqs) { 2142 kfree(tags->rqs); 2143 blk_mq_free_tags(tags); 2144 return NULL; 2145 } 2146 2147 return tags; 2148 } 2149 2150 static size_t order_to_size(unsigned int order) 2151 { 2152 return (size_t)PAGE_SIZE << order; 2153 } 2154 2155 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, 2156 unsigned int hctx_idx, int node) 2157 { 2158 int ret; 2159 2160 if (set->ops->init_request) { 2161 ret = set->ops->init_request(set, rq, hctx_idx, node); 2162 if (ret) 2163 return ret; 2164 } 2165 2166 WRITE_ONCE(rq->state, MQ_RQ_IDLE); 2167 return 0; 2168 } 2169 2170 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, 2171 unsigned int hctx_idx, unsigned int depth) 2172 { 2173 unsigned int i, j, entries_per_page, max_order = 4; 2174 size_t rq_size, left; 2175 int node; 2176 2177 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx); 2178 if (node == NUMA_NO_NODE) 2179 node = set->numa_node; 2180 2181 INIT_LIST_HEAD(&tags->page_list); 2182 2183 /* 2184 * rq_size is the size of the request plus driver payload, rounded 2185 * to the cacheline size 2186 */ 2187 rq_size = round_up(sizeof(struct request) + set->cmd_size, 2188 cache_line_size()); 2189 left = rq_size * depth; 2190 2191 for (i = 0; i < depth; ) { 2192 int this_order = max_order; 2193 struct page *page; 2194 int to_do; 2195 void *p; 2196 2197 while (this_order && left < order_to_size(this_order - 1)) 2198 this_order--; 2199 2200 do { 2201 page = alloc_pages_node(node, 2202 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO, 2203 this_order); 2204 if (page) 2205 
break; 2206 if (!this_order--) 2207 break; 2208 if (order_to_size(this_order) < rq_size) 2209 break; 2210 } while (1); 2211 2212 if (!page) 2213 goto fail; 2214 2215 page->private = this_order; 2216 list_add_tail(&page->lru, &tags->page_list); 2217 2218 p = page_address(page); 2219 /* 2220 * Allow kmemleak to scan these pages as they contain pointers 2221 * to additional allocations like via ops->init_request(). 2222 */ 2223 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO); 2224 entries_per_page = order_to_size(this_order) / rq_size; 2225 to_do = min(entries_per_page, depth - i); 2226 left -= to_do * rq_size; 2227 for (j = 0; j < to_do; j++) { 2228 struct request *rq = p; 2229 2230 tags->static_rqs[i] = rq; 2231 if (blk_mq_init_request(set, rq, hctx_idx, node)) { 2232 tags->static_rqs[i] = NULL; 2233 goto fail; 2234 } 2235 2236 p += rq_size; 2237 i++; 2238 } 2239 } 2240 return 0; 2241 2242 fail: 2243 blk_mq_free_rqs(set, tags, hctx_idx); 2244 return -ENOMEM; 2245 } 2246 2247 /* 2248 * 'cpu' is going away. splice any existing rq_list entries from this 2249 * software queue to the hw queue dispatch list, and ensure that it 2250 * gets run. 2251 */ 2252 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node) 2253 { 2254 struct blk_mq_hw_ctx *hctx; 2255 struct blk_mq_ctx *ctx; 2256 LIST_HEAD(tmp); 2257 enum hctx_type type; 2258 2259 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead); 2260 ctx = __blk_mq_get_ctx(hctx->queue, cpu); 2261 type = hctx->type; 2262 2263 spin_lock(&ctx->lock); 2264 if (!list_empty(&ctx->rq_lists[type])) { 2265 list_splice_init(&ctx->rq_lists[type], &tmp); 2266 blk_mq_hctx_clear_pending(hctx, ctx); 2267 } 2268 spin_unlock(&ctx->lock); 2269 2270 if (list_empty(&tmp)) 2271 return 0; 2272 2273 spin_lock(&hctx->lock); 2274 list_splice_tail_init(&tmp, &hctx->dispatch); 2275 spin_unlock(&hctx->lock); 2276 2277 blk_mq_run_hw_queue(hctx, true); 2278 return 0; 2279 } 2280 2281 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx) 2282 { 2283 cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD, 2284 &hctx->cpuhp_dead); 2285 } 2286 2287 /* hctx->ctxs will be freed in queue's release handler */ 2288 static void blk_mq_exit_hctx(struct request_queue *q, 2289 struct blk_mq_tag_set *set, 2290 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) 2291 { 2292 if (blk_mq_hw_queue_mapped(hctx)) 2293 blk_mq_tag_idle(hctx); 2294 2295 if (set->ops->exit_request) 2296 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx); 2297 2298 if (set->ops->exit_hctx) 2299 set->ops->exit_hctx(hctx, hctx_idx); 2300 2301 blk_mq_remove_cpuhp(hctx); 2302 2303 spin_lock(&q->unused_hctx_lock); 2304 list_add(&hctx->hctx_list, &q->unused_hctx_list); 2305 spin_unlock(&q->unused_hctx_lock); 2306 } 2307 2308 static void blk_mq_exit_hw_queues(struct request_queue *q, 2309 struct blk_mq_tag_set *set, int nr_queue) 2310 { 2311 struct blk_mq_hw_ctx *hctx; 2312 unsigned int i; 2313 2314 queue_for_each_hw_ctx(q, hctx, i) { 2315 if (i == nr_queue) 2316 break; 2317 blk_mq_debugfs_unregister_hctx(hctx); 2318 blk_mq_exit_hctx(q, set, hctx, i); 2319 } 2320 } 2321 2322 static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set) 2323 { 2324 int hw_ctx_size = sizeof(struct blk_mq_hw_ctx); 2325 2326 BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu), 2327 __alignof__(struct blk_mq_hw_ctx)) != 2328 sizeof(struct blk_mq_hw_ctx)); 2329 2330 if (tag_set->flags & BLK_MQ_F_BLOCKING) 2331 hw_ctx_size += sizeof(struct srcu_struct); 2332 2333 return hw_ctx_size; 2334 } 2335 2336 
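/*
 * Second-stage initialization of a hardware context: register the CPU hotplug
 * notifier, attach the tag set's tags, and let the driver set up its per-hctx
 * data and the flush request via ->init_hctx() and ->init_request().
 */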
static int blk_mq_init_hctx(struct request_queue *q, 2337 struct blk_mq_tag_set *set, 2338 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) 2339 { 2340 hctx->queue_num = hctx_idx; 2341 2342 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead); 2343 2344 hctx->tags = set->tags[hctx_idx]; 2345 2346 if (set->ops->init_hctx && 2347 set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) 2348 goto unregister_cpu_notifier; 2349 2350 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, 2351 hctx->numa_node)) 2352 goto exit_hctx; 2353 return 0; 2354 2355 exit_hctx: 2356 if (set->ops->exit_hctx) 2357 set->ops->exit_hctx(hctx, hctx_idx); 2358 unregister_cpu_notifier: 2359 blk_mq_remove_cpuhp(hctx); 2360 return -1; 2361 } 2362 2363 static struct blk_mq_hw_ctx * 2364 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set, 2365 int node) 2366 { 2367 struct blk_mq_hw_ctx *hctx; 2368 gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY; 2369 2370 hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node); 2371 if (!hctx) 2372 goto fail_alloc_hctx; 2373 2374 if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node)) 2375 goto free_hctx; 2376 2377 atomic_set(&hctx->nr_active, 0); 2378 if (node == NUMA_NO_NODE) 2379 node = set->numa_node; 2380 hctx->numa_node = node; 2381 2382 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); 2383 spin_lock_init(&hctx->lock); 2384 INIT_LIST_HEAD(&hctx->dispatch); 2385 hctx->queue = q; 2386 hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED; 2387 2388 INIT_LIST_HEAD(&hctx->hctx_list); 2389 2390 /* 2391 * Allocate space for all possible cpus to avoid allocation at 2392 * runtime 2393 */ 2394 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *), 2395 gfp, node); 2396 if (!hctx->ctxs) 2397 goto free_cpumask; 2398 2399 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), 2400 gfp, node)) 2401 goto free_ctxs; 2402 hctx->nr_ctx = 0; 2403 2404 spin_lock_init(&hctx->dispatch_wait_lock); 2405 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake); 2406 INIT_LIST_HEAD(&hctx->dispatch_wait.entry); 2407 2408 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size, 2409 gfp); 2410 if (!hctx->fq) 2411 goto free_bitmap; 2412 2413 if (hctx->flags & BLK_MQ_F_BLOCKING) 2414 init_srcu_struct(hctx->srcu); 2415 blk_mq_hctx_kobj_init(hctx); 2416 2417 return hctx; 2418 2419 free_bitmap: 2420 sbitmap_free(&hctx->ctx_map); 2421 free_ctxs: 2422 kfree(hctx->ctxs); 2423 free_cpumask: 2424 free_cpumask_var(hctx->cpumask); 2425 free_hctx: 2426 kfree(hctx); 2427 fail_alloc_hctx: 2428 return NULL; 2429 } 2430 2431 static void blk_mq_init_cpu_queues(struct request_queue *q, 2432 unsigned int nr_hw_queues) 2433 { 2434 struct blk_mq_tag_set *set = q->tag_set; 2435 unsigned int i, j; 2436 2437 for_each_possible_cpu(i) { 2438 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); 2439 struct blk_mq_hw_ctx *hctx; 2440 int k; 2441 2442 __ctx->cpu = i; 2443 spin_lock_init(&__ctx->lock); 2444 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++) 2445 INIT_LIST_HEAD(&__ctx->rq_lists[k]); 2446 2447 __ctx->queue = q; 2448 2449 /* 2450 * Set local node, IFF we have more than one hw queue. 
If 2451 * not, we remain on the home node of the device 2452 */ 2453 for (j = 0; j < set->nr_maps; j++) { 2454 hctx = blk_mq_map_queue_type(q, j, i); 2455 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE) 2456 hctx->numa_node = local_memory_node(cpu_to_node(i)); 2457 } 2458 } 2459 } 2460 2461 static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx) 2462 { 2463 int ret = 0; 2464 2465 set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx, 2466 set->queue_depth, set->reserved_tags); 2467 if (!set->tags[hctx_idx]) 2468 return false; 2469 2470 ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx, 2471 set->queue_depth); 2472 if (!ret) 2473 return true; 2474 2475 blk_mq_free_rq_map(set->tags[hctx_idx]); 2476 set->tags[hctx_idx] = NULL; 2477 return false; 2478 } 2479 2480 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set, 2481 unsigned int hctx_idx) 2482 { 2483 if (set->tags && set->tags[hctx_idx]) { 2484 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx); 2485 blk_mq_free_rq_map(set->tags[hctx_idx]); 2486 set->tags[hctx_idx] = NULL; 2487 } 2488 } 2489 2490 static void blk_mq_map_swqueue(struct request_queue *q) 2491 { 2492 unsigned int i, j, hctx_idx; 2493 struct blk_mq_hw_ctx *hctx; 2494 struct blk_mq_ctx *ctx; 2495 struct blk_mq_tag_set *set = q->tag_set; 2496 2497 queue_for_each_hw_ctx(q, hctx, i) { 2498 cpumask_clear(hctx->cpumask); 2499 hctx->nr_ctx = 0; 2500 hctx->dispatch_from = NULL; 2501 } 2502 2503 /* 2504 * Map software to hardware queues. 2505 * 2506 * If the cpu isn't present, the cpu is mapped to first hctx. 2507 */ 2508 for_each_possible_cpu(i) { 2509 hctx_idx = set->map[HCTX_TYPE_DEFAULT].mq_map[i]; 2510 /* unmapped hw queue can be remapped after CPU topo changed */ 2511 if (!set->tags[hctx_idx] && 2512 !__blk_mq_alloc_rq_map(set, hctx_idx)) { 2513 /* 2514 * If tags initialization fail for some hctx, 2515 * that hctx won't be brought online. In this 2516 * case, remap the current ctx to hctx[0] which 2517 * is guaranteed to always have tags allocated 2518 */ 2519 set->map[HCTX_TYPE_DEFAULT].mq_map[i] = 0; 2520 } 2521 2522 ctx = per_cpu_ptr(q->queue_ctx, i); 2523 for (j = 0; j < set->nr_maps; j++) { 2524 if (!set->map[j].nr_queues) { 2525 ctx->hctxs[j] = blk_mq_map_queue_type(q, 2526 HCTX_TYPE_DEFAULT, i); 2527 continue; 2528 } 2529 2530 hctx = blk_mq_map_queue_type(q, j, i); 2531 ctx->hctxs[j] = hctx; 2532 /* 2533 * If the CPU is already set in the mask, then we've 2534 * mapped this one already. This can happen if 2535 * devices share queues across queue maps. 2536 */ 2537 if (cpumask_test_cpu(i, hctx->cpumask)) 2538 continue; 2539 2540 cpumask_set_cpu(i, hctx->cpumask); 2541 hctx->type = j; 2542 ctx->index_hw[hctx->type] = hctx->nr_ctx; 2543 hctx->ctxs[hctx->nr_ctx++] = ctx; 2544 2545 /* 2546 * If the nr_ctx type overflows, we have exceeded the 2547 * amount of sw queues we can support. 2548 */ 2549 BUG_ON(!hctx->nr_ctx); 2550 } 2551 2552 for (; j < HCTX_MAX_TYPES; j++) 2553 ctx->hctxs[j] = blk_mq_map_queue_type(q, 2554 HCTX_TYPE_DEFAULT, i); 2555 } 2556 2557 queue_for_each_hw_ctx(q, hctx, i) { 2558 /* 2559 * If no software queues are mapped to this hardware queue, 2560 * disable it and free the request entries. 2561 */ 2562 if (!hctx->nr_ctx) { 2563 /* Never unmap queue 0. 
We need it as a 2564 * fallback in case a new remap fails to 2565 * allocate tags 2566 */ 2567 if (i && set->tags[i]) 2568 blk_mq_free_map_and_requests(set, i); 2569 2570 hctx->tags = NULL; 2571 continue; 2572 } 2573 2574 hctx->tags = set->tags[i]; 2575 WARN_ON(!hctx->tags); 2576 2577 /* 2578 * Set the map size to the number of mapped software queues. 2579 * This is more accurate and more efficient than looping 2580 * over all possibly mapped software queues. 2581 */ 2582 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx); 2583 2584 /* 2585 * Initialize batch round-robin counts 2586 */ 2587 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx); 2588 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; 2589 } 2590 } 2591 2592 /* 2593 * Caller needs to ensure that we're either frozen/quiesced, or that 2594 * the queue isn't live yet. 2595 */ 2596 static void queue_set_hctx_shared(struct request_queue *q, bool shared) 2597 { 2598 struct blk_mq_hw_ctx *hctx; 2599 int i; 2600 2601 queue_for_each_hw_ctx(q, hctx, i) { 2602 if (shared) 2603 hctx->flags |= BLK_MQ_F_TAG_SHARED; 2604 else 2605 hctx->flags &= ~BLK_MQ_F_TAG_SHARED; 2606 } 2607 } 2608 2609 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, 2610 bool shared) 2611 { 2612 struct request_queue *q; 2613 2614 lockdep_assert_held(&set->tag_list_lock); 2615 2616 list_for_each_entry(q, &set->tag_list, tag_set_list) { 2617 blk_mq_freeze_queue(q); 2618 queue_set_hctx_shared(q, shared); 2619 blk_mq_unfreeze_queue(q); 2620 } 2621 } 2622 2623 static void blk_mq_del_queue_tag_set(struct request_queue *q) 2624 { 2625 struct blk_mq_tag_set *set = q->tag_set; 2626 2627 mutex_lock(&set->tag_list_lock); 2628 list_del_rcu(&q->tag_set_list); 2629 if (list_is_singular(&set->tag_list)) { 2630 /* just transitioned to unshared */ 2631 set->flags &= ~BLK_MQ_F_TAG_SHARED; 2632 /* update existing queue */ 2633 blk_mq_update_tag_set_depth(set, false); 2634 } 2635 mutex_unlock(&set->tag_list_lock); 2636 INIT_LIST_HEAD(&q->tag_set_list); 2637 } 2638 2639 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, 2640 struct request_queue *q) 2641 { 2642 mutex_lock(&set->tag_list_lock); 2643 2644 /* 2645 * Check to see if we're transitioning to shared (from 1 to 2 queues).
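 * queue_set_hctx_shared() requires the queue to be frozen;
 * blk_mq_update_tag_set_depth() takes care of that for queues that are
 * already live.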
2646 */ 2647 if (!list_empty(&set->tag_list) && 2648 !(set->flags & BLK_MQ_F_TAG_SHARED)) { 2649 set->flags |= BLK_MQ_F_TAG_SHARED; 2650 /* update existing queue */ 2651 blk_mq_update_tag_set_depth(set, true); 2652 } 2653 if (set->flags & BLK_MQ_F_TAG_SHARED) 2654 queue_set_hctx_shared(q, true); 2655 list_add_tail_rcu(&q->tag_set_list, &set->tag_list); 2656 2657 mutex_unlock(&set->tag_list_lock); 2658 } 2659 2660 /* All allocations will be freed in release handler of q->mq_kobj */ 2661 static int blk_mq_alloc_ctxs(struct request_queue *q) 2662 { 2663 struct blk_mq_ctxs *ctxs; 2664 int cpu; 2665 2666 ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL); 2667 if (!ctxs) 2668 return -ENOMEM; 2669 2670 ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx); 2671 if (!ctxs->queue_ctx) 2672 goto fail; 2673 2674 for_each_possible_cpu(cpu) { 2675 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu); 2676 ctx->ctxs = ctxs; 2677 } 2678 2679 q->mq_kobj = &ctxs->kobj; 2680 q->queue_ctx = ctxs->queue_ctx; 2681 2682 return 0; 2683 fail: 2684 kfree(ctxs); 2685 return -ENOMEM; 2686 } 2687 2688 /* 2689 * It is the actual release handler for mq, but we do it from 2690 * request queue's release handler for avoiding use-after-free 2691 * and headache because q->mq_kobj shouldn't have been introduced, 2692 * but we can't group ctx/kctx kobj without it. 2693 */ 2694 void blk_mq_release(struct request_queue *q) 2695 { 2696 struct blk_mq_hw_ctx *hctx, *next; 2697 int i; 2698 2699 queue_for_each_hw_ctx(q, hctx, i) 2700 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list)); 2701 2702 /* all hctx are in .unused_hctx_list now */ 2703 list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) { 2704 list_del_init(&hctx->hctx_list); 2705 kobject_put(&hctx->kobj); 2706 } 2707 2708 kfree(q->queue_hw_ctx); 2709 2710 /* 2711 * release .mq_kobj and sw queue's kobject now because 2712 * both share lifetime with request queue. 2713 */ 2714 blk_mq_sysfs_deinit(q); 2715 } 2716 2717 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) 2718 { 2719 struct request_queue *uninit_q, *q; 2720 2721 uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node); 2722 if (!uninit_q) 2723 return ERR_PTR(-ENOMEM); 2724 2725 /* 2726 * Initialize the queue without an elevator. device_add_disk() will do 2727 * the initialization. 2728 */ 2729 q = blk_mq_init_allocated_queue(set, uninit_q, false); 2730 if (IS_ERR(q)) 2731 blk_cleanup_queue(uninit_q); 2732 2733 return q; 2734 } 2735 EXPORT_SYMBOL(blk_mq_init_queue); 2736 2737 /* 2738 * Helper for setting up a queue with mq ops, given queue depth, and 2739 * the passed in mq ops flags. 
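 *
 * A minimal usage sketch (illustrative; "my_dev" and "my_mq_ops" are made-up
 * driver names, not part of this file):
 *
 *	q = blk_mq_init_sq_queue(&my_dev->tag_set, &my_mq_ops, 64,
 *				 BLK_MQ_F_SHOULD_MERGE);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);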
2740 */ 2741 struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set, 2742 const struct blk_mq_ops *ops, 2743 unsigned int queue_depth, 2744 unsigned int set_flags) 2745 { 2746 struct request_queue *q; 2747 int ret; 2748 2749 memset(set, 0, sizeof(*set)); 2750 set->ops = ops; 2751 set->nr_hw_queues = 1; 2752 set->nr_maps = 1; 2753 set->queue_depth = queue_depth; 2754 set->numa_node = NUMA_NO_NODE; 2755 set->flags = set_flags; 2756 2757 ret = blk_mq_alloc_tag_set(set); 2758 if (ret) 2759 return ERR_PTR(ret); 2760 2761 q = blk_mq_init_queue(set); 2762 if (IS_ERR(q)) { 2763 blk_mq_free_tag_set(set); 2764 return q; 2765 } 2766 2767 return q; 2768 } 2769 EXPORT_SYMBOL(blk_mq_init_sq_queue); 2770 2771 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx( 2772 struct blk_mq_tag_set *set, struct request_queue *q, 2773 int hctx_idx, int node) 2774 { 2775 struct blk_mq_hw_ctx *hctx = NULL, *tmp; 2776 2777 /* reuse dead hctx first */ 2778 spin_lock(&q->unused_hctx_lock); 2779 list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) { 2780 if (tmp->numa_node == node) { 2781 hctx = tmp; 2782 break; 2783 } 2784 } 2785 if (hctx) 2786 list_del_init(&hctx->hctx_list); 2787 spin_unlock(&q->unused_hctx_lock); 2788 2789 if (!hctx) 2790 hctx = blk_mq_alloc_hctx(q, set, node); 2791 if (!hctx) 2792 goto fail; 2793 2794 if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) 2795 goto free_hctx; 2796 2797 return hctx; 2798 2799 free_hctx: 2800 kobject_put(&hctx->kobj); 2801 fail: 2802 return NULL; 2803 } 2804 2805 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, 2806 struct request_queue *q) 2807 { 2808 int i, j, end; 2809 struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx; 2810 2811 if (q->nr_hw_queues < set->nr_hw_queues) { 2812 struct blk_mq_hw_ctx **new_hctxs; 2813 2814 new_hctxs = kcalloc_node(set->nr_hw_queues, 2815 sizeof(*new_hctxs), GFP_KERNEL, 2816 set->numa_node); 2817 if (!new_hctxs) 2818 return; 2819 if (hctxs) 2820 memcpy(new_hctxs, hctxs, q->nr_hw_queues * 2821 sizeof(*hctxs)); 2822 q->queue_hw_ctx = new_hctxs; 2823 q->nr_hw_queues = set->nr_hw_queues; 2824 kfree(hctxs); 2825 hctxs = new_hctxs; 2826 } 2827 2828 /* protect against switching io scheduler */ 2829 mutex_lock(&q->sysfs_lock); 2830 for (i = 0; i < set->nr_hw_queues; i++) { 2831 int node; 2832 struct blk_mq_hw_ctx *hctx; 2833 2834 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], i); 2835 /* 2836 * If the hw queue has been mapped to another numa node, 2837 * we need to realloc the hctx. If allocation fails, fallback 2838 * to use the previous one. 2839 */ 2840 if (hctxs[i] && (hctxs[i]->numa_node == node)) 2841 continue; 2842 2843 hctx = blk_mq_alloc_and_init_hctx(set, q, i, node); 2844 if (hctx) { 2845 if (hctxs[i]) 2846 blk_mq_exit_hctx(q, set, hctxs[i], i); 2847 hctxs[i] = hctx; 2848 } else { 2849 if (hctxs[i]) 2850 pr_warn("Allocate new hctx on node %d fails,\ 2851 fallback to previous one on node %d\n", 2852 node, hctxs[i]->numa_node); 2853 else 2854 break; 2855 } 2856 } 2857 /* 2858 * Increasing nr_hw_queues fails. Free the newly allocated 2859 * hctxs and keep the previous q->nr_hw_queues. 
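 * The same cleanup loop below also handles shrinking: on success with a
 * smaller nr_hw_queues, the now-unused hctxs at the tail are torn down.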
2860 */ 2861 if (i != set->nr_hw_queues) { 2862 j = q->nr_hw_queues; 2863 end = i; 2864 } else { 2865 j = i; 2866 end = q->nr_hw_queues; 2867 q->nr_hw_queues = set->nr_hw_queues; 2868 } 2869 2870 for (; j < end; j++) { 2871 struct blk_mq_hw_ctx *hctx = hctxs[j]; 2872 2873 if (hctx) { 2874 if (hctx->tags) 2875 blk_mq_free_map_and_requests(set, j); 2876 blk_mq_exit_hctx(q, set, hctx, j); 2877 hctxs[j] = NULL; 2878 } 2879 } 2880 mutex_unlock(&q->sysfs_lock); 2881 } 2882 2883 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, 2884 struct request_queue *q, 2885 bool elevator_init) 2886 { 2887 /* mark the queue as mq asap */ 2888 q->mq_ops = set->ops; 2889 2890 q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn, 2891 blk_mq_poll_stats_bkt, 2892 BLK_MQ_POLL_STATS_BKTS, q); 2893 if (!q->poll_cb) 2894 goto err_exit; 2895 2896 if (blk_mq_alloc_ctxs(q)) 2897 goto err_poll; 2898 2899 /* init q->mq_kobj and sw queues' kobjects */ 2900 blk_mq_sysfs_init(q); 2901 2902 INIT_LIST_HEAD(&q->unused_hctx_list); 2903 spin_lock_init(&q->unused_hctx_lock); 2904 2905 blk_mq_realloc_hw_ctxs(set, q); 2906 if (!q->nr_hw_queues) 2907 goto err_hctxs; 2908 2909 INIT_WORK(&q->timeout_work, blk_mq_timeout_work); 2910 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ); 2911 2912 q->tag_set = set; 2913 2914 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; 2915 if (set->nr_maps > HCTX_TYPE_POLL && 2916 set->map[HCTX_TYPE_POLL].nr_queues) 2917 blk_queue_flag_set(QUEUE_FLAG_POLL, q); 2918 2919 q->sg_reserved_size = INT_MAX; 2920 2921 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work); 2922 INIT_LIST_HEAD(&q->requeue_list); 2923 spin_lock_init(&q->requeue_lock); 2924 2925 blk_queue_make_request(q, blk_mq_make_request); 2926 2927 /* 2928 * Do this after blk_queue_make_request() overrides it... 2929 */ 2930 q->nr_requests = set->queue_depth; 2931 2932 /* 2933 * Default to classic polling 2934 */ 2935 q->poll_nsec = BLK_MQ_POLL_CLASSIC; 2936 2937 blk_mq_init_cpu_queues(q, set->nr_hw_queues); 2938 blk_mq_add_queue_tag_set(set, q); 2939 blk_mq_map_swqueue(q); 2940 2941 if (elevator_init) 2942 elevator_init_mq(q); 2943 2944 return q; 2945 2946 err_hctxs: 2947 kfree(q->queue_hw_ctx); 2948 q->nr_hw_queues = 0; 2949 blk_mq_sysfs_deinit(q); 2950 err_poll: 2951 blk_stat_free_callback(q->poll_cb); 2952 q->poll_cb = NULL; 2953 err_exit: 2954 q->mq_ops = NULL; 2955 return ERR_PTR(-ENOMEM); 2956 } 2957 EXPORT_SYMBOL(blk_mq_init_allocated_queue); 2958 2959 /* tags can _not_ be used after returning from blk_mq_exit_queue */ 2960 void blk_mq_exit_queue(struct request_queue *q) 2961 { 2962 struct blk_mq_tag_set *set = q->tag_set; 2963 2964 blk_mq_del_queue_tag_set(q); 2965 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); 2966 } 2967 2968 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) 2969 { 2970 int i; 2971 2972 for (i = 0; i < set->nr_hw_queues; i++) 2973 if (!__blk_mq_alloc_rq_map(set, i)) 2974 goto out_unwind; 2975 2976 return 0; 2977 2978 out_unwind: 2979 while (--i >= 0) 2980 blk_mq_free_rq_map(set->tags[i]); 2981 2982 return -ENOMEM; 2983 } 2984 2985 /* 2986 * Allocate the request maps associated with this tag_set. Note that this 2987 * may reduce the depth asked for, if memory is tight. set->queue_depth 2988 * will be updated to reflect the allocated depth. 
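 *
 * For example, a requested depth of 256 may be halved to 128, then 64, and
 * so on, as long as it stays at or above set->reserved_tags + BLK_MQ_TAG_MIN.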
2989 */ 2990 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) 2991 { 2992 unsigned int depth; 2993 int err; 2994 2995 depth = set->queue_depth; 2996 do { 2997 err = __blk_mq_alloc_rq_maps(set); 2998 if (!err) 2999 break; 3000 3001 set->queue_depth >>= 1; 3002 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) { 3003 err = -ENOMEM; 3004 break; 3005 } 3006 } while (set->queue_depth); 3007 3008 if (!set->queue_depth || err) { 3009 pr_err("blk-mq: failed to allocate request map\n"); 3010 return -ENOMEM; 3011 } 3012 3013 if (depth != set->queue_depth) 3014 pr_info("blk-mq: reduced tag depth (%u -> %u)\n", 3015 depth, set->queue_depth); 3016 3017 return 0; 3018 } 3019 3020 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set) 3021 { 3022 if (set->ops->map_queues && !is_kdump_kernel()) { 3023 int i; 3024 3025 /* 3026 * transport .map_queues is usually done in the following 3027 * way: 3028 * 3029 * for (queue = 0; queue < set->nr_hw_queues; queue++) { 3030 * mask = get_cpu_mask(queue) 3031 * for_each_cpu(cpu, mask) 3032 * set->map[x].mq_map[cpu] = queue; 3033 * } 3034 * 3035 * When we need to remap, the table has to be cleared for 3036 * killing stale mapping since one CPU may not be mapped 3037 * to any hw queue. 3038 */ 3039 for (i = 0; i < set->nr_maps; i++) 3040 blk_mq_clear_mq_map(&set->map[i]); 3041 3042 return set->ops->map_queues(set); 3043 } else { 3044 BUG_ON(set->nr_maps > 1); 3045 return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); 3046 } 3047 } 3048 3049 static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set, 3050 int cur_nr_hw_queues, int new_nr_hw_queues) 3051 { 3052 struct blk_mq_tags **new_tags; 3053 3054 if (cur_nr_hw_queues >= new_nr_hw_queues) 3055 return 0; 3056 3057 new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *), 3058 GFP_KERNEL, set->numa_node); 3059 if (!new_tags) 3060 return -ENOMEM; 3061 3062 if (set->tags) 3063 memcpy(new_tags, set->tags, cur_nr_hw_queues * 3064 sizeof(*set->tags)); 3065 kfree(set->tags); 3066 set->tags = new_tags; 3067 set->nr_hw_queues = new_nr_hw_queues; 3068 3069 return 0; 3070 } 3071 3072 /* 3073 * Alloc a tag set to be associated with one or more request queues. 3074 * May fail with EINVAL for various error conditions. May adjust the 3075 * requested depth down, if it's too large. In that case, the set 3076 * value will be stored in set->queue_depth. 3077 */ 3078 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) 3079 { 3080 int i, ret; 3081 3082 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS); 3083 3084 if (!set->nr_hw_queues) 3085 return -EINVAL; 3086 if (!set->queue_depth) 3087 return -EINVAL; 3088 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) 3089 return -EINVAL; 3090 3091 if (!set->ops->queue_rq) 3092 return -EINVAL; 3093 3094 if (!set->ops->get_budget ^ !set->ops->put_budget) 3095 return -EINVAL; 3096 3097 if (set->queue_depth > BLK_MQ_MAX_DEPTH) { 3098 pr_info("blk-mq: reduced tag depth to %u\n", 3099 BLK_MQ_MAX_DEPTH); 3100 set->queue_depth = BLK_MQ_MAX_DEPTH; 3101 } 3102 3103 if (!set->nr_maps) 3104 set->nr_maps = 1; 3105 else if (set->nr_maps > HCTX_MAX_TYPES) 3106 return -EINVAL; 3107 3108 /* 3109 * If a crashdump is active, then we are potentially in a very 3110 * memory constrained environment. Limit us to 1 queue and 3111 * 64 tags to prevent using too much memory. 
3112 */ 3113 if (is_kdump_kernel()) { 3114 set->nr_hw_queues = 1; 3115 set->nr_maps = 1; 3116 set->queue_depth = min(64U, set->queue_depth); 3117 } 3118 /* 3119 * There is no use for more h/w queues than cpus if we just have 3120 * a single map 3121 */ 3122 if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids) 3123 set->nr_hw_queues = nr_cpu_ids; 3124 3125 if (blk_mq_realloc_tag_set_tags(set, 0, set->nr_hw_queues) < 0) 3126 return -ENOMEM; 3127 3128 ret = -ENOMEM; 3129 for (i = 0; i < set->nr_maps; i++) { 3130 set->map[i].mq_map = kcalloc_node(nr_cpu_ids, 3131 sizeof(set->map[i].mq_map[0]), 3132 GFP_KERNEL, set->numa_node); 3133 if (!set->map[i].mq_map) 3134 goto out_free_mq_map; 3135 set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues; 3136 } 3137 3138 ret = blk_mq_update_queue_map(set); 3139 if (ret) 3140 goto out_free_mq_map; 3141 3142 ret = blk_mq_alloc_rq_maps(set); 3143 if (ret) 3144 goto out_free_mq_map; 3145 3146 mutex_init(&set->tag_list_lock); 3147 INIT_LIST_HEAD(&set->tag_list); 3148 3149 return 0; 3150 3151 out_free_mq_map: 3152 for (i = 0; i < set->nr_maps; i++) { 3153 kfree(set->map[i].mq_map); 3154 set->map[i].mq_map = NULL; 3155 } 3156 kfree(set->tags); 3157 set->tags = NULL; 3158 return ret; 3159 } 3160 EXPORT_SYMBOL(blk_mq_alloc_tag_set); 3161 3162 void blk_mq_free_tag_set(struct blk_mq_tag_set *set) 3163 { 3164 int i, j; 3165 3166 for (i = 0; i < set->nr_hw_queues; i++) 3167 blk_mq_free_map_and_requests(set, i); 3168 3169 for (j = 0; j < set->nr_maps; j++) { 3170 kfree(set->map[j].mq_map); 3171 set->map[j].mq_map = NULL; 3172 } 3173 3174 kfree(set->tags); 3175 set->tags = NULL; 3176 } 3177 EXPORT_SYMBOL(blk_mq_free_tag_set); 3178 3179 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) 3180 { 3181 struct blk_mq_tag_set *set = q->tag_set; 3182 struct blk_mq_hw_ctx *hctx; 3183 int i, ret; 3184 3185 if (!set) 3186 return -EINVAL; 3187 3188 if (q->nr_requests == nr) 3189 return 0; 3190 3191 blk_mq_freeze_queue(q); 3192 blk_mq_quiesce_queue(q); 3193 3194 ret = 0; 3195 queue_for_each_hw_ctx(q, hctx, i) { 3196 if (!hctx->tags) 3197 continue; 3198 /* 3199 * If we're using an MQ scheduler, just update the scheduler 3200 * queue depth. This is similar to what the old code would do. 3201 */ 3202 if (!hctx->sched_tags) { 3203 ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr, 3204 false); 3205 } else { 3206 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags, 3207 nr, true); 3208 } 3209 if (ret) 3210 break; 3211 if (q->elevator && q->elevator->type->ops.depth_updated) 3212 q->elevator->type->ops.depth_updated(hctx); 3213 } 3214 3215 if (!ret) 3216 q->nr_requests = nr; 3217 3218 blk_mq_unquiesce_queue(q); 3219 blk_mq_unfreeze_queue(q); 3220 3221 return ret; 3222 } 3223 3224 /* 3225 * request_queue and elevator_type pair. 3226 * It is just used by __blk_mq_update_nr_hw_queues to cache 3227 * the elevator_type associated with a request_queue. 
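 * The cached type lets blk_mq_elv_switch_back() restore the original I/O
 * scheduler once the hardware queues have been re-mapped.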
3228 */ 3229 struct blk_mq_qe_pair { 3230 struct list_head node; 3231 struct request_queue *q; 3232 struct elevator_type *type; 3233 }; 3234 3235 /* 3236 * Cache the elevator_type in qe pair list and switch the 3237 * io scheduler to 'none' 3238 */ 3239 static bool blk_mq_elv_switch_none(struct list_head *head, 3240 struct request_queue *q) 3241 { 3242 struct blk_mq_qe_pair *qe; 3243 3244 if (!q->elevator) 3245 return true; 3246 3247 qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY); 3248 if (!qe) 3249 return false; 3250 3251 INIT_LIST_HEAD(&qe->node); 3252 qe->q = q; 3253 qe->type = q->elevator->type; 3254 list_add(&qe->node, head); 3255 3256 mutex_lock(&q->sysfs_lock); 3257 /* 3258 * After elevator_switch_mq, the previous elevator_queue will be 3259 * released by elevator_release. The reference of the io scheduler 3260 * module get by elevator_get will also be put. So we need to get 3261 * a reference of the io scheduler module here to prevent it to be 3262 * removed. 3263 */ 3264 __module_get(qe->type->elevator_owner); 3265 elevator_switch_mq(q, NULL); 3266 mutex_unlock(&q->sysfs_lock); 3267 3268 return true; 3269 } 3270 3271 static void blk_mq_elv_switch_back(struct list_head *head, 3272 struct request_queue *q) 3273 { 3274 struct blk_mq_qe_pair *qe; 3275 struct elevator_type *t = NULL; 3276 3277 list_for_each_entry(qe, head, node) 3278 if (qe->q == q) { 3279 t = qe->type; 3280 break; 3281 } 3282 3283 if (!t) 3284 return; 3285 3286 list_del(&qe->node); 3287 kfree(qe); 3288 3289 mutex_lock(&q->sysfs_lock); 3290 elevator_switch_mq(q, t); 3291 mutex_unlock(&q->sysfs_lock); 3292 } 3293 3294 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, 3295 int nr_hw_queues) 3296 { 3297 struct request_queue *q; 3298 LIST_HEAD(head); 3299 int prev_nr_hw_queues; 3300 3301 lockdep_assert_held(&set->tag_list_lock); 3302 3303 if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids) 3304 nr_hw_queues = nr_cpu_ids; 3305 if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues) 3306 return; 3307 3308 list_for_each_entry(q, &set->tag_list, tag_set_list) 3309 blk_mq_freeze_queue(q); 3310 /* 3311 * Switch IO scheduler to 'none', cleaning up the data associated 3312 * with the previous scheduler. We will switch back once we are done 3313 * updating the new sw to hw queue mappings. 
3314 */ 3315 list_for_each_entry(q, &set->tag_list, tag_set_list) 3316 if (!blk_mq_elv_switch_none(&head, q)) 3317 goto switch_back; 3318 3319 list_for_each_entry(q, &set->tag_list, tag_set_list) { 3320 blk_mq_debugfs_unregister_hctxs(q); 3321 blk_mq_sysfs_unregister(q); 3322 } 3323 3324 if (blk_mq_realloc_tag_set_tags(set, set->nr_hw_queues, nr_hw_queues) < 3325 0) 3326 goto reregister; 3327 3328 prev_nr_hw_queues = set->nr_hw_queues; 3329 set->nr_hw_queues = nr_hw_queues; 3330 blk_mq_update_queue_map(set); 3331 fallback: 3332 list_for_each_entry(q, &set->tag_list, tag_set_list) { 3333 blk_mq_realloc_hw_ctxs(set, q); 3334 if (q->nr_hw_queues != set->nr_hw_queues) { 3335 pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n", 3336 nr_hw_queues, prev_nr_hw_queues); 3337 set->nr_hw_queues = prev_nr_hw_queues; 3338 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); 3339 goto fallback; 3340 } 3341 blk_mq_map_swqueue(q); 3342 } 3343 3344 reregister: 3345 list_for_each_entry(q, &set->tag_list, tag_set_list) { 3346 blk_mq_sysfs_register(q); 3347 blk_mq_debugfs_register_hctxs(q); 3348 } 3349 3350 switch_back: 3351 list_for_each_entry(q, &set->tag_list, tag_set_list) 3352 blk_mq_elv_switch_back(&head, q); 3353 3354 list_for_each_entry(q, &set->tag_list, tag_set_list) 3355 blk_mq_unfreeze_queue(q); 3356 } 3357 3358 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) 3359 { 3360 mutex_lock(&set->tag_list_lock); 3361 __blk_mq_update_nr_hw_queues(set, nr_hw_queues); 3362 mutex_unlock(&set->tag_list_lock); 3363 } 3364 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues); 3365 3366 /* Enable polling stats and return whether they were already enabled. */ 3367 static bool blk_poll_stats_enable(struct request_queue *q) 3368 { 3369 if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) || 3370 blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q)) 3371 return true; 3372 blk_stat_add_callback(q, q->poll_cb); 3373 return false; 3374 } 3375 3376 static void blk_mq_poll_stats_start(struct request_queue *q) 3377 { 3378 /* 3379 * We don't arm the callback if polling stats are not enabled or the 3380 * callback is already active. 3381 */ 3382 if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) || 3383 blk_stat_is_active(q->poll_cb)) 3384 return; 3385 3386 blk_stat_activate_msecs(q->poll_cb, 100); 3387 } 3388 3389 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb) 3390 { 3391 struct request_queue *q = cb->data; 3392 int bucket; 3393 3394 for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) { 3395 if (cb->stat[bucket].nr_samples) 3396 q->poll_stat[bucket] = cb->stat[bucket]; 3397 } 3398 } 3399 3400 static unsigned long blk_mq_poll_nsecs(struct request_queue *q, 3401 struct blk_mq_hw_ctx *hctx, 3402 struct request *rq) 3403 { 3404 unsigned long ret = 0; 3405 int bucket; 3406 3407 /* 3408 * If stats collection isn't on, don't sleep but turn it on for 3409 * future users 3410 */ 3411 if (!blk_poll_stats_enable(q)) 3412 return 0; 3413 3414 /* 3415 * As an optimistic guess, use half of the mean service time 3416 * for this type of request. We can (and should) make this smarter. 3417 * For instance, if the completion latencies are tight, we can 3418 * get closer than just half the mean. This is especially 3419 * important on devices where the completion latencies are longer 3420 * than ~10 usec. We do use the stats for the relevant IO size 3421 * if available which does lead to better estimates. 
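 *
 * For example, a bucket whose mean completion time is 20 usec results in a
 * sleep target of roughly (mean + 1) / 2 = 10 usec below.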
3422 */ 3423 bucket = blk_mq_poll_stats_bkt(rq); 3424 if (bucket < 0) 3425 return ret; 3426 3427 if (q->poll_stat[bucket].nr_samples) 3428 ret = (q->poll_stat[bucket].mean + 1) / 2; 3429 3430 return ret; 3431 } 3432 3433 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q, 3434 struct blk_mq_hw_ctx *hctx, 3435 struct request *rq) 3436 { 3437 struct hrtimer_sleeper hs; 3438 enum hrtimer_mode mode; 3439 unsigned int nsecs; 3440 ktime_t kt; 3441 3442 if (rq->rq_flags & RQF_MQ_POLL_SLEPT) 3443 return false; 3444 3445 /* 3446 * If we get here, hybrid polling is enabled. Hence poll_nsec can be: 3447 * 3448 * 0: use half of prev avg 3449 * >0: use this specific value 3450 */ 3451 if (q->poll_nsec > 0) 3452 nsecs = q->poll_nsec; 3453 else 3454 nsecs = blk_mq_poll_nsecs(q, hctx, rq); 3455 3456 if (!nsecs) 3457 return false; 3458 3459 rq->rq_flags |= RQF_MQ_POLL_SLEPT; 3460 3461 /* 3462 * This will be replaced with the stats tracking code, using 3463 * 'avg_completion_time / 2' as the pre-sleep target. 3464 */ 3465 kt = nsecs; 3466 3467 mode = HRTIMER_MODE_REL; 3468 hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode); 3469 hrtimer_set_expires(&hs.timer, kt); 3470 3471 do { 3472 if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE) 3473 break; 3474 set_current_state(TASK_UNINTERRUPTIBLE); 3475 hrtimer_sleeper_start_expires(&hs, mode); 3476 if (hs.task) 3477 io_schedule(); 3478 hrtimer_cancel(&hs.timer); 3479 mode = HRTIMER_MODE_ABS; 3480 } while (hs.task && !signal_pending(current)); 3481 3482 __set_current_state(TASK_RUNNING); 3483 destroy_hrtimer_on_stack(&hs.timer); 3484 return true; 3485 } 3486 3487 static bool blk_mq_poll_hybrid(struct request_queue *q, 3488 struct blk_mq_hw_ctx *hctx, blk_qc_t cookie) 3489 { 3490 struct request *rq; 3491 3492 if (q->poll_nsec == BLK_MQ_POLL_CLASSIC) 3493 return false; 3494 3495 if (!blk_qc_t_is_internal(cookie)) 3496 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie)); 3497 else { 3498 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie)); 3499 /* 3500 * With scheduling, if the request has completed, we'll 3501 * get a NULL return here, as we clear the sched tag when 3502 * that happens. The request still remains valid, like always, 3503 * so we should be safe with just the NULL check. 3504 */ 3505 if (!rq) 3506 return false; 3507 } 3508 3509 return blk_mq_poll_hybrid_sleep(q, hctx, rq); 3510 } 3511 3512 /** 3513 * blk_poll - poll for IO completions 3514 * @q: the queue 3515 * @cookie: cookie passed back at IO submission time 3516 * @spin: whether to spin for completions 3517 * 3518 * Description: 3519 * Poll for completions on the passed in queue. Returns number of 3520 * completed entries found. If @spin is true, then blk_poll will continue 3521 * looping until at least one completion is found, unless the task is 3522 * otherwise marked running (or we need to reschedule). 3523 */ 3524 int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin) 3525 { 3526 struct blk_mq_hw_ctx *hctx; 3527 long state; 3528 3529 if (!blk_qc_t_valid(cookie) || 3530 !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) 3531 return 0; 3532 3533 if (current->plug) 3534 blk_flush_plug_list(current->plug, false); 3535 3536 hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)]; 3537 3538 /* 3539 * If we sleep, have the caller restart the poll loop to reset 3540 * the state. Like for the other success return cases, the 3541 * caller is responsible for checking if the IO completed. 
If 3542 * the IO isn't complete, we'll get called again and will go 3543 * straight to the busy poll loop. 3544 */ 3545 if (blk_mq_poll_hybrid(q, hctx, cookie)) 3546 return 1; 3547 3548 hctx->poll_considered++; 3549 3550 state = current->state; 3551 do { 3552 int ret; 3553 3554 hctx->poll_invoked++; 3555 3556 ret = q->mq_ops->poll(hctx); 3557 if (ret > 0) { 3558 hctx->poll_success++; 3559 __set_current_state(TASK_RUNNING); 3560 return ret; 3561 } 3562 3563 if (signal_pending_state(state, current)) 3564 __set_current_state(TASK_RUNNING); 3565 3566 if (current->state == TASK_RUNNING) 3567 return 1; 3568 if (ret < 0 || !spin) 3569 break; 3570 cpu_relax(); 3571 } while (!need_resched()); 3572 3573 __set_current_state(TASK_RUNNING); 3574 return 0; 3575 } 3576 EXPORT_SYMBOL_GPL(blk_poll); 3577 3578 unsigned int blk_mq_rq_cpu(struct request *rq) 3579 { 3580 return rq->mq_ctx->cpu; 3581 } 3582 EXPORT_SYMBOL(blk_mq_rq_cpu); 3583 3584 static int __init blk_mq_init(void) 3585 { 3586 cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL, 3587 blk_mq_hctx_notify_dead); 3588 return 0; 3589 } 3590 subsys_initcall(blk_mq_init); 3591
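
/*
 * Illustrative only, not part of the kernel API defined in this file: a
 * submitter that kept the cookie returned at submission time can busy-poll
 * for completion roughly like this, where "done" is the caller's own
 * completion flag:
 *
 *	while (!READ_ONCE(done))
 *		blk_poll(q, cookie, true);
 */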