// SPDX-License-Identifier: GPL-2.0
/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/llist.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>
#include <linux/blk-crypto.h>
#include <linux/part_stat.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include <linux/t10-pi.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-pm.h"
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-ioprio.h"

static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);

static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);

static int blk_mq_poll_stats_bkt(const struct request *rq)
{
	int ddir, sectors, bucket;

	ddir = rq_data_dir(rq);
	sectors = blk_rq_stats_sectors(rq);

	bucket = ddir + 2 * ilog2(sectors);

	if (bucket < 0)
		return -1;
	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

	return bucket;
}

#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
		blk_qc_t qc)
{
	return xa_load(&q->hctx_table,
			(qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT);
}

static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx,
		blk_qc_t qc)
{
	unsigned int tag = qc & ((1U << BLK_QC_T_SHIFT) - 1);

	if (qc & BLK_QC_T_INTERNAL)
		return blk_mq_tag_to_rq(hctx->sched_tags, tag);
	return blk_mq_tag_to_rq(hctx->tags, tag);
}

static inline blk_qc_t blk_rq_to_qc(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_QC_T_SHIFT) |
		(rq->tag != -1 ?
		 rq->tag : (rq->internal_tag | BLK_QC_T_INTERNAL));
}

/*
 * Check if any of the ctx, dispatch list or elevator
 * have pending work in this hardware queue.
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return !list_empty_careful(&hctx->dispatch) ||
		sbitmap_any_bit_set(&hctx->ctx_map) ||
			blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	if (!sbitmap_test_bit(&hctx->ctx_map, bit))
		sbitmap_set_bit(&hctx->ctx_map, bit);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	sbitmap_clear_bit(&hctx->ctx_map, bit);
}

struct mq_inflight {
	struct block_device *part;
	unsigned int inflight[2];
};

static bool blk_mq_check_inflight(struct request *rq, void *priv)
{
	struct mq_inflight *mi = priv;

	if (rq->part && blk_do_io_stat(rq) &&
	    (!mi->part->bd_partno || rq->part == mi->part) &&
	    blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
		mi->inflight[rq_data_dir(rq)]++;

	return true;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part)
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);

	return mi.inflight[0] + mi.inflight[1];
}

void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
	inflight[0] = mi.inflight[0];
	inflight[1] = mi.inflight[1];
}

void blk_freeze_queue_start(struct request_queue *q)
{
	mutex_lock(&q->mq_freeze_lock);
	if (++q->mq_freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		mutex_unlock(&q->mq_freeze_lock);
		if (queue_is_mq(q))
			blk_mq_run_hw_queues(q, false);
	} else {
		mutex_unlock(&q->mq_freeze_lock);
	}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
					percpu_ref_is_zero(&q->q_usage_counter),
					timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
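
/*
 * Illustrative sketch (not part of the original file): a driver that needs
 * to modify queue data structures while no request is in use typically
 * brackets the update with a freeze/unfreeze pair:
 *
 *	blk_mq_freeze_queue(q);
 *	... update queue limits or other per-queue data ...
 *	blk_mq_unfreeze_queue(q);
 *
 * blk_mq_freeze_queue() only returns once q_usage_counter has dropped to
 * zero, i.e. once every outstanding request has completed.
 */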

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero. For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
	blk_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
{
	mutex_lock(&q->mq_freeze_lock);
	if (force_atomic)
		q->q_usage_counter.data->force_atomic = true;
	q->mq_freeze_depth--;
	WARN_ON_ONCE(q->mq_freeze_depth < 0);
	if (!q->mq_freeze_depth) {
		percpu_ref_resurrect(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
	mutex_unlock(&q->mq_freeze_lock);
}

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	__blk_mq_unfreeze_queue(q, false);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->queue_lock, flags);
	if (!q->quiesce_depth++)
		blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
	spin_unlock_irqrestore(&q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done
 * @q: request queue.
 *
 * Note: it is the driver's responsibility to make sure that quiesce has
 * been started.
 */
void blk_mq_wait_quiesce_done(struct request_queue *q)
{
	if (blk_queue_has_srcu(q))
		synchronize_srcu(q->srcu);
	else
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_wait_quiesce_done);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked. Once this function returns, we make
 * sure no dispatch can happen until the queue is unquiesced via
 * blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	blk_mq_quiesce_queue_nowait(q);
	blk_mq_wait_quiesce_done(q);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
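
/*
 * Illustrative sketch (not part of the original file): drivers typically use
 * the quiesce interface to stop dispatch temporarily, e.g. around a
 * controller reset, and then let queued work resume:
 *
 *	blk_mq_quiesce_queue(q);	// no new ->queue_rq() calls after this
 *	... reset or reconfigure the hardware ...
 *	blk_mq_unquiesce_queue(q);	// reruns the hw queues
 *
 * Requests inserted while the queue is quiesced stay queued and are
 * dispatched by the queue runs triggered from blk_mq_unquiesce_queue().
 */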

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function restores the queue to the state it was in before
 * blk_mq_quiesce_queue() was called.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
	unsigned long flags;
	bool run_queue = false;

	spin_lock_irqsave(&q->queue_lock, flags);
	if (WARN_ON_ONCE(q->quiesce_depth <= 0)) {
		;
	} else if (!--q->quiesce_depth) {
		blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
		run_queue = true;
	}
	spin_unlock_irqrestore(&q->queue_lock, flags);

	/* dispatch requests which are inserted during quiescing */
	if (run_queue)
		blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);

void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);
}

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = BLK_MQ_NO_TAG;
	rq->internal_tag = BLK_MQ_NO_TAG;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
	blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);

static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
		struct blk_mq_tags *tags, unsigned int tag, u64 alloc_time_ns)
{
	struct blk_mq_ctx *ctx = data->ctx;
	struct blk_mq_hw_ctx *hctx = data->hctx;
	struct request_queue *q = data->q;
	struct request *rq = tags->static_rqs[tag];

	rq->q = q;
	rq->mq_ctx = ctx;
	rq->mq_hctx = hctx;
	rq->cmd_flags = data->cmd_flags;

	if (data->flags & BLK_MQ_REQ_PM)
		data->rq_flags |= RQF_PM;
	if (blk_queue_io_stat(q))
		data->rq_flags |= RQF_IO_STAT;
	rq->rq_flags = data->rq_flags;

	if (!(data->rq_flags & RQF_ELV)) {
		rq->tag = tag;
		rq->internal_tag = BLK_MQ_NO_TAG;
	} else {
		rq->tag = BLK_MQ_NO_TAG;
		rq->internal_tag = tag;
	}
	rq->timeout = 0;

	if (blk_mq_need_time_stamp(rq))
		rq->start_time_ns = ktime_get_ns();
	else
		rq->start_time_ns = 0;
	rq->part = NULL;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	rq->alloc_time_ns = alloc_time_ns;
#endif
	rq->io_start_time_ns = 0;
	rq->stats_sectors = 0;
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->end_io = NULL;
	rq->end_io_data = NULL;

	blk_crypto_rq_set_defaults(rq);
	INIT_LIST_HEAD(&rq->queuelist);
	/* tag was already set */
	WRITE_ONCE(rq->deadline, 0);
	req_ref_set(rq, 1);

	if (rq->rq_flags & RQF_ELV) {
		struct elevator_queue *e = data->q->elevator;

		INIT_HLIST_NODE(&rq->hash);
		RB_CLEAR_NODE(&rq->rb_node);

		if (!op_is_flush(data->cmd_flags) &&
		    e->type->ops.prepare_request) {
			e->type->ops.prepare_request(rq);
			rq->rq_flags |= RQF_ELVPRIV;
		}
	}

	return rq;
}

static inline struct request *
__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
		u64 alloc_time_ns)
{
	unsigned int tag, tag_offset;
	struct blk_mq_tags *tags;
	struct request *rq;
	unsigned long tag_mask;
	int i, nr = 0;

	tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset);
	if (unlikely(!tag_mask))
		return NULL;

	tags = blk_mq_tags_from_data(data);
	for (i = 0; tag_mask; i++) {
		if (!(tag_mask & (1UL << i)))
			continue;
		tag = tag_offset + i;
		prefetch(tags->static_rqs[tag]);
		tag_mask &= ~(1UL << i);
		rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns);
		rq_list_add(data->cached_rq, rq);
		nr++;
	}
	/* caller already holds a reference, add for remainder */
	percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
	data->nr_tags -= nr;

	return rq_list_pop(data->cached_rq);
}

static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
{
	struct request_queue *q = data->q;
	u64 alloc_time_ns = 0;
	struct request *rq;
	unsigned int tag;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = ktime_get_ns();

	if (data->cmd_flags & REQ_NOWAIT)
		data->flags |= BLK_MQ_REQ_NOWAIT;

	if (q->elevator) {
		struct elevator_queue *e = q->elevator;

		data->rq_flags |= RQF_ELV;

		/*
		 * Flush/passthrough requests are special and go directly to
		 * the dispatch list. Don't include reserved tags in the
		 * limiting, as it isn't useful.
		 */
		if (!op_is_flush(data->cmd_flags) &&
		    !blk_op_is_passthrough(data->cmd_flags) &&
		    e->type->ops.limit_depth &&
		    !(data->flags & BLK_MQ_REQ_RESERVED))
			e->type->ops.limit_depth(data->cmd_flags, data);
	}

retry:
	data->ctx = blk_mq_get_ctx(q);
	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
	if (!(data->rq_flags & RQF_ELV))
		blk_mq_tag_busy(data->hctx);

	if (data->flags & BLK_MQ_REQ_RESERVED)
		data->rq_flags |= RQF_RESV;

	/*
	 * Try batched alloc if we want more than 1 tag.
	 */
	if (data->nr_tags > 1) {
		rq = __blk_mq_alloc_requests_batch(data, alloc_time_ns);
		if (rq)
			return rq;
		data->nr_tags = 1;
	}

	/*
	 * Waiting allocations only fail because of an inactive hctx. In that
	 * case just retry the hctx assignment and tag allocation as CPU hotplug
	 * should have migrated us to an online CPU by now.
	 */
	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_NO_TAG) {
		if (data->flags & BLK_MQ_REQ_NOWAIT)
			return NULL;
		/*
		 * Give up the CPU and sleep for a random short time to
		 * ensure that threads using a realtime scheduling class
		 * are migrated off the CPU, and thus off the hctx that
		 * is going away.
		 */
		msleep(3);
		goto retry;
	}

	return blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag,
					alloc_time_ns);
}

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= op,
		.nr_tags	= 1,
	};
	struct request *rq;
	int ret;

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	rq = __blk_mq_alloc_requests(&data);
	if (!rq)
		goto out_queue_exit;
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(-EWOULDBLOCK);
}
EXPORT_SYMBOL(blk_mq_alloc_request);
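
/*
 * Illustrative sketch (not part of the original file): a typical passthrough
 * user allocates a request, executes it synchronously and frees it again.
 * Error handling and data mapping (e.g. via blk_rq_map_kern()) are omitted:
 *
 *	struct request *rq;
 *	blk_status_t status;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... fill in the driver specific payload ...
 *	status = blk_execute_rq(rq, false);
 *	blk_mq_free_request(rq);
 */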

struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
	unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= op,
		.nr_tags	= 1,
	};
	u64 alloc_time_ns = 0;
	unsigned int cpu;
	unsigned int tag;
	int ret;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = ktime_get_ns();

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context.  No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED))))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not tell the caller that it should skip this queue.
	 */
	ret = -EXDEV;
	data.hctx = xa_load(&q->hctx_table, hctx_idx);
	if (!blk_mq_hw_queue_mapped(data.hctx))
		goto out_queue_exit;
	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		goto out_queue_exit;
	data.ctx = __blk_mq_get_ctx(q, cpu);

	if (!q->elevator)
		blk_mq_tag_busy(data.hctx);
	else
		data.rq_flags |= RQF_ELV;

	if (flags & BLK_MQ_REQ_RESERVED)
		data.rq_flags |= RQF_RESV;

	ret = -EWOULDBLOCK;
	tag = blk_mq_get_tag(&data);
	if (tag == BLK_MQ_NO_TAG)
		goto out_queue_exit;
	return blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
					alloc_time_ns);

out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

static void __blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	const int sched_tag = rq->internal_tag;

	blk_crypto_free_request(rq);
	blk_pm_mark_last_busy(rq);
	rq->mq_hctx = NULL;
	if (rq->tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->tags, ctx, rq->tag);
	if (sched_tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
	blk_mq_sched_restart(hctx);
	blk_queue_exit(q);
}

void blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if ((rq->rq_flags & RQF_ELVPRIV) &&
	    q->elevator->type->ops.finish_request)
		q->elevator->type->ops.finish_request(rq);

	if (rq->rq_flags & RQF_MQ_INFLIGHT)
		__blk_mq_dec_active_requests(hctx);

	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
		laptop_io_completion(q->disk->bdi);

	rq_qos_done(q, rq);

	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
	if (req_ref_put_and_test(rq))
		__blk_mq_free_request(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

void blk_mq_free_plug_rqs(struct blk_plug *plug)
{
	struct request *rq;

	while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
		blk_mq_free_request(rq);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->q->disk ? rq->q->disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (unlikely(error)) {
		bio->bi_status = error;
	} else if (req_op(rq) == REQ_OP_ZONE_APPEND) {
		/*
		 * Partial zone append completions cannot be supported as the
		 * BIO fragments may end up not being written sequentially.
		 */
		if (bio->bi_iter.bi_size != nbytes)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_iter.bi_sector = rq->__sector;
	}

	bio_advance(bio, nbytes);

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);
	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (req->part && blk_do_io_stat(req)) {
		const int sgrp = op_stat_group(req_op(req));

		part_stat_lock();
		part_stat_add(req->part, sectors[sgrp], bytes >> 9);
		part_stat_unlock();
	}
}

static void blk_print_req_error(struct request *req, blk_status_t status)
{
	printk_ratelimited(KERN_ERR
		"%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		blk_status_to_str(status),
		req->q->disk ? req->q->disk->disk_name : "?",
		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
		req->cmd_flags & ~REQ_OP_MASK,
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req->ioprio));
}

/*
 * Fully end IO on a request. Does not support partial completions, or
 * errors.
 */
static void blk_complete_request(struct request *req)
{
	const bool is_flush = (req->rq_flags & RQF_FLUSH_SEQ) != 0;
	int total_bytes = blk_rq_bytes(req);
	struct bio *bio = req->bio;

	trace_block_rq_complete(req, BLK_STS_OK, total_bytes);

	if (!bio)
		return;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ)
		req->q->integrity.profile->complete_fn(req, total_bytes);
#endif

	blk_account_io_completion(req, total_bytes);

	do {
		struct bio *next = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);

		if (req_op(req) == REQ_OP_ZONE_APPEND)
			bio->bi_iter.bi_sector = req->__sector;

		if (!is_flush)
			bio_endio(bio);
		bio = next;
	} while (bio);

	/*
	 * Reset counters so that the request stacking driver
	 * can find how many bytes remain in the request
	 * later.
	 */
	req->bio = NULL;
	req->__data_len = 0;
}

/**
 * blk_update_request - Complete multiple bytes without completing the request
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete for @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Note:
 *     The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
 *     except in the consistency check at the end of this function.
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, blk_status_t error,
		unsigned int nr_bytes)
{
	int total_bytes;

	trace_block_rq_complete(req, error, nr_bytes);

	if (!req->bio)
		return false;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    error == BLK_STS_OK)
		req->q->integrity.profile->complete_fn(req, nr_bytes);
#endif

	if (unlikely(error && !blk_rq_is_passthrough(req) &&
		     !(req->rq_flags & RQF_QUIET)) &&
		     !test_bit(GD_DEAD, &req->q->disk->state)) {
		blk_print_req_error(req, error);
		trace_block_rq_error(req, error, nr_bytes);
	}

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

		if (bio_bytes == bio->bi_iter.bi_size)
			req->bio = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		req_bio_endio(req, bio, bio_bytes, error);

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	req->__data_len -= total_bytes;

	/* update sector only for requests with clear definition of sector */
	if (!blk_rq_is_passthrough(req))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->rq_flags & RQF_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
		/*
		 * If total number of sectors is less than the first segment
		 * size, something has gone terribly wrong.
		 */
		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
			blk_dump_rq_flags(req, "request botched");
			req->__data_len = blk_rq_cur_bytes(req);
		}

		/* recalculate the number of segments */
		req->nr_phys_segments = blk_recalc_rq_segments(req);
	}

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);

static void __blk_account_io_done(struct request *req, u64 now)
{
	const int sgrp = op_stat_group(req_op(req));

	part_stat_lock();
	update_io_ticks(req->part, jiffies, true);
	part_stat_inc(req->part, ios[sgrp]);
	part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
	part_stat_unlock();
}

static inline void blk_account_io_done(struct request *req, u64 now)
{
	/*
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
	 */
	if (blk_do_io_stat(req) && req->part &&
	    !(req->rq_flags & RQF_FLUSH_SEQ))
		__blk_account_io_done(req, now);
}

static void __blk_account_io_start(struct request *rq)
{
	/*
	 * All non-passthrough requests are created from a bio with one
	 * exception: when a flush command that is part of a flush sequence
	 * generated by the state machine in blk-flush.c is cloned onto the
	 * lower device by dm-multipath we can get here without a bio.
	 */
	if (rq->bio)
		rq->part = rq->bio->bi_bdev;
	else
		rq->part = rq->q->disk->part0;

	part_stat_lock();
	update_io_ticks(rq->part, jiffies, false);
	part_stat_unlock();
}

static inline void blk_account_io_start(struct request *req)
{
	if (blk_do_io_stat(req))
		__blk_account_io_start(req);
}

static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
{
	if (rq->rq_flags & RQF_STATS) {
		blk_mq_poll_stats_start(rq->q);
		blk_stat_add(rq, now);
	}

	blk_mq_sched_completed_request(rq, now);
	blk_account_io_done(rq, now);
}

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_mq_need_time_stamp(rq))
		__blk_mq_end_request_acct(rq, ktime_get_ns());

	if (rq->end_io) {
		rq_qos_done(rq->q, rq);
		rq->end_io(rq, error);
	} else {
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

#define TAG_COMP_BATCH		32

static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
					  int *tag_array, int nr_tags)
{
	struct request_queue *q = hctx->queue;

	/*
	 * All requests should have been marked as RQF_MQ_INFLIGHT, so
	 * update hctx->nr_active in batch
	 */
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_sub_active_requests(hctx, nr_tags);

	blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
	percpu_ref_put_many(&q->q_usage_counter, nr_tags);
}

void blk_mq_end_request_batch(struct io_comp_batch *iob)
{
	int tags[TAG_COMP_BATCH], nr_tags = 0;
	struct blk_mq_hw_ctx *cur_hctx = NULL;
	struct request *rq;
	u64 now = 0;

	if (iob->need_ts)
		now = ktime_get_ns();

	while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
		prefetch(rq->bio);
		prefetch(rq->rq_next);

		blk_complete_request(rq);
		if (iob->need_ts)
			__blk_mq_end_request_acct(rq, now);

		rq_qos_done(rq->q, rq);

		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		if (!req_ref_put_and_test(rq))
			continue;

		blk_crypto_free_request(rq);
		blk_pm_mark_last_busy(rq);

		if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
			if (cur_hctx)
				blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
			nr_tags = 0;
			cur_hctx = rq->mq_hctx;
		}
		tags[nr_tags++] = rq->tag;
	}

	if (nr_tags)
		blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
}
EXPORT_SYMBOL_GPL(blk_mq_end_request_batch);

static void blk_complete_reqs(struct llist_head *list)
{
	struct llist_node *entry = llist_reverse_order(llist_del_all(list));
	struct request *rq, *next;

	llist_for_each_entry_safe(rq, next, entry, ipi_list)
		rq->q->mq_ops->complete(rq);
}

static __latent_entropy void blk_done_softirq(struct softirq_action *h)
{
	blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
}

static int blk_softirq_cpu_dead(unsigned int cpu)
{
	blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
	return 0;
}

static void __blk_mq_complete_request_remote(void *data)
{
	__raise_softirq_irqoff(BLOCK_SOFTIRQ);
}

static inline bool blk_mq_complete_need_ipi(struct request *rq)
{
	int cpu = raw_smp_processor_id();

	if (!IS_ENABLED(CONFIG_SMP) ||
	    !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
		return false;
	/*
	 * With force threaded interrupts enabled, raising softirq from an SMP
	 * function call will always result in waking the ksoftirqd thread.
	 * This is probably worse than completing the request on a different
	 * cache domain.
	 */
	if (force_irqthreads())
		return false;

	/* same CPU or cache domain?  Complete locally */
	if (cpu == rq->mq_ctx->cpu ||
	    (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
	     cpus_share_cache(cpu, rq->mq_ctx->cpu)))
		return false;

	/* don't try to IPI to an offline CPU */
	return cpu_online(rq->mq_ctx->cpu);
}

static void blk_mq_complete_send_ipi(struct request *rq)
{
	struct llist_head *list;
	unsigned int cpu;

	cpu = rq->mq_ctx->cpu;
	list = &per_cpu(blk_cpu_done, cpu);
	if (llist_add(&rq->ipi_list, list)) {
		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
		smp_call_function_single_async(cpu, &rq->csd);
	}
}

static void blk_mq_raise_softirq(struct request *rq)
{
	struct llist_head *list;

	preempt_disable();
	list = this_cpu_ptr(&blk_cpu_done);
	if (llist_add(&rq->ipi_list, list))
		raise_softirq(BLOCK_SOFTIRQ);
	preempt_enable();
}

bool blk_mq_complete_request_remote(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);

	/*
	 * For a polled request, always complete locally, it's pointless
	 * to redirect the completion.
	 */
	if (rq->cmd_flags & REQ_POLLED)
		return false;

	if (blk_mq_complete_need_ipi(rq)) {
		blk_mq_complete_send_ipi(rq);
		return true;
	}

	if (rq->q->nr_hw_queues == 1) {
		blk_mq_raise_softirq(rq);
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Complete a request by scheduling the ->complete_rq operation.
 **/
void blk_mq_complete_request(struct request *rq)
{
	if (!blk_mq_complete_request_remote(rq))
		rq->q->mq_ops->complete(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

/**
 * blk_mq_start_request - Start processing a request
 * @rq: Pointer to request to be started
 *
 * Function used by device drivers to notify the block layer that a request
 * is going to be processed now, so blk layer can do proper initializations
 * such as starting the timeout timer.
 */
void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_issue(rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		rq->io_start_time_ns = ktime_get_ns();
		rq->stats_sectors = blk_rq_sectors(rq);
		rq->rq_flags |= RQF_STATS;
		rq_qos_issue(q, rq);
	}

	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);

	blk_add_timer(rq);
	WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
		q->integrity.profile->prepare_fn(rq);
#endif
	if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
		WRITE_ONCE(rq->bio->bi_cookie, blk_rq_to_qc(rq));
}
EXPORT_SYMBOL(blk_mq_start_request);
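
/*
 * Illustrative sketch (not part of the original file): the expected calling
 * sequence in a driver's ->queue_rq() handler, here completing synchronously
 * in the style of a trivial null/ram style driver:
 *
 *	static blk_status_t foo_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					 const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);	// arms the timeout timer
 *		... transfer the data for rq ...
 *		blk_mq_end_request(rq, BLK_STS_OK);
 *		return BLK_STS_OK;
 *	}
 *
 * foo_queue_rq() is a made-up name; real drivers usually complete the request
 * asynchronously from their IRQ handler via blk_mq_complete_request().
 */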

/*
 * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
 * queues. This is important for md arrays to benefit from merging
 * requests.
 */
static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
{
	if (plug->multiple_queues)
		return BLK_MAX_REQUEST_COUNT * 2;
	return BLK_MAX_REQUEST_COUNT;
}

static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
{
	struct request *last = rq_list_peek(&plug->mq_list);

	if (!plug->rq_count) {
		trace_block_plug(rq->q);
	} else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
		   (!blk_queue_nomerges(rq->q) &&
		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
		blk_mq_flush_plug_list(plug, false);
		trace_block_plug(rq->q);
	}

	if (!plug->multiple_queues && last && last->q != rq->q)
		plug->multiple_queues = true;
	if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
		plug->has_elevator = true;
	rq->rq_next = NULL;
	rq_list_add(&plug->mq_list, rq);
	plug->rq_count++;
}

/**
 * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
 * @rq:		request to insert
 * @at_head:	insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct request *rq, bool at_head)
{
	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

	blk_account_io_start(rq);
	if (current->plug)
		blk_add_rq_to_plug(current->plug, rq);
	else
		blk_mq_sched_insert_request(rq, at_head, true, false);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);

struct blk_rq_wait {
	struct completion done;
	blk_status_t ret;
};

static void blk_end_sync_rq(struct request *rq, blk_status_t ret)
{
	struct blk_rq_wait *wait = rq->end_io_data;

	wait->ret = ret;
	complete(&wait->done);
}

static bool blk_rq_is_poll(struct request *rq)
{
	if (!rq->mq_hctx)
		return false;
	if (rq->mq_hctx->type != HCTX_TYPE_POLL)
		return false;
	if (WARN_ON_ONCE(!rq->bio))
		return false;
	return true;
}

static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
	do {
		bio_poll(rq->bio, NULL, 0);
		cond_resched();
	} while (!completion_done(wait));
}

/**
 * blk_execute_rq - insert a request into queue for execution
 * @rq:		request to insert
 * @at_head:	insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 * Return: The blk_status_t result provided to blk_mq_end_request().
 */
blk_status_t blk_execute_rq(struct request *rq, bool at_head)
{
	struct blk_rq_wait wait = {
		.done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
	};

	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

	rq->end_io_data = &wait;
	rq->end_io = blk_end_sync_rq;

	blk_account_io_start(rq);
	blk_mq_sched_insert_request(rq, at_head, true, false);

	if (blk_rq_is_poll(rq)) {
		blk_rq_poll_completion(rq, &wait.done);
	} else {
		/*
		 * Prevent hang_check timer from firing at us during very long
		 * I/O
		 */
		unsigned long hang_check = sysctl_hung_task_timeout_secs;

		if (hang_check)
			while (!wait_for_completion_io_timeout(&wait.done,
					hang_check * (HZ/2)))
				;
		else
			wait_for_completion_io(&wait.done);
	}

	return wait.ret;
}
EXPORT_SYMBOL(blk_execute_rq);

static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_put_driver_tag(rq);

	trace_block_rq_requeue(rq);
	rq_qos_requeue(q, rq);

	if (blk_mq_request_started(rq)) {
		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		rq->rq_flags &= ~RQF_TIMED_OUT;
	}
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
	__blk_mq_requeue_request(rq);

	/* this request will be re-inserted to io scheduler queue */
	blk_mq_sched_requeue_request(rq);

	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;

	spin_lock_irq(&q->requeue_lock);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irq(&q->requeue_lock);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
			continue;

		rq->rq_flags &= ~RQF_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		/*
		 * If RQF_DONTPREP is set, rq already contains driver specific
		 * data, so insert it into the hctx dispatch list to avoid any
		 * merge.
		 */
		if (rq->rq_flags & RQF_DONTPREP)
			blk_mq_request_bypass_insert(rq, false, false);
		else
			blk_mq_sched_insert_request(rq, true, false, false);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, false, false, false);
	}

	blk_mq_run_hw_queues(q, false);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	if (kick_requeue_list)
		blk_mq_kick_requeue_list(q);
}

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
				    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

static bool blk_mq_rq_inflight(struct request *rq, void *priv)
{
	/*
	 * If we find a request that isn't idle we know the queue is busy
	 * as it's checked in the iter.
	 * Return false to stop the iteration.
	 */
	if (blk_mq_request_started(rq)) {
		bool *busy = priv;

		*busy = true;
		return false;
	}

	return true;
}

bool blk_mq_queue_inflight(struct request_queue *q)
{
	bool busy = false;

	blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
	return busy;
}
EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);

static void blk_mq_rq_timed_out(struct request *req)
{
	req->rq_flags |= RQF_TIMED_OUT;
	if (req->q->mq_ops->timeout) {
		enum blk_eh_timer_return ret;

		ret = req->q->mq_ops->timeout(req);
		if (ret == BLK_EH_DONE)
			return;
		WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
	}

	blk_add_timer(req);
}

static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
{
	unsigned long deadline;

	if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
		return false;
	if (rq->rq_flags & RQF_TIMED_OUT)
		return false;

	deadline = READ_ONCE(rq->deadline);
	if (time_after_eq(jiffies, deadline))
		return true;

	if (*next == 0)
		*next = deadline;
	else if (time_after(*next, deadline))
		*next = deadline;
	return false;
}

void blk_mq_put_rq_ref(struct request *rq)
{
	if (is_flush_rq(rq))
		rq->end_io(rq, 0);
	else if (req_ref_put_and_test(rq))
		__blk_mq_free_request(rq);
}

static bool blk_mq_check_expired(struct request *rq, void *priv)
{
	unsigned long *next = priv;

	/*
	 * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
	 * be reallocated underneath the timeout handler's processing, then
	 * the expire check is reliable. If the request is not expired, then
	 * it was completed and reallocated as a new request after returning
	 * from blk_mq_check_expired().
	 */
	if (blk_mq_req_expired(rq, next))
		blk_mq_rq_timed_out(rq);
	return true;
}

static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	unsigned long next = 0;
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);

	if (next != 0) {
		mod_timer(&q->timeout, next);
	} else {
		/*
		 * Request timeouts are handled as a forward rolling timer. If
		 * we end up here it means that no requests are pending and
		 * also that no request has been pending for a while. Mark
		 * each hctx as idle.
		 */
		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
	}
	blk_queue_exit(q);
}

struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
	enum hctx_type type = hctx->type;

	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
	sbitmap_clear_bit(sb, bitnr);
	spin_unlock(&ctx->lock);
	return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct flush_busy_ctx_data data = {
		.hctx = hctx,
		.list = list,
	};

	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);

struct dispatch_rq_data {
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
};

static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
		void *data)
{
	struct dispatch_rq_data *dispatch_data = data;
	struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
	enum hctx_type type = hctx->type;

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_lists[type])) {
		dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
		list_del_init(&dispatch_data->rq->queuelist);
		if (list_empty(&ctx->rq_lists[type]))
			sbitmap_clear_bit(sb, bitnr);
	}
	spin_unlock(&ctx->lock);

	return !dispatch_data->rq;
}

struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start)
{
	unsigned off = start ? start->index_hw[hctx->type] : 0;
	struct dispatch_rq_data data = {
		.hctx = hctx,
		.rq   = NULL,
	};

	__sbitmap_for_each_set(&hctx->ctx_map, off,
			       dispatch_rq_from_ctx, &data);

	return data.rq;
}

static bool __blk_mq_alloc_driver_tag(struct request *rq)
{
	struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
	unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
	int tag;

	blk_mq_tag_busy(rq->mq_hctx);

	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
		bt = &rq->mq_hctx->tags->breserved_tags;
		tag_offset = 0;
	} else {
		if (!hctx_may_queue(rq->mq_hctx, bt))
			return false;
	}

	tag = __sbitmap_queue_get(bt);
	if (tag == BLK_MQ_NO_TAG)
		return false;

	rq->tag = tag + tag_offset;
	return true;
}

bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
		return false;

	if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
			!(rq->rq_flags & RQF_MQ_INFLIGHT)) {
		rq->rq_flags |= RQF_MQ_INFLIGHT;
		__blk_mq_inc_active_requests(hctx);
	}
	hctx->tags->rqs[rq->tag] = rq;
	return true;
}

static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
				int flags, void *key)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

	spin_lock(&hctx->dispatch_wait_lock);
	if (!list_empty(&wait->entry)) {
		struct sbitmap_queue *sbq;

		list_del_init(&wait->entry);
		sbq = &hctx->tags->bitmap_tags;
		atomic_dec(&sbq->ws_active);
	}
	spin_unlock(&hctx->dispatch_wait_lock);

	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

/*
 * Mark us waiting for a tag. For shared tags, this involves hooking us into
 * the tag wakeups. For non-shared tags, we can simply mark us needing a
 * restart. For both cases, take care to check the condition again after
 * marking us as waiting.
 */
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
				 struct request *rq)
{
	struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
	struct wait_queue_head *wq;
	wait_queue_entry_t *wait;
	bool ret;

	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
		blk_mq_sched_mark_restart_hctx(hctx);

		/*
		 * It's possible that a tag was freed in the window between the
		 * allocation failure and adding the hardware queue to the wait
		 * queue.
		 *
		 * Don't clear RESTART here, someone else could have set it.
		 * At most this will cost an extra queue run.
		 */
		return blk_mq_get_driver_tag(rq);
	}

	wait = &hctx->dispatch_wait;
	if (!list_empty_careful(&wait->entry))
		return false;

	wq = &bt_wait_ptr(sbq, hctx)->wait;

	spin_lock_irq(&wq->lock);
	spin_lock(&hctx->dispatch_wait_lock);
	if (!list_empty(&wait->entry)) {
		spin_unlock(&hctx->dispatch_wait_lock);
		spin_unlock_irq(&wq->lock);
		return false;
	}

	atomic_inc(&sbq->ws_active);
	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(wq, wait);

	/*
	 * It's possible that a tag was freed in the window between the
	 * allocation failure and adding the hardware queue to the wait
	 * queue.
	 */
	ret = blk_mq_get_driver_tag(rq);
	if (!ret) {
		spin_unlock(&hctx->dispatch_wait_lock);
		spin_unlock_irq(&wq->lock);
		return false;
	}

	/*
	 * We got a tag, remove ourselves from the wait queue to ensure
	 * someone else gets the wakeup.
	 */
	list_del_init(&wait->entry);
	atomic_dec(&sbq->ws_active);
	spin_unlock(&hctx->dispatch_wait_lock);
	spin_unlock_irq(&wq->lock);

	return true;
}

#define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT  8
#define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR  4
/*
 * Update dispatch busy with the Exponential Weighted Moving Average (EWMA):
 * - EWMA is one simple way to compute a running average value
 * - weight (7/8 and 1/8) is applied so that it can decrease exponentially
 * - take 4 as factor to avoid getting a too small (0) result, and this
 *   factor doesn't matter because EWMA decreases exponentially
 */
static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
{
	unsigned int ewma;

	ewma = hctx->dispatch_busy;

	if (!ewma && !busy)
		return;

	ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
	if (busy)
		ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
	ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;

	hctx->dispatch_busy = ewma;
}

#define BLK_MQ_RESOURCE_DELAY	3		/* ms units */

static void blk_mq_handle_dev_resource(struct request *rq,
				       struct list_head *list)
{
	struct request *next =
		list_first_entry_or_null(list, struct request, queuelist);

	/*
	 * If an I/O scheduler has been configured and we got a driver tag for
	 * the next request already, free it.
	 */
	if (next)
		blk_mq_put_driver_tag(next);

	list_add(&rq->queuelist, list);
	__blk_mq_requeue_request(rq);
}

static void blk_mq_handle_zone_resource(struct request *rq,
					struct list_head *zone_list)
{
	/*
	 * If we end up here it is because we cannot dispatch a request to a
	 * specific zone due to LLD level zone-write locking or other zone
	 * related resource not being available. In this case, set the request
	 * aside in zone_list for retrying it later.
	 */
	list_add(&rq->queuelist, zone_list);
	__blk_mq_requeue_request(rq);
}

enum prep_dispatch {
	PREP_DISPATCH_OK,
	PREP_DISPATCH_NO_TAG,
	PREP_DISPATCH_NO_BUDGET,
};

static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
						  bool need_budget)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	int budget_token = -1;

	if (need_budget) {
		budget_token = blk_mq_get_dispatch_budget(rq->q);
		if (budget_token < 0) {
			blk_mq_put_driver_tag(rq);
			return PREP_DISPATCH_NO_BUDGET;
		}
		blk_mq_set_rq_budget_token(rq, budget_token);
	}

	if (!blk_mq_get_driver_tag(rq)) {
		/*
		 * The initial allocation attempt failed, so we need to
		 * rerun the hardware queue when a tag is freed. The
		 * waitqueue takes care of that. If the queue is run
		 * before we add this entry back on the dispatch list,
		 * we'll re-run it below.
		 */
		if (!blk_mq_mark_tag_wait(hctx, rq)) {
			/*
			 * All budgets not got from this function will be put
			 * together during handling partial dispatch
			 */
			if (need_budget)
				blk_mq_put_dispatch_budget(rq->q, budget_token);
			return PREP_DISPATCH_NO_TAG;
		}
	}

	return PREP_DISPATCH_OK;
}

/* release all allocated budgets before calling to blk_mq_dispatch_rq_list */
static void blk_mq_release_budgets(struct request_queue *q,
		struct list_head *list)
{
	struct request *rq;

	list_for_each_entry(rq, list, queuelist) {
		int budget_token = blk_mq_get_rq_budget_token(rq);

		if (budget_token >= 0)
			blk_mq_put_dispatch_budget(q, budget_token);
	}
}

/*
 * Returns true if we did some work AND can potentially do more.
 */
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
			     unsigned int nr_budgets)
{
	enum prep_dispatch prep;
	struct request_queue *q = hctx->queue;
	struct request *rq, *nxt;
	int errors, queued;
	blk_status_t ret = BLK_STS_OK;
	LIST_HEAD(zone_list);
	bool needs_resource = false;

	if (list_empty(list))
		return false;

	/*
	 * Now process all the entries, sending them to the driver.
	 */
	errors = queued = 0;
	do {
		struct blk_mq_queue_data bd;

		rq = list_first_entry(list, struct request, queuelist);

		WARN_ON_ONCE(hctx != rq->mq_hctx);
		prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
		if (prep != PREP_DISPATCH_OK)
			break;

		list_del_init(&rq->queuelist);

		bd.rq = rq;

		/*
		 * Flag last if we have no more requests, or if we have more
		 * but can't assign a driver tag to it.
		 */
		if (list_empty(list))
			bd.last = true;
		else {
			nxt = list_first_entry(list, struct request, queuelist);
			bd.last = !blk_mq_get_driver_tag(nxt);
		}

		/*
		 * once the request is queued to lld, no need to cover the
		 * budget any more
		 */
		if (nr_budgets)
			nr_budgets--;
		ret = q->mq_ops->queue_rq(hctx, &bd);
		switch (ret) {
		case BLK_STS_OK:
			queued++;
			break;
		case BLK_STS_RESOURCE:
			needs_resource = true;
			fallthrough;
		case BLK_STS_DEV_RESOURCE:
			blk_mq_handle_dev_resource(rq, list);
			goto out;
		case BLK_STS_ZONE_RESOURCE:
			/*
			 * Move the request to zone_list and keep going through
			 * the dispatch list to find more requests the drive can
			 * accept.
			 */
			blk_mq_handle_zone_resource(rq, &zone_list);
			needs_resource = true;
			break;
		default:
			errors++;
			blk_mq_end_request(rq, ret);
		}
	} while (!list_empty(list));
out:
	if (!list_empty(&zone_list))
		list_splice_tail_init(&zone_list, list);

	/* If we didn't flush the entire list, we could have told the driver
	 * there was more coming, but that turned out to be a lie.
	 */
	if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued)
		q->mq_ops->commit_rqs(hctx);
	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
	if (!list_empty(list)) {
		bool needs_restart;
		/* For non-shared tags, the RESTART check will suffice */
		bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
			(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);

		if (nr_budgets)
			blk_mq_release_budgets(q, list);

		spin_lock(&hctx->lock);
		list_splice_tail_init(list, &hctx->dispatch);
		spin_unlock(&hctx->lock);

		/*
		 * Order adding requests to hctx->dispatch and checking
		 * SCHED_RESTART flag. The pair of this smp_mb() is the one
		 * in blk_mq_sched_restart(). Avoid restart code path to
		 * miss the new added requests to hctx->dispatch, meantime
		 * SCHED_RESTART is observed here.
		 */
		smp_mb();

		/*
		 * If SCHED_RESTART was set by the caller of this function and
		 * it is no longer set that means that it was cleared by another
		 * thread and hence that a queue rerun is needed.
		 *
		 * If 'no_tag' is set, that means that we failed getting
		 * a driver tag with an I/O scheduler attached. If our dispatch
		 * waitqueue is no longer active, ensure that we run the queue
		 * AFTER adding our entries back to the list.
		 *
		 * If no I/O scheduler has been configured it is possible that
		 * the hardware queue got stopped and restarted before requests
		 * were pushed back onto the dispatch list. Rerun the queue to
		 * avoid starvation. Notes:
		 * - blk_mq_run_hw_queue() checks whether or not a queue has
		 *   been stopped before rerunning a queue.
		 * - Some but not all block drivers stop a queue before
		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
		 *   and dm-rq.
		 *
		 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
		 * bit is set, run queue after a delay to avoid IO stalls
		 * that could otherwise occur if the queue is idle.  We'll do
		 * similar if we couldn't get budget or couldn't lock a zone
		 * and SCHED_RESTART is set.
		 */
		needs_restart = blk_mq_sched_needs_restart(hctx);
		if (prep == PREP_DISPATCH_NO_BUDGET)
			needs_resource = true;
		if (!needs_restart ||
		    (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
			blk_mq_run_hw_queue(hctx, true);
		else if (needs_restart && needs_resource)
			blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);

		blk_mq_update_dispatch_busy(hctx, true);
		return false;
	} else
		blk_mq_update_dispatch_busy(hctx, false);

	return (queued + errors) != 0;
}

/**
 * __blk_mq_run_hw_queue - Run a hardware queue.
 * @hctx: Pointer to the hardware queue to run.
 *
 * Send pending requests to the hardware.
 */
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	/*
	 * We can't run the queue inline with ints disabled. Ensure that
	 * we catch bad users of this early.
	 */
	WARN_ON_ONCE(in_interrupt());

	blk_mq_run_dispatch_ops(hctx->queue,
			blk_mq_sched_dispatch_requests(hctx));
}

static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
{
	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(hctx->cpumask);
	return cpu;
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
2036 */ 2037 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) 2038 { 2039 bool tried = false; 2040 int next_cpu = hctx->next_cpu; 2041 2042 if (hctx->queue->nr_hw_queues == 1) 2043 return WORK_CPU_UNBOUND; 2044 2045 if (--hctx->next_cpu_batch <= 0) { 2046 select_cpu: 2047 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask, 2048 cpu_online_mask); 2049 if (next_cpu >= nr_cpu_ids) 2050 next_cpu = blk_mq_first_mapped_cpu(hctx); 2051 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; 2052 } 2053 2054 /* 2055 * Do unbound schedule if we can't find a online CPU for this hctx, 2056 * and it should only happen in the path of handling CPU DEAD. 2057 */ 2058 if (!cpu_online(next_cpu)) { 2059 if (!tried) { 2060 tried = true; 2061 goto select_cpu; 2062 } 2063 2064 /* 2065 * Make sure to re-select CPU next time once after CPUs 2066 * in hctx->cpumask become online again. 2067 */ 2068 hctx->next_cpu = next_cpu; 2069 hctx->next_cpu_batch = 1; 2070 return WORK_CPU_UNBOUND; 2071 } 2072 2073 hctx->next_cpu = next_cpu; 2074 return next_cpu; 2075 } 2076 2077 /** 2078 * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue. 2079 * @hctx: Pointer to the hardware queue to run. 2080 * @async: If we want to run the queue asynchronously. 2081 * @msecs: Milliseconds of delay to wait before running the queue. 2082 * 2083 * If !@async, try to run the queue now. Else, run the queue asynchronously and 2084 * with a delay of @msecs. 2085 */ 2086 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, 2087 unsigned long msecs) 2088 { 2089 if (unlikely(blk_mq_hctx_stopped(hctx))) 2090 return; 2091 2092 if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) { 2093 if (cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) { 2094 __blk_mq_run_hw_queue(hctx); 2095 return; 2096 } 2097 } 2098 2099 kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work, 2100 msecs_to_jiffies(msecs)); 2101 } 2102 2103 /** 2104 * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously. 2105 * @hctx: Pointer to the hardware queue to run. 2106 * @msecs: Milliseconds of delay to wait before running the queue. 2107 * 2108 * Run a hardware queue asynchronously with a delay of @msecs. 2109 */ 2110 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) 2111 { 2112 __blk_mq_delay_run_hw_queue(hctx, true, msecs); 2113 } 2114 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue); 2115 2116 /** 2117 * blk_mq_run_hw_queue - Start to run a hardware queue. 2118 * @hctx: Pointer to the hardware queue to run. 2119 * @async: If we want to run the queue asynchronously. 2120 * 2121 * Check if the request queue is not in a quiesced state and if there are 2122 * pending requests to be sent. If this is true, run the queue to send requests 2123 * to hardware. 2124 */ 2125 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) 2126 { 2127 bool need_run; 2128 2129 /* 2130 * When queue is quiesced, we may be switching io scheduler, or 2131 * updating nr_hw_queues, or other things, and we can't run queue 2132 * any more, even __blk_mq_hctx_has_pending() can't be called safely. 2133 * 2134 * And queue will be rerun in blk_mq_unquiesce_queue() if it is 2135 * quiesced. 
2136 */ 2137 __blk_mq_run_dispatch_ops(hctx->queue, false, 2138 need_run = !blk_queue_quiesced(hctx->queue) && 2139 blk_mq_hctx_has_pending(hctx)); 2140 2141 if (need_run) 2142 __blk_mq_delay_run_hw_queue(hctx, async, 0); 2143 } 2144 EXPORT_SYMBOL(blk_mq_run_hw_queue); 2145 2146 /* 2147 * Return prefered queue to dispatch from (if any) for non-mq aware IO 2148 * scheduler. 2149 */ 2150 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q) 2151 { 2152 struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); 2153 /* 2154 * If the IO scheduler does not respect hardware queues when 2155 * dispatching, we just don't bother with multiple HW queues and 2156 * dispatch from hctx for the current CPU since running multiple queues 2157 * just causes lock contention inside the scheduler and pointless cache 2158 * bouncing. 2159 */ 2160 struct blk_mq_hw_ctx *hctx = ctx->hctxs[HCTX_TYPE_DEFAULT]; 2161 2162 if (!blk_mq_hctx_stopped(hctx)) 2163 return hctx; 2164 return NULL; 2165 } 2166 2167 /** 2168 * blk_mq_run_hw_queues - Run all hardware queues in a request queue. 2169 * @q: Pointer to the request queue to run. 2170 * @async: If we want to run the queue asynchronously. 2171 */ 2172 void blk_mq_run_hw_queues(struct request_queue *q, bool async) 2173 { 2174 struct blk_mq_hw_ctx *hctx, *sq_hctx; 2175 unsigned long i; 2176 2177 sq_hctx = NULL; 2178 if (blk_queue_sq_sched(q)) 2179 sq_hctx = blk_mq_get_sq_hctx(q); 2180 queue_for_each_hw_ctx(q, hctx, i) { 2181 if (blk_mq_hctx_stopped(hctx)) 2182 continue; 2183 /* 2184 * Dispatch from this hctx either if there's no hctx preferred 2185 * by IO scheduler or if it has requests that bypass the 2186 * scheduler. 2187 */ 2188 if (!sq_hctx || sq_hctx == hctx || 2189 !list_empty_careful(&hctx->dispatch)) 2190 blk_mq_run_hw_queue(hctx, async); 2191 } 2192 } 2193 EXPORT_SYMBOL(blk_mq_run_hw_queues); 2194 2195 /** 2196 * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously. 2197 * @q: Pointer to the request queue to run. 2198 * @msecs: Milliseconds of delay to wait before running the queues. 2199 */ 2200 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs) 2201 { 2202 struct blk_mq_hw_ctx *hctx, *sq_hctx; 2203 unsigned long i; 2204 2205 sq_hctx = NULL; 2206 if (blk_queue_sq_sched(q)) 2207 sq_hctx = blk_mq_get_sq_hctx(q); 2208 queue_for_each_hw_ctx(q, hctx, i) { 2209 if (blk_mq_hctx_stopped(hctx)) 2210 continue; 2211 /* 2212 * If there is already a run_work pending, leave the 2213 * pending delay untouched. Otherwise, a hctx can stall 2214 * if another hctx is re-delaying the other's work 2215 * before the work executes. 2216 */ 2217 if (delayed_work_pending(&hctx->run_work)) 2218 continue; 2219 /* 2220 * Dispatch from this hctx either if there's no hctx preferred 2221 * by IO scheduler or if it has requests that bypass the 2222 * scheduler. 2223 */ 2224 if (!sq_hctx || sq_hctx == hctx || 2225 !list_empty_careful(&hctx->dispatch)) 2226 blk_mq_delay_run_hw_queue(hctx, msecs); 2227 } 2228 } 2229 EXPORT_SYMBOL(blk_mq_delay_run_hw_queues); 2230 2231 /** 2232 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped 2233 * @q: request queue. 2234 * 2235 * The caller is responsible for serializing this function against 2236 * blk_mq_{start,stop}_hw_queue(). 
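 *
 * One illustrative use is a stacking driver that stopped its hardware
 * queues around a reconfiguration and wants to kick them afterwards:
 *
 *	if (blk_mq_queue_stopped(q))
 *		blk_mq_start_stopped_hw_queues(q, true);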
2237 */ 2238 bool blk_mq_queue_stopped(struct request_queue *q) 2239 { 2240 struct blk_mq_hw_ctx *hctx; 2241 unsigned long i; 2242 2243 queue_for_each_hw_ctx(q, hctx, i) 2244 if (blk_mq_hctx_stopped(hctx)) 2245 return true; 2246 2247 return false; 2248 } 2249 EXPORT_SYMBOL(blk_mq_queue_stopped); 2250 2251 /* 2252 * This function is often used for pausing .queue_rq() by driver when 2253 * there isn't enough resource or some conditions aren't satisfied, and 2254 * BLK_STS_RESOURCE is usually returned. 2255 * 2256 * We do not guarantee that dispatch can be drained or blocked 2257 * after blk_mq_stop_hw_queue() returns. Please use 2258 * blk_mq_quiesce_queue() for that requirement. 2259 */ 2260 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) 2261 { 2262 cancel_delayed_work(&hctx->run_work); 2263 2264 set_bit(BLK_MQ_S_STOPPED, &hctx->state); 2265 } 2266 EXPORT_SYMBOL(blk_mq_stop_hw_queue); 2267 2268 /* 2269 * This function is often used for pausing .queue_rq() by driver when 2270 * there isn't enough resource or some conditions aren't satisfied, and 2271 * BLK_STS_RESOURCE is usually returned. 2272 * 2273 * We do not guarantee that dispatch can be drained or blocked 2274 * after blk_mq_stop_hw_queues() returns. Please use 2275 * blk_mq_quiesce_queue() for that requirement. 2276 */ 2277 void blk_mq_stop_hw_queues(struct request_queue *q) 2278 { 2279 struct blk_mq_hw_ctx *hctx; 2280 unsigned long i; 2281 2282 queue_for_each_hw_ctx(q, hctx, i) 2283 blk_mq_stop_hw_queue(hctx); 2284 } 2285 EXPORT_SYMBOL(blk_mq_stop_hw_queues); 2286 2287 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) 2288 { 2289 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 2290 2291 blk_mq_run_hw_queue(hctx, false); 2292 } 2293 EXPORT_SYMBOL(blk_mq_start_hw_queue); 2294 2295 void blk_mq_start_hw_queues(struct request_queue *q) 2296 { 2297 struct blk_mq_hw_ctx *hctx; 2298 unsigned long i; 2299 2300 queue_for_each_hw_ctx(q, hctx, i) 2301 blk_mq_start_hw_queue(hctx); 2302 } 2303 EXPORT_SYMBOL(blk_mq_start_hw_queues); 2304 2305 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) 2306 { 2307 if (!blk_mq_hctx_stopped(hctx)) 2308 return; 2309 2310 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 2311 blk_mq_run_hw_queue(hctx, async); 2312 } 2313 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue); 2314 2315 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) 2316 { 2317 struct blk_mq_hw_ctx *hctx; 2318 unsigned long i; 2319 2320 queue_for_each_hw_ctx(q, hctx, i) 2321 blk_mq_start_stopped_hw_queue(hctx, async); 2322 } 2323 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues); 2324 2325 static void blk_mq_run_work_fn(struct work_struct *work) 2326 { 2327 struct blk_mq_hw_ctx *hctx; 2328 2329 hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work); 2330 2331 /* 2332 * If we are stopped, don't run the queue. 
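 *
 * A stopped hctx stays dormant until the driver restarts it: a driver
 * that called blk_mq_stop_hw_queue() after running out of resources
 * typically calls blk_mq_start_stopped_hw_queues(q, true) from its
 * completion path once resources are available again (illustrative
 * pattern; see the comments above blk_mq_stop_hw_queue()).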
2333 */ 2334 if (blk_mq_hctx_stopped(hctx)) 2335 return; 2336 2337 __blk_mq_run_hw_queue(hctx); 2338 } 2339 2340 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx, 2341 struct request *rq, 2342 bool at_head) 2343 { 2344 struct blk_mq_ctx *ctx = rq->mq_ctx; 2345 enum hctx_type type = hctx->type; 2346 2347 lockdep_assert_held(&ctx->lock); 2348 2349 trace_block_rq_insert(rq); 2350 2351 if (at_head) 2352 list_add(&rq->queuelist, &ctx->rq_lists[type]); 2353 else 2354 list_add_tail(&rq->queuelist, &ctx->rq_lists[type]); 2355 } 2356 2357 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, 2358 bool at_head) 2359 { 2360 struct blk_mq_ctx *ctx = rq->mq_ctx; 2361 2362 lockdep_assert_held(&ctx->lock); 2363 2364 __blk_mq_insert_req_list(hctx, rq, at_head); 2365 blk_mq_hctx_mark_pending(hctx, ctx); 2366 } 2367 2368 /** 2369 * blk_mq_request_bypass_insert - Insert a request at dispatch list. 2370 * @rq: Pointer to request to be inserted. 2371 * @at_head: true if the request should be inserted at the head of the list. 2372 * @run_queue: If we should run the hardware queue after inserting the request. 2373 * 2374 * Should only be used carefully, when the caller knows we want to 2375 * bypass a potential IO scheduler on the target device. 2376 */ 2377 void blk_mq_request_bypass_insert(struct request *rq, bool at_head, 2378 bool run_queue) 2379 { 2380 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 2381 2382 spin_lock(&hctx->lock); 2383 if (at_head) 2384 list_add(&rq->queuelist, &hctx->dispatch); 2385 else 2386 list_add_tail(&rq->queuelist, &hctx->dispatch); 2387 spin_unlock(&hctx->lock); 2388 2389 if (run_queue) 2390 blk_mq_run_hw_queue(hctx, false); 2391 } 2392 2393 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, 2394 struct list_head *list) 2395 2396 { 2397 struct request *rq; 2398 enum hctx_type type = hctx->type; 2399 2400 /* 2401 * preemption doesn't flush plug list, so it's possible ctx->cpu is 2402 * offline now 2403 */ 2404 list_for_each_entry(rq, list, queuelist) { 2405 BUG_ON(rq->mq_ctx != ctx); 2406 trace_block_rq_insert(rq); 2407 } 2408 2409 spin_lock(&ctx->lock); 2410 list_splice_tail_init(list, &ctx->rq_lists[type]); 2411 blk_mq_hctx_mark_pending(hctx, ctx); 2412 spin_unlock(&ctx->lock); 2413 } 2414 2415 static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int *queued, 2416 bool from_schedule) 2417 { 2418 if (hctx->queue->mq_ops->commit_rqs) { 2419 trace_block_unplug(hctx->queue, *queued, !from_schedule); 2420 hctx->queue->mq_ops->commit_rqs(hctx); 2421 } 2422 *queued = 0; 2423 } 2424 2425 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio, 2426 unsigned int nr_segs) 2427 { 2428 int err; 2429 2430 if (bio->bi_opf & REQ_RAHEAD) 2431 rq->cmd_flags |= REQ_FAILFAST_MASK; 2432 2433 rq->__sector = bio->bi_iter.bi_sector; 2434 blk_rq_bio_prep(rq, bio, nr_segs); 2435 2436 /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */ 2437 err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO); 2438 WARN_ON_ONCE(err); 2439 2440 blk_account_io_start(rq); 2441 } 2442 2443 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, 2444 struct request *rq, bool last) 2445 { 2446 struct request_queue *q = rq->q; 2447 struct blk_mq_queue_data bd = { 2448 .rq = rq, 2449 .last = last, 2450 }; 2451 blk_status_t ret; 2452 2453 /* 2454 * For OK queue, we are done. For error, caller may kill it. 2455 * Any other error (busy), just add it to our list as we 2456 * previously would have done. 
2457 */ 2458 ret = q->mq_ops->queue_rq(hctx, &bd); 2459 switch (ret) { 2460 case BLK_STS_OK: 2461 blk_mq_update_dispatch_busy(hctx, false); 2462 break; 2463 case BLK_STS_RESOURCE: 2464 case BLK_STS_DEV_RESOURCE: 2465 blk_mq_update_dispatch_busy(hctx, true); 2466 __blk_mq_requeue_request(rq); 2467 break; 2468 default: 2469 blk_mq_update_dispatch_busy(hctx, false); 2470 break; 2471 } 2472 2473 return ret; 2474 } 2475 2476 static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, 2477 struct request *rq, 2478 bool bypass_insert, bool last) 2479 { 2480 struct request_queue *q = rq->q; 2481 bool run_queue = true; 2482 int budget_token; 2483 2484 /* 2485 * RCU or SRCU read lock is needed before checking quiesced flag. 2486 * 2487 * When queue is stopped or quiesced, ignore 'bypass_insert' from 2488 * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller, 2489 * and avoid driver to try to dispatch again. 2490 */ 2491 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) { 2492 run_queue = false; 2493 bypass_insert = false; 2494 goto insert; 2495 } 2496 2497 if ((rq->rq_flags & RQF_ELV) && !bypass_insert) 2498 goto insert; 2499 2500 budget_token = blk_mq_get_dispatch_budget(q); 2501 if (budget_token < 0) 2502 goto insert; 2503 2504 blk_mq_set_rq_budget_token(rq, budget_token); 2505 2506 if (!blk_mq_get_driver_tag(rq)) { 2507 blk_mq_put_dispatch_budget(q, budget_token); 2508 goto insert; 2509 } 2510 2511 return __blk_mq_issue_directly(hctx, rq, last); 2512 insert: 2513 if (bypass_insert) 2514 return BLK_STS_RESOURCE; 2515 2516 blk_mq_sched_insert_request(rq, false, run_queue, false); 2517 2518 return BLK_STS_OK; 2519 } 2520 2521 /** 2522 * blk_mq_try_issue_directly - Try to send a request directly to device driver. 2523 * @hctx: Pointer of the associated hardware queue. 2524 * @rq: Pointer to request to be sent. 2525 * 2526 * If the device has enough resources to accept a new request now, send the 2527 * request directly to device driver. Else, insert at hctx->dispatch queue, so 2528 * we can try send it another time in the future. Requests inserted at this 2529 * queue have higher priority. 
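 *
 * This path is typically reached from blk_mq_submit_bio() for requests
 * that are not plugged, have no elevator attached and whose hctx is not
 * currently marked dispatch-busy.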
2530 */ 2531 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, 2532 struct request *rq) 2533 { 2534 blk_status_t ret = 2535 __blk_mq_try_issue_directly(hctx, rq, false, true); 2536 2537 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) 2538 blk_mq_request_bypass_insert(rq, false, true); 2539 else if (ret != BLK_STS_OK) 2540 blk_mq_end_request(rq, ret); 2541 } 2542 2543 static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last) 2544 { 2545 return __blk_mq_try_issue_directly(rq->mq_hctx, rq, true, last); 2546 } 2547 2548 static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule) 2549 { 2550 struct blk_mq_hw_ctx *hctx = NULL; 2551 struct request *rq; 2552 int queued = 0; 2553 int errors = 0; 2554 2555 while ((rq = rq_list_pop(&plug->mq_list))) { 2556 bool last = rq_list_empty(plug->mq_list); 2557 blk_status_t ret; 2558 2559 if (hctx != rq->mq_hctx) { 2560 if (hctx) 2561 blk_mq_commit_rqs(hctx, &queued, from_schedule); 2562 hctx = rq->mq_hctx; 2563 } 2564 2565 ret = blk_mq_request_issue_directly(rq, last); 2566 switch (ret) { 2567 case BLK_STS_OK: 2568 queued++; 2569 break; 2570 case BLK_STS_RESOURCE: 2571 case BLK_STS_DEV_RESOURCE: 2572 blk_mq_request_bypass_insert(rq, false, last); 2573 blk_mq_commit_rqs(hctx, &queued, from_schedule); 2574 return; 2575 default: 2576 blk_mq_end_request(rq, ret); 2577 errors++; 2578 break; 2579 } 2580 } 2581 2582 /* 2583 * If we didn't flush the entire list, we could have told the driver 2584 * there was more coming, but that turned out to be a lie. 2585 */ 2586 if (errors) 2587 blk_mq_commit_rqs(hctx, &queued, from_schedule); 2588 } 2589 2590 static void __blk_mq_flush_plug_list(struct request_queue *q, 2591 struct blk_plug *plug) 2592 { 2593 if (blk_queue_quiesced(q)) 2594 return; 2595 q->mq_ops->queue_rqs(&plug->mq_list); 2596 } 2597 2598 static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched) 2599 { 2600 struct blk_mq_hw_ctx *this_hctx = NULL; 2601 struct blk_mq_ctx *this_ctx = NULL; 2602 struct request *requeue_list = NULL; 2603 unsigned int depth = 0; 2604 LIST_HEAD(list); 2605 2606 do { 2607 struct request *rq = rq_list_pop(&plug->mq_list); 2608 2609 if (!this_hctx) { 2610 this_hctx = rq->mq_hctx; 2611 this_ctx = rq->mq_ctx; 2612 } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) { 2613 rq_list_add(&requeue_list, rq); 2614 continue; 2615 } 2616 list_add_tail(&rq->queuelist, &list); 2617 depth++; 2618 } while (!rq_list_empty(plug->mq_list)); 2619 2620 plug->mq_list = requeue_list; 2621 trace_block_unplug(this_hctx->queue, depth, !from_sched); 2622 blk_mq_sched_insert_requests(this_hctx, this_ctx, &list, from_sched); 2623 } 2624 2625 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) 2626 { 2627 struct request *rq; 2628 2629 if (rq_list_empty(plug->mq_list)) 2630 return; 2631 plug->rq_count = 0; 2632 2633 if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) { 2634 struct request_queue *q; 2635 2636 rq = rq_list_peek(&plug->mq_list); 2637 q = rq->q; 2638 2639 /* 2640 * Peek first request and see if we have a ->queue_rqs() hook. 2641 * If we do, we can dispatch the whole plug list in one go. We 2642 * already know at this point that all requests belong to the 2643 * same queue, caller must ensure that's the case. 2644 * 2645 * Since we pass off the full list to the driver at this point, 2646 * we do not increment the active request count for the queue. 2647 * Bypass shared tags for now because of that. 
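 *
 * For reference, a minimal sketch of the driver side of ->queue_rqs()
 * (hypothetical demo_* names, illustrative only): the driver consumes
 * what it can and leaves the rest on the list, which the code below
 * then hands to the one-by-one issue path:
 *
 *	static void demo_queue_rqs(struct request **rqlist)
 *	{
 *		struct request *rq, *next, *requeue_list = NULL;
 *
 *		rq_list_for_each_safe(rqlist, rq, next) {
 *			if (!demo_try_queue(rq))
 *				rq_list_add(&requeue_list, rq);
 *		}
 *		*rqlist = requeue_list;
 *	}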
2648 */ 2649 if (q->mq_ops->queue_rqs && 2650 !(rq->mq_hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) { 2651 blk_mq_run_dispatch_ops(q, 2652 __blk_mq_flush_plug_list(q, plug)); 2653 if (rq_list_empty(plug->mq_list)) 2654 return; 2655 } 2656 2657 blk_mq_run_dispatch_ops(q, 2658 blk_mq_plug_issue_direct(plug, false)); 2659 if (rq_list_empty(plug->mq_list)) 2660 return; 2661 } 2662 2663 do { 2664 blk_mq_dispatch_plug_list(plug, from_schedule); 2665 } while (!rq_list_empty(plug->mq_list)); 2666 } 2667 2668 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, 2669 struct list_head *list) 2670 { 2671 int queued = 0; 2672 int errors = 0; 2673 2674 while (!list_empty(list)) { 2675 blk_status_t ret; 2676 struct request *rq = list_first_entry(list, struct request, 2677 queuelist); 2678 2679 list_del_init(&rq->queuelist); 2680 ret = blk_mq_request_issue_directly(rq, list_empty(list)); 2681 if (ret != BLK_STS_OK) { 2682 if (ret == BLK_STS_RESOURCE || 2683 ret == BLK_STS_DEV_RESOURCE) { 2684 blk_mq_request_bypass_insert(rq, false, 2685 list_empty(list)); 2686 break; 2687 } 2688 blk_mq_end_request(rq, ret); 2689 errors++; 2690 } else 2691 queued++; 2692 } 2693 2694 /* 2695 * If we didn't flush the entire list, we could have told 2696 * the driver there was more coming, but that turned out to 2697 * be a lie. 2698 */ 2699 if ((!list_empty(list) || errors) && 2700 hctx->queue->mq_ops->commit_rqs && queued) 2701 hctx->queue->mq_ops->commit_rqs(hctx); 2702 } 2703 2704 static bool blk_mq_attempt_bio_merge(struct request_queue *q, 2705 struct bio *bio, unsigned int nr_segs) 2706 { 2707 if (!blk_queue_nomerges(q) && bio_mergeable(bio)) { 2708 if (blk_attempt_plug_merge(q, bio, nr_segs)) 2709 return true; 2710 if (blk_mq_sched_bio_merge(q, bio, nr_segs)) 2711 return true; 2712 } 2713 return false; 2714 } 2715 2716 static struct request *blk_mq_get_new_requests(struct request_queue *q, 2717 struct blk_plug *plug, 2718 struct bio *bio, 2719 unsigned int nsegs) 2720 { 2721 struct blk_mq_alloc_data data = { 2722 .q = q, 2723 .nr_tags = 1, 2724 .cmd_flags = bio->bi_opf, 2725 }; 2726 struct request *rq; 2727 2728 if (unlikely(bio_queue_enter(bio))) 2729 return NULL; 2730 2731 if (blk_mq_attempt_bio_merge(q, bio, nsegs)) 2732 goto queue_exit; 2733 2734 rq_qos_throttle(q, bio); 2735 2736 if (plug) { 2737 data.nr_tags = plug->nr_ios; 2738 plug->nr_ios = 1; 2739 data.cached_rq = &plug->cached_rq; 2740 } 2741 2742 rq = __blk_mq_alloc_requests(&data); 2743 if (rq) 2744 return rq; 2745 rq_qos_cleanup(q, bio); 2746 if (bio->bi_opf & REQ_NOWAIT) 2747 bio_wouldblock_error(bio); 2748 queue_exit: 2749 blk_queue_exit(q); 2750 return NULL; 2751 } 2752 2753 static inline struct request *blk_mq_get_cached_request(struct request_queue *q, 2754 struct blk_plug *plug, struct bio **bio, unsigned int nsegs) 2755 { 2756 struct request *rq; 2757 2758 if (!plug) 2759 return NULL; 2760 rq = rq_list_peek(&plug->cached_rq); 2761 if (!rq || rq->q != q) 2762 return NULL; 2763 2764 if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) { 2765 *bio = NULL; 2766 return NULL; 2767 } 2768 2769 if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type) 2770 return NULL; 2771 if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf)) 2772 return NULL; 2773 2774 /* 2775 * If any qos ->throttle() end up blocking, we will have flushed the 2776 * plug and hence killed the cached_rq list as well. Pop this entry 2777 * before we throttle. 
2778 */ 2779 plug->cached_rq = rq_list_next(rq); 2780 rq_qos_throttle(q, *bio); 2781 2782 rq->cmd_flags = (*bio)->bi_opf; 2783 INIT_LIST_HEAD(&rq->queuelist); 2784 return rq; 2785 } 2786 2787 static void bio_set_ioprio(struct bio *bio) 2788 { 2789 /* Nobody set ioprio so far? Initialize it based on task's nice value */ 2790 if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE) 2791 bio->bi_ioprio = get_current_ioprio(); 2792 blkcg_set_ioprio(bio); 2793 } 2794 2795 /** 2796 * blk_mq_submit_bio - Create and send a request to block device. 2797 * @bio: Bio pointer. 2798 * 2799 * Builds up a request structure from @q and @bio and send to the device. The 2800 * request may not be queued directly to hardware if: 2801 * * This request can be merged with another one 2802 * * We want to place request at plug queue for possible future merging 2803 * * There is an IO scheduler active at this queue 2804 * 2805 * It will not queue the request if there is an error with the bio, or at the 2806 * request creation. 2807 */ 2808 void blk_mq_submit_bio(struct bio *bio) 2809 { 2810 struct request_queue *q = bdev_get_queue(bio->bi_bdev); 2811 struct blk_plug *plug = blk_mq_plug(q, bio); 2812 const int is_sync = op_is_sync(bio->bi_opf); 2813 struct request *rq; 2814 unsigned int nr_segs = 1; 2815 blk_status_t ret; 2816 2817 blk_queue_bounce(q, &bio); 2818 if (blk_may_split(q, bio)) 2819 __blk_queue_split(q, &bio, &nr_segs); 2820 2821 if (!bio_integrity_prep(bio)) 2822 return; 2823 2824 bio_set_ioprio(bio); 2825 2826 rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs); 2827 if (!rq) { 2828 if (!bio) 2829 return; 2830 rq = blk_mq_get_new_requests(q, plug, bio, nr_segs); 2831 if (unlikely(!rq)) 2832 return; 2833 } 2834 2835 trace_block_getrq(bio); 2836 2837 rq_qos_track(q, rq, bio); 2838 2839 blk_mq_bio_to_request(rq, bio, nr_segs); 2840 2841 ret = blk_crypto_init_request(rq); 2842 if (ret != BLK_STS_OK) { 2843 bio->bi_status = ret; 2844 bio_endio(bio); 2845 blk_mq_free_request(rq); 2846 return; 2847 } 2848 2849 if (op_is_flush(bio->bi_opf)) { 2850 blk_insert_flush(rq); 2851 return; 2852 } 2853 2854 if (plug) 2855 blk_add_rq_to_plug(plug, rq); 2856 else if ((rq->rq_flags & RQF_ELV) || 2857 (rq->mq_hctx->dispatch_busy && 2858 (q->nr_hw_queues == 1 || !is_sync))) 2859 blk_mq_sched_insert_request(rq, false, true, true); 2860 else 2861 blk_mq_run_dispatch_ops(rq->q, 2862 blk_mq_try_issue_directly(rq->mq_hctx, rq)); 2863 } 2864 2865 #ifdef CONFIG_BLK_MQ_STACKING 2866 /** 2867 * blk_insert_cloned_request - Helper for stacking drivers to submit a request 2868 * @rq: the request being queued 2869 */ 2870 blk_status_t blk_insert_cloned_request(struct request *rq) 2871 { 2872 struct request_queue *q = rq->q; 2873 unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq)); 2874 blk_status_t ret; 2875 2876 if (blk_rq_sectors(rq) > max_sectors) { 2877 /* 2878 * SCSI device does not have a good way to return if 2879 * Write Same/Zero is actually supported. If a device rejects 2880 * a non-read/write command (discard, write same,etc.) the 2881 * low-level device driver will set the relevant queue limit to 2882 * 0 to prevent blk-lib from issuing more of the offending 2883 * operations. Commands queued prior to the queue limit being 2884 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O 2885 * errors being propagated to upper layers. 2886 */ 2887 if (max_sectors == 0) 2888 return BLK_STS_NOTSUPP; 2889 2890 printk(KERN_ERR "%s: over max size limit. 
(%u > %u)\n", 2891 __func__, blk_rq_sectors(rq), max_sectors); 2892 return BLK_STS_IOERR; 2893 } 2894 2895 /* 2896 * The queue settings related to segment counting may differ from the 2897 * original queue. 2898 */ 2899 rq->nr_phys_segments = blk_recalc_rq_segments(rq); 2900 if (rq->nr_phys_segments > queue_max_segments(q)) { 2901 printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n", 2902 __func__, rq->nr_phys_segments, queue_max_segments(q)); 2903 return BLK_STS_IOERR; 2904 } 2905 2906 if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq))) 2907 return BLK_STS_IOERR; 2908 2909 if (blk_crypto_insert_cloned_request(rq)) 2910 return BLK_STS_IOERR; 2911 2912 blk_account_io_start(rq); 2913 2914 /* 2915 * Since we have a scheduler attached on the top device, 2916 * bypass a potential scheduler on the bottom device for 2917 * insert. 2918 */ 2919 blk_mq_run_dispatch_ops(q, 2920 ret = blk_mq_request_issue_directly(rq, true)); 2921 if (ret) 2922 blk_account_io_done(rq, ktime_get_ns()); 2923 return ret; 2924 } 2925 EXPORT_SYMBOL_GPL(blk_insert_cloned_request); 2926 2927 /** 2928 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request 2929 * @rq: the clone request to be cleaned up 2930 * 2931 * Description: 2932 * Free all bios in @rq for a cloned request. 2933 */ 2934 void blk_rq_unprep_clone(struct request *rq) 2935 { 2936 struct bio *bio; 2937 2938 while ((bio = rq->bio) != NULL) { 2939 rq->bio = bio->bi_next; 2940 2941 bio_put(bio); 2942 } 2943 } 2944 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); 2945 2946 /** 2947 * blk_rq_prep_clone - Helper function to setup clone request 2948 * @rq: the request to be setup 2949 * @rq_src: original request to be cloned 2950 * @bs: bio_set that bios for clone are allocated from 2951 * @gfp_mask: memory allocation mask for bio 2952 * @bio_ctr: setup function to be called for each clone bio. 2953 * Returns %0 for success, non %0 for failure. 2954 * @data: private data to be passed to @bio_ctr 2955 * 2956 * Description: 2957 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. 2958 * Also, pages which the original bios are pointing to are not copied 2959 * and the cloned bios just point same pages. 2960 * So cloned bios must be completed before original bios, which means 2961 * the caller must complete @rq before @rq_src. 2962 */ 2963 int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 2964 struct bio_set *bs, gfp_t gfp_mask, 2965 int (*bio_ctr)(struct bio *, struct bio *, void *), 2966 void *data) 2967 { 2968 struct bio *bio, *bio_src; 2969 2970 if (!bs) 2971 bs = &fs_bio_set; 2972 2973 __rq_for_each_bio(bio_src, rq_src) { 2974 bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask, 2975 bs); 2976 if (!bio) 2977 goto free_and_out; 2978 2979 if (bio_ctr && bio_ctr(bio, bio_src, data)) 2980 goto free_and_out; 2981 2982 if (rq->bio) { 2983 rq->biotail->bi_next = bio; 2984 rq->biotail = bio; 2985 } else { 2986 rq->bio = rq->biotail = bio; 2987 } 2988 bio = NULL; 2989 } 2990 2991 /* Copy attributes of the original request to the clone request. 
*/ 2992 rq->__sector = blk_rq_pos(rq_src); 2993 rq->__data_len = blk_rq_bytes(rq_src); 2994 if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) { 2995 rq->rq_flags |= RQF_SPECIAL_PAYLOAD; 2996 rq->special_vec = rq_src->special_vec; 2997 } 2998 rq->nr_phys_segments = rq_src->nr_phys_segments; 2999 rq->ioprio = rq_src->ioprio; 3000 3001 if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0) 3002 goto free_and_out; 3003 3004 return 0; 3005 3006 free_and_out: 3007 if (bio) 3008 bio_put(bio); 3009 blk_rq_unprep_clone(rq); 3010 3011 return -ENOMEM; 3012 } 3013 EXPORT_SYMBOL_GPL(blk_rq_prep_clone); 3014 #endif /* CONFIG_BLK_MQ_STACKING */ 3015 3016 /* 3017 * Steal bios from a request and add them to a bio list. 3018 * The request must not have been partially completed before. 3019 */ 3020 void blk_steal_bios(struct bio_list *list, struct request *rq) 3021 { 3022 if (rq->bio) { 3023 if (list->tail) 3024 list->tail->bi_next = rq->bio; 3025 else 3026 list->head = rq->bio; 3027 list->tail = rq->biotail; 3028 3029 rq->bio = NULL; 3030 rq->biotail = NULL; 3031 } 3032 3033 rq->__data_len = 0; 3034 } 3035 EXPORT_SYMBOL_GPL(blk_steal_bios); 3036 3037 static size_t order_to_size(unsigned int order) 3038 { 3039 return (size_t)PAGE_SIZE << order; 3040 } 3041 3042 /* called before freeing request pool in @tags */ 3043 static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags, 3044 struct blk_mq_tags *tags) 3045 { 3046 struct page *page; 3047 unsigned long flags; 3048 3049 /* There is no need to clear a driver tags own mapping */ 3050 if (drv_tags == tags) 3051 return; 3052 3053 list_for_each_entry(page, &tags->page_list, lru) { 3054 unsigned long start = (unsigned long)page_address(page); 3055 unsigned long end = start + order_to_size(page->private); 3056 int i; 3057 3058 for (i = 0; i < drv_tags->nr_tags; i++) { 3059 struct request *rq = drv_tags->rqs[i]; 3060 unsigned long rq_addr = (unsigned long)rq; 3061 3062 if (rq_addr >= start && rq_addr < end) { 3063 WARN_ON_ONCE(req_ref_read(rq) != 0); 3064 cmpxchg(&drv_tags->rqs[i], rq, NULL); 3065 } 3066 } 3067 } 3068 3069 /* 3070 * Wait until all pending iteration is done. 3071 * 3072 * Request reference is cleared and it is guaranteed to be observed 3073 * after the ->lock is released. 3074 */ 3075 spin_lock_irqsave(&drv_tags->lock, flags); 3076 spin_unlock_irqrestore(&drv_tags->lock, flags); 3077 } 3078 3079 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, 3080 unsigned int hctx_idx) 3081 { 3082 struct blk_mq_tags *drv_tags; 3083 struct page *page; 3084 3085 if (list_empty(&tags->page_list)) 3086 return; 3087 3088 if (blk_mq_is_shared_tags(set->flags)) 3089 drv_tags = set->shared_tags; 3090 else 3091 drv_tags = set->tags[hctx_idx]; 3092 3093 if (tags->static_rqs && set->ops->exit_request) { 3094 int i; 3095 3096 for (i = 0; i < tags->nr_tags; i++) { 3097 struct request *rq = tags->static_rqs[i]; 3098 3099 if (!rq) 3100 continue; 3101 set->ops->exit_request(set, rq, hctx_idx); 3102 tags->static_rqs[i] = NULL; 3103 } 3104 } 3105 3106 blk_mq_clear_rq_mapping(drv_tags, tags); 3107 3108 while (!list_empty(&tags->page_list)) { 3109 page = list_first_entry(&tags->page_list, struct page, lru); 3110 list_del_init(&page->lru); 3111 /* 3112 * Remove kmemleak object previously allocated in 3113 * blk_mq_alloc_rqs(). 
3114 */ 3115 kmemleak_free(page_address(page)); 3116 __free_pages(page, page->private); 3117 } 3118 } 3119 3120 void blk_mq_free_rq_map(struct blk_mq_tags *tags) 3121 { 3122 kfree(tags->rqs); 3123 tags->rqs = NULL; 3124 kfree(tags->static_rqs); 3125 tags->static_rqs = NULL; 3126 3127 blk_mq_free_tags(tags); 3128 } 3129 3130 static enum hctx_type hctx_idx_to_type(struct blk_mq_tag_set *set, 3131 unsigned int hctx_idx) 3132 { 3133 int i; 3134 3135 for (i = 0; i < set->nr_maps; i++) { 3136 unsigned int start = set->map[i].queue_offset; 3137 unsigned int end = start + set->map[i].nr_queues; 3138 3139 if (hctx_idx >= start && hctx_idx < end) 3140 break; 3141 } 3142 3143 if (i >= set->nr_maps) 3144 i = HCTX_TYPE_DEFAULT; 3145 3146 return i; 3147 } 3148 3149 static int blk_mq_get_hctx_node(struct blk_mq_tag_set *set, 3150 unsigned int hctx_idx) 3151 { 3152 enum hctx_type type = hctx_idx_to_type(set, hctx_idx); 3153 3154 return blk_mq_hw_queue_to_node(&set->map[type], hctx_idx); 3155 } 3156 3157 static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, 3158 unsigned int hctx_idx, 3159 unsigned int nr_tags, 3160 unsigned int reserved_tags) 3161 { 3162 int node = blk_mq_get_hctx_node(set, hctx_idx); 3163 struct blk_mq_tags *tags; 3164 3165 if (node == NUMA_NO_NODE) 3166 node = set->numa_node; 3167 3168 tags = blk_mq_init_tags(nr_tags, reserved_tags, node, 3169 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags)); 3170 if (!tags) 3171 return NULL; 3172 3173 tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *), 3174 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, 3175 node); 3176 if (!tags->rqs) { 3177 blk_mq_free_tags(tags); 3178 return NULL; 3179 } 3180 3181 tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *), 3182 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, 3183 node); 3184 if (!tags->static_rqs) { 3185 kfree(tags->rqs); 3186 blk_mq_free_tags(tags); 3187 return NULL; 3188 } 3189 3190 return tags; 3191 } 3192 3193 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, 3194 unsigned int hctx_idx, int node) 3195 { 3196 int ret; 3197 3198 if (set->ops->init_request) { 3199 ret = set->ops->init_request(set, rq, hctx_idx, node); 3200 if (ret) 3201 return ret; 3202 } 3203 3204 WRITE_ONCE(rq->state, MQ_RQ_IDLE); 3205 return 0; 3206 } 3207 3208 static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, 3209 struct blk_mq_tags *tags, 3210 unsigned int hctx_idx, unsigned int depth) 3211 { 3212 unsigned int i, j, entries_per_page, max_order = 4; 3213 int node = blk_mq_get_hctx_node(set, hctx_idx); 3214 size_t rq_size, left; 3215 3216 if (node == NUMA_NO_NODE) 3217 node = set->numa_node; 3218 3219 INIT_LIST_HEAD(&tags->page_list); 3220 3221 /* 3222 * rq_size is the size of the request plus driver payload, rounded 3223 * to the cacheline size 3224 */ 3225 rq_size = round_up(sizeof(struct request) + set->cmd_size, 3226 cache_line_size()); 3227 left = rq_size * depth; 3228 3229 for (i = 0; i < depth; ) { 3230 int this_order = max_order; 3231 struct page *page; 3232 int to_do; 3233 void *p; 3234 3235 while (this_order && left < order_to_size(this_order - 1)) 3236 this_order--; 3237 3238 do { 3239 page = alloc_pages_node(node, 3240 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO, 3241 this_order); 3242 if (page) 3243 break; 3244 if (!this_order--) 3245 break; 3246 if (order_to_size(this_order) < rq_size) 3247 break; 3248 } while (1); 3249 3250 if (!page) 3251 goto fail; 3252 3253 page->private = this_order; 3254 list_add_tail(&page->lru, &tags->page_list); 3255 3256 p = 
page_address(page); 3257 /* 3258 * Allow kmemleak to scan these pages as they contain pointers 3259 * to additional allocations like via ops->init_request(). 3260 */ 3261 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO); 3262 entries_per_page = order_to_size(this_order) / rq_size; 3263 to_do = min(entries_per_page, depth - i); 3264 left -= to_do * rq_size; 3265 for (j = 0; j < to_do; j++) { 3266 struct request *rq = p; 3267 3268 tags->static_rqs[i] = rq; 3269 if (blk_mq_init_request(set, rq, hctx_idx, node)) { 3270 tags->static_rqs[i] = NULL; 3271 goto fail; 3272 } 3273 3274 p += rq_size; 3275 i++; 3276 } 3277 } 3278 return 0; 3279 3280 fail: 3281 blk_mq_free_rqs(set, tags, hctx_idx); 3282 return -ENOMEM; 3283 } 3284 3285 struct rq_iter_data { 3286 struct blk_mq_hw_ctx *hctx; 3287 bool has_rq; 3288 }; 3289 3290 static bool blk_mq_has_request(struct request *rq, void *data) 3291 { 3292 struct rq_iter_data *iter_data = data; 3293 3294 if (rq->mq_hctx != iter_data->hctx) 3295 return true; 3296 iter_data->has_rq = true; 3297 return false; 3298 } 3299 3300 static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx) 3301 { 3302 struct blk_mq_tags *tags = hctx->sched_tags ? 3303 hctx->sched_tags : hctx->tags; 3304 struct rq_iter_data data = { 3305 .hctx = hctx, 3306 }; 3307 3308 blk_mq_all_tag_iter(tags, blk_mq_has_request, &data); 3309 return data.has_rq; 3310 } 3311 3312 static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu, 3313 struct blk_mq_hw_ctx *hctx) 3314 { 3315 if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu) 3316 return false; 3317 if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids) 3318 return false; 3319 return true; 3320 } 3321 3322 static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node) 3323 { 3324 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, 3325 struct blk_mq_hw_ctx, cpuhp_online); 3326 3327 if (!cpumask_test_cpu(cpu, hctx->cpumask) || 3328 !blk_mq_last_cpu_in_hctx(cpu, hctx)) 3329 return 0; 3330 3331 /* 3332 * Prevent new request from being allocated on the current hctx. 3333 * 3334 * The smp_mb__after_atomic() Pairs with the implied barrier in 3335 * test_and_set_bit_lock in sbitmap_get(). Ensures the inactive flag is 3336 * seen once we return from the tag allocator. 3337 */ 3338 set_bit(BLK_MQ_S_INACTIVE, &hctx->state); 3339 smp_mb__after_atomic(); 3340 3341 /* 3342 * Try to grab a reference to the queue and wait for any outstanding 3343 * requests. If we could not grab a reference the queue has been 3344 * frozen and there are no requests. 3345 */ 3346 if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) { 3347 while (blk_mq_hctx_has_requests(hctx)) 3348 msleep(5); 3349 percpu_ref_put(&hctx->queue->q_usage_counter); 3350 } 3351 3352 return 0; 3353 } 3354 3355 static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node) 3356 { 3357 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, 3358 struct blk_mq_hw_ctx, cpuhp_online); 3359 3360 if (cpumask_test_cpu(cpu, hctx->cpumask)) 3361 clear_bit(BLK_MQ_S_INACTIVE, &hctx->state); 3362 return 0; 3363 } 3364 3365 /* 3366 * 'cpu' is going away. splice any existing rq_list entries from this 3367 * software queue to the hw queue dispatch list, and ensure that it 3368 * gets run. 
3369 */ 3370 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node) 3371 { 3372 struct blk_mq_hw_ctx *hctx; 3373 struct blk_mq_ctx *ctx; 3374 LIST_HEAD(tmp); 3375 enum hctx_type type; 3376 3377 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead); 3378 if (!cpumask_test_cpu(cpu, hctx->cpumask)) 3379 return 0; 3380 3381 ctx = __blk_mq_get_ctx(hctx->queue, cpu); 3382 type = hctx->type; 3383 3384 spin_lock(&ctx->lock); 3385 if (!list_empty(&ctx->rq_lists[type])) { 3386 list_splice_init(&ctx->rq_lists[type], &tmp); 3387 blk_mq_hctx_clear_pending(hctx, ctx); 3388 } 3389 spin_unlock(&ctx->lock); 3390 3391 if (list_empty(&tmp)) 3392 return 0; 3393 3394 spin_lock(&hctx->lock); 3395 list_splice_tail_init(&tmp, &hctx->dispatch); 3396 spin_unlock(&hctx->lock); 3397 3398 blk_mq_run_hw_queue(hctx, true); 3399 return 0; 3400 } 3401 3402 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx) 3403 { 3404 if (!(hctx->flags & BLK_MQ_F_STACKING)) 3405 cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, 3406 &hctx->cpuhp_online); 3407 cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD, 3408 &hctx->cpuhp_dead); 3409 } 3410 3411 /* 3412 * Before freeing hw queue, clearing the flush request reference in 3413 * tags->rqs[] for avoiding potential UAF. 3414 */ 3415 static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags, 3416 unsigned int queue_depth, struct request *flush_rq) 3417 { 3418 int i; 3419 unsigned long flags; 3420 3421 /* The hw queue may not be mapped yet */ 3422 if (!tags) 3423 return; 3424 3425 WARN_ON_ONCE(req_ref_read(flush_rq) != 0); 3426 3427 for (i = 0; i < queue_depth; i++) 3428 cmpxchg(&tags->rqs[i], flush_rq, NULL); 3429 3430 /* 3431 * Wait until all pending iteration is done. 3432 * 3433 * Request reference is cleared and it is guaranteed to be observed 3434 * after the ->lock is released. 
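 *
 * The lock/unlock pair below protects no data by itself; it only acts
 * as a synchronization point so that any tag iteration still holding
 * tags->lock has finished before the flush request is released.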
3435 */ 3436 spin_lock_irqsave(&tags->lock, flags); 3437 spin_unlock_irqrestore(&tags->lock, flags); 3438 } 3439 3440 /* hctx->ctxs will be freed in queue's release handler */ 3441 static void blk_mq_exit_hctx(struct request_queue *q, 3442 struct blk_mq_tag_set *set, 3443 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) 3444 { 3445 struct request *flush_rq = hctx->fq->flush_rq; 3446 3447 if (blk_mq_hw_queue_mapped(hctx)) 3448 blk_mq_tag_idle(hctx); 3449 3450 if (blk_queue_init_done(q)) 3451 blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx], 3452 set->queue_depth, flush_rq); 3453 if (set->ops->exit_request) 3454 set->ops->exit_request(set, flush_rq, hctx_idx); 3455 3456 if (set->ops->exit_hctx) 3457 set->ops->exit_hctx(hctx, hctx_idx); 3458 3459 blk_mq_remove_cpuhp(hctx); 3460 3461 xa_erase(&q->hctx_table, hctx_idx); 3462 3463 spin_lock(&q->unused_hctx_lock); 3464 list_add(&hctx->hctx_list, &q->unused_hctx_list); 3465 spin_unlock(&q->unused_hctx_lock); 3466 } 3467 3468 static void blk_mq_exit_hw_queues(struct request_queue *q, 3469 struct blk_mq_tag_set *set, int nr_queue) 3470 { 3471 struct blk_mq_hw_ctx *hctx; 3472 unsigned long i; 3473 3474 queue_for_each_hw_ctx(q, hctx, i) { 3475 if (i == nr_queue) 3476 break; 3477 blk_mq_exit_hctx(q, set, hctx, i); 3478 } 3479 } 3480 3481 static int blk_mq_init_hctx(struct request_queue *q, 3482 struct blk_mq_tag_set *set, 3483 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) 3484 { 3485 hctx->queue_num = hctx_idx; 3486 3487 if (!(hctx->flags & BLK_MQ_F_STACKING)) 3488 cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, 3489 &hctx->cpuhp_online); 3490 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead); 3491 3492 hctx->tags = set->tags[hctx_idx]; 3493 3494 if (set->ops->init_hctx && 3495 set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) 3496 goto unregister_cpu_notifier; 3497 3498 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, 3499 hctx->numa_node)) 3500 goto exit_hctx; 3501 3502 if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL)) 3503 goto exit_flush_rq; 3504 3505 return 0; 3506 3507 exit_flush_rq: 3508 if (set->ops->exit_request) 3509 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx); 3510 exit_hctx: 3511 if (set->ops->exit_hctx) 3512 set->ops->exit_hctx(hctx, hctx_idx); 3513 unregister_cpu_notifier: 3514 blk_mq_remove_cpuhp(hctx); 3515 return -1; 3516 } 3517 3518 static struct blk_mq_hw_ctx * 3519 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set, 3520 int node) 3521 { 3522 struct blk_mq_hw_ctx *hctx; 3523 gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY; 3524 3525 hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node); 3526 if (!hctx) 3527 goto fail_alloc_hctx; 3528 3529 if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node)) 3530 goto free_hctx; 3531 3532 atomic_set(&hctx->nr_active, 0); 3533 if (node == NUMA_NO_NODE) 3534 node = set->numa_node; 3535 hctx->numa_node = node; 3536 3537 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); 3538 spin_lock_init(&hctx->lock); 3539 INIT_LIST_HEAD(&hctx->dispatch); 3540 hctx->queue = q; 3541 hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED; 3542 3543 INIT_LIST_HEAD(&hctx->hctx_list); 3544 3545 /* 3546 * Allocate space for all possible cpus to avoid allocation at 3547 * runtime 3548 */ 3549 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *), 3550 gfp, node); 3551 if (!hctx->ctxs) 3552 goto free_cpumask; 3553 3554 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), 3555 gfp, node, false, false)) 3556 
goto free_ctxs; 3557 hctx->nr_ctx = 0; 3558 3559 spin_lock_init(&hctx->dispatch_wait_lock); 3560 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake); 3561 INIT_LIST_HEAD(&hctx->dispatch_wait.entry); 3562 3563 hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp); 3564 if (!hctx->fq) 3565 goto free_bitmap; 3566 3567 blk_mq_hctx_kobj_init(hctx); 3568 3569 return hctx; 3570 3571 free_bitmap: 3572 sbitmap_free(&hctx->ctx_map); 3573 free_ctxs: 3574 kfree(hctx->ctxs); 3575 free_cpumask: 3576 free_cpumask_var(hctx->cpumask); 3577 free_hctx: 3578 kfree(hctx); 3579 fail_alloc_hctx: 3580 return NULL; 3581 } 3582 3583 static void blk_mq_init_cpu_queues(struct request_queue *q, 3584 unsigned int nr_hw_queues) 3585 { 3586 struct blk_mq_tag_set *set = q->tag_set; 3587 unsigned int i, j; 3588 3589 for_each_possible_cpu(i) { 3590 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); 3591 struct blk_mq_hw_ctx *hctx; 3592 int k; 3593 3594 __ctx->cpu = i; 3595 spin_lock_init(&__ctx->lock); 3596 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++) 3597 INIT_LIST_HEAD(&__ctx->rq_lists[k]); 3598 3599 __ctx->queue = q; 3600 3601 /* 3602 * Set local node, IFF we have more than one hw queue. If 3603 * not, we remain on the home node of the device 3604 */ 3605 for (j = 0; j < set->nr_maps; j++) { 3606 hctx = blk_mq_map_queue_type(q, j, i); 3607 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE) 3608 hctx->numa_node = cpu_to_node(i); 3609 } 3610 } 3611 } 3612 3613 struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, 3614 unsigned int hctx_idx, 3615 unsigned int depth) 3616 { 3617 struct blk_mq_tags *tags; 3618 int ret; 3619 3620 tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags); 3621 if (!tags) 3622 return NULL; 3623 3624 ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth); 3625 if (ret) { 3626 blk_mq_free_rq_map(tags); 3627 return NULL; 3628 } 3629 3630 return tags; 3631 } 3632 3633 static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, 3634 int hctx_idx) 3635 { 3636 if (blk_mq_is_shared_tags(set->flags)) { 3637 set->tags[hctx_idx] = set->shared_tags; 3638 3639 return true; 3640 } 3641 3642 set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx, 3643 set->queue_depth); 3644 3645 return set->tags[hctx_idx]; 3646 } 3647 3648 void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, 3649 struct blk_mq_tags *tags, 3650 unsigned int hctx_idx) 3651 { 3652 if (tags) { 3653 blk_mq_free_rqs(set, tags, hctx_idx); 3654 blk_mq_free_rq_map(tags); 3655 } 3656 } 3657 3658 static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, 3659 unsigned int hctx_idx) 3660 { 3661 if (!blk_mq_is_shared_tags(set->flags)) 3662 blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx); 3663 3664 set->tags[hctx_idx] = NULL; 3665 } 3666 3667 static void blk_mq_map_swqueue(struct request_queue *q) 3668 { 3669 unsigned int j, hctx_idx; 3670 unsigned long i; 3671 struct blk_mq_hw_ctx *hctx; 3672 struct blk_mq_ctx *ctx; 3673 struct blk_mq_tag_set *set = q->tag_set; 3674 3675 queue_for_each_hw_ctx(q, hctx, i) { 3676 cpumask_clear(hctx->cpumask); 3677 hctx->nr_ctx = 0; 3678 hctx->dispatch_from = NULL; 3679 } 3680 3681 /* 3682 * Map software to hardware queues. 3683 * 3684 * If the cpu isn't present, the cpu is mapped to first hctx. 
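 *
 * For example (illustrative), on a four-CPU machine with two hardware
 * queues the default map typically ends up as ctx0/ctx1 -> hctx0 and
 * ctx2/ctx3 -> hctx1, with each hctx->cpumask covering the CPUs of the
 * software queues mapped to it.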
3685 */ 3686 for_each_possible_cpu(i) { 3687 3688 ctx = per_cpu_ptr(q->queue_ctx, i); 3689 for (j = 0; j < set->nr_maps; j++) { 3690 if (!set->map[j].nr_queues) { 3691 ctx->hctxs[j] = blk_mq_map_queue_type(q, 3692 HCTX_TYPE_DEFAULT, i); 3693 continue; 3694 } 3695 hctx_idx = set->map[j].mq_map[i]; 3696 /* unmapped hw queue can be remapped after CPU topo changed */ 3697 if (!set->tags[hctx_idx] && 3698 !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) { 3699 /* 3700 * If tags initialization fail for some hctx, 3701 * that hctx won't be brought online. In this 3702 * case, remap the current ctx to hctx[0] which 3703 * is guaranteed to always have tags allocated 3704 */ 3705 set->map[j].mq_map[i] = 0; 3706 } 3707 3708 hctx = blk_mq_map_queue_type(q, j, i); 3709 ctx->hctxs[j] = hctx; 3710 /* 3711 * If the CPU is already set in the mask, then we've 3712 * mapped this one already. This can happen if 3713 * devices share queues across queue maps. 3714 */ 3715 if (cpumask_test_cpu(i, hctx->cpumask)) 3716 continue; 3717 3718 cpumask_set_cpu(i, hctx->cpumask); 3719 hctx->type = j; 3720 ctx->index_hw[hctx->type] = hctx->nr_ctx; 3721 hctx->ctxs[hctx->nr_ctx++] = ctx; 3722 3723 /* 3724 * If the nr_ctx type overflows, we have exceeded the 3725 * amount of sw queues we can support. 3726 */ 3727 BUG_ON(!hctx->nr_ctx); 3728 } 3729 3730 for (; j < HCTX_MAX_TYPES; j++) 3731 ctx->hctxs[j] = blk_mq_map_queue_type(q, 3732 HCTX_TYPE_DEFAULT, i); 3733 } 3734 3735 queue_for_each_hw_ctx(q, hctx, i) { 3736 /* 3737 * If no software queues are mapped to this hardware queue, 3738 * disable it and free the request entries. 3739 */ 3740 if (!hctx->nr_ctx) { 3741 /* Never unmap queue 0. We need it as a 3742 * fallback in case of a new remap fails 3743 * allocation 3744 */ 3745 if (i) 3746 __blk_mq_free_map_and_rqs(set, i); 3747 3748 hctx->tags = NULL; 3749 continue; 3750 } 3751 3752 hctx->tags = set->tags[i]; 3753 WARN_ON(!hctx->tags); 3754 3755 /* 3756 * Set the map size to the number of mapped software queues. 3757 * This is more accurate and more efficient than looping 3758 * over all possibly mapped software queues. 3759 */ 3760 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx); 3761 3762 /* 3763 * Initialize batch roundrobin counts 3764 */ 3765 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx); 3766 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; 3767 } 3768 } 3769 3770 /* 3771 * Caller needs to ensure that we're either frozen/quiesced, or that 3772 * the queue isn't live yet. 
3773 */ 3774 static void queue_set_hctx_shared(struct request_queue *q, bool shared) 3775 { 3776 struct blk_mq_hw_ctx *hctx; 3777 unsigned long i; 3778 3779 queue_for_each_hw_ctx(q, hctx, i) { 3780 if (shared) { 3781 hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; 3782 } else { 3783 blk_mq_tag_idle(hctx); 3784 hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; 3785 } 3786 } 3787 } 3788 3789 static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set, 3790 bool shared) 3791 { 3792 struct request_queue *q; 3793 3794 lockdep_assert_held(&set->tag_list_lock); 3795 3796 list_for_each_entry(q, &set->tag_list, tag_set_list) { 3797 blk_mq_freeze_queue(q); 3798 queue_set_hctx_shared(q, shared); 3799 blk_mq_unfreeze_queue(q); 3800 } 3801 } 3802 3803 static void blk_mq_del_queue_tag_set(struct request_queue *q) 3804 { 3805 struct blk_mq_tag_set *set = q->tag_set; 3806 3807 mutex_lock(&set->tag_list_lock); 3808 list_del(&q->tag_set_list); 3809 if (list_is_singular(&set->tag_list)) { 3810 /* just transitioned to unshared */ 3811 set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; 3812 /* update existing queue */ 3813 blk_mq_update_tag_set_shared(set, false); 3814 } 3815 mutex_unlock(&set->tag_list_lock); 3816 INIT_LIST_HEAD(&q->tag_set_list); 3817 } 3818 3819 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, 3820 struct request_queue *q) 3821 { 3822 mutex_lock(&set->tag_list_lock); 3823 3824 /* 3825 * Check to see if we're transitioning to shared (from 1 to 2 queues). 3826 */ 3827 if (!list_empty(&set->tag_list) && 3828 !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) { 3829 set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; 3830 /* update existing queue */ 3831 blk_mq_update_tag_set_shared(set, true); 3832 } 3833 if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED) 3834 queue_set_hctx_shared(q, true); 3835 list_add_tail(&q->tag_set_list, &set->tag_list); 3836 3837 mutex_unlock(&set->tag_list_lock); 3838 } 3839 3840 /* All allocations will be freed in release handler of q->mq_kobj */ 3841 static int blk_mq_alloc_ctxs(struct request_queue *q) 3842 { 3843 struct blk_mq_ctxs *ctxs; 3844 int cpu; 3845 3846 ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL); 3847 if (!ctxs) 3848 return -ENOMEM; 3849 3850 ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx); 3851 if (!ctxs->queue_ctx) 3852 goto fail; 3853 3854 for_each_possible_cpu(cpu) { 3855 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu); 3856 ctx->ctxs = ctxs; 3857 } 3858 3859 q->mq_kobj = &ctxs->kobj; 3860 q->queue_ctx = ctxs->queue_ctx; 3861 3862 return 0; 3863 fail: 3864 kfree(ctxs); 3865 return -ENOMEM; 3866 } 3867 3868 /* 3869 * It is the actual release handler for mq, but we do it from 3870 * request queue's release handler for avoiding use-after-free 3871 * and headache because q->mq_kobj shouldn't have been introduced, 3872 * but we can't group ctx/kctx kobj without it. 3873 */ 3874 void blk_mq_release(struct request_queue *q) 3875 { 3876 struct blk_mq_hw_ctx *hctx, *next; 3877 unsigned long i; 3878 3879 queue_for_each_hw_ctx(q, hctx, i) 3880 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list)); 3881 3882 /* all hctx are in .unused_hctx_list now */ 3883 list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) { 3884 list_del_init(&hctx->hctx_list); 3885 kobject_put(&hctx->kobj); 3886 } 3887 3888 xa_destroy(&q->hctx_table); 3889 3890 /* 3891 * release .mq_kobj and sw queue's kobject now because 3892 * both share lifetime with request queue. 
3893 */ 3894 blk_mq_sysfs_deinit(q); 3895 } 3896 3897 static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set, 3898 void *queuedata) 3899 { 3900 struct request_queue *q; 3901 int ret; 3902 3903 q = blk_alloc_queue(set->numa_node, set->flags & BLK_MQ_F_BLOCKING); 3904 if (!q) 3905 return ERR_PTR(-ENOMEM); 3906 q->queuedata = queuedata; 3907 ret = blk_mq_init_allocated_queue(set, q); 3908 if (ret) { 3909 blk_put_queue(q); 3910 return ERR_PTR(ret); 3911 } 3912 return q; 3913 } 3914 3915 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) 3916 { 3917 return blk_mq_init_queue_data(set, NULL); 3918 } 3919 EXPORT_SYMBOL(blk_mq_init_queue); 3920 3921 /** 3922 * blk_mq_destroy_queue - shutdown a request queue 3923 * @q: request queue to shutdown 3924 * 3925 * This shuts down a request queue allocated by blk_mq_init_queue() and drops 3926 * the initial reference. All future requests will failed with -ENODEV. 3927 * 3928 * Context: can sleep 3929 */ 3930 void blk_mq_destroy_queue(struct request_queue *q) 3931 { 3932 WARN_ON_ONCE(!queue_is_mq(q)); 3933 WARN_ON_ONCE(blk_queue_registered(q)); 3934 3935 might_sleep(); 3936 3937 blk_queue_flag_set(QUEUE_FLAG_DYING, q); 3938 blk_queue_start_drain(q); 3939 blk_freeze_queue(q); 3940 3941 blk_sync_queue(q); 3942 blk_mq_cancel_work_sync(q); 3943 blk_mq_exit_queue(q); 3944 3945 /* @q is and will stay empty, shutdown and put */ 3946 blk_put_queue(q); 3947 } 3948 EXPORT_SYMBOL(blk_mq_destroy_queue); 3949 3950 struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata, 3951 struct lock_class_key *lkclass) 3952 { 3953 struct request_queue *q; 3954 struct gendisk *disk; 3955 3956 q = blk_mq_init_queue_data(set, queuedata); 3957 if (IS_ERR(q)) 3958 return ERR_CAST(q); 3959 3960 disk = __alloc_disk_node(q, set->numa_node, lkclass); 3961 if (!disk) { 3962 blk_put_queue(q); 3963 return ERR_PTR(-ENOMEM); 3964 } 3965 set_bit(GD_OWNS_QUEUE, &disk->state); 3966 return disk; 3967 } 3968 EXPORT_SYMBOL(__blk_mq_alloc_disk); 3969 3970 struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q, 3971 struct lock_class_key *lkclass) 3972 { 3973 if (!blk_get_queue(q)) 3974 return NULL; 3975 return __alloc_disk_node(q, NUMA_NO_NODE, lkclass); 3976 } 3977 EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue); 3978 3979 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx( 3980 struct blk_mq_tag_set *set, struct request_queue *q, 3981 int hctx_idx, int node) 3982 { 3983 struct blk_mq_hw_ctx *hctx = NULL, *tmp; 3984 3985 /* reuse dead hctx first */ 3986 spin_lock(&q->unused_hctx_lock); 3987 list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) { 3988 if (tmp->numa_node == node) { 3989 hctx = tmp; 3990 break; 3991 } 3992 } 3993 if (hctx) 3994 list_del_init(&hctx->hctx_list); 3995 spin_unlock(&q->unused_hctx_lock); 3996 3997 if (!hctx) 3998 hctx = blk_mq_alloc_hctx(q, set, node); 3999 if (!hctx) 4000 goto fail; 4001 4002 if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) 4003 goto free_hctx; 4004 4005 return hctx; 4006 4007 free_hctx: 4008 kobject_put(&hctx->kobj); 4009 fail: 4010 return NULL; 4011 } 4012 4013 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, 4014 struct request_queue *q) 4015 { 4016 struct blk_mq_hw_ctx *hctx; 4017 unsigned long i, j; 4018 4019 /* protect against switching io scheduler */ 4020 mutex_lock(&q->sysfs_lock); 4021 for (i = 0; i < set->nr_hw_queues; i++) { 4022 int old_node; 4023 int node = blk_mq_get_hctx_node(set, i); 4024 struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i); 
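		/*
		 * If a hctx already exists at this index, tear it down first;
		 * blk_mq_exit_hctx() parks it on q->unused_hctx_list, from
		 * where blk_mq_alloc_and_init_hctx() may reuse it if the NUMA
		 * node matches.
		 */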
4025 4026 if (old_hctx) { 4027 old_node = old_hctx->numa_node; 4028 blk_mq_exit_hctx(q, set, old_hctx, i); 4029 } 4030 4031 if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) { 4032 if (!old_hctx) 4033 break; 4034 pr_warn("Allocate new hctx on node %d fails, fallback to previous one on node %d\n", 4035 node, old_node); 4036 hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node); 4037 WARN_ON_ONCE(!hctx); 4038 } 4039 } 4040 /* 4041 * Increasing nr_hw_queues fails. Free the newly allocated 4042 * hctxs and keep the previous q->nr_hw_queues. 4043 */ 4044 if (i != set->nr_hw_queues) { 4045 j = q->nr_hw_queues; 4046 } else { 4047 j = i; 4048 q->nr_hw_queues = set->nr_hw_queues; 4049 } 4050 4051 xa_for_each_start(&q->hctx_table, j, hctx, j) 4052 blk_mq_exit_hctx(q, set, hctx, j); 4053 mutex_unlock(&q->sysfs_lock); 4054 } 4055 4056 static void blk_mq_update_poll_flag(struct request_queue *q) 4057 { 4058 struct blk_mq_tag_set *set = q->tag_set; 4059 4060 if (set->nr_maps > HCTX_TYPE_POLL && 4061 set->map[HCTX_TYPE_POLL].nr_queues) 4062 blk_queue_flag_set(QUEUE_FLAG_POLL, q); 4063 else 4064 blk_queue_flag_clear(QUEUE_FLAG_POLL, q); 4065 } 4066 4067 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, 4068 struct request_queue *q) 4069 { 4070 WARN_ON_ONCE(blk_queue_has_srcu(q) != 4071 !!(set->flags & BLK_MQ_F_BLOCKING)); 4072 4073 /* mark the queue as mq asap */ 4074 q->mq_ops = set->ops; 4075 4076 q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn, 4077 blk_mq_poll_stats_bkt, 4078 BLK_MQ_POLL_STATS_BKTS, q); 4079 if (!q->poll_cb) 4080 goto err_exit; 4081 4082 if (blk_mq_alloc_ctxs(q)) 4083 goto err_poll; 4084 4085 /* init q->mq_kobj and sw queues' kobjects */ 4086 blk_mq_sysfs_init(q); 4087 4088 INIT_LIST_HEAD(&q->unused_hctx_list); 4089 spin_lock_init(&q->unused_hctx_lock); 4090 4091 xa_init(&q->hctx_table); 4092 4093 blk_mq_realloc_hw_ctxs(set, q); 4094 if (!q->nr_hw_queues) 4095 goto err_hctxs; 4096 4097 INIT_WORK(&q->timeout_work, blk_mq_timeout_work); 4098 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ); 4099 4100 q->tag_set = set; 4101 4102 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; 4103 blk_mq_update_poll_flag(q); 4104 4105 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work); 4106 INIT_LIST_HEAD(&q->requeue_list); 4107 spin_lock_init(&q->requeue_lock); 4108 4109 q->nr_requests = set->queue_depth; 4110 4111 /* 4112 * Default to classic polling 4113 */ 4114 q->poll_nsec = BLK_MQ_POLL_CLASSIC; 4115 4116 blk_mq_init_cpu_queues(q, set->nr_hw_queues); 4117 blk_mq_add_queue_tag_set(set, q); 4118 blk_mq_map_swqueue(q); 4119 return 0; 4120 4121 err_hctxs: 4122 xa_destroy(&q->hctx_table); 4123 q->nr_hw_queues = 0; 4124 blk_mq_sysfs_deinit(q); 4125 err_poll: 4126 blk_stat_free_callback(q->poll_cb); 4127 q->poll_cb = NULL; 4128 err_exit: 4129 q->mq_ops = NULL; 4130 return -ENOMEM; 4131 } 4132 EXPORT_SYMBOL(blk_mq_init_allocated_queue); 4133 4134 /* tags can _not_ be used after returning from blk_mq_exit_queue */ 4135 void blk_mq_exit_queue(struct request_queue *q) 4136 { 4137 struct blk_mq_tag_set *set = q->tag_set; 4138 4139 /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */ 4140 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); 4141 /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. 
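This is why it runs only after blk_mq_exit_hw_queues() above has checked the flag.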
*/ 4142 blk_mq_del_queue_tag_set(q); 4143 } 4144 4145 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) 4146 { 4147 int i; 4148 4149 if (blk_mq_is_shared_tags(set->flags)) { 4150 set->shared_tags = blk_mq_alloc_map_and_rqs(set, 4151 BLK_MQ_NO_HCTX_IDX, 4152 set->queue_depth); 4153 if (!set->shared_tags) 4154 return -ENOMEM; 4155 } 4156 4157 for (i = 0; i < set->nr_hw_queues; i++) { 4158 if (!__blk_mq_alloc_map_and_rqs(set, i)) 4159 goto out_unwind; 4160 cond_resched(); 4161 } 4162 4163 return 0; 4164 4165 out_unwind: 4166 while (--i >= 0) 4167 __blk_mq_free_map_and_rqs(set, i); 4168 4169 if (blk_mq_is_shared_tags(set->flags)) { 4170 blk_mq_free_map_and_rqs(set, set->shared_tags, 4171 BLK_MQ_NO_HCTX_IDX); 4172 } 4173 4174 return -ENOMEM; 4175 } 4176 4177 /* 4178 * Allocate the request maps associated with this tag_set. Note that this 4179 * may reduce the depth asked for, if memory is tight. set->queue_depth 4180 * will be updated to reflect the allocated depth. 4181 */ 4182 static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set) 4183 { 4184 unsigned int depth; 4185 int err; 4186 4187 depth = set->queue_depth; 4188 do { 4189 err = __blk_mq_alloc_rq_maps(set); 4190 if (!err) 4191 break; 4192 4193 set->queue_depth >>= 1; 4194 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) { 4195 err = -ENOMEM; 4196 break; 4197 } 4198 } while (set->queue_depth); 4199 4200 if (!set->queue_depth || err) { 4201 pr_err("blk-mq: failed to allocate request map\n"); 4202 return -ENOMEM; 4203 } 4204 4205 if (depth != set->queue_depth) 4206 pr_info("blk-mq: reduced tag depth (%u -> %u)\n", 4207 depth, set->queue_depth); 4208 4209 return 0; 4210 } 4211 4212 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set) 4213 { 4214 /* 4215 * blk_mq_map_queues() and multiple .map_queues() implementations 4216 * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the 4217 * number of hardware queues. 4218 */ 4219 if (set->nr_maps == 1) 4220 set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues; 4221 4222 if (set->ops->map_queues && !is_kdump_kernel()) { 4223 int i; 4224 4225 /* 4226 * transport .map_queues is usually done in the following 4227 * way: 4228 * 4229 * for (queue = 0; queue < set->nr_hw_queues; queue++) { 4230 * mask = get_cpu_mask(queue) 4231 * for_each_cpu(cpu, mask) 4232 * set->map[x].mq_map[cpu] = queue; 4233 * } 4234 * 4235 * When we need to remap, the table has to be cleared for 4236 * killing stale mapping since one CPU may not be mapped 4237 * to any hw queue. 
4238 */ 4239 for (i = 0; i < set->nr_maps; i++) 4240 blk_mq_clear_mq_map(&set->map[i]); 4241 4242 return set->ops->map_queues(set); 4243 } else { 4244 BUG_ON(set->nr_maps > 1); 4245 return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); 4246 } 4247 } 4248 4249 static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set, 4250 int cur_nr_hw_queues, int new_nr_hw_queues) 4251 { 4252 struct blk_mq_tags **new_tags; 4253 4254 if (cur_nr_hw_queues >= new_nr_hw_queues) 4255 return 0; 4256 4257 new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *), 4258 GFP_KERNEL, set->numa_node); 4259 if (!new_tags) 4260 return -ENOMEM; 4261 4262 if (set->tags) 4263 memcpy(new_tags, set->tags, cur_nr_hw_queues * 4264 sizeof(*set->tags)); 4265 kfree(set->tags); 4266 set->tags = new_tags; 4267 set->nr_hw_queues = new_nr_hw_queues; 4268 4269 return 0; 4270 } 4271 4272 static int blk_mq_alloc_tag_set_tags(struct blk_mq_tag_set *set, 4273 int new_nr_hw_queues) 4274 { 4275 return blk_mq_realloc_tag_set_tags(set, 0, new_nr_hw_queues); 4276 } 4277 4278 /* 4279 * Alloc a tag set to be associated with one or more request queues. 4280 * May fail with EINVAL for various error conditions. May adjust the 4281 * requested depth down, if it's too large. In that case, the set 4282 * value will be stored in set->queue_depth. 4283 */ 4284 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) 4285 { 4286 int i, ret; 4287 4288 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS); 4289 4290 if (!set->nr_hw_queues) 4291 return -EINVAL; 4292 if (!set->queue_depth) 4293 return -EINVAL; 4294 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) 4295 return -EINVAL; 4296 4297 if (!set->ops->queue_rq) 4298 return -EINVAL; 4299 4300 if (!set->ops->get_budget ^ !set->ops->put_budget) 4301 return -EINVAL; 4302 4303 if (set->queue_depth > BLK_MQ_MAX_DEPTH) { 4304 pr_info("blk-mq: reduced tag depth to %u\n", 4305 BLK_MQ_MAX_DEPTH); 4306 set->queue_depth = BLK_MQ_MAX_DEPTH; 4307 } 4308 4309 if (!set->nr_maps) 4310 set->nr_maps = 1; 4311 else if (set->nr_maps > HCTX_MAX_TYPES) 4312 return -EINVAL; 4313 4314 /* 4315 * If a crashdump is active, then we are potentially in a very 4316 * memory constrained environment. Limit us to 1 queue and 4317 * 64 tags to prevent using too much memory. 4318 */ 4319 if (is_kdump_kernel()) { 4320 set->nr_hw_queues = 1; 4321 set->nr_maps = 1; 4322 set->queue_depth = min(64U, set->queue_depth); 4323 } 4324 /* 4325 * There is no use for more h/w queues than cpus if we just have 4326 * a single map 4327 */ 4328 if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids) 4329 set->nr_hw_queues = nr_cpu_ids; 4330 4331 if (blk_mq_alloc_tag_set_tags(set, set->nr_hw_queues) < 0) 4332 return -ENOMEM; 4333 4334 ret = -ENOMEM; 4335 for (i = 0; i < set->nr_maps; i++) { 4336 set->map[i].mq_map = kcalloc_node(nr_cpu_ids, 4337 sizeof(set->map[i].mq_map[0]), 4338 GFP_KERNEL, set->numa_node); 4339 if (!set->map[i].mq_map) 4340 goto out_free_mq_map; 4341 set->map[i].nr_queues = is_kdump_kernel() ? 
1 : set->nr_hw_queues; 4342 } 4343 4344 ret = blk_mq_update_queue_map(set); 4345 if (ret) 4346 goto out_free_mq_map; 4347 4348 ret = blk_mq_alloc_set_map_and_rqs(set); 4349 if (ret) 4350 goto out_free_mq_map; 4351 4352 mutex_init(&set->tag_list_lock); 4353 INIT_LIST_HEAD(&set->tag_list); 4354 4355 return 0; 4356 4357 out_free_mq_map: 4358 for (i = 0; i < set->nr_maps; i++) { 4359 kfree(set->map[i].mq_map); 4360 set->map[i].mq_map = NULL; 4361 } 4362 kfree(set->tags); 4363 set->tags = NULL; 4364 return ret; 4365 } 4366 EXPORT_SYMBOL(blk_mq_alloc_tag_set); 4367 4368 /* allocate and initialize a tagset for a simple single-queue device */ 4369 int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set, 4370 const struct blk_mq_ops *ops, unsigned int queue_depth, 4371 unsigned int set_flags) 4372 { 4373 memset(set, 0, sizeof(*set)); 4374 set->ops = ops; 4375 set->nr_hw_queues = 1; 4376 set->nr_maps = 1; 4377 set->queue_depth = queue_depth; 4378 set->numa_node = NUMA_NO_NODE; 4379 set->flags = set_flags; 4380 return blk_mq_alloc_tag_set(set); 4381 } 4382 EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set); 4383 4384 void blk_mq_free_tag_set(struct blk_mq_tag_set *set) 4385 { 4386 int i, j; 4387 4388 for (i = 0; i < set->nr_hw_queues; i++) 4389 __blk_mq_free_map_and_rqs(set, i); 4390 4391 if (blk_mq_is_shared_tags(set->flags)) { 4392 blk_mq_free_map_and_rqs(set, set->shared_tags, 4393 BLK_MQ_NO_HCTX_IDX); 4394 } 4395 4396 for (j = 0; j < set->nr_maps; j++) { 4397 kfree(set->map[j].mq_map); 4398 set->map[j].mq_map = NULL; 4399 } 4400 4401 kfree(set->tags); 4402 set->tags = NULL; 4403 } 4404 EXPORT_SYMBOL(blk_mq_free_tag_set); 4405 4406 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) 4407 { 4408 struct blk_mq_tag_set *set = q->tag_set; 4409 struct blk_mq_hw_ctx *hctx; 4410 int ret; 4411 unsigned long i; 4412 4413 if (!set) 4414 return -EINVAL; 4415 4416 if (q->nr_requests == nr) 4417 return 0; 4418 4419 blk_mq_freeze_queue(q); 4420 blk_mq_quiesce_queue(q); 4421 4422 ret = 0; 4423 queue_for_each_hw_ctx(q, hctx, i) { 4424 if (!hctx->tags) 4425 continue; 4426 /* 4427 * If we're using an MQ scheduler, just update the scheduler 4428 * queue depth. This is similar to what the old code would do. 4429 */ 4430 if (hctx->sched_tags) { 4431 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags, 4432 nr, true); 4433 } else { 4434 ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr, 4435 false); 4436 } 4437 if (ret) 4438 break; 4439 if (q->elevator && q->elevator->type->ops.depth_updated) 4440 q->elevator->type->ops.depth_updated(hctx); 4441 } 4442 if (!ret) { 4443 q->nr_requests = nr; 4444 if (blk_mq_is_shared_tags(set->flags)) { 4445 if (q->elevator) 4446 blk_mq_tag_update_sched_shared_tags(q); 4447 else 4448 blk_mq_tag_resize_shared_tags(set, nr); 4449 } 4450 } 4451 4452 blk_mq_unquiesce_queue(q); 4453 blk_mq_unfreeze_queue(q); 4454 4455 return ret; 4456 } 4457 4458 /* 4459 * request_queue and elevator_type pair. 4460 * It is just used by __blk_mq_update_nr_hw_queues to cache 4461 * the elevator_type associated with a request_queue. 
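 * The cached type is used to switch the scheduler back after it has been
 * temporarily set to 'none' for the update.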
4462 */
4463 struct blk_mq_qe_pair {
4464 struct list_head node;
4465 struct request_queue *q;
4466 struct elevator_type *type;
4467 };
4468
4469 /*
4470 * Cache the elevator_type in the qe pair list and switch the
4471 * io scheduler to 'none'
4472 */
4473 static bool blk_mq_elv_switch_none(struct list_head *head,
4474 struct request_queue *q)
4475 {
4476 struct blk_mq_qe_pair *qe;
4477
4478 if (!q->elevator)
4479 return true;
4480
4481 qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
4482 if (!qe)
4483 return false;
4484
4485 /* q->elevator needs protection from ->sysfs_lock */
4486 mutex_lock(&q->sysfs_lock);
4487
4488 INIT_LIST_HEAD(&qe->node);
4489 qe->q = q;
4490 qe->type = q->elevator->type;
4491 list_add(&qe->node, head);
4492
4493 /*
4494 * After elevator_switch_mq(), the previous elevator_queue will be
4495 * released by elevator_release(). The reference to the io scheduler
4496 * module taken by elevator_get() will also be put, so take an extra
4497 * reference to the io scheduler module here to prevent it from being
4498 * removed.
4499 */
4500 __module_get(qe->type->elevator_owner);
4501 elevator_switch_mq(q, NULL);
4502 mutex_unlock(&q->sysfs_lock);
4503
4504 return true;
4505 }
4506
4507 static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
4508 struct request_queue *q)
4509 {
4510 struct blk_mq_qe_pair *qe;
4511
4512 list_for_each_entry(qe, head, node)
4513 if (qe->q == q)
4514 return qe;
4515
4516 return NULL;
4517 }
4518
4519 static void blk_mq_elv_switch_back(struct list_head *head,
4520 struct request_queue *q)
4521 {
4522 struct blk_mq_qe_pair *qe;
4523 struct elevator_type *t;
4524
4525 qe = blk_lookup_qe_pair(head, q);
4526 if (!qe)
4527 return;
4528 t = qe->type;
4529 list_del(&qe->node);
4530 kfree(qe);
4531
4532 mutex_lock(&q->sysfs_lock);
4533 elevator_switch_mq(q, t);
4534 mutex_unlock(&q->sysfs_lock);
4535 }
4536
4537 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
4538 int nr_hw_queues)
4539 {
4540 struct request_queue *q;
4541 LIST_HEAD(head);
4542 int prev_nr_hw_queues;
4543
4544 lockdep_assert_held(&set->tag_list_lock);
4545
4546 if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
4547 nr_hw_queues = nr_cpu_ids;
4548 if (nr_hw_queues < 1)
4549 return;
4550 if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
4551 return;
4552
4553 list_for_each_entry(q, &set->tag_list, tag_set_list)
4554 blk_mq_freeze_queue(q);
4555 /*
4556 * Switch IO scheduler to 'none', cleaning up the data associated
4557 * with the previous scheduler. We will switch back once we are done
4558 * updating the new sw to hw queue mappings.
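 * (blk_mq_elv_switch_none() above records the old scheduler for each queue;
 * blk_mq_elv_switch_back() restores it at the end of this function).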
4559 */ 4560 list_for_each_entry(q, &set->tag_list, tag_set_list) 4561 if (!blk_mq_elv_switch_none(&head, q)) 4562 goto switch_back; 4563 4564 list_for_each_entry(q, &set->tag_list, tag_set_list) { 4565 blk_mq_debugfs_unregister_hctxs(q); 4566 blk_mq_sysfs_unregister_hctxs(q); 4567 } 4568 4569 prev_nr_hw_queues = set->nr_hw_queues; 4570 if (blk_mq_realloc_tag_set_tags(set, set->nr_hw_queues, nr_hw_queues) < 4571 0) 4572 goto reregister; 4573 4574 set->nr_hw_queues = nr_hw_queues; 4575 fallback: 4576 blk_mq_update_queue_map(set); 4577 list_for_each_entry(q, &set->tag_list, tag_set_list) { 4578 blk_mq_realloc_hw_ctxs(set, q); 4579 blk_mq_update_poll_flag(q); 4580 if (q->nr_hw_queues != set->nr_hw_queues) { 4581 int i = prev_nr_hw_queues; 4582 4583 pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n", 4584 nr_hw_queues, prev_nr_hw_queues); 4585 for (; i < set->nr_hw_queues; i++) 4586 __blk_mq_free_map_and_rqs(set, i); 4587 4588 set->nr_hw_queues = prev_nr_hw_queues; 4589 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); 4590 goto fallback; 4591 } 4592 blk_mq_map_swqueue(q); 4593 } 4594 4595 reregister: 4596 list_for_each_entry(q, &set->tag_list, tag_set_list) { 4597 blk_mq_sysfs_register_hctxs(q); 4598 blk_mq_debugfs_register_hctxs(q); 4599 } 4600 4601 switch_back: 4602 list_for_each_entry(q, &set->tag_list, tag_set_list) 4603 blk_mq_elv_switch_back(&head, q); 4604 4605 list_for_each_entry(q, &set->tag_list, tag_set_list) 4606 blk_mq_unfreeze_queue(q); 4607 } 4608 4609 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) 4610 { 4611 mutex_lock(&set->tag_list_lock); 4612 __blk_mq_update_nr_hw_queues(set, nr_hw_queues); 4613 mutex_unlock(&set->tag_list_lock); 4614 } 4615 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues); 4616 4617 /* Enable polling stats and return whether they were already enabled. */ 4618 static bool blk_poll_stats_enable(struct request_queue *q) 4619 { 4620 if (q->poll_stat) 4621 return true; 4622 4623 return blk_stats_alloc_enable(q); 4624 } 4625 4626 static void blk_mq_poll_stats_start(struct request_queue *q) 4627 { 4628 /* 4629 * We don't arm the callback if polling stats are not enabled or the 4630 * callback is already active. 4631 */ 4632 if (!q->poll_stat || blk_stat_is_active(q->poll_cb)) 4633 return; 4634 4635 blk_stat_activate_msecs(q->poll_cb, 100); 4636 } 4637 4638 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb) 4639 { 4640 struct request_queue *q = cb->data; 4641 int bucket; 4642 4643 for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) { 4644 if (cb->stat[bucket].nr_samples) 4645 q->poll_stat[bucket] = cb->stat[bucket]; 4646 } 4647 } 4648 4649 static unsigned long blk_mq_poll_nsecs(struct request_queue *q, 4650 struct request *rq) 4651 { 4652 unsigned long ret = 0; 4653 int bucket; 4654 4655 /* 4656 * If stats collection isn't on, don't sleep but turn it on for 4657 * future users 4658 */ 4659 if (!blk_poll_stats_enable(q)) 4660 return 0; 4661 4662 /* 4663 * As an optimistic guess, use half of the mean service time 4664 * for this type of request. We can (and should) make this smarter. 4665 * For instance, if the completion latencies are tight, we can 4666 * get closer than just half the mean. This is especially 4667 * important on devices where the completion latencies are longer 4668 * than ~10 usec. We do use the stats for the relevant IO size 4669 * if available which does lead to better estimates. 
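 * For example, with a tracked mean completion time of 20 usec in the
 * matching bucket, the code below returns (mean + 1) / 2, i.e. roughly
 * 10 usec of sleep before the caller falls back to busy polling.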
4670 */
4671 bucket = blk_mq_poll_stats_bkt(rq);
4672 if (bucket < 0)
4673 return ret;
4674
4675 if (q->poll_stat[bucket].nr_samples)
4676 ret = (q->poll_stat[bucket].mean + 1) / 2;
4677
4678 return ret;
4679 }
4680
4681 static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc)
4682 {
4683 struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, qc);
4684 struct request *rq = blk_qc_to_rq(hctx, qc);
4685 struct hrtimer_sleeper hs;
4686 enum hrtimer_mode mode;
4687 unsigned int nsecs;
4688 ktime_t kt;
4689
4690 /*
4691 * If a request has completed on a queue that uses an I/O scheduler, we
4692 * won't get back a request from blk_qc_to_rq.
4693 */
4694 if (!rq || (rq->rq_flags & RQF_MQ_POLL_SLEPT))
4695 return false;
4696
4697 /*
4698 * If we get here, hybrid polling is enabled. Hence poll_nsec can be:
4699 *
4700 * 0: use half of prev avg
4701 * >0: use this specific value
4702 */
4703 if (q->poll_nsec > 0)
4704 nsecs = q->poll_nsec;
4705 else
4706 nsecs = blk_mq_poll_nsecs(q, rq);
4707
4708 if (!nsecs)
4709 return false;
4710
4711 rq->rq_flags |= RQF_MQ_POLL_SLEPT;
4712
4713 /*
4714 * This will be replaced with the stats tracking code, using
4715 * 'avg_completion_time / 2' as the pre-sleep target.
4716 */
4717 kt = nsecs;
4718
4719 mode = HRTIMER_MODE_REL;
4720 hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode);
4721 hrtimer_set_expires(&hs.timer, kt);
4722
4723 do {
4724 if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
4725 break;
4726 set_current_state(TASK_UNINTERRUPTIBLE);
4727 hrtimer_sleeper_start_expires(&hs, mode);
4728 if (hs.task)
4729 io_schedule();
4730 hrtimer_cancel(&hs.timer);
4731 mode = HRTIMER_MODE_ABS;
4732 } while (hs.task && !signal_pending(current));
4733
4734 __set_current_state(TASK_RUNNING);
4735 destroy_hrtimer_on_stack(&hs.timer);
4736
4737 /*
4738 * If we sleep, have the caller restart the poll loop to reset the
4739 * state. Like for the other success return cases, the caller is
4740 * responsible for checking if the IO completed. If the IO isn't
4741 * complete, we'll get called again and will go straight to the busy
4742 * poll loop.
4743 */ 4744 return true; 4745 } 4746 4747 static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie, 4748 struct io_comp_batch *iob, unsigned int flags) 4749 { 4750 struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie); 4751 long state = get_current_state(); 4752 int ret; 4753 4754 do { 4755 ret = q->mq_ops->poll(hctx, iob); 4756 if (ret > 0) { 4757 __set_current_state(TASK_RUNNING); 4758 return ret; 4759 } 4760 4761 if (signal_pending_state(state, current)) 4762 __set_current_state(TASK_RUNNING); 4763 if (task_is_running(current)) 4764 return 1; 4765 4766 if (ret < 0 || (flags & BLK_POLL_ONESHOT)) 4767 break; 4768 cpu_relax(); 4769 } while (!need_resched()); 4770 4771 __set_current_state(TASK_RUNNING); 4772 return 0; 4773 } 4774 4775 int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob, 4776 unsigned int flags) 4777 { 4778 if (!(flags & BLK_POLL_NOSLEEP) && 4779 q->poll_nsec != BLK_MQ_POLL_CLASSIC) { 4780 if (blk_mq_poll_hybrid(q, cookie)) 4781 return 1; 4782 } 4783 return blk_mq_poll_classic(q, cookie, iob, flags); 4784 } 4785 4786 unsigned int blk_mq_rq_cpu(struct request *rq) 4787 { 4788 return rq->mq_ctx->cpu; 4789 } 4790 EXPORT_SYMBOL(blk_mq_rq_cpu); 4791 4792 void blk_mq_cancel_work_sync(struct request_queue *q) 4793 { 4794 if (queue_is_mq(q)) { 4795 struct blk_mq_hw_ctx *hctx; 4796 unsigned long i; 4797 4798 cancel_delayed_work_sync(&q->requeue_work); 4799 4800 queue_for_each_hw_ctx(q, hctx, i) 4801 cancel_delayed_work_sync(&hctx->run_work); 4802 } 4803 } 4804 4805 static int __init blk_mq_init(void) 4806 { 4807 int i; 4808 4809 for_each_possible_cpu(i) 4810 init_llist_head(&per_cpu(blk_cpu_done, i)); 4811 open_softirq(BLOCK_SOFTIRQ, blk_done_softirq); 4812 4813 cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD, 4814 "block/softirq:dead", NULL, 4815 blk_softirq_cpu_dead); 4816 cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL, 4817 blk_mq_hctx_notify_dead); 4818 cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online", 4819 blk_mq_hctx_notify_online, 4820 blk_mq_hctx_notify_offline); 4821 return 0; 4822 } 4823 subsys_initcall(blk_mq_init); 4824
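/*
 * Illustrative usage sketch (not part of blk-mq itself): how a simple
 * driver might consume the tag set and disk allocation API exported above.
 * The mydrv_* names are hypothetical and error handling is abbreviated.
 *
 *	static struct blk_mq_tag_set mydrv_tag_set;
 *
 *	static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					   const struct blk_mq_queue_data *bd)
 *	{
 *		blk_mq_start_request(bd->rq);
 *		// hand bd->rq to the hardware; complete it when done:
 *		blk_mq_end_request(bd->rq, BLK_STS_OK);
 *		return BLK_STS_OK;
 *	}
 *
 *	static const struct blk_mq_ops mydrv_mq_ops = {
 *		.queue_rq	= mydrv_queue_rq,
 *	};
 *
 *	static int mydrv_probe(void)
 *	{
 *		struct gendisk *disk;
 *		int ret;
 *
 *		ret = blk_mq_alloc_sq_tag_set(&mydrv_tag_set, &mydrv_mq_ops,
 *					      64, BLK_MQ_F_SHOULD_MERGE);
 *		if (ret)
 *			return ret;
 *
 *		disk = blk_mq_alloc_disk(&mydrv_tag_set, NULL);
 *		if (IS_ERR(disk)) {
 *			blk_mq_free_tag_set(&mydrv_tag_set);
 *			return PTR_ERR(disk);
 *		}
 *		// set disk->fops, disk->private_data, capacity, then add_disk()
 *		return 0;
 *	}
 *
 * Teardown mirrors this: del_gendisk(), put_disk() (which drops the queue
 * for disks marked GD_OWNS_QUEUE by __blk_mq_alloc_disk() above), then
 * blk_mq_free_tag_set().
 */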