1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Block multiqueue core code 4 * 5 * Copyright (C) 2013-2014 Jens Axboe 6 * Copyright (C) 2013-2014 Christoph Hellwig 7 */ 8 #include <linux/kernel.h> 9 #include <linux/module.h> 10 #include <linux/backing-dev.h> 11 #include <linux/bio.h> 12 #include <linux/blkdev.h> 13 #include <linux/blk-integrity.h> 14 #include <linux/kmemleak.h> 15 #include <linux/mm.h> 16 #include <linux/init.h> 17 #include <linux/slab.h> 18 #include <linux/workqueue.h> 19 #include <linux/smp.h> 20 #include <linux/interrupt.h> 21 #include <linux/llist.h> 22 #include <linux/cpu.h> 23 #include <linux/cache.h> 24 #include <linux/sched/sysctl.h> 25 #include <linux/sched/topology.h> 26 #include <linux/sched/signal.h> 27 #include <linux/delay.h> 28 #include <linux/crash_dump.h> 29 #include <linux/prefetch.h> 30 #include <linux/blk-crypto.h> 31 #include <linux/part_stat.h> 32 33 #include <trace/events/block.h> 34 35 #include <linux/blk-mq.h> 36 #include <linux/t10-pi.h> 37 #include "blk.h" 38 #include "blk-mq.h" 39 #include "blk-mq-debugfs.h" 40 #include "blk-mq-tag.h" 41 #include "blk-pm.h" 42 #include "blk-stat.h" 43 #include "blk-mq-sched.h" 44 #include "blk-rq-qos.h" 45 46 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done); 47 48 static void blk_mq_poll_stats_start(struct request_queue *q); 49 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb); 50 51 static int blk_mq_poll_stats_bkt(const struct request *rq) 52 { 53 int ddir, sectors, bucket; 54 55 ddir = rq_data_dir(rq); 56 sectors = blk_rq_stats_sectors(rq); 57 58 bucket = ddir + 2 * ilog2(sectors); 59 60 if (bucket < 0) 61 return -1; 62 else if (bucket >= BLK_MQ_POLL_STATS_BKTS) 63 return ddir + BLK_MQ_POLL_STATS_BKTS - 2; 64 65 return bucket; 66 } 67 68 #define BLK_QC_T_SHIFT 16 69 #define BLK_QC_T_INTERNAL (1U << 31) 70 71 static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q, 72 blk_qc_t qc) 73 { 74 return xa_load(&q->hctx_table, 75 (qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT); 76 } 77 78 static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx, 79 blk_qc_t qc) 80 { 81 unsigned int tag = qc & ((1U << BLK_QC_T_SHIFT) - 1); 82 83 if (qc & BLK_QC_T_INTERNAL) 84 return blk_mq_tag_to_rq(hctx->sched_tags, tag); 85 return blk_mq_tag_to_rq(hctx->tags, tag); 86 } 87 88 static inline blk_qc_t blk_rq_to_qc(struct request *rq) 89 { 90 return (rq->mq_hctx->queue_num << BLK_QC_T_SHIFT) | 91 (rq->tag != -1 ? 92 rq->tag : (rq->internal_tag | BLK_QC_T_INTERNAL)); 93 } 94 95 /* 96 * Check if any of the ctx, dispatch list or elevator 97 * have pending work in this hardware queue. 
98 */ 99 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) 100 { 101 return !list_empty_careful(&hctx->dispatch) || 102 sbitmap_any_bit_set(&hctx->ctx_map) || 103 blk_mq_sched_has_work(hctx); 104 } 105 106 /* 107 * Mark this ctx as having pending work in this hardware queue 108 */ 109 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, 110 struct blk_mq_ctx *ctx) 111 { 112 const int bit = ctx->index_hw[hctx->type]; 113 114 if (!sbitmap_test_bit(&hctx->ctx_map, bit)) 115 sbitmap_set_bit(&hctx->ctx_map, bit); 116 } 117 118 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, 119 struct blk_mq_ctx *ctx) 120 { 121 const int bit = ctx->index_hw[hctx->type]; 122 123 sbitmap_clear_bit(&hctx->ctx_map, bit); 124 } 125 126 struct mq_inflight { 127 struct block_device *part; 128 unsigned int inflight[2]; 129 }; 130 131 static bool blk_mq_check_inflight(struct request *rq, void *priv, 132 bool reserved) 133 { 134 struct mq_inflight *mi = priv; 135 136 if ((!mi->part->bd_partno || rq->part == mi->part) && 137 blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) 138 mi->inflight[rq_data_dir(rq)]++; 139 140 return true; 141 } 142 143 unsigned int blk_mq_in_flight(struct request_queue *q, 144 struct block_device *part) 145 { 146 struct mq_inflight mi = { .part = part }; 147 148 blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi); 149 150 return mi.inflight[0] + mi.inflight[1]; 151 } 152 153 void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part, 154 unsigned int inflight[2]) 155 { 156 struct mq_inflight mi = { .part = part }; 157 158 blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi); 159 inflight[0] = mi.inflight[0]; 160 inflight[1] = mi.inflight[1]; 161 } 162 163 void blk_freeze_queue_start(struct request_queue *q) 164 { 165 mutex_lock(&q->mq_freeze_lock); 166 if (++q->mq_freeze_depth == 1) { 167 percpu_ref_kill(&q->q_usage_counter); 168 mutex_unlock(&q->mq_freeze_lock); 169 if (queue_is_mq(q)) 170 blk_mq_run_hw_queues(q, false); 171 } else { 172 mutex_unlock(&q->mq_freeze_lock); 173 } 174 } 175 EXPORT_SYMBOL_GPL(blk_freeze_queue_start); 176 177 void blk_mq_freeze_queue_wait(struct request_queue *q) 178 { 179 wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter)); 180 } 181 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait); 182 183 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q, 184 unsigned long timeout) 185 { 186 return wait_event_timeout(q->mq_freeze_wq, 187 percpu_ref_is_zero(&q->q_usage_counter), 188 timeout); 189 } 190 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout); 191 192 /* 193 * Guarantee no request is in use, so we can change any data structure of 194 * the queue afterward. 195 */ 196 void blk_freeze_queue(struct request_queue *q) 197 { 198 /* 199 * In the !blk_mq case we are only calling this to kill the 200 * q_usage_counter, otherwise this increases the freeze depth 201 * and waits for it to return to zero. For this reason there is 202 * no blk_unfreeze_queue(), and blk_freeze_queue() is not 203 * exported to drivers as the only user for unfreeze is blk_mq. 
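 *
 * Illustrative driver-side usage (a sketch, not code from this file):
 * callers that need to change per-queue data typically pair the exported
 * helpers, e.g.
 *
 *	blk_mq_freeze_queue(q);
 *	... update queue data structures while no requests are in flight ...
 *	blk_mq_unfreeze_queue(q);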
 */
        blk_freeze_queue_start(q);
        blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
        /*
         * ...just an alias to keep freeze and unfreeze actions balanced
         * in the blk_mq_* namespace
         */
        blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
{
        mutex_lock(&q->mq_freeze_lock);
        if (force_atomic)
                q->q_usage_counter.data->force_atomic = true;
        q->mq_freeze_depth--;
        WARN_ON_ONCE(q->mq_freeze_depth < 0);
        if (!q->mq_freeze_depth) {
                percpu_ref_resurrect(&q->q_usage_counter);
                wake_up_all(&q->mq_freeze_wq);
        }
        mutex_unlock(&q->mq_freeze_lock);
}

void blk_mq_unfreeze_queue(struct request_queue *q)
{
        __blk_mq_unfreeze_queue(q, false);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(&q->queue_lock, flags);
        if (!q->quiesce_depth++)
                blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
        spin_unlock_irqrestore(&q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done
 * @q: request queue.
 *
 * Note: it is the driver's responsibility to make sure that quiesce has
 * been started.
 */
void blk_mq_wait_quiesce_done(struct request_queue *q)
{
        if (blk_queue_has_srcu(q))
                synchronize_srcu(q->srcu);
        else
                synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_wait_quiesce_done);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked. Once this function returns, no dispatch
 * can happen until the queue is unquiesced via blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
        blk_mq_quiesce_queue_nowait(q);
        blk_mq_wait_quiesce_done(q);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function restores the queue to the state it was in before
 * blk_mq_quiesce_queue() was called.
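 *
 * Quiesce and unquiesce nest by way of q->quiesce_depth, so a sketch of a
 * typical (hypothetical) caller is:
 *
 *	blk_mq_quiesce_queue(q);
 *	... reconfigure the device while no new dispatch can start ...
 *	blk_mq_unquiesce_queue(q);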
292 */ 293 void blk_mq_unquiesce_queue(struct request_queue *q) 294 { 295 unsigned long flags; 296 bool run_queue = false; 297 298 spin_lock_irqsave(&q->queue_lock, flags); 299 if (WARN_ON_ONCE(q->quiesce_depth <= 0)) { 300 ; 301 } else if (!--q->quiesce_depth) { 302 blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q); 303 run_queue = true; 304 } 305 spin_unlock_irqrestore(&q->queue_lock, flags); 306 307 /* dispatch requests which are inserted during quiescing */ 308 if (run_queue) 309 blk_mq_run_hw_queues(q, true); 310 } 311 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue); 312 313 void blk_mq_wake_waiters(struct request_queue *q) 314 { 315 struct blk_mq_hw_ctx *hctx; 316 unsigned long i; 317 318 queue_for_each_hw_ctx(q, hctx, i) 319 if (blk_mq_hw_queue_mapped(hctx)) 320 blk_mq_tag_wakeup_all(hctx->tags, true); 321 } 322 323 void blk_rq_init(struct request_queue *q, struct request *rq) 324 { 325 memset(rq, 0, sizeof(*rq)); 326 327 INIT_LIST_HEAD(&rq->queuelist); 328 rq->q = q; 329 rq->__sector = (sector_t) -1; 330 INIT_HLIST_NODE(&rq->hash); 331 RB_CLEAR_NODE(&rq->rb_node); 332 rq->tag = BLK_MQ_NO_TAG; 333 rq->internal_tag = BLK_MQ_NO_TAG; 334 rq->start_time_ns = ktime_get_ns(); 335 rq->part = NULL; 336 blk_crypto_rq_set_defaults(rq); 337 } 338 EXPORT_SYMBOL(blk_rq_init); 339 340 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, 341 struct blk_mq_tags *tags, unsigned int tag, u64 alloc_time_ns) 342 { 343 struct blk_mq_ctx *ctx = data->ctx; 344 struct blk_mq_hw_ctx *hctx = data->hctx; 345 struct request_queue *q = data->q; 346 struct request *rq = tags->static_rqs[tag]; 347 348 rq->q = q; 349 rq->mq_ctx = ctx; 350 rq->mq_hctx = hctx; 351 rq->cmd_flags = data->cmd_flags; 352 353 if (data->flags & BLK_MQ_REQ_PM) 354 data->rq_flags |= RQF_PM; 355 if (blk_queue_io_stat(q)) 356 data->rq_flags |= RQF_IO_STAT; 357 rq->rq_flags = data->rq_flags; 358 359 if (!(data->rq_flags & RQF_ELV)) { 360 rq->tag = tag; 361 rq->internal_tag = BLK_MQ_NO_TAG; 362 } else { 363 rq->tag = BLK_MQ_NO_TAG; 364 rq->internal_tag = tag; 365 } 366 rq->timeout = 0; 367 368 if (blk_mq_need_time_stamp(rq)) 369 rq->start_time_ns = ktime_get_ns(); 370 else 371 rq->start_time_ns = 0; 372 rq->part = NULL; 373 #ifdef CONFIG_BLK_RQ_ALLOC_TIME 374 rq->alloc_time_ns = alloc_time_ns; 375 #endif 376 rq->io_start_time_ns = 0; 377 rq->stats_sectors = 0; 378 rq->nr_phys_segments = 0; 379 #if defined(CONFIG_BLK_DEV_INTEGRITY) 380 rq->nr_integrity_segments = 0; 381 #endif 382 rq->end_io = NULL; 383 rq->end_io_data = NULL; 384 385 blk_crypto_rq_set_defaults(rq); 386 INIT_LIST_HEAD(&rq->queuelist); 387 /* tag was already set */ 388 WRITE_ONCE(rq->deadline, 0); 389 req_ref_set(rq, 1); 390 391 if (rq->rq_flags & RQF_ELV) { 392 struct elevator_queue *e = data->q->elevator; 393 394 INIT_HLIST_NODE(&rq->hash); 395 RB_CLEAR_NODE(&rq->rb_node); 396 397 if (!op_is_flush(data->cmd_flags) && 398 e->type->ops.prepare_request) { 399 e->type->ops.prepare_request(rq); 400 rq->rq_flags |= RQF_ELVPRIV; 401 } 402 } 403 404 return rq; 405 } 406 407 static inline struct request * 408 __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data, 409 u64 alloc_time_ns) 410 { 411 unsigned int tag, tag_offset; 412 struct blk_mq_tags *tags; 413 struct request *rq; 414 unsigned long tag_mask; 415 int i, nr = 0; 416 417 tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset); 418 if (unlikely(!tag_mask)) 419 return NULL; 420 421 tags = blk_mq_tags_from_data(data); 422 for (i = 0; tag_mask; i++) { 423 if (!(tag_mask & (1UL << i))) 424 continue; 425 tag = 
                                tag_offset + i;
                prefetch(tags->static_rqs[tag]);
                tag_mask &= ~(1UL << i);
                rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns);
                rq_list_add(data->cached_rq, rq);
                nr++;
        }
        /* caller already holds a reference, add for remainder */
        percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
        data->nr_tags -= nr;

        return rq_list_pop(data->cached_rq);
}

static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
{
        struct request_queue *q = data->q;
        u64 alloc_time_ns = 0;
        struct request *rq;
        unsigned int tag;

        /* alloc_time includes depth and tag waits */
        if (blk_queue_rq_alloc_time(q))
                alloc_time_ns = ktime_get_ns();

        if (data->cmd_flags & REQ_NOWAIT)
                data->flags |= BLK_MQ_REQ_NOWAIT;

        if (q->elevator) {
                struct elevator_queue *e = q->elevator;

                data->rq_flags |= RQF_ELV;

                /*
                 * Flush/passthrough requests are special and go directly to
                 * the dispatch list. Don't include reserved tags in the
                 * limiting, as it isn't useful.
                 */
                if (!op_is_flush(data->cmd_flags) &&
                    !blk_op_is_passthrough(data->cmd_flags) &&
                    e->type->ops.limit_depth &&
                    !(data->flags & BLK_MQ_REQ_RESERVED))
                        e->type->ops.limit_depth(data->cmd_flags, data);
        }

retry:
        data->ctx = blk_mq_get_ctx(q);
        data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
        if (!(data->rq_flags & RQF_ELV))
                blk_mq_tag_busy(data->hctx);

        /*
         * Try batched alloc if we want more than 1 tag.
         */
        if (data->nr_tags > 1) {
                rq = __blk_mq_alloc_requests_batch(data, alloc_time_ns);
                if (rq)
                        return rq;
                data->nr_tags = 1;
        }

        /*
         * Waiting allocations only fail because of an inactive hctx. In that
         * case just retry the hctx assignment and tag allocation as CPU hotplug
         * should have migrated us to an online CPU by now.
         */
        tag = blk_mq_get_tag(data);
        if (tag == BLK_MQ_NO_TAG) {
                if (data->flags & BLK_MQ_REQ_NOWAIT)
                        return NULL;
                /*
                 * Give up the CPU and sleep for a random short time to
                 * ensure that threads using a realtime scheduling class
                 * are migrated off the CPU, and thus off the hctx that
                 * is going away.
500 */ 501 msleep(3); 502 goto retry; 503 } 504 505 return blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag, 506 alloc_time_ns); 507 } 508 509 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, 510 blk_mq_req_flags_t flags) 511 { 512 struct blk_mq_alloc_data data = { 513 .q = q, 514 .flags = flags, 515 .cmd_flags = op, 516 .nr_tags = 1, 517 }; 518 struct request *rq; 519 int ret; 520 521 ret = blk_queue_enter(q, flags); 522 if (ret) 523 return ERR_PTR(ret); 524 525 rq = __blk_mq_alloc_requests(&data); 526 if (!rq) 527 goto out_queue_exit; 528 rq->__data_len = 0; 529 rq->__sector = (sector_t) -1; 530 rq->bio = rq->biotail = NULL; 531 return rq; 532 out_queue_exit: 533 blk_queue_exit(q); 534 return ERR_PTR(-EWOULDBLOCK); 535 } 536 EXPORT_SYMBOL(blk_mq_alloc_request); 537 538 struct request *blk_mq_alloc_request_hctx(struct request_queue *q, 539 unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx) 540 { 541 struct blk_mq_alloc_data data = { 542 .q = q, 543 .flags = flags, 544 .cmd_flags = op, 545 .nr_tags = 1, 546 }; 547 u64 alloc_time_ns = 0; 548 unsigned int cpu; 549 unsigned int tag; 550 int ret; 551 552 /* alloc_time includes depth and tag waits */ 553 if (blk_queue_rq_alloc_time(q)) 554 alloc_time_ns = ktime_get_ns(); 555 556 /* 557 * If the tag allocator sleeps we could get an allocation for a 558 * different hardware context. No need to complicate the low level 559 * allocator for this for the rare use case of a command tied to 560 * a specific queue. 561 */ 562 if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED)))) 563 return ERR_PTR(-EINVAL); 564 565 if (hctx_idx >= q->nr_hw_queues) 566 return ERR_PTR(-EIO); 567 568 ret = blk_queue_enter(q, flags); 569 if (ret) 570 return ERR_PTR(ret); 571 572 /* 573 * Check if the hardware context is actually mapped to anything. 574 * If not tell the caller that it should skip this queue. 
575 */ 576 ret = -EXDEV; 577 data.hctx = xa_load(&q->hctx_table, hctx_idx); 578 if (!blk_mq_hw_queue_mapped(data.hctx)) 579 goto out_queue_exit; 580 cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask); 581 data.ctx = __blk_mq_get_ctx(q, cpu); 582 583 if (!q->elevator) 584 blk_mq_tag_busy(data.hctx); 585 else 586 data.rq_flags |= RQF_ELV; 587 588 ret = -EWOULDBLOCK; 589 tag = blk_mq_get_tag(&data); 590 if (tag == BLK_MQ_NO_TAG) 591 goto out_queue_exit; 592 return blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag, 593 alloc_time_ns); 594 595 out_queue_exit: 596 blk_queue_exit(q); 597 return ERR_PTR(ret); 598 } 599 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx); 600 601 static void __blk_mq_free_request(struct request *rq) 602 { 603 struct request_queue *q = rq->q; 604 struct blk_mq_ctx *ctx = rq->mq_ctx; 605 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 606 const int sched_tag = rq->internal_tag; 607 608 blk_crypto_free_request(rq); 609 blk_pm_mark_last_busy(rq); 610 rq->mq_hctx = NULL; 611 if (rq->tag != BLK_MQ_NO_TAG) 612 blk_mq_put_tag(hctx->tags, ctx, rq->tag); 613 if (sched_tag != BLK_MQ_NO_TAG) 614 blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag); 615 blk_mq_sched_restart(hctx); 616 blk_queue_exit(q); 617 } 618 619 void blk_mq_free_request(struct request *rq) 620 { 621 struct request_queue *q = rq->q; 622 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 623 624 if ((rq->rq_flags & RQF_ELVPRIV) && 625 q->elevator->type->ops.finish_request) 626 q->elevator->type->ops.finish_request(rq); 627 628 if (rq->rq_flags & RQF_MQ_INFLIGHT) 629 __blk_mq_dec_active_requests(hctx); 630 631 if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq))) 632 laptop_io_completion(q->disk->bdi); 633 634 rq_qos_done(q, rq); 635 636 WRITE_ONCE(rq->state, MQ_RQ_IDLE); 637 if (req_ref_put_and_test(rq)) 638 __blk_mq_free_request(rq); 639 } 640 EXPORT_SYMBOL_GPL(blk_mq_free_request); 641 642 void blk_mq_free_plug_rqs(struct blk_plug *plug) 643 { 644 struct request *rq; 645 646 while ((rq = rq_list_pop(&plug->cached_rq)) != NULL) 647 blk_mq_free_request(rq); 648 } 649 650 void blk_dump_rq_flags(struct request *rq, char *msg) 651 { 652 printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg, 653 rq->q->disk ? rq->q->disk->disk_name : "?", 654 (unsigned long long) rq->cmd_flags); 655 656 printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n", 657 (unsigned long long)blk_rq_pos(rq), 658 blk_rq_sectors(rq), blk_rq_cur_sectors(rq)); 659 printk(KERN_INFO " bio %p, biotail %p, len %u\n", 660 rq->bio, rq->biotail, blk_rq_bytes(rq)); 661 } 662 EXPORT_SYMBOL(blk_dump_rq_flags); 663 664 static void req_bio_endio(struct request *rq, struct bio *bio, 665 unsigned int nbytes, blk_status_t error) 666 { 667 if (unlikely(error)) { 668 bio->bi_status = error; 669 } else if (req_op(rq) == REQ_OP_ZONE_APPEND) { 670 /* 671 * Partial zone append completions cannot be supported as the 672 * BIO fragments may end up not being written sequentially. 
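 * (A plausible way to read this: the device, not the submitter, picks the
 * actual write location for a zone append, so a resubmitted remainder
 * would not be guaranteed to land directly after the bytes that already
 * completed.)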
673 */ 674 if (bio->bi_iter.bi_size != nbytes) 675 bio->bi_status = BLK_STS_IOERR; 676 else 677 bio->bi_iter.bi_sector = rq->__sector; 678 } 679 680 bio_advance(bio, nbytes); 681 682 if (unlikely(rq->rq_flags & RQF_QUIET)) 683 bio_set_flag(bio, BIO_QUIET); 684 /* don't actually finish bio if it's part of flush sequence */ 685 if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ)) 686 bio_endio(bio); 687 } 688 689 static void blk_account_io_completion(struct request *req, unsigned int bytes) 690 { 691 if (req->part && blk_do_io_stat(req)) { 692 const int sgrp = op_stat_group(req_op(req)); 693 694 part_stat_lock(); 695 part_stat_add(req->part, sectors[sgrp], bytes >> 9); 696 part_stat_unlock(); 697 } 698 } 699 700 static void blk_print_req_error(struct request *req, blk_status_t status) 701 { 702 printk_ratelimited(KERN_ERR 703 "%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x " 704 "phys_seg %u prio class %u\n", 705 blk_status_to_str(status), 706 req->q->disk ? req->q->disk->disk_name : "?", 707 blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)), 708 req->cmd_flags & ~REQ_OP_MASK, 709 req->nr_phys_segments, 710 IOPRIO_PRIO_CLASS(req->ioprio)); 711 } 712 713 /* 714 * Fully end IO on a request. Does not support partial completions, or 715 * errors. 716 */ 717 static void blk_complete_request(struct request *req) 718 { 719 const bool is_flush = (req->rq_flags & RQF_FLUSH_SEQ) != 0; 720 int total_bytes = blk_rq_bytes(req); 721 struct bio *bio = req->bio; 722 723 trace_block_rq_complete(req, BLK_STS_OK, total_bytes); 724 725 if (!bio) 726 return; 727 728 #ifdef CONFIG_BLK_DEV_INTEGRITY 729 if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ) 730 req->q->integrity.profile->complete_fn(req, total_bytes); 731 #endif 732 733 blk_account_io_completion(req, total_bytes); 734 735 do { 736 struct bio *next = bio->bi_next; 737 738 /* Completion has already been traced */ 739 bio_clear_flag(bio, BIO_TRACE_COMPLETION); 740 741 if (req_op(req) == REQ_OP_ZONE_APPEND) 742 bio->bi_iter.bi_sector = req->__sector; 743 744 if (!is_flush) 745 bio_endio(bio); 746 bio = next; 747 } while (bio); 748 749 /* 750 * Reset counters so that the request stacking driver 751 * can find how many bytes remain in the request 752 * later. 753 */ 754 req->bio = NULL; 755 req->__data_len = 0; 756 } 757 758 /** 759 * blk_update_request - Complete multiple bytes without completing the request 760 * @req: the request being processed 761 * @error: block status code 762 * @nr_bytes: number of bytes to complete for @req 763 * 764 * Description: 765 * Ends I/O on a number of bytes attached to @req, but doesn't complete 766 * the request structure even if @req doesn't have leftover. 767 * If @req has leftover, sets it up for the next range of segments. 768 * 769 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees 770 * %false return from this function. 771 * 772 * Note: 773 * The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function 774 * except in the consistency check at the end of this function. 
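 *
 * Illustrative use (hypothetical LLD code, not from this file): a driver
 * that has finished only the first 4096 bytes of a larger request can call
 * blk_update_request(req, BLK_STS_OK, 4096); a %true return means the
 * remainder of @req still has to be processed and completed later.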
775 * 776 * Return: 777 * %false - this request doesn't have any more data 778 * %true - this request has more data 779 **/ 780 bool blk_update_request(struct request *req, blk_status_t error, 781 unsigned int nr_bytes) 782 { 783 int total_bytes; 784 785 trace_block_rq_complete(req, error, nr_bytes); 786 787 if (!req->bio) 788 return false; 789 790 #ifdef CONFIG_BLK_DEV_INTEGRITY 791 if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ && 792 error == BLK_STS_OK) 793 req->q->integrity.profile->complete_fn(req, nr_bytes); 794 #endif 795 796 if (unlikely(error && !blk_rq_is_passthrough(req) && 797 !(req->rq_flags & RQF_QUIET))) { 798 blk_print_req_error(req, error); 799 trace_block_rq_error(req, error, nr_bytes); 800 } 801 802 blk_account_io_completion(req, nr_bytes); 803 804 total_bytes = 0; 805 while (req->bio) { 806 struct bio *bio = req->bio; 807 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); 808 809 if (bio_bytes == bio->bi_iter.bi_size) 810 req->bio = bio->bi_next; 811 812 /* Completion has already been traced */ 813 bio_clear_flag(bio, BIO_TRACE_COMPLETION); 814 req_bio_endio(req, bio, bio_bytes, error); 815 816 total_bytes += bio_bytes; 817 nr_bytes -= bio_bytes; 818 819 if (!nr_bytes) 820 break; 821 } 822 823 /* 824 * completely done 825 */ 826 if (!req->bio) { 827 /* 828 * Reset counters so that the request stacking driver 829 * can find how many bytes remain in the request 830 * later. 831 */ 832 req->__data_len = 0; 833 return false; 834 } 835 836 req->__data_len -= total_bytes; 837 838 /* update sector only for requests with clear definition of sector */ 839 if (!blk_rq_is_passthrough(req)) 840 req->__sector += total_bytes >> 9; 841 842 /* mixed attributes always follow the first bio */ 843 if (req->rq_flags & RQF_MIXED_MERGE) { 844 req->cmd_flags &= ~REQ_FAILFAST_MASK; 845 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK; 846 } 847 848 if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) { 849 /* 850 * If total number of sectors is less than the first segment 851 * size, something has gone terribly wrong. 852 */ 853 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { 854 blk_dump_rq_flags(req, "request botched"); 855 req->__data_len = blk_rq_cur_bytes(req); 856 } 857 858 /* recalculate the number of segments */ 859 req->nr_phys_segments = blk_recalc_rq_segments(req); 860 } 861 862 return true; 863 } 864 EXPORT_SYMBOL_GPL(blk_update_request); 865 866 static void __blk_account_io_done(struct request *req, u64 now) 867 { 868 const int sgrp = op_stat_group(req_op(req)); 869 870 part_stat_lock(); 871 update_io_ticks(req->part, jiffies, true); 872 part_stat_inc(req->part, ios[sgrp]); 873 part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns); 874 part_stat_unlock(); 875 } 876 877 static inline void blk_account_io_done(struct request *req, u64 now) 878 { 879 /* 880 * Account IO completion. flush_rq isn't accounted as a 881 * normal IO on queueing nor completion. Accounting the 882 * containing request is enough. 883 */ 884 if (blk_do_io_stat(req) && req->part && 885 !(req->rq_flags & RQF_FLUSH_SEQ)) 886 __blk_account_io_done(req, now); 887 } 888 889 static void __blk_account_io_start(struct request *rq) 890 { 891 /* 892 * All non-passthrough requests are created from a bio with one 893 * exception: when a flush command that is part of a flush sequence 894 * generated by the state machine in blk-flush.c is cloned onto the 895 * lower device by dm-multipath we can get here without a bio. 
896 */ 897 if (rq->bio) 898 rq->part = rq->bio->bi_bdev; 899 else 900 rq->part = rq->q->disk->part0; 901 902 part_stat_lock(); 903 update_io_ticks(rq->part, jiffies, false); 904 part_stat_unlock(); 905 } 906 907 static inline void blk_account_io_start(struct request *req) 908 { 909 if (blk_do_io_stat(req)) 910 __blk_account_io_start(req); 911 } 912 913 static inline void __blk_mq_end_request_acct(struct request *rq, u64 now) 914 { 915 if (rq->rq_flags & RQF_STATS) { 916 blk_mq_poll_stats_start(rq->q); 917 blk_stat_add(rq, now); 918 } 919 920 blk_mq_sched_completed_request(rq, now); 921 blk_account_io_done(rq, now); 922 } 923 924 inline void __blk_mq_end_request(struct request *rq, blk_status_t error) 925 { 926 if (blk_mq_need_time_stamp(rq)) 927 __blk_mq_end_request_acct(rq, ktime_get_ns()); 928 929 if (rq->end_io) { 930 rq_qos_done(rq->q, rq); 931 rq->end_io(rq, error); 932 } else { 933 blk_mq_free_request(rq); 934 } 935 } 936 EXPORT_SYMBOL(__blk_mq_end_request); 937 938 void blk_mq_end_request(struct request *rq, blk_status_t error) 939 { 940 if (blk_update_request(rq, error, blk_rq_bytes(rq))) 941 BUG(); 942 __blk_mq_end_request(rq, error); 943 } 944 EXPORT_SYMBOL(blk_mq_end_request); 945 946 #define TAG_COMP_BATCH 32 947 948 static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx, 949 int *tag_array, int nr_tags) 950 { 951 struct request_queue *q = hctx->queue; 952 953 /* 954 * All requests should have been marked as RQF_MQ_INFLIGHT, so 955 * update hctx->nr_active in batch 956 */ 957 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) 958 __blk_mq_sub_active_requests(hctx, nr_tags); 959 960 blk_mq_put_tags(hctx->tags, tag_array, nr_tags); 961 percpu_ref_put_many(&q->q_usage_counter, nr_tags); 962 } 963 964 void blk_mq_end_request_batch(struct io_comp_batch *iob) 965 { 966 int tags[TAG_COMP_BATCH], nr_tags = 0; 967 struct blk_mq_hw_ctx *cur_hctx = NULL; 968 struct request *rq; 969 u64 now = 0; 970 971 if (iob->need_ts) 972 now = ktime_get_ns(); 973 974 while ((rq = rq_list_pop(&iob->req_list)) != NULL) { 975 prefetch(rq->bio); 976 prefetch(rq->rq_next); 977 978 blk_complete_request(rq); 979 if (iob->need_ts) 980 __blk_mq_end_request_acct(rq, now); 981 982 rq_qos_done(rq->q, rq); 983 984 WRITE_ONCE(rq->state, MQ_RQ_IDLE); 985 if (!req_ref_put_and_test(rq)) 986 continue; 987 988 blk_crypto_free_request(rq); 989 blk_pm_mark_last_busy(rq); 990 991 if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) { 992 if (cur_hctx) 993 blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags); 994 nr_tags = 0; 995 cur_hctx = rq->mq_hctx; 996 } 997 tags[nr_tags++] = rq->tag; 998 } 999 1000 if (nr_tags) 1001 blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags); 1002 } 1003 EXPORT_SYMBOL_GPL(blk_mq_end_request_batch); 1004 1005 static void blk_complete_reqs(struct llist_head *list) 1006 { 1007 struct llist_node *entry = llist_reverse_order(llist_del_all(list)); 1008 struct request *rq, *next; 1009 1010 llist_for_each_entry_safe(rq, next, entry, ipi_list) 1011 rq->q->mq_ops->complete(rq); 1012 } 1013 1014 static __latent_entropy void blk_done_softirq(struct softirq_action *h) 1015 { 1016 blk_complete_reqs(this_cpu_ptr(&blk_cpu_done)); 1017 } 1018 1019 static int blk_softirq_cpu_dead(unsigned int cpu) 1020 { 1021 blk_complete_reqs(&per_cpu(blk_cpu_done, cpu)); 1022 return 0; 1023 } 1024 1025 static void __blk_mq_complete_request_remote(void *data) 1026 { 1027 __raise_softirq_irqoff(BLOCK_SOFTIRQ); 1028 } 1029 1030 static inline bool blk_mq_complete_need_ipi(struct request *rq) 1031 { 1032 int cpu = 
raw_smp_processor_id();

        if (!IS_ENABLED(CONFIG_SMP) ||
            !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
                return false;
        /*
         * With force threaded interrupts enabled, raising softirq from an SMP
         * function call will always result in waking the ksoftirqd thread.
         * This is probably worse than completing the request on a different
         * cache domain.
         */
        if (force_irqthreads())
                return false;

        /* same CPU or cache domain? Complete locally */
        if (cpu == rq->mq_ctx->cpu ||
            (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
             cpus_share_cache(cpu, rq->mq_ctx->cpu)))
                return false;

        /* don't try to IPI to an offline CPU */
        return cpu_online(rq->mq_ctx->cpu);
}

static void blk_mq_complete_send_ipi(struct request *rq)
{
        struct llist_head *list;
        unsigned int cpu;

        cpu = rq->mq_ctx->cpu;
        list = &per_cpu(blk_cpu_done, cpu);
        if (llist_add(&rq->ipi_list, list)) {
                INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
                smp_call_function_single_async(cpu, &rq->csd);
        }
}

static void blk_mq_raise_softirq(struct request *rq)
{
        struct llist_head *list;

        preempt_disable();
        list = this_cpu_ptr(&blk_cpu_done);
        if (llist_add(&rq->ipi_list, list))
                raise_softirq(BLOCK_SOFTIRQ);
        preempt_enable();
}

bool blk_mq_complete_request_remote(struct request *rq)
{
        WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);

        /*
         * For a polled request, always complete locally; it's pointless
         * to redirect the completion.
         */
        if (rq->cmd_flags & REQ_POLLED)
                return false;

        if (blk_mq_complete_need_ipi(rq)) {
                blk_mq_complete_send_ipi(rq);
                return true;
        }

        if (rq->q->nr_hw_queues == 1) {
                blk_mq_raise_softirq(rq);
                return true;
        }
        return false;
}
EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq: the request being processed
 *
 * Description:
 *	Complete a request by scheduling the ->complete_rq operation.
 **/
void blk_mq_complete_request(struct request *rq)
{
        if (!blk_mq_complete_request_remote(rq))
                rq->q->mq_ops->complete(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

/**
 * blk_mq_start_request - Start processing a request
 * @rq: Pointer to request to be started
 *
 * Function used by device drivers to notify the block layer that a request
 * is going to be processed now, so the block layer can do proper
 * initializations such as starting the timeout timer.
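 *
 * Illustrative ->queue_rq() usage (hypothetical driver, not from this file):
 *
 *	static blk_status_t foo_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					 const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);
 *		... hand rq to the hardware ...
 *		return BLK_STS_OK;
 *	}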
1125 */ 1126 void blk_mq_start_request(struct request *rq) 1127 { 1128 struct request_queue *q = rq->q; 1129 1130 trace_block_rq_issue(rq); 1131 1132 if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) { 1133 u64 start_time; 1134 #ifdef CONFIG_BLK_CGROUP 1135 if (rq->bio) 1136 start_time = bio_issue_time(&rq->bio->bi_issue); 1137 else 1138 #endif 1139 start_time = ktime_get_ns(); 1140 rq->io_start_time_ns = start_time; 1141 rq->stats_sectors = blk_rq_sectors(rq); 1142 rq->rq_flags |= RQF_STATS; 1143 rq_qos_issue(q, rq); 1144 } 1145 1146 WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE); 1147 1148 blk_add_timer(rq); 1149 WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT); 1150 1151 #ifdef CONFIG_BLK_DEV_INTEGRITY 1152 if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE) 1153 q->integrity.profile->prepare_fn(rq); 1154 #endif 1155 if (rq->bio && rq->bio->bi_opf & REQ_POLLED) 1156 WRITE_ONCE(rq->bio->bi_cookie, blk_rq_to_qc(rq)); 1157 } 1158 EXPORT_SYMBOL(blk_mq_start_request); 1159 1160 /** 1161 * blk_end_sync_rq - executes a completion event on a request 1162 * @rq: request to complete 1163 * @error: end I/O status of the request 1164 */ 1165 static void blk_end_sync_rq(struct request *rq, blk_status_t error) 1166 { 1167 struct completion *waiting = rq->end_io_data; 1168 1169 rq->end_io_data = (void *)(uintptr_t)error; 1170 1171 /* 1172 * complete last, if this is a stack request the process (and thus 1173 * the rq pointer) could be invalid right after this complete() 1174 */ 1175 complete(waiting); 1176 } 1177 1178 /** 1179 * blk_execute_rq_nowait - insert a request to I/O scheduler for execution 1180 * @rq: request to insert 1181 * @at_head: insert request at head or tail of queue 1182 * @done: I/O completion handler 1183 * 1184 * Description: 1185 * Insert a fully prepared request at the back of the I/O scheduler queue 1186 * for execution. Don't wait for completion. 1187 * 1188 * Note: 1189 * This function will invoke @done directly if the queue is dead. 1190 */ 1191 void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done) 1192 { 1193 WARN_ON(irqs_disabled()); 1194 WARN_ON(!blk_rq_is_passthrough(rq)); 1195 1196 rq->end_io = done; 1197 1198 blk_account_io_start(rq); 1199 1200 /* 1201 * don't check dying flag for MQ because the request won't 1202 * be reused after dying flag is set 1203 */ 1204 blk_mq_sched_insert_request(rq, at_head, true, false); 1205 } 1206 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait); 1207 1208 static bool blk_rq_is_poll(struct request *rq) 1209 { 1210 if (!rq->mq_hctx) 1211 return false; 1212 if (rq->mq_hctx->type != HCTX_TYPE_POLL) 1213 return false; 1214 if (WARN_ON_ONCE(!rq->bio)) 1215 return false; 1216 return true; 1217 } 1218 1219 static void blk_rq_poll_completion(struct request *rq, struct completion *wait) 1220 { 1221 do { 1222 bio_poll(rq->bio, NULL, 0); 1223 cond_resched(); 1224 } while (!completion_done(wait)); 1225 } 1226 1227 /** 1228 * blk_execute_rq - insert a request into queue for execution 1229 * @rq: request to insert 1230 * @at_head: insert request at head or tail of queue 1231 * 1232 * Description: 1233 * Insert a fully prepared request at the back of the I/O scheduler queue 1234 * for execution and wait for completion. 1235 * Return: The blk_status_t result provided to blk_mq_end_request(). 
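 *
 * Illustrative caller (hypothetical, not from this file):
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... set up the passthrough payload ...
 *	status = blk_execute_rq(rq, false);
 *	blk_mq_free_request(rq);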
1236 */ 1237 blk_status_t blk_execute_rq(struct request *rq, bool at_head) 1238 { 1239 DECLARE_COMPLETION_ONSTACK(wait); 1240 unsigned long hang_check; 1241 1242 rq->end_io_data = &wait; 1243 blk_execute_rq_nowait(rq, at_head, blk_end_sync_rq); 1244 1245 /* Prevent hang_check timer from firing at us during very long I/O */ 1246 hang_check = sysctl_hung_task_timeout_secs; 1247 1248 if (blk_rq_is_poll(rq)) 1249 blk_rq_poll_completion(rq, &wait); 1250 else if (hang_check) 1251 while (!wait_for_completion_io_timeout(&wait, 1252 hang_check * (HZ/2))) 1253 ; 1254 else 1255 wait_for_completion_io(&wait); 1256 1257 return (blk_status_t)(uintptr_t)rq->end_io_data; 1258 } 1259 EXPORT_SYMBOL(blk_execute_rq); 1260 1261 static void __blk_mq_requeue_request(struct request *rq) 1262 { 1263 struct request_queue *q = rq->q; 1264 1265 blk_mq_put_driver_tag(rq); 1266 1267 trace_block_rq_requeue(rq); 1268 rq_qos_requeue(q, rq); 1269 1270 if (blk_mq_request_started(rq)) { 1271 WRITE_ONCE(rq->state, MQ_RQ_IDLE); 1272 rq->rq_flags &= ~RQF_TIMED_OUT; 1273 } 1274 } 1275 1276 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list) 1277 { 1278 __blk_mq_requeue_request(rq); 1279 1280 /* this request will be re-inserted to io scheduler queue */ 1281 blk_mq_sched_requeue_request(rq); 1282 1283 blk_mq_add_to_requeue_list(rq, true, kick_requeue_list); 1284 } 1285 EXPORT_SYMBOL(blk_mq_requeue_request); 1286 1287 static void blk_mq_requeue_work(struct work_struct *work) 1288 { 1289 struct request_queue *q = 1290 container_of(work, struct request_queue, requeue_work.work); 1291 LIST_HEAD(rq_list); 1292 struct request *rq, *next; 1293 1294 spin_lock_irq(&q->requeue_lock); 1295 list_splice_init(&q->requeue_list, &rq_list); 1296 spin_unlock_irq(&q->requeue_lock); 1297 1298 list_for_each_entry_safe(rq, next, &rq_list, queuelist) { 1299 if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP))) 1300 continue; 1301 1302 rq->rq_flags &= ~RQF_SOFTBARRIER; 1303 list_del_init(&rq->queuelist); 1304 /* 1305 * If RQF_DONTPREP, rq has contained some driver specific 1306 * data, so insert it to hctx dispatch list to avoid any 1307 * merge. 1308 */ 1309 if (rq->rq_flags & RQF_DONTPREP) 1310 blk_mq_request_bypass_insert(rq, false, false); 1311 else 1312 blk_mq_sched_insert_request(rq, true, false, false); 1313 } 1314 1315 while (!list_empty(&rq_list)) { 1316 rq = list_entry(rq_list.next, struct request, queuelist); 1317 list_del_init(&rq->queuelist); 1318 blk_mq_sched_insert_request(rq, false, false, false); 1319 } 1320 1321 blk_mq_run_hw_queues(q, false); 1322 } 1323 1324 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head, 1325 bool kick_requeue_list) 1326 { 1327 struct request_queue *q = rq->q; 1328 unsigned long flags; 1329 1330 /* 1331 * We abuse this flag that is otherwise used by the I/O scheduler to 1332 * request head insertion from the workqueue. 
1333 */ 1334 BUG_ON(rq->rq_flags & RQF_SOFTBARRIER); 1335 1336 spin_lock_irqsave(&q->requeue_lock, flags); 1337 if (at_head) { 1338 rq->rq_flags |= RQF_SOFTBARRIER; 1339 list_add(&rq->queuelist, &q->requeue_list); 1340 } else { 1341 list_add_tail(&rq->queuelist, &q->requeue_list); 1342 } 1343 spin_unlock_irqrestore(&q->requeue_lock, flags); 1344 1345 if (kick_requeue_list) 1346 blk_mq_kick_requeue_list(q); 1347 } 1348 1349 void blk_mq_kick_requeue_list(struct request_queue *q) 1350 { 1351 kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0); 1352 } 1353 EXPORT_SYMBOL(blk_mq_kick_requeue_list); 1354 1355 void blk_mq_delay_kick_requeue_list(struct request_queue *q, 1356 unsigned long msecs) 1357 { 1358 kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 1359 msecs_to_jiffies(msecs)); 1360 } 1361 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list); 1362 1363 static bool blk_mq_rq_inflight(struct request *rq, void *priv, 1364 bool reserved) 1365 { 1366 /* 1367 * If we find a request that isn't idle we know the queue is busy 1368 * as it's checked in the iter. 1369 * Return false to stop the iteration. 1370 */ 1371 if (blk_mq_request_started(rq)) { 1372 bool *busy = priv; 1373 1374 *busy = true; 1375 return false; 1376 } 1377 1378 return true; 1379 } 1380 1381 bool blk_mq_queue_inflight(struct request_queue *q) 1382 { 1383 bool busy = false; 1384 1385 blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy); 1386 return busy; 1387 } 1388 EXPORT_SYMBOL_GPL(blk_mq_queue_inflight); 1389 1390 static void blk_mq_rq_timed_out(struct request *req, bool reserved) 1391 { 1392 req->rq_flags |= RQF_TIMED_OUT; 1393 if (req->q->mq_ops->timeout) { 1394 enum blk_eh_timer_return ret; 1395 1396 ret = req->q->mq_ops->timeout(req, reserved); 1397 if (ret == BLK_EH_DONE) 1398 return; 1399 WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER); 1400 } 1401 1402 blk_add_timer(req); 1403 } 1404 1405 static bool blk_mq_req_expired(struct request *rq, unsigned long *next) 1406 { 1407 unsigned long deadline; 1408 1409 if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT) 1410 return false; 1411 if (rq->rq_flags & RQF_TIMED_OUT) 1412 return false; 1413 1414 deadline = READ_ONCE(rq->deadline); 1415 if (time_after_eq(jiffies, deadline)) 1416 return true; 1417 1418 if (*next == 0) 1419 *next = deadline; 1420 else if (time_after(*next, deadline)) 1421 *next = deadline; 1422 return false; 1423 } 1424 1425 void blk_mq_put_rq_ref(struct request *rq) 1426 { 1427 if (is_flush_rq(rq)) 1428 rq->end_io(rq, 0); 1429 else if (req_ref_put_and_test(rq)) 1430 __blk_mq_free_request(rq); 1431 } 1432 1433 static bool blk_mq_check_expired(struct request *rq, void *priv, bool reserved) 1434 { 1435 unsigned long *next = priv; 1436 1437 /* 1438 * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot 1439 * be reallocated underneath the timeout handler's processing, then 1440 * the expire check is reliable. If the request is not expired, then 1441 * it was completed and reallocated as a new request after returning 1442 * from blk_mq_check_expired(). 
1443 */ 1444 if (blk_mq_req_expired(rq, next)) 1445 blk_mq_rq_timed_out(rq, reserved); 1446 return true; 1447 } 1448 1449 static void blk_mq_timeout_work(struct work_struct *work) 1450 { 1451 struct request_queue *q = 1452 container_of(work, struct request_queue, timeout_work); 1453 unsigned long next = 0; 1454 struct blk_mq_hw_ctx *hctx; 1455 unsigned long i; 1456 1457 /* A deadlock might occur if a request is stuck requiring a 1458 * timeout at the same time a queue freeze is waiting 1459 * completion, since the timeout code would not be able to 1460 * acquire the queue reference here. 1461 * 1462 * That's why we don't use blk_queue_enter here; instead, we use 1463 * percpu_ref_tryget directly, because we need to be able to 1464 * obtain a reference even in the short window between the queue 1465 * starting to freeze, by dropping the first reference in 1466 * blk_freeze_queue_start, and the moment the last request is 1467 * consumed, marked by the instant q_usage_counter reaches 1468 * zero. 1469 */ 1470 if (!percpu_ref_tryget(&q->q_usage_counter)) 1471 return; 1472 1473 blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next); 1474 1475 if (next != 0) { 1476 mod_timer(&q->timeout, next); 1477 } else { 1478 /* 1479 * Request timeouts are handled as a forward rolling timer. If 1480 * we end up here it means that no requests are pending and 1481 * also that no request has been pending for a while. Mark 1482 * each hctx as idle. 1483 */ 1484 queue_for_each_hw_ctx(q, hctx, i) { 1485 /* the hctx may be unmapped, so check it here */ 1486 if (blk_mq_hw_queue_mapped(hctx)) 1487 blk_mq_tag_idle(hctx); 1488 } 1489 } 1490 blk_queue_exit(q); 1491 } 1492 1493 struct flush_busy_ctx_data { 1494 struct blk_mq_hw_ctx *hctx; 1495 struct list_head *list; 1496 }; 1497 1498 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data) 1499 { 1500 struct flush_busy_ctx_data *flush_data = data; 1501 struct blk_mq_hw_ctx *hctx = flush_data->hctx; 1502 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; 1503 enum hctx_type type = hctx->type; 1504 1505 spin_lock(&ctx->lock); 1506 list_splice_tail_init(&ctx->rq_lists[type], flush_data->list); 1507 sbitmap_clear_bit(sb, bitnr); 1508 spin_unlock(&ctx->lock); 1509 return true; 1510 } 1511 1512 /* 1513 * Process software queues that have been marked busy, splicing them 1514 * to the for-dispatch 1515 */ 1516 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list) 1517 { 1518 struct flush_busy_ctx_data data = { 1519 .hctx = hctx, 1520 .list = list, 1521 }; 1522 1523 sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data); 1524 } 1525 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs); 1526 1527 struct dispatch_rq_data { 1528 struct blk_mq_hw_ctx *hctx; 1529 struct request *rq; 1530 }; 1531 1532 static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr, 1533 void *data) 1534 { 1535 struct dispatch_rq_data *dispatch_data = data; 1536 struct blk_mq_hw_ctx *hctx = dispatch_data->hctx; 1537 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; 1538 enum hctx_type type = hctx->type; 1539 1540 spin_lock(&ctx->lock); 1541 if (!list_empty(&ctx->rq_lists[type])) { 1542 dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next); 1543 list_del_init(&dispatch_data->rq->queuelist); 1544 if (list_empty(&ctx->rq_lists[type])) 1545 sbitmap_clear_bit(sb, bitnr); 1546 } 1547 spin_unlock(&ctx->lock); 1548 1549 return !dispatch_data->rq; 1550 } 1551 1552 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, 1553 struct blk_mq_ctx *start) 
1554 { 1555 unsigned off = start ? start->index_hw[hctx->type] : 0; 1556 struct dispatch_rq_data data = { 1557 .hctx = hctx, 1558 .rq = NULL, 1559 }; 1560 1561 __sbitmap_for_each_set(&hctx->ctx_map, off, 1562 dispatch_rq_from_ctx, &data); 1563 1564 return data.rq; 1565 } 1566 1567 static bool __blk_mq_alloc_driver_tag(struct request *rq) 1568 { 1569 struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags; 1570 unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags; 1571 int tag; 1572 1573 blk_mq_tag_busy(rq->mq_hctx); 1574 1575 if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) { 1576 bt = &rq->mq_hctx->tags->breserved_tags; 1577 tag_offset = 0; 1578 } else { 1579 if (!hctx_may_queue(rq->mq_hctx, bt)) 1580 return false; 1581 } 1582 1583 tag = __sbitmap_queue_get(bt); 1584 if (tag == BLK_MQ_NO_TAG) 1585 return false; 1586 1587 rq->tag = tag + tag_offset; 1588 return true; 1589 } 1590 1591 bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq) 1592 { 1593 if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq)) 1594 return false; 1595 1596 if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) && 1597 !(rq->rq_flags & RQF_MQ_INFLIGHT)) { 1598 rq->rq_flags |= RQF_MQ_INFLIGHT; 1599 __blk_mq_inc_active_requests(hctx); 1600 } 1601 hctx->tags->rqs[rq->tag] = rq; 1602 return true; 1603 } 1604 1605 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, 1606 int flags, void *key) 1607 { 1608 struct blk_mq_hw_ctx *hctx; 1609 1610 hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait); 1611 1612 spin_lock(&hctx->dispatch_wait_lock); 1613 if (!list_empty(&wait->entry)) { 1614 struct sbitmap_queue *sbq; 1615 1616 list_del_init(&wait->entry); 1617 sbq = &hctx->tags->bitmap_tags; 1618 atomic_dec(&sbq->ws_active); 1619 } 1620 spin_unlock(&hctx->dispatch_wait_lock); 1621 1622 blk_mq_run_hw_queue(hctx, true); 1623 return 1; 1624 } 1625 1626 /* 1627 * Mark us waiting for a tag. For shared tags, this involves hooking us into 1628 * the tag wakeups. For non-shared tags, we can simply mark us needing a 1629 * restart. For both cases, take care to check the condition again after 1630 * marking us as waiting. 1631 */ 1632 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, 1633 struct request *rq) 1634 { 1635 struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags; 1636 struct wait_queue_head *wq; 1637 wait_queue_entry_t *wait; 1638 bool ret; 1639 1640 if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) { 1641 blk_mq_sched_mark_restart_hctx(hctx); 1642 1643 /* 1644 * It's possible that a tag was freed in the window between the 1645 * allocation failure and adding the hardware queue to the wait 1646 * queue. 1647 * 1648 * Don't clear RESTART here, someone else could have set it. 1649 * At most this will cost an extra queue run. 1650 */ 1651 return blk_mq_get_driver_tag(rq); 1652 } 1653 1654 wait = &hctx->dispatch_wait; 1655 if (!list_empty_careful(&wait->entry)) 1656 return false; 1657 1658 wq = &bt_wait_ptr(sbq, hctx)->wait; 1659 1660 spin_lock_irq(&wq->lock); 1661 spin_lock(&hctx->dispatch_wait_lock); 1662 if (!list_empty(&wait->entry)) { 1663 spin_unlock(&hctx->dispatch_wait_lock); 1664 spin_unlock_irq(&wq->lock); 1665 return false; 1666 } 1667 1668 atomic_inc(&sbq->ws_active); 1669 wait->flags &= ~WQ_FLAG_EXCLUSIVE; 1670 __add_wait_queue(wq, wait); 1671 1672 /* 1673 * It's possible that a tag was freed in the window between the 1674 * allocation failure and adding the hardware queue to the wait 1675 * queue. 
1676 */ 1677 ret = blk_mq_get_driver_tag(rq); 1678 if (!ret) { 1679 spin_unlock(&hctx->dispatch_wait_lock); 1680 spin_unlock_irq(&wq->lock); 1681 return false; 1682 } 1683 1684 /* 1685 * We got a tag, remove ourselves from the wait queue to ensure 1686 * someone else gets the wakeup. 1687 */ 1688 list_del_init(&wait->entry); 1689 atomic_dec(&sbq->ws_active); 1690 spin_unlock(&hctx->dispatch_wait_lock); 1691 spin_unlock_irq(&wq->lock); 1692 1693 return true; 1694 } 1695 1696 #define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT 8 1697 #define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR 4 1698 /* 1699 * Update dispatch busy with the Exponential Weighted Moving Average(EWMA): 1700 * - EWMA is one simple way to compute running average value 1701 * - weight(7/8 and 1/8) is applied so that it can decrease exponentially 1702 * - take 4 as factor for avoiding to get too small(0) result, and this 1703 * factor doesn't matter because EWMA decreases exponentially 1704 */ 1705 static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy) 1706 { 1707 unsigned int ewma; 1708 1709 ewma = hctx->dispatch_busy; 1710 1711 if (!ewma && !busy) 1712 return; 1713 1714 ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1; 1715 if (busy) 1716 ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR; 1717 ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT; 1718 1719 hctx->dispatch_busy = ewma; 1720 } 1721 1722 #define BLK_MQ_RESOURCE_DELAY 3 /* ms units */ 1723 1724 static void blk_mq_handle_dev_resource(struct request *rq, 1725 struct list_head *list) 1726 { 1727 struct request *next = 1728 list_first_entry_or_null(list, struct request, queuelist); 1729 1730 /* 1731 * If an I/O scheduler has been configured and we got a driver tag for 1732 * the next request already, free it. 1733 */ 1734 if (next) 1735 blk_mq_put_driver_tag(next); 1736 1737 list_add(&rq->queuelist, list); 1738 __blk_mq_requeue_request(rq); 1739 } 1740 1741 static void blk_mq_handle_zone_resource(struct request *rq, 1742 struct list_head *zone_list) 1743 { 1744 /* 1745 * If we end up here it is because we cannot dispatch a request to a 1746 * specific zone due to LLD level zone-write locking or other zone 1747 * related resource not being available. In this case, set the request 1748 * aside in zone_list for retrying it later. 1749 */ 1750 list_add(&rq->queuelist, zone_list); 1751 __blk_mq_requeue_request(rq); 1752 } 1753 1754 enum prep_dispatch { 1755 PREP_DISPATCH_OK, 1756 PREP_DISPATCH_NO_TAG, 1757 PREP_DISPATCH_NO_BUDGET, 1758 }; 1759 1760 static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq, 1761 bool need_budget) 1762 { 1763 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 1764 int budget_token = -1; 1765 1766 if (need_budget) { 1767 budget_token = blk_mq_get_dispatch_budget(rq->q); 1768 if (budget_token < 0) { 1769 blk_mq_put_driver_tag(rq); 1770 return PREP_DISPATCH_NO_BUDGET; 1771 } 1772 blk_mq_set_rq_budget_token(rq, budget_token); 1773 } 1774 1775 if (!blk_mq_get_driver_tag(rq)) { 1776 /* 1777 * The initial allocation attempt failed, so we need to 1778 * rerun the hardware queue when a tag is freed. The 1779 * waitqueue takes care of that. If the queue is run 1780 * before we add this entry back on the dispatch list, 1781 * we'll re-run it below. 
1782 */ 1783 if (!blk_mq_mark_tag_wait(hctx, rq)) { 1784 /* 1785 * All budgets not got from this function will be put 1786 * together during handling partial dispatch 1787 */ 1788 if (need_budget) 1789 blk_mq_put_dispatch_budget(rq->q, budget_token); 1790 return PREP_DISPATCH_NO_TAG; 1791 } 1792 } 1793 1794 return PREP_DISPATCH_OK; 1795 } 1796 1797 /* release all allocated budgets before calling to blk_mq_dispatch_rq_list */ 1798 static void blk_mq_release_budgets(struct request_queue *q, 1799 struct list_head *list) 1800 { 1801 struct request *rq; 1802 1803 list_for_each_entry(rq, list, queuelist) { 1804 int budget_token = blk_mq_get_rq_budget_token(rq); 1805 1806 if (budget_token >= 0) 1807 blk_mq_put_dispatch_budget(q, budget_token); 1808 } 1809 } 1810 1811 /* 1812 * Returns true if we did some work AND can potentially do more. 1813 */ 1814 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list, 1815 unsigned int nr_budgets) 1816 { 1817 enum prep_dispatch prep; 1818 struct request_queue *q = hctx->queue; 1819 struct request *rq, *nxt; 1820 int errors, queued; 1821 blk_status_t ret = BLK_STS_OK; 1822 LIST_HEAD(zone_list); 1823 bool needs_resource = false; 1824 1825 if (list_empty(list)) 1826 return false; 1827 1828 /* 1829 * Now process all the entries, sending them to the driver. 1830 */ 1831 errors = queued = 0; 1832 do { 1833 struct blk_mq_queue_data bd; 1834 1835 rq = list_first_entry(list, struct request, queuelist); 1836 1837 WARN_ON_ONCE(hctx != rq->mq_hctx); 1838 prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets); 1839 if (prep != PREP_DISPATCH_OK) 1840 break; 1841 1842 list_del_init(&rq->queuelist); 1843 1844 bd.rq = rq; 1845 1846 /* 1847 * Flag last if we have no more requests, or if we have more 1848 * but can't assign a driver tag to it. 1849 */ 1850 if (list_empty(list)) 1851 bd.last = true; 1852 else { 1853 nxt = list_first_entry(list, struct request, queuelist); 1854 bd.last = !blk_mq_get_driver_tag(nxt); 1855 } 1856 1857 /* 1858 * once the request is queued to lld, no need to cover the 1859 * budget any more 1860 */ 1861 if (nr_budgets) 1862 nr_budgets--; 1863 ret = q->mq_ops->queue_rq(hctx, &bd); 1864 switch (ret) { 1865 case BLK_STS_OK: 1866 queued++; 1867 break; 1868 case BLK_STS_RESOURCE: 1869 needs_resource = true; 1870 fallthrough; 1871 case BLK_STS_DEV_RESOURCE: 1872 blk_mq_handle_dev_resource(rq, list); 1873 goto out; 1874 case BLK_STS_ZONE_RESOURCE: 1875 /* 1876 * Move the request to zone_list and keep going through 1877 * the dispatch list to find more requests the drive can 1878 * accept. 1879 */ 1880 blk_mq_handle_zone_resource(rq, &zone_list); 1881 needs_resource = true; 1882 break; 1883 default: 1884 errors++; 1885 blk_mq_end_request(rq, ret); 1886 } 1887 } while (!list_empty(list)); 1888 out: 1889 if (!list_empty(&zone_list)) 1890 list_splice_tail_init(&zone_list, list); 1891 1892 /* If we didn't flush the entire list, we could have told the driver 1893 * there was more coming, but that turned out to be a lie. 1894 */ 1895 if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued) 1896 q->mq_ops->commit_rqs(hctx); 1897 /* 1898 * Any items that need requeuing? Stuff them into hctx->dispatch, 1899 * that is where we will continue on next queue run. 
1900 */ 1901 if (!list_empty(list)) { 1902 bool needs_restart; 1903 /* For non-shared tags, the RESTART check will suffice */ 1904 bool no_tag = prep == PREP_DISPATCH_NO_TAG && 1905 (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED); 1906 1907 if (nr_budgets) 1908 blk_mq_release_budgets(q, list); 1909 1910 spin_lock(&hctx->lock); 1911 list_splice_tail_init(list, &hctx->dispatch); 1912 spin_unlock(&hctx->lock); 1913 1914 /* 1915 * Order adding requests to hctx->dispatch and checking 1916 * SCHED_RESTART flag. The pair of this smp_mb() is the one 1917 * in blk_mq_sched_restart(). Avoid restart code path to 1918 * miss the new added requests to hctx->dispatch, meantime 1919 * SCHED_RESTART is observed here. 1920 */ 1921 smp_mb(); 1922 1923 /* 1924 * If SCHED_RESTART was set by the caller of this function and 1925 * it is no longer set that means that it was cleared by another 1926 * thread and hence that a queue rerun is needed. 1927 * 1928 * If 'no_tag' is set, that means that we failed getting 1929 * a driver tag with an I/O scheduler attached. If our dispatch 1930 * waitqueue is no longer active, ensure that we run the queue 1931 * AFTER adding our entries back to the list. 1932 * 1933 * If no I/O scheduler has been configured it is possible that 1934 * the hardware queue got stopped and restarted before requests 1935 * were pushed back onto the dispatch list. Rerun the queue to 1936 * avoid starvation. Notes: 1937 * - blk_mq_run_hw_queue() checks whether or not a queue has 1938 * been stopped before rerunning a queue. 1939 * - Some but not all block drivers stop a queue before 1940 * returning BLK_STS_RESOURCE. Two exceptions are scsi-mq 1941 * and dm-rq. 1942 * 1943 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART 1944 * bit is set, run queue after a delay to avoid IO stalls 1945 * that could otherwise occur if the queue is idle. We'll do 1946 * similar if we couldn't get budget or couldn't lock a zone 1947 * and SCHED_RESTART is set. 1948 */ 1949 needs_restart = blk_mq_sched_needs_restart(hctx); 1950 if (prep == PREP_DISPATCH_NO_BUDGET) 1951 needs_resource = true; 1952 if (!needs_restart || 1953 (no_tag && list_empty_careful(&hctx->dispatch_wait.entry))) 1954 blk_mq_run_hw_queue(hctx, true); 1955 else if (needs_restart && needs_resource) 1956 blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY); 1957 1958 blk_mq_update_dispatch_busy(hctx, true); 1959 return false; 1960 } else 1961 blk_mq_update_dispatch_busy(hctx, false); 1962 1963 return (queued + errors) != 0; 1964 } 1965 1966 /** 1967 * __blk_mq_run_hw_queue - Run a hardware queue. 1968 * @hctx: Pointer to the hardware queue to run. 1969 * 1970 * Send pending requests to the hardware. 1971 */ 1972 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) 1973 { 1974 /* 1975 * We can't run the queue inline with ints disabled. Ensure that 1976 * we catch bad users of this early. 1977 */ 1978 WARN_ON_ONCE(in_interrupt()); 1979 1980 blk_mq_run_dispatch_ops(hctx->queue, 1981 blk_mq_sched_dispatch_requests(hctx)); 1982 } 1983 1984 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx) 1985 { 1986 int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask); 1987 1988 if (cpu >= nr_cpu_ids) 1989 cpu = cpumask_first(hctx->cpumask); 1990 return cpu; 1991 } 1992 1993 /* 1994 * It'd be great if the workqueue API had a way to pass 1995 * in a mask and had some smarts for more clever placement. 1996 * For now we just round-robin here, switching for every 1997 * BLK_MQ_CPU_WORK_BATCH queued items. 
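 * As an illustration (assuming BLK_MQ_CPU_WORK_BATCH is 8): a hctx whose
 * cpumask covers CPUs 2 and 3 queues eight work items on CPU 2, the next
 * eight on CPU 3, and then wraps back to CPU 2.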
1998 */ 1999 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) 2000 { 2001 bool tried = false; 2002 int next_cpu = hctx->next_cpu; 2003 2004 if (hctx->queue->nr_hw_queues == 1) 2005 return WORK_CPU_UNBOUND; 2006 2007 if (--hctx->next_cpu_batch <= 0) { 2008 select_cpu: 2009 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask, 2010 cpu_online_mask); 2011 if (next_cpu >= nr_cpu_ids) 2012 next_cpu = blk_mq_first_mapped_cpu(hctx); 2013 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; 2014 } 2015 2016 /* 2017 * Do unbound schedule if we can't find a online CPU for this hctx, 2018 * and it should only happen in the path of handling CPU DEAD. 2019 */ 2020 if (!cpu_online(next_cpu)) { 2021 if (!tried) { 2022 tried = true; 2023 goto select_cpu; 2024 } 2025 2026 /* 2027 * Make sure to re-select CPU next time once after CPUs 2028 * in hctx->cpumask become online again. 2029 */ 2030 hctx->next_cpu = next_cpu; 2031 hctx->next_cpu_batch = 1; 2032 return WORK_CPU_UNBOUND; 2033 } 2034 2035 hctx->next_cpu = next_cpu; 2036 return next_cpu; 2037 } 2038 2039 /** 2040 * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue. 2041 * @hctx: Pointer to the hardware queue to run. 2042 * @async: If we want to run the queue asynchronously. 2043 * @msecs: Milliseconds of delay to wait before running the queue. 2044 * 2045 * If !@async, try to run the queue now. Else, run the queue asynchronously and 2046 * with a delay of @msecs. 2047 */ 2048 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, 2049 unsigned long msecs) 2050 { 2051 if (unlikely(blk_mq_hctx_stopped(hctx))) 2052 return; 2053 2054 if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) { 2055 int cpu = get_cpu(); 2056 if (cpumask_test_cpu(cpu, hctx->cpumask)) { 2057 __blk_mq_run_hw_queue(hctx); 2058 put_cpu(); 2059 return; 2060 } 2061 2062 put_cpu(); 2063 } 2064 2065 kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work, 2066 msecs_to_jiffies(msecs)); 2067 } 2068 2069 /** 2070 * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously. 2071 * @hctx: Pointer to the hardware queue to run. 2072 * @msecs: Milliseconds of delay to wait before running the queue. 2073 * 2074 * Run a hardware queue asynchronously with a delay of @msecs. 2075 */ 2076 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) 2077 { 2078 __blk_mq_delay_run_hw_queue(hctx, true, msecs); 2079 } 2080 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue); 2081 2082 /** 2083 * blk_mq_run_hw_queue - Start to run a hardware queue. 2084 * @hctx: Pointer to the hardware queue to run. 2085 * @async: If we want to run the queue asynchronously. 2086 * 2087 * Check if the request queue is not in a quiesced state and if there are 2088 * pending requests to be sent. If this is true, run the queue to send requests 2089 * to hardware. 2090 */ 2091 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) 2092 { 2093 bool need_run; 2094 2095 /* 2096 * When queue is quiesced, we may be switching io scheduler, or 2097 * updating nr_hw_queues, or other things, and we can't run queue 2098 * any more, even __blk_mq_hctx_has_pending() can't be called safely. 2099 * 2100 * And queue will be rerun in blk_mq_unquiesce_queue() if it is 2101 * quiesced. 
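 *
 * An illustrative driver-side sequence around such an update, not taken
 * from any specific driver, is:
 *
 *	blk_mq_quiesce_queue(q);
 *	... change scheduler / nr_hw_queues / other dispatch state ...
 *	blk_mq_unquiesce_queue(q);
 *
 * where blk_mq_unquiesce_queue() reruns the hardware queues once
 * dispatch is allowed again.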
2102 */ 2103 __blk_mq_run_dispatch_ops(hctx->queue, false, 2104 need_run = !blk_queue_quiesced(hctx->queue) && 2105 blk_mq_hctx_has_pending(hctx)); 2106 2107 if (need_run) 2108 __blk_mq_delay_run_hw_queue(hctx, async, 0); 2109 } 2110 EXPORT_SYMBOL(blk_mq_run_hw_queue); 2111 2112 /* 2113 * Is the request queue handled by an IO scheduler that does not respect 2114 * hardware queues when dispatching? 2115 */ 2116 static bool blk_mq_has_sqsched(struct request_queue *q) 2117 { 2118 struct elevator_queue *e = q->elevator; 2119 2120 if (e && e->type->ops.dispatch_request && 2121 !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE)) 2122 return true; 2123 return false; 2124 } 2125 2126 /* 2127 * Return the preferred queue to dispatch from (if any) for a non-mq aware IO 2128 * scheduler. 2129 */ 2130 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q) 2131 { 2132 struct blk_mq_hw_ctx *hctx; 2133 2134 /* 2135 * If the IO scheduler does not respect hardware queues when 2136 * dispatching, we just don't bother with multiple HW queues and 2137 * dispatch from hctx for the current CPU since running multiple queues 2138 * just causes lock contention inside the scheduler and pointless cache 2139 * bouncing. 2140 */ 2141 hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT, 2142 raw_smp_processor_id()); 2143 if (!blk_mq_hctx_stopped(hctx)) 2144 return hctx; 2145 return NULL; 2146 } 2147 2148 /** 2149 * blk_mq_run_hw_queues - Run all hardware queues in a request queue. 2150 * @q: Pointer to the request queue to run. 2151 * @async: If we want to run the queue asynchronously. 2152 */ 2153 void blk_mq_run_hw_queues(struct request_queue *q, bool async) 2154 { 2155 struct blk_mq_hw_ctx *hctx, *sq_hctx; 2156 unsigned long i; 2157 2158 sq_hctx = NULL; 2159 if (blk_mq_has_sqsched(q)) 2160 sq_hctx = blk_mq_get_sq_hctx(q); 2161 queue_for_each_hw_ctx(q, hctx, i) { 2162 if (blk_mq_hctx_stopped(hctx)) 2163 continue; 2164 /* 2165 * Dispatch from this hctx either if there's no hctx preferred 2166 * by IO scheduler or if it has requests that bypass the 2167 * scheduler. 2168 */ 2169 if (!sq_hctx || sq_hctx == hctx || 2170 !list_empty_careful(&hctx->dispatch)) 2171 blk_mq_run_hw_queue(hctx, async); 2172 } 2173 } 2174 EXPORT_SYMBOL(blk_mq_run_hw_queues); 2175 2176 /** 2177 * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously. 2178 * @q: Pointer to the request queue to run. 2179 * @msecs: Milliseconds of delay to wait before running the queues. 2180 */ 2181 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs) 2182 { 2183 struct blk_mq_hw_ctx *hctx, *sq_hctx; 2184 unsigned long i; 2185 2186 sq_hctx = NULL; 2187 if (blk_mq_has_sqsched(q)) 2188 sq_hctx = blk_mq_get_sq_hctx(q); 2189 queue_for_each_hw_ctx(q, hctx, i) { 2190 if (blk_mq_hctx_stopped(hctx)) 2191 continue; 2192 /* 2193 * If there is already a run_work pending, leave the 2194 * pending delay untouched. Otherwise, a hctx can stall 2195 * if another hctx is re-delaying the other's work 2196 * before the work executes. 2197 */ 2198 if (delayed_work_pending(&hctx->run_work)) 2199 continue; 2200 /* 2201 * Dispatch from this hctx either if there's no hctx preferred 2202 * by IO scheduler or if it has requests that bypass the 2203 * scheduler.
2204 */ 2205 if (!sq_hctx || sq_hctx == hctx || 2206 !list_empty_careful(&hctx->dispatch)) 2207 blk_mq_delay_run_hw_queue(hctx, msecs); 2208 } 2209 } 2210 EXPORT_SYMBOL(blk_mq_delay_run_hw_queues); 2211 2212 /** 2213 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped 2214 * @q: request queue. 2215 * 2216 * The caller is responsible for serializing this function against 2217 * blk_mq_{start,stop}_hw_queue(). 2218 */ 2219 bool blk_mq_queue_stopped(struct request_queue *q) 2220 { 2221 struct blk_mq_hw_ctx *hctx; 2222 unsigned long i; 2223 2224 queue_for_each_hw_ctx(q, hctx, i) 2225 if (blk_mq_hctx_stopped(hctx)) 2226 return true; 2227 2228 return false; 2229 } 2230 EXPORT_SYMBOL(blk_mq_queue_stopped); 2231 2232 /* 2233 * This function is often used for pausing .queue_rq() by driver when 2234 * there isn't enough resource or some conditions aren't satisfied, and 2235 * BLK_STS_RESOURCE is usually returned. 2236 * 2237 * We do not guarantee that dispatch can be drained or blocked 2238 * after blk_mq_stop_hw_queue() returns. Please use 2239 * blk_mq_quiesce_queue() for that requirement. 2240 */ 2241 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) 2242 { 2243 cancel_delayed_work(&hctx->run_work); 2244 2245 set_bit(BLK_MQ_S_STOPPED, &hctx->state); 2246 } 2247 EXPORT_SYMBOL(blk_mq_stop_hw_queue); 2248 2249 /* 2250 * This function is often used for pausing .queue_rq() by driver when 2251 * there isn't enough resource or some conditions aren't satisfied, and 2252 * BLK_STS_RESOURCE is usually returned. 2253 * 2254 * We do not guarantee that dispatch can be drained or blocked 2255 * after blk_mq_stop_hw_queues() returns. Please use 2256 * blk_mq_quiesce_queue() for that requirement. 2257 */ 2258 void blk_mq_stop_hw_queues(struct request_queue *q) 2259 { 2260 struct blk_mq_hw_ctx *hctx; 2261 unsigned long i; 2262 2263 queue_for_each_hw_ctx(q, hctx, i) 2264 blk_mq_stop_hw_queue(hctx); 2265 } 2266 EXPORT_SYMBOL(blk_mq_stop_hw_queues); 2267 2268 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) 2269 { 2270 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 2271 2272 blk_mq_run_hw_queue(hctx, false); 2273 } 2274 EXPORT_SYMBOL(blk_mq_start_hw_queue); 2275 2276 void blk_mq_start_hw_queues(struct request_queue *q) 2277 { 2278 struct blk_mq_hw_ctx *hctx; 2279 unsigned long i; 2280 2281 queue_for_each_hw_ctx(q, hctx, i) 2282 blk_mq_start_hw_queue(hctx); 2283 } 2284 EXPORT_SYMBOL(blk_mq_start_hw_queues); 2285 2286 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) 2287 { 2288 if (!blk_mq_hctx_stopped(hctx)) 2289 return; 2290 2291 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 2292 blk_mq_run_hw_queue(hctx, async); 2293 } 2294 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue); 2295 2296 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) 2297 { 2298 struct blk_mq_hw_ctx *hctx; 2299 unsigned long i; 2300 2301 queue_for_each_hw_ctx(q, hctx, i) 2302 blk_mq_start_stopped_hw_queue(hctx, async); 2303 } 2304 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues); 2305 2306 static void blk_mq_run_work_fn(struct work_struct *work) 2307 { 2308 struct blk_mq_hw_ctx *hctx; 2309 2310 hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work); 2311 2312 /* 2313 * If we are stopped, don't run the queue. 
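 * A driver that stopped its queues (e.g. with blk_mq_stop_hw_queue()
 * after running out of device resources) is expected to restart them
 * itself, typically from its completion path. An illustrative pattern,
 * not taken from any specific driver:
 *
 *	if (device_ring_full)			(in ->queue_rq())
 *		blk_mq_stop_hw_queue(hctx);
 *	...
 *	blk_mq_start_stopped_hw_queues(q, true);	(on completion)
 *
 * device_ring_full is a made-up condition standing in for whatever
 * resource check the driver performs.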
2314 */ 2315 if (blk_mq_hctx_stopped(hctx)) 2316 return; 2317 2318 __blk_mq_run_hw_queue(hctx); 2319 } 2320 2321 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx, 2322 struct request *rq, 2323 bool at_head) 2324 { 2325 struct blk_mq_ctx *ctx = rq->mq_ctx; 2326 enum hctx_type type = hctx->type; 2327 2328 lockdep_assert_held(&ctx->lock); 2329 2330 trace_block_rq_insert(rq); 2331 2332 if (at_head) 2333 list_add(&rq->queuelist, &ctx->rq_lists[type]); 2334 else 2335 list_add_tail(&rq->queuelist, &ctx->rq_lists[type]); 2336 } 2337 2338 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, 2339 bool at_head) 2340 { 2341 struct blk_mq_ctx *ctx = rq->mq_ctx; 2342 2343 lockdep_assert_held(&ctx->lock); 2344 2345 __blk_mq_insert_req_list(hctx, rq, at_head); 2346 blk_mq_hctx_mark_pending(hctx, ctx); 2347 } 2348 2349 /** 2350 * blk_mq_request_bypass_insert - Insert a request at dispatch list. 2351 * @rq: Pointer to request to be inserted. 2352 * @at_head: true if the request should be inserted at the head of the list. 2353 * @run_queue: If we should run the hardware queue after inserting the request. 2354 * 2355 * Should only be used carefully, when the caller knows we want to 2356 * bypass a potential IO scheduler on the target device. 2357 */ 2358 void blk_mq_request_bypass_insert(struct request *rq, bool at_head, 2359 bool run_queue) 2360 { 2361 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 2362 2363 spin_lock(&hctx->lock); 2364 if (at_head) 2365 list_add(&rq->queuelist, &hctx->dispatch); 2366 else 2367 list_add_tail(&rq->queuelist, &hctx->dispatch); 2368 spin_unlock(&hctx->lock); 2369 2370 if (run_queue) 2371 blk_mq_run_hw_queue(hctx, false); 2372 } 2373 2374 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, 2375 struct list_head *list) 2376 2377 { 2378 struct request *rq; 2379 enum hctx_type type = hctx->type; 2380 2381 /* 2382 * preemption doesn't flush plug list, so it's possible ctx->cpu is 2383 * offline now 2384 */ 2385 list_for_each_entry(rq, list, queuelist) { 2386 BUG_ON(rq->mq_ctx != ctx); 2387 trace_block_rq_insert(rq); 2388 } 2389 2390 spin_lock(&ctx->lock); 2391 list_splice_tail_init(list, &ctx->rq_lists[type]); 2392 blk_mq_hctx_mark_pending(hctx, ctx); 2393 spin_unlock(&ctx->lock); 2394 } 2395 2396 static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int *queued, 2397 bool from_schedule) 2398 { 2399 if (hctx->queue->mq_ops->commit_rqs) { 2400 trace_block_unplug(hctx->queue, *queued, !from_schedule); 2401 hctx->queue->mq_ops->commit_rqs(hctx); 2402 } 2403 *queued = 0; 2404 } 2405 2406 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio, 2407 unsigned int nr_segs) 2408 { 2409 int err; 2410 2411 if (bio->bi_opf & REQ_RAHEAD) 2412 rq->cmd_flags |= REQ_FAILFAST_MASK; 2413 2414 rq->__sector = bio->bi_iter.bi_sector; 2415 rq->write_hint = bio->bi_write_hint; 2416 blk_rq_bio_prep(rq, bio, nr_segs); 2417 2418 /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */ 2419 err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO); 2420 WARN_ON_ONCE(err); 2421 2422 blk_account_io_start(rq); 2423 } 2424 2425 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, 2426 struct request *rq, bool last) 2427 { 2428 struct request_queue *q = rq->q; 2429 struct blk_mq_queue_data bd = { 2430 .rq = rq, 2431 .last = last, 2432 }; 2433 blk_status_t ret; 2434 2435 /* 2436 * For OK queue, we are done. For error, caller may kill it. 
2437 * Any other error (busy), just add it to our list as we 2438 * previously would have done. 2439 */ 2440 ret = q->mq_ops->queue_rq(hctx, &bd); 2441 switch (ret) { 2442 case BLK_STS_OK: 2443 blk_mq_update_dispatch_busy(hctx, false); 2444 break; 2445 case BLK_STS_RESOURCE: 2446 case BLK_STS_DEV_RESOURCE: 2447 blk_mq_update_dispatch_busy(hctx, true); 2448 __blk_mq_requeue_request(rq); 2449 break; 2450 default: 2451 blk_mq_update_dispatch_busy(hctx, false); 2452 break; 2453 } 2454 2455 return ret; 2456 } 2457 2458 static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, 2459 struct request *rq, 2460 bool bypass_insert, bool last) 2461 { 2462 struct request_queue *q = rq->q; 2463 bool run_queue = true; 2464 int budget_token; 2465 2466 /* 2467 * RCU or SRCU read lock is needed before checking quiesced flag. 2468 * 2469 * When queue is stopped or quiesced, ignore 'bypass_insert' from 2470 * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller, 2471 * and avoid driver to try to dispatch again. 2472 */ 2473 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) { 2474 run_queue = false; 2475 bypass_insert = false; 2476 goto insert; 2477 } 2478 2479 if ((rq->rq_flags & RQF_ELV) && !bypass_insert) 2480 goto insert; 2481 2482 budget_token = blk_mq_get_dispatch_budget(q); 2483 if (budget_token < 0) 2484 goto insert; 2485 2486 blk_mq_set_rq_budget_token(rq, budget_token); 2487 2488 if (!blk_mq_get_driver_tag(rq)) { 2489 blk_mq_put_dispatch_budget(q, budget_token); 2490 goto insert; 2491 } 2492 2493 return __blk_mq_issue_directly(hctx, rq, last); 2494 insert: 2495 if (bypass_insert) 2496 return BLK_STS_RESOURCE; 2497 2498 blk_mq_sched_insert_request(rq, false, run_queue, false); 2499 2500 return BLK_STS_OK; 2501 } 2502 2503 /** 2504 * blk_mq_try_issue_directly - Try to send a request directly to device driver. 2505 * @hctx: Pointer of the associated hardware queue. 2506 * @rq: Pointer to request to be sent. 2507 * 2508 * If the device has enough resources to accept a new request now, send the 2509 * request directly to device driver. Else, insert at hctx->dispatch queue, so 2510 * we can try send it another time in the future. Requests inserted at this 2511 * queue have higher priority. 
2512 */ 2513 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, 2514 struct request *rq) 2515 { 2516 blk_status_t ret = 2517 __blk_mq_try_issue_directly(hctx, rq, false, true); 2518 2519 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) 2520 blk_mq_request_bypass_insert(rq, false, true); 2521 else if (ret != BLK_STS_OK) 2522 blk_mq_end_request(rq, ret); 2523 } 2524 2525 static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last) 2526 { 2527 return __blk_mq_try_issue_directly(rq->mq_hctx, rq, true, last); 2528 } 2529 2530 static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule) 2531 { 2532 struct blk_mq_hw_ctx *hctx = NULL; 2533 struct request *rq; 2534 int queued = 0; 2535 int errors = 0; 2536 2537 while ((rq = rq_list_pop(&plug->mq_list))) { 2538 bool last = rq_list_empty(plug->mq_list); 2539 blk_status_t ret; 2540 2541 if (hctx != rq->mq_hctx) { 2542 if (hctx) 2543 blk_mq_commit_rqs(hctx, &queued, from_schedule); 2544 hctx = rq->mq_hctx; 2545 } 2546 2547 ret = blk_mq_request_issue_directly(rq, last); 2548 switch (ret) { 2549 case BLK_STS_OK: 2550 queued++; 2551 break; 2552 case BLK_STS_RESOURCE: 2553 case BLK_STS_DEV_RESOURCE: 2554 blk_mq_request_bypass_insert(rq, false, last); 2555 blk_mq_commit_rqs(hctx, &queued, from_schedule); 2556 return; 2557 default: 2558 blk_mq_end_request(rq, ret); 2559 errors++; 2560 break; 2561 } 2562 } 2563 2564 /* 2565 * If we didn't flush the entire list, we could have told the driver 2566 * there was more coming, but that turned out to be a lie. 2567 */ 2568 if (errors) 2569 blk_mq_commit_rqs(hctx, &queued, from_schedule); 2570 } 2571 2572 static void __blk_mq_flush_plug_list(struct request_queue *q, 2573 struct blk_plug *plug) 2574 { 2575 if (blk_queue_quiesced(q)) 2576 return; 2577 q->mq_ops->queue_rqs(&plug->mq_list); 2578 } 2579 2580 static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched) 2581 { 2582 struct blk_mq_hw_ctx *this_hctx = NULL; 2583 struct blk_mq_ctx *this_ctx = NULL; 2584 struct request *requeue_list = NULL; 2585 unsigned int depth = 0; 2586 LIST_HEAD(list); 2587 2588 do { 2589 struct request *rq = rq_list_pop(&plug->mq_list); 2590 2591 if (!this_hctx) { 2592 this_hctx = rq->mq_hctx; 2593 this_ctx = rq->mq_ctx; 2594 } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) { 2595 rq_list_add(&requeue_list, rq); 2596 continue; 2597 } 2598 list_add_tail(&rq->queuelist, &list); 2599 depth++; 2600 } while (!rq_list_empty(plug->mq_list)); 2601 2602 plug->mq_list = requeue_list; 2603 trace_block_unplug(this_hctx->queue, depth, !from_sched); 2604 blk_mq_sched_insert_requests(this_hctx, this_ctx, &list, from_sched); 2605 } 2606 2607 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) 2608 { 2609 struct request *rq; 2610 2611 if (rq_list_empty(plug->mq_list)) 2612 return; 2613 plug->rq_count = 0; 2614 2615 if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) { 2616 struct request_queue *q; 2617 2618 rq = rq_list_peek(&plug->mq_list); 2619 q = rq->q; 2620 2621 /* 2622 * Peek first request and see if we have a ->queue_rqs() hook. 2623 * If we do, we can dispatch the whole plug list in one go. We 2624 * already know at this point that all requests belong to the 2625 * same queue, caller must ensure that's the case. 2626 * 2627 * Since we pass off the full list to the driver at this point, 2628 * we do not increment the active request count for the queue. 2629 * Bypass shared tags for now because of that. 
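 * (->queue_rqs() is optional: when a driver does not provide it, or the
 * list could not be flushed completely, we fall back to issuing the
 * requests one by one via blk_mq_plug_issue_direct() below. The NVMe
 * PCI driver is one example of a driver implementing the bulk hook.)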
2630 */ 2631 if (q->mq_ops->queue_rqs && 2632 !(rq->mq_hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) { 2633 blk_mq_run_dispatch_ops(q, 2634 __blk_mq_flush_plug_list(q, plug)); 2635 if (rq_list_empty(plug->mq_list)) 2636 return; 2637 } 2638 2639 blk_mq_run_dispatch_ops(q, 2640 blk_mq_plug_issue_direct(plug, false)); 2641 if (rq_list_empty(plug->mq_list)) 2642 return; 2643 } 2644 2645 do { 2646 blk_mq_dispatch_plug_list(plug, from_schedule); 2647 } while (!rq_list_empty(plug->mq_list)); 2648 } 2649 2650 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, 2651 struct list_head *list) 2652 { 2653 int queued = 0; 2654 int errors = 0; 2655 2656 while (!list_empty(list)) { 2657 blk_status_t ret; 2658 struct request *rq = list_first_entry(list, struct request, 2659 queuelist); 2660 2661 list_del_init(&rq->queuelist); 2662 ret = blk_mq_request_issue_directly(rq, list_empty(list)); 2663 if (ret != BLK_STS_OK) { 2664 if (ret == BLK_STS_RESOURCE || 2665 ret == BLK_STS_DEV_RESOURCE) { 2666 blk_mq_request_bypass_insert(rq, false, 2667 list_empty(list)); 2668 break; 2669 } 2670 blk_mq_end_request(rq, ret); 2671 errors++; 2672 } else 2673 queued++; 2674 } 2675 2676 /* 2677 * If we didn't flush the entire list, we could have told 2678 * the driver there was more coming, but that turned out to 2679 * be a lie. 2680 */ 2681 if ((!list_empty(list) || errors) && 2682 hctx->queue->mq_ops->commit_rqs && queued) 2683 hctx->queue->mq_ops->commit_rqs(hctx); 2684 } 2685 2686 /* 2687 * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple 2688 * queues. This is important for md arrays to benefit from merging 2689 * requests. 2690 */ 2691 static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug) 2692 { 2693 if (plug->multiple_queues) 2694 return BLK_MAX_REQUEST_COUNT * 2; 2695 return BLK_MAX_REQUEST_COUNT; 2696 } 2697 2698 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq) 2699 { 2700 struct request *last = rq_list_peek(&plug->mq_list); 2701 2702 if (!plug->rq_count) { 2703 trace_block_plug(rq->q); 2704 } else if (plug->rq_count >= blk_plug_max_rq_count(plug) || 2705 (!blk_queue_nomerges(rq->q) && 2706 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) { 2707 blk_mq_flush_plug_list(plug, false); 2708 trace_block_plug(rq->q); 2709 } 2710 2711 if (!plug->multiple_queues && last && last->q != rq->q) 2712 plug->multiple_queues = true; 2713 if (!plug->has_elevator && (rq->rq_flags & RQF_ELV)) 2714 plug->has_elevator = true; 2715 rq->rq_next = NULL; 2716 rq_list_add(&plug->mq_list, rq); 2717 plug->rq_count++; 2718 } 2719 2720 static bool blk_mq_attempt_bio_merge(struct request_queue *q, 2721 struct bio *bio, unsigned int nr_segs) 2722 { 2723 if (!blk_queue_nomerges(q) && bio_mergeable(bio)) { 2724 if (blk_attempt_plug_merge(q, bio, nr_segs)) 2725 return true; 2726 if (blk_mq_sched_bio_merge(q, bio, nr_segs)) 2727 return true; 2728 } 2729 return false; 2730 } 2731 2732 static struct request *blk_mq_get_new_requests(struct request_queue *q, 2733 struct blk_plug *plug, 2734 struct bio *bio, 2735 unsigned int nsegs) 2736 { 2737 struct blk_mq_alloc_data data = { 2738 .q = q, 2739 .nr_tags = 1, 2740 .cmd_flags = bio->bi_opf, 2741 }; 2742 struct request *rq; 2743 2744 if (unlikely(bio_queue_enter(bio))) 2745 return NULL; 2746 2747 if (blk_mq_attempt_bio_merge(q, bio, nsegs)) 2748 goto queue_exit; 2749 2750 rq_qos_throttle(q, bio); 2751 2752 if (plug) { 2753 data.nr_tags = plug->nr_ios; 2754 plug->nr_ios = 1; 2755 data.cached_rq = &plug->cached_rq; 2756 } 2757 2758 rq = 
__blk_mq_alloc_requests(&data); 2759 if (rq) 2760 return rq; 2761 rq_qos_cleanup(q, bio); 2762 if (bio->bi_opf & REQ_NOWAIT) 2763 bio_wouldblock_error(bio); 2764 queue_exit: 2765 blk_queue_exit(q); 2766 return NULL; 2767 } 2768 2769 static inline struct request *blk_mq_get_cached_request(struct request_queue *q, 2770 struct blk_plug *plug, struct bio **bio, unsigned int nsegs) 2771 { 2772 struct request *rq; 2773 2774 if (!plug) 2775 return NULL; 2776 rq = rq_list_peek(&plug->cached_rq); 2777 if (!rq || rq->q != q) 2778 return NULL; 2779 2780 if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) { 2781 *bio = NULL; 2782 return NULL; 2783 } 2784 2785 rq_qos_throttle(q, *bio); 2786 2787 if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type) 2788 return NULL; 2789 if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf)) 2790 return NULL; 2791 2792 rq->cmd_flags = (*bio)->bi_opf; 2793 plug->cached_rq = rq_list_next(rq); 2794 INIT_LIST_HEAD(&rq->queuelist); 2795 return rq; 2796 } 2797 2798 /** 2799 * blk_mq_submit_bio - Create and send a request to block device. 2800 * @bio: Bio pointer. 2801 * 2802 * Builds up a request structure from @q and @bio and send to the device. The 2803 * request may not be queued directly to hardware if: 2804 * * This request can be merged with another one 2805 * * We want to place request at plug queue for possible future merging 2806 * * There is an IO scheduler active at this queue 2807 * 2808 * It will not queue the request if there is an error with the bio, or at the 2809 * request creation. 2810 */ 2811 void blk_mq_submit_bio(struct bio *bio) 2812 { 2813 struct request_queue *q = bdev_get_queue(bio->bi_bdev); 2814 struct blk_plug *plug = blk_mq_plug(q, bio); 2815 const int is_sync = op_is_sync(bio->bi_opf); 2816 struct request *rq; 2817 unsigned int nr_segs = 1; 2818 blk_status_t ret; 2819 2820 blk_queue_bounce(q, &bio); 2821 if (blk_may_split(q, bio)) 2822 __blk_queue_split(q, &bio, &nr_segs); 2823 2824 if (!bio_integrity_prep(bio)) 2825 return; 2826 2827 rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs); 2828 if (!rq) { 2829 if (!bio) 2830 return; 2831 rq = blk_mq_get_new_requests(q, plug, bio, nr_segs); 2832 if (unlikely(!rq)) 2833 return; 2834 } 2835 2836 trace_block_getrq(bio); 2837 2838 rq_qos_track(q, rq, bio); 2839 2840 blk_mq_bio_to_request(rq, bio, nr_segs); 2841 2842 ret = blk_crypto_init_request(rq); 2843 if (ret != BLK_STS_OK) { 2844 bio->bi_status = ret; 2845 bio_endio(bio); 2846 blk_mq_free_request(rq); 2847 return; 2848 } 2849 2850 if (op_is_flush(bio->bi_opf)) { 2851 blk_insert_flush(rq); 2852 return; 2853 } 2854 2855 if (plug) 2856 blk_add_rq_to_plug(plug, rq); 2857 else if ((rq->rq_flags & RQF_ELV) || 2858 (rq->mq_hctx->dispatch_busy && 2859 (q->nr_hw_queues == 1 || !is_sync))) 2860 blk_mq_sched_insert_request(rq, false, true, true); 2861 else 2862 blk_mq_run_dispatch_ops(rq->q, 2863 blk_mq_try_issue_directly(rq->mq_hctx, rq)); 2864 } 2865 2866 #ifdef CONFIG_BLK_MQ_STACKING 2867 /** 2868 * blk_insert_cloned_request - Helper for stacking drivers to submit a request 2869 * @rq: the request being queued 2870 */ 2871 blk_status_t blk_insert_cloned_request(struct request *rq) 2872 { 2873 struct request_queue *q = rq->q; 2874 unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq)); 2875 blk_status_t ret; 2876 2877 if (blk_rq_sectors(rq) > max_sectors) { 2878 /* 2879 * SCSI device does not have a good way to return if 2880 * Write Same/Zero is actually supported. 
If a device rejects 2881 * a non-read/write command (discard, write same,etc.) the 2882 * low-level device driver will set the relevant queue limit to 2883 * 0 to prevent blk-lib from issuing more of the offending 2884 * operations. Commands queued prior to the queue limit being 2885 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O 2886 * errors being propagated to upper layers. 2887 */ 2888 if (max_sectors == 0) 2889 return BLK_STS_NOTSUPP; 2890 2891 printk(KERN_ERR "%s: over max size limit. (%u > %u)\n", 2892 __func__, blk_rq_sectors(rq), max_sectors); 2893 return BLK_STS_IOERR; 2894 } 2895 2896 /* 2897 * The queue settings related to segment counting may differ from the 2898 * original queue. 2899 */ 2900 rq->nr_phys_segments = blk_recalc_rq_segments(rq); 2901 if (rq->nr_phys_segments > queue_max_segments(q)) { 2902 printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n", 2903 __func__, rq->nr_phys_segments, queue_max_segments(q)); 2904 return BLK_STS_IOERR; 2905 } 2906 2907 if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq))) 2908 return BLK_STS_IOERR; 2909 2910 if (blk_crypto_insert_cloned_request(rq)) 2911 return BLK_STS_IOERR; 2912 2913 blk_account_io_start(rq); 2914 2915 /* 2916 * Since we have a scheduler attached on the top device, 2917 * bypass a potential scheduler on the bottom device for 2918 * insert. 2919 */ 2920 blk_mq_run_dispatch_ops(q, 2921 ret = blk_mq_request_issue_directly(rq, true)); 2922 if (ret) 2923 blk_account_io_done(rq, ktime_get_ns()); 2924 return ret; 2925 } 2926 EXPORT_SYMBOL_GPL(blk_insert_cloned_request); 2927 2928 /** 2929 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request 2930 * @rq: the clone request to be cleaned up 2931 * 2932 * Description: 2933 * Free all bios in @rq for a cloned request. 2934 */ 2935 void blk_rq_unprep_clone(struct request *rq) 2936 { 2937 struct bio *bio; 2938 2939 while ((bio = rq->bio) != NULL) { 2940 rq->bio = bio->bi_next; 2941 2942 bio_put(bio); 2943 } 2944 } 2945 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); 2946 2947 /** 2948 * blk_rq_prep_clone - Helper function to setup clone request 2949 * @rq: the request to be setup 2950 * @rq_src: original request to be cloned 2951 * @bs: bio_set that bios for clone are allocated from 2952 * @gfp_mask: memory allocation mask for bio 2953 * @bio_ctr: setup function to be called for each clone bio. 2954 * Returns %0 for success, non %0 for failure. 2955 * @data: private data to be passed to @bio_ctr 2956 * 2957 * Description: 2958 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. 2959 * Also, pages which the original bios are pointing to are not copied 2960 * and the cloned bios just point same pages. 2961 * So cloned bios must be completed before original bios, which means 2962 * the caller must complete @rq before @rq_src. 
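 *
 *	Request-based device-mapper (dm-rq) is the typical caller: it
 *	prepares a clone of the original request and then issues it on
 *	the underlying device with blk_insert_cloned_request().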
2963 */ 2964 int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 2965 struct bio_set *bs, gfp_t gfp_mask, 2966 int (*bio_ctr)(struct bio *, struct bio *, void *), 2967 void *data) 2968 { 2969 struct bio *bio, *bio_src; 2970 2971 if (!bs) 2972 bs = &fs_bio_set; 2973 2974 __rq_for_each_bio(bio_src, rq_src) { 2975 bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask, 2976 bs); 2977 if (!bio) 2978 goto free_and_out; 2979 2980 if (bio_ctr && bio_ctr(bio, bio_src, data)) 2981 goto free_and_out; 2982 2983 if (rq->bio) { 2984 rq->biotail->bi_next = bio; 2985 rq->biotail = bio; 2986 } else { 2987 rq->bio = rq->biotail = bio; 2988 } 2989 bio = NULL; 2990 } 2991 2992 /* Copy attributes of the original request to the clone request. */ 2993 rq->__sector = blk_rq_pos(rq_src); 2994 rq->__data_len = blk_rq_bytes(rq_src); 2995 if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) { 2996 rq->rq_flags |= RQF_SPECIAL_PAYLOAD; 2997 rq->special_vec = rq_src->special_vec; 2998 } 2999 rq->nr_phys_segments = rq_src->nr_phys_segments; 3000 rq->ioprio = rq_src->ioprio; 3001 3002 if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0) 3003 goto free_and_out; 3004 3005 return 0; 3006 3007 free_and_out: 3008 if (bio) 3009 bio_put(bio); 3010 blk_rq_unprep_clone(rq); 3011 3012 return -ENOMEM; 3013 } 3014 EXPORT_SYMBOL_GPL(blk_rq_prep_clone); 3015 #endif /* CONFIG_BLK_MQ_STACKING */ 3016 3017 /* 3018 * Steal bios from a request and add them to a bio list. 3019 * The request must not have been partially completed before. 3020 */ 3021 void blk_steal_bios(struct bio_list *list, struct request *rq) 3022 { 3023 if (rq->bio) { 3024 if (list->tail) 3025 list->tail->bi_next = rq->bio; 3026 else 3027 list->head = rq->bio; 3028 list->tail = rq->biotail; 3029 3030 rq->bio = NULL; 3031 rq->biotail = NULL; 3032 } 3033 3034 rq->__data_len = 0; 3035 } 3036 EXPORT_SYMBOL_GPL(blk_steal_bios); 3037 3038 static size_t order_to_size(unsigned int order) 3039 { 3040 return (size_t)PAGE_SIZE << order; 3041 } 3042 3043 /* called before freeing request pool in @tags */ 3044 static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags, 3045 struct blk_mq_tags *tags) 3046 { 3047 struct page *page; 3048 unsigned long flags; 3049 3050 /* There is no need to clear a driver tags own mapping */ 3051 if (drv_tags == tags) 3052 return; 3053 3054 list_for_each_entry(page, &tags->page_list, lru) { 3055 unsigned long start = (unsigned long)page_address(page); 3056 unsigned long end = start + order_to_size(page->private); 3057 int i; 3058 3059 for (i = 0; i < drv_tags->nr_tags; i++) { 3060 struct request *rq = drv_tags->rqs[i]; 3061 unsigned long rq_addr = (unsigned long)rq; 3062 3063 if (rq_addr >= start && rq_addr < end) { 3064 WARN_ON_ONCE(req_ref_read(rq) != 0); 3065 cmpxchg(&drv_tags->rqs[i], rq, NULL); 3066 } 3067 } 3068 } 3069 3070 /* 3071 * Wait until all pending iteration is done. 3072 * 3073 * Request reference is cleared and it is guaranteed to be observed 3074 * after the ->lock is released. 
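 * The empty lock/unlock pair below does not protect any data on its
 * own; it only waits for tag iterators that acquired ->lock before the
 * rqs[] entries were cleared, so that lookups done afterwards can only
 * observe the NULL pointers.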
3075 */ 3076 spin_lock_irqsave(&drv_tags->lock, flags); 3077 spin_unlock_irqrestore(&drv_tags->lock, flags); 3078 } 3079 3080 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, 3081 unsigned int hctx_idx) 3082 { 3083 struct blk_mq_tags *drv_tags; 3084 struct page *page; 3085 3086 if (list_empty(&tags->page_list)) 3087 return; 3088 3089 if (blk_mq_is_shared_tags(set->flags)) 3090 drv_tags = set->shared_tags; 3091 else 3092 drv_tags = set->tags[hctx_idx]; 3093 3094 if (tags->static_rqs && set->ops->exit_request) { 3095 int i; 3096 3097 for (i = 0; i < tags->nr_tags; i++) { 3098 struct request *rq = tags->static_rqs[i]; 3099 3100 if (!rq) 3101 continue; 3102 set->ops->exit_request(set, rq, hctx_idx); 3103 tags->static_rqs[i] = NULL; 3104 } 3105 } 3106 3107 blk_mq_clear_rq_mapping(drv_tags, tags); 3108 3109 while (!list_empty(&tags->page_list)) { 3110 page = list_first_entry(&tags->page_list, struct page, lru); 3111 list_del_init(&page->lru); 3112 /* 3113 * Remove kmemleak object previously allocated in 3114 * blk_mq_alloc_rqs(). 3115 */ 3116 kmemleak_free(page_address(page)); 3117 __free_pages(page, page->private); 3118 } 3119 } 3120 3121 void blk_mq_free_rq_map(struct blk_mq_tags *tags) 3122 { 3123 kfree(tags->rqs); 3124 tags->rqs = NULL; 3125 kfree(tags->static_rqs); 3126 tags->static_rqs = NULL; 3127 3128 blk_mq_free_tags(tags); 3129 } 3130 3131 static enum hctx_type hctx_idx_to_type(struct blk_mq_tag_set *set, 3132 unsigned int hctx_idx) 3133 { 3134 int i; 3135 3136 for (i = 0; i < set->nr_maps; i++) { 3137 unsigned int start = set->map[i].queue_offset; 3138 unsigned int end = start + set->map[i].nr_queues; 3139 3140 if (hctx_idx >= start && hctx_idx < end) 3141 break; 3142 } 3143 3144 if (i >= set->nr_maps) 3145 i = HCTX_TYPE_DEFAULT; 3146 3147 return i; 3148 } 3149 3150 static int blk_mq_get_hctx_node(struct blk_mq_tag_set *set, 3151 unsigned int hctx_idx) 3152 { 3153 enum hctx_type type = hctx_idx_to_type(set, hctx_idx); 3154 3155 return blk_mq_hw_queue_to_node(&set->map[type], hctx_idx); 3156 } 3157 3158 static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, 3159 unsigned int hctx_idx, 3160 unsigned int nr_tags, 3161 unsigned int reserved_tags) 3162 { 3163 int node = blk_mq_get_hctx_node(set, hctx_idx); 3164 struct blk_mq_tags *tags; 3165 3166 if (node == NUMA_NO_NODE) 3167 node = set->numa_node; 3168 3169 tags = blk_mq_init_tags(nr_tags, reserved_tags, node, 3170 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags)); 3171 if (!tags) 3172 return NULL; 3173 3174 tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *), 3175 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, 3176 node); 3177 if (!tags->rqs) { 3178 blk_mq_free_tags(tags); 3179 return NULL; 3180 } 3181 3182 tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *), 3183 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, 3184 node); 3185 if (!tags->static_rqs) { 3186 kfree(tags->rqs); 3187 blk_mq_free_tags(tags); 3188 return NULL; 3189 } 3190 3191 return tags; 3192 } 3193 3194 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, 3195 unsigned int hctx_idx, int node) 3196 { 3197 int ret; 3198 3199 if (set->ops->init_request) { 3200 ret = set->ops->init_request(set, rq, hctx_idx, node); 3201 if (ret) 3202 return ret; 3203 } 3204 3205 WRITE_ONCE(rq->state, MQ_RQ_IDLE); 3206 return 0; 3207 } 3208 3209 static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, 3210 struct blk_mq_tags *tags, 3211 unsigned int hctx_idx, unsigned int depth) 3212 { 3213 unsigned int i, j, 
entries_per_page, max_order = 4; 3214 int node = blk_mq_get_hctx_node(set, hctx_idx); 3215 size_t rq_size, left; 3216 3217 if (node == NUMA_NO_NODE) 3218 node = set->numa_node; 3219 3220 INIT_LIST_HEAD(&tags->page_list); 3221 3222 /* 3223 * rq_size is the size of the request plus driver payload, rounded 3224 * to the cacheline size 3225 */ 3226 rq_size = round_up(sizeof(struct request) + set->cmd_size, 3227 cache_line_size()); 3228 left = rq_size * depth; 3229 3230 for (i = 0; i < depth; ) { 3231 int this_order = max_order; 3232 struct page *page; 3233 int to_do; 3234 void *p; 3235 3236 while (this_order && left < order_to_size(this_order - 1)) 3237 this_order--; 3238 3239 do { 3240 page = alloc_pages_node(node, 3241 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO, 3242 this_order); 3243 if (page) 3244 break; 3245 if (!this_order--) 3246 break; 3247 if (order_to_size(this_order) < rq_size) 3248 break; 3249 } while (1); 3250 3251 if (!page) 3252 goto fail; 3253 3254 page->private = this_order; 3255 list_add_tail(&page->lru, &tags->page_list); 3256 3257 p = page_address(page); 3258 /* 3259 * Allow kmemleak to scan these pages as they contain pointers 3260 * to additional allocations like via ops->init_request(). 3261 */ 3262 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO); 3263 entries_per_page = order_to_size(this_order) / rq_size; 3264 to_do = min(entries_per_page, depth - i); 3265 left -= to_do * rq_size; 3266 for (j = 0; j < to_do; j++) { 3267 struct request *rq = p; 3268 3269 tags->static_rqs[i] = rq; 3270 if (blk_mq_init_request(set, rq, hctx_idx, node)) { 3271 tags->static_rqs[i] = NULL; 3272 goto fail; 3273 } 3274 3275 p += rq_size; 3276 i++; 3277 } 3278 } 3279 return 0; 3280 3281 fail: 3282 blk_mq_free_rqs(set, tags, hctx_idx); 3283 return -ENOMEM; 3284 } 3285 3286 struct rq_iter_data { 3287 struct blk_mq_hw_ctx *hctx; 3288 bool has_rq; 3289 }; 3290 3291 static bool blk_mq_has_request(struct request *rq, void *data, bool reserved) 3292 { 3293 struct rq_iter_data *iter_data = data; 3294 3295 if (rq->mq_hctx != iter_data->hctx) 3296 return true; 3297 iter_data->has_rq = true; 3298 return false; 3299 } 3300 3301 static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx) 3302 { 3303 struct blk_mq_tags *tags = hctx->sched_tags ? 3304 hctx->sched_tags : hctx->tags; 3305 struct rq_iter_data data = { 3306 .hctx = hctx, 3307 }; 3308 3309 blk_mq_all_tag_iter(tags, blk_mq_has_request, &data); 3310 return data.has_rq; 3311 } 3312 3313 static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu, 3314 struct blk_mq_hw_ctx *hctx) 3315 { 3316 if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu) 3317 return false; 3318 if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids) 3319 return false; 3320 return true; 3321 } 3322 3323 static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node) 3324 { 3325 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, 3326 struct blk_mq_hw_ctx, cpuhp_online); 3327 3328 if (!cpumask_test_cpu(cpu, hctx->cpumask) || 3329 !blk_mq_last_cpu_in_hctx(cpu, hctx)) 3330 return 0; 3331 3332 /* 3333 * Prevent new request from being allocated on the current hctx. 3334 * 3335 * The smp_mb__after_atomic() Pairs with the implied barrier in 3336 * test_and_set_bit_lock in sbitmap_get(). Ensures the inactive flag is 3337 * seen once we return from the tag allocator. 
3338 */ 3339 set_bit(BLK_MQ_S_INACTIVE, &hctx->state); 3340 smp_mb__after_atomic(); 3341 3342 /* 3343 * Try to grab a reference to the queue and wait for any outstanding 3344 * requests. If we could not grab a reference the queue has been 3345 * frozen and there are no requests. 3346 */ 3347 if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) { 3348 while (blk_mq_hctx_has_requests(hctx)) 3349 msleep(5); 3350 percpu_ref_put(&hctx->queue->q_usage_counter); 3351 } 3352 3353 return 0; 3354 } 3355 3356 static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node) 3357 { 3358 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, 3359 struct blk_mq_hw_ctx, cpuhp_online); 3360 3361 if (cpumask_test_cpu(cpu, hctx->cpumask)) 3362 clear_bit(BLK_MQ_S_INACTIVE, &hctx->state); 3363 return 0; 3364 } 3365 3366 /* 3367 * 'cpu' is going away. splice any existing rq_list entries from this 3368 * software queue to the hw queue dispatch list, and ensure that it 3369 * gets run. 3370 */ 3371 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node) 3372 { 3373 struct blk_mq_hw_ctx *hctx; 3374 struct blk_mq_ctx *ctx; 3375 LIST_HEAD(tmp); 3376 enum hctx_type type; 3377 3378 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead); 3379 if (!cpumask_test_cpu(cpu, hctx->cpumask)) 3380 return 0; 3381 3382 ctx = __blk_mq_get_ctx(hctx->queue, cpu); 3383 type = hctx->type; 3384 3385 spin_lock(&ctx->lock); 3386 if (!list_empty(&ctx->rq_lists[type])) { 3387 list_splice_init(&ctx->rq_lists[type], &tmp); 3388 blk_mq_hctx_clear_pending(hctx, ctx); 3389 } 3390 spin_unlock(&ctx->lock); 3391 3392 if (list_empty(&tmp)) 3393 return 0; 3394 3395 spin_lock(&hctx->lock); 3396 list_splice_tail_init(&tmp, &hctx->dispatch); 3397 spin_unlock(&hctx->lock); 3398 3399 blk_mq_run_hw_queue(hctx, true); 3400 return 0; 3401 } 3402 3403 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx) 3404 { 3405 if (!(hctx->flags & BLK_MQ_F_STACKING)) 3406 cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, 3407 &hctx->cpuhp_online); 3408 cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD, 3409 &hctx->cpuhp_dead); 3410 } 3411 3412 /* 3413 * Before freeing hw queue, clearing the flush request reference in 3414 * tags->rqs[] for avoiding potential UAF. 3415 */ 3416 static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags, 3417 unsigned int queue_depth, struct request *flush_rq) 3418 { 3419 int i; 3420 unsigned long flags; 3421 3422 /* The hw queue may not be mapped yet */ 3423 if (!tags) 3424 return; 3425 3426 WARN_ON_ONCE(req_ref_read(flush_rq) != 0); 3427 3428 for (i = 0; i < queue_depth; i++) 3429 cmpxchg(&tags->rqs[i], flush_rq, NULL); 3430 3431 /* 3432 * Wait until all pending iteration is done. 3433 * 3434 * Request reference is cleared and it is guaranteed to be observed 3435 * after the ->lock is released. 
3436 */ 3437 spin_lock_irqsave(&tags->lock, flags); 3438 spin_unlock_irqrestore(&tags->lock, flags); 3439 } 3440 3441 /* hctx->ctxs will be freed in queue's release handler */ 3442 static void blk_mq_exit_hctx(struct request_queue *q, 3443 struct blk_mq_tag_set *set, 3444 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) 3445 { 3446 struct request *flush_rq = hctx->fq->flush_rq; 3447 3448 if (blk_mq_hw_queue_mapped(hctx)) 3449 blk_mq_tag_idle(hctx); 3450 3451 blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx], 3452 set->queue_depth, flush_rq); 3453 if (set->ops->exit_request) 3454 set->ops->exit_request(set, flush_rq, hctx_idx); 3455 3456 if (set->ops->exit_hctx) 3457 set->ops->exit_hctx(hctx, hctx_idx); 3458 3459 blk_mq_remove_cpuhp(hctx); 3460 3461 xa_erase(&q->hctx_table, hctx_idx); 3462 3463 spin_lock(&q->unused_hctx_lock); 3464 list_add(&hctx->hctx_list, &q->unused_hctx_list); 3465 spin_unlock(&q->unused_hctx_lock); 3466 } 3467 3468 static void blk_mq_exit_hw_queues(struct request_queue *q, 3469 struct blk_mq_tag_set *set, int nr_queue) 3470 { 3471 struct blk_mq_hw_ctx *hctx; 3472 unsigned long i; 3473 3474 queue_for_each_hw_ctx(q, hctx, i) { 3475 if (i == nr_queue) 3476 break; 3477 blk_mq_exit_hctx(q, set, hctx, i); 3478 } 3479 } 3480 3481 static int blk_mq_init_hctx(struct request_queue *q, 3482 struct blk_mq_tag_set *set, 3483 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) 3484 { 3485 hctx->queue_num = hctx_idx; 3486 3487 if (!(hctx->flags & BLK_MQ_F_STACKING)) 3488 cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, 3489 &hctx->cpuhp_online); 3490 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead); 3491 3492 hctx->tags = set->tags[hctx_idx]; 3493 3494 if (set->ops->init_hctx && 3495 set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) 3496 goto unregister_cpu_notifier; 3497 3498 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, 3499 hctx->numa_node)) 3500 goto exit_hctx; 3501 3502 if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL)) 3503 goto exit_flush_rq; 3504 3505 return 0; 3506 3507 exit_flush_rq: 3508 if (set->ops->exit_request) 3509 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx); 3510 exit_hctx: 3511 if (set->ops->exit_hctx) 3512 set->ops->exit_hctx(hctx, hctx_idx); 3513 unregister_cpu_notifier: 3514 blk_mq_remove_cpuhp(hctx); 3515 return -1; 3516 } 3517 3518 static struct blk_mq_hw_ctx * 3519 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set, 3520 int node) 3521 { 3522 struct blk_mq_hw_ctx *hctx; 3523 gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY; 3524 3525 hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node); 3526 if (!hctx) 3527 goto fail_alloc_hctx; 3528 3529 if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node)) 3530 goto free_hctx; 3531 3532 atomic_set(&hctx->nr_active, 0); 3533 if (node == NUMA_NO_NODE) 3534 node = set->numa_node; 3535 hctx->numa_node = node; 3536 3537 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); 3538 spin_lock_init(&hctx->lock); 3539 INIT_LIST_HEAD(&hctx->dispatch); 3540 hctx->queue = q; 3541 hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED; 3542 3543 INIT_LIST_HEAD(&hctx->hctx_list); 3544 3545 /* 3546 * Allocate space for all possible cpus to avoid allocation at 3547 * runtime 3548 */ 3549 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *), 3550 gfp, node); 3551 if (!hctx->ctxs) 3552 goto free_cpumask; 3553 3554 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), 3555 gfp, node, false, false)) 3556 goto free_ctxs; 3557 
hctx->nr_ctx = 0; 3558 3559 spin_lock_init(&hctx->dispatch_wait_lock); 3560 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake); 3561 INIT_LIST_HEAD(&hctx->dispatch_wait.entry); 3562 3563 hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp); 3564 if (!hctx->fq) 3565 goto free_bitmap; 3566 3567 blk_mq_hctx_kobj_init(hctx); 3568 3569 return hctx; 3570 3571 free_bitmap: 3572 sbitmap_free(&hctx->ctx_map); 3573 free_ctxs: 3574 kfree(hctx->ctxs); 3575 free_cpumask: 3576 free_cpumask_var(hctx->cpumask); 3577 free_hctx: 3578 kfree(hctx); 3579 fail_alloc_hctx: 3580 return NULL; 3581 } 3582 3583 static void blk_mq_init_cpu_queues(struct request_queue *q, 3584 unsigned int nr_hw_queues) 3585 { 3586 struct blk_mq_tag_set *set = q->tag_set; 3587 unsigned int i, j; 3588 3589 for_each_possible_cpu(i) { 3590 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); 3591 struct blk_mq_hw_ctx *hctx; 3592 int k; 3593 3594 __ctx->cpu = i; 3595 spin_lock_init(&__ctx->lock); 3596 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++) 3597 INIT_LIST_HEAD(&__ctx->rq_lists[k]); 3598 3599 __ctx->queue = q; 3600 3601 /* 3602 * Set local node, IFF we have more than one hw queue. If 3603 * not, we remain on the home node of the device 3604 */ 3605 for (j = 0; j < set->nr_maps; j++) { 3606 hctx = blk_mq_map_queue_type(q, j, i); 3607 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE) 3608 hctx->numa_node = cpu_to_node(i); 3609 } 3610 } 3611 } 3612 3613 struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, 3614 unsigned int hctx_idx, 3615 unsigned int depth) 3616 { 3617 struct blk_mq_tags *tags; 3618 int ret; 3619 3620 tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags); 3621 if (!tags) 3622 return NULL; 3623 3624 ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth); 3625 if (ret) { 3626 blk_mq_free_rq_map(tags); 3627 return NULL; 3628 } 3629 3630 return tags; 3631 } 3632 3633 static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, 3634 int hctx_idx) 3635 { 3636 if (blk_mq_is_shared_tags(set->flags)) { 3637 set->tags[hctx_idx] = set->shared_tags; 3638 3639 return true; 3640 } 3641 3642 set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx, 3643 set->queue_depth); 3644 3645 return set->tags[hctx_idx]; 3646 } 3647 3648 void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, 3649 struct blk_mq_tags *tags, 3650 unsigned int hctx_idx) 3651 { 3652 if (tags) { 3653 blk_mq_free_rqs(set, tags, hctx_idx); 3654 blk_mq_free_rq_map(tags); 3655 } 3656 } 3657 3658 static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, 3659 unsigned int hctx_idx) 3660 { 3661 if (!blk_mq_is_shared_tags(set->flags)) 3662 blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx); 3663 3664 set->tags[hctx_idx] = NULL; 3665 } 3666 3667 static void blk_mq_map_swqueue(struct request_queue *q) 3668 { 3669 unsigned int j, hctx_idx; 3670 unsigned long i; 3671 struct blk_mq_hw_ctx *hctx; 3672 struct blk_mq_ctx *ctx; 3673 struct blk_mq_tag_set *set = q->tag_set; 3674 3675 queue_for_each_hw_ctx(q, hctx, i) { 3676 cpumask_clear(hctx->cpumask); 3677 hctx->nr_ctx = 0; 3678 hctx->dispatch_from = NULL; 3679 } 3680 3681 /* 3682 * Map software to hardware queues. 3683 * 3684 * If the cpu isn't present, the cpu is mapped to first hctx. 
3685 */ 3686 for_each_possible_cpu(i) { 3687 3688 ctx = per_cpu_ptr(q->queue_ctx, i); 3689 for (j = 0; j < set->nr_maps; j++) { 3690 if (!set->map[j].nr_queues) { 3691 ctx->hctxs[j] = blk_mq_map_queue_type(q, 3692 HCTX_TYPE_DEFAULT, i); 3693 continue; 3694 } 3695 hctx_idx = set->map[j].mq_map[i]; 3696 /* unmapped hw queue can be remapped after CPU topo changed */ 3697 if (!set->tags[hctx_idx] && 3698 !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) { 3699 /* 3700 * If tags initialization fails for some hctx, 3701 * that hctx won't be brought online. In this 3702 * case, remap the current ctx to hctx[0] which 3703 * is guaranteed to always have tags allocated. 3704 */ 3705 set->map[j].mq_map[i] = 0; 3706 } 3707 3708 hctx = blk_mq_map_queue_type(q, j, i); 3709 ctx->hctxs[j] = hctx; 3710 /* 3711 * If the CPU is already set in the mask, then we've 3712 * mapped this one already. This can happen if 3713 * devices share queues across queue maps. 3714 */ 3715 if (cpumask_test_cpu(i, hctx->cpumask)) 3716 continue; 3717 3718 cpumask_set_cpu(i, hctx->cpumask); 3719 hctx->type = j; 3720 ctx->index_hw[hctx->type] = hctx->nr_ctx; 3721 hctx->ctxs[hctx->nr_ctx++] = ctx; 3722 3723 /* 3724 * If the nr_ctx type overflows, we have exceeded the 3725 * number of sw queues we can support. 3726 */ 3727 BUG_ON(!hctx->nr_ctx); 3728 } 3729 3730 for (; j < HCTX_MAX_TYPES; j++) 3731 ctx->hctxs[j] = blk_mq_map_queue_type(q, 3732 HCTX_TYPE_DEFAULT, i); 3733 } 3734 3735 queue_for_each_hw_ctx(q, hctx, i) { 3736 /* 3737 * If no software queues are mapped to this hardware queue, 3738 * disable it and free the request entries. 3739 */ 3740 if (!hctx->nr_ctx) { 3741 /* Never unmap queue 0. We need it as a 3742 * fallback in case allocation for a new 3743 * remap fails 3744 */ 3745 if (i) 3746 __blk_mq_free_map_and_rqs(set, i); 3747 3748 hctx->tags = NULL; 3749 continue; 3750 } 3751 3752 hctx->tags = set->tags[i]; 3753 WARN_ON(!hctx->tags); 3754 3755 /* 3756 * Set the map size to the number of mapped software queues. 3757 * This is more accurate and more efficient than looping 3758 * over all possibly mapped software queues. 3759 */ 3760 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx); 3761 3762 /* 3763 * Initialize batch round-robin counts 3764 */ 3765 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx); 3766 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; 3767 } 3768 } 3769 3770 /* 3771 * Caller needs to ensure that we're either frozen/quiesced, or that 3772 * the queue isn't live yet. 
3773 */ 3774 static void queue_set_hctx_shared(struct request_queue *q, bool shared) 3775 { 3776 struct blk_mq_hw_ctx *hctx; 3777 unsigned long i; 3778 3779 queue_for_each_hw_ctx(q, hctx, i) { 3780 if (shared) { 3781 hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; 3782 } else { 3783 blk_mq_tag_idle(hctx); 3784 hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; 3785 } 3786 } 3787 } 3788 3789 static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set, 3790 bool shared) 3791 { 3792 struct request_queue *q; 3793 3794 lockdep_assert_held(&set->tag_list_lock); 3795 3796 list_for_each_entry(q, &set->tag_list, tag_set_list) { 3797 blk_mq_freeze_queue(q); 3798 queue_set_hctx_shared(q, shared); 3799 blk_mq_unfreeze_queue(q); 3800 } 3801 } 3802 3803 static void blk_mq_del_queue_tag_set(struct request_queue *q) 3804 { 3805 struct blk_mq_tag_set *set = q->tag_set; 3806 3807 mutex_lock(&set->tag_list_lock); 3808 list_del(&q->tag_set_list); 3809 if (list_is_singular(&set->tag_list)) { 3810 /* just transitioned to unshared */ 3811 set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; 3812 /* update existing queue */ 3813 blk_mq_update_tag_set_shared(set, false); 3814 } 3815 mutex_unlock(&set->tag_list_lock); 3816 INIT_LIST_HEAD(&q->tag_set_list); 3817 } 3818 3819 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, 3820 struct request_queue *q) 3821 { 3822 mutex_lock(&set->tag_list_lock); 3823 3824 /* 3825 * Check to see if we're transitioning to shared (from 1 to 2 queues). 3826 */ 3827 if (!list_empty(&set->tag_list) && 3828 !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) { 3829 set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; 3830 /* update existing queue */ 3831 blk_mq_update_tag_set_shared(set, true); 3832 } 3833 if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED) 3834 queue_set_hctx_shared(q, true); 3835 list_add_tail(&q->tag_set_list, &set->tag_list); 3836 3837 mutex_unlock(&set->tag_list_lock); 3838 } 3839 3840 /* All allocations will be freed in release handler of q->mq_kobj */ 3841 static int blk_mq_alloc_ctxs(struct request_queue *q) 3842 { 3843 struct blk_mq_ctxs *ctxs; 3844 int cpu; 3845 3846 ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL); 3847 if (!ctxs) 3848 return -ENOMEM; 3849 3850 ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx); 3851 if (!ctxs->queue_ctx) 3852 goto fail; 3853 3854 for_each_possible_cpu(cpu) { 3855 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu); 3856 ctx->ctxs = ctxs; 3857 } 3858 3859 q->mq_kobj = &ctxs->kobj; 3860 q->queue_ctx = ctxs->queue_ctx; 3861 3862 return 0; 3863 fail: 3864 kfree(ctxs); 3865 return -ENOMEM; 3866 } 3867 3868 /* 3869 * It is the actual release handler for mq, but we do it from 3870 * request queue's release handler for avoiding use-after-free 3871 * and headache because q->mq_kobj shouldn't have been introduced, 3872 * but we can't group ctx/kctx kobj without it. 3873 */ 3874 void blk_mq_release(struct request_queue *q) 3875 { 3876 struct blk_mq_hw_ctx *hctx, *next; 3877 unsigned long i; 3878 3879 queue_for_each_hw_ctx(q, hctx, i) 3880 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list)); 3881 3882 /* all hctx are in .unused_hctx_list now */ 3883 list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) { 3884 list_del_init(&hctx->hctx_list); 3885 kobject_put(&hctx->kobj); 3886 } 3887 3888 xa_destroy(&q->hctx_table); 3889 3890 /* 3891 * release .mq_kobj and sw queue's kobject now because 3892 * both share lifetime with request queue. 
3893 */ 3894 blk_mq_sysfs_deinit(q); 3895 } 3896 3897 static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set, 3898 void *queuedata) 3899 { 3900 struct request_queue *q; 3901 int ret; 3902 3903 q = blk_alloc_queue(set->numa_node, set->flags & BLK_MQ_F_BLOCKING); 3904 if (!q) 3905 return ERR_PTR(-ENOMEM); 3906 q->queuedata = queuedata; 3907 ret = blk_mq_init_allocated_queue(set, q); 3908 if (ret) { 3909 blk_cleanup_queue(q); 3910 return ERR_PTR(ret); 3911 } 3912 return q; 3913 } 3914 3915 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) 3916 { 3917 return blk_mq_init_queue_data(set, NULL); 3918 } 3919 EXPORT_SYMBOL(blk_mq_init_queue); 3920 3921 struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata, 3922 struct lock_class_key *lkclass) 3923 { 3924 struct request_queue *q; 3925 struct gendisk *disk; 3926 3927 q = blk_mq_init_queue_data(set, queuedata); 3928 if (IS_ERR(q)) 3929 return ERR_CAST(q); 3930 3931 disk = __alloc_disk_node(q, set->numa_node, lkclass); 3932 if (!disk) { 3933 blk_cleanup_queue(q); 3934 return ERR_PTR(-ENOMEM); 3935 } 3936 return disk; 3937 } 3938 EXPORT_SYMBOL(__blk_mq_alloc_disk); 3939 3940 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx( 3941 struct blk_mq_tag_set *set, struct request_queue *q, 3942 int hctx_idx, int node) 3943 { 3944 struct blk_mq_hw_ctx *hctx = NULL, *tmp; 3945 3946 /* reuse dead hctx first */ 3947 spin_lock(&q->unused_hctx_lock); 3948 list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) { 3949 if (tmp->numa_node == node) { 3950 hctx = tmp; 3951 break; 3952 } 3953 } 3954 if (hctx) 3955 list_del_init(&hctx->hctx_list); 3956 spin_unlock(&q->unused_hctx_lock); 3957 3958 if (!hctx) 3959 hctx = blk_mq_alloc_hctx(q, set, node); 3960 if (!hctx) 3961 goto fail; 3962 3963 if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) 3964 goto free_hctx; 3965 3966 return hctx; 3967 3968 free_hctx: 3969 kobject_put(&hctx->kobj); 3970 fail: 3971 return NULL; 3972 } 3973 3974 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, 3975 struct request_queue *q) 3976 { 3977 struct blk_mq_hw_ctx *hctx; 3978 unsigned long i, j; 3979 3980 /* protect against switching io scheduler */ 3981 mutex_lock(&q->sysfs_lock); 3982 for (i = 0; i < set->nr_hw_queues; i++) { 3983 int old_node; 3984 int node = blk_mq_get_hctx_node(set, i); 3985 struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i); 3986 3987 if (old_hctx) { 3988 old_node = old_hctx->numa_node; 3989 blk_mq_exit_hctx(q, set, old_hctx, i); 3990 } 3991 3992 if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) { 3993 if (!old_hctx) 3994 break; 3995 pr_warn("Allocate new hctx on node %d fails, fallback to previous one on node %d\n", 3996 node, old_node); 3997 hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node); 3998 WARN_ON_ONCE(!hctx); 3999 } 4000 } 4001 /* 4002 * Increasing nr_hw_queues fails. Free the newly allocated 4003 * hctxs and keep the previous q->nr_hw_queues. 
4004 */ 4005 if (i != set->nr_hw_queues) { 4006 j = q->nr_hw_queues; 4007 } else { 4008 j = i; 4009 q->nr_hw_queues = set->nr_hw_queues; 4010 } 4011 4012 xa_for_each_start(&q->hctx_table, j, hctx, j) 4013 blk_mq_exit_hctx(q, set, hctx, j); 4014 mutex_unlock(&q->sysfs_lock); 4015 } 4016 4017 static void blk_mq_update_poll_flag(struct request_queue *q) 4018 { 4019 struct blk_mq_tag_set *set = q->tag_set; 4020 4021 if (set->nr_maps > HCTX_TYPE_POLL && 4022 set->map[HCTX_TYPE_POLL].nr_queues) 4023 blk_queue_flag_set(QUEUE_FLAG_POLL, q); 4024 else 4025 blk_queue_flag_clear(QUEUE_FLAG_POLL, q); 4026 } 4027 4028 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, 4029 struct request_queue *q) 4030 { 4031 WARN_ON_ONCE(blk_queue_has_srcu(q) != 4032 !!(set->flags & BLK_MQ_F_BLOCKING)); 4033 4034 /* mark the queue as mq asap */ 4035 q->mq_ops = set->ops; 4036 4037 q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn, 4038 blk_mq_poll_stats_bkt, 4039 BLK_MQ_POLL_STATS_BKTS, q); 4040 if (!q->poll_cb) 4041 goto err_exit; 4042 4043 if (blk_mq_alloc_ctxs(q)) 4044 goto err_poll; 4045 4046 /* init q->mq_kobj and sw queues' kobjects */ 4047 blk_mq_sysfs_init(q); 4048 4049 INIT_LIST_HEAD(&q->unused_hctx_list); 4050 spin_lock_init(&q->unused_hctx_lock); 4051 4052 xa_init(&q->hctx_table); 4053 4054 blk_mq_realloc_hw_ctxs(set, q); 4055 if (!q->nr_hw_queues) 4056 goto err_hctxs; 4057 4058 INIT_WORK(&q->timeout_work, blk_mq_timeout_work); 4059 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ); 4060 4061 q->tag_set = set; 4062 4063 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; 4064 blk_mq_update_poll_flag(q); 4065 4066 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work); 4067 INIT_LIST_HEAD(&q->requeue_list); 4068 spin_lock_init(&q->requeue_lock); 4069 4070 q->nr_requests = set->queue_depth; 4071 4072 /* 4073 * Default to classic polling 4074 */ 4075 q->poll_nsec = BLK_MQ_POLL_CLASSIC; 4076 4077 blk_mq_init_cpu_queues(q, set->nr_hw_queues); 4078 blk_mq_add_queue_tag_set(set, q); 4079 blk_mq_map_swqueue(q); 4080 return 0; 4081 4082 err_hctxs: 4083 xa_destroy(&q->hctx_table); 4084 q->nr_hw_queues = 0; 4085 blk_mq_sysfs_deinit(q); 4086 err_poll: 4087 blk_stat_free_callback(q->poll_cb); 4088 q->poll_cb = NULL; 4089 err_exit: 4090 q->mq_ops = NULL; 4091 return -ENOMEM; 4092 } 4093 EXPORT_SYMBOL(blk_mq_init_allocated_queue); 4094 4095 /* tags can _not_ be used after returning from blk_mq_exit_queue */ 4096 void blk_mq_exit_queue(struct request_queue *q) 4097 { 4098 struct blk_mq_tag_set *set = q->tag_set; 4099 4100 /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */ 4101 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); 4102 /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. 
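	 * The ordering matters: keep this call after blk_mq_exit_hw_queues()
	 * above, which still needs to observe the flag.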
 */
	blk_mq_del_queue_tag_set(q);
}

static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	int i;

	if (blk_mq_is_shared_tags(set->flags)) {
		set->shared_tags = blk_mq_alloc_map_and_rqs(set,
						BLK_MQ_NO_HCTX_IDX,
						set->queue_depth);
		if (!set->shared_tags)
			return -ENOMEM;
	}

	for (i = 0; i < set->nr_hw_queues; i++) {
		if (!__blk_mq_alloc_map_and_rqs(set, i))
			goto out_unwind;
		cond_resched();
	}

	return 0;

out_unwind:
	while (--i >= 0)
		__blk_mq_free_map_and_rqs(set, i);

	if (blk_mq_is_shared_tags(set->flags)) {
		blk_mq_free_map_and_rqs(set, set->shared_tags,
					BLK_MQ_NO_HCTX_IDX);
	}

	return -ENOMEM;
}

/*
 * Allocate the request maps associated with this tag_set. Note that this
 * may reduce the depth asked for, if memory is tight. set->queue_depth
 * will be updated to reflect the allocated depth.
 */
static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
{
	unsigned int depth;
	int err;

	depth = set->queue_depth;
	do {
		err = __blk_mq_alloc_rq_maps(set);
		if (!err)
			break;

		set->queue_depth >>= 1;
		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
			err = -ENOMEM;
			break;
		}
	} while (set->queue_depth);

	if (!set->queue_depth || err) {
		pr_err("blk-mq: failed to allocate request map\n");
		return -ENOMEM;
	}

	if (depth != set->queue_depth)
		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
			depth, set->queue_depth);

	return 0;
}

static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
{
	/*
	 * blk_mq_map_queues() and multiple .map_queues() implementations
	 * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
	 * number of hardware queues.
	 */
	if (set->nr_maps == 1)
		set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;

	if (set->ops->map_queues && !is_kdump_kernel()) {
		int i;

		/*
		 * A transport's .map_queues callback is usually implemented
		 * along these lines:
		 *
		 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
		 *	mask = get_cpu_mask(queue)
		 *	for_each_cpu(cpu, mask)
		 *		set->map[x].mq_map[cpu] = queue;
		 * }
		 *
		 * When we need to remap, the table has to be cleared first
		 * to drop any stale mappings, since a CPU may not end up
		 * mapped to any hw queue at all.
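		 *
		 * As a concrete (hypothetical) sketch, not taken from any
		 * particular driver: a PCI transport would typically
		 * implement .map_queues by calling
		 * blk_mq_pci_map_queues(&set->map[HCTX_TYPE_DEFAULT], pdev, 0),
		 * or simply fall back to blk_mq_map_queues() for the default
		 * CPU-to-queue spread.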
4199 */ 4200 for (i = 0; i < set->nr_maps; i++) 4201 blk_mq_clear_mq_map(&set->map[i]); 4202 4203 return set->ops->map_queues(set); 4204 } else { 4205 BUG_ON(set->nr_maps > 1); 4206 return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); 4207 } 4208 } 4209 4210 static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set, 4211 int cur_nr_hw_queues, int new_nr_hw_queues) 4212 { 4213 struct blk_mq_tags **new_tags; 4214 4215 if (cur_nr_hw_queues >= new_nr_hw_queues) 4216 return 0; 4217 4218 new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *), 4219 GFP_KERNEL, set->numa_node); 4220 if (!new_tags) 4221 return -ENOMEM; 4222 4223 if (set->tags) 4224 memcpy(new_tags, set->tags, cur_nr_hw_queues * 4225 sizeof(*set->tags)); 4226 kfree(set->tags); 4227 set->tags = new_tags; 4228 set->nr_hw_queues = new_nr_hw_queues; 4229 4230 return 0; 4231 } 4232 4233 static int blk_mq_alloc_tag_set_tags(struct blk_mq_tag_set *set, 4234 int new_nr_hw_queues) 4235 { 4236 return blk_mq_realloc_tag_set_tags(set, 0, new_nr_hw_queues); 4237 } 4238 4239 /* 4240 * Alloc a tag set to be associated with one or more request queues. 4241 * May fail with EINVAL for various error conditions. May adjust the 4242 * requested depth down, if it's too large. In that case, the set 4243 * value will be stored in set->queue_depth. 4244 */ 4245 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) 4246 { 4247 int i, ret; 4248 4249 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS); 4250 4251 if (!set->nr_hw_queues) 4252 return -EINVAL; 4253 if (!set->queue_depth) 4254 return -EINVAL; 4255 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) 4256 return -EINVAL; 4257 4258 if (!set->ops->queue_rq) 4259 return -EINVAL; 4260 4261 if (!set->ops->get_budget ^ !set->ops->put_budget) 4262 return -EINVAL; 4263 4264 if (set->queue_depth > BLK_MQ_MAX_DEPTH) { 4265 pr_info("blk-mq: reduced tag depth to %u\n", 4266 BLK_MQ_MAX_DEPTH); 4267 set->queue_depth = BLK_MQ_MAX_DEPTH; 4268 } 4269 4270 if (!set->nr_maps) 4271 set->nr_maps = 1; 4272 else if (set->nr_maps > HCTX_MAX_TYPES) 4273 return -EINVAL; 4274 4275 /* 4276 * If a crashdump is active, then we are potentially in a very 4277 * memory constrained environment. Limit us to 1 queue and 4278 * 64 tags to prevent using too much memory. 4279 */ 4280 if (is_kdump_kernel()) { 4281 set->nr_hw_queues = 1; 4282 set->nr_maps = 1; 4283 set->queue_depth = min(64U, set->queue_depth); 4284 } 4285 /* 4286 * There is no use for more h/w queues than cpus if we just have 4287 * a single map 4288 */ 4289 if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids) 4290 set->nr_hw_queues = nr_cpu_ids; 4291 4292 if (blk_mq_alloc_tag_set_tags(set, set->nr_hw_queues) < 0) 4293 return -ENOMEM; 4294 4295 ret = -ENOMEM; 4296 for (i = 0; i < set->nr_maps; i++) { 4297 set->map[i].mq_map = kcalloc_node(nr_cpu_ids, 4298 sizeof(set->map[i].mq_map[0]), 4299 GFP_KERNEL, set->numa_node); 4300 if (!set->map[i].mq_map) 4301 goto out_free_mq_map; 4302 set->map[i].nr_queues = is_kdump_kernel() ? 
1 : set->nr_hw_queues; 4303 } 4304 4305 ret = blk_mq_update_queue_map(set); 4306 if (ret) 4307 goto out_free_mq_map; 4308 4309 ret = blk_mq_alloc_set_map_and_rqs(set); 4310 if (ret) 4311 goto out_free_mq_map; 4312 4313 mutex_init(&set->tag_list_lock); 4314 INIT_LIST_HEAD(&set->tag_list); 4315 4316 return 0; 4317 4318 out_free_mq_map: 4319 for (i = 0; i < set->nr_maps; i++) { 4320 kfree(set->map[i].mq_map); 4321 set->map[i].mq_map = NULL; 4322 } 4323 kfree(set->tags); 4324 set->tags = NULL; 4325 return ret; 4326 } 4327 EXPORT_SYMBOL(blk_mq_alloc_tag_set); 4328 4329 /* allocate and initialize a tagset for a simple single-queue device */ 4330 int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set, 4331 const struct blk_mq_ops *ops, unsigned int queue_depth, 4332 unsigned int set_flags) 4333 { 4334 memset(set, 0, sizeof(*set)); 4335 set->ops = ops; 4336 set->nr_hw_queues = 1; 4337 set->nr_maps = 1; 4338 set->queue_depth = queue_depth; 4339 set->numa_node = NUMA_NO_NODE; 4340 set->flags = set_flags; 4341 return blk_mq_alloc_tag_set(set); 4342 } 4343 EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set); 4344 4345 void blk_mq_free_tag_set(struct blk_mq_tag_set *set) 4346 { 4347 int i, j; 4348 4349 for (i = 0; i < set->nr_hw_queues; i++) 4350 __blk_mq_free_map_and_rqs(set, i); 4351 4352 if (blk_mq_is_shared_tags(set->flags)) { 4353 blk_mq_free_map_and_rqs(set, set->shared_tags, 4354 BLK_MQ_NO_HCTX_IDX); 4355 } 4356 4357 for (j = 0; j < set->nr_maps; j++) { 4358 kfree(set->map[j].mq_map); 4359 set->map[j].mq_map = NULL; 4360 } 4361 4362 kfree(set->tags); 4363 set->tags = NULL; 4364 } 4365 EXPORT_SYMBOL(blk_mq_free_tag_set); 4366 4367 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) 4368 { 4369 struct blk_mq_tag_set *set = q->tag_set; 4370 struct blk_mq_hw_ctx *hctx; 4371 int ret; 4372 unsigned long i; 4373 4374 if (!set) 4375 return -EINVAL; 4376 4377 if (q->nr_requests == nr) 4378 return 0; 4379 4380 blk_mq_freeze_queue(q); 4381 blk_mq_quiesce_queue(q); 4382 4383 ret = 0; 4384 queue_for_each_hw_ctx(q, hctx, i) { 4385 if (!hctx->tags) 4386 continue; 4387 /* 4388 * If we're using an MQ scheduler, just update the scheduler 4389 * queue depth. This is similar to what the old code would do. 4390 */ 4391 if (hctx->sched_tags) { 4392 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags, 4393 nr, true); 4394 } else { 4395 ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr, 4396 false); 4397 } 4398 if (ret) 4399 break; 4400 if (q->elevator && q->elevator->type->ops.depth_updated) 4401 q->elevator->type->ops.depth_updated(hctx); 4402 } 4403 if (!ret) { 4404 q->nr_requests = nr; 4405 if (blk_mq_is_shared_tags(set->flags)) { 4406 if (q->elevator) 4407 blk_mq_tag_update_sched_shared_tags(q); 4408 else 4409 blk_mq_tag_resize_shared_tags(set, nr); 4410 } 4411 } 4412 4413 blk_mq_unquiesce_queue(q); 4414 blk_mq_unfreeze_queue(q); 4415 4416 return ret; 4417 } 4418 4419 /* 4420 * request_queue and elevator_type pair. 4421 * It is just used by __blk_mq_update_nr_hw_queues to cache 4422 * the elevator_type associated with a request_queue. 
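 * Entries are added by blk_mq_elv_switch_none() below and consumed
 * again by blk_mq_elv_switch_back() once the new mapping is in place.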
 */
struct blk_mq_qe_pair {
	struct list_head node;
	struct request_queue *q;
	struct elevator_type *type;
};

/*
 * Cache the elevator_type in the qe pair list and switch the
 * I/O scheduler to 'none'.
 */
static bool blk_mq_elv_switch_none(struct list_head *head,
		struct request_queue *q)
{
	struct blk_mq_qe_pair *qe;

	if (!q->elevator)
		return true;

	qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
	if (!qe)
		return false;

	INIT_LIST_HEAD(&qe->node);
	qe->q = q;
	qe->type = q->elevator->type;
	list_add(&qe->node, head);

	mutex_lock(&q->sysfs_lock);
	/*
	 * After elevator_switch_mq() the previous elevator_queue is
	 * released by elevator_release(), which also drops the module
	 * reference obtained by elevator_get(). Take an extra reference
	 * on the I/O scheduler module here so that it cannot be unloaded
	 * while we still hold on to its elevator_type.
	 */
	__module_get(qe->type->elevator_owner);
	elevator_switch_mq(q, NULL);
	mutex_unlock(&q->sysfs_lock);

	return true;
}

static void blk_mq_elv_switch_back(struct list_head *head,
		struct request_queue *q)
{
	struct blk_mq_qe_pair *qe;
	struct elevator_type *t = NULL;

	list_for_each_entry(qe, head, node)
		if (qe->q == q) {
			t = qe->type;
			break;
		}

	if (!t)
		return;

	list_del(&qe->node);
	kfree(qe);

	mutex_lock(&q->sysfs_lock);
	elevator_switch_mq(q, t);
	mutex_unlock(&q->sysfs_lock);
}

static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
							int nr_hw_queues)
{
	struct request_queue *q;
	LIST_HEAD(head);
	int prev_nr_hw_queues;

	lockdep_assert_held(&set->tag_list_lock);

	if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
		nr_hw_queues = nr_cpu_ids;
	if (nr_hw_queues < 1)
		return;
	if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
		return;

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_freeze_queue(q);
	/*
	 * Switch the I/O scheduler to 'none', cleaning up the data
	 * associated with the previous scheduler. We will switch back
	 * once we are done updating the new sw to hw queue mappings.
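	 *
	 * The scheduler's per-hctx state (such as sched_tags) is sized for
	 * the current number of hardware queues, so it has to be torn down
	 * before the hctxs are reallocated and rebuilt once the update is
	 * complete; temporarily running with 'none' is the simplest way to
	 * achieve that.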
4511 */ 4512 list_for_each_entry(q, &set->tag_list, tag_set_list) 4513 if (!blk_mq_elv_switch_none(&head, q)) 4514 goto switch_back; 4515 4516 list_for_each_entry(q, &set->tag_list, tag_set_list) { 4517 blk_mq_debugfs_unregister_hctxs(q); 4518 blk_mq_sysfs_unregister(q); 4519 } 4520 4521 prev_nr_hw_queues = set->nr_hw_queues; 4522 if (blk_mq_realloc_tag_set_tags(set, set->nr_hw_queues, nr_hw_queues) < 4523 0) 4524 goto reregister; 4525 4526 set->nr_hw_queues = nr_hw_queues; 4527 fallback: 4528 blk_mq_update_queue_map(set); 4529 list_for_each_entry(q, &set->tag_list, tag_set_list) { 4530 blk_mq_realloc_hw_ctxs(set, q); 4531 blk_mq_update_poll_flag(q); 4532 if (q->nr_hw_queues != set->nr_hw_queues) { 4533 int i = prev_nr_hw_queues; 4534 4535 pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n", 4536 nr_hw_queues, prev_nr_hw_queues); 4537 for (; i < set->nr_hw_queues; i++) 4538 __blk_mq_free_map_and_rqs(set, i); 4539 4540 set->nr_hw_queues = prev_nr_hw_queues; 4541 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); 4542 goto fallback; 4543 } 4544 blk_mq_map_swqueue(q); 4545 } 4546 4547 reregister: 4548 list_for_each_entry(q, &set->tag_list, tag_set_list) { 4549 blk_mq_sysfs_register(q); 4550 blk_mq_debugfs_register_hctxs(q); 4551 } 4552 4553 switch_back: 4554 list_for_each_entry(q, &set->tag_list, tag_set_list) 4555 blk_mq_elv_switch_back(&head, q); 4556 4557 list_for_each_entry(q, &set->tag_list, tag_set_list) 4558 blk_mq_unfreeze_queue(q); 4559 } 4560 4561 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) 4562 { 4563 mutex_lock(&set->tag_list_lock); 4564 __blk_mq_update_nr_hw_queues(set, nr_hw_queues); 4565 mutex_unlock(&set->tag_list_lock); 4566 } 4567 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues); 4568 4569 /* Enable polling stats and return whether they were already enabled. */ 4570 static bool blk_poll_stats_enable(struct request_queue *q) 4571 { 4572 if (q->poll_stat) 4573 return true; 4574 4575 return blk_stats_alloc_enable(q); 4576 } 4577 4578 static void blk_mq_poll_stats_start(struct request_queue *q) 4579 { 4580 /* 4581 * We don't arm the callback if polling stats are not enabled or the 4582 * callback is already active. 4583 */ 4584 if (!q->poll_stat || blk_stat_is_active(q->poll_cb)) 4585 return; 4586 4587 blk_stat_activate_msecs(q->poll_cb, 100); 4588 } 4589 4590 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb) 4591 { 4592 struct request_queue *q = cb->data; 4593 int bucket; 4594 4595 for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) { 4596 if (cb->stat[bucket].nr_samples) 4597 q->poll_stat[bucket] = cb->stat[bucket]; 4598 } 4599 } 4600 4601 static unsigned long blk_mq_poll_nsecs(struct request_queue *q, 4602 struct request *rq) 4603 { 4604 unsigned long ret = 0; 4605 int bucket; 4606 4607 /* 4608 * If stats collection isn't on, don't sleep but turn it on for 4609 * future users 4610 */ 4611 if (!blk_poll_stats_enable(q)) 4612 return 0; 4613 4614 /* 4615 * As an optimistic guess, use half of the mean service time 4616 * for this type of request. We can (and should) make this smarter. 4617 * For instance, if the completion latencies are tight, we can 4618 * get closer than just half the mean. This is especially 4619 * important on devices where the completion latencies are longer 4620 * than ~10 usec. We do use the stats for the relevant IO size 4621 * if available which does lead to better estimates. 
4622 */ 4623 bucket = blk_mq_poll_stats_bkt(rq); 4624 if (bucket < 0) 4625 return ret; 4626 4627 if (q->poll_stat[bucket].nr_samples) 4628 ret = (q->poll_stat[bucket].mean + 1) / 2; 4629 4630 return ret; 4631 } 4632 4633 static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc) 4634 { 4635 struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, qc); 4636 struct request *rq = blk_qc_to_rq(hctx, qc); 4637 struct hrtimer_sleeper hs; 4638 enum hrtimer_mode mode; 4639 unsigned int nsecs; 4640 ktime_t kt; 4641 4642 /* 4643 * If a request has completed on queue that uses an I/O scheduler, we 4644 * won't get back a request from blk_qc_to_rq. 4645 */ 4646 if (!rq || (rq->rq_flags & RQF_MQ_POLL_SLEPT)) 4647 return false; 4648 4649 /* 4650 * If we get here, hybrid polling is enabled. Hence poll_nsec can be: 4651 * 4652 * 0: use half of prev avg 4653 * >0: use this specific value 4654 */ 4655 if (q->poll_nsec > 0) 4656 nsecs = q->poll_nsec; 4657 else 4658 nsecs = blk_mq_poll_nsecs(q, rq); 4659 4660 if (!nsecs) 4661 return false; 4662 4663 rq->rq_flags |= RQF_MQ_POLL_SLEPT; 4664 4665 /* 4666 * This will be replaced with the stats tracking code, using 4667 * 'avg_completion_time / 2' as the pre-sleep target. 4668 */ 4669 kt = nsecs; 4670 4671 mode = HRTIMER_MODE_REL; 4672 hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode); 4673 hrtimer_set_expires(&hs.timer, kt); 4674 4675 do { 4676 if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE) 4677 break; 4678 set_current_state(TASK_UNINTERRUPTIBLE); 4679 hrtimer_sleeper_start_expires(&hs, mode); 4680 if (hs.task) 4681 io_schedule(); 4682 hrtimer_cancel(&hs.timer); 4683 mode = HRTIMER_MODE_ABS; 4684 } while (hs.task && !signal_pending(current)); 4685 4686 __set_current_state(TASK_RUNNING); 4687 destroy_hrtimer_on_stack(&hs.timer); 4688 4689 /* 4690 * If we sleep, have the caller restart the poll loop to reset the 4691 * state. Like for the other success return cases, the caller is 4692 * responsible for checking if the IO completed. If the IO isn't 4693 * complete, we'll get called again and will go straight to the busy 4694 * poll loop. 
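	 * RQF_MQ_POLL_SLEPT, set above, ensures the next invocation skips
	 * the hybrid sleep and goes straight to classic polling.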
4695 */ 4696 return true; 4697 } 4698 4699 static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie, 4700 struct io_comp_batch *iob, unsigned int flags) 4701 { 4702 struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie); 4703 long state = get_current_state(); 4704 int ret; 4705 4706 do { 4707 ret = q->mq_ops->poll(hctx, iob); 4708 if (ret > 0) { 4709 __set_current_state(TASK_RUNNING); 4710 return ret; 4711 } 4712 4713 if (signal_pending_state(state, current)) 4714 __set_current_state(TASK_RUNNING); 4715 if (task_is_running(current)) 4716 return 1; 4717 4718 if (ret < 0 || (flags & BLK_POLL_ONESHOT)) 4719 break; 4720 cpu_relax(); 4721 } while (!need_resched()); 4722 4723 __set_current_state(TASK_RUNNING); 4724 return 0; 4725 } 4726 4727 int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob, 4728 unsigned int flags) 4729 { 4730 if (!(flags & BLK_POLL_NOSLEEP) && 4731 q->poll_nsec != BLK_MQ_POLL_CLASSIC) { 4732 if (blk_mq_poll_hybrid(q, cookie)) 4733 return 1; 4734 } 4735 return blk_mq_poll_classic(q, cookie, iob, flags); 4736 } 4737 4738 unsigned int blk_mq_rq_cpu(struct request *rq) 4739 { 4740 return rq->mq_ctx->cpu; 4741 } 4742 EXPORT_SYMBOL(blk_mq_rq_cpu); 4743 4744 void blk_mq_cancel_work_sync(struct request_queue *q) 4745 { 4746 if (queue_is_mq(q)) { 4747 struct blk_mq_hw_ctx *hctx; 4748 unsigned long i; 4749 4750 cancel_delayed_work_sync(&q->requeue_work); 4751 4752 queue_for_each_hw_ctx(q, hctx, i) 4753 cancel_delayed_work_sync(&hctx->run_work); 4754 } 4755 } 4756 4757 static int __init blk_mq_init(void) 4758 { 4759 int i; 4760 4761 for_each_possible_cpu(i) 4762 init_llist_head(&per_cpu(blk_cpu_done, i)); 4763 open_softirq(BLOCK_SOFTIRQ, blk_done_softirq); 4764 4765 cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD, 4766 "block/softirq:dead", NULL, 4767 blk_softirq_cpu_dead); 4768 cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL, 4769 blk_mq_hctx_notify_dead); 4770 cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online", 4771 blk_mq_hctx_notify_online, 4772 blk_mq_hctx_notify_offline); 4773 return 0; 4774 } 4775 subsys_initcall(blk_mq_init); 4776