/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);

/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	unsigned int i;

	for (i = 0; i < hctx->ctx_map.size; i++)
		if (hctx->ctx_map.map[i].word)
			return true;

	return false;
}

static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
					      struct blk_mq_ctx *ctx)
{
	return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
}

#define CTX_TO_BIT(hctx, ctx)	\
	((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	struct blk_align_bitmap *bm = get_bm(hctx, ctx);

	if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
		set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	struct blk_align_bitmap *bm = get_bm(hctx, ctx);

	clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
}

void blk_mq_freeze_queue_start(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
	if (freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		blk_mq_run_hw_queues(q, false);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);

static void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero.  For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
	blk_mq_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
	WARN_ON_ONCE(freeze_depth < 0);
	if (!freeze_depth) {
		percpu_ref_reinit(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);

	/*
	 * If we are called because the queue has now been marked as
	 * dying, we need to ensure that processes currently waiting on
	 * the queue are notified as well.
	 */
	wake_up_all(&q->mq_freeze_wq);
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
	return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
			       struct request *rq, int op,
			       unsigned int op_flags)
{
	if (blk_queue_io_stat(q))
		op_flags |= REQ_IO_STAT;

	INIT_LIST_HEAD(&rq->queuelist);
	/* csd/requeue_work/fifo_time is initialized before use */
	rq->q = q;
	rq->mq_ctx = ctx;
	req_set_op_attrs(rq, op, op_flags);
	/* do not touch atomic flags, it needs atomic ops against the timer */
	rq->cpu = -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->rq_disk = NULL;
	rq->part = NULL;
	rq->start_time = jiffies;
#ifdef CONFIG_BLK_CGROUP
	rq->rl = NULL;
	set_start_time_ns(rq);
	rq->io_start_time_ns = 0;
#endif
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->special = NULL;
	/* tag was already set */
	rq->errors = 0;

	rq->cmd = rq->__cmd;

	rq->extra_len = 0;
	rq->sense_len = 0;
	rq->resid_len = 0;
	rq->sense = NULL;

	INIT_LIST_HEAD(&rq->timeout_list);
	rq->timeout = 0;

	rq->end_io = NULL;
	rq->end_io_data = NULL;
	rq->next_rq = NULL;

	ctx->rq_dispatched[rw_is_sync(op, op_flags)]++;
}

static struct request *
__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags)
{
	struct request *rq;
	unsigned int tag;

	tag = blk_mq_get_tag(data);
	if (tag != BLK_MQ_TAG_FAIL) {
		rq = data->hctx->tags->rqs[tag];

		if (blk_mq_tag_busy(data->hctx)) {
			rq->cmd_flags = REQ_MQ_INFLIGHT;
			atomic_inc(&data->hctx->nr_active);
		}

		rq->tag = tag;
		blk_mq_rq_ctx_init(data->q, data->ctx, rq, op, op_flags);
		return rq;
	}

	return NULL;
}

struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
		unsigned int flags)
{
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
	struct blk_mq_alloc_data alloc_data;
	int ret;

	ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
	if (ret)
		return ERR_PTR(ret);

	ctx = blk_mq_get_ctx(q);
	hctx = q->mq_ops->map_queue(q, ctx->cpu);
	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);

	rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
	if (!rq && !(flags & BLK_MQ_REQ_NOWAIT)) {
		__blk_mq_run_hw_queue(hctx);
		blk_mq_put_ctx(ctx);

		ctx = blk_mq_get_ctx(q);
		hctx = q->mq_ops->map_queue(q, ctx->cpu);
		blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
		rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
		ctx = alloc_data.ctx;
	}
	blk_mq_put_ctx(ctx);
	if (!rq) {
		blk_queue_exit(q);
		return ERR_PTR(-EWOULDBLOCK);
	}

	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);

struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
		unsigned int flags, unsigned int hctx_idx)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	struct request *rq;
	struct blk_mq_alloc_data alloc_data;
	int ret;

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context.  No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, true);
	if (ret)
		return ERR_PTR(ret);

	hctx = q->queue_hw_ctx[hctx_idx];
	ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));

	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
	rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
	if (!rq) {
		blk_queue_exit(q);
		return ERR_PTR(-EWOULDBLOCK);
	}

	return rq;
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx, struct request *rq)
{
	const int tag = rq->tag;
	struct request_queue *q = rq->q;

	if (rq->cmd_flags & REQ_MQ_INFLIGHT)
		atomic_dec(&hctx->nr_active);
	rq->cmd_flags = 0;

	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	blk_mq_put_tag(hctx, tag, &ctx->last_tag);
	blk_queue_exit(q);
}

void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	ctx->rq_completed[rq_is_sync(rq)]++;
	__blk_mq_free_request(hctx, ctx, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);

void blk_mq_free_request(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q = rq->q;

	hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
	blk_mq_free_hctx_request(hctx, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

inline void __blk_mq_end_request(struct request *rq, int error)
{
	blk_account_io_done(rq);

	if (rq->end_io) {
		rq->end_io(rq, error);
	} else {
		if (unlikely(blk_bidi_rq(rq)))
			blk_mq_free_request(rq->next_rq);
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, int error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

static void __blk_mq_complete_request_remote(void *data)
{
	struct request *rq = data;

	rq->q->softirq_done_fn(rq);
}

static void blk_mq_ipi_complete_request(struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	bool shared = false;
	int cpu;

	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
		rq->q->softirq_done_fn(rq);
		return;
	}

	cpu = get_cpu();
	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
		shared = cpus_share_cache(cpu, ctx->cpu);

	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
		rq->csd.func = __blk_mq_complete_request_remote;
		rq->csd.info = rq;
		rq->csd.flags = 0;
		smp_call_function_single_async(ctx->cpu, &rq->csd);
	} else {
		rq->q->softirq_done_fn(rq);
	}
	put_cpu();
}

static void __blk_mq_complete_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (!q->softirq_done_fn)
		blk_mq_end_request(rq, rq->errors);
	else
		blk_mq_ipi_complete_request(rq);
}

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq, int error)
{
	struct request_queue *q = rq->q;

	if (unlikely(blk_should_fake_timeout(q)))
		return;
	if (!blk_mark_rq_complete(rq)) {
		rq->errors = error;
		__blk_mq_complete_request(rq);
	}
}
EXPORT_SYMBOL(blk_mq_complete_request);

int blk_mq_request_started(struct request *rq)
{
	return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
}
EXPORT_SYMBOL_GPL(blk_mq_request_started);

void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_issue(q, rq);

	rq->resid_len = blk_rq_bytes(rq);
	if (unlikely(blk_bidi_rq(rq)))
		rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);

	blk_add_timer(rq);

	/*
	 * Ensure that ->deadline is visible before we set the started
	 * flag and clear the completed flag.
	 */
	smp_mb__before_atomic();

	/*
	 * Mark us as started and clear complete. Complete might have been
	 * set if requeue raced with timeout, which then marked it as
	 * complete. So be sure to clear complete again when we start
	 * the request, otherwise we'll ignore the completion event.
	 */
	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
		set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);

	if (q->dma_drain_size && blk_rq_bytes(rq)) {
		/*
		 * Make sure space for the drain appears.  We know we can do
		 * this because max_hw_segments has been adjusted to be one
		 * fewer than the device can handle.
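		 * The extra segment accounted for here is dropped again in
		 * __blk_mq_requeue_request() if the request gets requeued.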
		 */
		rq->nr_phys_segments++;
	}
}
EXPORT_SYMBOL(blk_mq_start_request);

static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_requeue(q, rq);

	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
		if (q->dma_drain_size && blk_rq_bytes(rq))
			rq->nr_phys_segments--;
	}
}

void blk_mq_requeue_request(struct request *rq)
{
	__blk_mq_requeue_request(rq);

	BUG_ON(blk_queued_rq(rq));
	blk_mq_add_to_requeue_list(rq, true);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;
	unsigned long flags;

	spin_lock_irqsave(&q->requeue_lock, flags);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->cmd_flags & REQ_SOFTBARRIER))
			continue;

		rq->cmd_flags &= ~REQ_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		blk_mq_insert_request(rq, true, false, false);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_insert_request(rq, false, false, false);
	}

	/*
	 * Use the start variant of queue running here, so that running
	 * the requeue work will kick stopped queues.
	 */
	blk_mq_start_hw_queues(q);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
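	 * The flag is cleared again in blk_mq_requeue_work() before the
	 * request is reinserted.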
	 */
	BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->cmd_flags |= REQ_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);

void blk_mq_cancel_requeue_work(struct request_queue *q)
{
	cancel_work_sync(&q->requeue_work);
}
EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_schedule_work(&q->requeue_work);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_abort_requeue_list(struct request_queue *q)
{
	unsigned long flags;
	LIST_HEAD(rq_list);

	spin_lock_irqsave(&q->requeue_lock, flags);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	while (!list_empty(&rq_list)) {
		struct request *rq;

		rq = list_first_entry(&rq_list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		rq->errors = -EIO;
		blk_mq_end_request(rq, rq->errors);
	}
}
EXPORT_SYMBOL(blk_mq_abort_requeue_list);

struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
	if (tag < tags->nr_tags)
		return tags->rqs[tag];

	return NULL;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

struct blk_mq_timeout_data {
	unsigned long next;
	unsigned int next_set;
};

void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
	struct blk_mq_ops *ops = req->q->mq_ops;
	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;

	/*
	 * We know that complete is set at this point. If STARTED isn't set
	 * anymore, then the request isn't active and the "timeout" should
	 * just be ignored. This can happen due to the bitflag ordering.
	 * Timeout first checks if STARTED is set, and if it is, assumes
	 * the request is active. But if we race with completion, then
	 * both flags will get cleared. So check here again, and ignore
	 * a timeout event with a request that isn't active.
	 */
	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
		return;

	if (ops->timeout)
		ret = ops->timeout(req, reserved);

	switch (ret) {
	case BLK_EH_HANDLED:
		__blk_mq_complete_request(req);
		break;
	case BLK_EH_RESET_TIMER:
		blk_add_timer(req);
		blk_clear_rq_complete(req);
		break;
	case BLK_EH_NOT_HANDLED:
		break;
	default:
		printk(KERN_ERR "block: bad eh return: %d\n", ret);
		break;
	}
}

static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
		struct request *rq, void *priv, bool reserved)
{
	struct blk_mq_timeout_data *data = priv;

	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
		/*
		 * If a request wasn't started before the queue was
		 * marked dying, kill it here or it'll go unnoticed.
		 */
		if (unlikely(blk_queue_dying(rq->q))) {
			rq->errors = -EIO;
			blk_mq_end_request(rq, rq->errors);
		}
		return;
	}

	if (time_after_eq(jiffies, rq->deadline)) {
		if (!blk_mark_rq_complete(rq))
			blk_mq_rq_timed_out(rq, reserved);
	} else if (!data->next_set || time_after(data->next, rq->deadline)) {
		data->next = rq->deadline;
		data->next_set = 1;
	}
}

static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	struct blk_mq_timeout_data data = {
		.next		= 0,
		.next_set	= 0,
	};
	int i;

	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting for
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_mq_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);

	if (data.next_set) {
		data.next = blk_rq_timeout(round_jiffies_up(data.next));
		mod_timer(&q->timeout, data.next);
	} else {
		struct blk_mq_hw_ctx *hctx;

		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
	}
	blk_queue_exit(q);
}

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_ctx *ctx, struct bio *bio)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
		int el_ret;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		el_ret = blk_try_merge(rq, bio);
		if (el_ret == ELEVATOR_BACK_MERGE) {
			if (bio_attempt_back_merge(q, rq, bio)) {
				ctx->rq_merged++;
				return true;
			}
			break;
		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
			if (bio_attempt_front_merge(q, rq, bio)) {
				ctx->rq_merged++;
				return true;
			}
			break;
		}
	}

	return false;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct blk_mq_ctx *ctx;
	int i;

	for (i = 0; i < hctx->ctx_map.size; i++) {
		struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
		unsigned int off, bit;

		if (!bm->word)
			continue;

		bit = 0;
		off = i * hctx->ctx_map.bits_per_word;
		do {
			bit = find_next_bit(&bm->word, bm->depth, bit);
			if (bit >= bm->depth)
				break;

			ctx = hctx->ctxs[bit + off];
			clear_bit(bit, &bm->word);
			spin_lock(&ctx->lock);
			list_splice_tail_init(&ctx->rq_list, list);
			spin_unlock(&ctx->lock);

			bit++;
		} while (1);
	}
}

/*
 * Run this hardware queue, pulling any software queues mapped to it in.
 * Note that this function currently has various problems around ordering
 * of IO. In particular, we'd like FIFO behaviour on handling existing
 * items on the hctx->dispatch list. Ignore that for now.
 */
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct request *rq;
	LIST_HEAD(rq_list);
	LIST_HEAD(driver_list);
	struct list_head *dptr;
	int queued;

	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
		return;

	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
		cpu_online(hctx->next_cpu));

	hctx->run++;

	/*
	 * Touch any software queue that has pending entries.
	 */
	flush_busy_ctxs(hctx, &rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them
	 * and stuff them at the front for more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Start off with dptr being NULL, so we start the first request
	 * immediately, even if we have more pending.
	 */
	dptr = NULL;

	/*
	 * Now process all the entries, sending them to the driver.
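	 * Dispatch stops at the first BLK_MQ_RQ_QUEUE_BUSY return; whatever
	 * is left on the local list is moved back to hctx->dispatch below.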
	 */
	queued = 0;
	while (!list_empty(&rq_list)) {
		struct blk_mq_queue_data bd;
		int ret;

		rq = list_first_entry(&rq_list, struct request, queuelist);
		list_del_init(&rq->queuelist);

		bd.rq = rq;
		bd.list = dptr;
		bd.last = list_empty(&rq_list);

		ret = q->mq_ops->queue_rq(hctx, &bd);
		switch (ret) {
		case BLK_MQ_RQ_QUEUE_OK:
			queued++;
			break;
		case BLK_MQ_RQ_QUEUE_BUSY:
			list_add(&rq->queuelist, &rq_list);
			__blk_mq_requeue_request(rq);
			break;
		default:
			pr_err("blk-mq: bad return on queue: %d\n", ret);
		case BLK_MQ_RQ_QUEUE_ERROR:
			rq->errors = -EIO;
			blk_mq_end_request(rq, rq->errors);
			break;
		}

		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
			break;

		/*
		 * We've done the first request. If we have more than 1
		 * left in the list, set dptr to defer issue.
		 */
		if (!dptr && rq_list.next != rq_list.prev)
			dptr = &driver_list;
	}

	if (!queued)
		hctx->dispatched[0]++;
	else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
		hctx->dispatched[ilog2(queued) + 1]++;

	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
	if (!list_empty(&rq_list)) {
		spin_lock(&hctx->lock);
		list_splice(&rq_list, &hctx->dispatch);
		spin_unlock(&hctx->lock);
		/*
		 * The queue is expected to be stopped with
		 * BLK_MQ_RQ_QUEUE_BUSY, but it's possible the queue is
		 * stopped and restarted again before this. Queue restart
		 * will dispatch requests. And since requests in rq_list
		 * aren't added into hctx->dispatch yet, the requests in
		 * rq_list might get lost.
		 *
		 * blk_mq_run_hw_queue() already checks the STOPPED bit
		 **/
		blk_mq_run_hw_queue(hctx, true);
	}
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->queue->nr_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	if (--hctx->next_cpu_batch <= 0) {
		int cpu = hctx->next_cpu, next_cpu;

		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = cpumask_first(hctx->cpumask);

		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;

		return cpu;
	}

	return hctx->next_cpu;
}

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state) ||
	    !blk_mq_hw_queue_mapped(hctx)))
		return;

	if (!async) {
		int cpu = get_cpu();
		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
			__blk_mq_run_hw_queue(hctx);
			put_cpu();
			return;
		}

		put_cpu();
	}

	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
			&hctx->run_work, 0);
}

void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if ((!blk_mq_hctx_has_pending(hctx) &&
		    list_empty_careful(&hctx->dispatch)) ||
		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
			continue;

		blk_mq_run_hw_queue(hctx, async);
	}
}
EXPORT_SYMBOL(blk_mq_run_hw_queues);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	cancel_delayed_work(&hctx->run_work);
	cancel_delayed_work(&hctx->delay_work);
	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);

void blk_mq_stop_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_stop_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);

void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);

	blk_mq_run_hw_queue(hctx, false);
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);

void blk_mq_start_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);

void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
			continue;

		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
		blk_mq_run_hw_queue(hctx, async);
	}
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);

static void blk_mq_run_work_fn(struct work_struct *work)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);

	__blk_mq_run_hw_queue(hctx);
}

static void blk_mq_delay_work_fn(struct work_struct *work)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);

	if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
		__blk_mq_run_hw_queue(hctx);
}

void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
		return;

	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
			&hctx->delay_work, msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_queue);
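
/*
 * Insertion helpers: a request is queued on its software queue
 * (ctx->rq_list) and the owning hardware queue is marked pending in
 * hctx->ctx_map so that the next queue run picks it up.
 */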
static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
					    struct request *rq,
					    bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	trace_block_rq_insert(hctx->queue, rq);

	if (at_head)
		list_add(&rq->queuelist, &ctx->rq_list);
	else
		list_add_tail(&rq->queuelist, &ctx->rq_list);
}

static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
				    struct request *rq, bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	__blk_mq_insert_req_list(hctx, rq, at_head);
	blk_mq_hctx_mark_pending(hctx, ctx);
}

void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
			   bool async)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;

	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	spin_lock(&ctx->lock);
	__blk_mq_insert_request(hctx, rq, at_head);
	spin_unlock(&ctx->lock);

	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

static void blk_mq_insert_requests(struct request_queue *q,
				   struct blk_mq_ctx *ctx,
				   struct list_head *list,
				   int depth,
				   bool from_schedule)
{
	struct blk_mq_hw_ctx *hctx;

	trace_block_unplug(q, depth, !from_schedule);

	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	/*
	 * preemption doesn't flush plug list, so it's possible ctx->cpu is
	 * offline now
	 */
	spin_lock(&ctx->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		BUG_ON(rq->mq_ctx != ctx);
		list_del_init(&rq->queuelist);
		__blk_mq_insert_req_list(hctx, rq, false);
	}
	blk_mq_hctx_mark_pending(hctx, ctx);
	spin_unlock(&ctx->lock);

	blk_mq_run_hw_queue(hctx, from_schedule);
}

static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return !(rqa->mq_ctx < rqb->mq_ctx ||
		 (rqa->mq_ctx == rqb->mq_ctx &&
		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	struct blk_mq_ctx *this_ctx;
	struct request_queue *this_q;
	struct request *rq;
	LIST_HEAD(list);
	LIST_HEAD(ctx_list);
	unsigned int depth;

	list_splice_init(&plug->mq_list, &list);

	list_sort(NULL, &list, plug_ctx_cmp);

	this_q = NULL;
	this_ctx = NULL;
	depth = 0;

	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
		list_del_init(&rq->queuelist);
		BUG_ON(!rq->q);
		if (rq->mq_ctx != this_ctx) {
			if (this_ctx) {
				blk_mq_insert_requests(this_q, this_ctx,
							&ctx_list, depth,
							from_schedule);
			}

			this_ctx = rq->mq_ctx;
			this_q = rq->q;
			depth = 0;
		}

		depth++;
		list_add_tail(&rq->queuelist, &ctx_list);
	}

	/*
	 * If 'this_ctx' is set, we know we have entries to complete
	 * on 'ctx_list'. Do those.
	 */
	if (this_ctx) {
		blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
				       from_schedule);
	}
}

static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
	init_request_from_bio(rq, bio);

	blk_account_io_start(rq, 1);
}

static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
{
	return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
		!blk_queue_nomerges(hctx->queue);
}

static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
					 struct blk_mq_ctx *ctx,
					 struct request *rq, struct bio *bio)
{
	if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
		blk_mq_bio_to_request(rq, bio);
		spin_lock(&ctx->lock);
insert_rq:
		__blk_mq_insert_request(hctx, rq, false);
		spin_unlock(&ctx->lock);
		return false;
	} else {
		struct request_queue *q = hctx->queue;

		spin_lock(&ctx->lock);
		if (!blk_mq_attempt_merge(q, ctx, bio)) {
			blk_mq_bio_to_request(rq, bio);
			goto insert_rq;
		}

		spin_unlock(&ctx->lock);
		__blk_mq_free_request(hctx, ctx, rq);
		return true;
	}
}

struct blk_map_ctx {
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
};

static struct request *blk_mq_map_request(struct request_queue *q,
					  struct bio *bio,
					  struct blk_map_ctx *data)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	struct request *rq;
	int op = bio_data_dir(bio);
	int op_flags = 0;
	struct blk_mq_alloc_data alloc_data;

	blk_queue_enter_live(q);
	ctx = blk_mq_get_ctx(q);
	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	if (rw_is_sync(bio_op(bio), bio->bi_opf))
		op_flags |= REQ_SYNC;

	trace_block_getrq(q, bio, op);
	blk_mq_set_alloc_data(&alloc_data, q, BLK_MQ_REQ_NOWAIT, ctx, hctx);
	rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
	if (unlikely(!rq)) {
		__blk_mq_run_hw_queue(hctx);
		blk_mq_put_ctx(ctx);
		trace_block_sleeprq(q, bio, op);

		ctx = blk_mq_get_ctx(q);
		hctx = q->mq_ops->map_queue(q, ctx->cpu);
		blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
		rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
		ctx = alloc_data.ctx;
		hctx = alloc_data.hctx;
	}

	hctx->queued++;
	data->hctx = hctx;
	data->ctx = ctx;
	return rq;
}

static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
{
	int ret;
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q,
			rq->mq_ctx->cpu);
	struct blk_mq_queue_data bd = {
		.rq = rq,
		.list = NULL,
		.last = 1
	};
	blk_qc_t new_cookie = blk_tag_to_qc_t(rq->tag, hctx->queue_num);

	/*
	 * For OK queue, we are done. For error, kill it. Any other
	 * error (busy), just add it to our list as we previously
	 * would have done
	 */
	ret = q->mq_ops->queue_rq(hctx, &bd);
	if (ret == BLK_MQ_RQ_QUEUE_OK) {
		*cookie = new_cookie;
		return 0;
	}

	__blk_mq_requeue_request(rq);

	if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
		*cookie = BLK_QC_T_NONE;
		rq->errors = -EIO;
		blk_mq_end_request(rq, rq->errors);
		return 0;
	}

	return -1;
}

/*
 * Multiple hardware queue variant. This will not use per-process plugs,
 * but will attempt to bypass the hctx queueing if we can go straight to
 * hardware for SYNC IO.
 */
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
	const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
	struct blk_map_ctx data;
	struct request *rq;
	unsigned int request_count = 0;
	struct blk_plug *plug;
	struct request *same_queue_rq = NULL;
	blk_qc_t cookie;

	blk_queue_bounce(q, &bio);

	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}

	blk_queue_split(q, &bio, q->bio_split);

	if (!is_flush_fua && !blk_queue_nomerges(q) &&
	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
		return BLK_QC_T_NONE;

	rq = blk_mq_map_request(q, bio, &data);
	if (unlikely(!rq))
		return BLK_QC_T_NONE;

	cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);

	if (unlikely(is_flush_fua)) {
		blk_mq_bio_to_request(rq, bio);
		blk_insert_flush(rq);
		goto run_queue;
	}

	plug = current->plug;
	/*
	 * If the driver supports deferred issue based on 'last', then
	 * queue it up like normal since we can potentially save some
	 * CPU this way.
	 */
	if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
	    !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
		struct request *old_rq = NULL;

		blk_mq_bio_to_request(rq, bio);

		/*
		 * We do limited plugging. If the bio can be merged, do that.
		 * Otherwise the existing request in the plug list will be
		 * issued. So the plug list will have one request at most
		 */
		if (plug) {
			/*
			 * The plug list might get flushed before this. If that
			 * happens, same_queue_rq is invalid and plug list is
			 * empty
			 */
			if (same_queue_rq && !list_empty(&plug->mq_list)) {
				old_rq = same_queue_rq;
				list_del_init(&old_rq->queuelist);
			}
			list_add_tail(&rq->queuelist, &plug->mq_list);
		} else /* is_sync */
			old_rq = rq;
		blk_mq_put_ctx(data.ctx);
		if (!old_rq)
			goto done;
		if (!blk_mq_direct_issue_request(old_rq, &cookie))
			goto done;
		blk_mq_insert_request(old_rq, false, true, true);
		goto done;
	}

	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
		/*
		 * For a SYNC request, send it to the hardware immediately. For
		 * an ASYNC request, just ensure that we run it later on. The
		 * latter allows for merging opportunities and more efficient
		 * dispatching.
		 */
run_queue:
		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
	}
	blk_mq_put_ctx(data.ctx);
done:
	return cookie;
}

/*
 * Single hardware queue variant. This will attempt to use any per-process
 * plug for merging and IO deferral.
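 * Requests are staged on current->plug->mq_list and submitted in a batch
 * via blk_mq_flush_plug_list() when the plug is flushed.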
 */
static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
{
	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
	const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
	struct blk_plug *plug;
	unsigned int request_count = 0;
	struct blk_map_ctx data;
	struct request *rq;
	blk_qc_t cookie;

	blk_queue_bounce(q, &bio);

	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}

	blk_queue_split(q, &bio, q->bio_split);

	if (!is_flush_fua && !blk_queue_nomerges(q)) {
		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
			return BLK_QC_T_NONE;
	} else
		request_count = blk_plug_queued_count(q);

	rq = blk_mq_map_request(q, bio, &data);
	if (unlikely(!rq))
		return BLK_QC_T_NONE;

	cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);

	if (unlikely(is_flush_fua)) {
		blk_mq_bio_to_request(rq, bio);
		blk_insert_flush(rq);
		goto run_queue;
	}

	/*
	 * A task plug currently exists. Since this is completely lockless,
	 * utilize that to temporarily store requests until the task is
	 * either done or scheduled away.
	 */
	plug = current->plug;
	if (plug) {
		blk_mq_bio_to_request(rq, bio);
		if (!request_count)
			trace_block_plug(q);

		blk_mq_put_ctx(data.ctx);

		if (request_count >= BLK_MAX_REQUEST_COUNT) {
			blk_flush_plug_list(plug, false);
			trace_block_plug(q);
		}

		list_add_tail(&rq->queuelist, &plug->mq_list);
		return cookie;
	}

	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
		/*
		 * For a SYNC request, send it to the hardware immediately. For
		 * an ASYNC request, just ensure that we run it later on. The
		 * latter allows for merging opportunities and more efficient
		 * dispatching.
		 */
run_queue:
		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
	}

	blk_mq_put_ctx(data.ctx);
	return cookie;
}

/*
 * Default mapping to a software queue, since we use one per CPU.
 */
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
EXPORT_SYMBOL(blk_mq_map_queue);

static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
		struct blk_mq_tags *tags, unsigned int hctx_idx)
{
	struct page *page;

	if (tags->rqs && set->ops->exit_request) {
		int i;

		for (i = 0; i < tags->nr_tags; i++) {
			if (!tags->rqs[i])
				continue;
			set->ops->exit_request(set->driver_data, tags->rqs[i],
						hctx_idx, i);
			tags->rqs[i] = NULL;
		}
	}

	while (!list_empty(&tags->page_list)) {
		page = list_first_entry(&tags->page_list, struct page, lru);
		list_del_init(&page->lru);
		/*
		 * Remove kmemleak object previously allocated in
		 * blk_mq_init_rq_map().
		 */
		kmemleak_free(page_address(page));
		__free_pages(page, page->private);
	}

	kfree(tags->rqs);

	blk_mq_free_tags(tags);
}

static size_t order_to_size(unsigned int order)
{
	return (size_t)PAGE_SIZE << order;
}

static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
		unsigned int hctx_idx)
{
	struct blk_mq_tags *tags;
	unsigned int i, j, entries_per_page, max_order = 4;
	size_t rq_size, left;

	tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
				set->numa_node,
				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
	if (!tags)
		return NULL;

	INIT_LIST_HEAD(&tags->page_list);

	tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
				 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
				 set->numa_node);
	if (!tags->rqs) {
		blk_mq_free_tags(tags);
		return NULL;
	}

	/*
	 * rq_size is the size of the request plus driver payload, rounded
	 * to the cacheline size
	 */
	rq_size = round_up(sizeof(struct request) + set->cmd_size,
				cache_line_size());
	left = rq_size * set->queue_depth;

	for (i = 0; i < set->queue_depth; ) {
		int this_order = max_order;
		struct page *page;
		int to_do;
		void *p;

		while (this_order && left < order_to_size(this_order - 1))
			this_order--;

		do {
			page = alloc_pages_node(set->numa_node,
				GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
				this_order);
			if (page)
				break;
			if (!this_order--)
				break;
			if (order_to_size(this_order) < rq_size)
				break;
		} while (1);

		if (!page)
			goto fail;

		page->private = this_order;
		list_add_tail(&page->lru, &tags->page_list);

		p = page_address(page);
		/*
		 * Allow kmemleak to scan these pages as they contain pointers
		 * to additional allocations like via ops->init_request().
		 */
		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_KERNEL);
		entries_per_page = order_to_size(this_order) / rq_size;
		to_do = min(entries_per_page, set->queue_depth - i);
		left -= to_do * rq_size;
		for (j = 0; j < to_do; j++) {
			tags->rqs[i] = p;
			if (set->ops->init_request) {
				if (set->ops->init_request(set->driver_data,
						tags->rqs[i], hctx_idx, i,
						set->numa_node)) {
					tags->rqs[i] = NULL;
					goto fail;
				}
			}

			p += rq_size;
			i++;
		}
	}
	return tags;

fail:
	blk_mq_free_rq_map(set, tags, hctx_idx);
	return NULL;
}

static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
{
	kfree(bitmap->map);
}

static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
{
	unsigned int bpw = 8, total, num_maps, i;

	bitmap->bits_per_word = bpw;

	num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
	bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
					GFP_KERNEL, node);
	if (!bitmap->map)
		return -ENOMEM;

	total = nr_cpu_ids;
	for (i = 0; i < num_maps; i++) {
		bitmap->map[i].depth = min(total, bitmap->bits_per_word);
		total -= bitmap->map[i].depth;
	}

	return 0;
}

/*
 * 'cpu' is going away. Splice any existing rq_list entries from this
 * software queue to the hw queue dispatch list, and ensure that it
 * gets run.
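 * The entries are spliced off under ctx->lock and appended to
 * hctx->dispatch under hctx->lock before the queue is kicked.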
 */
static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
{
	struct blk_mq_ctx *ctx;
	LIST_HEAD(tmp);

	ctx = __blk_mq_get_ctx(hctx->queue, cpu);

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_list)) {
		list_splice_init(&ctx->rq_list, &tmp);
		blk_mq_hctx_clear_pending(hctx, ctx);
	}
	spin_unlock(&ctx->lock);

	if (list_empty(&tmp))
		return NOTIFY_OK;

	spin_lock(&hctx->lock);
	list_splice_tail_init(&tmp, &hctx->dispatch);
	spin_unlock(&hctx->lock);

	blk_mq_run_hw_queue(hctx, true);
	return NOTIFY_OK;
}

static int blk_mq_hctx_notify(void *data, unsigned long action,
			      unsigned int cpu)
{
	struct blk_mq_hw_ctx *hctx = data;

	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		return blk_mq_hctx_cpu_offline(hctx, cpu);

	/*
	 * In case of CPU online, tags may be reallocated
	 * in blk_mq_map_swqueue() after mapping is updated.
	 */

	return NOTIFY_OK;
}

/* hctx->ctxs will be freed in queue's release handler */
static void blk_mq_exit_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	unsigned flush_start_tag = set->queue_depth;

	blk_mq_tag_idle(hctx);

	if (set->ops->exit_request)
		set->ops->exit_request(set->driver_data,
				       hctx->fq->flush_rq, hctx_idx,
				       flush_start_tag + hctx_idx);

	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);

	blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
	blk_free_flush_queue(hctx->fq);
	blk_mq_free_bitmap(&hctx->ctx_map);
}

static void blk_mq_exit_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set, int nr_queue)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (i == nr_queue)
			break;
		blk_mq_exit_hctx(q, set, hctx, i);
	}
}

static void blk_mq_free_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		free_cpumask_var(hctx->cpumask);
}

static int blk_mq_init_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
{
	int node;
	unsigned flush_start_tag = set->queue_depth;

	node = hctx->numa_node;
	if (node == NUMA_NO_NODE)
		node = hctx->numa_node = set->numa_node;

	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
	INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
	spin_lock_init(&hctx->lock);
	INIT_LIST_HEAD(&hctx->dispatch);
	hctx->queue = q;
	hctx->queue_num = hctx_idx;
	hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;

	blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
					blk_mq_hctx_notify, hctx);
	blk_mq_register_cpu_notifier(&hctx->cpu_notifier);

	hctx->tags = set->tags[hctx_idx];

	/*
	 * Allocate space for all possible cpus to avoid allocation at
	 * runtime
	 */
	hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
					GFP_KERNEL, node);
	if (!hctx->ctxs)
		goto unregister_cpu_notifier;

	if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
		goto free_ctxs;

	hctx->nr_ctx = 0;

	if (set->ops->init_hctx &&
	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
		goto free_bitmap;

	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
	if (!hctx->fq)
		goto exit_hctx;

	if (set->ops->init_request &&
	    set->ops->init_request(set->driver_data,
				   hctx->fq->flush_rq, hctx_idx,
				   flush_start_tag + hctx_idx, node))
		goto free_fq;

	return 0;

free_fq:
	kfree(hctx->fq);
exit_hctx:
	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);
free_bitmap:
	blk_mq_free_bitmap(&hctx->ctx_map);
free_ctxs:
	kfree(hctx->ctxs);
unregister_cpu_notifier:
	blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);

	return -1;
}

static void blk_mq_init_cpu_queues(struct request_queue *q,
				   unsigned int nr_hw_queues)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
		struct blk_mq_hw_ctx *hctx;

		memset(__ctx, 0, sizeof(*__ctx));
		__ctx->cpu = i;
		spin_lock_init(&__ctx->lock);
		INIT_LIST_HEAD(&__ctx->rq_list);
		__ctx->queue = q;

		/* If the cpu isn't online, the cpu is mapped to first hctx */
		if (!cpu_online(i))
			continue;

		hctx = q->mq_ops->map_queue(q, i);

		/*
		 * Set local node, IFF we have more than one hw queue. If
		 * not, we remain on the home node of the device
		 */
		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
			hctx->numa_node = local_memory_node(cpu_to_node(i));
	}
}

static void blk_mq_map_swqueue(struct request_queue *q,
			       const struct cpumask *online_mask)
{
	unsigned int i;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	struct blk_mq_tag_set *set = q->tag_set;

	/*
	 * Avoid others reading incomplete hctx->cpumask through sysfs
	 */
	mutex_lock(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i) {
		cpumask_clear(hctx->cpumask);
		hctx->nr_ctx = 0;
	}

	/*
	 * Map software to hardware queues
	 */
	for_each_possible_cpu(i) {
		/* If the cpu isn't online, the cpu is mapped to first hctx */
		if (!cpumask_test_cpu(i, online_mask))
			continue;

		ctx = per_cpu_ptr(q->queue_ctx, i);
		hctx = q->mq_ops->map_queue(q, i);

		cpumask_set_cpu(i, hctx->cpumask);
		ctx->index_hw = hctx->nr_ctx;
		hctx->ctxs[hctx->nr_ctx++] = ctx;
	}

	mutex_unlock(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_ctxmap *map = &hctx->ctx_map;

		/*
		 * If no software queues are mapped to this hardware queue,
		 * disable it and free the request entries.
		 */
		if (!hctx->nr_ctx) {
			if (set->tags[i]) {
				blk_mq_free_rq_map(set, set->tags[i], i);
				set->tags[i] = NULL;
			}
			hctx->tags = NULL;
			continue;
		}

		/* unmapped hw queue can be remapped after CPU topo changed */
		if (!set->tags[i])
			set->tags[i] = blk_mq_init_rq_map(set, i);
		hctx->tags = set->tags[i];
		WARN_ON(!hctx->tags);

		cpumask_copy(hctx->tags->cpumask, hctx->cpumask);
		/*
		 * Set the map size to the number of mapped software queues.
		 * This is more accurate and more efficient than looping
		 * over all possibly mapped software queues.
		 */
		map->size = DIV_ROUND_UP(hctx->nr_ctx, map->bits_per_word);

		/*
		 * Initialize batch roundrobin counts
		 */
		hctx->next_cpu = cpumask_first(hctx->cpumask);
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}
}

static void queue_set_hctx_shared(struct request_queue *q, bool shared)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (shared)
			hctx->flags |= BLK_MQ_F_TAG_SHARED;
		else
			hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
	}
}

static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
{
	struct request_queue *q;

	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_freeze_queue(q);
		queue_set_hctx_shared(q, shared);
		blk_mq_unfreeze_queue(q);
	}
}

static void blk_mq_del_queue_tag_set(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	mutex_lock(&set->tag_list_lock);
	list_del_init(&q->tag_set_list);
	if (list_is_singular(&set->tag_list)) {
		/* just transitioned to unshared */
		set->flags &= ~BLK_MQ_F_TAG_SHARED;
		/* update existing queue */
		blk_mq_update_tag_set_depth(set, false);
	}
	mutex_unlock(&set->tag_list_lock);
}

static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
				     struct request_queue *q)
{
	q->tag_set = set;

	mutex_lock(&set->tag_list_lock);

	/* Check to see if we're transitioning to shared (from 1 to 2 queues). */
	if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
		set->flags |= BLK_MQ_F_TAG_SHARED;
		/* update existing queue */
		blk_mq_update_tag_set_depth(set, true);
	}
	if (set->flags & BLK_MQ_F_TAG_SHARED)
		queue_set_hctx_shared(q, true);
	list_add_tail(&q->tag_set_list, &set->tag_list);

	mutex_unlock(&set->tag_list_lock);
}

/*
 * It is the actual release handler for mq, but we do it from
 * request queue's release handler for avoiding use-after-free
 * and headache because q->mq_kobj shouldn't have been introduced,
 * but we can't group ctx/kctx kobj without it.
 */
void blk_mq_release(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	/* hctx kobj stays in hctx */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx)
			continue;
		kfree(hctx->ctxs);
		kfree(hctx);
	}

	kfree(q->mq_map);
	q->mq_map = NULL;

	kfree(q->queue_hw_ctx);

	/* ctx kobj stays in queue_ctx */
	free_percpu(q->queue_ctx);
}

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
{
	struct request_queue *uninit_q, *q;

	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
	if (!uninit_q)
		return ERR_PTR(-ENOMEM);

	q = blk_mq_init_allocated_queue(set, uninit_q);
	if (IS_ERR(q))
		blk_cleanup_queue(uninit_q);

	return q;
}
EXPORT_SYMBOL(blk_mq_init_queue);

static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
				   struct request_queue *q)
{
	int i, j;
	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;

	blk_mq_sysfs_unregister(q);
	for (i = 0; i < set->nr_hw_queues; i++) {
		int node;

		if (hctxs[i])
			continue;

		node = blk_mq_hw_queue_to_node(q->mq_map, i);
		hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
					GFP_KERNEL, node);
		if (!hctxs[i])
			break;

		if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
						node)) {
			kfree(hctxs[i]);
			hctxs[i] = NULL;
			break;
		}

		atomic_set(&hctxs[i]->nr_active, 0);
		hctxs[i]->numa_node = node;
		hctxs[i]->queue_num = i;

		if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
			free_cpumask_var(hctxs[i]->cpumask);
			kfree(hctxs[i]);
			hctxs[i] = NULL;
			break;
		}
		blk_mq_hctx_kobj_init(hctxs[i]);
	}
	for (j = i; j < q->nr_hw_queues; j++) {
		struct blk_mq_hw_ctx *hctx = hctxs[j];

		if (hctx) {
			if (hctx->tags) {
				blk_mq_free_rq_map(set, hctx->tags, j);
				set->tags[j] = NULL;
			}
			blk_mq_exit_hctx(q, set, hctx, j);
			free_cpumask_var(hctx->cpumask);
			kobject_put(&hctx->kobj);
			kfree(hctx->ctxs);
			kfree(hctx);
			hctxs[j] = NULL;
		}
	}
	q->nr_hw_queues = i;
	blk_mq_sysfs_register(q);
}

struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q)
{
	/* mark the queue as mq asap */
	q->mq_ops = set->ops;

	q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
	if (!q->queue_ctx)
		goto err_exit;

	q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
						GFP_KERNEL, set->numa_node);
	if (!q->queue_hw_ctx)
		goto err_percpu;

	q->mq_map = blk_mq_make_queue_map(set);
	if (!q->mq_map)
		goto err_map;

	blk_mq_realloc_hw_ctxs(set, q);
	if (!q->nr_hw_queues)
		goto err_hctxs;

	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);

static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
				   struct request_queue *q)
{
	int i, j;
	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;

	blk_mq_sysfs_unregister(q);
	for (i = 0; i < set->nr_hw_queues; i++) {
		int node;

		if (hctxs[i])
			continue;

		node = blk_mq_hw_queue_to_node(q->mq_map, i);
		hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
					GFP_KERNEL, node);
		if (!hctxs[i])
			break;

		if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
					     node)) {
			kfree(hctxs[i]);
			hctxs[i] = NULL;
			break;
		}

		atomic_set(&hctxs[i]->nr_active, 0);
		hctxs[i]->numa_node = node;
		hctxs[i]->queue_num = i;

		if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
			free_cpumask_var(hctxs[i]->cpumask);
			kfree(hctxs[i]);
			hctxs[i] = NULL;
			break;
		}
		blk_mq_hctx_kobj_init(hctxs[i]);
	}
	for (j = i; j < q->nr_hw_queues; j++) {
		struct blk_mq_hw_ctx *hctx = hctxs[j];

		if (hctx) {
			if (hctx->tags) {
				blk_mq_free_rq_map(set, hctx->tags, j);
				set->tags[j] = NULL;
			}
			blk_mq_exit_hctx(q, set, hctx, j);
			free_cpumask_var(hctx->cpumask);
			kobject_put(&hctx->kobj);
			kfree(hctx->ctxs);
			kfree(hctx);
			hctxs[j] = NULL;
		}
	}
	q->nr_hw_queues = i;
	blk_mq_sysfs_register(q);
}

struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q)
{
	/* mark the queue as mq asap */
	q->mq_ops = set->ops;

	q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
	if (!q->queue_ctx)
		goto err_exit;

	q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
				       GFP_KERNEL, set->numa_node);
	if (!q->queue_hw_ctx)
		goto err_percpu;

	q->mq_map = blk_mq_make_queue_map(set);
	if (!q->mq_map)
		goto err_map;

	blk_mq_realloc_hw_ctxs(set, q);
	if (!q->nr_hw_queues)
		goto err_hctxs;

	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);

	q->nr_queues = nr_cpu_ids;

	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;

	if (!(set->flags & BLK_MQ_F_SG_MERGE))
		q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;

	q->sg_reserved_size = INT_MAX;

	INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
	INIT_LIST_HEAD(&q->requeue_list);
	spin_lock_init(&q->requeue_lock);

	if (q->nr_hw_queues > 1)
		blk_queue_make_request(q, blk_mq_make_request);
	else
		blk_queue_make_request(q, blk_sq_make_request);

	/*
	 * Do this after blk_queue_make_request() overrides it...
	 */
	q->nr_requests = set->queue_depth;

	if (set->ops->complete)
		blk_queue_softirq_done(q, set->ops->complete);

	blk_mq_init_cpu_queues(q, set->nr_hw_queues);

	get_online_cpus();
	mutex_lock(&all_q_mutex);

	list_add_tail(&q->all_q_node, &all_q_list);
	blk_mq_add_queue_tag_set(set, q);
	blk_mq_map_swqueue(q, cpu_online_mask);

	mutex_unlock(&all_q_mutex);
	put_online_cpus();

	return q;

err_hctxs:
	kfree(q->mq_map);
err_map:
	kfree(q->queue_hw_ctx);
err_percpu:
	free_percpu(q->queue_ctx);
err_exit:
	q->mq_ops = NULL;
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(blk_mq_init_allocated_queue);

void blk_mq_free_queue(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);

	blk_mq_del_queue_tag_set(q);

	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
	blk_mq_free_hw_queues(q, set);
}

/* Basically redo blk_mq_init_queue with queue frozen */
static void blk_mq_queue_reinit(struct request_queue *q,
				const struct cpumask *online_mask)
{
	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));

	blk_mq_sysfs_unregister(q);

	blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues, online_mask);

	/*
	 * Redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
	 * we should change hctx numa_node according to the new topology (this
	 * involves freeing and re-allocating memory, is it worth doing?).
	 */

	blk_mq_map_swqueue(q, online_mask);

	blk_mq_sysfs_register(q);
}

static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
				      unsigned long action, void *hcpu)
{
	struct request_queue *q;
	int cpu = (unsigned long)hcpu;
	/*
	 * New online cpumask which is going to be set in this hotplug event.
	 * Declare it static (not on the stack), as cpu-hotplug operations are
	 * invoked one by one and allocating it dynamically here could fail.
	 */
	static struct cpumask online_new;

	/*
	 * Before a hotadded cpu starts handling requests, new mappings must
	 * be established.  Otherwise, requests in the hw queue might never
	 * be dispatched.
	 *
	 * For example, there is a single hw queue (hctx) and two CPU queues
	 * (ctx0 for CPU0, and ctx1 for CPU1).
	 *
	 * Now CPU1 is just onlined and a request is inserted into
	 * ctx1->rq_list, and bit0 is set in the pending bitmap, as
	 * ctx1->index_hw is still zero.
	 *
	 * And then while running the hw queue, flush_busy_ctxs() finds bit0
	 * set in the pending bitmap and tries to retrieve requests in
	 * hctx->ctxs[0]->rq_list.  But hctx->ctxs[0] is a pointer to ctx0,
	 * so the request in ctx1->rq_list is ignored.
	 */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		cpumask_copy(&online_new, cpu_online_mask);
		break;
	case CPU_UP_PREPARE:
		cpumask_copy(&online_new, cpu_online_mask);
		cpumask_set_cpu(cpu, &online_new);
		break;
	default:
		return NOTIFY_OK;
	}

	mutex_lock(&all_q_mutex);

	/*
	 * We need to freeze and reinit all existing queues.  Freezing
	 * involves a synchronous wait for an RCU grace period and doing it
	 * one by one may take a long time.  Start freezing all queues in
	 * one swoop and then wait for the completions so that freezing can
	 * take place in parallel.
	 */
	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_mq_freeze_queue_start(q);
	list_for_each_entry(q, &all_q_list, all_q_node) {
		blk_mq_freeze_queue_wait(q);

		/*
		 * timeout handler can't touch hw queue during the
		 * reinitialization
		 */
		del_timer_sync(&q->timeout);
	}

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_mq_queue_reinit(q, &online_new);

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_mq_unfreeze_queue(q);

	mutex_unlock(&all_q_mutex);
	return NOTIFY_OK;
}

static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	int i;

	for (i = 0; i < set->nr_hw_queues; i++) {
		set->tags[i] = blk_mq_init_rq_map(set, i);
		if (!set->tags[i])
			goto out_unwind;
	}

	return 0;

out_unwind:
	while (--i >= 0)
		blk_mq_free_rq_map(set, set->tags[i], i);

	return -ENOMEM;
}

/*
 * Allocate the request maps associated with this tag_set. Note that this
 * may reduce the depth asked for, if memory is tight. set->queue_depth
 * will be updated to reflect the allocated depth.
 */
static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	unsigned int depth;
	int err;

	depth = set->queue_depth;
	do {
		err = __blk_mq_alloc_rq_maps(set);
		if (!err)
			break;

		set->queue_depth >>= 1;
		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
			err = -ENOMEM;
			break;
		}
	} while (set->queue_depth);

	if (!set->queue_depth || err) {
		pr_err("blk-mq: failed to allocate request map\n");
		return -ENOMEM;
	}

	if (depth != set->queue_depth)
		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
			depth, set->queue_depth);

	return 0;
}

struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags)
{
	return tags->cpumask;
}
EXPORT_SYMBOL_GPL(blk_mq_tags_cpumask);
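
/*
 * Illustrative sketch (not built): the minimum a driver must provide before
 * calling blk_mq_alloc_tag_set() below -- .queue_rq and .map_queue are the
 * two mandatory hooks it checks for.  The example_* names are hypothetical;
 * blk_mq_map_queue is the stock CPU-to-hw-queue mapping helper drivers of
 * this vintage typically plug in.  This toy ->queue_rq completes every
 * request immediately instead of talking to real hardware.
 */
#if 0
static int example_queue_rq(struct blk_mq_hw_ctx *hctx,
			    const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);	/* arm the timeout, mark it started */
	blk_mq_end_request(rq, 0);	/* "hardware" finished instantly */
	return BLK_MQ_RQ_QUEUE_OK;
}

struct blk_mq_ops example_mq_ops = {
	.queue_rq	= example_queue_rq,
	.map_queue	= blk_mq_map_queue,
};
#endif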

/*
 * Alloc a tag set to be associated with one or more request queues.
 * May fail with EINVAL for various error conditions. May adjust the
 * requested depth down, if it is too large. In that case, the adjusted
 * value will be stored in set->queue_depth.
 */
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);

	if (!set->nr_hw_queues)
		return -EINVAL;
	if (!set->queue_depth)
		return -EINVAL;
	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
		return -EINVAL;

	if (!set->ops->queue_rq || !set->ops->map_queue)
		return -EINVAL;

	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
		pr_info("blk-mq: reduced tag depth to %u\n",
			BLK_MQ_MAX_DEPTH);
		set->queue_depth = BLK_MQ_MAX_DEPTH;
	}

	/*
	 * If a crashdump is active, then we are potentially in a very
	 * memory constrained environment. Limit us to 1 queue and
	 * 64 tags to prevent using too much memory.
	 */
	if (is_kdump_kernel()) {
		set->nr_hw_queues = 1;
		set->queue_depth = min(64U, set->queue_depth);
	}
	/*
	 * There is no use for more h/w queues than cpus.
	 */
	if (set->nr_hw_queues > nr_cpu_ids)
		set->nr_hw_queues = nr_cpu_ids;

	set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
				 GFP_KERNEL, set->numa_node);
	if (!set->tags)
		return -ENOMEM;

	if (blk_mq_alloc_rq_maps(set))
		goto enomem;

	mutex_init(&set->tag_list_lock);
	INIT_LIST_HEAD(&set->tag_list);

	return 0;
enomem:
	kfree(set->tags);
	set->tags = NULL;
	return -ENOMEM;
}
EXPORT_SYMBOL(blk_mq_alloc_tag_set);

void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
	int i;

	for (i = 0; i < nr_cpu_ids; i++) {
		if (set->tags[i])
			blk_mq_free_rq_map(set, set->tags[i], i);
	}

	kfree(set->tags);
	set->tags = NULL;
}
EXPORT_SYMBOL(blk_mq_free_tag_set);

int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i, ret;

	if (!set || nr > set->queue_depth)
		return -EINVAL;

	ret = 0;
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->tags)
			continue;
		ret = blk_mq_tag_update_depth(hctx->tags, nr);
		if (ret)
			break;
	}

	if (!ret)
		q->nr_requests = nr;

	return ret;
}

void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
{
	struct request_queue *q;

	if (nr_hw_queues > nr_cpu_ids)
		nr_hw_queues = nr_cpu_ids;
	if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
		return;

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_freeze_queue(q);

	set->nr_hw_queues = nr_hw_queues;
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_realloc_hw_ctxs(set, q);

		if (q->nr_hw_queues > 1)
			blk_queue_make_request(q, blk_mq_make_request);
		else
			blk_queue_make_request(q, blk_sq_make_request);

		blk_mq_queue_reinit(q, cpu_online_mask);
	}

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_unfreeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);

void blk_mq_disable_hotplug(void)
{
	mutex_lock(&all_q_mutex);
}

void blk_mq_enable_hotplug(void)
{
	mutex_unlock(&all_q_mutex);
}

static int __init blk_mq_init(void)
{
	blk_mq_cpu_init();

	hotcpu_notifier(blk_mq_queue_reinit_notify, 0);

	return 0;
}
subsys_initcall(blk_mq_init);
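
/*
 * Illustrative sketch (not built): how a driver would use the exported
 * blk_mq_update_nr_hw_queues() above after discovering that its device can
 * now service more hardware queues (for instance after a controller reset).
 * The example_* names are hypothetical.  Per-queue depth, by contrast, is
 * normally resized from user space through the queue's nr_requests sysfs
 * attribute, which ends up in blk_mq_update_nr_requests().
 */
#if 0
static void example_grow_hw_queues(struct blk_mq_tag_set *set,
				   int new_nr_hw_queues)
{
	/*
	 * All queues sharing this tag set are frozen, remapped and
	 * unfrozen inside this call; no extra locking is needed here.
	 */
	blk_mq_update_nr_hw_queues(set, new_nr_hw_queues);
}
#endif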