1 /* 2 * Copyright (C) 1991, 1992 Linus Torvalds 3 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics 4 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE 5 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de> 6 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> 7 * - July2000 8 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001 9 */ 10 11 /* 12 * This handles all read/write requests to block devices 13 */ 14 #include <linux/kernel.h> 15 #include <linux/module.h> 16 #include <linux/backing-dev.h> 17 #include <linux/bio.h> 18 #include <linux/blkdev.h> 19 #include <linux/blk-mq.h> 20 #include <linux/highmem.h> 21 #include <linux/mm.h> 22 #include <linux/kernel_stat.h> 23 #include <linux/string.h> 24 #include <linux/init.h> 25 #include <linux/completion.h> 26 #include <linux/slab.h> 27 #include <linux/swap.h> 28 #include <linux/writeback.h> 29 #include <linux/task_io_accounting_ops.h> 30 #include <linux/fault-inject.h> 31 #include <linux/list_sort.h> 32 #include <linux/delay.h> 33 #include <linux/ratelimit.h> 34 #include <linux/pm_runtime.h> 35 #include <linux/blk-cgroup.h> 36 #include <linux/debugfs.h> 37 #include <linux/bpf.h> 38 39 #define CREATE_TRACE_POINTS 40 #include <trace/events/block.h> 41 42 #include "blk.h" 43 #include "blk-mq.h" 44 #include "blk-mq-sched.h" 45 #include "blk-wbt.h" 46 47 #ifdef CONFIG_DEBUG_FS 48 struct dentry *blk_debugfs_root; 49 #endif 50 51 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap); 52 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); 53 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); 54 EXPORT_TRACEPOINT_SYMBOL_GPL(block_split); 55 EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug); 56 57 DEFINE_IDA(blk_queue_ida); 58 59 /* 60 * For the allocated request tables 61 */ 62 struct kmem_cache *request_cachep; 63 64 /* 65 * For queue allocation 66 */ 67 struct kmem_cache *blk_requestq_cachep; 68 69 /* 70 * Controlling structure to kblockd 71 */ 72 static struct workqueue_struct *kblockd_workqueue; 73 74 /** 75 * blk_queue_flag_set - atomically set a queue flag 76 * @flag: flag to be set 77 * @q: request queue 78 */ 79 void blk_queue_flag_set(unsigned int flag, struct request_queue *q) 80 { 81 unsigned long flags; 82 83 spin_lock_irqsave(q->queue_lock, flags); 84 queue_flag_set(flag, q); 85 spin_unlock_irqrestore(q->queue_lock, flags); 86 } 87 EXPORT_SYMBOL(blk_queue_flag_set); 88 89 /** 90 * blk_queue_flag_clear - atomically clear a queue flag 91 * @flag: flag to be cleared 92 * @q: request queue 93 */ 94 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q) 95 { 96 unsigned long flags; 97 98 spin_lock_irqsave(q->queue_lock, flags); 99 queue_flag_clear(flag, q); 100 spin_unlock_irqrestore(q->queue_lock, flags); 101 } 102 EXPORT_SYMBOL(blk_queue_flag_clear); 103 104 /** 105 * blk_queue_flag_test_and_set - atomically test and set a queue flag 106 * @flag: flag to be set 107 * @q: request queue 108 * 109 * Returns the previous value of @flag - 0 if the flag was not set and 1 if 110 * the flag was already set. 111 */ 112 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q) 113 { 114 unsigned long flags; 115 bool res; 116 117 spin_lock_irqsave(q->queue_lock, flags); 118 res = queue_flag_test_and_set(flag, q); 119 spin_unlock_irqrestore(q->queue_lock, flags); 120 121 return res; 122 } 123 EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set); 124 125 /** 126 * blk_queue_flag_test_and_clear - atomically test and clear a queue flag 127 * @flag: flag to be cleared 128 * @q: request queue 129 * 130 * Returns the previous value of @flag - 0 if the flag was not set and 1 if 131 * the flag was set. 132 */ 133 bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q) 134 { 135 unsigned long flags; 136 bool res; 137 138 spin_lock_irqsave(q->queue_lock, flags); 139 res = queue_flag_test_and_clear(flag, q); 140 spin_unlock_irqrestore(q->queue_lock, flags); 141 142 return res; 143 } 144 EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_clear); 145 146 static void blk_clear_congested(struct request_list *rl, int sync) 147 { 148 #ifdef CONFIG_CGROUP_WRITEBACK 149 clear_wb_congested(rl->blkg->wb_congested, sync); 150 #else 151 /* 152 * If !CGROUP_WRITEBACK, all blkg's map to bdi->wb and we shouldn't 153 * flip its congestion state for events on other blkcgs. 154 */ 155 if (rl == &rl->q->root_rl) 156 clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync); 157 #endif 158 } 159 160 static void blk_set_congested(struct request_list *rl, int sync) 161 { 162 #ifdef CONFIG_CGROUP_WRITEBACK 163 set_wb_congested(rl->blkg->wb_congested, sync); 164 #else 165 /* see blk_clear_congested() */ 166 if (rl == &rl->q->root_rl) 167 set_wb_congested(rl->q->backing_dev_info->wb.congested, sync); 168 #endif 169 } 170 171 void blk_queue_congestion_threshold(struct request_queue *q) 172 { 173 int nr; 174 175 nr = q->nr_requests - (q->nr_requests / 8) + 1; 176 if (nr > q->nr_requests) 177 nr = q->nr_requests; 178 q->nr_congestion_on = nr; 179 180 nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1; 181 if (nr < 1) 182 nr = 1; 183 q->nr_congestion_off = nr; 184 } 185 186 void blk_rq_init(struct request_queue *q, struct request *rq) 187 { 188 memset(rq, 0, sizeof(*rq)); 189 190 INIT_LIST_HEAD(&rq->queuelist); 191 INIT_LIST_HEAD(&rq->timeout_list); 192 rq->cpu = -1; 193 rq->q = q; 194 rq->__sector = (sector_t) -1; 195 INIT_HLIST_NODE(&rq->hash); 196 RB_CLEAR_NODE(&rq->rb_node); 197 rq->tag = -1; 198 rq->internal_tag = -1; 199 rq->start_time_ns = ktime_get_ns(); 200 rq->part = NULL; 201 } 202 EXPORT_SYMBOL(blk_rq_init); 203 204 static const struct { 205 int errno; 206 const char *name; 207 } blk_errors[] = { 208 [BLK_STS_OK] = { 0, "" }, 209 [BLK_STS_NOTSUPP] = { -EOPNOTSUPP, "operation not supported" }, 210 [BLK_STS_TIMEOUT] = { -ETIMEDOUT, "timeout" }, 211 [BLK_STS_NOSPC] = { -ENOSPC, "critical space allocation" }, 212 [BLK_STS_TRANSPORT] = { -ENOLINK, "recoverable transport" }, 213 [BLK_STS_TARGET] = { -EREMOTEIO, "critical target" }, 214 [BLK_STS_NEXUS] = { -EBADE, "critical nexus" }, 215 [BLK_STS_MEDIUM] = { -ENODATA, "critical medium" }, 216 [BLK_STS_PROTECTION] = { -EILSEQ, "protection" }, 217 [BLK_STS_RESOURCE] = { -ENOMEM, "kernel resource" }, 218 [BLK_STS_DEV_RESOURCE] = { -EBUSY, "device resource" }, 219 [BLK_STS_AGAIN] = { -EAGAIN, "nonblocking retry" }, 220 221 /* device mapper special case, should not leak out: */ 222 [BLK_STS_DM_REQUEUE] = { -EREMCHG, "dm internal retry" }, 223 224 /* everything else not covered above: */ 225 [BLK_STS_IOERR] = { -EIO, "I/O" }, 226 }; 227 228 blk_status_t errno_to_blk_status(int errno) 229 { 230 int i; 231 232 for (i = 0; i < ARRAY_SIZE(blk_errors); i++) { 233 if (blk_errors[i].errno == errno) 234 return (__force blk_status_t)i; 235 } 236 237 return BLK_STS_IOERR; 238 } 239 EXPORT_SYMBOL_GPL(errno_to_blk_status); 240 241 int blk_status_to_errno(blk_status_t status) 242 { 243 int idx = (__force int)status; 244 245 if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors))) 246 return -EIO; 247 return blk_errors[idx].errno; 248 } 249 EXPORT_SYMBOL_GPL(blk_status_to_errno); 250 251 static void print_req_error(struct request *req, blk_status_t status) 252 { 253 int idx = (__force int)status; 254 255 if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors))) 256 return; 257 258 printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n", 259 __func__, blk_errors[idx].name, req->rq_disk ? 260 req->rq_disk->disk_name : "?", 261 (unsigned long long)blk_rq_pos(req)); 262 } 263 264 static void req_bio_endio(struct request *rq, struct bio *bio, 265 unsigned int nbytes, blk_status_t error) 266 { 267 if (error) 268 bio->bi_status = error; 269 270 if (unlikely(rq->rq_flags & RQF_QUIET)) 271 bio_set_flag(bio, BIO_QUIET); 272 273 bio_advance(bio, nbytes); 274 275 /* don't actually finish bio if it's part of flush sequence */ 276 /* 277 * XXX this code looks suspicious - it's not consistent with advancing 278 * req->bio in caller 279 */ 280 if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ)) 281 bio_endio(bio); 282 } 283 284 void blk_dump_rq_flags(struct request *rq, char *msg) 285 { 286 printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg, 287 rq->rq_disk ? rq->rq_disk->disk_name : "?", 288 (unsigned long long) rq->cmd_flags); 289 290 printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n", 291 (unsigned long long)blk_rq_pos(rq), 292 blk_rq_sectors(rq), blk_rq_cur_sectors(rq)); 293 printk(KERN_INFO " bio %p, biotail %p, len %u\n", 294 rq->bio, rq->biotail, blk_rq_bytes(rq)); 295 } 296 EXPORT_SYMBOL(blk_dump_rq_flags); 297 298 static void blk_delay_work(struct work_struct *work) 299 { 300 struct request_queue *q; 301 302 q = container_of(work, struct request_queue, delay_work.work); 303 spin_lock_irq(q->queue_lock); 304 __blk_run_queue(q); 305 spin_unlock_irq(q->queue_lock); 306 } 307 308 /** 309 * blk_delay_queue - restart queueing after defined interval 310 * @q: The &struct request_queue in question 311 * @msecs: Delay in msecs 312 * 313 * Description: 314 * Sometimes queueing needs to be postponed for a little while, to allow 315 * resources to come back. This function will make sure that queueing is 316 * restarted around the specified time. 317 */ 318 void blk_delay_queue(struct request_queue *q, unsigned long msecs) 319 { 320 lockdep_assert_held(q->queue_lock); 321 WARN_ON_ONCE(q->mq_ops); 322 323 if (likely(!blk_queue_dead(q))) 324 queue_delayed_work(kblockd_workqueue, &q->delay_work, 325 msecs_to_jiffies(msecs)); 326 } 327 EXPORT_SYMBOL(blk_delay_queue); 328 329 /** 330 * blk_start_queue_async - asynchronously restart a previously stopped queue 331 * @q: The &struct request_queue in question 332 * 333 * Description: 334 * blk_start_queue_async() will clear the stop flag on the queue, and 335 * ensure that the request_fn for the queue is run from an async 336 * context. 337 **/ 338 void blk_start_queue_async(struct request_queue *q) 339 { 340 lockdep_assert_held(q->queue_lock); 341 WARN_ON_ONCE(q->mq_ops); 342 343 queue_flag_clear(QUEUE_FLAG_STOPPED, q); 344 blk_run_queue_async(q); 345 } 346 EXPORT_SYMBOL(blk_start_queue_async); 347 348 /** 349 * blk_start_queue - restart a previously stopped queue 350 * @q: The &struct request_queue in question 351 * 352 * Description: 353 * blk_start_queue() will clear the stop flag on the queue, and call 354 * the request_fn for the queue if it was in a stopped state when 355 * entered. Also see blk_stop_queue(). 356 **/ 357 void blk_start_queue(struct request_queue *q) 358 { 359 lockdep_assert_held(q->queue_lock); 360 WARN_ON_ONCE(q->mq_ops); 361 362 queue_flag_clear(QUEUE_FLAG_STOPPED, q); 363 __blk_run_queue(q); 364 } 365 EXPORT_SYMBOL(blk_start_queue); 366 367 /** 368 * blk_stop_queue - stop a queue 369 * @q: The &struct request_queue in question 370 * 371 * Description: 372 * The Linux block layer assumes that a block driver will consume all 373 * entries on the request queue when the request_fn strategy is called. 374 * Often this will not happen, because of hardware limitations (queue 375 * depth settings). If a device driver gets a 'queue full' response, 376 * or if it simply chooses not to queue more I/O at one point, it can 377 * call this function to prevent the request_fn from being called until 378 * the driver has signalled it's ready to go again. This happens by calling 379 * blk_start_queue() to restart queue operations. 380 **/ 381 void blk_stop_queue(struct request_queue *q) 382 { 383 lockdep_assert_held(q->queue_lock); 384 WARN_ON_ONCE(q->mq_ops); 385 386 cancel_delayed_work(&q->delay_work); 387 queue_flag_set(QUEUE_FLAG_STOPPED, q); 388 } 389 EXPORT_SYMBOL(blk_stop_queue); 390 391 /** 392 * blk_sync_queue - cancel any pending callbacks on a queue 393 * @q: the queue 394 * 395 * Description: 396 * The block layer may perform asynchronous callback activity 397 * on a queue, such as calling the unplug function after a timeout. 398 * A block device may call blk_sync_queue to ensure that any 399 * such activity is cancelled, thus allowing it to release resources 400 * that the callbacks might use. The caller must already have made sure 401 * that its ->make_request_fn will not re-add plugging prior to calling 402 * this function. 403 * 404 * This function does not cancel any asynchronous activity arising 405 * out of elevator or throttling code. That would require elevator_exit() 406 * and blkcg_exit_queue() to be called with queue lock initialized. 407 * 408 */ 409 void blk_sync_queue(struct request_queue *q) 410 { 411 del_timer_sync(&q->timeout); 412 cancel_work_sync(&q->timeout_work); 413 414 if (q->mq_ops) { 415 struct blk_mq_hw_ctx *hctx; 416 int i; 417 418 cancel_delayed_work_sync(&q->requeue_work); 419 queue_for_each_hw_ctx(q, hctx, i) 420 cancel_delayed_work_sync(&hctx->run_work); 421 } else { 422 cancel_delayed_work_sync(&q->delay_work); 423 } 424 } 425 EXPORT_SYMBOL(blk_sync_queue); 426 427 /** 428 * blk_set_preempt_only - set QUEUE_FLAG_PREEMPT_ONLY 429 * @q: request queue pointer 430 * 431 * Returns the previous value of the PREEMPT_ONLY flag - 0 if the flag was not 432 * set and 1 if the flag was already set. 433 */ 434 int blk_set_preempt_only(struct request_queue *q) 435 { 436 return blk_queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q); 437 } 438 EXPORT_SYMBOL_GPL(blk_set_preempt_only); 439 440 void blk_clear_preempt_only(struct request_queue *q) 441 { 442 blk_queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q); 443 wake_up_all(&q->mq_freeze_wq); 444 } 445 EXPORT_SYMBOL_GPL(blk_clear_preempt_only); 446 447 /** 448 * __blk_run_queue_uncond - run a queue whether or not it has been stopped 449 * @q: The queue to run 450 * 451 * Description: 452 * Invoke request handling on a queue if there are any pending requests. 453 * May be used to restart request handling after a request has completed. 454 * This variant runs the queue whether or not the queue has been 455 * stopped. Must be called with the queue lock held and interrupts 456 * disabled. See also @blk_run_queue. 457 */ 458 inline void __blk_run_queue_uncond(struct request_queue *q) 459 { 460 lockdep_assert_held(q->queue_lock); 461 WARN_ON_ONCE(q->mq_ops); 462 463 if (unlikely(blk_queue_dead(q))) 464 return; 465 466 /* 467 * Some request_fn implementations, e.g. scsi_request_fn(), unlock 468 * the queue lock internally. As a result multiple threads may be 469 * running such a request function concurrently. Keep track of the 470 * number of active request_fn invocations such that blk_drain_queue() 471 * can wait until all these request_fn calls have finished. 472 */ 473 q->request_fn_active++; 474 q->request_fn(q); 475 q->request_fn_active--; 476 } 477 EXPORT_SYMBOL_GPL(__blk_run_queue_uncond); 478 479 /** 480 * __blk_run_queue - run a single device queue 481 * @q: The queue to run 482 * 483 * Description: 484 * See @blk_run_queue. 485 */ 486 void __blk_run_queue(struct request_queue *q) 487 { 488 lockdep_assert_held(q->queue_lock); 489 WARN_ON_ONCE(q->mq_ops); 490 491 if (unlikely(blk_queue_stopped(q))) 492 return; 493 494 __blk_run_queue_uncond(q); 495 } 496 EXPORT_SYMBOL(__blk_run_queue); 497 498 /** 499 * blk_run_queue_async - run a single device queue in workqueue context 500 * @q: The queue to run 501 * 502 * Description: 503 * Tells kblockd to perform the equivalent of @blk_run_queue on behalf 504 * of us. 505 * 506 * Note: 507 * Since it is not allowed to run q->delay_work after blk_cleanup_queue() 508 * has canceled q->delay_work, callers must hold the queue lock to avoid 509 * race conditions between blk_cleanup_queue() and blk_run_queue_async(). 510 */ 511 void blk_run_queue_async(struct request_queue *q) 512 { 513 lockdep_assert_held(q->queue_lock); 514 WARN_ON_ONCE(q->mq_ops); 515 516 if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q))) 517 mod_delayed_work(kblockd_workqueue, &q->delay_work, 0); 518 } 519 EXPORT_SYMBOL(blk_run_queue_async); 520 521 /** 522 * blk_run_queue - run a single device queue 523 * @q: The queue to run 524 * 525 * Description: 526 * Invoke request handling on this queue, if it has pending work to do. 527 * May be used to restart queueing when a request has completed. 528 */ 529 void blk_run_queue(struct request_queue *q) 530 { 531 unsigned long flags; 532 533 WARN_ON_ONCE(q->mq_ops); 534 535 spin_lock_irqsave(q->queue_lock, flags); 536 __blk_run_queue(q); 537 spin_unlock_irqrestore(q->queue_lock, flags); 538 } 539 EXPORT_SYMBOL(blk_run_queue); 540 541 void blk_put_queue(struct request_queue *q) 542 { 543 kobject_put(&q->kobj); 544 } 545 EXPORT_SYMBOL(blk_put_queue); 546 547 /** 548 * __blk_drain_queue - drain requests from request_queue 549 * @q: queue to drain 550 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV 551 * 552 * Drain requests from @q. If @drain_all is set, all requests are drained. 553 * If not, only ELVPRIV requests are drained. The caller is responsible 554 * for ensuring that no new requests which need to be drained are queued. 555 */ 556 static void __blk_drain_queue(struct request_queue *q, bool drain_all) 557 __releases(q->queue_lock) 558 __acquires(q->queue_lock) 559 { 560 int i; 561 562 lockdep_assert_held(q->queue_lock); 563 WARN_ON_ONCE(q->mq_ops); 564 565 while (true) { 566 bool drain = false; 567 568 /* 569 * The caller might be trying to drain @q before its 570 * elevator is initialized. 571 */ 572 if (q->elevator) 573 elv_drain_elevator(q); 574 575 blkcg_drain_queue(q); 576 577 /* 578 * This function might be called on a queue which failed 579 * driver init after queue creation or is not yet fully 580 * active yet. Some drivers (e.g. fd and loop) get unhappy 581 * in such cases. Kick queue iff dispatch queue has 582 * something on it and @q has request_fn set. 583 */ 584 if (!list_empty(&q->queue_head) && q->request_fn) 585 __blk_run_queue(q); 586 587 drain |= q->nr_rqs_elvpriv; 588 drain |= q->request_fn_active; 589 590 /* 591 * Unfortunately, requests are queued at and tracked from 592 * multiple places and there's no single counter which can 593 * be drained. Check all the queues and counters. 594 */ 595 if (drain_all) { 596 struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL); 597 drain |= !list_empty(&q->queue_head); 598 for (i = 0; i < 2; i++) { 599 drain |= q->nr_rqs[i]; 600 drain |= q->in_flight[i]; 601 if (fq) 602 drain |= !list_empty(&fq->flush_queue[i]); 603 } 604 } 605 606 if (!drain) 607 break; 608 609 spin_unlock_irq(q->queue_lock); 610 611 msleep(10); 612 613 spin_lock_irq(q->queue_lock); 614 } 615 616 /* 617 * With queue marked dead, any woken up waiter will fail the 618 * allocation path, so the wakeup chaining is lost and we're 619 * left with hung waiters. We need to wake up those waiters. 620 */ 621 if (q->request_fn) { 622 struct request_list *rl; 623 624 blk_queue_for_each_rl(rl, q) 625 for (i = 0; i < ARRAY_SIZE(rl->wait); i++) 626 wake_up_all(&rl->wait[i]); 627 } 628 } 629 630 void blk_drain_queue(struct request_queue *q) 631 { 632 spin_lock_irq(q->queue_lock); 633 __blk_drain_queue(q, true); 634 spin_unlock_irq(q->queue_lock); 635 } 636 637 /** 638 * blk_queue_bypass_start - enter queue bypass mode 639 * @q: queue of interest 640 * 641 * In bypass mode, only the dispatch FIFO queue of @q is used. This 642 * function makes @q enter bypass mode and drains all requests which were 643 * throttled or issued before. On return, it's guaranteed that no request 644 * is being throttled or has ELVPRIV set and blk_queue_bypass() %true 645 * inside queue or RCU read lock. 646 */ 647 void blk_queue_bypass_start(struct request_queue *q) 648 { 649 WARN_ON_ONCE(q->mq_ops); 650 651 spin_lock_irq(q->queue_lock); 652 q->bypass_depth++; 653 queue_flag_set(QUEUE_FLAG_BYPASS, q); 654 spin_unlock_irq(q->queue_lock); 655 656 /* 657 * Queues start drained. Skip actual draining till init is 658 * complete. This avoids lenghty delays during queue init which 659 * can happen many times during boot. 660 */ 661 if (blk_queue_init_done(q)) { 662 spin_lock_irq(q->queue_lock); 663 __blk_drain_queue(q, false); 664 spin_unlock_irq(q->queue_lock); 665 666 /* ensure blk_queue_bypass() is %true inside RCU read lock */ 667 synchronize_rcu(); 668 } 669 } 670 EXPORT_SYMBOL_GPL(blk_queue_bypass_start); 671 672 /** 673 * blk_queue_bypass_end - leave queue bypass mode 674 * @q: queue of interest 675 * 676 * Leave bypass mode and restore the normal queueing behavior. 677 * 678 * Note: although blk_queue_bypass_start() is only called for blk-sq queues, 679 * this function is called for both blk-sq and blk-mq queues. 680 */ 681 void blk_queue_bypass_end(struct request_queue *q) 682 { 683 spin_lock_irq(q->queue_lock); 684 if (!--q->bypass_depth) 685 queue_flag_clear(QUEUE_FLAG_BYPASS, q); 686 WARN_ON_ONCE(q->bypass_depth < 0); 687 spin_unlock_irq(q->queue_lock); 688 } 689 EXPORT_SYMBOL_GPL(blk_queue_bypass_end); 690 691 void blk_set_queue_dying(struct request_queue *q) 692 { 693 blk_queue_flag_set(QUEUE_FLAG_DYING, q); 694 695 /* 696 * When queue DYING flag is set, we need to block new req 697 * entering queue, so we call blk_freeze_queue_start() to 698 * prevent I/O from crossing blk_queue_enter(). 699 */ 700 blk_freeze_queue_start(q); 701 702 if (q->mq_ops) 703 blk_mq_wake_waiters(q); 704 else { 705 struct request_list *rl; 706 707 spin_lock_irq(q->queue_lock); 708 blk_queue_for_each_rl(rl, q) { 709 if (rl->rq_pool) { 710 wake_up_all(&rl->wait[BLK_RW_SYNC]); 711 wake_up_all(&rl->wait[BLK_RW_ASYNC]); 712 } 713 } 714 spin_unlock_irq(q->queue_lock); 715 } 716 717 /* Make blk_queue_enter() reexamine the DYING flag. */ 718 wake_up_all(&q->mq_freeze_wq); 719 } 720 EXPORT_SYMBOL_GPL(blk_set_queue_dying); 721 722 /** 723 * blk_cleanup_queue - shutdown a request queue 724 * @q: request queue to shutdown 725 * 726 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and 727 * put it. All future requests will be failed immediately with -ENODEV. 728 */ 729 void blk_cleanup_queue(struct request_queue *q) 730 { 731 spinlock_t *lock = q->queue_lock; 732 733 /* mark @q DYING, no new request or merges will be allowed afterwards */ 734 mutex_lock(&q->sysfs_lock); 735 blk_set_queue_dying(q); 736 spin_lock_irq(lock); 737 738 /* 739 * A dying queue is permanently in bypass mode till released. Note 740 * that, unlike blk_queue_bypass_start(), we aren't performing 741 * synchronize_rcu() after entering bypass mode to avoid the delay 742 * as some drivers create and destroy a lot of queues while 743 * probing. This is still safe because blk_release_queue() will be 744 * called only after the queue refcnt drops to zero and nothing, 745 * RCU or not, would be traversing the queue by then. 746 */ 747 q->bypass_depth++; 748 queue_flag_set(QUEUE_FLAG_BYPASS, q); 749 750 queue_flag_set(QUEUE_FLAG_NOMERGES, q); 751 queue_flag_set(QUEUE_FLAG_NOXMERGES, q); 752 queue_flag_set(QUEUE_FLAG_DYING, q); 753 spin_unlock_irq(lock); 754 mutex_unlock(&q->sysfs_lock); 755 756 /* 757 * Drain all requests queued before DYING marking. Set DEAD flag to 758 * prevent that q->request_fn() gets invoked after draining finished. 759 */ 760 blk_freeze_queue(q); 761 spin_lock_irq(lock); 762 queue_flag_set(QUEUE_FLAG_DEAD, q); 763 spin_unlock_irq(lock); 764 765 /* 766 * make sure all in-progress dispatch are completed because 767 * blk_freeze_queue() can only complete all requests, and 768 * dispatch may still be in-progress since we dispatch requests 769 * from more than one contexts 770 */ 771 if (q->mq_ops) 772 blk_mq_quiesce_queue(q); 773 774 /* for synchronous bio-based driver finish in-flight integrity i/o */ 775 blk_flush_integrity(); 776 777 /* @q won't process any more request, flush async actions */ 778 del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer); 779 blk_sync_queue(q); 780 781 /* 782 * I/O scheduler exit is only safe after the sysfs scheduler attribute 783 * has been removed. 784 */ 785 WARN_ON_ONCE(q->kobj.state_in_sysfs); 786 787 /* 788 * Since the I/O scheduler exit code may access cgroup information, 789 * perform I/O scheduler exit before disassociating from the block 790 * cgroup controller. 791 */ 792 if (q->elevator) { 793 ioc_clear_queue(q); 794 elevator_exit(q, q->elevator); 795 q->elevator = NULL; 796 } 797 798 /* 799 * Remove all references to @q from the block cgroup controller before 800 * restoring @q->queue_lock to avoid that restoring this pointer causes 801 * e.g. blkcg_print_blkgs() to crash. 802 */ 803 blkcg_exit_queue(q); 804 805 /* 806 * Since the cgroup code may dereference the @q->backing_dev_info 807 * pointer, only decrease its reference count after having removed the 808 * association with the block cgroup controller. 809 */ 810 bdi_put(q->backing_dev_info); 811 812 if (q->mq_ops) 813 blk_mq_free_queue(q); 814 percpu_ref_exit(&q->q_usage_counter); 815 816 spin_lock_irq(lock); 817 if (q->queue_lock != &q->__queue_lock) 818 q->queue_lock = &q->__queue_lock; 819 spin_unlock_irq(lock); 820 821 /* @q is and will stay empty, shutdown and put */ 822 blk_put_queue(q); 823 } 824 EXPORT_SYMBOL(blk_cleanup_queue); 825 826 /* Allocate memory local to the request queue */ 827 static void *alloc_request_simple(gfp_t gfp_mask, void *data) 828 { 829 struct request_queue *q = data; 830 831 return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node); 832 } 833 834 static void free_request_simple(void *element, void *data) 835 { 836 kmem_cache_free(request_cachep, element); 837 } 838 839 static void *alloc_request_size(gfp_t gfp_mask, void *data) 840 { 841 struct request_queue *q = data; 842 struct request *rq; 843 844 rq = kmalloc_node(sizeof(struct request) + q->cmd_size, gfp_mask, 845 q->node); 846 if (rq && q->init_rq_fn && q->init_rq_fn(q, rq, gfp_mask) < 0) { 847 kfree(rq); 848 rq = NULL; 849 } 850 return rq; 851 } 852 853 static void free_request_size(void *element, void *data) 854 { 855 struct request_queue *q = data; 856 857 if (q->exit_rq_fn) 858 q->exit_rq_fn(q, element); 859 kfree(element); 860 } 861 862 int blk_init_rl(struct request_list *rl, struct request_queue *q, 863 gfp_t gfp_mask) 864 { 865 if (unlikely(rl->rq_pool) || q->mq_ops) 866 return 0; 867 868 rl->q = q; 869 rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; 870 rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; 871 init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); 872 init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); 873 874 if (q->cmd_size) { 875 rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, 876 alloc_request_size, free_request_size, 877 q, gfp_mask, q->node); 878 } else { 879 rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, 880 alloc_request_simple, free_request_simple, 881 q, gfp_mask, q->node); 882 } 883 if (!rl->rq_pool) 884 return -ENOMEM; 885 886 if (rl != &q->root_rl) 887 WARN_ON_ONCE(!blk_get_queue(q)); 888 889 return 0; 890 } 891 892 void blk_exit_rl(struct request_queue *q, struct request_list *rl) 893 { 894 if (rl->rq_pool) { 895 mempool_destroy(rl->rq_pool); 896 if (rl != &q->root_rl) 897 blk_put_queue(q); 898 } 899 } 900 901 struct request_queue *blk_alloc_queue(gfp_t gfp_mask) 902 { 903 return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE, NULL); 904 } 905 EXPORT_SYMBOL(blk_alloc_queue); 906 907 /** 908 * blk_queue_enter() - try to increase q->q_usage_counter 909 * @q: request queue pointer 910 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PREEMPT 911 */ 912 int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags) 913 { 914 const bool preempt = flags & BLK_MQ_REQ_PREEMPT; 915 916 while (true) { 917 bool success = false; 918 919 rcu_read_lock(); 920 if (percpu_ref_tryget_live(&q->q_usage_counter)) { 921 /* 922 * The code that sets the PREEMPT_ONLY flag is 923 * responsible for ensuring that that flag is globally 924 * visible before the queue is unfrozen. 925 */ 926 if (preempt || !blk_queue_preempt_only(q)) { 927 success = true; 928 } else { 929 percpu_ref_put(&q->q_usage_counter); 930 } 931 } 932 rcu_read_unlock(); 933 934 if (success) 935 return 0; 936 937 if (flags & BLK_MQ_REQ_NOWAIT) 938 return -EBUSY; 939 940 /* 941 * read pair of barrier in blk_freeze_queue_start(), 942 * we need to order reading __PERCPU_REF_DEAD flag of 943 * .q_usage_counter and reading .mq_freeze_depth or 944 * queue dying flag, otherwise the following wait may 945 * never return if the two reads are reordered. 946 */ 947 smp_rmb(); 948 949 wait_event(q->mq_freeze_wq, 950 (atomic_read(&q->mq_freeze_depth) == 0 && 951 (preempt || !blk_queue_preempt_only(q))) || 952 blk_queue_dying(q)); 953 if (blk_queue_dying(q)) 954 return -ENODEV; 955 } 956 } 957 958 void blk_queue_exit(struct request_queue *q) 959 { 960 percpu_ref_put(&q->q_usage_counter); 961 } 962 963 static void blk_queue_usage_counter_release(struct percpu_ref *ref) 964 { 965 struct request_queue *q = 966 container_of(ref, struct request_queue, q_usage_counter); 967 968 wake_up_all(&q->mq_freeze_wq); 969 } 970 971 static void blk_rq_timed_out_timer(struct timer_list *t) 972 { 973 struct request_queue *q = from_timer(q, t, timeout); 974 975 kblockd_schedule_work(&q->timeout_work); 976 } 977 978 /** 979 * blk_alloc_queue_node - allocate a request queue 980 * @gfp_mask: memory allocation flags 981 * @node_id: NUMA node to allocate memory from 982 * @lock: For legacy queues, pointer to a spinlock that will be used to e.g. 983 * serialize calls to the legacy .request_fn() callback. Ignored for 984 * blk-mq request queues. 985 * 986 * Note: pass the queue lock as the third argument to this function instead of 987 * setting the queue lock pointer explicitly to avoid triggering a sporadic 988 * crash in the blkcg code. This function namely calls blkcg_init_queue() and 989 * the queue lock pointer must be set before blkcg_init_queue() is called. 990 */ 991 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, 992 spinlock_t *lock) 993 { 994 struct request_queue *q; 995 int ret; 996 997 q = kmem_cache_alloc_node(blk_requestq_cachep, 998 gfp_mask | __GFP_ZERO, node_id); 999 if (!q) 1000 return NULL; 1001 1002 INIT_LIST_HEAD(&q->queue_head); 1003 q->last_merge = NULL; 1004 q->end_sector = 0; 1005 q->boundary_rq = NULL; 1006 1007 q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask); 1008 if (q->id < 0) 1009 goto fail_q; 1010 1011 ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); 1012 if (ret) 1013 goto fail_id; 1014 1015 q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id); 1016 if (!q->backing_dev_info) 1017 goto fail_split; 1018 1019 q->stats = blk_alloc_queue_stats(); 1020 if (!q->stats) 1021 goto fail_stats; 1022 1023 q->backing_dev_info->ra_pages = 1024 (VM_MAX_READAHEAD * 1024) / PAGE_SIZE; 1025 q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK; 1026 q->backing_dev_info->name = "block"; 1027 q->node = node_id; 1028 1029 timer_setup(&q->backing_dev_info->laptop_mode_wb_timer, 1030 laptop_mode_timer_fn, 0); 1031 timer_setup(&q->timeout, blk_rq_timed_out_timer, 0); 1032 INIT_WORK(&q->timeout_work, NULL); 1033 INIT_LIST_HEAD(&q->queue_head); 1034 INIT_LIST_HEAD(&q->timeout_list); 1035 INIT_LIST_HEAD(&q->icq_list); 1036 #ifdef CONFIG_BLK_CGROUP 1037 INIT_LIST_HEAD(&q->blkg_list); 1038 #endif 1039 INIT_DELAYED_WORK(&q->delay_work, blk_delay_work); 1040 1041 kobject_init(&q->kobj, &blk_queue_ktype); 1042 1043 #ifdef CONFIG_BLK_DEV_IO_TRACE 1044 mutex_init(&q->blk_trace_mutex); 1045 #endif 1046 mutex_init(&q->sysfs_lock); 1047 spin_lock_init(&q->__queue_lock); 1048 1049 if (!q->mq_ops) 1050 q->queue_lock = lock ? : &q->__queue_lock; 1051 1052 /* 1053 * A queue starts its life with bypass turned on to avoid 1054 * unnecessary bypass on/off overhead and nasty surprises during 1055 * init. The initial bypass will be finished when the queue is 1056 * registered by blk_register_queue(). 1057 */ 1058 q->bypass_depth = 1; 1059 queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q); 1060 1061 init_waitqueue_head(&q->mq_freeze_wq); 1062 1063 /* 1064 * Init percpu_ref in atomic mode so that it's faster to shutdown. 1065 * See blk_register_queue() for details. 1066 */ 1067 if (percpu_ref_init(&q->q_usage_counter, 1068 blk_queue_usage_counter_release, 1069 PERCPU_REF_INIT_ATOMIC, GFP_KERNEL)) 1070 goto fail_bdi; 1071 1072 if (blkcg_init_queue(q)) 1073 goto fail_ref; 1074 1075 return q; 1076 1077 fail_ref: 1078 percpu_ref_exit(&q->q_usage_counter); 1079 fail_bdi: 1080 blk_free_queue_stats(q->stats); 1081 fail_stats: 1082 bdi_put(q->backing_dev_info); 1083 fail_split: 1084 bioset_exit(&q->bio_split); 1085 fail_id: 1086 ida_simple_remove(&blk_queue_ida, q->id); 1087 fail_q: 1088 kmem_cache_free(blk_requestq_cachep, q); 1089 return NULL; 1090 } 1091 EXPORT_SYMBOL(blk_alloc_queue_node); 1092 1093 /** 1094 * blk_init_queue - prepare a request queue for use with a block device 1095 * @rfn: The function to be called to process requests that have been 1096 * placed on the queue. 1097 * @lock: Request queue spin lock 1098 * 1099 * Description: 1100 * If a block device wishes to use the standard request handling procedures, 1101 * which sorts requests and coalesces adjacent requests, then it must 1102 * call blk_init_queue(). The function @rfn will be called when there 1103 * are requests on the queue that need to be processed. If the device 1104 * supports plugging, then @rfn may not be called immediately when requests 1105 * are available on the queue, but may be called at some time later instead. 1106 * Plugged queues are generally unplugged when a buffer belonging to one 1107 * of the requests on the queue is needed, or due to memory pressure. 1108 * 1109 * @rfn is not required, or even expected, to remove all requests off the 1110 * queue, but only as many as it can handle at a time. If it does leave 1111 * requests on the queue, it is responsible for arranging that the requests 1112 * get dealt with eventually. 1113 * 1114 * The queue spin lock must be held while manipulating the requests on the 1115 * request queue; this lock will be taken also from interrupt context, so irq 1116 * disabling is needed for it. 1117 * 1118 * Function returns a pointer to the initialized request queue, or %NULL if 1119 * it didn't succeed. 1120 * 1121 * Note: 1122 * blk_init_queue() must be paired with a blk_cleanup_queue() call 1123 * when the block device is deactivated (such as at module unload). 1124 **/ 1125 1126 struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock) 1127 { 1128 return blk_init_queue_node(rfn, lock, NUMA_NO_NODE); 1129 } 1130 EXPORT_SYMBOL(blk_init_queue); 1131 1132 struct request_queue * 1133 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) 1134 { 1135 struct request_queue *q; 1136 1137 q = blk_alloc_queue_node(GFP_KERNEL, node_id, lock); 1138 if (!q) 1139 return NULL; 1140 1141 q->request_fn = rfn; 1142 if (blk_init_allocated_queue(q) < 0) { 1143 blk_cleanup_queue(q); 1144 return NULL; 1145 } 1146 1147 return q; 1148 } 1149 EXPORT_SYMBOL(blk_init_queue_node); 1150 1151 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio); 1152 1153 1154 int blk_init_allocated_queue(struct request_queue *q) 1155 { 1156 WARN_ON_ONCE(q->mq_ops); 1157 1158 q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size); 1159 if (!q->fq) 1160 return -ENOMEM; 1161 1162 if (q->init_rq_fn && q->init_rq_fn(q, q->fq->flush_rq, GFP_KERNEL)) 1163 goto out_free_flush_queue; 1164 1165 if (blk_init_rl(&q->root_rl, q, GFP_KERNEL)) 1166 goto out_exit_flush_rq; 1167 1168 INIT_WORK(&q->timeout_work, blk_timeout_work); 1169 q->queue_flags |= QUEUE_FLAG_DEFAULT; 1170 1171 /* 1172 * This also sets hw/phys segments, boundary and size 1173 */ 1174 blk_queue_make_request(q, blk_queue_bio); 1175 1176 q->sg_reserved_size = INT_MAX; 1177 1178 if (elevator_init(q)) 1179 goto out_exit_flush_rq; 1180 return 0; 1181 1182 out_exit_flush_rq: 1183 if (q->exit_rq_fn) 1184 q->exit_rq_fn(q, q->fq->flush_rq); 1185 out_free_flush_queue: 1186 blk_free_flush_queue(q->fq); 1187 return -ENOMEM; 1188 } 1189 EXPORT_SYMBOL(blk_init_allocated_queue); 1190 1191 bool blk_get_queue(struct request_queue *q) 1192 { 1193 if (likely(!blk_queue_dying(q))) { 1194 __blk_get_queue(q); 1195 return true; 1196 } 1197 1198 return false; 1199 } 1200 EXPORT_SYMBOL(blk_get_queue); 1201 1202 static inline void blk_free_request(struct request_list *rl, struct request *rq) 1203 { 1204 if (rq->rq_flags & RQF_ELVPRIV) { 1205 elv_put_request(rl->q, rq); 1206 if (rq->elv.icq) 1207 put_io_context(rq->elv.icq->ioc); 1208 } 1209 1210 mempool_free(rq, rl->rq_pool); 1211 } 1212 1213 /* 1214 * ioc_batching returns true if the ioc is a valid batching request and 1215 * should be given priority access to a request. 1216 */ 1217 static inline int ioc_batching(struct request_queue *q, struct io_context *ioc) 1218 { 1219 if (!ioc) 1220 return 0; 1221 1222 /* 1223 * Make sure the process is able to allocate at least 1 request 1224 * even if the batch times out, otherwise we could theoretically 1225 * lose wakeups. 1226 */ 1227 return ioc->nr_batch_requests == q->nr_batching || 1228 (ioc->nr_batch_requests > 0 1229 && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME)); 1230 } 1231 1232 /* 1233 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This 1234 * will cause the process to be a "batcher" on all queues in the system. This 1235 * is the behaviour we want though - once it gets a wakeup it should be given 1236 * a nice run. 1237 */ 1238 static void ioc_set_batching(struct request_queue *q, struct io_context *ioc) 1239 { 1240 if (!ioc || ioc_batching(q, ioc)) 1241 return; 1242 1243 ioc->nr_batch_requests = q->nr_batching; 1244 ioc->last_waited = jiffies; 1245 } 1246 1247 static void __freed_request(struct request_list *rl, int sync) 1248 { 1249 struct request_queue *q = rl->q; 1250 1251 if (rl->count[sync] < queue_congestion_off_threshold(q)) 1252 blk_clear_congested(rl, sync); 1253 1254 if (rl->count[sync] + 1 <= q->nr_requests) { 1255 if (waitqueue_active(&rl->wait[sync])) 1256 wake_up(&rl->wait[sync]); 1257 1258 blk_clear_rl_full(rl, sync); 1259 } 1260 } 1261 1262 /* 1263 * A request has just been released. Account for it, update the full and 1264 * congestion status, wake up any waiters. Called under q->queue_lock. 1265 */ 1266 static void freed_request(struct request_list *rl, bool sync, 1267 req_flags_t rq_flags) 1268 { 1269 struct request_queue *q = rl->q; 1270 1271 q->nr_rqs[sync]--; 1272 rl->count[sync]--; 1273 if (rq_flags & RQF_ELVPRIV) 1274 q->nr_rqs_elvpriv--; 1275 1276 __freed_request(rl, sync); 1277 1278 if (unlikely(rl->starved[sync ^ 1])) 1279 __freed_request(rl, sync ^ 1); 1280 } 1281 1282 int blk_update_nr_requests(struct request_queue *q, unsigned int nr) 1283 { 1284 struct request_list *rl; 1285 int on_thresh, off_thresh; 1286 1287 WARN_ON_ONCE(q->mq_ops); 1288 1289 spin_lock_irq(q->queue_lock); 1290 q->nr_requests = nr; 1291 blk_queue_congestion_threshold(q); 1292 on_thresh = queue_congestion_on_threshold(q); 1293 off_thresh = queue_congestion_off_threshold(q); 1294 1295 blk_queue_for_each_rl(rl, q) { 1296 if (rl->count[BLK_RW_SYNC] >= on_thresh) 1297 blk_set_congested(rl, BLK_RW_SYNC); 1298 else if (rl->count[BLK_RW_SYNC] < off_thresh) 1299 blk_clear_congested(rl, BLK_RW_SYNC); 1300 1301 if (rl->count[BLK_RW_ASYNC] >= on_thresh) 1302 blk_set_congested(rl, BLK_RW_ASYNC); 1303 else if (rl->count[BLK_RW_ASYNC] < off_thresh) 1304 blk_clear_congested(rl, BLK_RW_ASYNC); 1305 1306 if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { 1307 blk_set_rl_full(rl, BLK_RW_SYNC); 1308 } else { 1309 blk_clear_rl_full(rl, BLK_RW_SYNC); 1310 wake_up(&rl->wait[BLK_RW_SYNC]); 1311 } 1312 1313 if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { 1314 blk_set_rl_full(rl, BLK_RW_ASYNC); 1315 } else { 1316 blk_clear_rl_full(rl, BLK_RW_ASYNC); 1317 wake_up(&rl->wait[BLK_RW_ASYNC]); 1318 } 1319 } 1320 1321 spin_unlock_irq(q->queue_lock); 1322 return 0; 1323 } 1324 1325 /** 1326 * __get_request - get a free request 1327 * @rl: request list to allocate from 1328 * @op: operation and flags 1329 * @bio: bio to allocate request for (can be %NULL) 1330 * @flags: BLQ_MQ_REQ_* flags 1331 * @gfp_mask: allocator flags 1332 * 1333 * Get a free request from @q. This function may fail under memory 1334 * pressure or if @q is dead. 1335 * 1336 * Must be called with @q->queue_lock held and, 1337 * Returns ERR_PTR on failure, with @q->queue_lock held. 1338 * Returns request pointer on success, with @q->queue_lock *not held*. 1339 */ 1340 static struct request *__get_request(struct request_list *rl, unsigned int op, 1341 struct bio *bio, blk_mq_req_flags_t flags, gfp_t gfp_mask) 1342 { 1343 struct request_queue *q = rl->q; 1344 struct request *rq; 1345 struct elevator_type *et = q->elevator->type; 1346 struct io_context *ioc = rq_ioc(bio); 1347 struct io_cq *icq = NULL; 1348 const bool is_sync = op_is_sync(op); 1349 int may_queue; 1350 req_flags_t rq_flags = RQF_ALLOCED; 1351 1352 lockdep_assert_held(q->queue_lock); 1353 1354 if (unlikely(blk_queue_dying(q))) 1355 return ERR_PTR(-ENODEV); 1356 1357 may_queue = elv_may_queue(q, op); 1358 if (may_queue == ELV_MQUEUE_NO) 1359 goto rq_starved; 1360 1361 if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { 1362 if (rl->count[is_sync]+1 >= q->nr_requests) { 1363 /* 1364 * The queue will fill after this allocation, so set 1365 * it as full, and mark this process as "batching". 1366 * This process will be allowed to complete a batch of 1367 * requests, others will be blocked. 1368 */ 1369 if (!blk_rl_full(rl, is_sync)) { 1370 ioc_set_batching(q, ioc); 1371 blk_set_rl_full(rl, is_sync); 1372 } else { 1373 if (may_queue != ELV_MQUEUE_MUST 1374 && !ioc_batching(q, ioc)) { 1375 /* 1376 * The queue is full and the allocating 1377 * process is not a "batcher", and not 1378 * exempted by the IO scheduler 1379 */ 1380 return ERR_PTR(-ENOMEM); 1381 } 1382 } 1383 } 1384 blk_set_congested(rl, is_sync); 1385 } 1386 1387 /* 1388 * Only allow batching queuers to allocate up to 50% over the defined 1389 * limit of requests, otherwise we could have thousands of requests 1390 * allocated with any setting of ->nr_requests 1391 */ 1392 if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) 1393 return ERR_PTR(-ENOMEM); 1394 1395 q->nr_rqs[is_sync]++; 1396 rl->count[is_sync]++; 1397 rl->starved[is_sync] = 0; 1398 1399 /* 1400 * Decide whether the new request will be managed by elevator. If 1401 * so, mark @rq_flags and increment elvpriv. Non-zero elvpriv will 1402 * prevent the current elevator from being destroyed until the new 1403 * request is freed. This guarantees icq's won't be destroyed and 1404 * makes creating new ones safe. 1405 * 1406 * Flush requests do not use the elevator so skip initialization. 1407 * This allows a request to share the flush and elevator data. 1408 * 1409 * Also, lookup icq while holding queue_lock. If it doesn't exist, 1410 * it will be created after releasing queue_lock. 1411 */ 1412 if (!op_is_flush(op) && !blk_queue_bypass(q)) { 1413 rq_flags |= RQF_ELVPRIV; 1414 q->nr_rqs_elvpriv++; 1415 if (et->icq_cache && ioc) 1416 icq = ioc_lookup_icq(ioc, q); 1417 } 1418 1419 if (blk_queue_io_stat(q)) 1420 rq_flags |= RQF_IO_STAT; 1421 spin_unlock_irq(q->queue_lock); 1422 1423 /* allocate and init request */ 1424 rq = mempool_alloc(rl->rq_pool, gfp_mask); 1425 if (!rq) 1426 goto fail_alloc; 1427 1428 blk_rq_init(q, rq); 1429 blk_rq_set_rl(rq, rl); 1430 rq->cmd_flags = op; 1431 rq->rq_flags = rq_flags; 1432 if (flags & BLK_MQ_REQ_PREEMPT) 1433 rq->rq_flags |= RQF_PREEMPT; 1434 1435 /* init elvpriv */ 1436 if (rq_flags & RQF_ELVPRIV) { 1437 if (unlikely(et->icq_cache && !icq)) { 1438 if (ioc) 1439 icq = ioc_create_icq(ioc, q, gfp_mask); 1440 if (!icq) 1441 goto fail_elvpriv; 1442 } 1443 1444 rq->elv.icq = icq; 1445 if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) 1446 goto fail_elvpriv; 1447 1448 /* @rq->elv.icq holds io_context until @rq is freed */ 1449 if (icq) 1450 get_io_context(icq->ioc); 1451 } 1452 out: 1453 /* 1454 * ioc may be NULL here, and ioc_batching will be false. That's 1455 * OK, if the queue is under the request limit then requests need 1456 * not count toward the nr_batch_requests limit. There will always 1457 * be some limit enforced by BLK_BATCH_TIME. 1458 */ 1459 if (ioc_batching(q, ioc)) 1460 ioc->nr_batch_requests--; 1461 1462 trace_block_getrq(q, bio, op); 1463 return rq; 1464 1465 fail_elvpriv: 1466 /* 1467 * elvpriv init failed. ioc, icq and elvpriv aren't mempool backed 1468 * and may fail indefinitely under memory pressure and thus 1469 * shouldn't stall IO. Treat this request as !elvpriv. This will 1470 * disturb iosched and blkcg but weird is bettern than dead. 1471 */ 1472 printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n", 1473 __func__, dev_name(q->backing_dev_info->dev)); 1474 1475 rq->rq_flags &= ~RQF_ELVPRIV; 1476 rq->elv.icq = NULL; 1477 1478 spin_lock_irq(q->queue_lock); 1479 q->nr_rqs_elvpriv--; 1480 spin_unlock_irq(q->queue_lock); 1481 goto out; 1482 1483 fail_alloc: 1484 /* 1485 * Allocation failed presumably due to memory. Undo anything we 1486 * might have messed up. 1487 * 1488 * Allocating task should really be put onto the front of the wait 1489 * queue, but this is pretty rare. 1490 */ 1491 spin_lock_irq(q->queue_lock); 1492 freed_request(rl, is_sync, rq_flags); 1493 1494 /* 1495 * in the very unlikely event that allocation failed and no 1496 * requests for this direction was pending, mark us starved so that 1497 * freeing of a request in the other direction will notice 1498 * us. another possible fix would be to split the rq mempool into 1499 * READ and WRITE 1500 */ 1501 rq_starved: 1502 if (unlikely(rl->count[is_sync] == 0)) 1503 rl->starved[is_sync] = 1; 1504 return ERR_PTR(-ENOMEM); 1505 } 1506 1507 /** 1508 * get_request - get a free request 1509 * @q: request_queue to allocate request from 1510 * @op: operation and flags 1511 * @bio: bio to allocate request for (can be %NULL) 1512 * @flags: BLK_MQ_REQ_* flags. 1513 * @gfp: allocator flags 1514 * 1515 * Get a free request from @q. If %BLK_MQ_REQ_NOWAIT is set in @flags, 1516 * this function keeps retrying under memory pressure and fails iff @q is dead. 1517 * 1518 * Must be called with @q->queue_lock held and, 1519 * Returns ERR_PTR on failure, with @q->queue_lock held. 1520 * Returns request pointer on success, with @q->queue_lock *not held*. 1521 */ 1522 static struct request *get_request(struct request_queue *q, unsigned int op, 1523 struct bio *bio, blk_mq_req_flags_t flags, gfp_t gfp) 1524 { 1525 const bool is_sync = op_is_sync(op); 1526 DEFINE_WAIT(wait); 1527 struct request_list *rl; 1528 struct request *rq; 1529 1530 lockdep_assert_held(q->queue_lock); 1531 WARN_ON_ONCE(q->mq_ops); 1532 1533 rl = blk_get_rl(q, bio); /* transferred to @rq on success */ 1534 retry: 1535 rq = __get_request(rl, op, bio, flags, gfp); 1536 if (!IS_ERR(rq)) 1537 return rq; 1538 1539 if (op & REQ_NOWAIT) { 1540 blk_put_rl(rl); 1541 return ERR_PTR(-EAGAIN); 1542 } 1543 1544 if ((flags & BLK_MQ_REQ_NOWAIT) || unlikely(blk_queue_dying(q))) { 1545 blk_put_rl(rl); 1546 return rq; 1547 } 1548 1549 /* wait on @rl and retry */ 1550 prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, 1551 TASK_UNINTERRUPTIBLE); 1552 1553 trace_block_sleeprq(q, bio, op); 1554 1555 spin_unlock_irq(q->queue_lock); 1556 io_schedule(); 1557 1558 /* 1559 * After sleeping, we become a "batching" process and will be able 1560 * to allocate at least one request, and up to a big batch of them 1561 * for a small period time. See ioc_batching, ioc_set_batching 1562 */ 1563 ioc_set_batching(q, current->io_context); 1564 1565 spin_lock_irq(q->queue_lock); 1566 finish_wait(&rl->wait[is_sync], &wait); 1567 1568 goto retry; 1569 } 1570 1571 /* flags: BLK_MQ_REQ_PREEMPT and/or BLK_MQ_REQ_NOWAIT. */ 1572 static struct request *blk_old_get_request(struct request_queue *q, 1573 unsigned int op, blk_mq_req_flags_t flags) 1574 { 1575 struct request *rq; 1576 gfp_t gfp_mask = flags & BLK_MQ_REQ_NOWAIT ? GFP_ATOMIC : GFP_NOIO; 1577 int ret = 0; 1578 1579 WARN_ON_ONCE(q->mq_ops); 1580 1581 /* create ioc upfront */ 1582 create_io_context(gfp_mask, q->node); 1583 1584 ret = blk_queue_enter(q, flags); 1585 if (ret) 1586 return ERR_PTR(ret); 1587 spin_lock_irq(q->queue_lock); 1588 rq = get_request(q, op, NULL, flags, gfp_mask); 1589 if (IS_ERR(rq)) { 1590 spin_unlock_irq(q->queue_lock); 1591 blk_queue_exit(q); 1592 return rq; 1593 } 1594 1595 /* q->queue_lock is unlocked at this point */ 1596 rq->__data_len = 0; 1597 rq->__sector = (sector_t) -1; 1598 rq->bio = rq->biotail = NULL; 1599 return rq; 1600 } 1601 1602 /** 1603 * blk_get_request - allocate a request 1604 * @q: request queue to allocate a request for 1605 * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC. 1606 * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT. 1607 */ 1608 struct request *blk_get_request(struct request_queue *q, unsigned int op, 1609 blk_mq_req_flags_t flags) 1610 { 1611 struct request *req; 1612 1613 WARN_ON_ONCE(op & REQ_NOWAIT); 1614 WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT)); 1615 1616 if (q->mq_ops) { 1617 req = blk_mq_alloc_request(q, op, flags); 1618 if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn) 1619 q->mq_ops->initialize_rq_fn(req); 1620 } else { 1621 req = blk_old_get_request(q, op, flags); 1622 if (!IS_ERR(req) && q->initialize_rq_fn) 1623 q->initialize_rq_fn(req); 1624 } 1625 1626 return req; 1627 } 1628 EXPORT_SYMBOL(blk_get_request); 1629 1630 /** 1631 * blk_requeue_request - put a request back on queue 1632 * @q: request queue where request should be inserted 1633 * @rq: request to be inserted 1634 * 1635 * Description: 1636 * Drivers often keep queueing requests until the hardware cannot accept 1637 * more, when that condition happens we need to put the request back 1638 * on the queue. Must be called with queue lock held. 1639 */ 1640 void blk_requeue_request(struct request_queue *q, struct request *rq) 1641 { 1642 lockdep_assert_held(q->queue_lock); 1643 WARN_ON_ONCE(q->mq_ops); 1644 1645 blk_delete_timer(rq); 1646 blk_clear_rq_complete(rq); 1647 trace_block_rq_requeue(q, rq); 1648 wbt_requeue(q->rq_wb, rq); 1649 1650 if (rq->rq_flags & RQF_QUEUED) 1651 blk_queue_end_tag(q, rq); 1652 1653 BUG_ON(blk_queued_rq(rq)); 1654 1655 elv_requeue_request(q, rq); 1656 } 1657 EXPORT_SYMBOL(blk_requeue_request); 1658 1659 static void add_acct_request(struct request_queue *q, struct request *rq, 1660 int where) 1661 { 1662 blk_account_io_start(rq, true); 1663 __elv_add_request(q, rq, where); 1664 } 1665 1666 static void part_round_stats_single(struct request_queue *q, int cpu, 1667 struct hd_struct *part, unsigned long now, 1668 unsigned int inflight) 1669 { 1670 if (inflight) { 1671 __part_stat_add(cpu, part, time_in_queue, 1672 inflight * (now - part->stamp)); 1673 __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); 1674 } 1675 part->stamp = now; 1676 } 1677 1678 /** 1679 * part_round_stats() - Round off the performance stats on a struct disk_stats. 1680 * @q: target block queue 1681 * @cpu: cpu number for stats access 1682 * @part: target partition 1683 * 1684 * The average IO queue length and utilisation statistics are maintained 1685 * by observing the current state of the queue length and the amount of 1686 * time it has been in this state for. 1687 * 1688 * Normally, that accounting is done on IO completion, but that can result 1689 * in more than a second's worth of IO being accounted for within any one 1690 * second, leading to >100% utilisation. To deal with that, we call this 1691 * function to do a round-off before returning the results when reading 1692 * /proc/diskstats. This accounts immediately for all queue usage up to 1693 * the current jiffies and restarts the counters again. 1694 */ 1695 void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part) 1696 { 1697 struct hd_struct *part2 = NULL; 1698 unsigned long now = jiffies; 1699 unsigned int inflight[2]; 1700 int stats = 0; 1701 1702 if (part->stamp != now) 1703 stats |= 1; 1704 1705 if (part->partno) { 1706 part2 = &part_to_disk(part)->part0; 1707 if (part2->stamp != now) 1708 stats |= 2; 1709 } 1710 1711 if (!stats) 1712 return; 1713 1714 part_in_flight(q, part, inflight); 1715 1716 if (stats & 2) 1717 part_round_stats_single(q, cpu, part2, now, inflight[1]); 1718 if (stats & 1) 1719 part_round_stats_single(q, cpu, part, now, inflight[0]); 1720 } 1721 EXPORT_SYMBOL_GPL(part_round_stats); 1722 1723 #ifdef CONFIG_PM 1724 static void blk_pm_put_request(struct request *rq) 1725 { 1726 if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending) 1727 pm_runtime_mark_last_busy(rq->q->dev); 1728 } 1729 #else 1730 static inline void blk_pm_put_request(struct request *rq) {} 1731 #endif 1732 1733 void __blk_put_request(struct request_queue *q, struct request *req) 1734 { 1735 req_flags_t rq_flags = req->rq_flags; 1736 1737 if (unlikely(!q)) 1738 return; 1739 1740 if (q->mq_ops) { 1741 blk_mq_free_request(req); 1742 return; 1743 } 1744 1745 lockdep_assert_held(q->queue_lock); 1746 1747 blk_req_zone_write_unlock(req); 1748 blk_pm_put_request(req); 1749 1750 elv_completed_request(q, req); 1751 1752 /* this is a bio leak */ 1753 WARN_ON(req->bio != NULL); 1754 1755 wbt_done(q->rq_wb, req); 1756 1757 /* 1758 * Request may not have originated from ll_rw_blk. if not, 1759 * it didn't come out of our reserved rq pools 1760 */ 1761 if (rq_flags & RQF_ALLOCED) { 1762 struct request_list *rl = blk_rq_rl(req); 1763 bool sync = op_is_sync(req->cmd_flags); 1764 1765 BUG_ON(!list_empty(&req->queuelist)); 1766 BUG_ON(ELV_ON_HASH(req)); 1767 1768 blk_free_request(rl, req); 1769 freed_request(rl, sync, rq_flags); 1770 blk_put_rl(rl); 1771 blk_queue_exit(q); 1772 } 1773 } 1774 EXPORT_SYMBOL_GPL(__blk_put_request); 1775 1776 void blk_put_request(struct request *req) 1777 { 1778 struct request_queue *q = req->q; 1779 1780 if (q->mq_ops) 1781 blk_mq_free_request(req); 1782 else { 1783 unsigned long flags; 1784 1785 spin_lock_irqsave(q->queue_lock, flags); 1786 __blk_put_request(q, req); 1787 spin_unlock_irqrestore(q->queue_lock, flags); 1788 } 1789 } 1790 EXPORT_SYMBOL(blk_put_request); 1791 1792 bool bio_attempt_back_merge(struct request_queue *q, struct request *req, 1793 struct bio *bio) 1794 { 1795 const int ff = bio->bi_opf & REQ_FAILFAST_MASK; 1796 1797 if (!ll_back_merge_fn(q, req, bio)) 1798 return false; 1799 1800 trace_block_bio_backmerge(q, req, bio); 1801 1802 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) 1803 blk_rq_set_mixed_merge(req); 1804 1805 req->biotail->bi_next = bio; 1806 req->biotail = bio; 1807 req->__data_len += bio->bi_iter.bi_size; 1808 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); 1809 1810 blk_account_io_start(req, false); 1811 return true; 1812 } 1813 1814 bool bio_attempt_front_merge(struct request_queue *q, struct request *req, 1815 struct bio *bio) 1816 { 1817 const int ff = bio->bi_opf & REQ_FAILFAST_MASK; 1818 1819 if (!ll_front_merge_fn(q, req, bio)) 1820 return false; 1821 1822 trace_block_bio_frontmerge(q, req, bio); 1823 1824 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) 1825 blk_rq_set_mixed_merge(req); 1826 1827 bio->bi_next = req->bio; 1828 req->bio = bio; 1829 1830 req->__sector = bio->bi_iter.bi_sector; 1831 req->__data_len += bio->bi_iter.bi_size; 1832 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); 1833 1834 blk_account_io_start(req, false); 1835 return true; 1836 } 1837 1838 bool bio_attempt_discard_merge(struct request_queue *q, struct request *req, 1839 struct bio *bio) 1840 { 1841 unsigned short segments = blk_rq_nr_discard_segments(req); 1842 1843 if (segments >= queue_max_discard_segments(q)) 1844 goto no_merge; 1845 if (blk_rq_sectors(req) + bio_sectors(bio) > 1846 blk_rq_get_max_sectors(req, blk_rq_pos(req))) 1847 goto no_merge; 1848 1849 req->biotail->bi_next = bio; 1850 req->biotail = bio; 1851 req->__data_len += bio->bi_iter.bi_size; 1852 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); 1853 req->nr_phys_segments = segments + 1; 1854 1855 blk_account_io_start(req, false); 1856 return true; 1857 no_merge: 1858 req_set_nomerge(q, req); 1859 return false; 1860 } 1861 1862 /** 1863 * blk_attempt_plug_merge - try to merge with %current's plugged list 1864 * @q: request_queue new bio is being queued at 1865 * @bio: new bio being queued 1866 * @request_count: out parameter for number of traversed plugged requests 1867 * @same_queue_rq: pointer to &struct request that gets filled in when 1868 * another request associated with @q is found on the plug list 1869 * (optional, may be %NULL) 1870 * 1871 * Determine whether @bio being queued on @q can be merged with a request 1872 * on %current's plugged list. Returns %true if merge was successful, 1873 * otherwise %false. 1874 * 1875 * Plugging coalesces IOs from the same issuer for the same purpose without 1876 * going through @q->queue_lock. As such it's more of an issuing mechanism 1877 * than scheduling, and the request, while may have elvpriv data, is not 1878 * added on the elevator at this point. In addition, we don't have 1879 * reliable access to the elevator outside queue lock. Only check basic 1880 * merging parameters without querying the elevator. 1881 * 1882 * Caller must ensure !blk_queue_nomerges(q) beforehand. 1883 */ 1884 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, 1885 unsigned int *request_count, 1886 struct request **same_queue_rq) 1887 { 1888 struct blk_plug *plug; 1889 struct request *rq; 1890 struct list_head *plug_list; 1891 1892 plug = current->plug; 1893 if (!plug) 1894 return false; 1895 *request_count = 0; 1896 1897 if (q->mq_ops) 1898 plug_list = &plug->mq_list; 1899 else 1900 plug_list = &plug->list; 1901 1902 list_for_each_entry_reverse(rq, plug_list, queuelist) { 1903 bool merged = false; 1904 1905 if (rq->q == q) { 1906 (*request_count)++; 1907 /* 1908 * Only blk-mq multiple hardware queues case checks the 1909 * rq in the same queue, there should be only one such 1910 * rq in a queue 1911 **/ 1912 if (same_queue_rq) 1913 *same_queue_rq = rq; 1914 } 1915 1916 if (rq->q != q || !blk_rq_merge_ok(rq, bio)) 1917 continue; 1918 1919 switch (blk_try_merge(rq, bio)) { 1920 case ELEVATOR_BACK_MERGE: 1921 merged = bio_attempt_back_merge(q, rq, bio); 1922 break; 1923 case ELEVATOR_FRONT_MERGE: 1924 merged = bio_attempt_front_merge(q, rq, bio); 1925 break; 1926 case ELEVATOR_DISCARD_MERGE: 1927 merged = bio_attempt_discard_merge(q, rq, bio); 1928 break; 1929 default: 1930 break; 1931 } 1932 1933 if (merged) 1934 return true; 1935 } 1936 1937 return false; 1938 } 1939 1940 unsigned int blk_plug_queued_count(struct request_queue *q) 1941 { 1942 struct blk_plug *plug; 1943 struct request *rq; 1944 struct list_head *plug_list; 1945 unsigned int ret = 0; 1946 1947 plug = current->plug; 1948 if (!plug) 1949 goto out; 1950 1951 if (q->mq_ops) 1952 plug_list = &plug->mq_list; 1953 else 1954 plug_list = &plug->list; 1955 1956 list_for_each_entry(rq, plug_list, queuelist) { 1957 if (rq->q == q) 1958 ret++; 1959 } 1960 out: 1961 return ret; 1962 } 1963 1964 void blk_init_request_from_bio(struct request *req, struct bio *bio) 1965 { 1966 struct io_context *ioc = rq_ioc(bio); 1967 1968 if (bio->bi_opf & REQ_RAHEAD) 1969 req->cmd_flags |= REQ_FAILFAST_MASK; 1970 1971 req->__sector = bio->bi_iter.bi_sector; 1972 if (ioprio_valid(bio_prio(bio))) 1973 req->ioprio = bio_prio(bio); 1974 else if (ioc) 1975 req->ioprio = ioc->ioprio; 1976 else 1977 req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0); 1978 req->write_hint = bio->bi_write_hint; 1979 blk_rq_bio_prep(req->q, req, bio); 1980 } 1981 EXPORT_SYMBOL_GPL(blk_init_request_from_bio); 1982 1983 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio) 1984 { 1985 struct blk_plug *plug; 1986 int where = ELEVATOR_INSERT_SORT; 1987 struct request *req, *free; 1988 unsigned int request_count = 0; 1989 unsigned int wb_acct; 1990 1991 /* 1992 * low level driver can indicate that it wants pages above a 1993 * certain limit bounced to low memory (ie for highmem, or even 1994 * ISA dma in theory) 1995 */ 1996 blk_queue_bounce(q, &bio); 1997 1998 blk_queue_split(q, &bio); 1999 2000 if (!bio_integrity_prep(bio)) 2001 return BLK_QC_T_NONE; 2002 2003 if (op_is_flush(bio->bi_opf)) { 2004 spin_lock_irq(q->queue_lock); 2005 where = ELEVATOR_INSERT_FLUSH; 2006 goto get_rq; 2007 } 2008 2009 /* 2010 * Check if we can merge with the plugged list before grabbing 2011 * any locks. 2012 */ 2013 if (!blk_queue_nomerges(q)) { 2014 if (blk_attempt_plug_merge(q, bio, &request_count, NULL)) 2015 return BLK_QC_T_NONE; 2016 } else 2017 request_count = blk_plug_queued_count(q); 2018 2019 spin_lock_irq(q->queue_lock); 2020 2021 switch (elv_merge(q, &req, bio)) { 2022 case ELEVATOR_BACK_MERGE: 2023 if (!bio_attempt_back_merge(q, req, bio)) 2024 break; 2025 elv_bio_merged(q, req, bio); 2026 free = attempt_back_merge(q, req); 2027 if (free) 2028 __blk_put_request(q, free); 2029 else 2030 elv_merged_request(q, req, ELEVATOR_BACK_MERGE); 2031 goto out_unlock; 2032 case ELEVATOR_FRONT_MERGE: 2033 if (!bio_attempt_front_merge(q, req, bio)) 2034 break; 2035 elv_bio_merged(q, req, bio); 2036 free = attempt_front_merge(q, req); 2037 if (free) 2038 __blk_put_request(q, free); 2039 else 2040 elv_merged_request(q, req, ELEVATOR_FRONT_MERGE); 2041 goto out_unlock; 2042 default: 2043 break; 2044 } 2045 2046 get_rq: 2047 wb_acct = wbt_wait(q->rq_wb, bio, q->queue_lock); 2048 2049 /* 2050 * Grab a free request. This is might sleep but can not fail. 2051 * Returns with the queue unlocked. 2052 */ 2053 blk_queue_enter_live(q); 2054 req = get_request(q, bio->bi_opf, bio, 0, GFP_NOIO); 2055 if (IS_ERR(req)) { 2056 blk_queue_exit(q); 2057 __wbt_done(q->rq_wb, wb_acct); 2058 if (PTR_ERR(req) == -ENOMEM) 2059 bio->bi_status = BLK_STS_RESOURCE; 2060 else 2061 bio->bi_status = BLK_STS_IOERR; 2062 bio_endio(bio); 2063 goto out_unlock; 2064 } 2065 2066 wbt_track(req, wb_acct); 2067 2068 /* 2069 * After dropping the lock and possibly sleeping here, our request 2070 * may now be mergeable after it had proven unmergeable (above). 2071 * We don't worry about that case for efficiency. It won't happen 2072 * often, and the elevators are able to handle it. 2073 */ 2074 blk_init_request_from_bio(req, bio); 2075 2076 if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) 2077 req->cpu = raw_smp_processor_id(); 2078 2079 plug = current->plug; 2080 if (plug) { 2081 /* 2082 * If this is the first request added after a plug, fire 2083 * of a plug trace. 2084 * 2085 * @request_count may become stale because of schedule 2086 * out, so check plug list again. 2087 */ 2088 if (!request_count || list_empty(&plug->list)) 2089 trace_block_plug(q); 2090 else { 2091 struct request *last = list_entry_rq(plug->list.prev); 2092 if (request_count >= BLK_MAX_REQUEST_COUNT || 2093 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE) { 2094 blk_flush_plug_list(plug, false); 2095 trace_block_plug(q); 2096 } 2097 } 2098 list_add_tail(&req->queuelist, &plug->list); 2099 blk_account_io_start(req, true); 2100 } else { 2101 spin_lock_irq(q->queue_lock); 2102 add_acct_request(q, req, where); 2103 __blk_run_queue(q); 2104 out_unlock: 2105 spin_unlock_irq(q->queue_lock); 2106 } 2107 2108 return BLK_QC_T_NONE; 2109 } 2110 2111 static void handle_bad_sector(struct bio *bio, sector_t maxsector) 2112 { 2113 char b[BDEVNAME_SIZE]; 2114 2115 printk(KERN_INFO "attempt to access beyond end of device\n"); 2116 printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n", 2117 bio_devname(bio, b), bio->bi_opf, 2118 (unsigned long long)bio_end_sector(bio), 2119 (long long)maxsector); 2120 } 2121 2122 #ifdef CONFIG_FAIL_MAKE_REQUEST 2123 2124 static DECLARE_FAULT_ATTR(fail_make_request); 2125 2126 static int __init setup_fail_make_request(char *str) 2127 { 2128 return setup_fault_attr(&fail_make_request, str); 2129 } 2130 __setup("fail_make_request=", setup_fail_make_request); 2131 2132 static bool should_fail_request(struct hd_struct *part, unsigned int bytes) 2133 { 2134 return part->make_it_fail && should_fail(&fail_make_request, bytes); 2135 } 2136 2137 static int __init fail_make_request_debugfs(void) 2138 { 2139 struct dentry *dir = fault_create_debugfs_attr("fail_make_request", 2140 NULL, &fail_make_request); 2141 2142 return PTR_ERR_OR_ZERO(dir); 2143 } 2144 2145 late_initcall(fail_make_request_debugfs); 2146 2147 #else /* CONFIG_FAIL_MAKE_REQUEST */ 2148 2149 static inline bool should_fail_request(struct hd_struct *part, 2150 unsigned int bytes) 2151 { 2152 return false; 2153 } 2154 2155 #endif /* CONFIG_FAIL_MAKE_REQUEST */ 2156 2157 static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part) 2158 { 2159 if (part->policy && op_is_write(bio_op(bio))) { 2160 char b[BDEVNAME_SIZE]; 2161 2162 printk(KERN_ERR 2163 "generic_make_request: Trying to write " 2164 "to read-only block-device %s (partno %d)\n", 2165 bio_devname(bio, b), part->partno); 2166 return true; 2167 } 2168 2169 return false; 2170 } 2171 2172 static noinline int should_fail_bio(struct bio *bio) 2173 { 2174 if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size)) 2175 return -EIO; 2176 return 0; 2177 } 2178 ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO); 2179 2180 /* 2181 * Check whether this bio extends beyond the end of the device or partition. 2182 * This may well happen - the kernel calls bread() without checking the size of 2183 * the device, e.g., when mounting a file system. 2184 */ 2185 static inline int bio_check_eod(struct bio *bio, sector_t maxsector) 2186 { 2187 unsigned int nr_sectors = bio_sectors(bio); 2188 2189 if (nr_sectors && maxsector && 2190 (nr_sectors > maxsector || 2191 bio->bi_iter.bi_sector > maxsector - nr_sectors)) { 2192 handle_bad_sector(bio, maxsector); 2193 return -EIO; 2194 } 2195 return 0; 2196 } 2197 2198 /* 2199 * Remap block n of partition p to block n+start(p) of the disk. 2200 */ 2201 static inline int blk_partition_remap(struct bio *bio) 2202 { 2203 struct hd_struct *p; 2204 int ret = -EIO; 2205 2206 rcu_read_lock(); 2207 p = __disk_get_part(bio->bi_disk, bio->bi_partno); 2208 if (unlikely(!p)) 2209 goto out; 2210 if (unlikely(should_fail_request(p, bio->bi_iter.bi_size))) 2211 goto out; 2212 if (unlikely(bio_check_ro(bio, p))) 2213 goto out; 2214 2215 /* 2216 * Zone reset does not include bi_size so bio_sectors() is always 0. 2217 * Include a test for the reset op code and perform the remap if needed. 2218 */ 2219 if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET) { 2220 if (bio_check_eod(bio, part_nr_sects_read(p))) 2221 goto out; 2222 bio->bi_iter.bi_sector += p->start_sect; 2223 trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p), 2224 bio->bi_iter.bi_sector - p->start_sect); 2225 } 2226 bio->bi_partno = 0; 2227 ret = 0; 2228 out: 2229 rcu_read_unlock(); 2230 return ret; 2231 } 2232 2233 static noinline_for_stack bool 2234 generic_make_request_checks(struct bio *bio) 2235 { 2236 struct request_queue *q; 2237 int nr_sectors = bio_sectors(bio); 2238 blk_status_t status = BLK_STS_IOERR; 2239 char b[BDEVNAME_SIZE]; 2240 2241 might_sleep(); 2242 2243 q = bio->bi_disk->queue; 2244 if (unlikely(!q)) { 2245 printk(KERN_ERR 2246 "generic_make_request: Trying to access " 2247 "nonexistent block-device %s (%Lu)\n", 2248 bio_devname(bio, b), (long long)bio->bi_iter.bi_sector); 2249 goto end_io; 2250 } 2251 2252 /* 2253 * For a REQ_NOWAIT based request, return -EOPNOTSUPP 2254 * if queue is not a request based queue. 2255 */ 2256 if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q)) 2257 goto not_supported; 2258 2259 if (should_fail_bio(bio)) 2260 goto end_io; 2261 2262 if (bio->bi_partno) { 2263 if (unlikely(blk_partition_remap(bio))) 2264 goto end_io; 2265 } else { 2266 if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0))) 2267 goto end_io; 2268 if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk)))) 2269 goto end_io; 2270 } 2271 2272 /* 2273 * Filter flush bio's early so that make_request based 2274 * drivers without flush support don't have to worry 2275 * about them. 2276 */ 2277 if (op_is_flush(bio->bi_opf) && 2278 !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) { 2279 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA); 2280 if (!nr_sectors) { 2281 status = BLK_STS_OK; 2282 goto end_io; 2283 } 2284 } 2285 2286 switch (bio_op(bio)) { 2287 case REQ_OP_DISCARD: 2288 if (!blk_queue_discard(q)) 2289 goto not_supported; 2290 break; 2291 case REQ_OP_SECURE_ERASE: 2292 if (!blk_queue_secure_erase(q)) 2293 goto not_supported; 2294 break; 2295 case REQ_OP_WRITE_SAME: 2296 if (!q->limits.max_write_same_sectors) 2297 goto not_supported; 2298 break; 2299 case REQ_OP_ZONE_REPORT: 2300 case REQ_OP_ZONE_RESET: 2301 if (!blk_queue_is_zoned(q)) 2302 goto not_supported; 2303 break; 2304 case REQ_OP_WRITE_ZEROES: 2305 if (!q->limits.max_write_zeroes_sectors) 2306 goto not_supported; 2307 break; 2308 default: 2309 break; 2310 } 2311 2312 /* 2313 * Various block parts want %current->io_context and lazy ioc 2314 * allocation ends up trading a lot of pain for a small amount of 2315 * memory. Just allocate it upfront. This may fail and block 2316 * layer knows how to live with it. 2317 */ 2318 create_io_context(GFP_ATOMIC, q->node); 2319 2320 if (!blkcg_bio_issue_check(q, bio)) 2321 return false; 2322 2323 if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) { 2324 trace_block_bio_queue(q, bio); 2325 /* Now that enqueuing has been traced, we need to trace 2326 * completion as well. 2327 */ 2328 bio_set_flag(bio, BIO_TRACE_COMPLETION); 2329 } 2330 return true; 2331 2332 not_supported: 2333 status = BLK_STS_NOTSUPP; 2334 end_io: 2335 bio->bi_status = status; 2336 bio_endio(bio); 2337 return false; 2338 } 2339 2340 /** 2341 * generic_make_request - hand a buffer to its device driver for I/O 2342 * @bio: The bio describing the location in memory and on the device. 2343 * 2344 * generic_make_request() is used to make I/O requests of block 2345 * devices. It is passed a &struct bio, which describes the I/O that needs 2346 * to be done. 2347 * 2348 * generic_make_request() does not return any status. The 2349 * success/failure status of the request, along with notification of 2350 * completion, is delivered asynchronously through the bio->bi_end_io 2351 * function described (one day) else where. 2352 * 2353 * The caller of generic_make_request must make sure that bi_io_vec 2354 * are set to describe the memory buffer, and that bi_dev and bi_sector are 2355 * set to describe the device address, and the 2356 * bi_end_io and optionally bi_private are set to describe how 2357 * completion notification should be signaled. 2358 * 2359 * generic_make_request and the drivers it calls may use bi_next if this 2360 * bio happens to be merged with someone else, and may resubmit the bio to 2361 * a lower device by calling into generic_make_request recursively, which 2362 * means the bio should NOT be touched after the call to ->make_request_fn. 2363 */ 2364 blk_qc_t generic_make_request(struct bio *bio) 2365 { 2366 /* 2367 * bio_list_on_stack[0] contains bios submitted by the current 2368 * make_request_fn. 2369 * bio_list_on_stack[1] contains bios that were submitted before 2370 * the current make_request_fn, but that haven't been processed 2371 * yet. 2372 */ 2373 struct bio_list bio_list_on_stack[2]; 2374 blk_mq_req_flags_t flags = 0; 2375 struct request_queue *q = bio->bi_disk->queue; 2376 blk_qc_t ret = BLK_QC_T_NONE; 2377 2378 if (bio->bi_opf & REQ_NOWAIT) 2379 flags = BLK_MQ_REQ_NOWAIT; 2380 if (bio_flagged(bio, BIO_QUEUE_ENTERED)) 2381 blk_queue_enter_live(q); 2382 else if (blk_queue_enter(q, flags) < 0) { 2383 if (!blk_queue_dying(q) && (bio->bi_opf & REQ_NOWAIT)) 2384 bio_wouldblock_error(bio); 2385 else 2386 bio_io_error(bio); 2387 return ret; 2388 } 2389 2390 if (!generic_make_request_checks(bio)) 2391 goto out; 2392 2393 /* 2394 * We only want one ->make_request_fn to be active at a time, else 2395 * stack usage with stacked devices could be a problem. So use 2396 * current->bio_list to keep a list of requests submited by a 2397 * make_request_fn function. current->bio_list is also used as a 2398 * flag to say if generic_make_request is currently active in this 2399 * task or not. If it is NULL, then no make_request is active. If 2400 * it is non-NULL, then a make_request is active, and new requests 2401 * should be added at the tail 2402 */ 2403 if (current->bio_list) { 2404 bio_list_add(¤t->bio_list[0], bio); 2405 goto out; 2406 } 2407 2408 /* following loop may be a bit non-obvious, and so deserves some 2409 * explanation. 2410 * Before entering the loop, bio->bi_next is NULL (as all callers 2411 * ensure that) so we have a list with a single bio. 2412 * We pretend that we have just taken it off a longer list, so 2413 * we assign bio_list to a pointer to the bio_list_on_stack, 2414 * thus initialising the bio_list of new bios to be 2415 * added. ->make_request() may indeed add some more bios 2416 * through a recursive call to generic_make_request. If it 2417 * did, we find a non-NULL value in bio_list and re-enter the loop 2418 * from the top. In this case we really did just take the bio 2419 * of the top of the list (no pretending) and so remove it from 2420 * bio_list, and call into ->make_request() again. 2421 */ 2422 BUG_ON(bio->bi_next); 2423 bio_list_init(&bio_list_on_stack[0]); 2424 current->bio_list = bio_list_on_stack; 2425 do { 2426 bool enter_succeeded = true; 2427 2428 if (unlikely(q != bio->bi_disk->queue)) { 2429 if (q) 2430 blk_queue_exit(q); 2431 q = bio->bi_disk->queue; 2432 flags = 0; 2433 if (bio->bi_opf & REQ_NOWAIT) 2434 flags = BLK_MQ_REQ_NOWAIT; 2435 if (blk_queue_enter(q, flags) < 0) { 2436 enter_succeeded = false; 2437 q = NULL; 2438 } 2439 } 2440 2441 if (enter_succeeded) { 2442 struct bio_list lower, same; 2443 2444 /* Create a fresh bio_list for all subordinate requests */ 2445 bio_list_on_stack[1] = bio_list_on_stack[0]; 2446 bio_list_init(&bio_list_on_stack[0]); 2447 ret = q->make_request_fn(q, bio); 2448 2449 /* sort new bios into those for a lower level 2450 * and those for the same level 2451 */ 2452 bio_list_init(&lower); 2453 bio_list_init(&same); 2454 while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL) 2455 if (q == bio->bi_disk->queue) 2456 bio_list_add(&same, bio); 2457 else 2458 bio_list_add(&lower, bio); 2459 /* now assemble so we handle the lowest level first */ 2460 bio_list_merge(&bio_list_on_stack[0], &lower); 2461 bio_list_merge(&bio_list_on_stack[0], &same); 2462 bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]); 2463 } else { 2464 if (unlikely(!blk_queue_dying(q) && 2465 (bio->bi_opf & REQ_NOWAIT))) 2466 bio_wouldblock_error(bio); 2467 else 2468 bio_io_error(bio); 2469 } 2470 bio = bio_list_pop(&bio_list_on_stack[0]); 2471 } while (bio); 2472 current->bio_list = NULL; /* deactivate */ 2473 2474 out: 2475 if (q) 2476 blk_queue_exit(q); 2477 return ret; 2478 } 2479 EXPORT_SYMBOL(generic_make_request); 2480 2481 /** 2482 * direct_make_request - hand a buffer directly to its device driver for I/O 2483 * @bio: The bio describing the location in memory and on the device. 2484 * 2485 * This function behaves like generic_make_request(), but does not protect 2486 * against recursion. Must only be used if the called driver is known 2487 * to not call generic_make_request (or direct_make_request) again from 2488 * its make_request function. (Calling direct_make_request again from 2489 * a workqueue is perfectly fine as that doesn't recurse). 2490 */ 2491 blk_qc_t direct_make_request(struct bio *bio) 2492 { 2493 struct request_queue *q = bio->bi_disk->queue; 2494 bool nowait = bio->bi_opf & REQ_NOWAIT; 2495 blk_qc_t ret; 2496 2497 if (!generic_make_request_checks(bio)) 2498 return BLK_QC_T_NONE; 2499 2500 if (unlikely(blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0))) { 2501 if (nowait && !blk_queue_dying(q)) 2502 bio->bi_status = BLK_STS_AGAIN; 2503 else 2504 bio->bi_status = BLK_STS_IOERR; 2505 bio_endio(bio); 2506 return BLK_QC_T_NONE; 2507 } 2508 2509 ret = q->make_request_fn(q, bio); 2510 blk_queue_exit(q); 2511 return ret; 2512 } 2513 EXPORT_SYMBOL_GPL(direct_make_request); 2514 2515 /** 2516 * submit_bio - submit a bio to the block device layer for I/O 2517 * @bio: The &struct bio which describes the I/O 2518 * 2519 * submit_bio() is very similar in purpose to generic_make_request(), and 2520 * uses that function to do most of the work. Both are fairly rough 2521 * interfaces; @bio must be presetup and ready for I/O. 2522 * 2523 */ 2524 blk_qc_t submit_bio(struct bio *bio) 2525 { 2526 /* 2527 * If it's a regular read/write or a barrier with data attached, 2528 * go through the normal accounting stuff before submission. 2529 */ 2530 if (bio_has_data(bio)) { 2531 unsigned int count; 2532 2533 if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) 2534 count = queue_logical_block_size(bio->bi_disk->queue) >> 9; 2535 else 2536 count = bio_sectors(bio); 2537 2538 if (op_is_write(bio_op(bio))) { 2539 count_vm_events(PGPGOUT, count); 2540 } else { 2541 task_io_account_read(bio->bi_iter.bi_size); 2542 count_vm_events(PGPGIN, count); 2543 } 2544 2545 if (unlikely(block_dump)) { 2546 char b[BDEVNAME_SIZE]; 2547 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n", 2548 current->comm, task_pid_nr(current), 2549 op_is_write(bio_op(bio)) ? "WRITE" : "READ", 2550 (unsigned long long)bio->bi_iter.bi_sector, 2551 bio_devname(bio, b), count); 2552 } 2553 } 2554 2555 return generic_make_request(bio); 2556 } 2557 EXPORT_SYMBOL(submit_bio); 2558 2559 bool blk_poll(struct request_queue *q, blk_qc_t cookie) 2560 { 2561 if (!q->poll_fn || !blk_qc_t_valid(cookie)) 2562 return false; 2563 2564 if (current->plug) 2565 blk_flush_plug_list(current->plug, false); 2566 return q->poll_fn(q, cookie); 2567 } 2568 EXPORT_SYMBOL_GPL(blk_poll); 2569 2570 /** 2571 * blk_cloned_rq_check_limits - Helper function to check a cloned request 2572 * for new the queue limits 2573 * @q: the queue 2574 * @rq: the request being checked 2575 * 2576 * Description: 2577 * @rq may have been made based on weaker limitations of upper-level queues 2578 * in request stacking drivers, and it may violate the limitation of @q. 2579 * Since the block layer and the underlying device driver trust @rq 2580 * after it is inserted to @q, it should be checked against @q before 2581 * the insertion using this generic function. 2582 * 2583 * Request stacking drivers like request-based dm may change the queue 2584 * limits when retrying requests on other queues. Those requests need 2585 * to be checked against the new queue limits again during dispatch. 2586 */ 2587 static int blk_cloned_rq_check_limits(struct request_queue *q, 2588 struct request *rq) 2589 { 2590 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) { 2591 printk(KERN_ERR "%s: over max size limit.\n", __func__); 2592 return -EIO; 2593 } 2594 2595 /* 2596 * queue's settings related to segment counting like q->bounce_pfn 2597 * may differ from that of other stacking queues. 2598 * Recalculate it to check the request correctly on this queue's 2599 * limitation. 2600 */ 2601 blk_recalc_rq_segments(rq); 2602 if (rq->nr_phys_segments > queue_max_segments(q)) { 2603 printk(KERN_ERR "%s: over max segments limit.\n", __func__); 2604 return -EIO; 2605 } 2606 2607 return 0; 2608 } 2609 2610 /** 2611 * blk_insert_cloned_request - Helper for stacking drivers to submit a request 2612 * @q: the queue to submit the request 2613 * @rq: the request being queued 2614 */ 2615 blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq) 2616 { 2617 unsigned long flags; 2618 int where = ELEVATOR_INSERT_BACK; 2619 2620 if (blk_cloned_rq_check_limits(q, rq)) 2621 return BLK_STS_IOERR; 2622 2623 if (rq->rq_disk && 2624 should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq))) 2625 return BLK_STS_IOERR; 2626 2627 if (q->mq_ops) { 2628 if (blk_queue_io_stat(q)) 2629 blk_account_io_start(rq, true); 2630 /* 2631 * Since we have a scheduler attached on the top device, 2632 * bypass a potential scheduler on the bottom device for 2633 * insert. 2634 */ 2635 return blk_mq_request_issue_directly(rq); 2636 } 2637 2638 spin_lock_irqsave(q->queue_lock, flags); 2639 if (unlikely(blk_queue_dying(q))) { 2640 spin_unlock_irqrestore(q->queue_lock, flags); 2641 return BLK_STS_IOERR; 2642 } 2643 2644 /* 2645 * Submitting request must be dequeued before calling this function 2646 * because it will be linked to another request_queue 2647 */ 2648 BUG_ON(blk_queued_rq(rq)); 2649 2650 if (op_is_flush(rq->cmd_flags)) 2651 where = ELEVATOR_INSERT_FLUSH; 2652 2653 add_acct_request(q, rq, where); 2654 if (where == ELEVATOR_INSERT_FLUSH) 2655 __blk_run_queue(q); 2656 spin_unlock_irqrestore(q->queue_lock, flags); 2657 2658 return BLK_STS_OK; 2659 } 2660 EXPORT_SYMBOL_GPL(blk_insert_cloned_request); 2661 2662 /** 2663 * blk_rq_err_bytes - determine number of bytes till the next failure boundary 2664 * @rq: request to examine 2665 * 2666 * Description: 2667 * A request could be merge of IOs which require different failure 2668 * handling. This function determines the number of bytes which 2669 * can be failed from the beginning of the request without 2670 * crossing into area which need to be retried further. 2671 * 2672 * Return: 2673 * The number of bytes to fail. 2674 */ 2675 unsigned int blk_rq_err_bytes(const struct request *rq) 2676 { 2677 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK; 2678 unsigned int bytes = 0; 2679 struct bio *bio; 2680 2681 if (!(rq->rq_flags & RQF_MIXED_MERGE)) 2682 return blk_rq_bytes(rq); 2683 2684 /* 2685 * Currently the only 'mixing' which can happen is between 2686 * different fastfail types. We can safely fail portions 2687 * which have all the failfast bits that the first one has - 2688 * the ones which are at least as eager to fail as the first 2689 * one. 2690 */ 2691 for (bio = rq->bio; bio; bio = bio->bi_next) { 2692 if ((bio->bi_opf & ff) != ff) 2693 break; 2694 bytes += bio->bi_iter.bi_size; 2695 } 2696 2697 /* this could lead to infinite loop */ 2698 BUG_ON(blk_rq_bytes(rq) && !bytes); 2699 return bytes; 2700 } 2701 EXPORT_SYMBOL_GPL(blk_rq_err_bytes); 2702 2703 void blk_account_io_completion(struct request *req, unsigned int bytes) 2704 { 2705 if (blk_do_io_stat(req)) { 2706 const int rw = rq_data_dir(req); 2707 struct hd_struct *part; 2708 int cpu; 2709 2710 cpu = part_stat_lock(); 2711 part = req->part; 2712 part_stat_add(cpu, part, sectors[rw], bytes >> 9); 2713 part_stat_unlock(); 2714 } 2715 } 2716 2717 void blk_account_io_done(struct request *req, u64 now) 2718 { 2719 /* 2720 * Account IO completion. flush_rq isn't accounted as a 2721 * normal IO on queueing nor completion. Accounting the 2722 * containing request is enough. 2723 */ 2724 if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) { 2725 unsigned long duration; 2726 const int rw = rq_data_dir(req); 2727 struct hd_struct *part; 2728 int cpu; 2729 2730 duration = nsecs_to_jiffies(now - req->start_time_ns); 2731 cpu = part_stat_lock(); 2732 part = req->part; 2733 2734 part_stat_inc(cpu, part, ios[rw]); 2735 part_stat_add(cpu, part, ticks[rw], duration); 2736 part_round_stats(req->q, cpu, part); 2737 part_dec_in_flight(req->q, part, rw); 2738 2739 hd_struct_put(part); 2740 part_stat_unlock(); 2741 } 2742 } 2743 2744 #ifdef CONFIG_PM 2745 /* 2746 * Don't process normal requests when queue is suspended 2747 * or in the process of suspending/resuming 2748 */ 2749 static bool blk_pm_allow_request(struct request *rq) 2750 { 2751 switch (rq->q->rpm_status) { 2752 case RPM_RESUMING: 2753 case RPM_SUSPENDING: 2754 return rq->rq_flags & RQF_PM; 2755 case RPM_SUSPENDED: 2756 return false; 2757 } 2758 2759 return true; 2760 } 2761 #else 2762 static bool blk_pm_allow_request(struct request *rq) 2763 { 2764 return true; 2765 } 2766 #endif 2767 2768 void blk_account_io_start(struct request *rq, bool new_io) 2769 { 2770 struct hd_struct *part; 2771 int rw = rq_data_dir(rq); 2772 int cpu; 2773 2774 if (!blk_do_io_stat(rq)) 2775 return; 2776 2777 cpu = part_stat_lock(); 2778 2779 if (!new_io) { 2780 part = rq->part; 2781 part_stat_inc(cpu, part, merges[rw]); 2782 } else { 2783 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); 2784 if (!hd_struct_try_get(part)) { 2785 /* 2786 * The partition is already being removed, 2787 * the request will be accounted on the disk only 2788 * 2789 * We take a reference on disk->part0 although that 2790 * partition will never be deleted, so we can treat 2791 * it as any other partition. 2792 */ 2793 part = &rq->rq_disk->part0; 2794 hd_struct_get(part); 2795 } 2796 part_round_stats(rq->q, cpu, part); 2797 part_inc_in_flight(rq->q, part, rw); 2798 rq->part = part; 2799 } 2800 2801 part_stat_unlock(); 2802 } 2803 2804 static struct request *elv_next_request(struct request_queue *q) 2805 { 2806 struct request *rq; 2807 struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL); 2808 2809 WARN_ON_ONCE(q->mq_ops); 2810 2811 while (1) { 2812 list_for_each_entry(rq, &q->queue_head, queuelist) { 2813 if (blk_pm_allow_request(rq)) 2814 return rq; 2815 2816 if (rq->rq_flags & RQF_SOFTBARRIER) 2817 break; 2818 } 2819 2820 /* 2821 * Flush request is running and flush request isn't queueable 2822 * in the drive, we can hold the queue till flush request is 2823 * finished. Even we don't do this, driver can't dispatch next 2824 * requests and will requeue them. And this can improve 2825 * throughput too. For example, we have request flush1, write1, 2826 * flush 2. flush1 is dispatched, then queue is hold, write1 2827 * isn't inserted to queue. After flush1 is finished, flush2 2828 * will be dispatched. Since disk cache is already clean, 2829 * flush2 will be finished very soon, so looks like flush2 is 2830 * folded to flush1. 2831 * Since the queue is hold, a flag is set to indicate the queue 2832 * should be restarted later. Please see flush_end_io() for 2833 * details. 2834 */ 2835 if (fq->flush_pending_idx != fq->flush_running_idx && 2836 !queue_flush_queueable(q)) { 2837 fq->flush_queue_delayed = 1; 2838 return NULL; 2839 } 2840 if (unlikely(blk_queue_bypass(q)) || 2841 !q->elevator->type->ops.sq.elevator_dispatch_fn(q, 0)) 2842 return NULL; 2843 } 2844 } 2845 2846 /** 2847 * blk_peek_request - peek at the top of a request queue 2848 * @q: request queue to peek at 2849 * 2850 * Description: 2851 * Return the request at the top of @q. The returned request 2852 * should be started using blk_start_request() before LLD starts 2853 * processing it. 2854 * 2855 * Return: 2856 * Pointer to the request at the top of @q if available. Null 2857 * otherwise. 2858 */ 2859 struct request *blk_peek_request(struct request_queue *q) 2860 { 2861 struct request *rq; 2862 int ret; 2863 2864 lockdep_assert_held(q->queue_lock); 2865 WARN_ON_ONCE(q->mq_ops); 2866 2867 while ((rq = elv_next_request(q)) != NULL) { 2868 if (!(rq->rq_flags & RQF_STARTED)) { 2869 /* 2870 * This is the first time the device driver 2871 * sees this request (possibly after 2872 * requeueing). Notify IO scheduler. 2873 */ 2874 if (rq->rq_flags & RQF_SORTED) 2875 elv_activate_rq(q, rq); 2876 2877 /* 2878 * just mark as started even if we don't start 2879 * it, a request that has been delayed should 2880 * not be passed by new incoming requests 2881 */ 2882 rq->rq_flags |= RQF_STARTED; 2883 trace_block_rq_issue(q, rq); 2884 } 2885 2886 if (!q->boundary_rq || q->boundary_rq == rq) { 2887 q->end_sector = rq_end_sector(rq); 2888 q->boundary_rq = NULL; 2889 } 2890 2891 if (rq->rq_flags & RQF_DONTPREP) 2892 break; 2893 2894 if (q->dma_drain_size && blk_rq_bytes(rq)) { 2895 /* 2896 * make sure space for the drain appears we 2897 * know we can do this because max_hw_segments 2898 * has been adjusted to be one fewer than the 2899 * device can handle 2900 */ 2901 rq->nr_phys_segments++; 2902 } 2903 2904 if (!q->prep_rq_fn) 2905 break; 2906 2907 ret = q->prep_rq_fn(q, rq); 2908 if (ret == BLKPREP_OK) { 2909 break; 2910 } else if (ret == BLKPREP_DEFER) { 2911 /* 2912 * the request may have been (partially) prepped. 2913 * we need to keep this request in the front to 2914 * avoid resource deadlock. RQF_STARTED will 2915 * prevent other fs requests from passing this one. 2916 */ 2917 if (q->dma_drain_size && blk_rq_bytes(rq) && 2918 !(rq->rq_flags & RQF_DONTPREP)) { 2919 /* 2920 * remove the space for the drain we added 2921 * so that we don't add it again 2922 */ 2923 --rq->nr_phys_segments; 2924 } 2925 2926 rq = NULL; 2927 break; 2928 } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) { 2929 rq->rq_flags |= RQF_QUIET; 2930 /* 2931 * Mark this request as started so we don't trigger 2932 * any debug logic in the end I/O path. 2933 */ 2934 blk_start_request(rq); 2935 __blk_end_request_all(rq, ret == BLKPREP_INVALID ? 2936 BLK_STS_TARGET : BLK_STS_IOERR); 2937 } else { 2938 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); 2939 break; 2940 } 2941 } 2942 2943 return rq; 2944 } 2945 EXPORT_SYMBOL(blk_peek_request); 2946 2947 static void blk_dequeue_request(struct request *rq) 2948 { 2949 struct request_queue *q = rq->q; 2950 2951 BUG_ON(list_empty(&rq->queuelist)); 2952 BUG_ON(ELV_ON_HASH(rq)); 2953 2954 list_del_init(&rq->queuelist); 2955 2956 /* 2957 * the time frame between a request being removed from the lists 2958 * and to it is freed is accounted as io that is in progress at 2959 * the driver side. 2960 */ 2961 if (blk_account_rq(rq)) 2962 q->in_flight[rq_is_sync(rq)]++; 2963 } 2964 2965 /** 2966 * blk_start_request - start request processing on the driver 2967 * @req: request to dequeue 2968 * 2969 * Description: 2970 * Dequeue @req and start timeout timer on it. This hands off the 2971 * request to the driver. 2972 */ 2973 void blk_start_request(struct request *req) 2974 { 2975 lockdep_assert_held(req->q->queue_lock); 2976 WARN_ON_ONCE(req->q->mq_ops); 2977 2978 blk_dequeue_request(req); 2979 2980 if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) { 2981 req->io_start_time_ns = ktime_get_ns(); 2982 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW 2983 req->throtl_size = blk_rq_sectors(req); 2984 #endif 2985 req->rq_flags |= RQF_STATS; 2986 wbt_issue(req->q->rq_wb, req); 2987 } 2988 2989 BUG_ON(blk_rq_is_complete(req)); 2990 blk_add_timer(req); 2991 } 2992 EXPORT_SYMBOL(blk_start_request); 2993 2994 /** 2995 * blk_fetch_request - fetch a request from a request queue 2996 * @q: request queue to fetch a request from 2997 * 2998 * Description: 2999 * Return the request at the top of @q. The request is started on 3000 * return and LLD can start processing it immediately. 3001 * 3002 * Return: 3003 * Pointer to the request at the top of @q if available. Null 3004 * otherwise. 3005 */ 3006 struct request *blk_fetch_request(struct request_queue *q) 3007 { 3008 struct request *rq; 3009 3010 lockdep_assert_held(q->queue_lock); 3011 WARN_ON_ONCE(q->mq_ops); 3012 3013 rq = blk_peek_request(q); 3014 if (rq) 3015 blk_start_request(rq); 3016 return rq; 3017 } 3018 EXPORT_SYMBOL(blk_fetch_request); 3019 3020 /* 3021 * Steal bios from a request and add them to a bio list. 3022 * The request must not have been partially completed before. 3023 */ 3024 void blk_steal_bios(struct bio_list *list, struct request *rq) 3025 { 3026 if (rq->bio) { 3027 if (list->tail) 3028 list->tail->bi_next = rq->bio; 3029 else 3030 list->head = rq->bio; 3031 list->tail = rq->biotail; 3032 3033 rq->bio = NULL; 3034 rq->biotail = NULL; 3035 } 3036 3037 rq->__data_len = 0; 3038 } 3039 EXPORT_SYMBOL_GPL(blk_steal_bios); 3040 3041 /** 3042 * blk_update_request - Special helper function for request stacking drivers 3043 * @req: the request being processed 3044 * @error: block status code 3045 * @nr_bytes: number of bytes to complete @req 3046 * 3047 * Description: 3048 * Ends I/O on a number of bytes attached to @req, but doesn't complete 3049 * the request structure even if @req doesn't have leftover. 3050 * If @req has leftover, sets it up for the next range of segments. 3051 * 3052 * This special helper function is only for request stacking drivers 3053 * (e.g. request-based dm) so that they can handle partial completion. 3054 * Actual device drivers should use blk_end_request instead. 3055 * 3056 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees 3057 * %false return from this function. 3058 * 3059 * Return: 3060 * %false - this request doesn't have any more data 3061 * %true - this request has more data 3062 **/ 3063 bool blk_update_request(struct request *req, blk_status_t error, 3064 unsigned int nr_bytes) 3065 { 3066 int total_bytes; 3067 3068 trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes); 3069 3070 if (!req->bio) 3071 return false; 3072 3073 if (unlikely(error && !blk_rq_is_passthrough(req) && 3074 !(req->rq_flags & RQF_QUIET))) 3075 print_req_error(req, error); 3076 3077 blk_account_io_completion(req, nr_bytes); 3078 3079 total_bytes = 0; 3080 while (req->bio) { 3081 struct bio *bio = req->bio; 3082 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); 3083 3084 if (bio_bytes == bio->bi_iter.bi_size) { 3085 req->bio = bio->bi_next; 3086 bio->bi_next = NULL; 3087 } 3088 3089 /* Completion has already been traced */ 3090 bio_clear_flag(bio, BIO_TRACE_COMPLETION); 3091 req_bio_endio(req, bio, bio_bytes, error); 3092 3093 total_bytes += bio_bytes; 3094 nr_bytes -= bio_bytes; 3095 3096 if (!nr_bytes) 3097 break; 3098 } 3099 3100 /* 3101 * completely done 3102 */ 3103 if (!req->bio) { 3104 /* 3105 * Reset counters so that the request stacking driver 3106 * can find how many bytes remain in the request 3107 * later. 3108 */ 3109 req->__data_len = 0; 3110 return false; 3111 } 3112 3113 req->__data_len -= total_bytes; 3114 3115 /* update sector only for requests with clear definition of sector */ 3116 if (!blk_rq_is_passthrough(req)) 3117 req->__sector += total_bytes >> 9; 3118 3119 /* mixed attributes always follow the first bio */ 3120 if (req->rq_flags & RQF_MIXED_MERGE) { 3121 req->cmd_flags &= ~REQ_FAILFAST_MASK; 3122 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK; 3123 } 3124 3125 if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) { 3126 /* 3127 * If total number of sectors is less than the first segment 3128 * size, something has gone terribly wrong. 3129 */ 3130 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { 3131 blk_dump_rq_flags(req, "request botched"); 3132 req->__data_len = blk_rq_cur_bytes(req); 3133 } 3134 3135 /* recalculate the number of segments */ 3136 blk_recalc_rq_segments(req); 3137 } 3138 3139 return true; 3140 } 3141 EXPORT_SYMBOL_GPL(blk_update_request); 3142 3143 static bool blk_update_bidi_request(struct request *rq, blk_status_t error, 3144 unsigned int nr_bytes, 3145 unsigned int bidi_bytes) 3146 { 3147 if (blk_update_request(rq, error, nr_bytes)) 3148 return true; 3149 3150 /* Bidi request must be completed as a whole */ 3151 if (unlikely(blk_bidi_rq(rq)) && 3152 blk_update_request(rq->next_rq, error, bidi_bytes)) 3153 return true; 3154 3155 if (blk_queue_add_random(rq->q)) 3156 add_disk_randomness(rq->rq_disk); 3157 3158 return false; 3159 } 3160 3161 /** 3162 * blk_unprep_request - unprepare a request 3163 * @req: the request 3164 * 3165 * This function makes a request ready for complete resubmission (or 3166 * completion). It happens only after all error handling is complete, 3167 * so represents the appropriate moment to deallocate any resources 3168 * that were allocated to the request in the prep_rq_fn. The queue 3169 * lock is held when calling this. 3170 */ 3171 void blk_unprep_request(struct request *req) 3172 { 3173 struct request_queue *q = req->q; 3174 3175 req->rq_flags &= ~RQF_DONTPREP; 3176 if (q->unprep_rq_fn) 3177 q->unprep_rq_fn(q, req); 3178 } 3179 EXPORT_SYMBOL_GPL(blk_unprep_request); 3180 3181 void blk_finish_request(struct request *req, blk_status_t error) 3182 { 3183 struct request_queue *q = req->q; 3184 u64 now = ktime_get_ns(); 3185 3186 lockdep_assert_held(req->q->queue_lock); 3187 WARN_ON_ONCE(q->mq_ops); 3188 3189 if (req->rq_flags & RQF_STATS) 3190 blk_stat_add(req, now); 3191 3192 if (req->rq_flags & RQF_QUEUED) 3193 blk_queue_end_tag(q, req); 3194 3195 BUG_ON(blk_queued_rq(req)); 3196 3197 if (unlikely(laptop_mode) && !blk_rq_is_passthrough(req)) 3198 laptop_io_completion(req->q->backing_dev_info); 3199 3200 blk_delete_timer(req); 3201 3202 if (req->rq_flags & RQF_DONTPREP) 3203 blk_unprep_request(req); 3204 3205 blk_account_io_done(req, now); 3206 3207 if (req->end_io) { 3208 wbt_done(req->q->rq_wb, req); 3209 req->end_io(req, error); 3210 } else { 3211 if (blk_bidi_rq(req)) 3212 __blk_put_request(req->next_rq->q, req->next_rq); 3213 3214 __blk_put_request(q, req); 3215 } 3216 } 3217 EXPORT_SYMBOL(blk_finish_request); 3218 3219 /** 3220 * blk_end_bidi_request - Complete a bidi request 3221 * @rq: the request to complete 3222 * @error: block status code 3223 * @nr_bytes: number of bytes to complete @rq 3224 * @bidi_bytes: number of bytes to complete @rq->next_rq 3225 * 3226 * Description: 3227 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. 3228 * Drivers that supports bidi can safely call this member for any 3229 * type of request, bidi or uni. In the later case @bidi_bytes is 3230 * just ignored. 3231 * 3232 * Return: 3233 * %false - we are done with this request 3234 * %true - still buffers pending for this request 3235 **/ 3236 static bool blk_end_bidi_request(struct request *rq, blk_status_t error, 3237 unsigned int nr_bytes, unsigned int bidi_bytes) 3238 { 3239 struct request_queue *q = rq->q; 3240 unsigned long flags; 3241 3242 WARN_ON_ONCE(q->mq_ops); 3243 3244 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) 3245 return true; 3246 3247 spin_lock_irqsave(q->queue_lock, flags); 3248 blk_finish_request(rq, error); 3249 spin_unlock_irqrestore(q->queue_lock, flags); 3250 3251 return false; 3252 } 3253 3254 /** 3255 * __blk_end_bidi_request - Complete a bidi request with queue lock held 3256 * @rq: the request to complete 3257 * @error: block status code 3258 * @nr_bytes: number of bytes to complete @rq 3259 * @bidi_bytes: number of bytes to complete @rq->next_rq 3260 * 3261 * Description: 3262 * Identical to blk_end_bidi_request() except that queue lock is 3263 * assumed to be locked on entry and remains so on return. 3264 * 3265 * Return: 3266 * %false - we are done with this request 3267 * %true - still buffers pending for this request 3268 **/ 3269 static bool __blk_end_bidi_request(struct request *rq, blk_status_t error, 3270 unsigned int nr_bytes, unsigned int bidi_bytes) 3271 { 3272 lockdep_assert_held(rq->q->queue_lock); 3273 WARN_ON_ONCE(rq->q->mq_ops); 3274 3275 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) 3276 return true; 3277 3278 blk_finish_request(rq, error); 3279 3280 return false; 3281 } 3282 3283 /** 3284 * blk_end_request - Helper function for drivers to complete the request. 3285 * @rq: the request being processed 3286 * @error: block status code 3287 * @nr_bytes: number of bytes to complete 3288 * 3289 * Description: 3290 * Ends I/O on a number of bytes attached to @rq. 3291 * If @rq has leftover, sets it up for the next range of segments. 3292 * 3293 * Return: 3294 * %false - we are done with this request 3295 * %true - still buffers pending for this request 3296 **/ 3297 bool blk_end_request(struct request *rq, blk_status_t error, 3298 unsigned int nr_bytes) 3299 { 3300 WARN_ON_ONCE(rq->q->mq_ops); 3301 return blk_end_bidi_request(rq, error, nr_bytes, 0); 3302 } 3303 EXPORT_SYMBOL(blk_end_request); 3304 3305 /** 3306 * blk_end_request_all - Helper function for drives to finish the request. 3307 * @rq: the request to finish 3308 * @error: block status code 3309 * 3310 * Description: 3311 * Completely finish @rq. 3312 */ 3313 void blk_end_request_all(struct request *rq, blk_status_t error) 3314 { 3315 bool pending; 3316 unsigned int bidi_bytes = 0; 3317 3318 if (unlikely(blk_bidi_rq(rq))) 3319 bidi_bytes = blk_rq_bytes(rq->next_rq); 3320 3321 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); 3322 BUG_ON(pending); 3323 } 3324 EXPORT_SYMBOL(blk_end_request_all); 3325 3326 /** 3327 * __blk_end_request - Helper function for drivers to complete the request. 3328 * @rq: the request being processed 3329 * @error: block status code 3330 * @nr_bytes: number of bytes to complete 3331 * 3332 * Description: 3333 * Must be called with queue lock held unlike blk_end_request(). 3334 * 3335 * Return: 3336 * %false - we are done with this request 3337 * %true - still buffers pending for this request 3338 **/ 3339 bool __blk_end_request(struct request *rq, blk_status_t error, 3340 unsigned int nr_bytes) 3341 { 3342 lockdep_assert_held(rq->q->queue_lock); 3343 WARN_ON_ONCE(rq->q->mq_ops); 3344 3345 return __blk_end_bidi_request(rq, error, nr_bytes, 0); 3346 } 3347 EXPORT_SYMBOL(__blk_end_request); 3348 3349 /** 3350 * __blk_end_request_all - Helper function for drives to finish the request. 3351 * @rq: the request to finish 3352 * @error: block status code 3353 * 3354 * Description: 3355 * Completely finish @rq. Must be called with queue lock held. 3356 */ 3357 void __blk_end_request_all(struct request *rq, blk_status_t error) 3358 { 3359 bool pending; 3360 unsigned int bidi_bytes = 0; 3361 3362 lockdep_assert_held(rq->q->queue_lock); 3363 WARN_ON_ONCE(rq->q->mq_ops); 3364 3365 if (unlikely(blk_bidi_rq(rq))) 3366 bidi_bytes = blk_rq_bytes(rq->next_rq); 3367 3368 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); 3369 BUG_ON(pending); 3370 } 3371 EXPORT_SYMBOL(__blk_end_request_all); 3372 3373 /** 3374 * __blk_end_request_cur - Helper function to finish the current request chunk. 3375 * @rq: the request to finish the current chunk for 3376 * @error: block status code 3377 * 3378 * Description: 3379 * Complete the current consecutively mapped chunk from @rq. Must 3380 * be called with queue lock held. 3381 * 3382 * Return: 3383 * %false - we are done with this request 3384 * %true - still buffers pending for this request 3385 */ 3386 bool __blk_end_request_cur(struct request *rq, blk_status_t error) 3387 { 3388 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); 3389 } 3390 EXPORT_SYMBOL(__blk_end_request_cur); 3391 3392 void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 3393 struct bio *bio) 3394 { 3395 if (bio_has_data(bio)) 3396 rq->nr_phys_segments = bio_phys_segments(q, bio); 3397 else if (bio_op(bio) == REQ_OP_DISCARD) 3398 rq->nr_phys_segments = 1; 3399 3400 rq->__data_len = bio->bi_iter.bi_size; 3401 rq->bio = rq->biotail = bio; 3402 3403 if (bio->bi_disk) 3404 rq->rq_disk = bio->bi_disk; 3405 } 3406 3407 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 3408 /** 3409 * rq_flush_dcache_pages - Helper function to flush all pages in a request 3410 * @rq: the request to be flushed 3411 * 3412 * Description: 3413 * Flush all pages in @rq. 3414 */ 3415 void rq_flush_dcache_pages(struct request *rq) 3416 { 3417 struct req_iterator iter; 3418 struct bio_vec bvec; 3419 3420 rq_for_each_segment(bvec, rq, iter) 3421 flush_dcache_page(bvec.bv_page); 3422 } 3423 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); 3424 #endif 3425 3426 /** 3427 * blk_lld_busy - Check if underlying low-level drivers of a device are busy 3428 * @q : the queue of the device being checked 3429 * 3430 * Description: 3431 * Check if underlying low-level drivers of a device are busy. 3432 * If the drivers want to export their busy state, they must set own 3433 * exporting function using blk_queue_lld_busy() first. 3434 * 3435 * Basically, this function is used only by request stacking drivers 3436 * to stop dispatching requests to underlying devices when underlying 3437 * devices are busy. This behavior helps more I/O merging on the queue 3438 * of the request stacking driver and prevents I/O throughput regression 3439 * on burst I/O load. 3440 * 3441 * Return: 3442 * 0 - Not busy (The request stacking driver should dispatch request) 3443 * 1 - Busy (The request stacking driver should stop dispatching request) 3444 */ 3445 int blk_lld_busy(struct request_queue *q) 3446 { 3447 if (q->lld_busy_fn) 3448 return q->lld_busy_fn(q); 3449 3450 return 0; 3451 } 3452 EXPORT_SYMBOL_GPL(blk_lld_busy); 3453 3454 /** 3455 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request 3456 * @rq: the clone request to be cleaned up 3457 * 3458 * Description: 3459 * Free all bios in @rq for a cloned request. 3460 */ 3461 void blk_rq_unprep_clone(struct request *rq) 3462 { 3463 struct bio *bio; 3464 3465 while ((bio = rq->bio) != NULL) { 3466 rq->bio = bio->bi_next; 3467 3468 bio_put(bio); 3469 } 3470 } 3471 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); 3472 3473 /* 3474 * Copy attributes of the original request to the clone request. 3475 * The actual data parts (e.g. ->cmd, ->sense) are not copied. 3476 */ 3477 static void __blk_rq_prep_clone(struct request *dst, struct request *src) 3478 { 3479 dst->cpu = src->cpu; 3480 dst->__sector = blk_rq_pos(src); 3481 dst->__data_len = blk_rq_bytes(src); 3482 dst->nr_phys_segments = src->nr_phys_segments; 3483 dst->ioprio = src->ioprio; 3484 dst->extra_len = src->extra_len; 3485 } 3486 3487 /** 3488 * blk_rq_prep_clone - Helper function to setup clone request 3489 * @rq: the request to be setup 3490 * @rq_src: original request to be cloned 3491 * @bs: bio_set that bios for clone are allocated from 3492 * @gfp_mask: memory allocation mask for bio 3493 * @bio_ctr: setup function to be called for each clone bio. 3494 * Returns %0 for success, non %0 for failure. 3495 * @data: private data to be passed to @bio_ctr 3496 * 3497 * Description: 3498 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. 3499 * The actual data parts of @rq_src (e.g. ->cmd, ->sense) 3500 * are not copied, and copying such parts is the caller's responsibility. 3501 * Also, pages which the original bios are pointing to are not copied 3502 * and the cloned bios just point same pages. 3503 * So cloned bios must be completed before original bios, which means 3504 * the caller must complete @rq before @rq_src. 3505 */ 3506 int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 3507 struct bio_set *bs, gfp_t gfp_mask, 3508 int (*bio_ctr)(struct bio *, struct bio *, void *), 3509 void *data) 3510 { 3511 struct bio *bio, *bio_src; 3512 3513 if (!bs) 3514 bs = &fs_bio_set; 3515 3516 __rq_for_each_bio(bio_src, rq_src) { 3517 bio = bio_clone_fast(bio_src, gfp_mask, bs); 3518 if (!bio) 3519 goto free_and_out; 3520 3521 if (bio_ctr && bio_ctr(bio, bio_src, data)) 3522 goto free_and_out; 3523 3524 if (rq->bio) { 3525 rq->biotail->bi_next = bio; 3526 rq->biotail = bio; 3527 } else 3528 rq->bio = rq->biotail = bio; 3529 } 3530 3531 __blk_rq_prep_clone(rq, rq_src); 3532 3533 return 0; 3534 3535 free_and_out: 3536 if (bio) 3537 bio_put(bio); 3538 blk_rq_unprep_clone(rq); 3539 3540 return -ENOMEM; 3541 } 3542 EXPORT_SYMBOL_GPL(blk_rq_prep_clone); 3543 3544 int kblockd_schedule_work(struct work_struct *work) 3545 { 3546 return queue_work(kblockd_workqueue, work); 3547 } 3548 EXPORT_SYMBOL(kblockd_schedule_work); 3549 3550 int kblockd_schedule_work_on(int cpu, struct work_struct *work) 3551 { 3552 return queue_work_on(cpu, kblockd_workqueue, work); 3553 } 3554 EXPORT_SYMBOL(kblockd_schedule_work_on); 3555 3556 int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, 3557 unsigned long delay) 3558 { 3559 return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay); 3560 } 3561 EXPORT_SYMBOL(kblockd_mod_delayed_work_on); 3562 3563 /** 3564 * blk_start_plug - initialize blk_plug and track it inside the task_struct 3565 * @plug: The &struct blk_plug that needs to be initialized 3566 * 3567 * Description: 3568 * Tracking blk_plug inside the task_struct will help with auto-flushing the 3569 * pending I/O should the task end up blocking between blk_start_plug() and 3570 * blk_finish_plug(). This is important from a performance perspective, but 3571 * also ensures that we don't deadlock. For instance, if the task is blocking 3572 * for a memory allocation, memory reclaim could end up wanting to free a 3573 * page belonging to that request that is currently residing in our private 3574 * plug. By flushing the pending I/O when the process goes to sleep, we avoid 3575 * this kind of deadlock. 3576 */ 3577 void blk_start_plug(struct blk_plug *plug) 3578 { 3579 struct task_struct *tsk = current; 3580 3581 /* 3582 * If this is a nested plug, don't actually assign it. 3583 */ 3584 if (tsk->plug) 3585 return; 3586 3587 INIT_LIST_HEAD(&plug->list); 3588 INIT_LIST_HEAD(&plug->mq_list); 3589 INIT_LIST_HEAD(&plug->cb_list); 3590 /* 3591 * Store ordering should not be needed here, since a potential 3592 * preempt will imply a full memory barrier 3593 */ 3594 tsk->plug = plug; 3595 } 3596 EXPORT_SYMBOL(blk_start_plug); 3597 3598 static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b) 3599 { 3600 struct request *rqa = container_of(a, struct request, queuelist); 3601 struct request *rqb = container_of(b, struct request, queuelist); 3602 3603 return !(rqa->q < rqb->q || 3604 (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb))); 3605 } 3606 3607 /* 3608 * If 'from_schedule' is true, then postpone the dispatch of requests 3609 * until a safe kblockd context. We due this to avoid accidental big 3610 * additional stack usage in driver dispatch, in places where the originally 3611 * plugger did not intend it. 3612 */ 3613 static void queue_unplugged(struct request_queue *q, unsigned int depth, 3614 bool from_schedule) 3615 __releases(q->queue_lock) 3616 { 3617 lockdep_assert_held(q->queue_lock); 3618 3619 trace_block_unplug(q, depth, !from_schedule); 3620 3621 if (from_schedule) 3622 blk_run_queue_async(q); 3623 else 3624 __blk_run_queue(q); 3625 spin_unlock_irq(q->queue_lock); 3626 } 3627 3628 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) 3629 { 3630 LIST_HEAD(callbacks); 3631 3632 while (!list_empty(&plug->cb_list)) { 3633 list_splice_init(&plug->cb_list, &callbacks); 3634 3635 while (!list_empty(&callbacks)) { 3636 struct blk_plug_cb *cb = list_first_entry(&callbacks, 3637 struct blk_plug_cb, 3638 list); 3639 list_del(&cb->list); 3640 cb->callback(cb, from_schedule); 3641 } 3642 } 3643 } 3644 3645 struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data, 3646 int size) 3647 { 3648 struct blk_plug *plug = current->plug; 3649 struct blk_plug_cb *cb; 3650 3651 if (!plug) 3652 return NULL; 3653 3654 list_for_each_entry(cb, &plug->cb_list, list) 3655 if (cb->callback == unplug && cb->data == data) 3656 return cb; 3657 3658 /* Not currently on the callback list */ 3659 BUG_ON(size < sizeof(*cb)); 3660 cb = kzalloc(size, GFP_ATOMIC); 3661 if (cb) { 3662 cb->data = data; 3663 cb->callback = unplug; 3664 list_add(&cb->list, &plug->cb_list); 3665 } 3666 return cb; 3667 } 3668 EXPORT_SYMBOL(blk_check_plugged); 3669 3670 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) 3671 { 3672 struct request_queue *q; 3673 struct request *rq; 3674 LIST_HEAD(list); 3675 unsigned int depth; 3676 3677 flush_plug_callbacks(plug, from_schedule); 3678 3679 if (!list_empty(&plug->mq_list)) 3680 blk_mq_flush_plug_list(plug, from_schedule); 3681 3682 if (list_empty(&plug->list)) 3683 return; 3684 3685 list_splice_init(&plug->list, &list); 3686 3687 list_sort(NULL, &list, plug_rq_cmp); 3688 3689 q = NULL; 3690 depth = 0; 3691 3692 while (!list_empty(&list)) { 3693 rq = list_entry_rq(list.next); 3694 list_del_init(&rq->queuelist); 3695 BUG_ON(!rq->q); 3696 if (rq->q != q) { 3697 /* 3698 * This drops the queue lock 3699 */ 3700 if (q) 3701 queue_unplugged(q, depth, from_schedule); 3702 q = rq->q; 3703 depth = 0; 3704 spin_lock_irq(q->queue_lock); 3705 } 3706 3707 /* 3708 * Short-circuit if @q is dead 3709 */ 3710 if (unlikely(blk_queue_dying(q))) { 3711 __blk_end_request_all(rq, BLK_STS_IOERR); 3712 continue; 3713 } 3714 3715 /* 3716 * rq is already accounted, so use raw insert 3717 */ 3718 if (op_is_flush(rq->cmd_flags)) 3719 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); 3720 else 3721 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); 3722 3723 depth++; 3724 } 3725 3726 /* 3727 * This drops the queue lock 3728 */ 3729 if (q) 3730 queue_unplugged(q, depth, from_schedule); 3731 } 3732 3733 void blk_finish_plug(struct blk_plug *plug) 3734 { 3735 if (plug != current->plug) 3736 return; 3737 blk_flush_plug_list(plug, false); 3738 3739 current->plug = NULL; 3740 } 3741 EXPORT_SYMBOL(blk_finish_plug); 3742 3743 #ifdef CONFIG_PM 3744 /** 3745 * blk_pm_runtime_init - Block layer runtime PM initialization routine 3746 * @q: the queue of the device 3747 * @dev: the device the queue belongs to 3748 * 3749 * Description: 3750 * Initialize runtime-PM-related fields for @q and start auto suspend for 3751 * @dev. Drivers that want to take advantage of request-based runtime PM 3752 * should call this function after @dev has been initialized, and its 3753 * request queue @q has been allocated, and runtime PM for it can not happen 3754 * yet(either due to disabled/forbidden or its usage_count > 0). In most 3755 * cases, driver should call this function before any I/O has taken place. 3756 * 3757 * This function takes care of setting up using auto suspend for the device, 3758 * the autosuspend delay is set to -1 to make runtime suspend impossible 3759 * until an updated value is either set by user or by driver. Drivers do 3760 * not need to touch other autosuspend settings. 3761 * 3762 * The block layer runtime PM is request based, so only works for drivers 3763 * that use request as their IO unit instead of those directly use bio's. 3764 */ 3765 void blk_pm_runtime_init(struct request_queue *q, struct device *dev) 3766 { 3767 /* not support for RQF_PM and ->rpm_status in blk-mq yet */ 3768 if (q->mq_ops) 3769 return; 3770 3771 q->dev = dev; 3772 q->rpm_status = RPM_ACTIVE; 3773 pm_runtime_set_autosuspend_delay(q->dev, -1); 3774 pm_runtime_use_autosuspend(q->dev); 3775 } 3776 EXPORT_SYMBOL(blk_pm_runtime_init); 3777 3778 /** 3779 * blk_pre_runtime_suspend - Pre runtime suspend check 3780 * @q: the queue of the device 3781 * 3782 * Description: 3783 * This function will check if runtime suspend is allowed for the device 3784 * by examining if there are any requests pending in the queue. If there 3785 * are requests pending, the device can not be runtime suspended; otherwise, 3786 * the queue's status will be updated to SUSPENDING and the driver can 3787 * proceed to suspend the device. 3788 * 3789 * For the not allowed case, we mark last busy for the device so that 3790 * runtime PM core will try to autosuspend it some time later. 3791 * 3792 * This function should be called near the start of the device's 3793 * runtime_suspend callback. 3794 * 3795 * Return: 3796 * 0 - OK to runtime suspend the device 3797 * -EBUSY - Device should not be runtime suspended 3798 */ 3799 int blk_pre_runtime_suspend(struct request_queue *q) 3800 { 3801 int ret = 0; 3802 3803 if (!q->dev) 3804 return ret; 3805 3806 spin_lock_irq(q->queue_lock); 3807 if (q->nr_pending) { 3808 ret = -EBUSY; 3809 pm_runtime_mark_last_busy(q->dev); 3810 } else { 3811 q->rpm_status = RPM_SUSPENDING; 3812 } 3813 spin_unlock_irq(q->queue_lock); 3814 return ret; 3815 } 3816 EXPORT_SYMBOL(blk_pre_runtime_suspend); 3817 3818 /** 3819 * blk_post_runtime_suspend - Post runtime suspend processing 3820 * @q: the queue of the device 3821 * @err: return value of the device's runtime_suspend function 3822 * 3823 * Description: 3824 * Update the queue's runtime status according to the return value of the 3825 * device's runtime suspend function and mark last busy for the device so 3826 * that PM core will try to auto suspend the device at a later time. 3827 * 3828 * This function should be called near the end of the device's 3829 * runtime_suspend callback. 3830 */ 3831 void blk_post_runtime_suspend(struct request_queue *q, int err) 3832 { 3833 if (!q->dev) 3834 return; 3835 3836 spin_lock_irq(q->queue_lock); 3837 if (!err) { 3838 q->rpm_status = RPM_SUSPENDED; 3839 } else { 3840 q->rpm_status = RPM_ACTIVE; 3841 pm_runtime_mark_last_busy(q->dev); 3842 } 3843 spin_unlock_irq(q->queue_lock); 3844 } 3845 EXPORT_SYMBOL(blk_post_runtime_suspend); 3846 3847 /** 3848 * blk_pre_runtime_resume - Pre runtime resume processing 3849 * @q: the queue of the device 3850 * 3851 * Description: 3852 * Update the queue's runtime status to RESUMING in preparation for the 3853 * runtime resume of the device. 3854 * 3855 * This function should be called near the start of the device's 3856 * runtime_resume callback. 3857 */ 3858 void blk_pre_runtime_resume(struct request_queue *q) 3859 { 3860 if (!q->dev) 3861 return; 3862 3863 spin_lock_irq(q->queue_lock); 3864 q->rpm_status = RPM_RESUMING; 3865 spin_unlock_irq(q->queue_lock); 3866 } 3867 EXPORT_SYMBOL(blk_pre_runtime_resume); 3868 3869 /** 3870 * blk_post_runtime_resume - Post runtime resume processing 3871 * @q: the queue of the device 3872 * @err: return value of the device's runtime_resume function 3873 * 3874 * Description: 3875 * Update the queue's runtime status according to the return value of the 3876 * device's runtime_resume function. If it is successfully resumed, process 3877 * the requests that are queued into the device's queue when it is resuming 3878 * and then mark last busy and initiate autosuspend for it. 3879 * 3880 * This function should be called near the end of the device's 3881 * runtime_resume callback. 3882 */ 3883 void blk_post_runtime_resume(struct request_queue *q, int err) 3884 { 3885 if (!q->dev) 3886 return; 3887 3888 spin_lock_irq(q->queue_lock); 3889 if (!err) { 3890 q->rpm_status = RPM_ACTIVE; 3891 __blk_run_queue(q); 3892 pm_runtime_mark_last_busy(q->dev); 3893 pm_request_autosuspend(q->dev); 3894 } else { 3895 q->rpm_status = RPM_SUSPENDED; 3896 } 3897 spin_unlock_irq(q->queue_lock); 3898 } 3899 EXPORT_SYMBOL(blk_post_runtime_resume); 3900 3901 /** 3902 * blk_set_runtime_active - Force runtime status of the queue to be active 3903 * @q: the queue of the device 3904 * 3905 * If the device is left runtime suspended during system suspend the resume 3906 * hook typically resumes the device and corrects runtime status 3907 * accordingly. However, that does not affect the queue runtime PM status 3908 * which is still "suspended". This prevents processing requests from the 3909 * queue. 3910 * 3911 * This function can be used in driver's resume hook to correct queue 3912 * runtime PM status and re-enable peeking requests from the queue. It 3913 * should be called before first request is added to the queue. 3914 */ 3915 void blk_set_runtime_active(struct request_queue *q) 3916 { 3917 spin_lock_irq(q->queue_lock); 3918 q->rpm_status = RPM_ACTIVE; 3919 pm_runtime_mark_last_busy(q->dev); 3920 pm_request_autosuspend(q->dev); 3921 spin_unlock_irq(q->queue_lock); 3922 } 3923 EXPORT_SYMBOL(blk_set_runtime_active); 3924 #endif 3925 3926 int __init blk_dev_init(void) 3927 { 3928 BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS)); 3929 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 * 3930 FIELD_SIZEOF(struct request, cmd_flags)); 3931 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 * 3932 FIELD_SIZEOF(struct bio, bi_opf)); 3933 3934 /* used for unplugging and affects IO latency/throughput - HIGHPRI */ 3935 kblockd_workqueue = alloc_workqueue("kblockd", 3936 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); 3937 if (!kblockd_workqueue) 3938 panic("Failed to create kblockd\n"); 3939 3940 request_cachep = kmem_cache_create("blkdev_requests", 3941 sizeof(struct request), 0, SLAB_PANIC, NULL); 3942 3943 blk_requestq_cachep = kmem_cache_create("request_queue", 3944 sizeof(struct request_queue), 0, SLAB_PANIC, NULL); 3945 3946 #ifdef CONFIG_DEBUG_FS 3947 blk_debugfs_root = debugfs_create_dir("block", NULL); 3948 #endif 3949 3950 return 0; 3951 } 3952