/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"

#ifdef CONFIG_DEBUG_FS
struct dentry *blk_debugfs_root;
#endif

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

DEFINE_IDA(blk_queue_ida);

/*
 * For the allocated request tables
 */
struct kmem_cache *request_cachep;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

static void blk_clear_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
	clear_wb_congested(rl->blkg->wb_congested, sync);
#else
	/*
	 * If !CGROUP_WRITEBACK, all blkg's map to bdi->wb and we shouldn't
	 * flip its congestion state for events on other blkcgs.
	 */
	if (rl == &rl->q->root_rl)
		clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}

static void blk_set_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
	set_wb_congested(rl->blkg->wb_congested, sync);
#else
	/* see blk_clear_congested() */
	if (rl == &rl->q->root_rl)
		set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}

void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->timeout_list);
	rq->cpu = -1;
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = -1;
	rq->internal_tag = -1;
	rq->start_time = jiffies;
	set_start_time_ns(rq);
	rq->part = NULL;
}
EXPORT_SYMBOL(blk_rq_init);

static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG,	"dm internal retry" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);
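
/*
 * Illustrative sketch, not part of the original file: a driver that gets
 * a plain errno back from a lower layer can translate it with
 * errno_to_blk_status() when completing a request, and user-facing code
 * can map a blk_status_t back to an errno with blk_status_to_errno().
 * blk_mq_end_request() is the normal blk-mq completion helper;
 * example_complete_rq() is a hypothetical name.
 */
static inline void example_complete_rq(struct request *rq, int err)
{
	blk_mq_end_request(rq, errno_to_blk_status(err));
}
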
static void print_req_error(struct request *req, blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return;

	printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
			   __func__, blk_errors[idx].name, req->rq_disk ?
			   req->rq_disk->disk_name : "?",
			   (unsigned long long)blk_rq_pos(req));
}

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (error)
		bio->bi_status = error;

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);

	bio_advance(bio, nbytes);

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void blk_delay_work(struct work_struct *work)
{
	struct request_queue *q;

	q = container_of(work, struct request_queue, delay_work.work);
	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_delay_queue - restart queueing after defined interval
 * @q:		The &struct request_queue in question
 * @msecs:	Delay in msecs
 *
 * Description:
 *   Sometimes queueing needs to be postponed for a little while, to allow
 *   resources to come back. This function will make sure that queueing is
 *   restarted around the specified time.
 */
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (likely(!blk_queue_dead(q)))
		queue_delayed_work(kblockd_workqueue, &q->delay_work,
				   msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);
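
/*
 * Illustrative sketch, not part of the original file: a legacy request_fn
 * driver that runs out of a per-device resource can requeue the request
 * and let kblockd restart dispatch a few milliseconds later. The
 * example_* helpers are hypothetical; blk_fetch_request() and
 * blk_requeue_request() are the real legacy dispatch helpers.
 */
static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if (!example_alloc_resources(rq)) {
			/* back off, retry dispatch in 3 msecs */
			blk_requeue_request(q, rq);
			blk_delay_queue(q, 3);
			return;
		}
		example_dispatch(rq);
	}
}
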
/**
 * blk_start_queue_async - asynchronously restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue_async() will clear the stop flag on the queue, and
 *   ensure that the request_fn for the queue is run from an async
 *   context.
 **/
void blk_start_queue_async(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	blk_run_queue_async(q);
}
EXPORT_SYMBOL(blk_start_queue_async);

/**
 * blk_start_queue - restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue().
 **/
void blk_start_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON(!in_interrupt() && !irqs_disabled());
	WARN_ON_ONCE(q->mq_ops);

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	__blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations.
 **/
void blk_stop_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	cancel_delayed_work(&q->delay_work);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);
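
/*
 * Illustrative sketch, not part of the original file: the intended
 * pairing of blk_stop_queue() and blk_start_queue(). On a 'queue full'
 * reply the request_fn path stops the queue; the completion path
 * restarts it once the hardware has room again. Both helpers expect
 * q->queue_lock to be held. The example_* names are hypothetical.
 */
static void example_handle_queue_full(struct request_queue *q,
				      struct request *rq)
{
	/* called from request_fn context, queue_lock already held */
	blk_requeue_request(q, rq);
	blk_stop_queue(q);
}

static void example_on_completion(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_queue_stopped(q))
		blk_start_queue(q);	/* irqs are off, as required */
	spin_unlock_irqrestore(q->queue_lock, flags);
}
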
/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);

	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;
		int i;

		cancel_delayed_work_sync(&q->requeue_work);
		queue_for_each_hw_ctx(q, hctx, i)
			cancel_delayed_work_sync(&hctx->run_work);
	} else {
		cancel_delayed_work_sync(&q->delay_work);
	}
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_preempt_only - set QUEUE_FLAG_PREEMPT_ONLY
 * @q: request queue pointer
 *
 * Returns the previous value of the PREEMPT_ONLY flag - 0 if the flag was not
 * set and 1 if the flag was already set.
 */
int blk_set_preempt_only(struct request_queue *q)
{
	unsigned long flags;
	int res;

	spin_lock_irqsave(q->queue_lock, flags);
	res = queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return res;
}
EXPORT_SYMBOL_GPL(blk_set_preempt_only);

void blk_clear_preempt_only(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
	wake_up_all(&q->mq_freeze_wq);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_clear_preempt_only);

/**
 * __blk_run_queue_uncond - run a queue whether or not it has been stopped
 * @q:	The queue to run
 *
 * Description:
 *    Invoke request handling on a queue if there are any pending requests.
 *    May be used to restart request handling after a request has completed.
 *    This variant runs the queue whether or not the queue has been
 *    stopped. Must be called with the queue lock held and interrupts
 *    disabled. See also @blk_run_queue.
 */
inline void __blk_run_queue_uncond(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (unlikely(blk_queue_dead(q)))
		return;

	/*
	 * Some request_fn implementations, e.g. scsi_request_fn(), unlock
	 * the queue lock internally. As a result multiple threads may be
	 * running such a request function concurrently. Keep track of the
	 * number of active request_fn invocations such that blk_drain_queue()
	 * can wait until all these request_fn calls have finished.
	 */
	q->request_fn_active++;
	q->request_fn(q);
	q->request_fn_active--;
}
EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);

/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 *
 * Description:
 *    See @blk_run_queue.
 */
void __blk_run_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (unlikely(blk_queue_stopped(q)))
		return;

	__blk_run_queue_uncond(q);
}
EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q:	The queue to run
 *
 * Description:
 *    Tells kblockd to perform the equivalent of @blk_run_queue on our
 *    behalf.
 *
 * Note:
 *    Since it is not allowed to run q->delay_work after blk_cleanup_queue()
 *    has canceled q->delay_work, callers must hold the queue lock to avoid
 *    race conditions between blk_cleanup_queue() and blk_run_queue_async().
 */
void blk_run_queue_async(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
EXPORT_SYMBOL(blk_run_queue_async);

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on this queue, if it has pending work to do.
 *    May be used to restart queueing when a request has completed.
 */
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	WARN_ON_ONCE(q->mq_ops);

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
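
/*
 * Illustrative sketch, not part of the original file: blk_run_queue()
 * takes q->queue_lock itself, so a completion interrupt handler may call
 * it directly to kick dispatch (assumes <linux/interrupt.h> for the
 * irqreturn_t definitions; example_irq() is a hypothetical handler).
 */
static irqreturn_t example_irq(int irq, void *data)
{
	struct request_queue *q = data;

	/* ... acknowledge the hardware and complete finished requests ... */
	blk_run_queue(q);
	return IRQ_HANDLED;
}
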
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

/**
 * __blk_drain_queue - drain requests from request_queue
 * @q: queue to drain
 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
 *
 * Drain requests from @q.  If @drain_all is set, all requests are drained.
 * If not, only ELVPRIV requests are drained.  The caller is responsible
 * for ensuring that no new requests which need to be drained are queued.
 */
static void __blk_drain_queue(struct request_queue *q, bool drain_all)
	__releases(q->queue_lock)
	__acquires(q->queue_lock)
{
	int i;

	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	while (true) {
		bool drain = false;

		/*
		 * The caller might be trying to drain @q before its
		 * elevator is initialized.
		 */
		if (q->elevator)
			elv_drain_elevator(q);

		blkcg_drain_queue(q);

		/*
		 * This function might be called on a queue which failed
		 * driver init after queue creation or is not yet fully
		 * active. Some drivers (e.g. fd and loop) get unhappy in
		 * such cases. Kick queue iff dispatch queue has
		 * something on it and @q has request_fn set.
		 */
		if (!list_empty(&q->queue_head) && q->request_fn)
			__blk_run_queue(q);

		drain |= q->nr_rqs_elvpriv;
		drain |= q->request_fn_active;

		/*
		 * Unfortunately, requests are queued at and tracked from
		 * multiple places and there's no single counter which can
		 * be drained. Check all the queues and counters.
		 */
		if (drain_all) {
			struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
			drain |= !list_empty(&q->queue_head);
			for (i = 0; i < 2; i++) {
				drain |= q->nr_rqs[i];
				drain |= q->in_flight[i];
				if (fq)
					drain |= !list_empty(&fq->flush_queue[i]);
			}
		}

		if (!drain)
			break;

		spin_unlock_irq(q->queue_lock);

		msleep(10);

		spin_lock_irq(q->queue_lock);
	}

	/*
	 * With queue marked dead, any woken up waiter will fail the
	 * allocation path, so the wakeup chaining is lost and we're
	 * left with hung waiters. We need to wake up those waiters.
	 */
	if (q->request_fn) {
		struct request_list *rl;

		blk_queue_for_each_rl(rl, q)
			for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
				wake_up_all(&rl->wait[i]);
	}
}

/**
 * blk_queue_bypass_start - enter queue bypass mode
 * @q: queue of interest
 *
 * In bypass mode, only the dispatch FIFO queue of @q is used.  This
 * function makes @q enter bypass mode and drains all requests which were
 * throttled or issued before.  On return, it's guaranteed that no request
 * is being throttled or has ELVPRIV set and blk_queue_bypass() %true
 * inside queue or RCU read lock.
 */
void blk_queue_bypass_start(struct request_queue *q)
{
	WARN_ON_ONCE(q->mq_ops);

	spin_lock_irq(q->queue_lock);
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);
	spin_unlock_irq(q->queue_lock);

	/*
	 * Queues start drained.  Skip actual draining till init is
	 * complete.  This avoids lengthy delays during queue init which
	 * can happen many times during boot.
	 */
	if (blk_queue_init_done(q)) {
		spin_lock_irq(q->queue_lock);
		__blk_drain_queue(q, false);
		spin_unlock_irq(q->queue_lock);

		/* ensure blk_queue_bypass() is %true inside RCU read lock */
		synchronize_rcu();
	}
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_start);

/**
 * blk_queue_bypass_end - leave queue bypass mode
 * @q: queue of interest
 *
 * Leave bypass mode and restore the normal queueing behavior.
 *
 * Note: although blk_queue_bypass_start() is only called for blk-sq queues,
 * this function is called for both blk-sq and blk-mq queues.
 */
void blk_queue_bypass_end(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	if (!--q->bypass_depth)
		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
	WARN_ON_ONCE(q->bypass_depth < 0);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_end);

void blk_set_queue_dying(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	queue_flag_set(QUEUE_FLAG_DYING, q);
	spin_unlock_irq(q->queue_lock);

	/*
	 * When the DYING flag is set, we need to block new requests from
	 * entering the queue, so call blk_freeze_queue_start() to prevent
	 * I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);

	if (q->mq_ops)
		blk_mq_wake_waiters(q);
	else {
		struct request_list *rl;

		spin_lock_irq(q->queue_lock);
		blk_queue_for_each_rl(rl, q) {
			if (rl->rq_pool) {
				wake_up_all(&rl->wait[BLK_RW_SYNC]);
				wake_up_all(&rl->wait[BLK_RW_ASYNC]);
			}
		}
		spin_unlock_irq(q->queue_lock);
	}

	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 */
void blk_cleanup_queue(struct request_queue *q)
{
	spinlock_t *lock = q->queue_lock;

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	mutex_lock(&q->sysfs_lock);
	blk_set_queue_dying(q);
	spin_lock_irq(lock);

	/*
	 * A dying queue is permanently in bypass mode till released.  Note
	 * that, unlike blk_queue_bypass_start(), we aren't performing
	 * synchronize_rcu() after entering bypass mode to avoid the delay
	 * as some drivers create and destroy a lot of queues while
	 * probing.  This is still safe because blk_release_queue() will be
	 * called only after the queue refcnt drops to zero and nothing,
	 * RCU or not, would be traversing the queue by then.
	 */
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);

	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	queue_flag_set(QUEUE_FLAG_DYING, q);
	spin_unlock_irq(lock);
	mutex_unlock(&q->sysfs_lock);

	/*
	 * Drain all requests queued before DYING marking. Set the DEAD flag
	 * to prevent q->request_fn() from being invoked after draining has
	 * finished.
	 */
	blk_freeze_queue(q);
	spin_lock_irq(lock);
	if (!q->mq_ops)
		__blk_drain_queue(q, true);
	queue_flag_set(QUEUE_FLAG_DEAD, q);
	spin_unlock_irq(lock);

	/* for synchronous bio-based driver finish in-flight integrity i/o */
	blk_flush_integrity();

	/* @q won't process any more request, flush async actions */
	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
	blk_sync_queue(q);

	if (q->mq_ops)
		blk_mq_free_queue(q);
	percpu_ref_exit(&q->q_usage_counter);

	spin_lock_irq(lock);
	if (q->queue_lock != &q->__queue_lock)
		q->queue_lock = &q->__queue_lock;
	spin_unlock_irq(lock);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

/* Allocate memory local to the request queue */
static void *alloc_request_simple(gfp_t gfp_mask, void *data)
{
	struct request_queue *q = data;

	return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node);
}

static void free_request_simple(void *element, void *data)
{
	kmem_cache_free(request_cachep, element);
}

static void *alloc_request_size(gfp_t gfp_mask, void *data)
{
	struct request_queue *q = data;
	struct request *rq;

	rq = kmalloc_node(sizeof(struct request) + q->cmd_size, gfp_mask,
			q->node);
	if (rq && q->init_rq_fn && q->init_rq_fn(q, rq, gfp_mask) < 0) {
		kfree(rq);
		rq = NULL;
	}
	return rq;
}

static void free_request_size(void *element, void *data)
{
	struct request_queue *q = data;

	if (q->exit_rq_fn)
		q->exit_rq_fn(q, element);
	kfree(element);
}

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask)
{
	if (unlikely(rl->rq_pool) || q->mq_ops)
		return 0;

	rl->q = q;
	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

	if (q->cmd_size) {
		rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
				alloc_request_size, free_request_size,
				q, gfp_mask, q->node);
	} else {
		rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
				alloc_request_simple, free_request_simple,
				q, gfp_mask, q->node);
	}
	if (!rl->rq_pool)
		return -ENOMEM;

	if (rl != &q->root_rl)
		WARN_ON_ONCE(!blk_get_queue(q));

	return 0;
}

void blk_exit_rl(struct request_queue *q, struct request_list *rl)
{
	if (rl->rq_pool) {
		mempool_destroy(rl->rq_pool);
		if (rl != &q->root_rl)
			blk_put_queue(q);
	}
}

struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_alloc_queue);

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PREEMPT
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool preempt = flags & BLK_MQ_REQ_PREEMPT;

	while (true) {
		bool success = false;
		int ret;

		rcu_read_lock_sched();
		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
			/*
			 * The code that sets the PREEMPT_ONLY flag is
			 * responsible for ensuring that that flag is globally
			 * visible before the queue is unfrozen.
			 */
			if (preempt || !blk_queue_preempt_only(q)) {
				success = true;
			} else {
				percpu_ref_put(&q->q_usage_counter);
			}
		}
		rcu_read_unlock_sched();

		if (success)
			return 0;

		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/*
		 * This is the read pair of the barrier in
		 * blk_freeze_queue_start(): we need to order reading the
		 * __PERCPU_REF_DEAD flag of .q_usage_counter against reading
		 * .mq_freeze_depth or the queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();

		ret = wait_event_interruptible(q->mq_freeze_wq,
				(atomic_read(&q->mq_freeze_depth) == 0 &&
				 (preempt || !blk_queue_preempt_only(q))) ||
				blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
		if (ret)
			return ret;
	}
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}
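
/*
 * Illustrative sketch, not part of the original file: submitters that
 * bypass the normal entry points must bracket queue usage with
 * blk_queue_enter()/blk_queue_exit(). BLK_MQ_REQ_NOWAIT turns a frozen
 * queue into -EBUSY instead of sleeping, and BLK_MQ_REQ_PREEMPT lets
 * power-management requests through a PREEMPT_ONLY queue.
 * example_issue() is a hypothetical name.
 */
static int example_issue(struct request_queue *q)
{
	int ret;

	ret = blk_queue_enter(q, BLK_MQ_REQ_NOWAIT);
	if (ret)
		return ret;	/* -EBUSY while frozen, -ENODEV once dying */

	/* ... the queue is guaranteed to stay alive in this window ... */

	blk_queue_exit(q);
	return 0;
}
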
static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
	struct request_queue *q;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
	if (q->id < 0)
		goto fail_q;

	q->bio_split = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (!q->bio_split)
		goto fail_id;

	q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
	if (!q->backing_dev_info)
		goto fail_split;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_stats;

	q->backing_dev_info->ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
	q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
	q->backing_dev_info->name = "block";
	q->node = node_id;

	timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
		    laptop_mode_timer_fn, 0);
	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, NULL);
	INIT_LIST_HEAD(&q->queue_head);
	INIT_LIST_HEAD(&q->timeout_list);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif
	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);

	kobject_init(&q->kobj, &blk_queue_ktype);

#ifdef CONFIG_BLK_DEV_IO_TRACE
	mutex_init(&q->blk_trace_mutex);
#endif
	mutex_init(&q->sysfs_lock);
	spin_lock_init(&q->__queue_lock);

	/*
	 * By default initialize queue_lock to internal lock and driver can
	 * override it later if need be.
	 */
	q->queue_lock = &q->__queue_lock;

	/*
	 * A queue starts its life with bypass turned on to avoid
	 * unnecessary bypass on/off overhead and nasty surprises during
	 * init.  The initial bypass will be finished when the queue is
	 * registered by blk_register_queue().
	 */
	q->bypass_depth = 1;
	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);

	init_waitqueue_head(&q->mq_freeze_wq);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_bdi;

	if (blkcg_init_queue(q))
		goto fail_ref;

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
	blk_free_queue_stats(q->stats);
fail_stats:
	bdi_put(q->backing_dev_info);
fail_split:
	bioset_free(q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue_node);

/**
 * blk_init_queue  - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sort requests and coalesce adjacent requests, then it must
 *    call blk_init_queue().  The function @rfn will be called when there
 *    are requests on the queue that need to be processed.  If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time.  If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will be taken also from interrupt context, so irq
 *    disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or %NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/

struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_init_queue);
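
/*
 * Illustrative sketch, not part of the original file: the minimal
 * lifetime of a legacy queue. Passing a NULL @lock keeps the internal
 * per-queue lock; example_request_fn is the hypothetical strategy
 * function sketched near blk_delay_queue() above.
 */
static struct request_queue *example_setup_queue(void)
{
	struct request_queue *q;

	q = blk_init_queue(example_request_fn, NULL);
	if (!q)
		return NULL;
	/* ... set queue limits, attach a gendisk, etc. ... */
	return q;
}

static void example_teardown_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);	/* the required counterpart at unload */
}
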
struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	struct request_queue *q;

	q = blk_alloc_queue_node(GFP_KERNEL, node_id);
	if (!q)
		return NULL;

	q->request_fn = rfn;
	if (lock)
		q->queue_lock = lock;
	if (blk_init_allocated_queue(q) < 0) {
		blk_cleanup_queue(q);
		return NULL;
	}

	return q;
}
EXPORT_SYMBOL(blk_init_queue_node);

static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);

int blk_init_allocated_queue(struct request_queue *q)
{
	WARN_ON_ONCE(q->mq_ops);

	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
	if (!q->fq)
		return -ENOMEM;

	if (q->init_rq_fn && q->init_rq_fn(q, q->fq->flush_rq, GFP_KERNEL))
		goto out_free_flush_queue;

	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
		goto out_exit_flush_rq;

	INIT_WORK(&q->timeout_work, blk_timeout_work);
	q->queue_flags |= QUEUE_FLAG_DEFAULT;

	/*
	 * This also sets hw/phys segments, boundary and size
	 */
	blk_queue_make_request(q, blk_queue_bio);

	q->sg_reserved_size = INT_MAX;

	/* Protect q->elevator from elevator_change */
	mutex_lock(&q->sysfs_lock);

	/* init elevator */
	if (elevator_init(q, NULL)) {
		mutex_unlock(&q->sysfs_lock);
		goto out_exit_flush_rq;
	}

	mutex_unlock(&q->sysfs_lock);
	return 0;

out_exit_flush_rq:
	if (q->exit_rq_fn)
		q->exit_rq_fn(q, q->fq->flush_rq);
out_free_flush_queue:
	blk_free_flush_queue(q->fq);
	return -ENOMEM;
}
EXPORT_SYMBOL(blk_init_allocated_queue);

bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);
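
/*
 * Illustrative sketch, not part of the original file: code that stashes
 * a request_queue pointer beyond its caller's reference must pin it with
 * blk_get_queue() and later drop it with blk_put_queue().
 */
static struct request_queue *example_pin_queue(struct request_queue *q)
{
	if (!blk_get_queue(q))		/* fails once the queue is dying */
		return NULL;
	return q;			/* caller must blk_put_queue() */
}
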
static inline void blk_free_request(struct request_list *rl, struct request *rq)
{
	if (rq->rq_flags & RQF_ELVPRIV) {
		elv_put_request(rl->q, rq);
		if (rq->elv.icq)
			put_io_context(rq->elv.icq->ioc);
	}

	mempool_free(rq, rl->rq_pool);
}

/*
 * ioc_batching returns true if the ioc is a valid batching request and
 * should be given priority access to a request.
 */
static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc)
		return 0;

	/*
	 * Make sure the process is able to allocate at least 1 request
	 * even if the batch times out, otherwise we could theoretically
	 * lose wakeups.
	 */
	return ioc->nr_batch_requests == q->nr_batching ||
		(ioc->nr_batch_requests > 0
		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}

/*
 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 * will cause the process to be a "batcher" on all queues in the system. This
 * is the behaviour we want though - once it gets a wakeup it should be given
 * a nice run.
 */
static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc || ioc_batching(q, ioc))
		return;

	ioc->nr_batch_requests = q->nr_batching;
	ioc->last_waited = jiffies;
}

static void __freed_request(struct request_list *rl, int sync)
{
	struct request_queue *q = rl->q;

	if (rl->count[sync] < queue_congestion_off_threshold(q))
		blk_clear_congested(rl, sync);

	if (rl->count[sync] + 1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[sync]))
			wake_up(&rl->wait[sync]);

		blk_clear_rl_full(rl, sync);
	}
}

/*
 * A request has just been released.  Account for it, update the full and
 * congestion status, wake up any waiters.  Called under q->queue_lock.
 */
static void freed_request(struct request_list *rl, bool sync,
		req_flags_t rq_flags)
{
	struct request_queue *q = rl->q;

	q->nr_rqs[sync]--;
	rl->count[sync]--;
	if (rq_flags & RQF_ELVPRIV)
		q->nr_rqs_elvpriv--;

	__freed_request(rl, sync);

	if (unlikely(rl->starved[sync ^ 1]))
		__freed_request(rl, sync ^ 1);
}

int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct request_list *rl;
	int on_thresh, off_thresh;

	WARN_ON_ONCE(q->mq_ops);

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);
	on_thresh = queue_congestion_on_threshold(q);
	off_thresh = queue_congestion_off_threshold(q);

	blk_queue_for_each_rl(rl, q) {
		if (rl->count[BLK_RW_SYNC] >= on_thresh)
			blk_set_congested(rl, BLK_RW_SYNC);
		else if (rl->count[BLK_RW_SYNC] < off_thresh)
			blk_clear_congested(rl, BLK_RW_SYNC);

		if (rl->count[BLK_RW_ASYNC] >= on_thresh)
			blk_set_congested(rl, BLK_RW_ASYNC);
		else if (rl->count[BLK_RW_ASYNC] < off_thresh)
			blk_clear_congested(rl, BLK_RW_ASYNC);

		if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
			blk_set_rl_full(rl, BLK_RW_SYNC);
		} else {
			blk_clear_rl_full(rl, BLK_RW_SYNC);
			wake_up(&rl->wait[BLK_RW_SYNC]);
		}

		if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
			blk_set_rl_full(rl, BLK_RW_ASYNC);
		} else {
			blk_clear_rl_full(rl, BLK_RW_ASYNC);
			wake_up(&rl->wait[BLK_RW_ASYNC]);
		}
	}

	spin_unlock_irq(q->queue_lock);
	return 0;
}

/**
 * __get_request - get a free request
 * @rl: request list to allocate from
 * @op: operation and flags
 * @bio: bio to allocate request for (can be %NULL)
 * @flags: BLK_MQ_REQ_* flags
 *
 * Get a free request from @q.  This function may fail under memory
 * pressure or if @q is dead.
 *
 * Must be called with @q->queue_lock held and,
 * Returns ERR_PTR on failure, with @q->queue_lock held.
 * Returns request pointer on success, with @q->queue_lock *not held*.
 */
static struct request *__get_request(struct request_list *rl, unsigned int op,
				     struct bio *bio, blk_mq_req_flags_t flags)
{
	struct request_queue *q = rl->q;
	struct request *rq;
	struct elevator_type *et = q->elevator->type;
	struct io_context *ioc = rq_ioc(bio);
	struct io_cq *icq = NULL;
	const bool is_sync = op_is_sync(op);
	int may_queue;
	gfp_t gfp_mask = flags & BLK_MQ_REQ_NOWAIT ?
			GFP_ATOMIC : __GFP_DIRECT_RECLAIM;
	req_flags_t rq_flags = RQF_ALLOCED;

	lockdep_assert_held(q->queue_lock);

	if (unlikely(blk_queue_dying(q)))
		return ERR_PTR(-ENODEV);

	may_queue = elv_may_queue(q, op);
	if (may_queue == ELV_MQUEUE_NO)
		goto rq_starved;

	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
		if (rl->count[is_sync]+1 >= q->nr_requests) {
			/*
			 * The queue will fill after this allocation, so set
			 * it as full, and mark this process as "batching".
			 * This process will be allowed to complete a batch of
			 * requests, others will be blocked.
			 */
			if (!blk_rl_full(rl, is_sync)) {
				ioc_set_batching(q, ioc);
				blk_set_rl_full(rl, is_sync);
			} else {
				if (may_queue != ELV_MQUEUE_MUST
						&& !ioc_batching(q, ioc)) {
					/*
					 * The queue is full and the allocating
					 * process is not a "batcher", and not
					 * exempted by the IO scheduler
					 */
					return ERR_PTR(-ENOMEM);
				}
			}
		}
		blk_set_congested(rl, is_sync);
	}

	/*
	 * Only allow batching queuers to allocate up to 50% over the defined
	 * limit of requests, otherwise we could have thousands of requests
	 * allocated with any setting of ->nr_requests
	 */
	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
		return ERR_PTR(-ENOMEM);

	q->nr_rqs[is_sync]++;
	rl->count[is_sync]++;
	rl->starved[is_sync] = 0;

	/*
	 * Decide whether the new request will be managed by elevator.  If
	 * so, mark @rq_flags and increment elvpriv.  Non-zero elvpriv will
	 * prevent the current elevator from being destroyed until the new
	 * request is freed.  This guarantees icq's won't be destroyed and
	 * makes creating new ones safe.
	 *
	 * Flush requests do not use the elevator so skip initialization.
	 * This allows a request to share the flush and elevator data.
	 *
	 * Also, lookup icq while holding queue_lock.  If it doesn't exist,
	 * it will be created after releasing queue_lock.
	 */
	if (!op_is_flush(op) && !blk_queue_bypass(q)) {
		rq_flags |= RQF_ELVPRIV;
		q->nr_rqs_elvpriv++;
		if (et->icq_cache && ioc)
			icq = ioc_lookup_icq(ioc, q);
	}

	if (blk_queue_io_stat(q))
		rq_flags |= RQF_IO_STAT;
	spin_unlock_irq(q->queue_lock);

	/* allocate and init request */
	rq = mempool_alloc(rl->rq_pool, gfp_mask);
	if (!rq)
		goto fail_alloc;

	blk_rq_init(q, rq);
	blk_rq_set_rl(rq, rl);
	rq->cmd_flags = op;
	rq->rq_flags = rq_flags;
	if (flags & BLK_MQ_REQ_PREEMPT)
		rq->rq_flags |= RQF_PREEMPT;

	/* init elvpriv */
	if (rq_flags & RQF_ELVPRIV) {
		if (unlikely(et->icq_cache && !icq)) {
			if (ioc)
				icq = ioc_create_icq(ioc, q, gfp_mask);
			if (!icq)
				goto fail_elvpriv;
		}

		rq->elv.icq = icq;
		if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
			goto fail_elvpriv;

		/* @rq->elv.icq holds io_context until @rq is freed */
		if (icq)
			get_io_context(icq->ioc);
	}
out:
	/*
	 * ioc may be NULL here, and ioc_batching will be false. That's
	 * OK, if the queue is under the request limit then requests need
	 * not count toward the nr_batch_requests limit. There will always
	 * be some limit enforced by BLK_BATCH_TIME.
	 */
	if (ioc_batching(q, ioc))
		ioc->nr_batch_requests--;

	trace_block_getrq(q, bio, op);
	return rq;

fail_elvpriv:
	/*
	 * elvpriv init failed.  ioc, icq and elvpriv aren't mempool backed
	 * and may fail indefinitely under memory pressure and thus
	 * shouldn't stall IO.  Treat this request as !elvpriv.  This will
	 * disturb iosched and blkcg but weird is better than dead.
	 */
	printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
			   __func__, dev_name(q->backing_dev_info->dev));

	rq->rq_flags &= ~RQF_ELVPRIV;
	rq->elv.icq = NULL;

	spin_lock_irq(q->queue_lock);
	q->nr_rqs_elvpriv--;
	spin_unlock_irq(q->queue_lock);
	goto out;

fail_alloc:
	/*
	 * Allocation failed presumably due to memory. Undo anything we
	 * might have messed up.
	 *
	 * Allocating task should really be put onto the front of the wait
	 * queue, but this is pretty rare.
	 */
	spin_lock_irq(q->queue_lock);
	freed_request(rl, is_sync, rq_flags);

	/*
	 * in the very unlikely event that allocation failed and no
	 * requests for this direction were pending, mark us starved so that
	 * freeing of a request in the other direction will notice
	 * us. another possible fix would be to split the rq mempool into
	 * READ and WRITE
	 */
rq_starved:
	if (unlikely(rl->count[is_sync] == 0))
		rl->starved[is_sync] = 1;
	return ERR_PTR(-ENOMEM);
}

/**
 * get_request - get a free request
 * @q: request_queue to allocate request from
 * @op: operation and flags
 * @bio: bio to allocate request for (can be %NULL)
 * @flags: BLK_MQ_REQ_* flags.
 *
 * Get a free request from @q.  If %BLK_MQ_REQ_NOWAIT is not set in @flags,
 * this function keeps retrying under memory pressure and fails iff @q is dead.
 *
 * Must be called with @q->queue_lock held and,
 * Returns ERR_PTR on failure, with @q->queue_lock held.
 * Returns request pointer on success, with @q->queue_lock *not held*.
 */
static struct request *get_request(struct request_queue *q, unsigned int op,
				   struct bio *bio, blk_mq_req_flags_t flags)
{
	const bool is_sync = op_is_sync(op);
	DEFINE_WAIT(wait);
	struct request_list *rl;
	struct request *rq;

	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
retry:
	rq = __get_request(rl, op, bio, flags);
	if (!IS_ERR(rq))
		return rq;

	if (op & REQ_NOWAIT) {
		blk_put_rl(rl);
		return ERR_PTR(-EAGAIN);
	}

	if ((flags & BLK_MQ_REQ_NOWAIT) || unlikely(blk_queue_dying(q))) {
		blk_put_rl(rl);
		return rq;
	}

	/* wait on @rl and retry */
	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
				  TASK_UNINTERRUPTIBLE);

	trace_block_sleeprq(q, bio, op);

	spin_unlock_irq(q->queue_lock);
	io_schedule();

	/*
	 * After sleeping, we become a "batching" process and will be able
	 * to allocate at least one request, and up to a big batch of them
	 * for a small period of time. See ioc_batching, ioc_set_batching
	 */
	ioc_set_batching(q, current->io_context);

	spin_lock_irq(q->queue_lock);
	finish_wait(&rl->wait[is_sync], &wait);

	goto retry;
}

/* flags: BLK_MQ_REQ_PREEMPT and/or BLK_MQ_REQ_NOWAIT. */
static struct request *blk_old_get_request(struct request_queue *q,
				unsigned int op, blk_mq_req_flags_t flags)
{
	struct request *rq;
	gfp_t gfp_mask = flags & BLK_MQ_REQ_NOWAIT ? GFP_ATOMIC :
			 __GFP_DIRECT_RECLAIM;
	int ret = 0;

	WARN_ON_ONCE(q->mq_ops);

	/* create ioc upfront */
	create_io_context(gfp_mask, q->node);

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);
	spin_lock_irq(q->queue_lock);
	rq = get_request(q, op, NULL, flags);
	if (IS_ERR(rq)) {
		spin_unlock_irq(q->queue_lock);
		blk_queue_exit(q);
		return rq;
	}

	/* q->queue_lock is unlocked at this point */
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
}

/**
 * blk_get_request_flags - allocate a request
 * @q: request queue to allocate a request for
 * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
 * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
 */
struct request *blk_get_request_flags(struct request_queue *q, unsigned int op,
				      blk_mq_req_flags_t flags)
{
	struct request *req;

	WARN_ON_ONCE(op & REQ_NOWAIT);
	WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT));

	if (q->mq_ops) {
		req = blk_mq_alloc_request(q, op, flags);
		if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
			q->mq_ops->initialize_rq_fn(req);
	} else {
		req = blk_old_get_request(q, op, flags);
		if (!IS_ERR(req) && q->initialize_rq_fn)
			q->initialize_rq_fn(req);
	}

	return req;
}
EXPORT_SYMBOL(blk_get_request_flags);

struct request *blk_get_request(struct request_queue *q, unsigned int op,
				gfp_t gfp_mask)
{
	return blk_get_request_flags(q, op, gfp_mask & __GFP_DIRECT_RECLAIM ?
				     0 : BLK_MQ_REQ_NOWAIT);
}
EXPORT_SYMBOL(blk_get_request);
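
/*
 * Illustrative sketch, not part of the original file: allocating and
 * releasing a driver-private request. With __GFP_DIRECT_RECLAIM in the
 * gfp mask the allocation may sleep; without it the NOWAIT path is taken
 * and the ERR_PTR() return must be handled. REQ_OP_DRV_IN stands in for
 * whatever operation the caller actually issues.
 */
static int example_alloc_and_put(struct request_queue *q)
{
	struct request *rq;

	rq = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* ... fill in and issue the request here ... */

	blk_put_request(rq);
	return 0;
}
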
/**
 * blk_requeue_request - put a request back on queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 *
 * Description:
 *    Drivers often keep queueing requests until the hardware cannot accept
 *    more, when that condition happens we need to put the request back
 *    on the queue. Must be called with queue lock held.
 */
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	blk_delete_timer(rq);
	blk_clear_rq_complete(rq);
	trace_block_rq_requeue(q, rq);
	wbt_requeue(q->rq_wb, &rq->issue_stat);

	if (rq->rq_flags & RQF_QUEUED)
		blk_queue_end_tag(q, rq);

	BUG_ON(blk_queued_rq(rq));

	elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);

static void add_acct_request(struct request_queue *q, struct request *rq,
			     int where)
{
	blk_account_io_start(rq, true);
	__elv_add_request(q, rq, where);
}

static void part_round_stats_single(struct request_queue *q, int cpu,
				    struct hd_struct *part, unsigned long now,
				    unsigned int inflight)
{
	if (inflight) {
		__part_stat_add(cpu, part, time_in_queue,
				inflight * (now - part->stamp));
		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
	}
	part->stamp = now;
}

/**
 * part_round_stats() - Round off the performance stats on a struct disk_stats.
 * @q: target block queue
 * @cpu: cpu number for stats access
 * @part: target partition
 *
 * The average IO queue length and utilisation statistics are maintained
 * by observing the current state of the queue length and the amount of
 * time it has been in this state for.
 *
 * Normally, that accounting is done on IO completion, but that can result
 * in more than a second's worth of IO being accounted for within any one
 * second, leading to >100% utilisation.  To deal with that, we call this
 * function to do a round-off before returning the results when reading
 * /proc/diskstats.  This accounts immediately for all queue usage up to
 * the current jiffies and restarts the counters again.
 */
void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part)
{
	struct hd_struct *part2 = NULL;
	unsigned long now = jiffies;
	unsigned int inflight[2];
	int stats = 0;

	if (part->stamp != now)
		stats |= 1;

	if (part->partno) {
		part2 = &part_to_disk(part)->part0;
		if (part2->stamp != now)
			stats |= 2;
	}

	if (!stats)
		return;

	part_in_flight(q, part, inflight);

	if (stats & 2)
		part_round_stats_single(q, cpu, part2, now, inflight[1]);
	if (stats & 1)
		part_round_stats_single(q, cpu, part, now, inflight[0]);
}
EXPORT_SYMBOL_GPL(part_round_stats);

#ifdef CONFIG_PM
static void blk_pm_put_request(struct request *rq)
{
	if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending)
		pm_runtime_mark_last_busy(rq->q->dev);
}
#else
static inline void blk_pm_put_request(struct request *rq) {}
#endif

void __blk_put_request(struct request_queue *q, struct request *req)
{
	req_flags_t rq_flags = req->rq_flags;

	if (unlikely(!q))
		return;

	if (q->mq_ops) {
		blk_mq_free_request(req);
		return;
	}

	lockdep_assert_held(q->queue_lock);

	blk_pm_put_request(req);

	elv_completed_request(q, req);

	/* this is a bio leak */
	WARN_ON(req->bio != NULL);

	wbt_done(q->rq_wb, &req->issue_stat);

	/*
	 * Request may not have originated from ll_rw_blk. If not,
	 * it didn't come out of our reserved rq pools
	 */
	if (rq_flags & RQF_ALLOCED) {
		struct request_list *rl = blk_rq_rl(req);
		bool sync = op_is_sync(req->cmd_flags);

		BUG_ON(!list_empty(&req->queuelist));
		BUG_ON(ELV_ON_HASH(req));

		blk_free_request(rl, req);
		freed_request(rl, sync, rq_flags);
		blk_put_rl(rl);
		blk_queue_exit(q);
	}
}
EXPORT_SYMBOL_GPL(__blk_put_request);

void blk_put_request(struct request *req)
{
	struct request_queue *q = req->q;

	if (q->mq_ops)
		blk_mq_free_request(req);
	else {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		__blk_put_request(q, req);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
EXPORT_SYMBOL(blk_put_request);

bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
			    struct bio *bio)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_back_merge_fn(q, req, bio))
		return false;

	trace_block_bio_backmerge(q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

	blk_account_io_start(req, false);
	return true;
}

bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
			     struct bio *bio)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_front_merge_fn(q, req, bio))
		return false;

	trace_block_bio_frontmerge(q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	bio->bi_next = req->bio;
	req->bio = bio;

	req->__sector = bio->bi_iter.bi_sector;
	req->__data_len += bio->bi_iter.bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

	blk_account_io_start(req, false);
	return true;
}

bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
			       struct bio *bio)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
	req->nr_phys_segments = segments + 1;

	blk_account_io_start(req, false);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @request_count: out parameter for number of traversed plugged requests
 * @same_queue_rq: pointer to &struct request that gets filled in when
 * another request associated with @q is found on the plug list
 * (optional, may be %NULL)
 *
 * Determine whether @bio being queued on @q can be merged with a request
 * on %current's plugged list.  Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock.  As such it's more of an issuing mechanism
 * than scheduling, and the request, while it may have elvpriv data, is not
 * added on the elevator at this point.  In addition, we don't have
 * reliable access to the elevator outside queue lock.  Only check basic
 * merging parameters without querying the elevator.
 *
 * Caller must ensure !blk_queue_nomerges(q) beforehand.
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
			    unsigned int *request_count,
			    struct request **same_queue_rq)
{
	struct blk_plug *plug;
	struct request *rq;
	struct list_head *plug_list;

	plug = current->plug;
	if (!plug)
		return false;
	*request_count = 0;

	if (q->mq_ops)
		plug_list = &plug->mq_list;
	else
		plug_list = &plug->list;

	list_for_each_entry_reverse(rq, plug_list, queuelist) {
		bool merged = false;

		if (rq->q == q) {
			(*request_count)++;
			/*
			 * Only the blk-mq case with multiple hardware queues
			 * checks for a request on the same queue; there
			 * should be at most one such request on the list.
			 */
			if (same_queue_rq)
				*same_queue_rq = rq;
		}

		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			merged = bio_attempt_back_merge(q, rq, bio);
			break;
		case ELEVATOR_FRONT_MERGE:
			merged = bio_attempt_front_merge(q, rq, bio);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			break;
		}

		if (merged)
			return true;
	}

	return false;
}

unsigned int blk_plug_queued_count(struct request_queue *q)
{
	struct blk_plug *plug;
	struct request *rq;
	struct list_head *plug_list;
	unsigned int ret = 0;

	plug = current->plug;
	if (!plug)
		goto out;

	if (q->mq_ops)
		plug_list = &plug->mq_list;
	else
		plug_list = &plug->list;

	list_for_each_entry(rq, plug_list, queuelist) {
		if (rq->q == q)
			ret++;
	}
out:
	return ret;
}
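
/*
 * Illustrative sketch, not part of the original file: the plug machinery
 * that blk_attempt_plug_merge() feeds off. A submitter batches several
 * bios under one plug so they can be merged and dispatched together when
 * blk_finish_plug() runs (or earlier, if the task schedules out).
 */
static void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);
	blk_finish_plug(&plug);
}
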
void blk_init_request_from_bio(struct request *req, struct bio *bio)
{
	struct io_context *ioc = rq_ioc(bio);

	if (bio->bi_opf & REQ_RAHEAD)
		req->cmd_flags |= REQ_FAILFAST_MASK;

	req->__sector = bio->bi_iter.bi_sector;
	if (ioprio_valid(bio_prio(bio)))
		req->ioprio = bio_prio(bio);
	else if (ioc)
		req->ioprio = ioc->ioprio;
	else
		req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
	req->write_hint = bio->bi_write_hint;
	blk_rq_bio_prep(req->q, req, bio);
}
EXPORT_SYMBOL_GPL(blk_init_request_from_bio);

static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct blk_plug *plug;
	int where = ELEVATOR_INSERT_SORT;
	struct request *req, *free;
	unsigned int request_count = 0;
	unsigned int wb_acct;

	/*
	 * low level driver can indicate that it wants pages above a
	 * certain limit bounced to low memory (ie for highmem, or even
	 * ISA dma in theory)
	 */
	blk_queue_bounce(q, &bio);

	blk_queue_split(q, &bio);

	if (!bio_integrity_prep(bio))
		return BLK_QC_T_NONE;

	if (op_is_flush(bio->bi_opf)) {
		spin_lock_irq(q->queue_lock);
		where = ELEVATOR_INSERT_FLUSH;
		goto get_rq;
	}

	/*
	 * Check if we can merge with the plugged list before grabbing
	 * any locks.
	 */
	if (!blk_queue_nomerges(q)) {
		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
			return BLK_QC_T_NONE;
	} else
		request_count = blk_plug_queued_count(q);

	spin_lock_irq(q->queue_lock);

	switch (elv_merge(q, &req, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!bio_attempt_back_merge(q, req, bio))
			break;
		elv_bio_merged(q, req, bio);
		free = attempt_back_merge(q, req);
		if (free)
			__blk_put_request(q, free);
		else
			elv_merged_request(q, req, ELEVATOR_BACK_MERGE);
		goto out_unlock;
	case ELEVATOR_FRONT_MERGE:
		if (!bio_attempt_front_merge(q, req, bio))
			break;
		elv_bio_merged(q, req, bio);
		free = attempt_front_merge(q, req);
		if (free)
			__blk_put_request(q, free);
		else
			elv_merged_request(q, req, ELEVATOR_FRONT_MERGE);
		goto out_unlock;
	default:
		break;
	}

get_rq:
	wb_acct = wbt_wait(q->rq_wb, bio, q->queue_lock);

	/*
	 * Grab a free request. This might sleep but cannot fail.
	 * Returns with the queue unlocked.
	 */
	blk_queue_enter_live(q);
	req = get_request(q, bio->bi_opf, bio, 0);
	if (IS_ERR(req)) {
		blk_queue_exit(q);
		__wbt_done(q->rq_wb, wb_acct);
		if (PTR_ERR(req) == -ENOMEM)
			bio->bi_status = BLK_STS_RESOURCE;
		else
			bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		goto out_unlock;
	}

	wbt_track(&req->issue_stat, wb_acct);

	/*
	 * After dropping the lock and possibly sleeping here, our request
	 * may now be mergeable after it had proven unmergeable (above).
	 * We don't worry about that case for efficiency. It won't happen
	 * often, and the elevators are able to handle it.
	 */
	blk_init_request_from_bio(req, bio);

	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
		req->cpu = raw_smp_processor_id();

	plug = current->plug;
	if (plug) {
		/*
		 * If this is the first request added after a plug, fire
		 * off a plug trace.
		 *
		 * @request_count may become stale because of schedule
		 * out, so check plug list again.
1983 */ 1984 if (!request_count || list_empty(&plug->list)) 1985 trace_block_plug(q); 1986 else { 1987 struct request *last = list_entry_rq(plug->list.prev); 1988 if (request_count >= BLK_MAX_REQUEST_COUNT || 1989 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE) { 1990 blk_flush_plug_list(plug, false); 1991 trace_block_plug(q); 1992 } 1993 } 1994 list_add_tail(&req->queuelist, &plug->list); 1995 blk_account_io_start(req, true); 1996 } else { 1997 spin_lock_irq(q->queue_lock); 1998 add_acct_request(q, req, where); 1999 __blk_run_queue(q); 2000 out_unlock: 2001 spin_unlock_irq(q->queue_lock); 2002 } 2003 2004 return BLK_QC_T_NONE; 2005 } 2006 2007 static void handle_bad_sector(struct bio *bio) 2008 { 2009 char b[BDEVNAME_SIZE]; 2010 2011 printk(KERN_INFO "attempt to access beyond end of device\n"); 2012 printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n", 2013 bio_devname(bio, b), bio->bi_opf, 2014 (unsigned long long)bio_end_sector(bio), 2015 (long long)get_capacity(bio->bi_disk)); 2016 } 2017 2018 #ifdef CONFIG_FAIL_MAKE_REQUEST 2019 2020 static DECLARE_FAULT_ATTR(fail_make_request); 2021 2022 static int __init setup_fail_make_request(char *str) 2023 { 2024 return setup_fault_attr(&fail_make_request, str); 2025 } 2026 __setup("fail_make_request=", setup_fail_make_request); 2027 2028 static bool should_fail_request(struct hd_struct *part, unsigned int bytes) 2029 { 2030 return part->make_it_fail && should_fail(&fail_make_request, bytes); 2031 } 2032 2033 static int __init fail_make_request_debugfs(void) 2034 { 2035 struct dentry *dir = fault_create_debugfs_attr("fail_make_request", 2036 NULL, &fail_make_request); 2037 2038 return PTR_ERR_OR_ZERO(dir); 2039 } 2040 2041 late_initcall(fail_make_request_debugfs); 2042 2043 #else /* CONFIG_FAIL_MAKE_REQUEST */ 2044 2045 static inline bool should_fail_request(struct hd_struct *part, 2046 unsigned int bytes) 2047 { 2048 return false; 2049 } 2050 2051 #endif /* CONFIG_FAIL_MAKE_REQUEST */ 2052 2053 /* 2054 * Remap block n of partition p to block n+start(p) of the disk. 2055 */ 2056 static inline int blk_partition_remap(struct bio *bio) 2057 { 2058 struct hd_struct *p; 2059 int ret = 0; 2060 2061 /* 2062 * Zone reset does not include bi_size so bio_sectors() is always 0. 2063 * Include a test for the reset op code and perform the remap if needed. 2064 */ 2065 if (!bio->bi_partno || 2066 (!bio_sectors(bio) && bio_op(bio) != REQ_OP_ZONE_RESET)) 2067 return 0; 2068 2069 rcu_read_lock(); 2070 p = __disk_get_part(bio->bi_disk, bio->bi_partno); 2071 if (likely(p && !should_fail_request(p, bio->bi_iter.bi_size))) { 2072 bio->bi_iter.bi_sector += p->start_sect; 2073 bio->bi_partno = 0; 2074 trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p), 2075 bio->bi_iter.bi_sector - p->start_sect); 2076 } else { 2077 printk("%s: fail for partition %d\n", __func__, bio->bi_partno); 2078 ret = -EIO; 2079 } 2080 rcu_read_unlock(); 2081 2082 return ret; 2083 } 2084 2085 /* 2086 * Check whether this bio extends beyond the end of the device. 2087 */ 2088 static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) 2089 { 2090 sector_t maxsector; 2091 2092 if (!nr_sectors) 2093 return 0; 2094 2095 /* Test device or partition size, when known. 
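 * A zero capacity means the size is not known yet (e.g. an empty
 * removable-media drive); in that case the check is skipped.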
*/ 2096 maxsector = get_capacity(bio->bi_disk); 2097 if (maxsector) { 2098 sector_t sector = bio->bi_iter.bi_sector; 2099 2100 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { 2101 /* 2102 * This may well happen - the kernel calls bread() 2103 * without checking the size of the device, e.g., when 2104 * mounting a device. 2105 */ 2106 handle_bad_sector(bio); 2107 return 1; 2108 } 2109 } 2110 2111 return 0; 2112 } 2113 2114 static noinline_for_stack bool 2115 generic_make_request_checks(struct bio *bio) 2116 { 2117 struct request_queue *q; 2118 int nr_sectors = bio_sectors(bio); 2119 blk_status_t status = BLK_STS_IOERR; 2120 char b[BDEVNAME_SIZE]; 2121 2122 might_sleep(); 2123 2124 if (bio_check_eod(bio, nr_sectors)) 2125 goto end_io; 2126 2127 q = bio->bi_disk->queue; 2128 if (unlikely(!q)) { 2129 printk(KERN_ERR 2130 "generic_make_request: Trying to access " 2131 "nonexistent block-device %s (%Lu)\n", 2132 bio_devname(bio, b), (long long)bio->bi_iter.bi_sector); 2133 goto end_io; 2134 } 2135 2136 /* 2137 * For a REQ_NOWAIT based request, return -EOPNOTSUPP 2138 * if the queue is not a request based queue. 2139 */ 2140 2141 if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q)) 2142 goto not_supported; 2143 2144 if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size)) 2145 goto end_io; 2146 2147 if (blk_partition_remap(bio)) 2148 goto end_io; 2149 2150 if (bio_check_eod(bio, nr_sectors)) 2151 goto end_io; 2152 2153 /* 2154 * Filter flush bios early so that make_request based 2155 * drivers without flush support don't have to worry 2156 * about them. 2157 */ 2158 if (op_is_flush(bio->bi_opf) && 2159 !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) { 2160 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA); 2161 if (!nr_sectors) { 2162 status = BLK_STS_OK; 2163 goto end_io; 2164 } 2165 } 2166 2167 switch (bio_op(bio)) { 2168 case REQ_OP_DISCARD: 2169 if (!blk_queue_discard(q)) 2170 goto not_supported; 2171 break; 2172 case REQ_OP_SECURE_ERASE: 2173 if (!blk_queue_secure_erase(q)) 2174 goto not_supported; 2175 break; 2176 case REQ_OP_WRITE_SAME: 2177 if (!q->limits.max_write_same_sectors) 2178 goto not_supported; 2179 break; 2180 case REQ_OP_ZONE_REPORT: 2181 case REQ_OP_ZONE_RESET: 2182 if (!blk_queue_is_zoned(q)) 2183 goto not_supported; 2184 break; 2185 case REQ_OP_WRITE_ZEROES: 2186 if (!q->limits.max_write_zeroes_sectors) 2187 goto not_supported; 2188 break; 2189 default: 2190 break; 2191 } 2192 2193 /* 2194 * Various block parts want %current->io_context and lazy ioc 2195 * allocation ends up trading a lot of pain for a small amount of 2196 * memory. Just allocate it upfront. This may fail and the block 2197 * layer knows how to live with it. 2198 */ 2199 create_io_context(GFP_ATOMIC, q->node); 2200 2201 if (!blkcg_bio_issue_check(q, bio)) 2202 return false; 2203 2204 if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) { 2205 trace_block_bio_queue(q, bio); 2206 /* Now that enqueuing has been traced, we need to trace 2207 * completion as well. 2208 */ 2209 bio_set_flag(bio, BIO_TRACE_COMPLETION); 2210 } 2211 return true; 2212 2213 not_supported: 2214 status = BLK_STS_NOTSUPP; 2215 end_io: 2216 bio->bi_status = status; 2217 bio_endio(bio); 2218 return false; 2219 } 2220 2221 /** 2222 * generic_make_request - hand a buffer to its device driver for I/O 2223 * @bio: The bio describing the location in memory and on the device. 2224 * 2225 * generic_make_request() is used to make I/O requests of block 2226 * devices.
It is passed a &struct bio, which describes the I/O that needs 2227 * to be done. 2228 * 2229 * generic_make_request() does not return any status. The 2230 * success/failure status of the request, along with notification of 2231 * completion, is delivered asynchronously through the bio->bi_end_io 2232 * function described (one day) elsewhere. 2233 * 2234 * The caller of generic_make_request must make sure that bi_io_vec 2235 * are set to describe the memory buffer, and that bi_disk and bi_iter.bi_sector are 2236 * set to describe the device address, and the 2237 * bi_end_io and optionally bi_private are set to describe how 2238 * completion notification should be signaled. 2239 * 2240 * generic_make_request and the drivers it calls may use bi_next if this 2241 * bio happens to be merged with someone else, and may resubmit the bio to 2242 * a lower device by calling into generic_make_request recursively, which 2243 * means the bio should NOT be touched after the call to ->make_request_fn. 2244 */ 2245 blk_qc_t generic_make_request(struct bio *bio) 2246 { 2247 /* 2248 * bio_list_on_stack[0] contains bios submitted by the current 2249 * make_request_fn. 2250 * bio_list_on_stack[1] contains bios that were submitted before 2251 * the current make_request_fn, but that haven't been processed 2252 * yet. 2253 */ 2254 struct bio_list bio_list_on_stack[2]; 2255 blk_qc_t ret = BLK_QC_T_NONE; 2256 2257 if (!generic_make_request_checks(bio)) 2258 goto out; 2259 2260 /* 2261 * We only want one ->make_request_fn to be active at a time, else 2262 * stack usage with stacked devices could be a problem. So use 2263 * current->bio_list to keep a list of requests submitted by a 2264 * make_request_fn function. current->bio_list is also used as a 2265 * flag to say if generic_make_request is currently active in this 2266 * task or not. If it is NULL, then no make_request is active. If 2267 * it is non-NULL, then a make_request is active, and new requests 2268 * should be added at the tail. 2269 */ 2270 if (current->bio_list) { 2271 bio_list_add(&current->bio_list[0], bio); 2272 goto out; 2273 } 2274 2275 /* The following loop may be a bit non-obvious, and so deserves some 2276 * explanation. 2277 * Before entering the loop, bio->bi_next is NULL (as all callers 2278 * ensure that) so we have a list with a single bio. 2279 * We pretend that we have just taken it off a longer list, so 2280 * we assign bio_list to a pointer to the bio_list_on_stack, 2281 * thus initialising the bio_list of new bios to be 2282 * added. ->make_request() may indeed add some more bios 2283 * through a recursive call to generic_make_request. If it 2284 * did, we find a non-NULL value in bio_list and re-enter the loop 2285 * from the top. In this case we really did just take the bio 2286 * off the top of the list (no pretending) and so remove it from 2287 * bio_list, and call into ->make_request() again. 2288 */ 2289 BUG_ON(bio->bi_next); 2290 bio_list_init(&bio_list_on_stack[0]); 2291 current->bio_list = bio_list_on_stack; 2292 do { 2293 struct request_queue *q = bio->bi_disk->queue; 2294 blk_mq_req_flags_t flags = bio->bi_opf & REQ_NOWAIT ?
2295 BLK_MQ_REQ_NOWAIT : 0; 2296 2297 if (likely(blk_queue_enter(q, flags) == 0)) { 2298 struct bio_list lower, same; 2299 2300 /* Create a fresh bio_list for all subordinate requests */ 2301 bio_list_on_stack[1] = bio_list_on_stack[0]; 2302 bio_list_init(&bio_list_on_stack[0]); 2303 ret = q->make_request_fn(q, bio); 2304 2305 blk_queue_exit(q); 2306 2307 /* sort new bios into those for a lower level 2308 * and those for the same level 2309 */ 2310 bio_list_init(&lower); 2311 bio_list_init(&same); 2312 while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL) 2313 if (q == bio->bi_disk->queue) 2314 bio_list_add(&same, bio); 2315 else 2316 bio_list_add(&lower, bio); 2317 /* now assemble so we handle the lowest level first */ 2318 bio_list_merge(&bio_list_on_stack[0], &lower); 2319 bio_list_merge(&bio_list_on_stack[0], &same); 2320 bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]); 2321 } else { 2322 if (unlikely(!blk_queue_dying(q) && 2323 (bio->bi_opf & REQ_NOWAIT))) 2324 bio_wouldblock_error(bio); 2325 else 2326 bio_io_error(bio); 2327 } 2328 bio = bio_list_pop(&bio_list_on_stack[0]); 2329 } while (bio); 2330 current->bio_list = NULL; /* deactivate */ 2331 2332 out: 2333 return ret; 2334 } 2335 EXPORT_SYMBOL(generic_make_request); 2336 2337 /** 2338 * direct_make_request - hand a buffer directly to its device driver for I/O 2339 * @bio: The bio describing the location in memory and on the device. 2340 * 2341 * This function behaves like generic_make_request(), but does not protect 2342 * against recursion. Must only be used if the called driver is known 2343 * to not call generic_make_request (or direct_make_request) again from 2344 * its make_request function. (Calling direct_make_request again from 2345 * a workqueue is perfectly fine as that doesn't recurse). 2346 */ 2347 blk_qc_t direct_make_request(struct bio *bio) 2348 { 2349 struct request_queue *q = bio->bi_disk->queue; 2350 bool nowait = bio->bi_opf & REQ_NOWAIT; 2351 blk_qc_t ret; 2352 2353 if (!generic_make_request_checks(bio)) 2354 return BLK_QC_T_NONE; 2355 2356 if (unlikely(blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0))) { 2357 if (nowait && !blk_queue_dying(q)) 2358 bio->bi_status = BLK_STS_AGAIN; 2359 else 2360 bio->bi_status = BLK_STS_IOERR; 2361 bio_endio(bio); 2362 return BLK_QC_T_NONE; 2363 } 2364 2365 ret = q->make_request_fn(q, bio); 2366 blk_queue_exit(q); 2367 return ret; 2368 } 2369 EXPORT_SYMBOL_GPL(direct_make_request); 2370 2371 /** 2372 * submit_bio - submit a bio to the block device layer for I/O 2373 * @bio: The &struct bio which describes the I/O 2374 * 2375 * submit_bio() is very similar in purpose to generic_make_request(), and 2376 * uses that function to do most of the work. Both are fairly rough 2377 * interfaces; @bio must be set up and ready for I/O. 2378 * 2379 */ 2380 blk_qc_t submit_bio(struct bio *bio) 2381 { 2382 /* 2383 * If it's a regular read/write or a barrier with data attached, 2384 * go through the normal accounting stuff before submission.
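 *
 * A typical caller builds and submits a bio along these lines (a
 * minimal sketch; bdev, sector, page and my_end_io are the caller's
 * own, hypothetical names):
 *
 *	bio = bio_alloc(GFP_NOIO, 1);
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio->bi_end_io = my_end_io;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(bio);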
2385 */ 2386 if (bio_has_data(bio)) { 2387 unsigned int count; 2388 2389 if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) 2390 count = queue_logical_block_size(bio->bi_disk->queue); 2391 else 2392 count = bio_sectors(bio); 2393 2394 if (op_is_write(bio_op(bio))) { 2395 count_vm_events(PGPGOUT, count); 2396 } else { 2397 task_io_account_read(bio->bi_iter.bi_size); 2398 count_vm_events(PGPGIN, count); 2399 } 2400 2401 if (unlikely(block_dump)) { 2402 char b[BDEVNAME_SIZE]; 2403 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n", 2404 current->comm, task_pid_nr(current), 2405 op_is_write(bio_op(bio)) ? "WRITE" : "READ", 2406 (unsigned long long)bio->bi_iter.bi_sector, 2407 bio_devname(bio, b), count); 2408 } 2409 } 2410 2411 return generic_make_request(bio); 2412 } 2413 EXPORT_SYMBOL(submit_bio); 2414 2415 bool blk_poll(struct request_queue *q, blk_qc_t cookie) 2416 { 2417 if (!q->poll_fn || !blk_qc_t_valid(cookie)) 2418 return false; 2419 2420 if (current->plug) 2421 blk_flush_plug_list(current->plug, false); 2422 return q->poll_fn(q, cookie); 2423 } 2424 EXPORT_SYMBOL_GPL(blk_poll); 2425 2426 /** 2427 * blk_cloned_rq_check_limits - Helper function to check a cloned request 2428 * for new the queue limits 2429 * @q: the queue 2430 * @rq: the request being checked 2431 * 2432 * Description: 2433 * @rq may have been made based on weaker limitations of upper-level queues 2434 * in request stacking drivers, and it may violate the limitation of @q. 2435 * Since the block layer and the underlying device driver trust @rq 2436 * after it is inserted to @q, it should be checked against @q before 2437 * the insertion using this generic function. 2438 * 2439 * Request stacking drivers like request-based dm may change the queue 2440 * limits when retrying requests on other queues. Those requests need 2441 * to be checked against the new queue limits again during dispatch. 2442 */ 2443 static int blk_cloned_rq_check_limits(struct request_queue *q, 2444 struct request *rq) 2445 { 2446 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) { 2447 printk(KERN_ERR "%s: over max size limit.\n", __func__); 2448 return -EIO; 2449 } 2450 2451 /* 2452 * queue's settings related to segment counting like q->bounce_pfn 2453 * may differ from that of other stacking queues. 2454 * Recalculate it to check the request correctly on this queue's 2455 * limitation. 2456 */ 2457 blk_recalc_rq_segments(rq); 2458 if (rq->nr_phys_segments > queue_max_segments(q)) { 2459 printk(KERN_ERR "%s: over max segments limit.\n", __func__); 2460 return -EIO; 2461 } 2462 2463 return 0; 2464 } 2465 2466 /** 2467 * blk_insert_cloned_request - Helper for stacking drivers to submit a request 2468 * @q: the queue to submit the request 2469 * @rq: the request being queued 2470 */ 2471 blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq) 2472 { 2473 unsigned long flags; 2474 int where = ELEVATOR_INSERT_BACK; 2475 2476 if (blk_cloned_rq_check_limits(q, rq)) 2477 return BLK_STS_IOERR; 2478 2479 if (rq->rq_disk && 2480 should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq))) 2481 return BLK_STS_IOERR; 2482 2483 if (q->mq_ops) { 2484 if (blk_queue_io_stat(q)) 2485 blk_account_io_start(rq, true); 2486 /* 2487 * Since we have a scheduler attached on the top device, 2488 * bypass a potential scheduler on the bottom device for 2489 * insert. 
2490 */ 2491 blk_mq_request_bypass_insert(rq, true); 2492 return BLK_STS_OK; 2493 } 2494 2495 spin_lock_irqsave(q->queue_lock, flags); 2496 if (unlikely(blk_queue_dying(q))) { 2497 spin_unlock_irqrestore(q->queue_lock, flags); 2498 return BLK_STS_IOERR; 2499 } 2500 2501 /* 2502 * Submitting request must be dequeued before calling this function 2503 * because it will be linked to another request_queue 2504 */ 2505 BUG_ON(blk_queued_rq(rq)); 2506 2507 if (op_is_flush(rq->cmd_flags)) 2508 where = ELEVATOR_INSERT_FLUSH; 2509 2510 add_acct_request(q, rq, where); 2511 if (where == ELEVATOR_INSERT_FLUSH) 2512 __blk_run_queue(q); 2513 spin_unlock_irqrestore(q->queue_lock, flags); 2514 2515 return BLK_STS_OK; 2516 } 2517 EXPORT_SYMBOL_GPL(blk_insert_cloned_request); 2518 2519 /** 2520 * blk_rq_err_bytes - determine number of bytes till the next failure boundary 2521 * @rq: request to examine 2522 * 2523 * Description: 2524 * A request could be merge of IOs which require different failure 2525 * handling. This function determines the number of bytes which 2526 * can be failed from the beginning of the request without 2527 * crossing into area which need to be retried further. 2528 * 2529 * Return: 2530 * The number of bytes to fail. 2531 */ 2532 unsigned int blk_rq_err_bytes(const struct request *rq) 2533 { 2534 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK; 2535 unsigned int bytes = 0; 2536 struct bio *bio; 2537 2538 if (!(rq->rq_flags & RQF_MIXED_MERGE)) 2539 return blk_rq_bytes(rq); 2540 2541 /* 2542 * Currently the only 'mixing' which can happen is between 2543 * different fastfail types. We can safely fail portions 2544 * which have all the failfast bits that the first one has - 2545 * the ones which are at least as eager to fail as the first 2546 * one. 2547 */ 2548 for (bio = rq->bio; bio; bio = bio->bi_next) { 2549 if ((bio->bi_opf & ff) != ff) 2550 break; 2551 bytes += bio->bi_iter.bi_size; 2552 } 2553 2554 /* this could lead to infinite loop */ 2555 BUG_ON(blk_rq_bytes(rq) && !bytes); 2556 return bytes; 2557 } 2558 EXPORT_SYMBOL_GPL(blk_rq_err_bytes); 2559 2560 void blk_account_io_completion(struct request *req, unsigned int bytes) 2561 { 2562 if (blk_do_io_stat(req)) { 2563 const int rw = rq_data_dir(req); 2564 struct hd_struct *part; 2565 int cpu; 2566 2567 cpu = part_stat_lock(); 2568 part = req->part; 2569 part_stat_add(cpu, part, sectors[rw], bytes >> 9); 2570 part_stat_unlock(); 2571 } 2572 } 2573 2574 void blk_account_io_done(struct request *req) 2575 { 2576 /* 2577 * Account IO completion. flush_rq isn't accounted as a 2578 * normal IO on queueing nor completion. Accounting the 2579 * containing request is enough. 
2580 */ 2581 if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) { 2582 unsigned long duration = jiffies - req->start_time; 2583 const int rw = rq_data_dir(req); 2584 struct hd_struct *part; 2585 int cpu; 2586 2587 cpu = part_stat_lock(); 2588 part = req->part; 2589 2590 part_stat_inc(cpu, part, ios[rw]); 2591 part_stat_add(cpu, part, ticks[rw], duration); 2592 part_round_stats(req->q, cpu, part); 2593 part_dec_in_flight(req->q, part, rw); 2594 2595 hd_struct_put(part); 2596 part_stat_unlock(); 2597 } 2598 } 2599 2600 #ifdef CONFIG_PM 2601 /* 2602 * Don't process normal requests when queue is suspended 2603 * or in the process of suspending/resuming 2604 */ 2605 static bool blk_pm_allow_request(struct request *rq) 2606 { 2607 switch (rq->q->rpm_status) { 2608 case RPM_RESUMING: 2609 case RPM_SUSPENDING: 2610 return rq->rq_flags & RQF_PM; 2611 case RPM_SUSPENDED: 2612 return false; 2613 } 2614 2615 return true; 2616 } 2617 #else 2618 static bool blk_pm_allow_request(struct request *rq) 2619 { 2620 return true; 2621 } 2622 #endif 2623 2624 void blk_account_io_start(struct request *rq, bool new_io) 2625 { 2626 struct hd_struct *part; 2627 int rw = rq_data_dir(rq); 2628 int cpu; 2629 2630 if (!blk_do_io_stat(rq)) 2631 return; 2632 2633 cpu = part_stat_lock(); 2634 2635 if (!new_io) { 2636 part = rq->part; 2637 part_stat_inc(cpu, part, merges[rw]); 2638 } else { 2639 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); 2640 if (!hd_struct_try_get(part)) { 2641 /* 2642 * The partition is already being removed, 2643 * the request will be accounted on the disk only 2644 * 2645 * We take a reference on disk->part0 although that 2646 * partition will never be deleted, so we can treat 2647 * it as any other partition. 2648 */ 2649 part = &rq->rq_disk->part0; 2650 hd_struct_get(part); 2651 } 2652 part_round_stats(rq->q, cpu, part); 2653 part_inc_in_flight(rq->q, part, rw); 2654 rq->part = part; 2655 } 2656 2657 part_stat_unlock(); 2658 } 2659 2660 static struct request *elv_next_request(struct request_queue *q) 2661 { 2662 struct request *rq; 2663 struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL); 2664 2665 WARN_ON_ONCE(q->mq_ops); 2666 2667 while (1) { 2668 list_for_each_entry(rq, &q->queue_head, queuelist) { 2669 if (blk_pm_allow_request(rq)) 2670 return rq; 2671 2672 if (rq->rq_flags & RQF_SOFTBARRIER) 2673 break; 2674 } 2675 2676 /* 2677 * Flush request is running and flush request isn't queueable 2678 * in the drive, we can hold the queue till flush request is 2679 * finished. Even we don't do this, driver can't dispatch next 2680 * requests and will requeue them. And this can improve 2681 * throughput too. For example, we have request flush1, write1, 2682 * flush 2. flush1 is dispatched, then queue is hold, write1 2683 * isn't inserted to queue. After flush1 is finished, flush2 2684 * will be dispatched. Since disk cache is already clean, 2685 * flush2 will be finished very soon, so looks like flush2 is 2686 * folded to flush1. 2687 * Since the queue is hold, a flag is set to indicate the queue 2688 * should be restarted later. Please see flush_end_io() for 2689 * details. 
2690 */ 2691 if (fq->flush_pending_idx != fq->flush_running_idx && 2692 !queue_flush_queueable(q)) { 2693 fq->flush_queue_delayed = 1; 2694 return NULL; 2695 } 2696 if (unlikely(blk_queue_bypass(q)) || 2697 !q->elevator->type->ops.sq.elevator_dispatch_fn(q, 0)) 2698 return NULL; 2699 } 2700 } 2701 2702 /** 2703 * blk_peek_request - peek at the top of a request queue 2704 * @q: request queue to peek at 2705 * 2706 * Description: 2707 * Return the request at the top of @q. The returned request 2708 * should be started using blk_start_request() before LLD starts 2709 * processing it. 2710 * 2711 * Return: 2712 * Pointer to the request at the top of @q if available. Null 2713 * otherwise. 2714 */ 2715 struct request *blk_peek_request(struct request_queue *q) 2716 { 2717 struct request *rq; 2718 int ret; 2719 2720 lockdep_assert_held(q->queue_lock); 2721 WARN_ON_ONCE(q->mq_ops); 2722 2723 while ((rq = elv_next_request(q)) != NULL) { 2724 if (!(rq->rq_flags & RQF_STARTED)) { 2725 /* 2726 * This is the first time the device driver 2727 * sees this request (possibly after 2728 * requeueing). Notify IO scheduler. 2729 */ 2730 if (rq->rq_flags & RQF_SORTED) 2731 elv_activate_rq(q, rq); 2732 2733 /* 2734 * just mark as started even if we don't start 2735 * it, a request that has been delayed should 2736 * not be passed by new incoming requests 2737 */ 2738 rq->rq_flags |= RQF_STARTED; 2739 trace_block_rq_issue(q, rq); 2740 } 2741 2742 if (!q->boundary_rq || q->boundary_rq == rq) { 2743 q->end_sector = rq_end_sector(rq); 2744 q->boundary_rq = NULL; 2745 } 2746 2747 if (rq->rq_flags & RQF_DONTPREP) 2748 break; 2749 2750 if (q->dma_drain_size && blk_rq_bytes(rq)) { 2751 /* 2752 * make sure space for the drain appears we 2753 * know we can do this because max_hw_segments 2754 * has been adjusted to be one fewer than the 2755 * device can handle 2756 */ 2757 rq->nr_phys_segments++; 2758 } 2759 2760 if (!q->prep_rq_fn) 2761 break; 2762 2763 ret = q->prep_rq_fn(q, rq); 2764 if (ret == BLKPREP_OK) { 2765 break; 2766 } else if (ret == BLKPREP_DEFER) { 2767 /* 2768 * the request may have been (partially) prepped. 2769 * we need to keep this request in the front to 2770 * avoid resource deadlock. RQF_STARTED will 2771 * prevent other fs requests from passing this one. 2772 */ 2773 if (q->dma_drain_size && blk_rq_bytes(rq) && 2774 !(rq->rq_flags & RQF_DONTPREP)) { 2775 /* 2776 * remove the space for the drain we added 2777 * so that we don't add it again 2778 */ 2779 --rq->nr_phys_segments; 2780 } 2781 2782 rq = NULL; 2783 break; 2784 } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) { 2785 rq->rq_flags |= RQF_QUIET; 2786 /* 2787 * Mark this request as started so we don't trigger 2788 * any debug logic in the end I/O path. 2789 */ 2790 blk_start_request(rq); 2791 __blk_end_request_all(rq, ret == BLKPREP_INVALID ? 2792 BLK_STS_TARGET : BLK_STS_IOERR); 2793 } else { 2794 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); 2795 break; 2796 } 2797 } 2798 2799 return rq; 2800 } 2801 EXPORT_SYMBOL(blk_peek_request); 2802 2803 static void blk_dequeue_request(struct request *rq) 2804 { 2805 struct request_queue *q = rq->q; 2806 2807 BUG_ON(list_empty(&rq->queuelist)); 2808 BUG_ON(ELV_ON_HASH(rq)); 2809 2810 list_del_init(&rq->queuelist); 2811 2812 /* 2813 * the time frame between a request being removed from the lists 2814 * and to it is freed is accounted as io that is in progress at 2815 * the driver side. 
2816 */ 2817 if (blk_account_rq(rq)) { 2818 q->in_flight[rq_is_sync(rq)]++; 2819 set_io_start_time_ns(rq); 2820 } 2821 } 2822 2823 /** 2824 * blk_start_request - start request processing on the driver 2825 * @req: request to dequeue 2826 * 2827 * Description: 2828 * Dequeue @req and start timeout timer on it. This hands off the 2829 * request to the driver. 2830 */ 2831 void blk_start_request(struct request *req) 2832 { 2833 lockdep_assert_held(req->q->queue_lock); 2834 WARN_ON_ONCE(req->q->mq_ops); 2835 2836 blk_dequeue_request(req); 2837 2838 if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) { 2839 blk_stat_set_issue(&req->issue_stat, blk_rq_sectors(req)); 2840 req->rq_flags |= RQF_STATS; 2841 wbt_issue(req->q->rq_wb, &req->issue_stat); 2842 } 2843 2844 BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags)); 2845 blk_add_timer(req); 2846 } 2847 EXPORT_SYMBOL(blk_start_request); 2848 2849 /** 2850 * blk_fetch_request - fetch a request from a request queue 2851 * @q: request queue to fetch a request from 2852 * 2853 * Description: 2854 * Return the request at the top of @q. The request is started on 2855 * return and LLD can start processing it immediately. 2856 * 2857 * Return: 2858 * Pointer to the request at the top of @q if available. Null 2859 * otherwise. 2860 */ 2861 struct request *blk_fetch_request(struct request_queue *q) 2862 { 2863 struct request *rq; 2864 2865 lockdep_assert_held(q->queue_lock); 2866 WARN_ON_ONCE(q->mq_ops); 2867 2868 rq = blk_peek_request(q); 2869 if (rq) 2870 blk_start_request(rq); 2871 return rq; 2872 } 2873 EXPORT_SYMBOL(blk_fetch_request); 2874 2875 /* 2876 * Steal bios from a request and add them to a bio list. 2877 * The request must not have been partially completed before. 2878 */ 2879 void blk_steal_bios(struct bio_list *list, struct request *rq) 2880 { 2881 if (rq->bio) { 2882 if (list->tail) 2883 list->tail->bi_next = rq->bio; 2884 else 2885 list->head = rq->bio; 2886 list->tail = rq->biotail; 2887 2888 rq->bio = NULL; 2889 rq->biotail = NULL; 2890 } 2891 2892 rq->__data_len = 0; 2893 } 2894 EXPORT_SYMBOL_GPL(blk_steal_bios); 2895 2896 /** 2897 * blk_update_request - Special helper function for request stacking drivers 2898 * @req: the request being processed 2899 * @error: block status code 2900 * @nr_bytes: number of bytes to complete @req 2901 * 2902 * Description: 2903 * Ends I/O on a number of bytes attached to @req, but doesn't complete 2904 * the request structure even if @req doesn't have leftover. 2905 * If @req has leftover, sets it up for the next range of segments. 2906 * 2907 * This special helper function is only for request stacking drivers 2908 * (e.g. request-based dm) so that they can handle partial completion. 2909 * Actual device drivers should use blk_end_request instead. 2910 * 2911 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees 2912 * %false return from this function. 
2913 * 2914 * Return: 2915 * %false - this request doesn't have any more data 2916 * %true - this request has more data 2917 **/ 2918 bool blk_update_request(struct request *req, blk_status_t error, 2919 unsigned int nr_bytes) 2920 { 2921 int total_bytes; 2922 2923 trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes); 2924 2925 if (!req->bio) 2926 return false; 2927 2928 if (unlikely(error && !blk_rq_is_passthrough(req) && 2929 !(req->rq_flags & RQF_QUIET))) 2930 print_req_error(req, error); 2931 2932 blk_account_io_completion(req, nr_bytes); 2933 2934 total_bytes = 0; 2935 while (req->bio) { 2936 struct bio *bio = req->bio; 2937 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); 2938 2939 if (bio_bytes == bio->bi_iter.bi_size) 2940 req->bio = bio->bi_next; 2941 2942 /* Completion has already been traced */ 2943 bio_clear_flag(bio, BIO_TRACE_COMPLETION); 2944 req_bio_endio(req, bio, bio_bytes, error); 2945 2946 total_bytes += bio_bytes; 2947 nr_bytes -= bio_bytes; 2948 2949 if (!nr_bytes) 2950 break; 2951 } 2952 2953 /* 2954 * completely done 2955 */ 2956 if (!req->bio) { 2957 /* 2958 * Reset counters so that the request stacking driver 2959 * can find how many bytes remain in the request 2960 * later. 2961 */ 2962 req->__data_len = 0; 2963 return false; 2964 } 2965 2966 req->__data_len -= total_bytes; 2967 2968 /* update sector only for requests with clear definition of sector */ 2969 if (!blk_rq_is_passthrough(req)) 2970 req->__sector += total_bytes >> 9; 2971 2972 /* mixed attributes always follow the first bio */ 2973 if (req->rq_flags & RQF_MIXED_MERGE) { 2974 req->cmd_flags &= ~REQ_FAILFAST_MASK; 2975 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK; 2976 } 2977 2978 if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) { 2979 /* 2980 * If total number of sectors is less than the first segment 2981 * size, something has gone terribly wrong. 2982 */ 2983 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { 2984 blk_dump_rq_flags(req, "request botched"); 2985 req->__data_len = blk_rq_cur_bytes(req); 2986 } 2987 2988 /* recalculate the number of segments */ 2989 blk_recalc_rq_segments(req); 2990 } 2991 2992 return true; 2993 } 2994 EXPORT_SYMBOL_GPL(blk_update_request); 2995 2996 static bool blk_update_bidi_request(struct request *rq, blk_status_t error, 2997 unsigned int nr_bytes, 2998 unsigned int bidi_bytes) 2999 { 3000 if (blk_update_request(rq, error, nr_bytes)) 3001 return true; 3002 3003 /* Bidi request must be completed as a whole */ 3004 if (unlikely(blk_bidi_rq(rq)) && 3005 blk_update_request(rq->next_rq, error, bidi_bytes)) 3006 return true; 3007 3008 if (blk_queue_add_random(rq->q)) 3009 add_disk_randomness(rq->rq_disk); 3010 3011 return false; 3012 } 3013 3014 /** 3015 * blk_unprep_request - unprepare a request 3016 * @req: the request 3017 * 3018 * This function makes a request ready for complete resubmission (or 3019 * completion). It happens only after all error handling is complete, 3020 * so represents the appropriate moment to deallocate any resources 3021 * that were allocated to the request in the prep_rq_fn. The queue 3022 * lock is held when calling this. 
3023 */ 3024 void blk_unprep_request(struct request *req) 3025 { 3026 struct request_queue *q = req->q; 3027 3028 req->rq_flags &= ~RQF_DONTPREP; 3029 if (q->unprep_rq_fn) 3030 q->unprep_rq_fn(q, req); 3031 } 3032 EXPORT_SYMBOL_GPL(blk_unprep_request); 3033 3034 void blk_finish_request(struct request *req, blk_status_t error) 3035 { 3036 struct request_queue *q = req->q; 3037 3038 lockdep_assert_held(req->q->queue_lock); 3039 WARN_ON_ONCE(q->mq_ops); 3040 3041 if (req->rq_flags & RQF_STATS) 3042 blk_stat_add(req); 3043 3044 if (req->rq_flags & RQF_QUEUED) 3045 blk_queue_end_tag(q, req); 3046 3047 BUG_ON(blk_queued_rq(req)); 3048 3049 if (unlikely(laptop_mode) && !blk_rq_is_passthrough(req)) 3050 laptop_io_completion(req->q->backing_dev_info); 3051 3052 blk_delete_timer(req); 3053 3054 if (req->rq_flags & RQF_DONTPREP) 3055 blk_unprep_request(req); 3056 3057 blk_account_io_done(req); 3058 3059 if (req->end_io) { 3060 wbt_done(req->q->rq_wb, &req->issue_stat); 3061 req->end_io(req, error); 3062 } else { 3063 if (blk_bidi_rq(req)) 3064 __blk_put_request(req->next_rq->q, req->next_rq); 3065 3066 __blk_put_request(q, req); 3067 } 3068 } 3069 EXPORT_SYMBOL(blk_finish_request); 3070 3071 /** 3072 * blk_end_bidi_request - Complete a bidi request 3073 * @rq: the request to complete 3074 * @error: block status code 3075 * @nr_bytes: number of bytes to complete @rq 3076 * @bidi_bytes: number of bytes to complete @rq->next_rq 3077 * 3078 * Description: 3079 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. 3080 * Drivers that supports bidi can safely call this member for any 3081 * type of request, bidi or uni. In the later case @bidi_bytes is 3082 * just ignored. 3083 * 3084 * Return: 3085 * %false - we are done with this request 3086 * %true - still buffers pending for this request 3087 **/ 3088 static bool blk_end_bidi_request(struct request *rq, blk_status_t error, 3089 unsigned int nr_bytes, unsigned int bidi_bytes) 3090 { 3091 struct request_queue *q = rq->q; 3092 unsigned long flags; 3093 3094 WARN_ON_ONCE(q->mq_ops); 3095 3096 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) 3097 return true; 3098 3099 spin_lock_irqsave(q->queue_lock, flags); 3100 blk_finish_request(rq, error); 3101 spin_unlock_irqrestore(q->queue_lock, flags); 3102 3103 return false; 3104 } 3105 3106 /** 3107 * __blk_end_bidi_request - Complete a bidi request with queue lock held 3108 * @rq: the request to complete 3109 * @error: block status code 3110 * @nr_bytes: number of bytes to complete @rq 3111 * @bidi_bytes: number of bytes to complete @rq->next_rq 3112 * 3113 * Description: 3114 * Identical to blk_end_bidi_request() except that queue lock is 3115 * assumed to be locked on entry and remains so on return. 3116 * 3117 * Return: 3118 * %false - we are done with this request 3119 * %true - still buffers pending for this request 3120 **/ 3121 static bool __blk_end_bidi_request(struct request *rq, blk_status_t error, 3122 unsigned int nr_bytes, unsigned int bidi_bytes) 3123 { 3124 lockdep_assert_held(rq->q->queue_lock); 3125 WARN_ON_ONCE(rq->q->mq_ops); 3126 3127 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) 3128 return true; 3129 3130 blk_finish_request(rq, error); 3131 3132 return false; 3133 } 3134 3135 /** 3136 * blk_end_request - Helper function for drivers to complete the request. 
* @rq: the request being processed 3138 * @error: block status code 3139 * @nr_bytes: number of bytes to complete 3140 * 3141 * Description: 3142 * Ends I/O on a number of bytes attached to @rq. 3143 * If @rq has leftover, sets it up for the next range of segments. 3144 * 3145 * Return: 3146 * %false - we are done with this request 3147 * %true - still buffers pending for this request 3148 **/ 3149 bool blk_end_request(struct request *rq, blk_status_t error, 3150 unsigned int nr_bytes) 3151 { 3152 WARN_ON_ONCE(rq->q->mq_ops); 3153 return blk_end_bidi_request(rq, error, nr_bytes, 0); 3154 } 3155 EXPORT_SYMBOL(blk_end_request); 3156 3157 /** 3158 * blk_end_request_all - Helper function for drivers to finish the request. 3159 * @rq: the request to finish 3160 * @error: block status code 3161 * 3162 * Description: 3163 * Completely finish @rq. 3164 */ 3165 void blk_end_request_all(struct request *rq, blk_status_t error) 3166 { 3167 bool pending; 3168 unsigned int bidi_bytes = 0; 3169 3170 if (unlikely(blk_bidi_rq(rq))) 3171 bidi_bytes = blk_rq_bytes(rq->next_rq); 3172 3173 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); 3174 BUG_ON(pending); 3175 } 3176 EXPORT_SYMBOL(blk_end_request_all); 3177 3178 /** 3179 * __blk_end_request - Helper function for drivers to complete the request. 3180 * @rq: the request being processed 3181 * @error: block status code 3182 * @nr_bytes: number of bytes to complete 3183 * 3184 * Description: 3185 * Must be called with queue lock held unlike blk_end_request(). 3186 * 3187 * Return: 3188 * %false - we are done with this request 3189 * %true - still buffers pending for this request 3190 **/ 3191 bool __blk_end_request(struct request *rq, blk_status_t error, 3192 unsigned int nr_bytes) 3193 { 3194 lockdep_assert_held(rq->q->queue_lock); 3195 WARN_ON_ONCE(rq->q->mq_ops); 3196 3197 return __blk_end_bidi_request(rq, error, nr_bytes, 0); 3198 } 3199 EXPORT_SYMBOL(__blk_end_request); 3200 3201 /** 3202 * __blk_end_request_all - Helper function for drivers to finish the request. 3203 * @rq: the request to finish 3204 * @error: block status code 3205 * 3206 * Description: 3207 * Completely finish @rq. Must be called with queue lock held. 3208 */ 3209 void __blk_end_request_all(struct request *rq, blk_status_t error) 3210 { 3211 bool pending; 3212 unsigned int bidi_bytes = 0; 3213 3214 lockdep_assert_held(rq->q->queue_lock); 3215 WARN_ON_ONCE(rq->q->mq_ops); 3216 3217 if (unlikely(blk_bidi_rq(rq))) 3218 bidi_bytes = blk_rq_bytes(rq->next_rq); 3219 3220 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); 3221 BUG_ON(pending); 3222 } 3223 EXPORT_SYMBOL(__blk_end_request_all); 3224 3225 /** 3226 * __blk_end_request_cur - Helper function to finish the current request chunk. 3227 * @rq: the request to finish the current chunk for 3228 * @error: block status code 3229 * 3230 * Description: 3231 * Complete the current consecutively mapped chunk from @rq. Must 3232 * be called with queue lock held.
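 *
 * A simple request_fn can drive a whole request chunk by chunk, e.g.
 * (a sketch; mydrv_xfer_cur() is a hypothetical helper transferring
 * blk_rq_cur_bytes() starting at blk_rq_pos()):
 *
 *	while ((rq = blk_fetch_request(q)) != NULL) {
 *		blk_status_t err;
 *
 *		do {
 *			err = mydrv_xfer_cur(rq);
 *		} while (__blk_end_request_cur(rq, err));
 *	}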
3233 * 3234 * Return: 3235 * %false - we are done with this request 3236 * %true - still buffers pending for this request 3237 */ 3238 bool __blk_end_request_cur(struct request *rq, blk_status_t error) 3239 { 3240 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); 3241 } 3242 EXPORT_SYMBOL(__blk_end_request_cur); 3243 3244 void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 3245 struct bio *bio) 3246 { 3247 if (bio_has_data(bio)) 3248 rq->nr_phys_segments = bio_phys_segments(q, bio); 3249 3250 rq->__data_len = bio->bi_iter.bi_size; 3251 rq->bio = rq->biotail = bio; 3252 3253 if (bio->bi_disk) 3254 rq->rq_disk = bio->bi_disk; 3255 } 3256 3257 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 3258 /** 3259 * rq_flush_dcache_pages - Helper function to flush all pages in a request 3260 * @rq: the request to be flushed 3261 * 3262 * Description: 3263 * Flush all pages in @rq. 3264 */ 3265 void rq_flush_dcache_pages(struct request *rq) 3266 { 3267 struct req_iterator iter; 3268 struct bio_vec bvec; 3269 3270 rq_for_each_segment(bvec, rq, iter) 3271 flush_dcache_page(bvec.bv_page); 3272 } 3273 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); 3274 #endif 3275 3276 /** 3277 * blk_lld_busy - Check if underlying low-level drivers of a device are busy 3278 * @q : the queue of the device being checked 3279 * 3280 * Description: 3281 * Check if underlying low-level drivers of a device are busy. 3282 * If the drivers want to export their busy state, they must set own 3283 * exporting function using blk_queue_lld_busy() first. 3284 * 3285 * Basically, this function is used only by request stacking drivers 3286 * to stop dispatching requests to underlying devices when underlying 3287 * devices are busy. This behavior helps more I/O merging on the queue 3288 * of the request stacking driver and prevents I/O throughput regression 3289 * on burst I/O load. 3290 * 3291 * Return: 3292 * 0 - Not busy (The request stacking driver should dispatch request) 3293 * 1 - Busy (The request stacking driver should stop dispatching request) 3294 */ 3295 int blk_lld_busy(struct request_queue *q) 3296 { 3297 if (q->lld_busy_fn) 3298 return q->lld_busy_fn(q); 3299 3300 return 0; 3301 } 3302 EXPORT_SYMBOL_GPL(blk_lld_busy); 3303 3304 /** 3305 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request 3306 * @rq: the clone request to be cleaned up 3307 * 3308 * Description: 3309 * Free all bios in @rq for a cloned request. 3310 */ 3311 void blk_rq_unprep_clone(struct request *rq) 3312 { 3313 struct bio *bio; 3314 3315 while ((bio = rq->bio) != NULL) { 3316 rq->bio = bio->bi_next; 3317 3318 bio_put(bio); 3319 } 3320 } 3321 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); 3322 3323 /* 3324 * Copy attributes of the original request to the clone request. 3325 * The actual data parts (e.g. ->cmd, ->sense) are not copied. 3326 */ 3327 static void __blk_rq_prep_clone(struct request *dst, struct request *src) 3328 { 3329 dst->cpu = src->cpu; 3330 dst->__sector = blk_rq_pos(src); 3331 dst->__data_len = blk_rq_bytes(src); 3332 dst->nr_phys_segments = src->nr_phys_segments; 3333 dst->ioprio = src->ioprio; 3334 dst->extra_len = src->extra_len; 3335 } 3336 3337 /** 3338 * blk_rq_prep_clone - Helper function to setup clone request 3339 * @rq: the request to be setup 3340 * @rq_src: original request to be cloned 3341 * @bs: bio_set that bios for clone are allocated from 3342 * @gfp_mask: memory allocation mask for bio 3343 * @bio_ctr: setup function to be called for each clone bio. 
* Returns %0 for success, non %0 for failure. 3345 * @data: private data to be passed to @bio_ctr 3346 * 3347 * Description: 3348 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. 3349 * The actual data parts of @rq_src (e.g. ->cmd, ->sense) 3350 * are not copied, and copying such parts is the caller's responsibility. 3351 * Also, pages which the original bios are pointing to are not copied 3352 * and the cloned bios just point to the same pages. 3353 * So the cloned bios must be completed before the original bios, which means 3354 * the caller must complete @rq before @rq_src. 3355 */ 3356 int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 3357 struct bio_set *bs, gfp_t gfp_mask, 3358 int (*bio_ctr)(struct bio *, struct bio *, void *), 3359 void *data) 3360 { 3361 struct bio *bio, *bio_src; 3362 3363 if (!bs) 3364 bs = fs_bio_set; 3365 3366 __rq_for_each_bio(bio_src, rq_src) { 3367 bio = bio_clone_fast(bio_src, gfp_mask, bs); 3368 if (!bio) 3369 goto free_and_out; 3370 3371 if (bio_ctr && bio_ctr(bio, bio_src, data)) 3372 goto free_and_out; 3373 3374 if (rq->bio) { 3375 rq->biotail->bi_next = bio; 3376 rq->biotail = bio; 3377 } else 3378 rq->bio = rq->biotail = bio; 3379 } 3380 3381 __blk_rq_prep_clone(rq, rq_src); 3382 3383 return 0; 3384 3385 free_and_out: 3386 if (bio) 3387 bio_put(bio); 3388 blk_rq_unprep_clone(rq); 3389 3390 return -ENOMEM; 3391 } 3392 EXPORT_SYMBOL_GPL(blk_rq_prep_clone); 3393 3394 int kblockd_schedule_work(struct work_struct *work) 3395 { 3396 return queue_work(kblockd_workqueue, work); 3397 } 3398 EXPORT_SYMBOL(kblockd_schedule_work); 3399 3400 int kblockd_schedule_work_on(int cpu, struct work_struct *work) 3401 { 3402 return queue_work_on(cpu, kblockd_workqueue, work); 3403 } 3404 EXPORT_SYMBOL(kblockd_schedule_work_on); 3405 3406 int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, 3407 unsigned long delay) 3408 { 3409 return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay); 3410 } 3411 EXPORT_SYMBOL(kblockd_mod_delayed_work_on); 3412 3413 int kblockd_schedule_delayed_work(struct delayed_work *dwork, 3414 unsigned long delay) 3415 { 3416 return queue_delayed_work(kblockd_workqueue, dwork, delay); 3417 } 3418 EXPORT_SYMBOL(kblockd_schedule_delayed_work); 3419 3420 int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, 3421 unsigned long delay) 3422 { 3423 return queue_delayed_work_on(cpu, kblockd_workqueue, dwork, delay); 3424 } 3425 EXPORT_SYMBOL(kblockd_schedule_delayed_work_on); 3426 3427 /** 3428 * blk_start_plug - initialize blk_plug and track it inside the task_struct 3429 * @plug: The &struct blk_plug that needs to be initialized 3430 * 3431 * Description: 3432 * Tracking blk_plug inside the task_struct will help with auto-flushing the 3433 * pending I/O should the task end up blocking between blk_start_plug() and 3434 * blk_finish_plug(). This is important from a performance perspective, but 3435 * also ensures that we don't deadlock. For instance, if the task is blocking 3436 * for a memory allocation, memory reclaim could end up wanting to free a 3437 * page belonging to that request that is currently residing in our private 3438 * plug. By flushing the pending I/O when the process goes to sleep, we avoid 3439 * this kind of deadlock. 3440 */ 3441 void blk_start_plug(struct blk_plug *plug) 3442 { 3443 struct task_struct *tsk = current; 3444 3445 /* 3446 * If this is a nested plug, don't actually assign it.
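 * The outermost plug stays in effect; a nested blk_finish_plug()
 * is then a no-op because the nested plug never matches
 * current->plug.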
3447 */ 3448 if (tsk->plug) 3449 return; 3450 3451 INIT_LIST_HEAD(&plug->list); 3452 INIT_LIST_HEAD(&plug->mq_list); 3453 INIT_LIST_HEAD(&plug->cb_list); 3454 /* 3455 * Store ordering should not be needed here, since a potential 3456 * preempt will imply a full memory barrier 3457 */ 3458 tsk->plug = plug; 3459 } 3460 EXPORT_SYMBOL(blk_start_plug); 3461 3462 static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b) 3463 { 3464 struct request *rqa = container_of(a, struct request, queuelist); 3465 struct request *rqb = container_of(b, struct request, queuelist); 3466 3467 return !(rqa->q < rqb->q || 3468 (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb))); 3469 } 3470 3471 /* 3472 * If 'from_schedule' is true, then postpone the dispatch of requests 3473 * until a safe kblockd context. We due this to avoid accidental big 3474 * additional stack usage in driver dispatch, in places where the originally 3475 * plugger did not intend it. 3476 */ 3477 static void queue_unplugged(struct request_queue *q, unsigned int depth, 3478 bool from_schedule) 3479 __releases(q->queue_lock) 3480 { 3481 lockdep_assert_held(q->queue_lock); 3482 3483 trace_block_unplug(q, depth, !from_schedule); 3484 3485 if (from_schedule) 3486 blk_run_queue_async(q); 3487 else 3488 __blk_run_queue(q); 3489 spin_unlock(q->queue_lock); 3490 } 3491 3492 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) 3493 { 3494 LIST_HEAD(callbacks); 3495 3496 while (!list_empty(&plug->cb_list)) { 3497 list_splice_init(&plug->cb_list, &callbacks); 3498 3499 while (!list_empty(&callbacks)) { 3500 struct blk_plug_cb *cb = list_first_entry(&callbacks, 3501 struct blk_plug_cb, 3502 list); 3503 list_del(&cb->list); 3504 cb->callback(cb, from_schedule); 3505 } 3506 } 3507 } 3508 3509 struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data, 3510 int size) 3511 { 3512 struct blk_plug *plug = current->plug; 3513 struct blk_plug_cb *cb; 3514 3515 if (!plug) 3516 return NULL; 3517 3518 list_for_each_entry(cb, &plug->cb_list, list) 3519 if (cb->callback == unplug && cb->data == data) 3520 return cb; 3521 3522 /* Not currently on the callback list */ 3523 BUG_ON(size < sizeof(*cb)); 3524 cb = kzalloc(size, GFP_ATOMIC); 3525 if (cb) { 3526 cb->data = data; 3527 cb->callback = unplug; 3528 list_add(&cb->list, &plug->cb_list); 3529 } 3530 return cb; 3531 } 3532 EXPORT_SYMBOL(blk_check_plugged); 3533 3534 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) 3535 { 3536 struct request_queue *q; 3537 unsigned long flags; 3538 struct request *rq; 3539 LIST_HEAD(list); 3540 unsigned int depth; 3541 3542 flush_plug_callbacks(plug, from_schedule); 3543 3544 if (!list_empty(&plug->mq_list)) 3545 blk_mq_flush_plug_list(plug, from_schedule); 3546 3547 if (list_empty(&plug->list)) 3548 return; 3549 3550 list_splice_init(&plug->list, &list); 3551 3552 list_sort(NULL, &list, plug_rq_cmp); 3553 3554 q = NULL; 3555 depth = 0; 3556 3557 /* 3558 * Save and disable interrupts here, to avoid doing it for every 3559 * queue lock we have to take. 
3560 */ 3561 local_irq_save(flags); 3562 while (!list_empty(&list)) { 3563 rq = list_entry_rq(list.next); 3564 list_del_init(&rq->queuelist); 3565 BUG_ON(!rq->q); 3566 if (rq->q != q) { 3567 /* 3568 * This drops the queue lock 3569 */ 3570 if (q) 3571 queue_unplugged(q, depth, from_schedule); 3572 q = rq->q; 3573 depth = 0; 3574 spin_lock(q->queue_lock); 3575 } 3576 3577 /* 3578 * Short-circuit if @q is dead 3579 */ 3580 if (unlikely(blk_queue_dying(q))) { 3581 __blk_end_request_all(rq, BLK_STS_IOERR); 3582 continue; 3583 } 3584 3585 /* 3586 * rq is already accounted, so use raw insert 3587 */ 3588 if (op_is_flush(rq->cmd_flags)) 3589 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); 3590 else 3591 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); 3592 3593 depth++; 3594 } 3595 3596 /* 3597 * This drops the queue lock 3598 */ 3599 if (q) 3600 queue_unplugged(q, depth, from_schedule); 3601 3602 local_irq_restore(flags); 3603 } 3604 3605 void blk_finish_plug(struct blk_plug *plug) 3606 { 3607 if (plug != current->plug) 3608 return; 3609 blk_flush_plug_list(plug, false); 3610 3611 current->plug = NULL; 3612 } 3613 EXPORT_SYMBOL(blk_finish_plug); 3614 3615 #ifdef CONFIG_PM 3616 /** 3617 * blk_pm_runtime_init - Block layer runtime PM initialization routine 3618 * @q: the queue of the device 3619 * @dev: the device the queue belongs to 3620 * 3621 * Description: 3622 * Initialize runtime-PM-related fields for @q and start auto suspend for 3623 * @dev. Drivers that want to take advantage of request-based runtime PM 3624 * should call this function after @dev has been initialized, and its 3625 * request queue @q has been allocated, and runtime PM for it can not happen 3626 * yet(either due to disabled/forbidden or its usage_count > 0). In most 3627 * cases, driver should call this function before any I/O has taken place. 3628 * 3629 * This function takes care of setting up using auto suspend for the device, 3630 * the autosuspend delay is set to -1 to make runtime suspend impossible 3631 * until an updated value is either set by user or by driver. Drivers do 3632 * not need to touch other autosuspend settings. 3633 * 3634 * The block layer runtime PM is request based, so only works for drivers 3635 * that use request as their IO unit instead of those directly use bio's. 3636 */ 3637 void blk_pm_runtime_init(struct request_queue *q, struct device *dev) 3638 { 3639 /* not support for RQF_PM and ->rpm_status in blk-mq yet */ 3640 if (q->mq_ops) 3641 return; 3642 3643 q->dev = dev; 3644 q->rpm_status = RPM_ACTIVE; 3645 pm_runtime_set_autosuspend_delay(q->dev, -1); 3646 pm_runtime_use_autosuspend(q->dev); 3647 } 3648 EXPORT_SYMBOL(blk_pm_runtime_init); 3649 3650 /** 3651 * blk_pre_runtime_suspend - Pre runtime suspend check 3652 * @q: the queue of the device 3653 * 3654 * Description: 3655 * This function will check if runtime suspend is allowed for the device 3656 * by examining if there are any requests pending in the queue. If there 3657 * are requests pending, the device can not be runtime suspended; otherwise, 3658 * the queue's status will be updated to SUSPENDING and the driver can 3659 * proceed to suspend the device. 3660 * 3661 * For the not allowed case, we mark last busy for the device so that 3662 * runtime PM core will try to autosuspend it some time later. 3663 * 3664 * This function should be called near the start of the device's 3665 * runtime_suspend callback. 
3666 * 3667 * Return: 3668 * 0 - OK to runtime suspend the device 3669 * -EBUSY - Device should not be runtime suspended 3670 */ 3671 int blk_pre_runtime_suspend(struct request_queue *q) 3672 { 3673 int ret = 0; 3674 3675 if (!q->dev) 3676 return ret; 3677 3678 spin_lock_irq(q->queue_lock); 3679 if (q->nr_pending) { 3680 ret = -EBUSY; 3681 pm_runtime_mark_last_busy(q->dev); 3682 } else { 3683 q->rpm_status = RPM_SUSPENDING; 3684 } 3685 spin_unlock_irq(q->queue_lock); 3686 return ret; 3687 } 3688 EXPORT_SYMBOL(blk_pre_runtime_suspend); 3689 3690 /** 3691 * blk_post_runtime_suspend - Post runtime suspend processing 3692 * @q: the queue of the device 3693 * @err: return value of the device's runtime_suspend function 3694 * 3695 * Description: 3696 * Update the queue's runtime status according to the return value of the 3697 * device's runtime suspend function and mark last busy for the device so 3698 * that PM core will try to auto suspend the device at a later time. 3699 * 3700 * This function should be called near the end of the device's 3701 * runtime_suspend callback. 3702 */ 3703 void blk_post_runtime_suspend(struct request_queue *q, int err) 3704 { 3705 if (!q->dev) 3706 return; 3707 3708 spin_lock_irq(q->queue_lock); 3709 if (!err) { 3710 q->rpm_status = RPM_SUSPENDED; 3711 } else { 3712 q->rpm_status = RPM_ACTIVE; 3713 pm_runtime_mark_last_busy(q->dev); 3714 } 3715 spin_unlock_irq(q->queue_lock); 3716 } 3717 EXPORT_SYMBOL(blk_post_runtime_suspend); 3718 3719 /** 3720 * blk_pre_runtime_resume - Pre runtime resume processing 3721 * @q: the queue of the device 3722 * 3723 * Description: 3724 * Update the queue's runtime status to RESUMING in preparation for the 3725 * runtime resume of the device. 3726 * 3727 * This function should be called near the start of the device's 3728 * runtime_resume callback. 3729 */ 3730 void blk_pre_runtime_resume(struct request_queue *q) 3731 { 3732 if (!q->dev) 3733 return; 3734 3735 spin_lock_irq(q->queue_lock); 3736 q->rpm_status = RPM_RESUMING; 3737 spin_unlock_irq(q->queue_lock); 3738 } 3739 EXPORT_SYMBOL(blk_pre_runtime_resume); 3740 3741 /** 3742 * blk_post_runtime_resume - Post runtime resume processing 3743 * @q: the queue of the device 3744 * @err: return value of the device's runtime_resume function 3745 * 3746 * Description: 3747 * Update the queue's runtime status according to the return value of the 3748 * device's runtime_resume function. If it is successfully resumed, process 3749 * the requests that are queued into the device's queue when it is resuming 3750 * and then mark last busy and initiate autosuspend for it. 3751 * 3752 * This function should be called near the end of the device's 3753 * runtime_resume callback. 3754 */ 3755 void blk_post_runtime_resume(struct request_queue *q, int err) 3756 { 3757 if (!q->dev) 3758 return; 3759 3760 spin_lock_irq(q->queue_lock); 3761 if (!err) { 3762 q->rpm_status = RPM_ACTIVE; 3763 __blk_run_queue(q); 3764 pm_runtime_mark_last_busy(q->dev); 3765 pm_request_autosuspend(q->dev); 3766 } else { 3767 q->rpm_status = RPM_SUSPENDED; 3768 } 3769 spin_unlock_irq(q->queue_lock); 3770 } 3771 EXPORT_SYMBOL(blk_post_runtime_resume); 3772 3773 /** 3774 * blk_set_runtime_active - Force runtime status of the queue to be active 3775 * @q: the queue of the device 3776 * 3777 * If the device is left runtime suspended during system suspend the resume 3778 * hook typically resumes the device and corrects runtime status 3779 * accordingly. 
However, that does not affect the queue runtime PM status 3780 * which is still "suspended". This prevents requests from the queue from 3781 * being processed. 3782 * 3783 * This function can be used in a driver's resume hook to correct the queue 3784 * runtime PM status and re-enable peeking requests from the queue. It 3785 * should be called before the first request is added to the queue. 3786 */ 3787 void blk_set_runtime_active(struct request_queue *q) 3788 { 3789 spin_lock_irq(q->queue_lock); 3790 q->rpm_status = RPM_ACTIVE; 3791 pm_runtime_mark_last_busy(q->dev); 3792 pm_request_autosuspend(q->dev); 3793 spin_unlock_irq(q->queue_lock); 3794 } 3795 EXPORT_SYMBOL(blk_set_runtime_active); 3796 #endif 3797 3798 int __init blk_dev_init(void) 3799 { 3800 BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS)); 3801 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 * 3802 FIELD_SIZEOF(struct request, cmd_flags)); 3803 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 * 3804 FIELD_SIZEOF(struct bio, bi_opf)); 3805 3806 /* used for unplugging and affects IO latency/throughput - HIGHPRI */ 3807 kblockd_workqueue = alloc_workqueue("kblockd", 3808 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); 3809 if (!kblockd_workqueue) 3810 panic("Failed to create kblockd\n"); 3811 3812 request_cachep = kmem_cache_create("blkdev_requests", 3813 sizeof(struct request), 0, SLAB_PANIC, NULL); 3814 3815 blk_requestq_cachep = kmem_cache_create("request_queue", 3816 sizeof(struct request_queue), 0, SLAB_PANIC, NULL); 3817 3818 #ifdef CONFIG_DEBUG_FS 3819 blk_debugfs_root = debugfs_create_dir("block", NULL); 3820 #endif 3821 3822 return 0; 3823 } 3824
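/*
 * Example: batching several bios under a plug (a sketch; the bios[]
 * array and nr are hypothetical). While the plug is held, requests
 * queue up in current->plug and are dispatched together, sorted by
 * queue and sector, when the plug is finished or the task sleeps:
 *
 *	struct blk_plug plug;
 *	int i;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);
 */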