/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-cgroup.h"
#include "blk-mq.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

DEFINE_IDA(blk_queue_ida);

/*
 * For the allocated request tables
 */
struct kmem_cache *request_cachep = NULL;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}

/**
 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
 * @bdev: device
 *
 * Locates the passed device's request queue and returns the address of its
 * backing_dev_info. This function can only be called if @bdev is opened
 * and the return value is never NULL.
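 *
 * Illustrative sketch (not part of the original documentation): a caller
 * that holds an open @bdev could, for example, look up the device's
 * read-ahead setting:
 *
 *	struct backing_dev_info *bdi = blk_get_backing_dev_info(bdev);
 *	unsigned long ra_pages = bdi->ra_pages;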
 */
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	return &q->backing_dev_info;
}
EXPORT_SYMBOL(blk_get_backing_dev_info);

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->timeout_list);
	rq->cpu = -1;
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->cmd = rq->__cmd;
	rq->cmd_len = BLK_MAX_CDB;
	rq->tag = -1;
	rq->start_time = jiffies;
	set_start_time_ns(rq);
	rq->part = NULL;
}
EXPORT_SYMBOL(blk_rq_init);

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, int error)
{
	if (error)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = -EIO;

	if (unlikely(rq->cmd_flags & REQ_QUIET))
		set_bit(BIO_QUIET, &bio->bi_flags);

	bio_advance(bio, nbytes);

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
		bio_endio(bio, error);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	int bit;

	printk(KERN_INFO "%s: dev %s: type=%x, flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO " bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		printk(KERN_INFO " cdb: ");
		for (bit = 0; bit < BLK_MAX_CDB; bit++)
			printk("%02x ", rq->cmd[bit]);
		printk("\n");
	}
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void blk_delay_work(struct work_struct *work)
{
	struct request_queue *q;

	q = container_of(work, struct request_queue, delay_work.work);
	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_delay_queue - restart queueing after defined interval
 * @q:     The &struct request_queue in question
 * @msecs: Delay in msecs
 *
 * Description:
 *   Sometimes queueing needs to be postponed for a little while, to allow
 *   resources to come back. This function will make sure that queueing is
 *   restarted around the specified time. Queue lock must be held.
 */
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
	if (likely(!blk_queue_dead(q)))
		queue_delayed_work(kblockd_workqueue, &q->delay_work,
				   msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);

/**
 * blk_start_queue - restart a previously stopped queue
 * @q: The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue(). Queue lock must be held.
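 *
 *   Illustrative sketch (not part of the original documentation): a driver
 *   that stopped its queue on a 'queue full' condition might restart it
 *   from its completion path with:
 *
 *	spin_lock_irq(q->queue_lock);
 *	blk_start_queue(q);
 *	spin_unlock_irq(q->queue_lock);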
 **/
void blk_start_queue(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	__blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q: The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations. Queue lock must be held.
 **/
void blk_stop_queue(struct request_queue *q)
{
	cancel_delayed_work(&q->delay_work);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);

	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;
		int i;

		queue_for_each_hw_ctx(q, hctx, i) {
			cancel_delayed_work_sync(&hctx->run_work);
			cancel_delayed_work_sync(&hctx->delay_work);
		}
	} else {
		cancel_delayed_work_sync(&q->delay_work);
	}
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * __blk_run_queue_uncond - run a queue whether or not it has been stopped
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on a queue if there are any pending requests.
 *    May be used to restart request handling after a request has completed.
 *    This variant runs the queue whether or not the queue has been
 *    stopped. Must be called with the queue lock held and interrupts
 *    disabled. See also @blk_run_queue.
 */
inline void __blk_run_queue_uncond(struct request_queue *q)
{
	if (unlikely(blk_queue_dead(q)))
		return;

	/*
	 * Some request_fn implementations, e.g. scsi_request_fn(), unlock
	 * the queue lock internally. As a result multiple threads may be
	 * running such a request function concurrently. Keep track of the
	 * number of active request_fn invocations such that blk_drain_queue()
	 * can wait until all these request_fn calls have finished.
	 */
	q->request_fn_active++;
	q->request_fn(q);
	q->request_fn_active--;
}

/**
 * __blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    See @blk_run_queue.
 *    This variant must be called with the queue lock
 *    held and interrupts disabled.
 */
void __blk_run_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;

	__blk_run_queue_uncond(q);
}
EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q: The queue to run
 *
 * Description:
 *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
 *    of us. The caller must hold the queue lock.
 */
void blk_run_queue_async(struct request_queue *q)
{
	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
EXPORT_SYMBOL(blk_run_queue_async);

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on this queue, if it has pending work to do.
 *    May be used to restart queueing when a request has completed.
 */
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);

void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

/**
 * __blk_drain_queue - drain requests from request_queue
 * @q: queue to drain
 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
 *
 * Drain requests from @q. If @drain_all is set, all requests are drained.
 * If not, only ELVPRIV requests are drained. The caller is responsible
 * for ensuring that no new requests which need to be drained are queued.
 */
static void __blk_drain_queue(struct request_queue *q, bool drain_all)
	__releases(q->queue_lock)
	__acquires(q->queue_lock)
{
	int i;

	lockdep_assert_held(q->queue_lock);

	while (true) {
		bool drain = false;

		/*
		 * The caller might be trying to drain @q before its
		 * elevator is initialized.
		 */
		if (q->elevator)
			elv_drain_elevator(q);

		blkcg_drain_queue(q);

		/*
		 * This function might be called on a queue which failed
		 * driver init after queue creation or is not yet fully
		 * active. Some drivers (e.g. fd and loop) get unhappy
		 * in such cases. Kick queue iff dispatch queue has
		 * something on it and @q has request_fn set.
		 */
		if (!list_empty(&q->queue_head) && q->request_fn)
			__blk_run_queue(q);

		drain |= q->nr_rqs_elvpriv;
		drain |= q->request_fn_active;

		/*
		 * Unfortunately, requests are queued at and tracked from
		 * multiple places and there's no single counter which can
		 * be drained. Check all the queues and counters.
		 */
		if (drain_all) {
			struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
			drain |= !list_empty(&q->queue_head);
			for (i = 0; i < 2; i++) {
				drain |= q->nr_rqs[i];
				drain |= q->in_flight[i];
				if (fq)
					drain |= !list_empty(&fq->flush_queue[i]);
			}
		}

		if (!drain)
			break;

		spin_unlock_irq(q->queue_lock);

		msleep(10);

		spin_lock_irq(q->queue_lock);
	}

	/*
	 * With queue marked dead, any woken up waiter will fail the
	 * allocation path, so the wakeup chaining is lost and we're
	 * left with hung waiters. We need to wake up those waiters.
	 */
	if (q->request_fn) {
		struct request_list *rl;

		blk_queue_for_each_rl(rl, q)
			for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
				wake_up_all(&rl->wait[i]);
	}
}

/**
 * blk_queue_bypass_start - enter queue bypass mode
 * @q: queue of interest
 *
 * In bypass mode, only the dispatch FIFO queue of @q is used. This
 * function makes @q enter bypass mode and drains all requests which were
 * throttled or issued before. On return, it's guaranteed that no request
 * is being throttled or has ELVPRIV set and blk_queue_bypass() %true
 * inside queue or RCU read lock.
 */
void blk_queue_bypass_start(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);
	spin_unlock_irq(q->queue_lock);

	/*
	 * Queues start drained. Skip actual draining till init is
	 * complete. This avoids lengthy delays during queue init which
	 * can happen many times during boot.
	 */
	if (blk_queue_init_done(q)) {
		spin_lock_irq(q->queue_lock);
		__blk_drain_queue(q, false);
		spin_unlock_irq(q->queue_lock);

		/* ensure blk_queue_bypass() is %true inside RCU read lock */
		synchronize_rcu();
	}
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_start);

/**
 * blk_queue_bypass_end - leave queue bypass mode
 * @q: queue of interest
 *
 * Leave bypass mode and restore the normal queueing behavior.
 */
void blk_queue_bypass_end(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	if (!--q->bypass_depth)
		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
	WARN_ON_ONCE(q->bypass_depth < 0);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_end);

void blk_set_queue_dying(struct request_queue *q)
{
	queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);

	if (q->mq_ops)
		blk_mq_wake_waiters(q);
	else {
		struct request_list *rl;

		blk_queue_for_each_rl(rl, q) {
			if (rl->rq_pool) {
				wake_up(&rl->wait[BLK_RW_SYNC]);
				wake_up(&rl->wait[BLK_RW_ASYNC]);
			}
		}
	}
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it. All future requests will be failed immediately with -ENODEV.
 */
void blk_cleanup_queue(struct request_queue *q)
{
	spinlock_t *lock = q->queue_lock;

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	mutex_lock(&q->sysfs_lock);
	blk_set_queue_dying(q);
	spin_lock_irq(lock);

	/*
	 * A dying queue is permanently in bypass mode till released. Note
	 * that, unlike blk_queue_bypass_start(), we aren't performing
	 * synchronize_rcu() after entering bypass mode to avoid the delay
	 * as some drivers create and destroy a lot of queues while
	 * probing. This is still safe because blk_release_queue() will be
	 * called only after the queue refcnt drops to zero and nothing,
	 * RCU or not, would be traversing the queue by then.
	 */
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);

	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	queue_flag_set(QUEUE_FLAG_DYING, q);
	spin_unlock_irq(lock);
	mutex_unlock(&q->sysfs_lock);

	/*
	 * Drain all requests queued before DYING marking.
	 * Set DEAD flag to prevent q->request_fn() from being invoked after
	 * draining has finished.
	 */
	if (q->mq_ops) {
		blk_mq_freeze_queue(q);
		spin_lock_irq(lock);
	} else {
		spin_lock_irq(lock);
		__blk_drain_queue(q, true);
	}
	queue_flag_set(QUEUE_FLAG_DEAD, q);
	spin_unlock_irq(lock);

	/* @q won't process any more requests, flush async actions */
	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
	blk_sync_queue(q);

	if (q->mq_ops)
		blk_mq_free_queue(q);

	spin_lock_irq(lock);
	if (q->queue_lock != &q->__queue_lock)
		q->queue_lock = &q->__queue_lock;
	spin_unlock_irq(lock);

	bdi_destroy(&q->backing_dev_info);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

/* Allocate memory local to the request queue */
static void *alloc_request_struct(gfp_t gfp_mask, void *data)
{
	int nid = (int)(long)data;
	return kmem_cache_alloc_node(request_cachep, gfp_mask, nid);
}

static void free_request_struct(void *element, void *unused)
{
	kmem_cache_free(request_cachep, element);
}

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask)
{
	if (unlikely(rl->rq_pool))
		return 0;

	rl->q = q;
	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, alloc_request_struct,
					  free_request_struct,
					  (void *)(long)q->node, gfp_mask,
					  q->node);
	if (!rl->rq_pool)
		return -ENOMEM;

	return 0;
}

void blk_exit_rl(struct request_list *rl)
{
	if (rl->rq_pool)
		mempool_destroy(rl->rq_pool);
}

struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_alloc_queue);

struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
	struct request_queue *q;
	int err;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				  gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
	if (q->id < 0)
		goto fail_q;

	q->backing_dev_info.ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
	q->backing_dev_info.state = 0;
	q->backing_dev_info.capabilities = 0;
	q->backing_dev_info.name = "block";
	q->node = node_id;

	err = bdi_init(&q->backing_dev_info);
	if (err)
		goto fail_id;

	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
		    laptop_mode_timer_fn, (unsigned long) q);
	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
	INIT_LIST_HEAD(&q->queue_head);
	INIT_LIST_HEAD(&q->timeout_list);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif
	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->sysfs_lock);
	spin_lock_init(&q->__queue_lock);

	/*
	 * By default initialize queue_lock to internal lock and driver can
	 * override it later if need be.
	 */
	q->queue_lock = &q->__queue_lock;

	/*
	 * A queue starts its life with bypass turned on to avoid
	 * unnecessary bypass on/off overhead and nasty surprises during
	 * init. The initial bypass will be finished when the queue is
	 * registered by blk_register_queue().
	 */
	q->bypass_depth = 1;
	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);

	init_waitqueue_head(&q->mq_freeze_wq);

	if (blkcg_init_queue(q))
		goto fail_bdi;

	return q;

fail_bdi:
	bdi_destroy(&q->backing_dev_info);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue_node);

/**
 * blk_init_queue - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sorts requests and coalesces adjacent requests, then it must
 *    call blk_init_queue(). The function @rfn will be called when there
 *    are requests on the queue that need to be processed. If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time. If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will be taken also from interrupt context, so irq
 *    disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or %NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
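 *
 *    Illustrative sketch (identifiers are hypothetical, not from this file):
 *
 *	static DEFINE_SPINLOCK(mydev_lock);
 *	...
 *	q = blk_init_queue(mydev_request_fn, &mydev_lock);
 *	if (!q)
 *		return -ENOMEM;
 *
 *    with the matching blk_cleanup_queue(q) issued when the device is
 *    deactivated.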
 **/

struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_init_queue);

struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	struct request_queue *uninit_q, *q;

	uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
	if (!uninit_q)
		return NULL;

	q = blk_init_allocated_queue(uninit_q, rfn, lock);
	if (!q)
		blk_cleanup_queue(uninit_q);

	return q;
}
EXPORT_SYMBOL(blk_init_queue_node);

struct request_queue *
blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
			 spinlock_t *lock)
{
	if (!q)
		return NULL;

	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, 0);
	if (!q->fq)
		return NULL;

	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
		goto fail;

	q->request_fn = rfn;
	q->prep_rq_fn = NULL;
	q->unprep_rq_fn = NULL;
	q->queue_flags |= QUEUE_FLAG_DEFAULT;

	/* Override internal queue lock with supplied lock pointer */
	if (lock)
		q->queue_lock = lock;

	/*
	 * This also sets hw/phys segments, boundary and size
	 */
	blk_queue_make_request(q, blk_queue_bio);

	q->sg_reserved_size = INT_MAX;

	/* Protect q->elevator from elevator_change */
	mutex_lock(&q->sysfs_lock);

	/* init elevator */
	if (elevator_init(q, NULL)) {
		mutex_unlock(&q->sysfs_lock);
		goto fail;
	}

	mutex_unlock(&q->sysfs_lock);

	return q;

fail:
	blk_free_flush_queue(q->fq);
	return NULL;
}
EXPORT_SYMBOL(blk_init_allocated_queue);

bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

static inline void blk_free_request(struct request_list *rl, struct request *rq)
{
	if (rq->cmd_flags & REQ_ELVPRIV) {
		elv_put_request(rl->q, rq);
		if (rq->elv.icq)
			put_io_context(rq->elv.icq->ioc);
	}

	mempool_free(rq, rl->rq_pool);
}

/*
 * ioc_batching returns true if the ioc is a valid batching request and
 * should be given priority access to a request.
 */
static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc)
		return 0;

	/*
	 * Make sure the process is able to allocate at least 1 request
	 * even if the batch times out, otherwise we could theoretically
	 * lose wakeups.
	 */
	return ioc->nr_batch_requests == q->nr_batching ||
		(ioc->nr_batch_requests > 0
		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}

/*
 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 * will cause the process to be a "batcher" on all queues in the system. This
 * is the behaviour we want though - once it gets a wakeup it should be given
 * a nice run.
 */
static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc || ioc_batching(q, ioc))
		return;

	ioc->nr_batch_requests = q->nr_batching;
	ioc->last_waited = jiffies;
}

static void __freed_request(struct request_list *rl, int sync)
{
	struct request_queue *q = rl->q;

	/*
	 * bdi isn't aware of blkcg yet. As all async IOs end up root
	 * blkcg anyway, just use root blkcg state.
	 */
	if (rl == &q->root_rl &&
	    rl->count[sync] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, sync);

	if (rl->count[sync] + 1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[sync]))
			wake_up(&rl->wait[sync]);

		blk_clear_rl_full(rl, sync);
	}
}

/*
 * A request has just been released. Account for it, update the full and
 * congestion status, wake up any waiters. Called under q->queue_lock.
 */
static void freed_request(struct request_list *rl, unsigned int flags)
{
	struct request_queue *q = rl->q;
	int sync = rw_is_sync(flags);

	q->nr_rqs[sync]--;
	rl->count[sync]--;
	if (flags & REQ_ELVPRIV)
		q->nr_rqs_elvpriv--;

	__freed_request(rl, sync);

	if (unlikely(rl->starved[sync ^ 1]))
		__freed_request(rl, sync ^ 1);
}

int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct request_list *rl;

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);

	/* congestion isn't cgroup aware and follows root blkcg for now */
	rl = &q->root_rl;

	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_SYNC);
	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_SYNC);

	if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_ASYNC);
	else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_ASYNC);

	blk_queue_for_each_rl(rl, q) {
		if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
			blk_set_rl_full(rl, BLK_RW_SYNC);
		} else {
			blk_clear_rl_full(rl, BLK_RW_SYNC);
			wake_up(&rl->wait[BLK_RW_SYNC]);
		}

		if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
			blk_set_rl_full(rl, BLK_RW_ASYNC);
		} else {
			blk_clear_rl_full(rl, BLK_RW_ASYNC);
			wake_up(&rl->wait[BLK_RW_ASYNC]);
		}
	}

	spin_unlock_irq(q->queue_lock);
	return 0;
}

/*
 * Determine if elevator data should be initialized when allocating the
 * request associated with @bio.
 */
static bool blk_rq_should_init_elevator(struct bio *bio)
{
	if (!bio)
		return true;

	/*
	 * Flush requests do not use the elevator so skip initialization.
	 * This allows a request to share the flush and elevator data.
	 */
	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
		return false;

	return true;
}

/**
 * rq_ioc - determine io_context for request allocation
 * @bio: request being allocated is for this bio (can be %NULL)
 *
 * Determine io_context to use for request allocation for @bio. May return
 * %NULL if %current->io_context doesn't exist.
 */
static struct io_context *rq_ioc(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio && bio->bi_ioc)
		return bio->bi_ioc;
#endif
	return current->io_context;
}

/**
 * __get_request - get a free request
 * @rl: request list to allocate from
 * @rw_flags: RW and SYNC flags
 * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
 * Get a free request from @q. This function may fail under memory
 * pressure or if @q is dead.
 *
 * Must be called with @q->queue_lock held and,
 * Returns ERR_PTR on failure, with @q->queue_lock held.
 * Returns request pointer on success, with @q->queue_lock *not held*.
 */
static struct request *__get_request(struct request_list *rl, int rw_flags,
				     struct bio *bio, gfp_t gfp_mask)
{
	struct request_queue *q = rl->q;
	struct request *rq;
	struct elevator_type *et = q->elevator->type;
	struct io_context *ioc = rq_ioc(bio);
	struct io_cq *icq = NULL;
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	int may_queue;

	if (unlikely(blk_queue_dying(q)))
		return ERR_PTR(-ENODEV);

	may_queue = elv_may_queue(q, rw_flags);
	if (may_queue == ELV_MQUEUE_NO)
		goto rq_starved;

	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
		if (rl->count[is_sync]+1 >= q->nr_requests) {
			/*
			 * The queue will fill after this allocation, so set
			 * it as full, and mark this process as "batching".
			 * This process will be allowed to complete a batch of
			 * requests, others will be blocked.
			 */
			if (!blk_rl_full(rl, is_sync)) {
				ioc_set_batching(q, ioc);
				blk_set_rl_full(rl, is_sync);
			} else {
				if (may_queue != ELV_MQUEUE_MUST
						&& !ioc_batching(q, ioc)) {
					/*
					 * The queue is full and the allocating
					 * process is not a "batcher", and not
					 * exempted by the IO scheduler
					 */
					return ERR_PTR(-ENOMEM);
				}
			}
		}
		/*
		 * bdi isn't aware of blkcg yet. As all async IOs end up
		 * root blkcg anyway, just use root blkcg state.
		 */
		if (rl == &q->root_rl)
			blk_set_queue_congested(q, is_sync);
	}

	/*
	 * Only allow batching queuers to allocate up to 50% over the defined
	 * limit of requests, otherwise we could have thousands of requests
	 * allocated with any setting of ->nr_requests
	 */
	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
		return ERR_PTR(-ENOMEM);

	q->nr_rqs[is_sync]++;
	rl->count[is_sync]++;
	rl->starved[is_sync] = 0;

	/*
	 * Decide whether the new request will be managed by elevator. If
	 * so, mark @rw_flags and increment elvpriv. Non-zero elvpriv will
	 * prevent the current elevator from being destroyed until the new
	 * request is freed. This guarantees icq's won't be destroyed and
	 * makes creating new ones safe.
	 *
	 * Also, lookup icq while holding queue_lock. If it doesn't exist,
	 * it will be created after releasing queue_lock.
	 */
	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
		rw_flags |= REQ_ELVPRIV;
		q->nr_rqs_elvpriv++;
		if (et->icq_cache && ioc)
			icq = ioc_lookup_icq(ioc, q);
	}

	if (blk_queue_io_stat(q))
		rw_flags |= REQ_IO_STAT;
	spin_unlock_irq(q->queue_lock);

	/* allocate and init request */
	rq = mempool_alloc(rl->rq_pool, gfp_mask);
	if (!rq)
		goto fail_alloc;

	blk_rq_init(q, rq);
	blk_rq_set_rl(rq, rl);
	rq->cmd_flags = rw_flags | REQ_ALLOCED;

	/* init elvpriv */
	if (rw_flags & REQ_ELVPRIV) {
		if (unlikely(et->icq_cache && !icq)) {
			if (ioc)
				icq = ioc_create_icq(ioc, q, gfp_mask);
			if (!icq)
				goto fail_elvpriv;
		}

		rq->elv.icq = icq;
		if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
			goto fail_elvpriv;

		/* @rq->elv.icq holds io_context until @rq is freed */
		if (icq)
			get_io_context(icq->ioc);
	}
out:
	/*
	 * ioc may be NULL here, and ioc_batching will be false.
	 * That's OK, if the queue is under the request limit then requests
	 * need not count toward the nr_batch_requests limit. There will
	 * always be some limit enforced by BLK_BATCH_TIME.
	 */
	if (ioc_batching(q, ioc))
		ioc->nr_batch_requests--;

	trace_block_getrq(q, bio, rw_flags & 1);
	return rq;

fail_elvpriv:
	/*
	 * elvpriv init failed. ioc, icq and elvpriv aren't mempool backed
	 * and may fail indefinitely under memory pressure and thus
	 * shouldn't stall IO. Treat this request as !elvpriv. This will
	 * disturb iosched and blkcg but weird is better than dead.
	 */
	printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
			   __func__, dev_name(q->backing_dev_info.dev));

	rq->cmd_flags &= ~REQ_ELVPRIV;
	rq->elv.icq = NULL;

	spin_lock_irq(q->queue_lock);
	q->nr_rqs_elvpriv--;
	spin_unlock_irq(q->queue_lock);
	goto out;

fail_alloc:
	/*
	 * Allocation failed presumably due to memory. Undo anything we
	 * might have messed up.
	 *
	 * Allocating task should really be put onto the front of the wait
	 * queue, but this is pretty rare.
	 */
	spin_lock_irq(q->queue_lock);
	freed_request(rl, rw_flags);

	/*
	 * in the very unlikely event that allocation failed and no
	 * requests for this direction were pending, mark us starved so that
	 * freeing of a request in the other direction will notice
	 * us. another possible fix would be to split the rq mempool into
	 * READ and WRITE
	 */
rq_starved:
	if (unlikely(rl->count[is_sync] == 0))
		rl->starved[is_sync] = 1;
	return ERR_PTR(-ENOMEM);
}

/**
 * get_request - get a free request
 * @q: request_queue to allocate request from
 * @rw_flags: RW and SYNC flags
 * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
 * Get a free request from @q. If %__GFP_WAIT is set in @gfp_mask, this
 * function keeps retrying under memory pressure and fails iff @q is dead.
 *
 * Must be called with @q->queue_lock held and,
 * Returns ERR_PTR on failure, with @q->queue_lock held.
 * Returns request pointer on success, with @q->queue_lock *not held*.
 */
static struct request *get_request(struct request_queue *q, int rw_flags,
				   struct bio *bio, gfp_t gfp_mask)
{
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	DEFINE_WAIT(wait);
	struct request_list *rl;
	struct request *rq;

	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
retry:
	rq = __get_request(rl, rw_flags, bio, gfp_mask);
	if (!IS_ERR(rq))
		return rq;

	if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
		blk_put_rl(rl);
		return rq;
	}

	/* wait on @rl and retry */
	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
				  TASK_UNINTERRUPTIBLE);

	trace_block_sleeprq(q, bio, rw_flags & 1);

	spin_unlock_irq(q->queue_lock);
	io_schedule();

	/*
	 * After sleeping, we become a "batching" process and will be able
	 * to allocate at least one request, and up to a big batch of them
	 * for a small period of time.
	 * See ioc_batching, ioc_set_batching
	 */
	ioc_set_batching(q, current->io_context);

	spin_lock_irq(q->queue_lock);
	finish_wait(&rl->wait[is_sync], &wait);

	goto retry;
}

static struct request *blk_old_get_request(struct request_queue *q, int rw,
		gfp_t gfp_mask)
{
	struct request *rq;

	BUG_ON(rw != READ && rw != WRITE);

	/* create ioc upfront */
	create_io_context(gfp_mask, q->node);

	spin_lock_irq(q->queue_lock);
	rq = get_request(q, rw, NULL, gfp_mask);
	if (IS_ERR(rq))
		spin_unlock_irq(q->queue_lock);
	/* q->queue_lock is unlocked at this point */

	return rq;
}

struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
	if (q->mq_ops)
		return blk_mq_alloc_request(q, rw, gfp_mask, false);
	else
		return blk_old_get_request(q, rw, gfp_mask);
}
EXPORT_SYMBOL(blk_get_request);

/**
 * blk_make_request - given a bio, allocate a corresponding struct request.
 * @q: target request queue
 * @bio:  The bio describing the memory mappings that will be submitted for IO.
 *        It may be a chained-bio properly constructed by block/bio layer.
 * @gfp_mask: gfp flags to be used for memory allocation
 *
 * blk_make_request is the parallel of generic_make_request for BLOCK_PC
 * type commands. Where the struct request needs to be further initialized by
 * the caller. It is passed a &struct bio, which describes the memory info of
 * the I/O transfer.
 *
 * The caller of blk_make_request must make sure that bi_io_vec
 * are set to describe the memory buffers. That bio_data_dir() will return
 * the needed direction of the request. (And all bio's in the passed bio-chain
 * are properly set accordingly)
 *
 * If called under non-sleepable conditions, mapped bio buffers must not
 * need bouncing, by calling the appropriate masked or flagged allocator,
 * suitable for the target device. Otherwise the call to blk_queue_bounce will
 * BUG.
 *
 * WARNING: When allocating/cloning a bio-chain, careful consideration should be
 * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
 * anything but the first bio in the chain. Otherwise you risk waiting for IO
 * completion of a bio that hasn't been submitted yet, thus resulting in a
 * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
 * of bio_alloc(), as that avoids the mempool deadlock.
 * If possible a big IO should be split into smaller parts when allocation
 * fails. Partial allocation should not be an error, or you risk a live-lock.
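 *
 * Illustrative sketch of a caller (hypothetical, not from this file; @cdb
 * and @cdb_len are assumed to describe the command), finishing the BLOCK_PC
 * setup and executing the request synchronously:
 *
 *	rq = blk_make_request(q, bio, GFP_KERNEL);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	memcpy(rq->cmd, cdb, cdb_len);
 *	rq->cmd_len = cdb_len;
 *	blk_execute_rq(q, NULL, rq, 0);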
 */
struct request *blk_make_request(struct request_queue *q, struct bio *bio,
				 gfp_t gfp_mask)
{
	struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);

	if (IS_ERR(rq))
		return rq;

	blk_rq_set_block_pc(rq);

	for_each_bio(bio) {
		struct bio *bounce_bio = bio;
		int ret;

		blk_queue_bounce(q, &bounce_bio);
		ret = blk_rq_append_bio(q, rq, bounce_bio);
		if (unlikely(ret)) {
			blk_put_request(rq);
			return ERR_PTR(ret);
		}
	}

	return rq;
}
EXPORT_SYMBOL(blk_make_request);

/**
 * blk_rq_set_block_pc - initialize a request to type BLOCK_PC
 * @rq: request to be initialized
 *
 */
void blk_rq_set_block_pc(struct request *rq)
{
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	memset(rq->__cmd, 0, sizeof(rq->__cmd));
}
EXPORT_SYMBOL(blk_rq_set_block_pc);

/**
 * blk_requeue_request - put a request back on queue
 * @q: request queue where request should be inserted
 * @rq: request to be inserted
 *
 * Description:
 *    Drivers often keep queueing requests until the hardware cannot accept
 *    more, when that condition happens we need to put the request back
 *    on the queue. Must be called with queue lock held.
 */
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
	blk_delete_timer(rq);
	blk_clear_rq_complete(rq);
	trace_block_rq_requeue(q, rq);

	if (rq->cmd_flags & REQ_QUEUED)
		blk_queue_end_tag(q, rq);

	BUG_ON(blk_queued_rq(rq));

	elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);

static void add_acct_request(struct request_queue *q, struct request *rq,
			     int where)
{
	blk_account_io_start(rq, true);
	__elv_add_request(q, rq, where);
}

static void part_round_stats_single(int cpu, struct hd_struct *part,
				    unsigned long now)
{
	int inflight;

	if (now == part->stamp)
		return;

	inflight = part_in_flight(part);
	if (inflight) {
		__part_stat_add(cpu, part, time_in_queue,
				inflight * (now - part->stamp));
		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
	}
	part->stamp = now;
}

/**
 * part_round_stats() - Round off the performance stats on a struct disk_stats.
 * @cpu: cpu number for stats access
 * @part: target partition
 *
 * The average IO queue length and utilisation statistics are maintained
 * by observing the current state of the queue length and the amount of
 * time it has been in this state for.
 *
 * Normally, that accounting is done on IO completion, but that can result
 * in more than a second's worth of IO being accounted for within any one
 * second, leading to >100% utilisation. To deal with that, we call this
 * function to do a round-off before returning the results when reading
 * /proc/diskstats. This accounts immediately for all queue usage up to
 * the current jiffies and restarts the counters again.
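 *
 * Illustrative call pattern (a sketch mirroring how the part_stat helpers
 * are normally paired; not text from the original documentation):
 *
 *	cpu = part_stat_lock();
 *	part_round_stats(cpu, part);
 *	part_stat_unlock();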
 */
void part_round_stats(int cpu, struct hd_struct *part)
{
	unsigned long now = jiffies;

	if (part->partno)
		part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
	part_round_stats_single(cpu, part, now);
}
EXPORT_SYMBOL_GPL(part_round_stats);

#ifdef CONFIG_PM
static void blk_pm_put_request(struct request *rq)
{
	if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending)
		pm_runtime_mark_last_busy(rq->q->dev);
}
#else
static inline void blk_pm_put_request(struct request *rq) {}
#endif

/*
 * queue lock must be held
 */
void __blk_put_request(struct request_queue *q, struct request *req)
{
	if (unlikely(!q))
		return;

	if (q->mq_ops) {
		blk_mq_free_request(req);
		return;
	}

	blk_pm_put_request(req);

	elv_completed_request(q, req);

	/* this is a bio leak */
	WARN_ON(req->bio != NULL);

	/*
	 * Request may not have originated from ll_rw_blk. If not,
	 * it didn't come out of our reserved rq pools
	 */
	if (req->cmd_flags & REQ_ALLOCED) {
		unsigned int flags = req->cmd_flags;
		struct request_list *rl = blk_rq_rl(req);

		BUG_ON(!list_empty(&req->queuelist));
		BUG_ON(ELV_ON_HASH(req));

		blk_free_request(rl, req);
		freed_request(rl, flags);
		blk_put_rl(rl);
	}
}
EXPORT_SYMBOL_GPL(__blk_put_request);

void blk_put_request(struct request *req)
{
	struct request_queue *q = req->q;

	if (q->mq_ops)
		blk_mq_free_request(req);
	else {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		__blk_put_request(q, req);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
EXPORT_SYMBOL(blk_put_request);

/**
 * blk_add_request_payload - add a payload to a request
 * @rq: request to update
 * @page: page backing the payload
 * @len: length of the payload.
 *
 * This allows a block driver to add a payload to an already submitted
 * request later. The driver needs to take care of freeing the payload
 * itself.
 *
 * Note that this is a quite horrible hack and nothing but handling of
 * discard requests should ever use it.
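 *
 * Illustrative sketch (hypothetical driver code, not from this file): a
 * driver handling a discard request might attach one zeroed page that it
 * then fills with a device-specific range descriptor:
 *
 *	page = alloc_page(GFP_NOIO | __GFP_ZERO);
 *	if (page)
 *		blk_add_request_payload(rq, page, sizeof(struct mydev_trim_range));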
 */
void blk_add_request_payload(struct request *rq, struct page *page,
		unsigned int len)
{
	struct bio *bio = rq->bio;

	bio->bi_io_vec->bv_page = page;
	bio->bi_io_vec->bv_offset = 0;
	bio->bi_io_vec->bv_len = len;

	bio->bi_iter.bi_size = len;
	bio->bi_vcnt = 1;
	bio->bi_phys_segments = 1;

	rq->__data_len = rq->resid_len = len;
	rq->nr_phys_segments = 1;
}
EXPORT_SYMBOL_GPL(blk_add_request_payload);

bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
			    struct bio *bio)
{
	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;

	if (!ll_back_merge_fn(q, req, bio))
		return false;

	trace_block_bio_backmerge(q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

	blk_account_io_start(req, false);
	return true;
}

bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
			     struct bio *bio)
{
	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;

	if (!ll_front_merge_fn(q, req, bio))
		return false;

	trace_block_bio_frontmerge(q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	bio->bi_next = req->bio;
	req->bio = bio;

	req->__sector = bio->bi_iter.bi_sector;
	req->__data_len += bio->bi_iter.bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

	blk_account_io_start(req, false);
	return true;
}

/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @request_count: out parameter for number of traversed plugged requests
 *
 * Determine whether @bio being queued on @q can be merged with a request
 * on %current's plugged list. Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock. As such it's more of an issuing mechanism
 * than scheduling, and the request, while it may have elvpriv data, is not
 * added to the elevator at this point. In addition, we don't have
 * reliable access to the elevator outside queue lock. Only check basic
 * merging parameters without querying the elevator.
 *
 * Caller must ensure !blk_queue_nomerges(q) beforehand.
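 *
 * Illustrative call pattern (mirrors the use in blk_queue_bio() below;
 * added here as a sketch, not original documentation):
 *
 *	if (!blk_queue_nomerges(q) &&
 *	    blk_attempt_plug_merge(q, bio, &request_count))
 *		return;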
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
			    unsigned int *request_count)
{
	struct blk_plug *plug;
	struct request *rq;
	bool ret = false;
	struct list_head *plug_list;

	plug = current->plug;
	if (!plug)
		goto out;
	*request_count = 0;

	if (q->mq_ops)
		plug_list = &plug->mq_list;
	else
		plug_list = &plug->list;

	list_for_each_entry_reverse(rq, plug_list, queuelist) {
		int el_ret;

		if (rq->q == q)
			(*request_count)++;

		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
			continue;

		el_ret = blk_try_merge(rq, bio);
		if (el_ret == ELEVATOR_BACK_MERGE) {
			ret = bio_attempt_back_merge(q, rq, bio);
			if (ret)
				break;
		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
			ret = bio_attempt_front_merge(q, rq, bio);
			if (ret)
				break;
		}
	}
out:
	return ret;
}

void init_request_from_bio(struct request *req, struct bio *bio)
{
	req->cmd_type = REQ_TYPE_FS;

	req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
	if (bio->bi_rw & REQ_RAHEAD)
		req->cmd_flags |= REQ_FAILFAST_MASK;

	req->errors = 0;
	req->__sector = bio->bi_iter.bi_sector;
	req->ioprio = bio_prio(bio);
	blk_rq_bio_prep(req->q, req, bio);
}

void blk_queue_bio(struct request_queue *q, struct bio *bio)
{
	const bool sync = !!(bio->bi_rw & REQ_SYNC);
	struct blk_plug *plug;
	int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
	struct request *req;
	unsigned int request_count = 0;

	/*
	 * low level driver can indicate that it wants pages above a
	 * certain limit bounced to low memory (ie for highmem, or even
	 * ISA dma in theory)
	 */
	blk_queue_bounce(q, &bio);

	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		bio_endio(bio, -EIO);
		return;
	}

	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
		spin_lock_irq(q->queue_lock);
		where = ELEVATOR_INSERT_FLUSH;
		goto get_rq;
	}

	/*
	 * Check if we can merge with the plugged list before grabbing
	 * any locks.
	 */
	if (!blk_queue_nomerges(q) &&
	    blk_attempt_plug_merge(q, bio, &request_count))
		return;

	spin_lock_irq(q->queue_lock);

	el_ret = elv_merge(q, &req, bio);
	if (el_ret == ELEVATOR_BACK_MERGE) {
		if (bio_attempt_back_merge(q, req, bio)) {
			elv_bio_merged(q, req, bio);
			if (!attempt_back_merge(q, req))
				elv_merged_request(q, req, el_ret);
			goto out_unlock;
		}
	} else if (el_ret == ELEVATOR_FRONT_MERGE) {
		if (bio_attempt_front_merge(q, req, bio)) {
			elv_bio_merged(q, req, bio);
			if (!attempt_front_merge(q, req))
				elv_merged_request(q, req, el_ret);
			goto out_unlock;
		}
	}

get_rq:
	/*
	 * This sync check and mask will be re-done in init_request_from_bio(),
	 * but we need to set it earlier to expose the sync flag to the
	 * rq allocator and io schedulers.
	 */
	rw_flags = bio_data_dir(bio);
	if (sync)
		rw_flags |= REQ_SYNC;

	/*
	 * Grab a free request. This might sleep but cannot fail.
	 * Returns with the queue unlocked.
	 */
	req = get_request(q, rw_flags, bio, GFP_NOIO);
	if (IS_ERR(req)) {
		bio_endio(bio, PTR_ERR(req));	/* @q is dead */
		goto out_unlock;
	}

	/*
	 * After dropping the lock and possibly sleeping here, our request
	 * may now be mergeable after it had proven unmergeable (above).
	 * We don't worry about that case for efficiency. It won't happen
	 * often, and the elevators are able to handle it.
	 */
	init_request_from_bio(req, bio);

	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
		req->cpu = raw_smp_processor_id();

	plug = current->plug;
	if (plug) {
		/*
		 * If this is the first request added after a plug, fire
		 * off a plug trace.
		 */
		if (!request_count)
			trace_block_plug(q);
		else {
			if (request_count >= BLK_MAX_REQUEST_COUNT) {
				blk_flush_plug_list(plug, false);
				trace_block_plug(q);
			}
		}
		list_add_tail(&req->queuelist, &plug->list);
		blk_account_io_start(req, true);
	} else {
		spin_lock_irq(q->queue_lock);
		add_acct_request(q, req, where);
		__blk_run_queue(q);
out_unlock:
		spin_unlock_irq(q->queue_lock);
	}
}
EXPORT_SYMBOL_GPL(blk_queue_bio);	/* for device mapper only */

/*
 * If bio->bi_bdev is a partition, remap the location
 */
static inline void blk_partition_remap(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;

	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
		struct hd_struct *p = bdev->bd_part;

		bio->bi_iter.bi_sector += p->start_sect;
		bio->bi_bdev = bdev->bd_contains;

		trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
				      bdev->bd_dev,
				      bio->bi_iter.bi_sector - p->start_sect);
	}
}

static void handle_bad_sector(struct bio *bio)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "attempt to access beyond end of device\n");
	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
			bdevname(bio->bi_bdev, b),
			bio->bi_rw,
			(unsigned long long)bio_end_sector(bio),
			(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));

	set_bit(BIO_EOF, &bio->bi_flags);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
{
	return part->make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool should_fail_request(struct hd_struct *part,
					unsigned int bytes)
{
	return false;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Check whether this bio extends beyond the end of the device.
 */
static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
{
	sector_t maxsector;

	if (!nr_sectors)
		return 0;

	/* Test device or partition size, when known. */
	maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
	if (maxsector) {
		sector_t sector = bio->bi_iter.bi_sector;

		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
			/*
			 * This may well happen - the kernel calls bread()
			 * without checking the size of the device, e.g., when
			 * mounting a device.
			 */
			handle_bad_sector(bio);
			return 1;
		}
	}

	return 0;
}

static noinline_for_stack bool
generic_make_request_checks(struct bio *bio)
{
	struct request_queue *q;
	int nr_sectors = bio_sectors(bio);
	int err = -EIO;
	char b[BDEVNAME_SIZE];
	struct hd_struct *part;

	might_sleep();

	if (bio_check_eod(bio, nr_sectors))
		goto end_io;

	q = bdev_get_queue(bio->bi_bdev);
	if (unlikely(!q)) {
		printk(KERN_ERR
		       "generic_make_request: Trying to access "
		       "nonexistent block-device %s (%Lu)\n",
		       bdevname(bio->bi_bdev, b),
		       (long long) bio->bi_iter.bi_sector);
		goto end_io;
	}

	if (likely(bio_is_rw(bio) &&
		   nr_sectors > queue_max_hw_sectors(q))) {
		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
		       bdevname(bio->bi_bdev, b),
		       bio_sectors(bio),
		       queue_max_hw_sectors(q));
		goto end_io;
	}

	part = bio->bi_bdev->bd_part;
	if (should_fail_request(part, bio->bi_iter.bi_size) ||
	    should_fail_request(&part_to_disk(part)->part0,
				bio->bi_iter.bi_size))
		goto end_io;

	/*
	 * If this device has partitions, remap block n
	 * of partition p to block n+start(p) of the disk.
	 */
	blk_partition_remap(bio);

	if (bio_check_eod(bio, nr_sectors))
		goto end_io;

	/*
	 * Filter flush bio's early so that make_request based
	 * drivers without flush support don't have to worry
	 * about them.
	 */
	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
		bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
		if (!nr_sectors) {
			err = 0;
			goto end_io;
		}
	}

	if ((bio->bi_rw & REQ_DISCARD) &&
	    (!blk_queue_discard(q) ||
	     ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) {
		err = -EOPNOTSUPP;
		goto end_io;
	}

	if (bio->bi_rw & REQ_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) {
		err = -EOPNOTSUPP;
		goto end_io;
	}

	/*
	 * Various block parts want %current->io_context and lazy ioc
	 * allocation ends up trading a lot of pain for a small amount of
	 * memory. Just allocate it upfront. This may fail and block
	 * layer knows how to live with it.
	 */
	create_io_context(GFP_ATOMIC, q->node);

	if (blk_throtl_bio(q, bio))
		return false;	/* throttled, will be resubmitted later */

	trace_block_bio_queue(q, bio);
	return true;

end_io:
	bio_endio(bio, err);
	return false;
}

/**
 * generic_make_request - hand a buffer to its device driver for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * generic_make_request() is used to make I/O requests of block
 * devices. It is passed a &struct bio, which describes the I/O that needs
 * to be done.
 *
 * generic_make_request() does not return any status. The
 * success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the bio->bi_end_io
 * function described (one day) elsewhere.
1892 *
1893 * The caller of generic_make_request must make sure that bi_io_vec
1894 * is set to describe the memory buffer, and that bi_bdev and bi_sector are
1895 * set to describe the device address, and the
1896 * bi_end_io and optionally bi_private are set to describe how
1897 * completion notification should be signaled.
1898 *
1899 * generic_make_request and the drivers it calls may use bi_next if this
1900 * bio happens to be merged with someone else, and may resubmit the bio to
1901 * a lower device by calling into generic_make_request recursively, which
1902 * means the bio should NOT be touched after the call to ->make_request_fn.
1903 */
1904 void generic_make_request(struct bio *bio)
1905 {
1906 struct bio_list bio_list_on_stack;
1907
1908 if (!generic_make_request_checks(bio))
1909 return;
1910
1911 /*
1912 * We only want one ->make_request_fn to be active at a time, else
1913 * stack usage with stacked devices could be a problem. So use
1914 * current->bio_list to keep a list of requests submitted by a
1915 * make_request_fn function. current->bio_list is also used as a
1916 * flag to say if generic_make_request is currently active in this
1917 * task or not. If it is NULL, then no make_request is active. If
1918 * it is non-NULL, then a make_request is active, and new requests
1919 * should be added at the tail.
1920 */
1921 if (current->bio_list) {
1922 bio_list_add(current->bio_list, bio);
1923 return;
1924 }
1925
1926 /* The following loop may be a bit non-obvious, and so deserves some
1927 * explanation.
1928 * Before entering the loop, bio->bi_next is NULL (as all callers
1929 * ensure that) so we have a list with a single bio.
1930 * We pretend that we have just taken it off a longer list, so
1931 * we assign bio_list to a pointer to the bio_list_on_stack,
1932 * thus initialising the bio_list of new bios to be
1933 * added. ->make_request() may indeed add some more bios
1934 * through a recursive call to generic_make_request. If it
1935 * did, we find a non-NULL value in bio_list and re-enter the loop
1936 * from the top. In this case we really did just take the bio
1937 * off the top of the list (no pretending) and so remove it from
1938 * bio_list, and call into ->make_request() again.
1939 */
1940 BUG_ON(bio->bi_next);
1941 bio_list_init(&bio_list_on_stack);
1942 current->bio_list = &bio_list_on_stack;
1943 do {
1944 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1945
1946 q->make_request_fn(q, bio);
1947
1948 bio = bio_list_pop(current->bio_list);
1949 } while (bio);
1950 current->bio_list = NULL; /* deactivate */
1951 }
1952 EXPORT_SYMBOL(generic_make_request);
1953
1954 /**
1955 * submit_bio - submit a bio to the block device layer for I/O
1956 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
1957 * @bio: The &struct bio which describes the I/O
1958 *
1959 * submit_bio() is very similar in purpose to generic_make_request(), and
1960 * uses that function to do most of the work. Both are fairly rough
1961 * interfaces; @bio must be set up and ready for I/O.
1962 *
1963 */
1964 void submit_bio(int rw, struct bio *bio)
1965 {
1966 bio->bi_rw |= rw;
1967
1968 /*
1969 * If it's a regular read/write or a barrier with data attached,
1970 * go through the normal accounting stuff before submission.
1971 */ 1972 if (bio_has_data(bio)) { 1973 unsigned int count; 1974 1975 if (unlikely(rw & REQ_WRITE_SAME)) 1976 count = bdev_logical_block_size(bio->bi_bdev) >> 9; 1977 else 1978 count = bio_sectors(bio); 1979 1980 if (rw & WRITE) { 1981 count_vm_events(PGPGOUT, count); 1982 } else { 1983 task_io_account_read(bio->bi_iter.bi_size); 1984 count_vm_events(PGPGIN, count); 1985 } 1986 1987 if (unlikely(block_dump)) { 1988 char b[BDEVNAME_SIZE]; 1989 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n", 1990 current->comm, task_pid_nr(current), 1991 (rw & WRITE) ? "WRITE" : "READ", 1992 (unsigned long long)bio->bi_iter.bi_sector, 1993 bdevname(bio->bi_bdev, b), 1994 count); 1995 } 1996 } 1997 1998 generic_make_request(bio); 1999 } 2000 EXPORT_SYMBOL(submit_bio); 2001 2002 /** 2003 * blk_rq_check_limits - Helper function to check a request for the queue limit 2004 * @q: the queue 2005 * @rq: the request being checked 2006 * 2007 * Description: 2008 * @rq may have been made based on weaker limitations of upper-level queues 2009 * in request stacking drivers, and it may violate the limitation of @q. 2010 * Since the block layer and the underlying device driver trust @rq 2011 * after it is inserted to @q, it should be checked against @q before 2012 * the insertion using this generic function. 2013 * 2014 * This function should also be useful for request stacking drivers 2015 * in some cases below, so export this function. 2016 * Request stacking drivers like request-based dm may change the queue 2017 * limits while requests are in the queue (e.g. dm's table swapping). 2018 * Such request stacking drivers should check those requests against 2019 * the new queue limits again when they dispatch those requests, 2020 * although such checkings are also done against the old queue limits 2021 * when submitting requests. 2022 */ 2023 int blk_rq_check_limits(struct request_queue *q, struct request *rq) 2024 { 2025 if (!rq_mergeable(rq)) 2026 return 0; 2027 2028 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) { 2029 printk(KERN_ERR "%s: over max size limit.\n", __func__); 2030 return -EIO; 2031 } 2032 2033 /* 2034 * queue's settings related to segment counting like q->bounce_pfn 2035 * may differ from that of other stacking queues. 2036 * Recalculate it to check the request correctly on this queue's 2037 * limitation. 
2038 */ 2039 blk_recalc_rq_segments(rq); 2040 if (rq->nr_phys_segments > queue_max_segments(q)) { 2041 printk(KERN_ERR "%s: over max segments limit.\n", __func__); 2042 return -EIO; 2043 } 2044 2045 return 0; 2046 } 2047 EXPORT_SYMBOL_GPL(blk_rq_check_limits); 2048 2049 /** 2050 * blk_insert_cloned_request - Helper for stacking drivers to submit a request 2051 * @q: the queue to submit the request 2052 * @rq: the request being queued 2053 */ 2054 int blk_insert_cloned_request(struct request_queue *q, struct request *rq) 2055 { 2056 unsigned long flags; 2057 int where = ELEVATOR_INSERT_BACK; 2058 2059 if (blk_rq_check_limits(q, rq)) 2060 return -EIO; 2061 2062 if (rq->rq_disk && 2063 should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq))) 2064 return -EIO; 2065 2066 if (q->mq_ops) { 2067 if (blk_queue_io_stat(q)) 2068 blk_account_io_start(rq, true); 2069 blk_mq_insert_request(rq, false, true, true); 2070 return 0; 2071 } 2072 2073 spin_lock_irqsave(q->queue_lock, flags); 2074 if (unlikely(blk_queue_dying(q))) { 2075 spin_unlock_irqrestore(q->queue_lock, flags); 2076 return -ENODEV; 2077 } 2078 2079 /* 2080 * Submitting request must be dequeued before calling this function 2081 * because it will be linked to another request_queue 2082 */ 2083 BUG_ON(blk_queued_rq(rq)); 2084 2085 if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA)) 2086 where = ELEVATOR_INSERT_FLUSH; 2087 2088 add_acct_request(q, rq, where); 2089 if (where == ELEVATOR_INSERT_FLUSH) 2090 __blk_run_queue(q); 2091 spin_unlock_irqrestore(q->queue_lock, flags); 2092 2093 return 0; 2094 } 2095 EXPORT_SYMBOL_GPL(blk_insert_cloned_request); 2096 2097 /** 2098 * blk_rq_err_bytes - determine number of bytes till the next failure boundary 2099 * @rq: request to examine 2100 * 2101 * Description: 2102 * A request could be merge of IOs which require different failure 2103 * handling. This function determines the number of bytes which 2104 * can be failed from the beginning of the request without 2105 * crossing into area which need to be retried further. 2106 * 2107 * Return: 2108 * The number of bytes to fail. 2109 * 2110 * Context: 2111 * queue_lock must be held. 2112 */ 2113 unsigned int blk_rq_err_bytes(const struct request *rq) 2114 { 2115 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK; 2116 unsigned int bytes = 0; 2117 struct bio *bio; 2118 2119 if (!(rq->cmd_flags & REQ_MIXED_MERGE)) 2120 return blk_rq_bytes(rq); 2121 2122 /* 2123 * Currently the only 'mixing' which can happen is between 2124 * different fastfail types. We can safely fail portions 2125 * which have all the failfast bits that the first one has - 2126 * the ones which are at least as eager to fail as the first 2127 * one. 2128 */ 2129 for (bio = rq->bio; bio; bio = bio->bi_next) { 2130 if ((bio->bi_rw & ff) != ff) 2131 break; 2132 bytes += bio->bi_iter.bi_size; 2133 } 2134 2135 /* this could lead to infinite loop */ 2136 BUG_ON(blk_rq_bytes(rq) && !bytes); 2137 return bytes; 2138 } 2139 EXPORT_SYMBOL_GPL(blk_rq_err_bytes); 2140 2141 void blk_account_io_completion(struct request *req, unsigned int bytes) 2142 { 2143 if (blk_do_io_stat(req)) { 2144 const int rw = rq_data_dir(req); 2145 struct hd_struct *part; 2146 int cpu; 2147 2148 cpu = part_stat_lock(); 2149 part = req->part; 2150 part_stat_add(cpu, part, sectors[rw], bytes >> 9); 2151 part_stat_unlock(); 2152 } 2153 } 2154 2155 void blk_account_io_done(struct request *req) 2156 { 2157 /* 2158 * Account IO completion. flush_rq isn't accounted as a 2159 * normal IO on queueing nor completion. 
Accounting the 2160 * containing request is enough. 2161 */ 2162 if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) { 2163 unsigned long duration = jiffies - req->start_time; 2164 const int rw = rq_data_dir(req); 2165 struct hd_struct *part; 2166 int cpu; 2167 2168 cpu = part_stat_lock(); 2169 part = req->part; 2170 2171 part_stat_inc(cpu, part, ios[rw]); 2172 part_stat_add(cpu, part, ticks[rw], duration); 2173 part_round_stats(cpu, part); 2174 part_dec_in_flight(part, rw); 2175 2176 hd_struct_put(part); 2177 part_stat_unlock(); 2178 } 2179 } 2180 2181 #ifdef CONFIG_PM 2182 /* 2183 * Don't process normal requests when queue is suspended 2184 * or in the process of suspending/resuming 2185 */ 2186 static struct request *blk_pm_peek_request(struct request_queue *q, 2187 struct request *rq) 2188 { 2189 if (q->dev && (q->rpm_status == RPM_SUSPENDED || 2190 (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM)))) 2191 return NULL; 2192 else 2193 return rq; 2194 } 2195 #else 2196 static inline struct request *blk_pm_peek_request(struct request_queue *q, 2197 struct request *rq) 2198 { 2199 return rq; 2200 } 2201 #endif 2202 2203 void blk_account_io_start(struct request *rq, bool new_io) 2204 { 2205 struct hd_struct *part; 2206 int rw = rq_data_dir(rq); 2207 int cpu; 2208 2209 if (!blk_do_io_stat(rq)) 2210 return; 2211 2212 cpu = part_stat_lock(); 2213 2214 if (!new_io) { 2215 part = rq->part; 2216 part_stat_inc(cpu, part, merges[rw]); 2217 } else { 2218 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); 2219 if (!hd_struct_try_get(part)) { 2220 /* 2221 * The partition is already being removed, 2222 * the request will be accounted on the disk only 2223 * 2224 * We take a reference on disk->part0 although that 2225 * partition will never be deleted, so we can treat 2226 * it as any other partition. 2227 */ 2228 part = &rq->rq_disk->part0; 2229 hd_struct_get(part); 2230 } 2231 part_round_stats(cpu, part); 2232 part_inc_in_flight(part, rw); 2233 rq->part = part; 2234 } 2235 2236 part_stat_unlock(); 2237 } 2238 2239 /** 2240 * blk_peek_request - peek at the top of a request queue 2241 * @q: request queue to peek at 2242 * 2243 * Description: 2244 * Return the request at the top of @q. The returned request 2245 * should be started using blk_start_request() before LLD starts 2246 * processing it. 2247 * 2248 * Return: 2249 * Pointer to the request at the top of @q if available. Null 2250 * otherwise. 2251 * 2252 * Context: 2253 * queue_lock must be held. 2254 */ 2255 struct request *blk_peek_request(struct request_queue *q) 2256 { 2257 struct request *rq; 2258 int ret; 2259 2260 while ((rq = __elv_next_request(q)) != NULL) { 2261 2262 rq = blk_pm_peek_request(q, rq); 2263 if (!rq) 2264 break; 2265 2266 if (!(rq->cmd_flags & REQ_STARTED)) { 2267 /* 2268 * This is the first time the device driver 2269 * sees this request (possibly after 2270 * requeueing). Notify IO scheduler. 
2271 */ 2272 if (rq->cmd_flags & REQ_SORTED) 2273 elv_activate_rq(q, rq); 2274 2275 /* 2276 * just mark as started even if we don't start 2277 * it, a request that has been delayed should 2278 * not be passed by new incoming requests 2279 */ 2280 rq->cmd_flags |= REQ_STARTED; 2281 trace_block_rq_issue(q, rq); 2282 } 2283 2284 if (!q->boundary_rq || q->boundary_rq == rq) { 2285 q->end_sector = rq_end_sector(rq); 2286 q->boundary_rq = NULL; 2287 } 2288 2289 if (rq->cmd_flags & REQ_DONTPREP) 2290 break; 2291 2292 if (q->dma_drain_size && blk_rq_bytes(rq)) { 2293 /* 2294 * make sure space for the drain appears we 2295 * know we can do this because max_hw_segments 2296 * has been adjusted to be one fewer than the 2297 * device can handle 2298 */ 2299 rq->nr_phys_segments++; 2300 } 2301 2302 if (!q->prep_rq_fn) 2303 break; 2304 2305 ret = q->prep_rq_fn(q, rq); 2306 if (ret == BLKPREP_OK) { 2307 break; 2308 } else if (ret == BLKPREP_DEFER) { 2309 /* 2310 * the request may have been (partially) prepped. 2311 * we need to keep this request in the front to 2312 * avoid resource deadlock. REQ_STARTED will 2313 * prevent other fs requests from passing this one. 2314 */ 2315 if (q->dma_drain_size && blk_rq_bytes(rq) && 2316 !(rq->cmd_flags & REQ_DONTPREP)) { 2317 /* 2318 * remove the space for the drain we added 2319 * so that we don't add it again 2320 */ 2321 --rq->nr_phys_segments; 2322 } 2323 2324 rq = NULL; 2325 break; 2326 } else if (ret == BLKPREP_KILL) { 2327 rq->cmd_flags |= REQ_QUIET; 2328 /* 2329 * Mark this request as started so we don't trigger 2330 * any debug logic in the end I/O path. 2331 */ 2332 blk_start_request(rq); 2333 __blk_end_request_all(rq, -EIO); 2334 } else { 2335 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); 2336 break; 2337 } 2338 } 2339 2340 return rq; 2341 } 2342 EXPORT_SYMBOL(blk_peek_request); 2343 2344 void blk_dequeue_request(struct request *rq) 2345 { 2346 struct request_queue *q = rq->q; 2347 2348 BUG_ON(list_empty(&rq->queuelist)); 2349 BUG_ON(ELV_ON_HASH(rq)); 2350 2351 list_del_init(&rq->queuelist); 2352 2353 /* 2354 * the time frame between a request being removed from the lists 2355 * and to it is freed is accounted as io that is in progress at 2356 * the driver side. 2357 */ 2358 if (blk_account_rq(rq)) { 2359 q->in_flight[rq_is_sync(rq)]++; 2360 set_io_start_time_ns(rq); 2361 } 2362 } 2363 2364 /** 2365 * blk_start_request - start request processing on the driver 2366 * @req: request to dequeue 2367 * 2368 * Description: 2369 * Dequeue @req and start timeout timer on it. This hands off the 2370 * request to the driver. 2371 * 2372 * Block internal functions which don't want to start timer should 2373 * call blk_dequeue_request(). 2374 * 2375 * Context: 2376 * queue_lock must be held. 2377 */ 2378 void blk_start_request(struct request *req) 2379 { 2380 blk_dequeue_request(req); 2381 2382 /* 2383 * We are now handing the request to the hardware, initialize 2384 * resid_len to full count and add the timeout handler. 2385 */ 2386 req->resid_len = blk_rq_bytes(req); 2387 if (unlikely(blk_bidi_rq(req))) 2388 req->next_rq->resid_len = blk_rq_bytes(req->next_rq); 2389 2390 BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags)); 2391 blk_add_timer(req); 2392 } 2393 EXPORT_SYMBOL(blk_start_request); 2394 2395 /** 2396 * blk_fetch_request - fetch a request from a request queue 2397 * @q: request queue to fetch a request from 2398 * 2399 * Description: 2400 * Return the request at the top of @q. 
The request is started on 2401 * return and LLD can start processing it immediately. 2402 * 2403 * Return: 2404 * Pointer to the request at the top of @q if available. Null 2405 * otherwise. 2406 * 2407 * Context: 2408 * queue_lock must be held. 2409 */ 2410 struct request *blk_fetch_request(struct request_queue *q) 2411 { 2412 struct request *rq; 2413 2414 rq = blk_peek_request(q); 2415 if (rq) 2416 blk_start_request(rq); 2417 return rq; 2418 } 2419 EXPORT_SYMBOL(blk_fetch_request); 2420 2421 /** 2422 * blk_update_request - Special helper function for request stacking drivers 2423 * @req: the request being processed 2424 * @error: %0 for success, < %0 for error 2425 * @nr_bytes: number of bytes to complete @req 2426 * 2427 * Description: 2428 * Ends I/O on a number of bytes attached to @req, but doesn't complete 2429 * the request structure even if @req doesn't have leftover. 2430 * If @req has leftover, sets it up for the next range of segments. 2431 * 2432 * This special helper function is only for request stacking drivers 2433 * (e.g. request-based dm) so that they can handle partial completion. 2434 * Actual device drivers should use blk_end_request instead. 2435 * 2436 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees 2437 * %false return from this function. 2438 * 2439 * Return: 2440 * %false - this request doesn't have any more data 2441 * %true - this request has more data 2442 **/ 2443 bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) 2444 { 2445 int total_bytes; 2446 2447 trace_block_rq_complete(req->q, req, nr_bytes); 2448 2449 if (!req->bio) 2450 return false; 2451 2452 /* 2453 * For fs requests, rq is just carrier of independent bio's 2454 * and each partial completion should be handled separately. 2455 * Reset per-request error on each partial completion. 2456 * 2457 * TODO: tj: This is too subtle. It would be better to let 2458 * low level drivers do what they see fit. 2459 */ 2460 if (req->cmd_type == REQ_TYPE_FS) 2461 req->errors = 0; 2462 2463 if (error && req->cmd_type == REQ_TYPE_FS && 2464 !(req->cmd_flags & REQ_QUIET)) { 2465 char *error_type; 2466 2467 switch (error) { 2468 case -ENOLINK: 2469 error_type = "recoverable transport"; 2470 break; 2471 case -EREMOTEIO: 2472 error_type = "critical target"; 2473 break; 2474 case -EBADE: 2475 error_type = "critical nexus"; 2476 break; 2477 case -ETIMEDOUT: 2478 error_type = "timeout"; 2479 break; 2480 case -ENOSPC: 2481 error_type = "critical space allocation"; 2482 break; 2483 case -ENODATA: 2484 error_type = "critical medium"; 2485 break; 2486 case -EIO: 2487 default: 2488 error_type = "I/O"; 2489 break; 2490 } 2491 printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n", 2492 __func__, error_type, req->rq_disk ? 2493 req->rq_disk->disk_name : "?", 2494 (unsigned long long)blk_rq_pos(req)); 2495 2496 } 2497 2498 blk_account_io_completion(req, nr_bytes); 2499 2500 total_bytes = 0; 2501 while (req->bio) { 2502 struct bio *bio = req->bio; 2503 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); 2504 2505 if (bio_bytes == bio->bi_iter.bi_size) 2506 req->bio = bio->bi_next; 2507 2508 req_bio_endio(req, bio, bio_bytes, error); 2509 2510 total_bytes += bio_bytes; 2511 nr_bytes -= bio_bytes; 2512 2513 if (!nr_bytes) 2514 break; 2515 } 2516 2517 /* 2518 * completely done 2519 */ 2520 if (!req->bio) { 2521 /* 2522 * Reset counters so that the request stacking driver 2523 * can find how many bytes remain in the request 2524 * later. 
2525 */ 2526 req->__data_len = 0; 2527 return false; 2528 } 2529 2530 req->__data_len -= total_bytes; 2531 2532 /* update sector only for requests with clear definition of sector */ 2533 if (req->cmd_type == REQ_TYPE_FS) 2534 req->__sector += total_bytes >> 9; 2535 2536 /* mixed attributes always follow the first bio */ 2537 if (req->cmd_flags & REQ_MIXED_MERGE) { 2538 req->cmd_flags &= ~REQ_FAILFAST_MASK; 2539 req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK; 2540 } 2541 2542 /* 2543 * If total number of sectors is less than the first segment 2544 * size, something has gone terribly wrong. 2545 */ 2546 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { 2547 blk_dump_rq_flags(req, "request botched"); 2548 req->__data_len = blk_rq_cur_bytes(req); 2549 } 2550 2551 /* recalculate the number of segments */ 2552 blk_recalc_rq_segments(req); 2553 2554 return true; 2555 } 2556 EXPORT_SYMBOL_GPL(blk_update_request); 2557 2558 static bool blk_update_bidi_request(struct request *rq, int error, 2559 unsigned int nr_bytes, 2560 unsigned int bidi_bytes) 2561 { 2562 if (blk_update_request(rq, error, nr_bytes)) 2563 return true; 2564 2565 /* Bidi request must be completed as a whole */ 2566 if (unlikely(blk_bidi_rq(rq)) && 2567 blk_update_request(rq->next_rq, error, bidi_bytes)) 2568 return true; 2569 2570 if (blk_queue_add_random(rq->q)) 2571 add_disk_randomness(rq->rq_disk); 2572 2573 return false; 2574 } 2575 2576 /** 2577 * blk_unprep_request - unprepare a request 2578 * @req: the request 2579 * 2580 * This function makes a request ready for complete resubmission (or 2581 * completion). It happens only after all error handling is complete, 2582 * so represents the appropriate moment to deallocate any resources 2583 * that were allocated to the request in the prep_rq_fn. The queue 2584 * lock is held when calling this. 2585 */ 2586 void blk_unprep_request(struct request *req) 2587 { 2588 struct request_queue *q = req->q; 2589 2590 req->cmd_flags &= ~REQ_DONTPREP; 2591 if (q->unprep_rq_fn) 2592 q->unprep_rq_fn(q, req); 2593 } 2594 EXPORT_SYMBOL_GPL(blk_unprep_request); 2595 2596 /* 2597 * queue lock must be held 2598 */ 2599 void blk_finish_request(struct request *req, int error) 2600 { 2601 if (req->cmd_flags & REQ_QUEUED) 2602 blk_queue_end_tag(req->q, req); 2603 2604 BUG_ON(blk_queued_rq(req)); 2605 2606 if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS) 2607 laptop_io_completion(&req->q->backing_dev_info); 2608 2609 blk_delete_timer(req); 2610 2611 if (req->cmd_flags & REQ_DONTPREP) 2612 blk_unprep_request(req); 2613 2614 blk_account_io_done(req); 2615 2616 if (req->end_io) 2617 req->end_io(req, error); 2618 else { 2619 if (blk_bidi_rq(req)) 2620 __blk_put_request(req->next_rq->q, req->next_rq); 2621 2622 __blk_put_request(req->q, req); 2623 } 2624 } 2625 EXPORT_SYMBOL(blk_finish_request); 2626 2627 /** 2628 * blk_end_bidi_request - Complete a bidi request 2629 * @rq: the request to complete 2630 * @error: %0 for success, < %0 for error 2631 * @nr_bytes: number of bytes to complete @rq 2632 * @bidi_bytes: number of bytes to complete @rq->next_rq 2633 * 2634 * Description: 2635 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. 2636 * Drivers that supports bidi can safely call this member for any 2637 * type of request, bidi or uni. In the later case @bidi_bytes is 2638 * just ignored. 
2639 * 2640 * Return: 2641 * %false - we are done with this request 2642 * %true - still buffers pending for this request 2643 **/ 2644 static bool blk_end_bidi_request(struct request *rq, int error, 2645 unsigned int nr_bytes, unsigned int bidi_bytes) 2646 { 2647 struct request_queue *q = rq->q; 2648 unsigned long flags; 2649 2650 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) 2651 return true; 2652 2653 spin_lock_irqsave(q->queue_lock, flags); 2654 blk_finish_request(rq, error); 2655 spin_unlock_irqrestore(q->queue_lock, flags); 2656 2657 return false; 2658 } 2659 2660 /** 2661 * __blk_end_bidi_request - Complete a bidi request with queue lock held 2662 * @rq: the request to complete 2663 * @error: %0 for success, < %0 for error 2664 * @nr_bytes: number of bytes to complete @rq 2665 * @bidi_bytes: number of bytes to complete @rq->next_rq 2666 * 2667 * Description: 2668 * Identical to blk_end_bidi_request() except that queue lock is 2669 * assumed to be locked on entry and remains so on return. 2670 * 2671 * Return: 2672 * %false - we are done with this request 2673 * %true - still buffers pending for this request 2674 **/ 2675 bool __blk_end_bidi_request(struct request *rq, int error, 2676 unsigned int nr_bytes, unsigned int bidi_bytes) 2677 { 2678 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) 2679 return true; 2680 2681 blk_finish_request(rq, error); 2682 2683 return false; 2684 } 2685 2686 /** 2687 * blk_end_request - Helper function for drivers to complete the request. 2688 * @rq: the request being processed 2689 * @error: %0 for success, < %0 for error 2690 * @nr_bytes: number of bytes to complete 2691 * 2692 * Description: 2693 * Ends I/O on a number of bytes attached to @rq. 2694 * If @rq has leftover, sets it up for the next range of segments. 2695 * 2696 * Return: 2697 * %false - we are done with this request 2698 * %true - still buffers pending for this request 2699 **/ 2700 bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 2701 { 2702 return blk_end_bidi_request(rq, error, nr_bytes, 0); 2703 } 2704 EXPORT_SYMBOL(blk_end_request); 2705 2706 /** 2707 * blk_end_request_all - Helper function for drives to finish the request. 2708 * @rq: the request to finish 2709 * @error: %0 for success, < %0 for error 2710 * 2711 * Description: 2712 * Completely finish @rq. 2713 */ 2714 void blk_end_request_all(struct request *rq, int error) 2715 { 2716 bool pending; 2717 unsigned int bidi_bytes = 0; 2718 2719 if (unlikely(blk_bidi_rq(rq))) 2720 bidi_bytes = blk_rq_bytes(rq->next_rq); 2721 2722 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); 2723 BUG_ON(pending); 2724 } 2725 EXPORT_SYMBOL(blk_end_request_all); 2726 2727 /** 2728 * blk_end_request_cur - Helper function to finish the current request chunk. 2729 * @rq: the request to finish the current chunk for 2730 * @error: %0 for success, < %0 for error 2731 * 2732 * Description: 2733 * Complete the current consecutively mapped chunk from @rq. 2734 * 2735 * Return: 2736 * %false - we are done with this request 2737 * %true - still buffers pending for this request 2738 */ 2739 bool blk_end_request_cur(struct request *rq, int error) 2740 { 2741 return blk_end_request(rq, error, blk_rq_cur_bytes(rq)); 2742 } 2743 EXPORT_SYMBOL(blk_end_request_cur); 2744 2745 /** 2746 * blk_end_request_err - Finish a request till the next failure boundary. 
2747 * @rq: the request to finish till the next failure boundary for 2748 * @error: must be negative errno 2749 * 2750 * Description: 2751 * Complete @rq till the next failure boundary. 2752 * 2753 * Return: 2754 * %false - we are done with this request 2755 * %true - still buffers pending for this request 2756 */ 2757 bool blk_end_request_err(struct request *rq, int error) 2758 { 2759 WARN_ON(error >= 0); 2760 return blk_end_request(rq, error, blk_rq_err_bytes(rq)); 2761 } 2762 EXPORT_SYMBOL_GPL(blk_end_request_err); 2763 2764 /** 2765 * __blk_end_request - Helper function for drivers to complete the request. 2766 * @rq: the request being processed 2767 * @error: %0 for success, < %0 for error 2768 * @nr_bytes: number of bytes to complete 2769 * 2770 * Description: 2771 * Must be called with queue lock held unlike blk_end_request(). 2772 * 2773 * Return: 2774 * %false - we are done with this request 2775 * %true - still buffers pending for this request 2776 **/ 2777 bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 2778 { 2779 return __blk_end_bidi_request(rq, error, nr_bytes, 0); 2780 } 2781 EXPORT_SYMBOL(__blk_end_request); 2782 2783 /** 2784 * __blk_end_request_all - Helper function for drives to finish the request. 2785 * @rq: the request to finish 2786 * @error: %0 for success, < %0 for error 2787 * 2788 * Description: 2789 * Completely finish @rq. Must be called with queue lock held. 2790 */ 2791 void __blk_end_request_all(struct request *rq, int error) 2792 { 2793 bool pending; 2794 unsigned int bidi_bytes = 0; 2795 2796 if (unlikely(blk_bidi_rq(rq))) 2797 bidi_bytes = blk_rq_bytes(rq->next_rq); 2798 2799 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); 2800 BUG_ON(pending); 2801 } 2802 EXPORT_SYMBOL(__blk_end_request_all); 2803 2804 /** 2805 * __blk_end_request_cur - Helper function to finish the current request chunk. 2806 * @rq: the request to finish the current chunk for 2807 * @error: %0 for success, < %0 for error 2808 * 2809 * Description: 2810 * Complete the current consecutively mapped chunk from @rq. Must 2811 * be called with queue lock held. 2812 * 2813 * Return: 2814 * %false - we are done with this request 2815 * %true - still buffers pending for this request 2816 */ 2817 bool __blk_end_request_cur(struct request *rq, int error) 2818 { 2819 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); 2820 } 2821 EXPORT_SYMBOL(__blk_end_request_cur); 2822 2823 /** 2824 * __blk_end_request_err - Finish a request till the next failure boundary. 2825 * @rq: the request to finish till the next failure boundary for 2826 * @error: must be negative errno 2827 * 2828 * Description: 2829 * Complete @rq till the next failure boundary. Must be called 2830 * with queue lock held. 
2831 * 2832 * Return: 2833 * %false - we are done with this request 2834 * %true - still buffers pending for this request 2835 */ 2836 bool __blk_end_request_err(struct request *rq, int error) 2837 { 2838 WARN_ON(error >= 0); 2839 return __blk_end_request(rq, error, blk_rq_err_bytes(rq)); 2840 } 2841 EXPORT_SYMBOL_GPL(__blk_end_request_err); 2842 2843 void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 2844 struct bio *bio) 2845 { 2846 /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */ 2847 rq->cmd_flags |= bio->bi_rw & REQ_WRITE; 2848 2849 if (bio_has_data(bio)) 2850 rq->nr_phys_segments = bio_phys_segments(q, bio); 2851 2852 rq->__data_len = bio->bi_iter.bi_size; 2853 rq->bio = rq->biotail = bio; 2854 2855 if (bio->bi_bdev) 2856 rq->rq_disk = bio->bi_bdev->bd_disk; 2857 } 2858 2859 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 2860 /** 2861 * rq_flush_dcache_pages - Helper function to flush all pages in a request 2862 * @rq: the request to be flushed 2863 * 2864 * Description: 2865 * Flush all pages in @rq. 2866 */ 2867 void rq_flush_dcache_pages(struct request *rq) 2868 { 2869 struct req_iterator iter; 2870 struct bio_vec bvec; 2871 2872 rq_for_each_segment(bvec, rq, iter) 2873 flush_dcache_page(bvec.bv_page); 2874 } 2875 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); 2876 #endif 2877 2878 /** 2879 * blk_lld_busy - Check if underlying low-level drivers of a device are busy 2880 * @q : the queue of the device being checked 2881 * 2882 * Description: 2883 * Check if underlying low-level drivers of a device are busy. 2884 * If the drivers want to export their busy state, they must set own 2885 * exporting function using blk_queue_lld_busy() first. 2886 * 2887 * Basically, this function is used only by request stacking drivers 2888 * to stop dispatching requests to underlying devices when underlying 2889 * devices are busy. This behavior helps more I/O merging on the queue 2890 * of the request stacking driver and prevents I/O throughput regression 2891 * on burst I/O load. 2892 * 2893 * Return: 2894 * 0 - Not busy (The request stacking driver should dispatch request) 2895 * 1 - Busy (The request stacking driver should stop dispatching request) 2896 */ 2897 int blk_lld_busy(struct request_queue *q) 2898 { 2899 if (q->lld_busy_fn) 2900 return q->lld_busy_fn(q); 2901 2902 return 0; 2903 } 2904 EXPORT_SYMBOL_GPL(blk_lld_busy); 2905 2906 /** 2907 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request 2908 * @rq: the clone request to be cleaned up 2909 * 2910 * Description: 2911 * Free all bios in @rq for a cloned request. 2912 */ 2913 void blk_rq_unprep_clone(struct request *rq) 2914 { 2915 struct bio *bio; 2916 2917 while ((bio = rq->bio) != NULL) { 2918 rq->bio = bio->bi_next; 2919 2920 bio_put(bio); 2921 } 2922 } 2923 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); 2924 2925 /* 2926 * Copy attributes of the original request to the clone request. 2927 * The actual data parts (e.g. ->cmd, ->sense) are not copied. 
2928 */ 2929 static void __blk_rq_prep_clone(struct request *dst, struct request *src) 2930 { 2931 dst->cpu = src->cpu; 2932 dst->cmd_flags |= (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE; 2933 dst->cmd_type = src->cmd_type; 2934 dst->__sector = blk_rq_pos(src); 2935 dst->__data_len = blk_rq_bytes(src); 2936 dst->nr_phys_segments = src->nr_phys_segments; 2937 dst->ioprio = src->ioprio; 2938 dst->extra_len = src->extra_len; 2939 } 2940 2941 /** 2942 * blk_rq_prep_clone - Helper function to setup clone request 2943 * @rq: the request to be setup 2944 * @rq_src: original request to be cloned 2945 * @bs: bio_set that bios for clone are allocated from 2946 * @gfp_mask: memory allocation mask for bio 2947 * @bio_ctr: setup function to be called for each clone bio. 2948 * Returns %0 for success, non %0 for failure. 2949 * @data: private data to be passed to @bio_ctr 2950 * 2951 * Description: 2952 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. 2953 * The actual data parts of @rq_src (e.g. ->cmd, ->sense) 2954 * are not copied, and copying such parts is the caller's responsibility. 2955 * Also, pages which the original bios are pointing to are not copied 2956 * and the cloned bios just point same pages. 2957 * So cloned bios must be completed before original bios, which means 2958 * the caller must complete @rq before @rq_src. 2959 */ 2960 int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 2961 struct bio_set *bs, gfp_t gfp_mask, 2962 int (*bio_ctr)(struct bio *, struct bio *, void *), 2963 void *data) 2964 { 2965 struct bio *bio, *bio_src; 2966 2967 if (!bs) 2968 bs = fs_bio_set; 2969 2970 __rq_for_each_bio(bio_src, rq_src) { 2971 bio = bio_clone_fast(bio_src, gfp_mask, bs); 2972 if (!bio) 2973 goto free_and_out; 2974 2975 if (bio_ctr && bio_ctr(bio, bio_src, data)) 2976 goto free_and_out; 2977 2978 if (rq->bio) { 2979 rq->biotail->bi_next = bio; 2980 rq->biotail = bio; 2981 } else 2982 rq->bio = rq->biotail = bio; 2983 } 2984 2985 __blk_rq_prep_clone(rq, rq_src); 2986 2987 return 0; 2988 2989 free_and_out: 2990 if (bio) 2991 bio_put(bio); 2992 blk_rq_unprep_clone(rq); 2993 2994 return -ENOMEM; 2995 } 2996 EXPORT_SYMBOL_GPL(blk_rq_prep_clone); 2997 2998 int kblockd_schedule_work(struct work_struct *work) 2999 { 3000 return queue_work(kblockd_workqueue, work); 3001 } 3002 EXPORT_SYMBOL(kblockd_schedule_work); 3003 3004 int kblockd_schedule_delayed_work(struct delayed_work *dwork, 3005 unsigned long delay) 3006 { 3007 return queue_delayed_work(kblockd_workqueue, dwork, delay); 3008 } 3009 EXPORT_SYMBOL(kblockd_schedule_delayed_work); 3010 3011 int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, 3012 unsigned long delay) 3013 { 3014 return queue_delayed_work_on(cpu, kblockd_workqueue, dwork, delay); 3015 } 3016 EXPORT_SYMBOL(kblockd_schedule_delayed_work_on); 3017 3018 /** 3019 * blk_start_plug - initialize blk_plug and track it inside the task_struct 3020 * @plug: The &struct blk_plug that needs to be initialized 3021 * 3022 * Description: 3023 * Tracking blk_plug inside the task_struct will help with auto-flushing the 3024 * pending I/O should the task end up blocking between blk_start_plug() and 3025 * blk_finish_plug(). This is important from a performance perspective, but 3026 * also ensures that we don't deadlock. For instance, if the task is blocking 3027 * for a memory allocation, memory reclaim could end up wanting to free a 3028 * page belonging to that request that is currently residing in our private 3029 * plug. 
By flushing the pending I/O when the process goes to sleep, we avoid 3030 * this kind of deadlock. 3031 */ 3032 void blk_start_plug(struct blk_plug *plug) 3033 { 3034 struct task_struct *tsk = current; 3035 3036 INIT_LIST_HEAD(&plug->list); 3037 INIT_LIST_HEAD(&plug->mq_list); 3038 INIT_LIST_HEAD(&plug->cb_list); 3039 3040 /* 3041 * If this is a nested plug, don't actually assign it. It will be 3042 * flushed on its own. 3043 */ 3044 if (!tsk->plug) { 3045 /* 3046 * Store ordering should not be needed here, since a potential 3047 * preempt will imply a full memory barrier 3048 */ 3049 tsk->plug = plug; 3050 } 3051 } 3052 EXPORT_SYMBOL(blk_start_plug); 3053 3054 static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b) 3055 { 3056 struct request *rqa = container_of(a, struct request, queuelist); 3057 struct request *rqb = container_of(b, struct request, queuelist); 3058 3059 return !(rqa->q < rqb->q || 3060 (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb))); 3061 } 3062 3063 /* 3064 * If 'from_schedule' is true, then postpone the dispatch of requests 3065 * until a safe kblockd context. We due this to avoid accidental big 3066 * additional stack usage in driver dispatch, in places where the originally 3067 * plugger did not intend it. 3068 */ 3069 static void queue_unplugged(struct request_queue *q, unsigned int depth, 3070 bool from_schedule) 3071 __releases(q->queue_lock) 3072 { 3073 trace_block_unplug(q, depth, !from_schedule); 3074 3075 if (from_schedule) 3076 blk_run_queue_async(q); 3077 else 3078 __blk_run_queue(q); 3079 spin_unlock(q->queue_lock); 3080 } 3081 3082 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) 3083 { 3084 LIST_HEAD(callbacks); 3085 3086 while (!list_empty(&plug->cb_list)) { 3087 list_splice_init(&plug->cb_list, &callbacks); 3088 3089 while (!list_empty(&callbacks)) { 3090 struct blk_plug_cb *cb = list_first_entry(&callbacks, 3091 struct blk_plug_cb, 3092 list); 3093 list_del(&cb->list); 3094 cb->callback(cb, from_schedule); 3095 } 3096 } 3097 } 3098 3099 struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data, 3100 int size) 3101 { 3102 struct blk_plug *plug = current->plug; 3103 struct blk_plug_cb *cb; 3104 3105 if (!plug) 3106 return NULL; 3107 3108 list_for_each_entry(cb, &plug->cb_list, list) 3109 if (cb->callback == unplug && cb->data == data) 3110 return cb; 3111 3112 /* Not currently on the callback list */ 3113 BUG_ON(size < sizeof(*cb)); 3114 cb = kzalloc(size, GFP_ATOMIC); 3115 if (cb) { 3116 cb->data = data; 3117 cb->callback = unplug; 3118 list_add(&cb->list, &plug->cb_list); 3119 } 3120 return cb; 3121 } 3122 EXPORT_SYMBOL(blk_check_plugged); 3123 3124 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) 3125 { 3126 struct request_queue *q; 3127 unsigned long flags; 3128 struct request *rq; 3129 LIST_HEAD(list); 3130 unsigned int depth; 3131 3132 flush_plug_callbacks(plug, from_schedule); 3133 3134 if (!list_empty(&plug->mq_list)) 3135 blk_mq_flush_plug_list(plug, from_schedule); 3136 3137 if (list_empty(&plug->list)) 3138 return; 3139 3140 list_splice_init(&plug->list, &list); 3141 3142 list_sort(NULL, &list, plug_rq_cmp); 3143 3144 q = NULL; 3145 depth = 0; 3146 3147 /* 3148 * Save and disable interrupts here, to avoid doing it for every 3149 * queue lock we have to take. 
3150 */
3151 local_irq_save(flags);
3152 while (!list_empty(&list)) {
3153 rq = list_entry_rq(list.next);
3154 list_del_init(&rq->queuelist);
3155 BUG_ON(!rq->q);
3156 if (rq->q != q) {
3157 /*
3158 * This drops the queue lock
3159 */
3160 if (q)
3161 queue_unplugged(q, depth, from_schedule);
3162 q = rq->q;
3163 depth = 0;
3164 spin_lock(q->queue_lock);
3165 }
3166
3167 /*
3168 * Short-circuit if @q is dead
3169 */
3170 if (unlikely(blk_queue_dying(q))) {
3171 __blk_end_request_all(rq, -ENODEV);
3172 continue;
3173 }
3174
3175 /*
3176 * rq is already accounted, so use raw insert
3177 */
3178 if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA))
3179 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
3180 else
3181 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
3182
3183 depth++;
3184 }
3185
3186 /*
3187 * This drops the queue lock
3188 */
3189 if (q)
3190 queue_unplugged(q, depth, from_schedule);
3191
3192 local_irq_restore(flags);
3193 }
3194
3195 void blk_finish_plug(struct blk_plug *plug)
3196 {
3197 blk_flush_plug_list(plug, false);
3198
3199 if (plug == current->plug)
3200 current->plug = NULL;
3201 }
3202 EXPORT_SYMBOL(blk_finish_plug);
3203
3204 #ifdef CONFIG_PM
3205 /**
3206 * blk_pm_runtime_init - Block layer runtime PM initialization routine
3207 * @q: the queue of the device
3208 * @dev: the device the queue belongs to
3209 *
3210 * Description:
3211 * Initialize runtime-PM-related fields for @q and start autosuspend for
3212 * @dev. Drivers that want to take advantage of request-based runtime PM
3213 * should call this function after @dev has been initialized and its
3214 * request queue @q has been allocated, while runtime PM for it cannot yet
3215 * happen (either because it is disabled/forbidden or because its
3216 * usage_count > 0). In most cases, the driver should call this function before any I/O has taken place.
3217 *
3218 * This function takes care of setting up autosuspend for the device;
3219 * the autosuspend delay is set to -1 to make runtime suspend impossible
3220 * until an updated value is set by either the user or the driver. Drivers
3221 * do not need to touch other autosuspend settings.
3222 *
3223 * Block layer runtime PM is request based, so it only works for drivers
3224 * that use requests as their I/O unit rather than submitting bios directly.
3225 */
3226 void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
3227 {
3228 q->dev = dev;
3229 q->rpm_status = RPM_ACTIVE;
3230 pm_runtime_set_autosuspend_delay(q->dev, -1);
3231 pm_runtime_use_autosuspend(q->dev);
3232 }
3233 EXPORT_SYMBOL(blk_pm_runtime_init);
3234
3235 /**
3236 * blk_pre_runtime_suspend - Pre runtime suspend check
3237 * @q: the queue of the device
3238 *
3239 * Description:
3240 * This function will check if runtime suspend is allowed for the device
3241 * by examining if there are any requests pending in the queue. If there
3242 * are requests pending, the device cannot be runtime suspended; otherwise,
3243 * the queue's status will be updated to SUSPENDING and the driver can
3244 * proceed to suspend the device.
3245 *
3246 * If suspend is not allowed, we mark the device as last busy so that the
3247 * runtime PM core will try to autosuspend it again some time later.
3248 *
3249 * This function should be called near the start of the device's
3250 * runtime_suspend callback.
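 *
 * Illustrative sketch of such a callback (editorial example, not part of
 * the original file; the mydev_* helpers are hypothetical):
 *
 *        static int mydev_runtime_suspend(struct device *dev)
 *        {
 *                struct request_queue *q = mydev_to_queue(dev);
 *                int err;
 *
 *                err = blk_pre_runtime_suspend(q);
 *                if (err)
 *                        return err;
 *                err = mydev_quiesce_hardware(dev);
 *                blk_post_runtime_suspend(q, err);
 *                return err;
 *        }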
3251 * 3252 * Return: 3253 * 0 - OK to runtime suspend the device 3254 * -EBUSY - Device should not be runtime suspended 3255 */ 3256 int blk_pre_runtime_suspend(struct request_queue *q) 3257 { 3258 int ret = 0; 3259 3260 spin_lock_irq(q->queue_lock); 3261 if (q->nr_pending) { 3262 ret = -EBUSY; 3263 pm_runtime_mark_last_busy(q->dev); 3264 } else { 3265 q->rpm_status = RPM_SUSPENDING; 3266 } 3267 spin_unlock_irq(q->queue_lock); 3268 return ret; 3269 } 3270 EXPORT_SYMBOL(blk_pre_runtime_suspend); 3271 3272 /** 3273 * blk_post_runtime_suspend - Post runtime suspend processing 3274 * @q: the queue of the device 3275 * @err: return value of the device's runtime_suspend function 3276 * 3277 * Description: 3278 * Update the queue's runtime status according to the return value of the 3279 * device's runtime suspend function and mark last busy for the device so 3280 * that PM core will try to auto suspend the device at a later time. 3281 * 3282 * This function should be called near the end of the device's 3283 * runtime_suspend callback. 3284 */ 3285 void blk_post_runtime_suspend(struct request_queue *q, int err) 3286 { 3287 spin_lock_irq(q->queue_lock); 3288 if (!err) { 3289 q->rpm_status = RPM_SUSPENDED; 3290 } else { 3291 q->rpm_status = RPM_ACTIVE; 3292 pm_runtime_mark_last_busy(q->dev); 3293 } 3294 spin_unlock_irq(q->queue_lock); 3295 } 3296 EXPORT_SYMBOL(blk_post_runtime_suspend); 3297 3298 /** 3299 * blk_pre_runtime_resume - Pre runtime resume processing 3300 * @q: the queue of the device 3301 * 3302 * Description: 3303 * Update the queue's runtime status to RESUMING in preparation for the 3304 * runtime resume of the device. 3305 * 3306 * This function should be called near the start of the device's 3307 * runtime_resume callback. 3308 */ 3309 void blk_pre_runtime_resume(struct request_queue *q) 3310 { 3311 spin_lock_irq(q->queue_lock); 3312 q->rpm_status = RPM_RESUMING; 3313 spin_unlock_irq(q->queue_lock); 3314 } 3315 EXPORT_SYMBOL(blk_pre_runtime_resume); 3316 3317 /** 3318 * blk_post_runtime_resume - Post runtime resume processing 3319 * @q: the queue of the device 3320 * @err: return value of the device's runtime_resume function 3321 * 3322 * Description: 3323 * Update the queue's runtime status according to the return value of the 3324 * device's runtime_resume function. If it is successfully resumed, process 3325 * the requests that are queued into the device's queue when it is resuming 3326 * and then mark last busy and initiate autosuspend for it. 3327 * 3328 * This function should be called near the end of the device's 3329 * runtime_resume callback. 
3330 */ 3331 void blk_post_runtime_resume(struct request_queue *q, int err) 3332 { 3333 spin_lock_irq(q->queue_lock); 3334 if (!err) { 3335 q->rpm_status = RPM_ACTIVE; 3336 __blk_run_queue(q); 3337 pm_runtime_mark_last_busy(q->dev); 3338 pm_request_autosuspend(q->dev); 3339 } else { 3340 q->rpm_status = RPM_SUSPENDED; 3341 } 3342 spin_unlock_irq(q->queue_lock); 3343 } 3344 EXPORT_SYMBOL(blk_post_runtime_resume); 3345 #endif 3346 3347 int __init blk_dev_init(void) 3348 { 3349 BUILD_BUG_ON(__REQ_NR_BITS > 8 * 3350 sizeof(((struct request *)0)->cmd_flags)); 3351 3352 /* used for unplugging and affects IO latency/throughput - HIGHPRI */ 3353 kblockd_workqueue = alloc_workqueue("kblockd", 3354 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); 3355 if (!kblockd_workqueue) 3356 panic("Failed to create kblockd\n"); 3357 3358 request_cachep = kmem_cache_create("blkdev_requests", 3359 sizeof(struct request), 0, SLAB_PANIC, NULL); 3360 3361 blk_requestq_cachep = kmem_cache_create("blkdev_queue", 3362 sizeof(struct request_queue), 0, SLAB_PANIC, NULL); 3363 3364 return 0; 3365 } 3366