1 /* 2 * Copyright (C) 1991, 1992 Linus Torvalds 3 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics 4 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE 5 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de> 6 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> 7 * - July2000 8 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001 9 */ 10 11 /* 12 * This handles all read/write requests to block devices 13 */ 14 #include <linux/kernel.h> 15 #include <linux/module.h> 16 #include <linux/backing-dev.h> 17 #include <linux/bio.h> 18 #include <linux/blkdev.h> 19 #include <linux/highmem.h> 20 #include <linux/mm.h> 21 #include <linux/kernel_stat.h> 22 #include <linux/string.h> 23 #include <linux/init.h> 24 #include <linux/completion.h> 25 #include <linux/slab.h> 26 #include <linux/swap.h> 27 #include <linux/writeback.h> 28 #include <linux/task_io_accounting_ops.h> 29 #include <linux/fault-inject.h> 30 31 #define CREATE_TRACE_POINTS 32 #include <trace/events/block.h> 33 34 #include "blk.h" 35 36 EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap); 37 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); 38 39 static int __make_request(struct request_queue *q, struct bio *bio); 40 41 /* 42 * For the allocated request tables 43 */ 44 static struct kmem_cache *request_cachep; 45 46 /* 47 * For queue allocation 48 */ 49 struct kmem_cache *blk_requestq_cachep; 50 51 /* 52 * Controlling structure to kblockd 53 */ 54 static struct workqueue_struct *kblockd_workqueue; 55 56 static void drive_stat_acct(struct request *rq, int new_io) 57 { 58 struct hd_struct *part; 59 int rw = rq_data_dir(rq); 60 int cpu; 61 62 if (!blk_do_io_stat(rq)) 63 return; 64 65 cpu = part_stat_lock(); 66 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); 67 68 if (!new_io) 69 part_stat_inc(cpu, part, merges[rw]); 70 else { 71 part_round_stats(cpu, part); 72 part_inc_in_flight(part, rw); 73 } 74 75 part_stat_unlock(); 76 } 77 78 void blk_queue_congestion_threshold(struct request_queue *q) 79 { 80 int nr; 81 82 nr = q->nr_requests - (q->nr_requests / 8) + 1; 83 if (nr > q->nr_requests) 84 nr = q->nr_requests; 85 q->nr_congestion_on = nr; 86 87 nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1; 88 if (nr < 1) 89 nr = 1; 90 q->nr_congestion_off = nr; 91 } 92 93 /** 94 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info 95 * @bdev: device 96 * 97 * Locates the passed device's request queue and returns the address of its 98 * backing_dev_info 99 * 100 * Will return NULL if the request queue cannot be located. 
101 */ 102 struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev) 103 { 104 struct backing_dev_info *ret = NULL; 105 struct request_queue *q = bdev_get_queue(bdev); 106 107 if (q) 108 ret = &q->backing_dev_info; 109 return ret; 110 } 111 EXPORT_SYMBOL(blk_get_backing_dev_info); 112 113 void blk_rq_init(struct request_queue *q, struct request *rq) 114 { 115 memset(rq, 0, sizeof(*rq)); 116 117 INIT_LIST_HEAD(&rq->queuelist); 118 INIT_LIST_HEAD(&rq->timeout_list); 119 rq->cpu = -1; 120 rq->q = q; 121 rq->__sector = (sector_t) -1; 122 INIT_HLIST_NODE(&rq->hash); 123 RB_CLEAR_NODE(&rq->rb_node); 124 rq->cmd = rq->__cmd; 125 rq->cmd_len = BLK_MAX_CDB; 126 rq->tag = -1; 127 rq->ref_count = 1; 128 rq->start_time = jiffies; 129 } 130 EXPORT_SYMBOL(blk_rq_init); 131 132 static void req_bio_endio(struct request *rq, struct bio *bio, 133 unsigned int nbytes, int error) 134 { 135 struct request_queue *q = rq->q; 136 137 if (&q->bar_rq != rq) { 138 if (error) 139 clear_bit(BIO_UPTODATE, &bio->bi_flags); 140 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) 141 error = -EIO; 142 143 if (unlikely(nbytes > bio->bi_size)) { 144 printk(KERN_ERR "%s: want %u bytes done, %u left\n", 145 __func__, nbytes, bio->bi_size); 146 nbytes = bio->bi_size; 147 } 148 149 if (unlikely(rq->cmd_flags & REQ_QUIET)) 150 set_bit(BIO_QUIET, &bio->bi_flags); 151 152 bio->bi_size -= nbytes; 153 bio->bi_sector += (nbytes >> 9); 154 155 if (bio_integrity(bio)) 156 bio_integrity_advance(bio, nbytes); 157 158 if (bio->bi_size == 0) 159 bio_endio(bio, error); 160 } else { 161 162 /* 163 * Okay, this is the barrier request in progress, just 164 * record the error; 165 */ 166 if (error && !q->orderr) 167 q->orderr = error; 168 } 169 } 170 171 void blk_dump_rq_flags(struct request *rq, char *msg) 172 { 173 int bit; 174 175 printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg, 176 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, 177 rq->cmd_flags); 178 179 printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n", 180 (unsigned long long)blk_rq_pos(rq), 181 blk_rq_sectors(rq), blk_rq_cur_sectors(rq)); 182 printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n", 183 rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq)); 184 185 if (blk_pc_request(rq)) { 186 printk(KERN_INFO " cdb: "); 187 for (bit = 0; bit < BLK_MAX_CDB; bit++) 188 printk("%02x ", rq->cmd[bit]); 189 printk("\n"); 190 } 191 } 192 EXPORT_SYMBOL(blk_dump_rq_flags); 193 194 /* 195 * "plug" the device if there are no outstanding requests: this will 196 * force the transfer to start only after we have put all the requests 197 * on the list. 198 * 199 * This is called with interrupts off and no requests on the queue and 200 * with the queue lock held. 201 */ 202 void blk_plug_device(struct request_queue *q) 203 { 204 WARN_ON(!irqs_disabled()); 205 206 /* 207 * don't plug a stopped queue, it must be paired with blk_start_queue() 208 * which will restart the queueing 209 */ 210 if (blk_queue_stopped(q)) 211 return; 212 213 if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) { 214 mod_timer(&q->unplug_timer, jiffies + q->unplug_delay); 215 trace_block_plug(q); 216 } 217 } 218 EXPORT_SYMBOL(blk_plug_device); 219 220 /** 221 * blk_plug_device_unlocked - plug a device without queue lock held 222 * @q: The &struct request_queue to plug 223 * 224 * Description: 225 * Like @blk_plug_device(), but grabs the queue lock and disables 226 * interrupts. 
227 **/ 228 void blk_plug_device_unlocked(struct request_queue *q) 229 { 230 unsigned long flags; 231 232 spin_lock_irqsave(q->queue_lock, flags); 233 blk_plug_device(q); 234 spin_unlock_irqrestore(q->queue_lock, flags); 235 } 236 EXPORT_SYMBOL(blk_plug_device_unlocked); 237 238 /* 239 * remove the queue from the plugged list, if present. called with 240 * queue lock held and interrupts disabled. 241 */ 242 int blk_remove_plug(struct request_queue *q) 243 { 244 WARN_ON(!irqs_disabled()); 245 246 if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q)) 247 return 0; 248 249 del_timer(&q->unplug_timer); 250 return 1; 251 } 252 EXPORT_SYMBOL(blk_remove_plug); 253 254 /* 255 * remove the plug and let it rip.. 256 */ 257 void __generic_unplug_device(struct request_queue *q) 258 { 259 if (unlikely(blk_queue_stopped(q))) 260 return; 261 if (!blk_remove_plug(q) && !blk_queue_nonrot(q)) 262 return; 263 264 q->request_fn(q); 265 } 266 267 /** 268 * generic_unplug_device - fire a request queue 269 * @q: The &struct request_queue in question 270 * 271 * Description: 272 * Linux uses plugging to build bigger requests queues before letting 273 * the device have at them. If a queue is plugged, the I/O scheduler 274 * is still adding and merging requests on the queue. Once the queue 275 * gets unplugged, the request_fn defined for the queue is invoked and 276 * transfers started. 277 **/ 278 void generic_unplug_device(struct request_queue *q) 279 { 280 if (blk_queue_plugged(q)) { 281 spin_lock_irq(q->queue_lock); 282 __generic_unplug_device(q); 283 spin_unlock_irq(q->queue_lock); 284 } 285 } 286 EXPORT_SYMBOL(generic_unplug_device); 287 288 static void blk_backing_dev_unplug(struct backing_dev_info *bdi, 289 struct page *page) 290 { 291 struct request_queue *q = bdi->unplug_io_data; 292 293 blk_unplug(q); 294 } 295 296 void blk_unplug_work(struct work_struct *work) 297 { 298 struct request_queue *q = 299 container_of(work, struct request_queue, unplug_work); 300 301 trace_block_unplug_io(q); 302 q->unplug_fn(q); 303 } 304 305 void blk_unplug_timeout(unsigned long data) 306 { 307 struct request_queue *q = (struct request_queue *)data; 308 309 trace_block_unplug_timer(q); 310 kblockd_schedule_work(q, &q->unplug_work); 311 } 312 313 void blk_unplug(struct request_queue *q) 314 { 315 /* 316 * devices don't necessarily have an ->unplug_fn defined 317 */ 318 if (q->unplug_fn) { 319 trace_block_unplug_io(q); 320 q->unplug_fn(q); 321 } 322 } 323 EXPORT_SYMBOL(blk_unplug); 324 325 /** 326 * blk_start_queue - restart a previously stopped queue 327 * @q: The &struct request_queue in question 328 * 329 * Description: 330 * blk_start_queue() will clear the stop flag on the queue, and call 331 * the request_fn for the queue if it was in a stopped state when 332 * entered. Also see blk_stop_queue(). Queue lock must be held. 333 **/ 334 void blk_start_queue(struct request_queue *q) 335 { 336 WARN_ON(!irqs_disabled()); 337 338 queue_flag_clear(QUEUE_FLAG_STOPPED, q); 339 __blk_run_queue(q); 340 } 341 EXPORT_SYMBOL(blk_start_queue); 342 343 /** 344 * blk_stop_queue - stop a queue 345 * @q: The &struct request_queue in question 346 * 347 * Description: 348 * The Linux block layer assumes that a block driver will consume all 349 * entries on the request queue when the request_fn strategy is called. 350 * Often this will not happen, because of hardware limitations (queue 351 * depth settings). 
If a device driver gets a 'queue full' response, 352 * or if it simply chooses not to queue more I/O at one point, it can 353 * call this function to prevent the request_fn from being called until 354 * the driver has signalled it's ready to go again. This happens by calling 355 * blk_start_queue() to restart queue operations. Queue lock must be held. 356 **/ 357 void blk_stop_queue(struct request_queue *q) 358 { 359 blk_remove_plug(q); 360 queue_flag_set(QUEUE_FLAG_STOPPED, q); 361 } 362 EXPORT_SYMBOL(blk_stop_queue); 363 364 /** 365 * blk_sync_queue - cancel any pending callbacks on a queue 366 * @q: the queue 367 * 368 * Description: 369 * The block layer may perform asynchronous callback activity 370 * on a queue, such as calling the unplug function after a timeout. 371 * A block device may call blk_sync_queue to ensure that any 372 * such activity is cancelled, thus allowing it to release resources 373 * that the callbacks might use. The caller must already have made sure 374 * that its ->make_request_fn will not re-add plugging prior to calling 375 * this function. 376 * 377 */ 378 void blk_sync_queue(struct request_queue *q) 379 { 380 del_timer_sync(&q->unplug_timer); 381 del_timer_sync(&q->timeout); 382 cancel_work_sync(&q->unplug_work); 383 } 384 EXPORT_SYMBOL(blk_sync_queue); 385 386 /** 387 * __blk_run_queue - run a single device queue 388 * @q: The queue to run 389 * 390 * Description: 391 * See @blk_run_queue. This variant must be called with the queue lock 392 * held and interrupts disabled. 393 * 394 */ 395 void __blk_run_queue(struct request_queue *q) 396 { 397 blk_remove_plug(q); 398 399 if (unlikely(blk_queue_stopped(q))) 400 return; 401 402 if (elv_queue_empty(q)) 403 return; 404 405 /* 406 * Only recurse once to avoid overrunning the stack, let the unplug 407 * handling reinvoke the handler shortly if we already got there. 408 */ 409 if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { 410 q->request_fn(q); 411 queue_flag_clear(QUEUE_FLAG_REENTER, q); 412 } else { 413 queue_flag_set(QUEUE_FLAG_PLUGGED, q); 414 kblockd_schedule_work(q, &q->unplug_work); 415 } 416 } 417 EXPORT_SYMBOL(__blk_run_queue); 418 419 /** 420 * blk_run_queue - run a single device queue 421 * @q: The queue to run 422 * 423 * Description: 424 * Invoke request handling on this queue, if it has pending work to do. 425 * May be used to restart queueing when a request has completed. 426 */ 427 void blk_run_queue(struct request_queue *q) 428 { 429 unsigned long flags; 430 431 spin_lock_irqsave(q->queue_lock, flags); 432 __blk_run_queue(q); 433 spin_unlock_irqrestore(q->queue_lock, flags); 434 } 435 EXPORT_SYMBOL(blk_run_queue); 436 437 void blk_put_queue(struct request_queue *q) 438 { 439 kobject_put(&q->kobj); 440 } 441 442 void blk_cleanup_queue(struct request_queue *q) 443 { 444 /* 445 * We know we have process context here, so we can be a little 446 * cautious and ensure that pending block actions on this device 447 * are done before moving on. Going into this function, we should 448 * not have processes doing IO to this device. 
449 */ 450 blk_sync_queue(q); 451 452 mutex_lock(&q->sysfs_lock); 453 queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q); 454 mutex_unlock(&q->sysfs_lock); 455 456 if (q->elevator) 457 elevator_exit(q->elevator); 458 459 blk_put_queue(q); 460 } 461 EXPORT_SYMBOL(blk_cleanup_queue); 462 463 static int blk_init_free_list(struct request_queue *q) 464 { 465 struct request_list *rl = &q->rq; 466 467 rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; 468 rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; 469 rl->elvpriv = 0; 470 init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); 471 init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); 472 473 rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, 474 mempool_free_slab, request_cachep, q->node); 475 476 if (!rl->rq_pool) 477 return -ENOMEM; 478 479 return 0; 480 } 481 482 struct request_queue *blk_alloc_queue(gfp_t gfp_mask) 483 { 484 return blk_alloc_queue_node(gfp_mask, -1); 485 } 486 EXPORT_SYMBOL(blk_alloc_queue); 487 488 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) 489 { 490 struct request_queue *q; 491 int err; 492 493 q = kmem_cache_alloc_node(blk_requestq_cachep, 494 gfp_mask | __GFP_ZERO, node_id); 495 if (!q) 496 return NULL; 497 498 q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug; 499 q->backing_dev_info.unplug_io_data = q; 500 q->backing_dev_info.ra_pages = 501 (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 502 q->backing_dev_info.state = 0; 503 q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; 504 q->backing_dev_info.name = "block"; 505 506 err = bdi_init(&q->backing_dev_info); 507 if (err) { 508 kmem_cache_free(blk_requestq_cachep, q); 509 return NULL; 510 } 511 512 init_timer(&q->unplug_timer); 513 setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); 514 INIT_LIST_HEAD(&q->timeout_list); 515 INIT_WORK(&q->unplug_work, blk_unplug_work); 516 517 kobject_init(&q->kobj, &blk_queue_ktype); 518 519 mutex_init(&q->sysfs_lock); 520 spin_lock_init(&q->__queue_lock); 521 522 return q; 523 } 524 EXPORT_SYMBOL(blk_alloc_queue_node); 525 526 /** 527 * blk_init_queue - prepare a request queue for use with a block device 528 * @rfn: The function to be called to process requests that have been 529 * placed on the queue. 530 * @lock: Request queue spin lock 531 * 532 * Description: 533 * If a block device wishes to use the standard request handling procedures, 534 * which sorts requests and coalesces adjacent requests, then it must 535 * call blk_init_queue(). The function @rfn will be called when there 536 * are requests on the queue that need to be processed. If the device 537 * supports plugging, then @rfn may not be called immediately when requests 538 * are available on the queue, but may be called at some time later instead. 539 * Plugged queues are generally unplugged when a buffer belonging to one 540 * of the requests on the queue is needed, or due to memory pressure. 541 * 542 * @rfn is not required, or even expected, to remove all requests off the 543 * queue, but only as many as it can handle at a time. If it does leave 544 * requests on the queue, it is responsible for arranging that the requests 545 * get dealt with eventually. 546 * 547 * The queue spin lock must be held while manipulating the requests on the 548 * request queue; this lock will be taken also from interrupt context, so irq 549 * disabling is needed for it. 550 * 551 * Function returns a pointer to the initialized request queue, or %NULL if 552 * it didn't succeed. 
553 * 554 * Note: 555 * blk_init_queue() must be paired with a blk_cleanup_queue() call 556 * when the block device is deactivated (such as at module unload). 557 **/ 558 559 struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock) 560 { 561 return blk_init_queue_node(rfn, lock, -1); 562 } 563 EXPORT_SYMBOL(blk_init_queue); 564 565 struct request_queue * 566 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) 567 { 568 struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id); 569 570 if (!q) 571 return NULL; 572 573 q->node = node_id; 574 if (blk_init_free_list(q)) { 575 kmem_cache_free(blk_requestq_cachep, q); 576 return NULL; 577 } 578 579 q->request_fn = rfn; 580 q->prep_rq_fn = NULL; 581 q->unplug_fn = generic_unplug_device; 582 q->queue_flags = QUEUE_FLAG_DEFAULT; 583 q->queue_lock = lock; 584 585 /* 586 * This also sets hw/phys segments, boundary and size 587 */ 588 blk_queue_make_request(q, __make_request); 589 590 q->sg_reserved_size = INT_MAX; 591 592 /* 593 * all done 594 */ 595 if (!elevator_init(q, NULL)) { 596 blk_queue_congestion_threshold(q); 597 return q; 598 } 599 600 blk_put_queue(q); 601 return NULL; 602 } 603 EXPORT_SYMBOL(blk_init_queue_node); 604 605 int blk_get_queue(struct request_queue *q) 606 { 607 if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { 608 kobject_get(&q->kobj); 609 return 0; 610 } 611 612 return 1; 613 } 614 615 static inline void blk_free_request(struct request_queue *q, struct request *rq) 616 { 617 if (rq->cmd_flags & REQ_ELVPRIV) 618 elv_put_request(q, rq); 619 mempool_free(rq, q->rq.rq_pool); 620 } 621 622 static struct request * 623 blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask) 624 { 625 struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); 626 627 if (!rq) 628 return NULL; 629 630 blk_rq_init(q, rq); 631 632 rq->cmd_flags = flags | REQ_ALLOCED; 633 634 if (priv) { 635 if (unlikely(elv_set_request(q, rq, gfp_mask))) { 636 mempool_free(rq, q->rq.rq_pool); 637 return NULL; 638 } 639 rq->cmd_flags |= REQ_ELVPRIV; 640 } 641 642 return rq; 643 } 644 645 /* 646 * ioc_batching returns true if the ioc is a valid batching request and 647 * should be given priority access to a request. 648 */ 649 static inline int ioc_batching(struct request_queue *q, struct io_context *ioc) 650 { 651 if (!ioc) 652 return 0; 653 654 /* 655 * Make sure the process is able to allocate at least 1 request 656 * even if the batch times out, otherwise we could theoretically 657 * lose wakeups. 658 */ 659 return ioc->nr_batch_requests == q->nr_batching || 660 (ioc->nr_batch_requests > 0 661 && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME)); 662 } 663 664 /* 665 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This 666 * will cause the process to be a "batcher" on all queues in the system. This 667 * is the behaviour we want though - once it gets a wakeup it should be given 668 * a nice run. 
669 */ 670 static void ioc_set_batching(struct request_queue *q, struct io_context *ioc) 671 { 672 if (!ioc || ioc_batching(q, ioc)) 673 return; 674 675 ioc->nr_batch_requests = q->nr_batching; 676 ioc->last_waited = jiffies; 677 } 678 679 static void __freed_request(struct request_queue *q, int sync) 680 { 681 struct request_list *rl = &q->rq; 682 683 if (rl->count[sync] < queue_congestion_off_threshold(q)) 684 blk_clear_queue_congested(q, sync); 685 686 if (rl->count[sync] + 1 <= q->nr_requests) { 687 if (waitqueue_active(&rl->wait[sync])) 688 wake_up(&rl->wait[sync]); 689 690 blk_clear_queue_full(q, sync); 691 } 692 } 693 694 /* 695 * A request has just been released. Account for it, update the full and 696 * congestion status, wake up any waiters. Called under q->queue_lock. 697 */ 698 static void freed_request(struct request_queue *q, int sync, int priv) 699 { 700 struct request_list *rl = &q->rq; 701 702 rl->count[sync]--; 703 if (priv) 704 rl->elvpriv--; 705 706 __freed_request(q, sync); 707 708 if (unlikely(rl->starved[sync ^ 1])) 709 __freed_request(q, sync ^ 1); 710 } 711 712 /* 713 * Get a free request, queue_lock must be held. 714 * Returns NULL on failure, with queue_lock held. 715 * Returns !NULL on success, with queue_lock *not held*. 716 */ 717 static struct request *get_request(struct request_queue *q, int rw_flags, 718 struct bio *bio, gfp_t gfp_mask) 719 { 720 struct request *rq = NULL; 721 struct request_list *rl = &q->rq; 722 struct io_context *ioc = NULL; 723 const bool is_sync = rw_is_sync(rw_flags) != 0; 724 int may_queue, priv; 725 726 may_queue = elv_may_queue(q, rw_flags); 727 if (may_queue == ELV_MQUEUE_NO) 728 goto rq_starved; 729 730 if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { 731 if (rl->count[is_sync]+1 >= q->nr_requests) { 732 ioc = current_io_context(GFP_ATOMIC, q->node); 733 /* 734 * The queue will fill after this allocation, so set 735 * it as full, and mark this process as "batching". 736 * This process will be allowed to complete a batch of 737 * requests, others will be blocked. 738 */ 739 if (!blk_queue_full(q, is_sync)) { 740 ioc_set_batching(q, ioc); 741 blk_set_queue_full(q, is_sync); 742 } else { 743 if (may_queue != ELV_MQUEUE_MUST 744 && !ioc_batching(q, ioc)) { 745 /* 746 * The queue is full and the allocating 747 * process is not a "batcher", and not 748 * exempted by the IO scheduler 749 */ 750 goto out; 751 } 752 } 753 } 754 blk_set_queue_congested(q, is_sync); 755 } 756 757 /* 758 * Only allow batching queuers to allocate up to 50% over the defined 759 * limit of requests, otherwise we could have thousands of requests 760 * allocated with any setting of ->nr_requests 761 */ 762 if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) 763 goto out; 764 765 rl->count[is_sync]++; 766 rl->starved[is_sync] = 0; 767 768 priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); 769 if (priv) 770 rl->elvpriv++; 771 772 if (blk_queue_io_stat(q)) 773 rw_flags |= REQ_IO_STAT; 774 spin_unlock_irq(q->queue_lock); 775 776 rq = blk_alloc_request(q, rw_flags, priv, gfp_mask); 777 if (unlikely(!rq)) { 778 /* 779 * Allocation failed presumably due to memory. Undo anything 780 * we might have messed up. 781 * 782 * Allocating task should really be put onto the front of the 783 * wait queue, but this is pretty rare. 
	 */
		spin_lock_irq(q->queue_lock);
		freed_request(q, is_sync, priv);

		/*
		 * in the very unlikely event that allocation failed and no
		 * requests for this direction was pending, mark us starved
		 * so that freeing of a request in the other direction will
		 * notice us. another possible fix would be to split the
		 * rq mempool into READ and WRITE
		 */
rq_starved:
		if (unlikely(rl->count[is_sync] == 0))
			rl->starved[is_sync] = 1;

		goto out;
	}

	/*
	 * ioc may be NULL here, and ioc_batching will be false. That's
	 * OK, if the queue is under the request limit then requests need
	 * not count toward the nr_batch_requests limit. There will always
	 * be some limit enforced by BLK_BATCH_TIME.
	 */
	if (ioc_batching(q, ioc))
		ioc->nr_batch_requests--;

	trace_block_getrq(q, bio, rw_flags & 1);
out:
	return rq;
}

/*
 * No available requests for this queue, unplug the device and wait for some
 * requests to become available.
 *
 * Called with q->queue_lock held, and returns with it unlocked.
 */
static struct request *get_request_wait(struct request_queue *q, int rw_flags,
					struct bio *bio)
{
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	struct request *rq;

	rq = get_request(q, rw_flags, bio, GFP_NOIO);
	while (!rq) {
		DEFINE_WAIT(wait);
		struct io_context *ioc;
		struct request_list *rl = &q->rq;

		prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
					  TASK_UNINTERRUPTIBLE);

		trace_block_sleeprq(q, bio, rw_flags & 1);

		__generic_unplug_device(q);
		spin_unlock_irq(q->queue_lock);
		io_schedule();

		/*
		 * After sleeping, we become a "batching" process and
		 * will be able to allocate at least one request, and
		 * up to a big batch of them for a small period of time.
		 * See ioc_batching, ioc_set_batching
		 */
		ioc = current_io_context(GFP_NOIO, q->node);
		ioc_set_batching(q, ioc);

		spin_lock_irq(q->queue_lock);
		finish_wait(&rl->wait[is_sync], &wait);

		rq = get_request(q, rw_flags, bio, GFP_NOIO);
	}

	return rq;
}

struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
	struct request *rq;

	BUG_ON(rw != READ && rw != WRITE);

	spin_lock_irq(q->queue_lock);
	if (gfp_mask & __GFP_WAIT) {
		rq = get_request_wait(q, rw, NULL);
	} else {
		rq = get_request(q, rw, NULL, gfp_mask);
		if (!rq)
			spin_unlock_irq(q->queue_lock);
	}
	/* q->queue_lock is unlocked at this point */

	return rq;
}
EXPORT_SYMBOL(blk_get_request);
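
/*
 * Illustrative sketch (not part of this file): a typical non-filesystem
 * caller allocates a request with blk_get_request(), fills in the command
 * bytes and hands it to the block layer, e.g. via blk_execute_rq(). The
 * queue, disk and command bytes below are assumptions for the example.
 *
 *	struct request *rq;
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	if (!rq)
 *		return -ENOMEM;
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	rq->cmd[0] = 0x00;		(e.g. SCSI TEST UNIT READY)
 *	rq->cmd_len = 6;
 *	rq->timeout = 60 * HZ;
 *	blk_execute_rq(q, disk, rq, 0);
 *	blk_put_request(rq);
 */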

/**
 * blk_make_request - given a bio, allocate a corresponding struct request.
 * @q: target request queue
 * @bio: The bio describing the memory mappings that will be submitted for IO.
 *       It may be a chained-bio properly constructed by block/bio layer.
 * @gfp_mask: gfp flags to be used for memory allocation
 *
 * blk_make_request is the parallel of generic_make_request for BLOCK_PC
 * type commands, where the struct request needs to be further initialized by
 * the caller. It is passed a &struct bio, which describes the memory info of
 * the I/O transfer.
 *
 * The caller of blk_make_request must make sure that bi_io_vec
 * are set to describe the memory buffers, so that bio_data_dir() will return
 * the needed direction of the request. (And all bio's in the passed bio-chain
 * are properly set accordingly)
 *
 * If called under non-sleepable conditions, mapped bio buffers must not
 * need bouncing, by calling the appropriate masked or flagged allocator,
 * suitable for the target device. Otherwise the call to blk_queue_bounce will
 * BUG.
 *
 * WARNING: When allocating/cloning a bio-chain, careful consideration should be
 * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
 * anything but the first bio in the chain. Otherwise you risk waiting for IO
 * completion of a bio that hasn't been submitted yet, thus resulting in a
 * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
 * of bio_alloc(), as that avoids the mempool deadlock.
 * If possible a big IO should be split into smaller parts when allocation
 * fails. Partial allocation should not be an error, or you risk a live-lock.
 */
struct request *blk_make_request(struct request_queue *q, struct bio *bio,
				 gfp_t gfp_mask)
{
	struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);

	if (unlikely(!rq))
		return ERR_PTR(-ENOMEM);

	for_each_bio(bio) {
		struct bio *bounce_bio = bio;
		int ret;

		blk_queue_bounce(q, &bounce_bio);
		ret = blk_rq_append_bio(q, rq, bounce_bio);
		if (unlikely(ret)) {
			blk_put_request(rq);
			return ERR_PTR(ret);
		}
	}

	return rq;
}
EXPORT_SYMBOL(blk_make_request);
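
/*
 * Illustrative sketch (assumptions: a driver-provided queue "q" and a kernel
 * buffer "buf" of "len" bytes): the bio handed to blk_make_request() can be
 * built with an existing mapping helper such as bio_map_kern(); the data
 * direction of the resulting request comes from the bio, and the command
 * fields still have to be filled in by the caller before execution.
 *
 *	struct bio *bio = bio_map_kern(q, buf, len, GFP_KERNEL);
 *	struct request *rq;
 *
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *
 *	rq = blk_make_request(q, bio, GFP_KERNEL);
 *	if (IS_ERR(rq)) {
 *		bio_put(bio);
 *		return PTR_ERR(rq);
 *	}
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 */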

/**
 * blk_requeue_request - put a request back on queue
 * @q: request queue where request should be inserted
 * @rq: request to be inserted
 *
 * Description:
 *    Drivers often keep queueing requests until the hardware cannot accept
 *    more, when that condition happens we need to put the request back
 *    on the queue. Must be called with queue lock held.
 */
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
	blk_delete_timer(rq);
	blk_clear_rq_complete(rq);
	trace_block_rq_requeue(q, rq);

	if (blk_rq_tagged(rq))
		blk_queue_end_tag(q, rq);

	BUG_ON(blk_queued_rq(rq));

	elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);

/**
 * blk_insert_request - insert a special request into a request queue
 * @q: request queue where request should be inserted
 * @rq: request to be inserted
 * @at_head: insert request at head or tail of queue
 * @data: private data
 *
 * Description:
 *    Many block devices need to execute commands asynchronously, so they don't
 *    block the whole kernel from preemption during request execution. This is
 *    accomplished normally by inserting artificial requests tagged as
 *    REQ_TYPE_SPECIAL into the corresponding request queue, and letting them
 *    be scheduled for actual execution by the request queue.
 *
 *    We have the option of inserting at the head or the tail of the queue.
 *    Typically we use the tail for new ioctls and so forth. We use the head
 *    of the queue for things like a QUEUE_FULL message from a device, or a
 *    host that is unable to accept a particular command.
 */
void blk_insert_request(struct request_queue *q, struct request *rq,
			int at_head, void *data)
{
	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
	unsigned long flags;

	/*
	 * tell I/O scheduler that this isn't a regular read/write (ie it
	 * must not attempt merges on this) and that it acts as a soft
	 * barrier
	 */
	rq->cmd_type = REQ_TYPE_SPECIAL;

	rq->special = data;

	spin_lock_irqsave(q->queue_lock, flags);

	/*
	 * If command is tagged, release the tag
	 */
	if (blk_rq_tagged(rq))
		blk_queue_end_tag(q, rq);

	drive_stat_acct(rq, 1);
	__elv_add_request(q, rq, where, 0);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_insert_request);

/*
 * add-request adds a request to the linked list.
 * queue lock is held and interrupts disabled, as we muck with the
 * request queue list.
 */
static inline void add_request(struct request_queue *q, struct request *req)
{
	drive_stat_acct(req, 1);

	/*
	 * elevator indicated where it wants this request to be
	 * inserted at elevator_merge time
	 */
	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
}

static void part_round_stats_single(int cpu, struct hd_struct *part,
				    unsigned long now)
{
	if (now == part->stamp)
		return;

	if (part->in_flight) {
		__part_stat_add(cpu, part, time_in_queue,
				part_in_flight(part) * (now - part->stamp));
		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
	}
	part->stamp = now;
}

/**
 * part_round_stats() - Round off the performance stats on a struct disk_stats.
 * @cpu: cpu number for stats access
 * @part: target partition
 *
 * The average IO queue length and utilisation statistics are maintained
 * by observing the current state of the queue length and the amount of
 * time it has been in this state for.
 *
 * Normally, that accounting is done on IO completion, but that can result
 * in more than a second's worth of IO being accounted for within any one
 * second, leading to >100% utilisation. To deal with that, we call this
 * function to do a round-off before returning the results when reading
 * /proc/diskstats. This accounts immediately for all queue usage up to
 * the current jiffies and restarts the counters again.
 */
void part_round_stats(int cpu, struct hd_struct *part)
{
	unsigned long now = jiffies;

	if (part->partno)
		part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
	part_round_stats_single(cpu, part, now);
}
EXPORT_SYMBOL_GPL(part_round_stats);
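
/*
 * Worked example (numbers invented for illustration): if a partition has had
 * 2 requests in flight since part->stamp and 10 jiffies have passed, a call
 * to part_round_stats_single() adds 2 * 10 = 20 jiffies to time_in_queue and
 * 10 jiffies to io_ticks, then resets part->stamp to "now". Average queue
 * depth and %util figures reported by tools such as iostat are derived from
 * these two counters.
 */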

/*
 * queue lock must be held
 */
void __blk_put_request(struct request_queue *q, struct request *req)
{
	if (unlikely(!q))
		return;
	if (unlikely(--req->ref_count))
		return;

	elv_completed_request(q, req);

	/* this is a bio leak */
	WARN_ON(req->bio != NULL);

	/*
	 * Request may not have originated from ll_rw_blk. If not,
	 * it didn't come out of our reserved rq pools
	 */
	if (req->cmd_flags & REQ_ALLOCED) {
		int is_sync = rq_is_sync(req) != 0;
		int priv = req->cmd_flags & REQ_ELVPRIV;

		BUG_ON(!list_empty(&req->queuelist));
		BUG_ON(!hlist_unhashed(&req->hash));

		blk_free_request(q, req);
		freed_request(q, is_sync, priv);
	}
}
EXPORT_SYMBOL_GPL(__blk_put_request);

void blk_put_request(struct request *req)
{
	unsigned long flags;
	struct request_queue *q = req->q;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_put_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_put_request);

void init_request_from_bio(struct request *req, struct bio *bio)
{
	req->cpu = bio->bi_comp_cpu;
	req->cmd_type = REQ_TYPE_FS;

	/*
	 * Inherit FAILFAST from bio (for read-ahead, and explicit
	 * FAILFAST). FAILFAST flags are identical for req and bio.
	 */
	if (bio_rw_flagged(bio, BIO_RW_AHEAD))
		req->cmd_flags |= REQ_FAILFAST_MASK;
	else
		req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;

	if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
		req->cmd_flags |= REQ_DISCARD;
		if (bio_rw_flagged(bio, BIO_RW_BARRIER))
			req->cmd_flags |= REQ_SOFTBARRIER;
		req->q->prepare_discard_fn(req->q, req);
	} else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)))
		req->cmd_flags |= REQ_HARDBARRIER;

	if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
		req->cmd_flags |= REQ_RW_SYNC;
	if (bio_rw_flagged(bio, BIO_RW_META))
		req->cmd_flags |= REQ_RW_META;
	if (bio_rw_flagged(bio, BIO_RW_NOIDLE))
		req->cmd_flags |= REQ_NOIDLE;

	req->errors = 0;
	req->__sector = bio->bi_sector;
	req->ioprio = bio_prio(bio);
	blk_rq_bio_prep(req->q, req, bio);
}

/*
 * Only disable plugging for non-rotational devices if they do tagging
 * as well; otherwise we still need the proper merging.
 */
static inline bool queue_should_plug(struct request_queue *q)
{
	return !(blk_queue_nonrot(q) && blk_queue_queuing(q));
}

static int __make_request(struct request_queue *q, struct bio *bio)
{
	struct request *req;
	int el_ret;
	unsigned int bytes = bio->bi_size;
	const unsigned short prio = bio_prio(bio);
	const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
	const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG);
	const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
	int rw_flags;

	if (bio_rw_flagged(bio, BIO_RW_BARRIER) && bio_has_data(bio) &&
	    (q->next_ordered == QUEUE_ORDERED_NONE)) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}
	/*
	 * low level driver can indicate that it wants pages above a
	 * certain limit bounced to low memory (ie for highmem, or even
	 * ISA dma in theory)
	 */
	blk_queue_bounce(q, &bio);

	spin_lock_irq(q->queue_lock);

	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q))
		goto get_rq;

	el_ret = elv_merge(q, &req, bio);
	switch (el_ret) {
	case ELEVATOR_BACK_MERGE:
		BUG_ON(!rq_mergeable(req));

		if (!ll_back_merge_fn(q, req, bio))
			break;

		trace_block_bio_backmerge(q, bio);

		if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
			blk_rq_set_mixed_merge(req);

		req->biotail->bi_next = bio;
		req->biotail = bio;
		req->__data_len += bytes;
		req->ioprio = ioprio_best(req->ioprio, prio);
		if (!blk_rq_cpu_valid(req))
			req->cpu = bio->bi_comp_cpu;
		drive_stat_acct(req, 0);
		if (!attempt_back_merge(q, req))
			elv_merged_request(q, req, el_ret);
		goto out;

	case ELEVATOR_FRONT_MERGE:
		BUG_ON(!rq_mergeable(req));

		if (!ll_front_merge_fn(q, req, bio))
			break;

		trace_block_bio_frontmerge(q, bio);

		if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) {
			blk_rq_set_mixed_merge(req);
			req->cmd_flags &= ~REQ_FAILFAST_MASK;
			req->cmd_flags |= ff;
		}

		bio->bi_next = req->bio;
		req->bio = bio;

		/*
		 * may not be valid. if the low level driver said
		 * it didn't need a bounce buffer then it better
		 * not touch req->buffer either...
		 */
		req->buffer = bio_data(bio);
		req->__sector = bio->bi_sector;
		req->__data_len += bytes;
		req->ioprio = ioprio_best(req->ioprio, prio);
		if (!blk_rq_cpu_valid(req))
			req->cpu = bio->bi_comp_cpu;
		drive_stat_acct(req, 0);
		if (!attempt_front_merge(q, req))
			elv_merged_request(q, req, el_ret);
		goto out;

	/* ELV_NO_MERGE: elevator says don't/can't merge. */
	default:
		;
	}

get_rq:
	/*
	 * This sync check and mask will be re-done in init_request_from_bio(),
	 * but we need to set it earlier to expose the sync flag to the
	 * rq allocator and io schedulers.
	 */
	rw_flags = bio_data_dir(bio);
	if (sync)
		rw_flags |= REQ_RW_SYNC;

	/*
	 * Grab a free request. This might sleep but can not fail.
	 * Returns with the queue unlocked.
	 */
	req = get_request_wait(q, rw_flags, bio);

	/*
	 * After dropping the lock and possibly sleeping here, our request
	 * may now be mergeable after it had proven unmergeable (above).
	 * We don't worry about that case for efficiency. It won't happen
	 * often, and the elevators are able to handle it.
	 */
	init_request_from_bio(req, bio);

	spin_lock_irq(q->queue_lock);
	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
	    bio_flagged(bio, BIO_CPU_AFFINE))
		req->cpu = blk_cpu_to_group(smp_processor_id());
	if (queue_should_plug(q) && elv_queue_empty(q))
		blk_plug_device(q);
	add_request(q, req);
out:
	if (unplug || !queue_should_plug(q))
		__generic_unplug_device(q);
	spin_unlock_irq(q->queue_lock);
	return 0;
}
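
/*
 * Example (illustrative numbers): suppose a queued request currently covers
 * sectors 0..127. A new 4KiB bio starting at sector 128 is a back-merge
 * candidate: it is appended at ->biotail and __data_len grows by 4096. A bio
 * that ends exactly where the request starts would instead be a front-merge,
 * which additionally rewrites ->__sector and ->buffer to point at the new
 * head bio, as in the code above.
 */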

/*
 * If bio->bi_bdev is a partition, remap the location
 */
static inline void blk_partition_remap(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;

	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
		struct hd_struct *p = bdev->bd_part;

		bio->bi_sector += p->start_sect;
		bio->bi_bdev = bdev->bd_contains;

		trace_block_remap(bdev_get_queue(bio->bi_bdev), bio,
				  bdev->bd_dev,
				  bio->bi_sector - p->start_sect);
	}
}

static void handle_bad_sector(struct bio *bio)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "attempt to access beyond end of device\n");
	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
	       bdevname(bio->bi_bdev, b),
	       bio->bi_rw,
	       (unsigned long long)bio->bi_sector + bio_sectors(bio),
	       (long long)(bio->bi_bdev->bd_inode->i_size >> 9));

	set_bit(BIO_EOF, &bio->bi_flags);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static int should_fail_request(struct bio *bio)
{
	struct hd_struct *part = bio->bi_bdev->bd_part;

	if (part_to_disk(part)->part0.make_it_fail || part->make_it_fail)
		return should_fail(&fail_make_request, bio->bi_size);

	return 0;
}

static int __init fail_make_request_debugfs(void)
{
	return init_fault_attr_dentries(&fail_make_request,
					"fail_make_request");
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline int should_fail_request(struct bio *bio)
{
	return 0;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */
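
/*
 * Worked example for blk_partition_remap() above (invented numbers): a bio
 * aimed at sector 100 of a partition whose start_sect is 2048 leaves the
 * remap with bi_sector == 2148 and bi_bdev pointing at the whole-disk
 * block_device, so every layer below this point only ever sees absolute
 * disk sectors.
 */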

/*
 * Check whether this bio extends beyond the end of the device.
 */
static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
{
	sector_t maxsector;

	if (!nr_sectors)
		return 0;

	/* Test device or partition size, when known. */
	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
	if (maxsector) {
		sector_t sector = bio->bi_sector;

		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
			/*
			 * This may well happen - the kernel calls bread()
			 * without checking the size of the device, e.g., when
			 * mounting a device.
			 */
			handle_bad_sector(bio);
			return 1;
		}
	}

	return 0;
}

/**
 * generic_make_request - hand a buffer to its device driver for I/O
 * @bio: The bio describing the location in memory and on the device.
 *
 * generic_make_request() is used to make I/O requests of block
 * devices. It is passed a &struct bio, which describes the I/O that needs
 * to be done.
 *
 * generic_make_request() does not return any status. The
 * success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the bio->bi_end_io
 * function described (one day) elsewhere.
 *
 * The caller of generic_make_request must make sure that bi_io_vec
 * are set to describe the memory buffer, and that bi_bdev and bi_sector are
 * set to describe the device address, and the
 * bi_end_io and optionally bi_private are set to describe how
 * completion notification should be signaled.
 *
 * generic_make_request and the drivers it calls may use bi_next if this
 * bio happens to be merged with someone else, and may change bi_bdev and
 * bi_sector for remaps as it sees fit. So the values of these fields
 * should NOT be depended on after the call to generic_make_request.
 */
static inline void __generic_make_request(struct bio *bio)
{
	struct request_queue *q;
	sector_t old_sector;
	int ret, nr_sectors = bio_sectors(bio);
	dev_t old_dev;
	int err = -EIO;

	might_sleep();

	if (bio_check_eod(bio, nr_sectors))
		goto end_io;

	/*
	 * Resolve the mapping until finished. (drivers are
	 * still free to implement/resolve their own stacking
	 * by explicitly returning 0)
	 *
	 * NOTE: we don't repeat the blk_size check for each new device.
	 * Stacking drivers are expected to know what they are doing.
	 */
	old_sector = -1;
	old_dev = 0;
	do {
		char b[BDEVNAME_SIZE];

		q = bdev_get_queue(bio->bi_bdev);
		if (unlikely(!q)) {
			printk(KERN_ERR
			       "generic_make_request: Trying to access "
			       "nonexistent block-device %s (%Lu)\n",
			       bdevname(bio->bi_bdev, b),
			       (long long) bio->bi_sector);
			goto end_io;
		}

		if (unlikely(nr_sectors > queue_max_hw_sectors(q))) {
			printk(KERN_ERR "bio too big device %s (%u > %u)\n",
			       bdevname(bio->bi_bdev, b),
			       bio_sectors(bio),
			       queue_max_hw_sectors(q));
			goto end_io;
		}

		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
			goto end_io;

		if (should_fail_request(bio))
			goto end_io;

		/*
		 * If this device has partitions, remap block n
		 * of partition p to block n+start(p) of the disk.
		 */
		blk_partition_remap(bio);

		if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
			goto end_io;

		if (old_sector != -1)
			trace_block_remap(q, bio, old_dev, old_sector);

		old_sector = bio->bi_sector;
		old_dev = bio->bi_bdev->bd_dev;

		if (bio_check_eod(bio, nr_sectors))
			goto end_io;

		if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
		    !q->prepare_discard_fn) {
			err = -EOPNOTSUPP;
			goto end_io;
		}

		trace_block_bio_queue(q, bio);

		ret = q->make_request_fn(q, bio);
	} while (ret);

	return;

end_io:
	bio_endio(bio, err);
}
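
/*
 * Illustrative sketch of the stacking convention mentioned above (the lower
 * device and the remapping rule are made up for the example): a simple
 * remapping driver's make_request_fn redirects the bio to a lower device and
 * returns non-zero so that the loop in __generic_make_request() resubmits it,
 * or returns 0 once it has queued or completed the bio itself.
 *
 *	static int my_remap_make_request(struct request_queue *q,
 *					 struct bio *bio)
 *	{
 *		bio->bi_bdev = my_lower_bdev;
 *		bio->bi_sector += my_start_offset;
 *		return 1;	(let __generic_make_request resubmit it)
 *	}
 */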

/*
 * We only want one ->make_request_fn to be active at a time,
 * else stack usage with stacked devices could be a problem.
 * So use current->bio_{list,tail} to keep a list of requests
 * submitted by a make_request_fn function.
 * current->bio_tail is also used as a flag to say if
 * generic_make_request is currently active in this task or not.
 * If it is NULL, then no make_request is active. If it is non-NULL,
 * then a make_request is active, and new requests should be added
 * at the tail.
 */
void generic_make_request(struct bio *bio)
{
	if (current->bio_tail) {
		/* make_request is active */
		*(current->bio_tail) = bio;
		bio->bi_next = NULL;
		current->bio_tail = &bio->bi_next;
		return;
	}
	/* following loop may be a bit non-obvious, and so deserves some
	 * explanation.
	 * Before entering the loop, bio->bi_next is NULL (as all callers
	 * ensure that) so we have a list with a single bio.
	 * We pretend that we have just taken it off a longer list, so
	 * we assign bio_list to the next (which is NULL) and bio_tail
	 * to &bio_list, thus initialising the bio_list of new bios to be
	 * added. __generic_make_request may indeed add some more bios
	 * through a recursive call to generic_make_request. If it
	 * did, we find a non-NULL value in bio_list and re-enter the loop
	 * from the top. In this case we really did just take the bio
	 * off the top of the list (no pretending) and so fix up bio_list
	 * and bio_tail or bi_next, and call into __generic_make_request again.
	 *
	 * The loop was structured like this to make only one call to
	 * __generic_make_request (which is important as it is large and
	 * inlined) and to keep the structure simple.
	 */
	BUG_ON(bio->bi_next);
	do {
		current->bio_list = bio->bi_next;
		if (bio->bi_next == NULL)
			current->bio_tail = &current->bio_list;
		else
			bio->bi_next = NULL;
		__generic_make_request(bio);
		bio = current->bio_list;
	} while (bio);
	current->bio_tail = NULL; /* deactivate */
}
EXPORT_SYMBOL(generic_make_request);

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is very similar in purpose to generic_make_request(), and
 * uses that function to do most of the work. Both are fairly rough
 * interfaces; @bio must be presetup and ready for I/O.
 *
 */
void submit_bio(int rw, struct bio *bio)
{
	int count = bio_sectors(bio);

	bio->bi_rw |= rw;

	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (bio_has_data(bio)) {
		if (rw & WRITE) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_size);
			count_vm_events(PGPGIN, count);
		}

		if (unlikely(block_dump)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
			       current->comm, task_pid_nr(current),
			       (rw & WRITE) ? "WRITE" : "READ",
			       (unsigned long long)bio->bi_sector,
			       bdevname(bio->bi_bdev, b));
		}
	}

	generic_make_request(bio);
}
EXPORT_SYMBOL(submit_bio);
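
/*
 * Illustrative sketch of a caller (the buffer page, target bdev and
 * completion object are assumptions for the example): a bio is allocated,
 * pointed at the target device and sector, given a page of payload and a
 * completion callback, and then handed to submit_bio(). Completion arrives
 * asynchronously in my_end_io(), as the generic_make_request() documentation
 * above describes.
 *
 *	static void my_end_io(struct bio *bio, int error)
 *	{
 *		complete(bio->bi_private);
 *		bio_put(bio);
 *	}
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_sector = 0;
 *	bio->bi_end_io = my_end_io;
 *	bio->bi_private = &done;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(READ, bio);
 */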

/**
 * blk_rq_check_limits - Helper function to check a request for the queue limit
 * @q: the queue
 * @rq: the request being checked
 *
 * Description:
 *    @rq may have been made based on weaker limitations of upper-level queues
 *    in request stacking drivers, and it may violate the limitation of @q.
 *    Since the block layer and the underlying device driver trust @rq
 *    after it is inserted to @q, it should be checked against @q before
 *    the insertion using this generic function.
 *
 *    This function should also be useful for request stacking drivers
 *    in some cases below, so export this function.
 *    Request stacking drivers like request-based dm may change the queue
 *    limits while requests are in the queue (e.g. dm's table swapping).
 *    Such request stacking drivers should check those requests against
 *    the new queue limits again when they dispatch those requests,
 *    although such checks are also done against the old queue limits
 *    when submitting requests.
 */
int blk_rq_check_limits(struct request_queue *q, struct request *rq)
{
	if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
	    blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
		printk(KERN_ERR "%s: over max size limit.\n", __func__);
		return -EIO;
	}

	/*
	 * queue's settings related to segment counting like q->bounce_pfn
	 * may differ from that of other stacking queues.
	 * Recalculate it to check the request correctly on this queue's
	 * limitation.
	 */
	blk_recalc_rq_segments(rq);
	if (rq->nr_phys_segments > queue_max_phys_segments(q) ||
	    rq->nr_phys_segments > queue_max_hw_segments(q)) {
		printk(KERN_ERR "%s: over max segments limit.\n", __func__);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(blk_rq_check_limits);

/**
 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
 * @q: the queue to submit the request
 * @rq: the request being queued
 */
int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
	unsigned long flags;

	if (blk_rq_check_limits(q, rq))
		return -EIO;

#ifdef CONFIG_FAIL_MAKE_REQUEST
	if (rq->rq_disk && rq->rq_disk->part0.make_it_fail &&
	    should_fail(&fail_make_request, blk_rq_bytes(rq)))
		return -EIO;
#endif

	spin_lock_irqsave(q->queue_lock, flags);

	/*
	 * Submitting request must be dequeued before calling this function
	 * because it will be linked to another request_queue
	 */
	BUG_ON(blk_queued_rq(rq));

	drive_stat_acct(rq, 1);
	__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);

	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);

/**
 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
 * @rq: request to examine
 *
 * Description:
 *     A request could be a merge of IOs which require different failure
 *     handling. This function determines the number of bytes which
 *     can be failed from the beginning of the request without
 *     crossing into an area which needs to be retried further.
 *
 * Return:
 *     The number of bytes to fail.
 *
 * Context:
 *     queue_lock must be held.
 */
unsigned int blk_rq_err_bytes(const struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	unsigned int bytes = 0;
	struct bio *bio;

	if (!(rq->cmd_flags & REQ_MIXED_MERGE))
		return blk_rq_bytes(rq);

	/*
	 * Currently the only 'mixing' which can happen is between
	 * different failfast types. We can safely fail portions
	 * which have all the failfast bits that the first one has -
	 * the ones which are at least as eager to fail as the first
	 * one.
1695 */ 1696 for (bio = rq->bio; bio; bio = bio->bi_next) { 1697 if ((bio->bi_rw & ff) != ff) 1698 break; 1699 bytes += bio->bi_size; 1700 } 1701 1702 /* this could lead to infinite loop */ 1703 BUG_ON(blk_rq_bytes(rq) && !bytes); 1704 return bytes; 1705 } 1706 EXPORT_SYMBOL_GPL(blk_rq_err_bytes); 1707 1708 static void blk_account_io_completion(struct request *req, unsigned int bytes) 1709 { 1710 if (blk_do_io_stat(req)) { 1711 const int rw = rq_data_dir(req); 1712 struct hd_struct *part; 1713 int cpu; 1714 1715 cpu = part_stat_lock(); 1716 part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); 1717 part_stat_add(cpu, part, sectors[rw], bytes >> 9); 1718 part_stat_unlock(); 1719 } 1720 } 1721 1722 static void blk_account_io_done(struct request *req) 1723 { 1724 /* 1725 * Account IO completion. bar_rq isn't accounted as a normal 1726 * IO on queueing nor completion. Accounting the containing 1727 * request is enough. 1728 */ 1729 if (blk_do_io_stat(req) && req != &req->q->bar_rq) { 1730 unsigned long duration = jiffies - req->start_time; 1731 const int rw = rq_data_dir(req); 1732 struct hd_struct *part; 1733 int cpu; 1734 1735 cpu = part_stat_lock(); 1736 part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); 1737 1738 part_stat_inc(cpu, part, ios[rw]); 1739 part_stat_add(cpu, part, ticks[rw], duration); 1740 part_round_stats(cpu, part); 1741 part_dec_in_flight(part, rw); 1742 1743 part_stat_unlock(); 1744 } 1745 } 1746 1747 /** 1748 * blk_peek_request - peek at the top of a request queue 1749 * @q: request queue to peek at 1750 * 1751 * Description: 1752 * Return the request at the top of @q. The returned request 1753 * should be started using blk_start_request() before LLD starts 1754 * processing it. 1755 * 1756 * Return: 1757 * Pointer to the request at the top of @q if available. Null 1758 * otherwise. 1759 * 1760 * Context: 1761 * queue_lock must be held. 1762 */ 1763 struct request *blk_peek_request(struct request_queue *q) 1764 { 1765 struct request *rq; 1766 int ret; 1767 1768 while ((rq = __elv_next_request(q)) != NULL) { 1769 if (!(rq->cmd_flags & REQ_STARTED)) { 1770 /* 1771 * This is the first time the device driver 1772 * sees this request (possibly after 1773 * requeueing). Notify IO scheduler. 1774 */ 1775 if (blk_sorted_rq(rq)) 1776 elv_activate_rq(q, rq); 1777 1778 /* 1779 * just mark as started even if we don't start 1780 * it, a request that has been delayed should 1781 * not be passed by new incoming requests 1782 */ 1783 rq->cmd_flags |= REQ_STARTED; 1784 trace_block_rq_issue(q, rq); 1785 } 1786 1787 if (!q->boundary_rq || q->boundary_rq == rq) { 1788 q->end_sector = rq_end_sector(rq); 1789 q->boundary_rq = NULL; 1790 } 1791 1792 if (rq->cmd_flags & REQ_DONTPREP) 1793 break; 1794 1795 if (q->dma_drain_size && blk_rq_bytes(rq)) { 1796 /* 1797 * make sure space for the drain appears we 1798 * know we can do this because max_hw_segments 1799 * has been adjusted to be one fewer than the 1800 * device can handle 1801 */ 1802 rq->nr_phys_segments++; 1803 } 1804 1805 if (!q->prep_rq_fn) 1806 break; 1807 1808 ret = q->prep_rq_fn(q, rq); 1809 if (ret == BLKPREP_OK) { 1810 break; 1811 } else if (ret == BLKPREP_DEFER) { 1812 /* 1813 * the request may have been (partially) prepped. 1814 * we need to keep this request in the front to 1815 * avoid resource deadlock. REQ_STARTED will 1816 * prevent other fs requests from passing this one. 
1817 */ 1818 if (q->dma_drain_size && blk_rq_bytes(rq) && 1819 !(rq->cmd_flags & REQ_DONTPREP)) { 1820 /* 1821 * remove the space for the drain we added 1822 * so that we don't add it again 1823 */ 1824 --rq->nr_phys_segments; 1825 } 1826 1827 rq = NULL; 1828 break; 1829 } else if (ret == BLKPREP_KILL) { 1830 rq->cmd_flags |= REQ_QUIET; 1831 /* 1832 * Mark this request as started so we don't trigger 1833 * any debug logic in the end I/O path. 1834 */ 1835 blk_start_request(rq); 1836 __blk_end_request_all(rq, -EIO); 1837 } else { 1838 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); 1839 break; 1840 } 1841 } 1842 1843 return rq; 1844 } 1845 EXPORT_SYMBOL(blk_peek_request); 1846 1847 void blk_dequeue_request(struct request *rq) 1848 { 1849 struct request_queue *q = rq->q; 1850 1851 BUG_ON(list_empty(&rq->queuelist)); 1852 BUG_ON(ELV_ON_HASH(rq)); 1853 1854 list_del_init(&rq->queuelist); 1855 1856 /* 1857 * the time frame between a request being removed from the lists 1858 * and to it is freed is accounted as io that is in progress at 1859 * the driver side. 1860 */ 1861 if (blk_account_rq(rq)) { 1862 q->in_flight[rq_is_sync(rq)]++; 1863 /* 1864 * Mark this device as supporting hardware queuing, if 1865 * we have more IOs in flight than 4. 1866 */ 1867 if (!blk_queue_queuing(q) && queue_in_flight(q) > 4) 1868 set_bit(QUEUE_FLAG_CQ, &q->queue_flags); 1869 } 1870 } 1871 1872 /** 1873 * blk_start_request - start request processing on the driver 1874 * @req: request to dequeue 1875 * 1876 * Description: 1877 * Dequeue @req and start timeout timer on it. This hands off the 1878 * request to the driver. 1879 * 1880 * Block internal functions which don't want to start timer should 1881 * call blk_dequeue_request(). 1882 * 1883 * Context: 1884 * queue_lock must be held. 1885 */ 1886 void blk_start_request(struct request *req) 1887 { 1888 blk_dequeue_request(req); 1889 1890 /* 1891 * We are now handing the request to the hardware, initialize 1892 * resid_len to full count and add the timeout handler. 1893 */ 1894 req->resid_len = blk_rq_bytes(req); 1895 if (unlikely(blk_bidi_rq(req))) 1896 req->next_rq->resid_len = blk_rq_bytes(req->next_rq); 1897 1898 blk_add_timer(req); 1899 } 1900 EXPORT_SYMBOL(blk_start_request); 1901 1902 /** 1903 * blk_fetch_request - fetch a request from a request queue 1904 * @q: request queue to fetch a request from 1905 * 1906 * Description: 1907 * Return the request at the top of @q. The request is started on 1908 * return and LLD can start processing it immediately. 1909 * 1910 * Return: 1911 * Pointer to the request at the top of @q if available. Null 1912 * otherwise. 1913 * 1914 * Context: 1915 * queue_lock must be held. 1916 */ 1917 struct request *blk_fetch_request(struct request_queue *q) 1918 { 1919 struct request *rq; 1920 1921 rq = blk_peek_request(q); 1922 if (rq) 1923 blk_start_request(rq); 1924 return rq; 1925 } 1926 EXPORT_SYMBOL(blk_fetch_request); 1927 1928 /** 1929 * blk_update_request - Special helper function for request stacking drivers 1930 * @req: the request being processed 1931 * @error: %0 for success, < %0 for error 1932 * @nr_bytes: number of bytes to complete @req 1933 * 1934 * Description: 1935 * Ends I/O on a number of bytes attached to @req, but doesn't complete 1936 * the request structure even if @req doesn't have leftover. 1937 * If @req has leftover, sets it up for the next range of segments. 1938 * 1939 * This special helper function is only for request stacking drivers 1940 * (e.g. 
request-based dm) so that they can handle partial completion. 1941 * Actual device drivers should use blk_end_request instead. 1942 * 1943 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees 1944 * %false return from this function. 1945 * 1946 * Return: 1947 * %false - this request doesn't have any more data 1948 * %true - this request has more data 1949 **/ 1950 bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) 1951 { 1952 int total_bytes, bio_nbytes, next_idx = 0; 1953 struct bio *bio; 1954 1955 if (!req->bio) 1956 return false; 1957 1958 trace_block_rq_complete(req->q, req); 1959 1960 /* 1961 * For fs requests, rq is just carrier of independent bio's 1962 * and each partial completion should be handled separately. 1963 * Reset per-request error on each partial completion. 1964 * 1965 * TODO: tj: This is too subtle. It would be better to let 1966 * low level drivers do what they see fit. 1967 */ 1968 if (blk_fs_request(req)) 1969 req->errors = 0; 1970 1971 if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) { 1972 printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n", 1973 req->rq_disk ? req->rq_disk->disk_name : "?", 1974 (unsigned long long)blk_rq_pos(req)); 1975 } 1976 1977 blk_account_io_completion(req, nr_bytes); 1978 1979 total_bytes = bio_nbytes = 0; 1980 while ((bio = req->bio) != NULL) { 1981 int nbytes; 1982 1983 if (nr_bytes >= bio->bi_size) { 1984 req->bio = bio->bi_next; 1985 nbytes = bio->bi_size; 1986 req_bio_endio(req, bio, nbytes, error); 1987 next_idx = 0; 1988 bio_nbytes = 0; 1989 } else { 1990 int idx = bio->bi_idx + next_idx; 1991 1992 if (unlikely(idx >= bio->bi_vcnt)) { 1993 blk_dump_rq_flags(req, "__end_that"); 1994 printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n", 1995 __func__, idx, bio->bi_vcnt); 1996 break; 1997 } 1998 1999 nbytes = bio_iovec_idx(bio, idx)->bv_len; 2000 BIO_BUG_ON(nbytes > bio->bi_size); 2001 2002 /* 2003 * not a complete bvec done 2004 */ 2005 if (unlikely(nbytes > nr_bytes)) { 2006 bio_nbytes += nr_bytes; 2007 total_bytes += nr_bytes; 2008 break; 2009 } 2010 2011 /* 2012 * advance to the next vector 2013 */ 2014 next_idx++; 2015 bio_nbytes += nbytes; 2016 } 2017 2018 total_bytes += nbytes; 2019 nr_bytes -= nbytes; 2020 2021 bio = req->bio; 2022 if (bio) { 2023 /* 2024 * end more in this run, or just return 'not-done' 2025 */ 2026 if (unlikely(nr_bytes <= 0)) 2027 break; 2028 } 2029 } 2030 2031 /* 2032 * completely done 2033 */ 2034 if (!req->bio) { 2035 /* 2036 * Reset counters so that the request stacking driver 2037 * can find how many bytes remain in the request 2038 * later. 
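		 * With __data_len cleared, blk_rq_bytes() will report
		 * zero bytes remaining for this request.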
2039 		 */
2040 		req->__data_len = 0;
2041 		return false;
2042 	}
2043 
2044 	/*
2045 	 * if the request wasn't completed, update state
2046 	 */
2047 	if (bio_nbytes) {
2048 		req_bio_endio(req, bio, bio_nbytes, error);
2049 		bio->bi_idx += next_idx;
2050 		bio_iovec(bio)->bv_offset += nr_bytes;
2051 		bio_iovec(bio)->bv_len -= nr_bytes;
2052 	}
2053 
2054 	req->__data_len -= total_bytes;
2055 	req->buffer = bio_data(req->bio);
2056 
2057 	/* update sector only for requests with clear definition of sector */
2058 	if (blk_fs_request(req) || blk_discard_rq(req))
2059 		req->__sector += total_bytes >> 9;
2060 
2061 	/* mixed attributes always follow the first bio */
2062 	if (req->cmd_flags & REQ_MIXED_MERGE) {
2063 		req->cmd_flags &= ~REQ_FAILFAST_MASK;
2064 		req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
2065 	}
2066 
2067 	/*
2068 	 * If the total number of sectors is less than the first segment
2069 	 * size, something has gone terribly wrong.
2070 	 */
2071 	if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
2072 		printk(KERN_ERR "blk: request botched\n");
2073 		req->__data_len = blk_rq_cur_bytes(req);
2074 	}
2075 
2076 	/* recalculate the number of segments */
2077 	blk_recalc_rq_segments(req);
2078 
2079 	return true;
2080 }
2081 EXPORT_SYMBOL_GPL(blk_update_request);
2082 
2083 static bool blk_update_bidi_request(struct request *rq, int error,
2084 				    unsigned int nr_bytes,
2085 				    unsigned int bidi_bytes)
2086 {
2087 	if (blk_update_request(rq, error, nr_bytes))
2088 		return true;
2089 
2090 	/* Bidi request must be completed as a whole */
2091 	if (unlikely(blk_bidi_rq(rq)) &&
2092 	    blk_update_request(rq->next_rq, error, bidi_bytes))
2093 		return true;
2094 
2095 	add_disk_randomness(rq->rq_disk);
2096 
2097 	return false;
2098 }
2099 
2100 /*
2101  * queue lock must be held
2102  */
2103 static void blk_finish_request(struct request *req, int error)
2104 {
2105 	if (blk_rq_tagged(req))
2106 		blk_queue_end_tag(req->q, req);
2107 
2108 	BUG_ON(blk_queued_rq(req));
2109 
2110 	if (unlikely(laptop_mode) && blk_fs_request(req))
2111 		laptop_io_completion();
2112 
2113 	blk_delete_timer(req);
2114 
2115 	blk_account_io_done(req);
2116 
2117 	if (req->end_io)
2118 		req->end_io(req, error);
2119 	else {
2120 		if (blk_bidi_rq(req))
2121 			__blk_put_request(req->next_rq->q, req->next_rq);
2122 
2123 		__blk_put_request(req->q, req);
2124 	}
2125 }
2126 
2127 /**
2128  * blk_end_bidi_request - Complete a bidi request
2129  * @rq: the request to complete
2130  * @error: %0 for success, < %0 for error
2131  * @nr_bytes: number of bytes to complete @rq
2132  * @bidi_bytes: number of bytes to complete @rq->next_rq
2133  *
2134  * Description:
2135  *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
2136  *     Drivers that support bidi can safely call this function for any
2137  *     type of request, bidi or uni. In the latter case @bidi_bytes is
2138  *     just ignored.
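 *     (For a bidi request, callers such as blk_end_request_all() pass
 *     blk_rq_bytes(@rq->next_rq) as @bidi_bytes.)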
2139 * 2140 * Return: 2141 * %false - we are done with this request 2142 * %true - still buffers pending for this request 2143 **/ 2144 static bool blk_end_bidi_request(struct request *rq, int error, 2145 unsigned int nr_bytes, unsigned int bidi_bytes) 2146 { 2147 struct request_queue *q = rq->q; 2148 unsigned long flags; 2149 2150 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) 2151 return true; 2152 2153 spin_lock_irqsave(q->queue_lock, flags); 2154 blk_finish_request(rq, error); 2155 spin_unlock_irqrestore(q->queue_lock, flags); 2156 2157 return false; 2158 } 2159 2160 /** 2161 * __blk_end_bidi_request - Complete a bidi request with queue lock held 2162 * @rq: the request to complete 2163 * @error: %0 for success, < %0 for error 2164 * @nr_bytes: number of bytes to complete @rq 2165 * @bidi_bytes: number of bytes to complete @rq->next_rq 2166 * 2167 * Description: 2168 * Identical to blk_end_bidi_request() except that queue lock is 2169 * assumed to be locked on entry and remains so on return. 2170 * 2171 * Return: 2172 * %false - we are done with this request 2173 * %true - still buffers pending for this request 2174 **/ 2175 static bool __blk_end_bidi_request(struct request *rq, int error, 2176 unsigned int nr_bytes, unsigned int bidi_bytes) 2177 { 2178 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) 2179 return true; 2180 2181 blk_finish_request(rq, error); 2182 2183 return false; 2184 } 2185 2186 /** 2187 * blk_end_request - Helper function for drivers to complete the request. 2188 * @rq: the request being processed 2189 * @error: %0 for success, < %0 for error 2190 * @nr_bytes: number of bytes to complete 2191 * 2192 * Description: 2193 * Ends I/O on a number of bytes attached to @rq. 2194 * If @rq has leftover, sets it up for the next range of segments. 2195 * 2196 * Return: 2197 * %false - we are done with this request 2198 * %true - still buffers pending for this request 2199 **/ 2200 bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 2201 { 2202 return blk_end_bidi_request(rq, error, nr_bytes, 0); 2203 } 2204 EXPORT_SYMBOL(blk_end_request); 2205 2206 /** 2207 * blk_end_request_all - Helper function for drives to finish the request. 2208 * @rq: the request to finish 2209 * @error: %0 for success, < %0 for error 2210 * 2211 * Description: 2212 * Completely finish @rq. 2213 */ 2214 void blk_end_request_all(struct request *rq, int error) 2215 { 2216 bool pending; 2217 unsigned int bidi_bytes = 0; 2218 2219 if (unlikely(blk_bidi_rq(rq))) 2220 bidi_bytes = blk_rq_bytes(rq->next_rq); 2221 2222 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); 2223 BUG_ON(pending); 2224 } 2225 EXPORT_SYMBOL(blk_end_request_all); 2226 2227 /** 2228 * blk_end_request_cur - Helper function to finish the current request chunk. 2229 * @rq: the request to finish the current chunk for 2230 * @error: %0 for success, < %0 for error 2231 * 2232 * Description: 2233 * Complete the current consecutively mapped chunk from @rq. 2234 * 2235 * Return: 2236 * %false - we are done with this request 2237 * %true - still buffers pending for this request 2238 */ 2239 bool blk_end_request_cur(struct request *rq, int error) 2240 { 2241 return blk_end_request(rq, error, blk_rq_cur_bytes(rq)); 2242 } 2243 EXPORT_SYMBOL(blk_end_request_cur); 2244 2245 /** 2246 * blk_end_request_err - Finish a request till the next failure boundary. 
2247  * @rq: the request to finish till the next failure boundary for
2248  * @error: must be negative errno
2249  *
2250  * Description:
2251  *     Complete @rq till the next failure boundary.
2252  *
2253  * Return:
2254  *     %false - we are done with this request
2255  *     %true - still buffers pending for this request
2256  */
2257 bool blk_end_request_err(struct request *rq, int error)
2258 {
2259 	WARN_ON(error >= 0);
2260 	return blk_end_request(rq, error, blk_rq_err_bytes(rq));
2261 }
2262 EXPORT_SYMBOL_GPL(blk_end_request_err);
2263 
2264 /**
2265  * __blk_end_request - Helper function for drivers to complete the request.
2266  * @rq: the request being processed
2267  * @error: %0 for success, < %0 for error
2268  * @nr_bytes: number of bytes to complete
2269  *
2270  * Description:
2271  *     Like blk_end_request(), but must be called with the queue lock held.
2272  *
2273  * Return:
2274  *     %false - we are done with this request
2275  *     %true - still buffers pending for this request
2276  **/
2277 bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2278 {
2279 	return __blk_end_bidi_request(rq, error, nr_bytes, 0);
2280 }
2281 EXPORT_SYMBOL(__blk_end_request);
2282 
2283 /**
2284  * __blk_end_request_all - Helper function for drivers to finish the request.
2285  * @rq: the request to finish
2286  * @error: %0 for success, < %0 for error
2287  *
2288  * Description:
2289  *     Completely finish @rq. Must be called with queue lock held.
2290  */
2291 void __blk_end_request_all(struct request *rq, int error)
2292 {
2293 	bool pending;
2294 	unsigned int bidi_bytes = 0;
2295 
2296 	if (unlikely(blk_bidi_rq(rq)))
2297 		bidi_bytes = blk_rq_bytes(rq->next_rq);
2298 
2299 	pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2300 	BUG_ON(pending);
2301 }
2302 EXPORT_SYMBOL(__blk_end_request_all);
2303 
2304 /**
2305  * __blk_end_request_cur - Helper function to finish the current request chunk.
2306  * @rq: the request to finish the current chunk for
2307  * @error: %0 for success, < %0 for error
2308  *
2309  * Description:
2310  *     Complete the current consecutively mapped chunk from @rq. Must
2311  *     be called with queue lock held.
2312  *
2313  * Return:
2314  *     %false - we are done with this request
2315  *     %true - still buffers pending for this request
2316  */
2317 bool __blk_end_request_cur(struct request *rq, int error)
2318 {
2319 	return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2320 }
2321 EXPORT_SYMBOL(__blk_end_request_cur);
2322 
2323 /**
2324  * __blk_end_request_err - Finish a request till the next failure boundary.
2325  * @rq: the request to finish till the next failure boundary for
2326  * @error: must be negative errno
2327  *
2328  * Description:
2329  *     Complete @rq till the next failure boundary. Must be called
2330  *     with queue lock held.
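 *
 *     A hypothetical sketch (illustrative names; the queue lock is already
 *     held, e.g. in the driver's request_fn or interrupt handler):
 *
 *	if (__blk_end_request_err(rq, -EIO))
 *		goto handle_rest_of_request;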
2331 * 2332 * Return: 2333 * %false - we are done with this request 2334 * %true - still buffers pending for this request 2335 */ 2336 bool __blk_end_request_err(struct request *rq, int error) 2337 { 2338 WARN_ON(error >= 0); 2339 return __blk_end_request(rq, error, blk_rq_err_bytes(rq)); 2340 } 2341 EXPORT_SYMBOL_GPL(__blk_end_request_err); 2342 2343 void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 2344 struct bio *bio) 2345 { 2346 /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */ 2347 rq->cmd_flags |= bio->bi_rw & REQ_RW; 2348 2349 if (bio_has_data(bio)) { 2350 rq->nr_phys_segments = bio_phys_segments(q, bio); 2351 rq->buffer = bio_data(bio); 2352 } 2353 rq->__data_len = bio->bi_size; 2354 rq->bio = rq->biotail = bio; 2355 2356 if (bio->bi_bdev) 2357 rq->rq_disk = bio->bi_bdev->bd_disk; 2358 } 2359 2360 /** 2361 * blk_lld_busy - Check if underlying low-level drivers of a device are busy 2362 * @q : the queue of the device being checked 2363 * 2364 * Description: 2365 * Check if underlying low-level drivers of a device are busy. 2366 * If the drivers want to export their busy state, they must set own 2367 * exporting function using blk_queue_lld_busy() first. 2368 * 2369 * Basically, this function is used only by request stacking drivers 2370 * to stop dispatching requests to underlying devices when underlying 2371 * devices are busy. This behavior helps more I/O merging on the queue 2372 * of the request stacking driver and prevents I/O throughput regression 2373 * on burst I/O load. 2374 * 2375 * Return: 2376 * 0 - Not busy (The request stacking driver should dispatch request) 2377 * 1 - Busy (The request stacking driver should stop dispatching request) 2378 */ 2379 int blk_lld_busy(struct request_queue *q) 2380 { 2381 if (q->lld_busy_fn) 2382 return q->lld_busy_fn(q); 2383 2384 return 0; 2385 } 2386 EXPORT_SYMBOL_GPL(blk_lld_busy); 2387 2388 /** 2389 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request 2390 * @rq: the clone request to be cleaned up 2391 * 2392 * Description: 2393 * Free all bios in @rq for a cloned request. 2394 */ 2395 void blk_rq_unprep_clone(struct request *rq) 2396 { 2397 struct bio *bio; 2398 2399 while ((bio = rq->bio) != NULL) { 2400 rq->bio = bio->bi_next; 2401 2402 bio_put(bio); 2403 } 2404 } 2405 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); 2406 2407 /* 2408 * Copy attributes of the original request to the clone request. 2409 * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied. 2410 */ 2411 static void __blk_rq_prep_clone(struct request *dst, struct request *src) 2412 { 2413 dst->cpu = src->cpu; 2414 dst->cmd_flags = (rq_data_dir(src) | REQ_NOMERGE); 2415 dst->cmd_type = src->cmd_type; 2416 dst->__sector = blk_rq_pos(src); 2417 dst->__data_len = blk_rq_bytes(src); 2418 dst->nr_phys_segments = src->nr_phys_segments; 2419 dst->ioprio = src->ioprio; 2420 dst->extra_len = src->extra_len; 2421 } 2422 2423 /** 2424 * blk_rq_prep_clone - Helper function to setup clone request 2425 * @rq: the request to be setup 2426 * @rq_src: original request to be cloned 2427 * @bs: bio_set that bios for clone are allocated from 2428 * @gfp_mask: memory allocation mask for bio 2429 * @bio_ctr: setup function to be called for each clone bio. 2430 * Returns %0 for success, non %0 for failure. 2431 * @data: private data to be passed to @bio_ctr 2432 * 2433 * Description: 2434 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. 2435 * The actual data parts of @rq_src (e.g. 
->cmd, ->buffer, ->sense)
2436  *     are not copied, and copying such parts is the caller's responsibility.
2437  *     Also, pages which the original bios are pointing to are not copied
2438  *     and the cloned bios just point to the same pages.
2439  *     So cloned bios must be completed before original bios, which means
2440  *     the caller must complete @rq before @rq_src.
2441  */
2442 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
2443 		      struct bio_set *bs, gfp_t gfp_mask,
2444 		      int (*bio_ctr)(struct bio *, struct bio *, void *),
2445 		      void *data)
2446 {
2447 	struct bio *bio, *bio_src;
2448 
2449 	if (!bs)
2450 		bs = fs_bio_set;
2451 
2452 	blk_rq_init(NULL, rq);
2453 
2454 	__rq_for_each_bio(bio_src, rq_src) {
2455 		bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs);
2456 		if (!bio)
2457 			goto free_and_out;
2458 
2459 		__bio_clone(bio, bio_src);
2460 
2461 		if (bio_integrity(bio_src) &&
2462 		    bio_integrity_clone(bio, bio_src, gfp_mask, bs))
2463 			goto free_and_out;
2464 
2465 		if (bio_ctr && bio_ctr(bio, bio_src, data))
2466 			goto free_and_out;
2467 
2468 		if (rq->bio) {
2469 			rq->biotail->bi_next = bio;
2470 			rq->biotail = bio;
2471 		} else
2472 			rq->bio = rq->biotail = bio;
2473 	}
2474 
2475 	__blk_rq_prep_clone(rq, rq_src);
2476 
2477 	return 0;
2478 
2479 free_and_out:
2480 	if (bio)
2481 		bio_free(bio, bs);
2482 	blk_rq_unprep_clone(rq);
2483 
2484 	return -ENOMEM;
2485 }
2486 EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
2487 
2488 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
2489 {
2490 	return queue_work(kblockd_workqueue, work);
2491 }
2492 EXPORT_SYMBOL(kblockd_schedule_work);
2493 
2494 int __init blk_dev_init(void)
2495 {
2496 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
2497 			sizeof(((struct request *)0)->cmd_flags));
2498 
2499 	kblockd_workqueue = create_workqueue("kblockd");
2500 	if (!kblockd_workqueue)
2501 		panic("Failed to create kblockd\n");
2502 
2503 	request_cachep = kmem_cache_create("blkdev_requests",
2504 			sizeof(struct request), 0, SLAB_PANIC, NULL);
2505 
2506 	blk_requestq_cachep = kmem_cache_create("blkdev_queue",
2507 			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
2508 
2509 	return 0;
2510 }
2511 
2512 
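/*
 * A minimal sketch of how a request_fn-style driver typically consumes the
 * completion API above. Everything here is illustrative: "example_request_fn"
 * is a hypothetical callback (a real driver would register its own via
 * blk_init_queue()), and completing each request inline with no error merely
 * stands in for handing it to hardware. Wrapped in #if 0 so it stays an
 * illustration only.
 */
#if 0
static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	/*
	 * The block layer calls ->request_fn with q->queue_lock held, so
	 * blk_fetch_request() (== blk_peek_request() + blk_start_request())
	 * and the locked __blk_end_request_all() variant are safe here.
	 */
	while ((rq = blk_fetch_request(q)) != NULL)
		__blk_end_request_all(rq, 0);	/* pretend the I/O succeeded */
}
#endif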