/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/blktrace_api.h>
#include <linux/fault-inject.h>

#include "blk.h"

static int __make_request(struct request_queue *q, struct bio *bio);

/*
 * For the allocated request tables
 */
static struct kmem_cache *request_cachep;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

static DEFINE_PER_CPU(struct list_head, blk_cpu_done);

static void drive_stat_acct(struct request *rq, int new_io)
{
	struct hd_struct *part;
	int rw = rq_data_dir(rq);

	if (!blk_fs_request(rq) || !rq->rq_disk)
		return;

	part = get_part(rq->rq_disk, rq->sector);
	if (!new_io)
		__all_stat_inc(rq->rq_disk, part, merges[rw], rq->sector);
	else {
		disk_round_stats(rq->rq_disk);
		rq->rq_disk->in_flight++;
		if (part) {
			part_round_stats(part);
			part->in_flight++;
		}
	}
}

void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}
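
/*
 * Worked example (illustrative, not part of the original file): with the
 * default nr_requests = 128, the thresholds above come out as
 *
 *	nr_congestion_on  = 128 - 128/8 + 1          = 113
 *	nr_congestion_off = 128 - 128/8 - 128/16 - 1 = 103
 *
 * so the queue is flagged congested once 113 requests are allocated, and
 * the flag is only cleared again when the count drops below 103.  The gap
 * between the two values provides hysteresis, keeping the congestion flag
 * from flapping when the allocation count hovers near the limit.
 */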

/**
 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
 * @bdev:	device
 *
 * Locates the passed device's request queue and returns the address of its
 * backing_dev_info
 *
 * Will return NULL if the request queue cannot be located.
 */
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
	struct backing_dev_info *ret = NULL;
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		ret = &q->backing_dev_info;
	return ret;
}
EXPORT_SYMBOL(blk_get_backing_dev_info);

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->donelist);
	rq->q = q;
	rq->sector = rq->hard_sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->cmd = rq->__cmd;
	rq->tag = -1;
	rq->ref_count = 1;
}
EXPORT_SYMBOL(blk_rq_init);

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, int error)
{
	struct request_queue *q = rq->q;

	if (&q->bar_rq != rq) {
		if (error)
			clear_bit(BIO_UPTODATE, &bio->bi_flags);
		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			error = -EIO;

		if (unlikely(nbytes > bio->bi_size)) {
			printk(KERN_ERR "%s: want %u bytes done, %u left\n",
			       __func__, nbytes, bio->bi_size);
			nbytes = bio->bi_size;
		}

		bio->bi_size -= nbytes;
		bio->bi_sector += (nbytes >> 9);

		if (bio_integrity(bio))
			bio_integrity_advance(bio, nbytes);

		if (bio->bi_size == 0)
			bio_endio(bio, error);
	} else {
		/*
		 * Okay, this is the barrier request in progress, just
		 * record the error;
		 */
		if (error && !q->orderr)
			q->orderr = error;
	}
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	int bit;

	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
		rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %lu/%u\n",
	       (unsigned long long)rq->sector,
	       rq->nr_sectors,
	       rq->current_nr_sectors);
	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, data %p, len %u\n",
	       rq->bio, rq->biotail,
	       rq->buffer, rq->data,
	       rq->data_len);

	if (blk_pc_request(rq)) {
		printk(KERN_INFO "  cdb: ");
		for (bit = 0; bit < BLK_MAX_CDB; bit++)
			printk("%02x ", rq->cmd[bit]);
		printk("\n");
	}
}
EXPORT_SYMBOL(blk_dump_rq_flags);

/*
 * "plug" the device if there are no outstanding requests: this will
 * force the transfer to start only after we have put all the requests
 * on the list.
 *
 * This is called with interrupts off and no requests on the queue and
 * with the queue lock held.
 */
void blk_plug_device(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	/*
	 * don't plug a stopped queue, it must be paired with blk_start_queue()
	 * which will restart the queueing
	 */
	if (blk_queue_stopped(q))
		return;

	if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
	}
}
EXPORT_SYMBOL(blk_plug_device);

/*
 * remove the queue from the plugged list, if present. called with
 * queue lock held and interrupts disabled.
 */
int blk_remove_plug(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
		return 0;

	del_timer(&q->unplug_timer);
	return 1;
}
EXPORT_SYMBOL(blk_remove_plug);
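
/*
 * Sketch (illustrative only, not part of this file): a caller that wants
 * to defer dispatch can plug the queue while work is being queued and let
 * the unplug timer (or an explicit blk_unplug()) kick ->request_fn later.
 * The "example_" name is hypothetical.
 */
#if 0
static void example_defer_dispatch(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (!blk_queue_plugged(q))
		blk_plug_device(q);	/* arms q->unplug_timer */
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* ... queue more work; later, force dispatch instead of
	 * waiting for the timer to fire: */
	blk_unplug(q);
}
#endif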

/*
 * remove the plug and let it rip..
 */
void __generic_unplug_device(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;

	if (!blk_remove_plug(q))
		return;

	q->request_fn(q);
}
EXPORT_SYMBOL(__generic_unplug_device);

/**
 * generic_unplug_device - fire a request queue
 * @q: The &struct request_queue in question
 *
 * Description:
 *   Linux uses plugging to build bigger requests queues before letting
 *   the device have at them. If a queue is plugged, the I/O scheduler
 *   is still adding and merging requests on the queue. Once the queue
 *   gets unplugged, the request_fn defined for the queue is invoked and
 *   transfers started.
 **/
void generic_unplug_device(struct request_queue *q)
{
	if (blk_queue_plugged(q)) {
		spin_lock_irq(q->queue_lock);
		__generic_unplug_device(q);
		spin_unlock_irq(q->queue_lock);
	}
}
EXPORT_SYMBOL(generic_unplug_device);

static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
				   struct page *page)
{
	struct request_queue *q = bdi->unplug_io_data;

	blk_unplug(q);
}

void blk_unplug_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, unplug_work);

	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
			      q->rq.count[READ] + q->rq.count[WRITE]);

	q->unplug_fn(q);
}

void blk_unplug_timeout(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;

	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
			      q->rq.count[READ] + q->rq.count[WRITE]);

	kblockd_schedule_work(&q->unplug_work);
}

void blk_unplug(struct request_queue *q)
{
	/*
	 * devices don't necessarily have an ->unplug_fn defined
	 */
	if (q->unplug_fn) {
		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
				      q->rq.count[READ] + q->rq.count[WRITE]);

		q->unplug_fn(q);
	}
}
EXPORT_SYMBOL(blk_unplug);

/**
 * blk_start_queue - restart a previously stopped queue
 * @q: The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue(). Queue lock must be held.
 **/
void blk_start_queue(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);

	/*
	 * one level of recursion is ok and is much faster than kicking
	 * the unplug handling
	 */
	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
		q->request_fn(q);
		queue_flag_clear(QUEUE_FLAG_REENTER, q);
	} else {
		blk_plug_device(q);
		kblockd_schedule_work(&q->unplug_work);
	}
}
EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q: The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations. Queue lock must be held.
 **/
void blk_stop_queue(struct request_queue *q)
{
	blk_remove_plug(q);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);
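
/*
 * Sketch (illustrative, hypothetical "example_" driver): the queue-full
 * dance described above.  The request_fn stops the queue when the hardware
 * is saturated; the completion path restarts it.  example_hw_queue_full()
 * and example_hw_submit() stand in for driver-specific hardware access.
 */
#if 0
static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (example_hw_queue_full()) {
			/* can't take more now; request_fn won't be
			 * re-invoked until blk_start_queue() runs */
			blk_stop_queue(q);
			break;
		}
		blkdev_dequeue_request(rq);
		example_hw_submit(rq);
	}
}

static void example_hw_completion(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);	/* clears STOPPED and reruns request_fn */
	spin_unlock_irqrestore(q->queue_lock, flags);
}
#endif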

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->unplug_timer);
	kblockd_flush_work(&q->unplug_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 */
void __blk_run_queue(struct request_queue *q)
{
	blk_remove_plug(q);

	/*
	 * Only recurse once to avoid overrunning the stack, let the unplug
	 * handling reinvoke the handler shortly if we already got there.
	 */
	if (!elv_queue_empty(q)) {
		if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
			q->request_fn(q);
			queue_flag_clear(QUEUE_FLAG_REENTER, q);
		} else {
			blk_plug_device(q);
			kblockd_schedule_work(&q->unplug_work);
		}
	}
}
EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 */
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);

void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}

void blk_cleanup_queue(struct request_queue *q)
{
	mutex_lock(&q->sysfs_lock);
	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
	mutex_unlock(&q->sysfs_lock);

	if (q->elevator)
		elevator_exit(q->elevator);

	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

static int blk_init_free_list(struct request_queue *q)
{
	struct request_list *rl = &q->rq;

	rl->count[READ] = rl->count[WRITE] = 0;
	rl->starved[READ] = rl->starved[WRITE] = 0;
	rl->elvpriv = 0;
	init_waitqueue_head(&rl->wait[READ]);
	init_waitqueue_head(&rl->wait[WRITE]);

	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
				mempool_free_slab, request_cachep, q->node);

	if (!rl->rq_pool)
		return -ENOMEM;

	return 0;
}

struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, -1);
}
EXPORT_SYMBOL(blk_alloc_queue);
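
/*
 * Sketch (illustrative): a bio-based driver (md/loop style) pairs
 * blk_alloc_queue() with its own make_request_fn instead of using the
 * request_fn machinery.  The "example_" names are hypothetical.
 */
#if 0
static int example_make_request(struct request_queue *q, struct bio *bio)
{
	/* handle the bio directly; no elevator or request allocation */
	bio_endio(bio, 0);
	return 0;
}

static struct request_queue *example_alloc_bio_queue(void)
{
	struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

	if (q)
		blk_queue_make_request(q, example_make_request);
	return q;
}
#endif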

struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
	struct request_queue *q;
	int err;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				  gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
	q->backing_dev_info.unplug_io_data = q;
	err = bdi_init(&q->backing_dev_info);
	if (err) {
		kmem_cache_free(blk_requestq_cachep, q);
		return NULL;
	}

	init_timer(&q->unplug_timer);

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->sysfs_lock);
	spin_lock_init(&q->__queue_lock);

	return q;
}
EXPORT_SYMBOL(blk_alloc_queue_node);

/**
 * blk_init_queue  - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sorts requests and coalesces adjacent requests, then it must
 *    call blk_init_queue().  The function @rfn will be called when there
 *    are requests on the queue that need to be processed.  If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time.  If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will be taken also from interrupt context, so irq
 *    disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/

struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, -1);
}
EXPORT_SYMBOL(blk_init_queue);
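
/*
 * Sketch (illustrative, hypothetical names): the classic request-based
 * driver init sequence the kernel-doc above describes, reusing the
 * hypothetical example_request_fn from the earlier sketch.
 */
#if 0
static DEFINE_SPINLOCK(example_lock);

static int __init example_init(void)
{
	struct request_queue *q;

	q = blk_init_queue(example_request_fn, &example_lock);
	if (!q)
		return -ENOMEM;

	blk_queue_max_sectors(q, 128);
	/* ... attach q to a gendisk, then add_disk() ... */
	return 0;
}
#endif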

struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);

	if (!q)
		return NULL;

	q->node = node_id;
	if (blk_init_free_list(q)) {
		kmem_cache_free(blk_requestq_cachep, q);
		return NULL;
	}

	/*
	 * if caller didn't supply a lock, they get per-queue locking with
	 * our embedded lock
	 */
	if (!lock)
		lock = &q->__queue_lock;

	q->request_fn		= rfn;
	q->prep_rq_fn		= NULL;
	q->unplug_fn		= generic_unplug_device;
	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER);
	q->queue_lock		= lock;

	blk_queue_segment_boundary(q, 0xffffffff);

	blk_queue_make_request(q, __make_request);
	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);

	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);

	q->sg_reserved_size = INT_MAX;

	/*
	 * all done
	 */
	if (!elevator_init(q, NULL)) {
		blk_queue_congestion_threshold(q);
		return q;
	}

	blk_put_queue(q);
	return NULL;
}
EXPORT_SYMBOL(blk_init_queue_node);

int blk_get_queue(struct request_queue *q)
{
	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
		kobject_get(&q->kobj);
		return 0;
	}

	return 1;
}

static inline void blk_free_request(struct request_queue *q, struct request *rq)
{
	if (rq->cmd_flags & REQ_ELVPRIV)
		elv_put_request(q, rq);
	mempool_free(rq, q->rq.rq_pool);
}

static struct request *
blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
{
	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);

	if (!rq)
		return NULL;

	blk_rq_init(q, rq);

	/*
	 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
	 * see bio.h and blkdev.h
	 */
	rq->cmd_flags = rw | REQ_ALLOCED;

	if (priv) {
		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
			mempool_free(rq, q->rq.rq_pool);
			return NULL;
		}
		rq->cmd_flags |= REQ_ELVPRIV;
	}

	return rq;
}

/*
 * ioc_batching returns true if the ioc is a valid batching request and
 * should be given priority access to a request.
 */
static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc)
		return 0;

	/*
	 * Make sure the process is able to allocate at least 1 request
	 * even if the batch times out, otherwise we could theoretically
	 * lose wakeups.
	 */
	return ioc->nr_batch_requests == q->nr_batching ||
		(ioc->nr_batch_requests > 0
		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}

/*
 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 * will cause the process to be a "batcher" on all queues in the system. This
 * is the behaviour we want though - once it gets a wakeup it should be given
 * a nice run.
 */
static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc || ioc_batching(q, ioc))
		return;

	ioc->nr_batch_requests = q->nr_batching;
	ioc->last_waited = jiffies;
}

static void __freed_request(struct request_queue *q, int rw)
{
	struct request_list *rl = &q->rq;

	if (rl->count[rw] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, rw);

	if (rl->count[rw] + 1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[rw]))
			wake_up(&rl->wait[rw]);

		blk_clear_queue_full(q, rw);
	}
}

/*
 * A request has just been released.  Account for it, update the full and
 * congestion status, wake up any waiters.   Called under q->queue_lock.
 */
static void freed_request(struct request_queue *q, int rw, int priv)
{
	struct request_list *rl = &q->rq;

	rl->count[rw]--;
	if (priv)
		rl->elvpriv--;

	__freed_request(q, rw);

	if (unlikely(rl->starved[rw ^ 1]))
		__freed_request(q, rw ^ 1);
}

#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
/*
 * Get a free request, queue_lock must be held.
 * Returns NULL on failure, with queue_lock held.
 * Returns !NULL on success, with queue_lock *not held*.
 */
static struct request *get_request(struct request_queue *q, int rw_flags,
				   struct bio *bio, gfp_t gfp_mask)
{
	struct request *rq = NULL;
	struct request_list *rl = &q->rq;
	struct io_context *ioc = NULL;
	const int rw = rw_flags & 0x01;
	int may_queue, priv;

	may_queue = elv_may_queue(q, rw_flags);
	if (may_queue == ELV_MQUEUE_NO)
		goto rq_starved;

	if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
		if (rl->count[rw]+1 >= q->nr_requests) {
			ioc = current_io_context(GFP_ATOMIC, q->node);
			/*
			 * The queue will fill after this allocation, so set
			 * it as full, and mark this process as "batching".
			 * This process will be allowed to complete a batch of
			 * requests, others will be blocked.
			 */
			if (!blk_queue_full(q, rw)) {
				ioc_set_batching(q, ioc);
				blk_set_queue_full(q, rw);
			} else {
				if (may_queue != ELV_MQUEUE_MUST
						&& !ioc_batching(q, ioc)) {
					/*
					 * The queue is full and the allocating
					 * process is not a "batcher", and not
					 * exempted by the IO scheduler
					 */
					goto out;
				}
			}
		}
		blk_set_queue_congested(q, rw);
	}

	/*
	 * Only allow batching queuers to allocate up to 50% over the defined
	 * limit of requests, otherwise we could have thousands of requests
	 * allocated with any setting of ->nr_requests
	 */
	if (rl->count[rw] >= (3 * q->nr_requests / 2))
		goto out;

	rl->count[rw]++;
	rl->starved[rw] = 0;

	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
	if (priv)
		rl->elvpriv++;

	spin_unlock_irq(q->queue_lock);

	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
	if (unlikely(!rq)) {
		/*
		 * Allocation failed presumably due to memory. Undo anything
		 * we might have messed up.
		 *
		 * Allocating task should really be put onto the front of the
		 * wait queue, but this is pretty rare.
		 */
		spin_lock_irq(q->queue_lock);
		freed_request(q, rw, priv);

		/*
		 * in the very unlikely event that allocation failed and no
		 * requests for this direction were pending, mark us starved
		 * so that freeing of a request in the other direction will
		 * notice us. another possible fix would be to split the
		 * rq mempool into READ and WRITE
		 */
rq_starved:
		if (unlikely(rl->count[rw] == 0))
			rl->starved[rw] = 1;

		goto out;
	}

	/*
	 * ioc may be NULL here, and ioc_batching will be false. That's
	 * OK, if the queue is under the request limit then requests need
	 * not count toward the nr_batch_requests limit. There will always
	 * be some limit enforced by BLK_BATCH_TIME.
	 */
	if (ioc_batching(q, ioc))
		ioc->nr_batch_requests--;

	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
out:
	return rq;
}

/*
 * No available requests for this queue, unplug the device and wait for some
 * requests to become available.
 *
 * Called with q->queue_lock held, and returns with it unlocked.
 */
static struct request *get_request_wait(struct request_queue *q, int rw_flags,
					struct bio *bio)
{
	const int rw = rw_flags & 0x01;
	struct request *rq;

	rq = get_request(q, rw_flags, bio, GFP_NOIO);
	while (!rq) {
		DEFINE_WAIT(wait);
		struct io_context *ioc;
		struct request_list *rl = &q->rq;

		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
				TASK_UNINTERRUPTIBLE);

		blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);

		__generic_unplug_device(q);
		spin_unlock_irq(q->queue_lock);
		io_schedule();

		/*
		 * After sleeping, we become a "batching" process and
		 * will be able to allocate at least one request, and
		 * up to a big batch of them for a small period of time.
		 * See ioc_batching, ioc_set_batching
		 */
		ioc = current_io_context(GFP_NOIO, q->node);
		ioc_set_batching(q, ioc);

		spin_lock_irq(q->queue_lock);
		finish_wait(&rl->wait[rw], &wait);

		rq = get_request(q, rw_flags, bio, GFP_NOIO);
	}

	return rq;
}

struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
	struct request *rq;

	BUG_ON(rw != READ && rw != WRITE);

	spin_lock_irq(q->queue_lock);
	if (gfp_mask & __GFP_WAIT) {
		rq = get_request_wait(q, rw, NULL);
	} else {
		rq = get_request(q, rw, NULL, gfp_mask);
		if (!rq)
			spin_unlock_irq(q->queue_lock);
	}
	/* q->queue_lock is unlocked at this point */

	return rq;
}
EXPORT_SYMBOL(blk_get_request);
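
/*
 * Sketch (illustrative): allocating a request directly from the pool and
 * releasing it again.  With a sleeping gfp mask the allocation cannot
 * fail; with GFP_ATOMIC the NULL return must be checked.
 */
#if 0
static int example_get_put(struct request_queue *q)
{
	struct request *rq;

	rq = blk_get_request(q, READ, GFP_KERNEL);	/* may sleep */
	/* ... set up rq->cmd / data and hand it to the hardware ... */
	blk_put_request(rq);
	return 0;
}
#endif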

/**
 * blk_start_queueing - initiate dispatch of requests to device
 * @q:		request queue to kick into gear
 *
 * This is basically a helper to remove the need to know whether a queue
 * is plugged or not if someone just wants to initiate dispatch of requests
 * for this queue.
 *
 * The queue lock must be held with interrupts disabled.
 */
void blk_start_queueing(struct request_queue *q)
{
	if (!blk_queue_plugged(q))
		q->request_fn(q);
	else
		__generic_unplug_device(q);
}
EXPORT_SYMBOL(blk_start_queueing);

/**
 * blk_requeue_request - put a request back on queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 *
 * Description:
 *    Drivers often keep queueing requests until the hardware cannot accept
 *    more, when that condition happens we need to put the request back
 *    on the queue. Must be called with queue lock held.
 */
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);

	if (blk_rq_tagged(rq))
		blk_queue_end_tag(q, rq);

	elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);

/**
 * blk_insert_request - insert a special request into a request queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 * @at_head:	insert request at head or tail of queue
 * @data:	private data
 *
 * Description:
 *    Many block devices need to execute commands asynchronously, so they don't
 *    block the whole kernel from preemption during request execution.  This is
 *    accomplished normally by inserting artificial requests tagged as
 *    REQ_SPECIAL into the corresponding request queue, and letting them be
 *    scheduled for actual execution by the request queue.
 *
 *    We have the option of inserting the head or the tail of the queue.
 *    Typically we use the tail for new ioctls and so forth.  We use the head
 *    of the queue for things like a QUEUE_FULL message from a device, or a
 *    host that is unable to accept a particular command.
 */
void blk_insert_request(struct request_queue *q, struct request *rq,
			int at_head, void *data)
{
	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
	unsigned long flags;

	/*
	 * tell I/O scheduler that this isn't a regular read/write (ie it
	 * must not attempt merges on this) and that it acts as a soft
	 * barrier
	 */
	rq->cmd_type = REQ_TYPE_SPECIAL;
	rq->cmd_flags |= REQ_SOFTBARRIER;

	rq->special = data;

	spin_lock_irqsave(q->queue_lock, flags);

	/*
	 * If command is tagged, release the tag
	 */
	if (blk_rq_tagged(rq))
		blk_queue_end_tag(q, rq);

	drive_stat_acct(rq, 1);
	__elv_add_request(q, rq, where, 0);
	blk_start_queueing(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_insert_request);
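
/*
 * Sketch (illustrative): pushing a driver-private command to the head of
 * the queue, as the kernel-doc above describes.  blk_insert_request()
 * itself marks the request REQ_TYPE_SPECIAL and stores @cmd_data in
 * rq->special before queueing it.  The "example_" name is hypothetical.
 */
#if 0
static void example_send_special(struct request_queue *q, void *cmd_data)
{
	struct request *rq = blk_get_request(q, READ, __GFP_WAIT);

	blk_insert_request(q, rq, 1, cmd_data);	/* 1 == at head */
}
#endif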

/*
 * add-request adds a request to the linked list.
 * queue lock is held and interrupts disabled, as we muck with the
 * request queue list.
 */
static inline void add_request(struct request_queue *q, struct request *req)
{
	drive_stat_acct(req, 1);

	/*
	 * elevator indicated where it wants this request to be
	 * inserted at elevator_merge time
	 */
	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
}

/*
 * disk_round_stats()	- Round off the performance stats on a struct
 * disk_stats.
 *
 * The average IO queue length and utilisation statistics are maintained
 * by observing the current state of the queue length and the amount of
 * time it has been in this state for.
 *
 * Normally, that accounting is done on IO completion, but that can result
 * in more than a second's worth of IO being accounted for within any one
 * second, leading to >100% utilisation.  To deal with that, we call this
 * function to do a round-off before returning the results when reading
 * /proc/diskstats.  This accounts immediately for all queue usage up to
 * the current jiffies and restarts the counters again.
 */
void disk_round_stats(struct gendisk *disk)
{
	unsigned long now = jiffies;

	if (now == disk->stamp)
		return;

	if (disk->in_flight) {
		__disk_stat_add(disk, time_in_queue,
				disk->in_flight * (now - disk->stamp));
		__disk_stat_add(disk, io_ticks, (now - disk->stamp));
	}
	disk->stamp = now;
}
EXPORT_SYMBOL_GPL(disk_round_stats);
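
/*
 * Worked example (illustrative): if 4 requests have been in flight since
 * disk->stamp and 10 jiffies have elapsed, the snapshot above adds
 * 4 * 10 = 40 to time_in_queue (queue-length-weighted busy time) and 10
 * to io_ticks (wall time the disk was busy), then resets the stamp so
 * the same interval is never accounted twice.
 */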

void part_round_stats(struct hd_struct *part)
{
	unsigned long now = jiffies;

	if (now == part->stamp)
		return;

	if (part->in_flight) {
		__part_stat_add(part, time_in_queue,
				part->in_flight * (now - part->stamp));
		__part_stat_add(part, io_ticks, (now - part->stamp));
	}
	part->stamp = now;
}

/*
 * queue lock must be held
 */
void __blk_put_request(struct request_queue *q, struct request *req)
{
	if (unlikely(!q))
		return;
	if (unlikely(--req->ref_count))
		return;

	elv_completed_request(q, req);

	/*
	 * Request may not have originated from ll_rw_blk. if not,
	 * it didn't come out of our reserved rq pools
	 */
	if (req->cmd_flags & REQ_ALLOCED) {
		int rw = rq_data_dir(req);
		int priv = req->cmd_flags & REQ_ELVPRIV;

		BUG_ON(!list_empty(&req->queuelist));
		BUG_ON(!hlist_unhashed(&req->hash));

		blk_free_request(q, req);
		freed_request(q, rw, priv);
	}
}
EXPORT_SYMBOL_GPL(__blk_put_request);

void blk_put_request(struct request *req)
{
	unsigned long flags;
	struct request_queue *q = req->q;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_put_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_put_request);

void init_request_from_bio(struct request *req, struct bio *bio)
{
	req->cmd_type = REQ_TYPE_FS;

	/*
	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
	 */
	if (bio_rw_ahead(bio) || bio_failfast(bio))
		req->cmd_flags |= REQ_FAILFAST;

	/*
	 * REQ_BARRIER implies no merging, but let's make it explicit
	 */
	if (unlikely(bio_barrier(bio)))
		req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);

	if (bio_sync(bio))
		req->cmd_flags |= REQ_RW_SYNC;
	if (bio_rw_meta(bio))
		req->cmd_flags |= REQ_RW_META;

	req->errors = 0;
	req->hard_sector = req->sector = bio->bi_sector;
	req->ioprio = bio_prio(bio);
	req->start_time = jiffies;
	blk_rq_bio_prep(req->q, req, bio);
}

static int __make_request(struct request_queue *q, struct bio *bio)
{
	struct request *req;
	int el_ret, nr_sectors, barrier, err;
	const unsigned short prio = bio_prio(bio);
	const int sync = bio_sync(bio);
	int rw_flags;

	nr_sectors = bio_sectors(bio);

	/*
	 * low level driver can indicate that it wants pages above a
	 * certain limit bounced to low memory (ie for highmem, or even
	 * ISA dma in theory)
	 */
	blk_queue_bounce(q, &bio);

	barrier = bio_barrier(bio);
	if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
		err = -EOPNOTSUPP;
		goto end_io;
	}

	spin_lock_irq(q->queue_lock);

	if (unlikely(barrier) || elv_queue_empty(q))
		goto get_rq;

	el_ret = elv_merge(q, &req, bio);
	switch (el_ret) {
	case ELEVATOR_BACK_MERGE:
		BUG_ON(!rq_mergeable(req));

		if (!ll_back_merge_fn(q, req, bio))
			break;

		blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);

		req->biotail->bi_next = bio;
		req->biotail = bio;
		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
		req->ioprio = ioprio_best(req->ioprio, prio);
		drive_stat_acct(req, 0);
		if (!attempt_back_merge(q, req))
			elv_merged_request(q, req, el_ret);
		goto out;

	case ELEVATOR_FRONT_MERGE:
		BUG_ON(!rq_mergeable(req));

		if (!ll_front_merge_fn(q, req, bio))
			break;

		blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);

		bio->bi_next = req->bio;
		req->bio = bio;

		/*
		 * may not be valid. if the low level driver said
		 * it didn't need a bounce buffer then it better
		 * not touch req->buffer either...
		 */
		req->buffer = bio_data(bio);
		req->current_nr_sectors = bio_cur_sectors(bio);
		req->hard_cur_sectors = req->current_nr_sectors;
		req->sector = req->hard_sector = bio->bi_sector;
		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
		req->ioprio = ioprio_best(req->ioprio, prio);
		drive_stat_acct(req, 0);
		if (!attempt_front_merge(q, req))
			elv_merged_request(q, req, el_ret);
		goto out;

	/* ELV_NO_MERGE: elevator says don't/can't merge. */
	default:
		;
	}

get_rq:
	/*
	 * This sync check and mask will be re-done in init_request_from_bio(),
	 * but we need to set it earlier to expose the sync flag to the
	 * rq allocator and io schedulers.
	 */
	rw_flags = bio_data_dir(bio);
	if (sync)
		rw_flags |= REQ_RW_SYNC;

	/*
	 * Grab a free request. This might sleep but cannot fail.
	 * Returns with the queue unlocked.
	 */
	req = get_request_wait(q, rw_flags, bio);

	/*
	 * After dropping the lock and possibly sleeping here, our request
	 * may now be mergeable after it had proven unmergeable (above).
	 * We don't worry about that case for efficiency. It won't happen
	 * often, and the elevators are able to handle it.
	 */
	init_request_from_bio(req, bio);

	spin_lock_irq(q->queue_lock);
	if (elv_queue_empty(q))
		blk_plug_device(q);
	add_request(q, req);
out:
	if (sync)
		__generic_unplug_device(q);

	spin_unlock_irq(q->queue_lock);
	return 0;

end_io:
	bio_endio(bio, err);
	return 0;
}

/*
 * If bio->bi_bdev is a partition, remap the location
 */
static inline void blk_partition_remap(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;

	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
		struct hd_struct *p = bdev->bd_part;

		bio->bi_sector += p->start_sect;
		bio->bi_bdev = bdev->bd_contains;

		blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
				    bdev->bd_dev, bio->bi_sector,
				    bio->bi_sector - p->start_sect);
	}
}

static void handle_bad_sector(struct bio *bio)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "attempt to access beyond end of device\n");
	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
			bdevname(bio->bi_bdev, b),
			bio->bi_rw,
			(unsigned long long)bio->bi_sector + bio_sectors(bio),
			(long long)(bio->bi_bdev->bd_inode->i_size >> 9));

	set_bit(BIO_EOF, &bio->bi_flags);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static int should_fail_request(struct bio *bio)
{
	if ((bio->bi_bdev->bd_disk->flags & GENHD_FL_FAIL) ||
	    (bio->bi_bdev->bd_part && bio->bi_bdev->bd_part->make_it_fail))
		return should_fail(&fail_make_request, bio->bi_size);

	return 0;
}

static int __init fail_make_request_debugfs(void)
{
	return init_fault_attr_dentries(&fail_make_request,
					"fail_make_request");
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline int should_fail_request(struct bio *bio)
{
	return 0;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Check whether this bio extends beyond the end of the device.
 */
static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
{
	sector_t maxsector;

	if (!nr_sectors)
		return 0;

	/* Test device or partition size, when known. */
	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
	if (maxsector) {
		sector_t sector = bio->bi_sector;

		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
			/*
			 * This may well happen - the kernel calls bread()
			 * without checking the size of the device, e.g., when
			 * mounting a device.
			 */
			handle_bad_sector(bio);
			return 1;
		}
	}

	return 0;
}

/**
 * generic_make_request: hand a buffer to its device driver for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * generic_make_request() is used to make I/O requests of block
 * devices. It is passed a &struct bio, which describes the I/O that needs
 * to be done.
 *
 * generic_make_request() does not return any status.  The
 * success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the bio->bi_end_io
 * function described (one day) elsewhere.
 *
 * The caller of generic_make_request must make sure that bi_io_vec
 * are set to describe the memory buffer, and that bi_bdev and bi_sector are
 * set to describe the device address, and the
 * bi_end_io and optionally bi_private are set to describe how
 * completion notification should be signaled.
 *
 * generic_make_request and the drivers it calls may use bi_next if this
 * bio happens to be merged with someone else, and may change bi_bdev and
 * bi_sector for remaps as it sees fit.  So the values of these fields
 * should NOT be depended on after the call to generic_make_request.
 */
static inline void __generic_make_request(struct bio *bio)
{
	struct request_queue *q;
	sector_t old_sector;
	int ret, nr_sectors = bio_sectors(bio);
	dev_t old_dev;
	int err = -EIO;

	might_sleep();

	if (bio_check_eod(bio, nr_sectors))
		goto end_io;

	/*
	 * Resolve the mapping until finished. (drivers are
	 * still free to implement/resolve their own stacking
	 * by explicitly returning 0)
	 *
	 * NOTE: we don't repeat the blk_size check for each new device.
	 * Stacking drivers are expected to know what they are doing.
	 */
	old_sector = -1;
	old_dev = 0;
	do {
		char b[BDEVNAME_SIZE];

		q = bdev_get_queue(bio->bi_bdev);
		if (!q) {
			printk(KERN_ERR
			       "generic_make_request: Trying to access "
			       "nonexistent block-device %s (%Lu)\n",
			       bdevname(bio->bi_bdev, b),
			       (long long) bio->bi_sector);
end_io:
			bio_endio(bio, err);
			break;
		}

		if (unlikely(nr_sectors > q->max_hw_sectors)) {
			printk(KERN_ERR "bio too big device %s (%u > %u)\n",
				bdevname(bio->bi_bdev, b),
				bio_sectors(bio),
				q->max_hw_sectors);
			goto end_io;
		}

		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
			goto end_io;

		if (should_fail_request(bio))
			goto end_io;

		/*
		 * If this device has partitions, remap block n
		 * of partition p to block n+start(p) of the disk.
		 */
		blk_partition_remap(bio);

		if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
			goto end_io;

		if (old_sector != -1)
			blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
					    old_sector);

		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);

		old_sector = bio->bi_sector;
		old_dev = bio->bi_bdev->bd_dev;

		if (bio_check_eod(bio, nr_sectors))
			goto end_io;
		if (bio_empty_barrier(bio) && !q->prepare_flush_fn) {
			err = -EOPNOTSUPP;
			goto end_io;
		}

		ret = q->make_request_fn(q, bio);
	} while (ret);
}

/*
 * We only want one ->make_request_fn to be active at a time,
 * else stack usage with stacked devices could be a problem.
 * So use current->bio_{list,tail} to keep a list of requests
 * submitted by a make_request_fn function.
 * current->bio_tail is also used as a flag to say if
 * generic_make_request is currently active in this task or not.
 * If it is NULL, then no make_request is active.  If it is non-NULL,
 * then a make_request is active, and new requests should be added
 * at the tail
 */
void generic_make_request(struct bio *bio)
{
	if (current->bio_tail) {
		/* make_request is active */
		*(current->bio_tail) = bio;
		bio->bi_next = NULL;
		current->bio_tail = &bio->bi_next;
		return;
	}
	/* following loop may be a bit non-obvious, and so deserves some
	 * explanation.
	 * Before entering the loop, bio->bi_next is NULL (as all callers
	 * ensure that) so we have a list with a single bio.
	 * We pretend that we have just taken it off a longer list, so
	 * we assign bio_list to the next (which is NULL) and bio_tail
	 * to &bio_list, thus initialising the bio_list of new bios to be
	 * added.  __generic_make_request may indeed add some more bios
	 * through a recursive call to generic_make_request.  If it
	 * did, we find a non-NULL value in bio_list and re-enter the loop
	 * from the top.  In this case we really did just take the bio
	 * off the top of the list (no pretending) and so fixup bio_list and
	 * bio_tail or bi_next, and call into __generic_make_request again.
	 *
	 * The loop was structured like this to make only one call to
	 * __generic_make_request (which is important as it is large and
	 * inlined) and to keep the structure simple.
	 */
	BUG_ON(bio->bi_next);
	do {
		current->bio_list = bio->bi_next;
		if (bio->bi_next == NULL)
			current->bio_tail = &current->bio_list;
		else
			bio->bi_next = NULL;
		__generic_make_request(bio);
		bio = current->bio_list;
	} while (bio);
	current->bio_tail = NULL; /* deactivate */
}
EXPORT_SYMBOL(generic_make_request);
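
/*
 * Sketch (illustrative, hypothetical "example_" names): how a stacking
 * driver (md/dm style) remaps a bio and resubmits it.  Thanks to the
 * bio_list handling above, the recursive generic_make_request() call made
 * from inside a make_request_fn only queues the bio; it is dispatched when
 * the outer call unwinds, keeping stack usage flat.
 */
#if 0
struct example_dev {
	struct block_device *backing_bdev;
	sector_t start_sect;
};

static int example_stack_make_request(struct request_queue *q,
				      struct bio *bio)
{
	struct example_dev *dev = q->queuedata;

	bio->bi_bdev = dev->backing_bdev;
	bio->bi_sector += dev->start_sect;
	generic_make_request(bio);	/* queued, not recursed */
	return 0;
}
#endif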

/**
 * submit_bio: submit a bio to the block device layer for I/O
 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is very similar in purpose to generic_make_request(), and
 * uses that function to do most of the work. Both are fairly rough
 * interfaces, @bio must be presetup and ready for I/O.
 *
 */
void submit_bio(int rw, struct bio *bio)
{
	int count = bio_sectors(bio);

	bio->bi_rw |= rw;

	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (!bio_empty_barrier(bio)) {

		BIO_BUG_ON(!bio->bi_size);
		BIO_BUG_ON(!bio->bi_io_vec);

		if (rw & WRITE) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_size);
			count_vm_events(PGPGIN, count);
		}

		if (unlikely(block_dump)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
			current->comm, task_pid_nr(current),
				(rw & WRITE) ? "WRITE" : "READ",
				(unsigned long long)bio->bi_sector,
				bdevname(bio->bi_bdev, b));
		}
	}

	generic_make_request(bio);
}
EXPORT_SYMBOL(submit_bio);
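
/*
 * Sketch (illustrative, hypothetical names): building a one-page bio and
 * handing it to submit_bio().  Completion arrives asynchronously through
 * bi_end_io, as the generic_make_request() kernel-doc above explains.
 */
#if 0
static void example_end_io(struct bio *bio, int error)
{
	/* bio is done here; error is 0 or a negative errno */
	bio_put(bio);
}

static void example_read_page(struct block_device *bdev, sector_t sector,
			      struct page *page)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = example_end_io;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	submit_bio(READ, bio);
}
#endif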

/**
 * __end_that_request_first - end I/O on a request
 * @req:      the request being processed
 * @error:    0 for success, < 0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, and sets it up
 *     for the next range of segments (if any) in the cluster.
 *
 * Return:
 *     0 - we are done with this request, call end_that_request_last()
 *     1 - still buffers pending for this request
 **/
static int __end_that_request_first(struct request *req, int error,
				    int nr_bytes)
{
	int total_bytes, bio_nbytes, next_idx = 0;
	struct bio *bio;

	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);

	/*
	 * for a REQ_BLOCK_PC request, we want to carry any eventual
	 * sense key with us all the way through
	 */
	if (!blk_pc_request(req))
		req->errors = 0;

	if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
		printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
				req->rq_disk ? req->rq_disk->disk_name : "?",
				(unsigned long long)req->sector);
	}

	if (blk_fs_request(req) && req->rq_disk) {
		struct hd_struct *part = get_part(req->rq_disk, req->sector);
		const int rw = rq_data_dir(req);

		all_stat_add(req->rq_disk, part, sectors[rw],
				nr_bytes >> 9, req->sector);
	}

	total_bytes = bio_nbytes = 0;
	while ((bio = req->bio) != NULL) {
		int nbytes;

		/*
		 * For an empty barrier request, the low level driver must
		 * store a potential error location in ->sector. We pass
		 * that back up in ->bi_sector.
		 */
		if (blk_empty_barrier(req))
			bio->bi_sector = req->sector;

		if (nr_bytes >= bio->bi_size) {
			req->bio = bio->bi_next;
			nbytes = bio->bi_size;
			req_bio_endio(req, bio, nbytes, error);
			next_idx = 0;
			bio_nbytes = 0;
		} else {
			int idx = bio->bi_idx + next_idx;

			if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
				blk_dump_rq_flags(req, "__end_that");
				printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
				       __func__, bio->bi_idx, bio->bi_vcnt);
				break;
			}

			nbytes = bio_iovec_idx(bio, idx)->bv_len;
			BIO_BUG_ON(nbytes > bio->bi_size);

			/*
			 * not a complete bvec done
			 */
			if (unlikely(nbytes > nr_bytes)) {
				bio_nbytes += nr_bytes;
				total_bytes += nr_bytes;
				break;
			}

			/*
			 * advance to the next vector
			 */
			next_idx++;
			bio_nbytes += nbytes;
		}

		total_bytes += nbytes;
		nr_bytes -= nbytes;

		bio = req->bio;
		if (bio) {
			/*
			 * end more in this run, or just return 'not-done'
			 */
			if (unlikely(nr_bytes <= 0))
				break;
		}
	}

	/*
	 * completely done
	 */
	if (!req->bio)
		return 0;

	/*
	 * if the request wasn't completed, update state
	 */
	if (bio_nbytes) {
		req_bio_endio(req, bio, bio_nbytes, error);
		bio->bi_idx += next_idx;
		bio_iovec(bio)->bv_offset += nr_bytes;
		bio_iovec(bio)->bv_len -= nr_bytes;
	}

	blk_recalc_rq_sectors(req, total_bytes >> 9);
	blk_recalc_rq_segments(req);
	return 1;
}

/*
 * splice the completion data to a local structure and hand off to
 * process_completion_queue() to complete the requests
 */
static void blk_done_softirq(struct softirq_action *h)
{
	struct list_head *cpu_list, local_list;

	local_irq_disable();
	cpu_list = &__get_cpu_var(blk_cpu_done);
	list_replace_init(cpu_list, &local_list);
	local_irq_enable();

	while (!list_empty(&local_list)) {
		struct request *rq;

		rq = list_entry(local_list.next, struct request, donelist);
		list_del_init(&rq->donelist);
		rq->q->softirq_done_fn(rq);
	}
}

static int __cpuinit blk_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;

		local_irq_disable();
		list_splice_init(&per_cpu(blk_cpu_done, cpu),
				 &__get_cpu_var(blk_cpu_done));
		raise_softirq_irqoff(BLOCK_SOFTIRQ);
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block blk_cpu_notifier __cpuinitdata = {
	.notifier_call	= blk_cpu_notify,
};

/**
 * blk_complete_request - end I/O on a request
 * @req:      the request being processed
 *
 * Description:
 *     Ends all I/O on a request. It does not handle partial completions,
 *     unless the driver actually implements this in its completion callback
 *     through requeueing. The actual completion happens out-of-order,
 *     through a softirq handler. The user must have registered a completion
 *     callback through blk_queue_softirq_done().
 **/
void blk_complete_request(struct request *req)
{
	struct list_head *cpu_list;
	unsigned long flags;

	BUG_ON(!req->q->softirq_done_fn);

	local_irq_save(flags);

	cpu_list = &__get_cpu_var(blk_cpu_done);
	list_add_tail(&req->donelist, cpu_list);
	raise_softirq_irqoff(BLOCK_SOFTIRQ);

	local_irq_restore(flags);
}
EXPORT_SYMBOL(blk_complete_request);
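
/*
 * Sketch (illustrative, hypothetical names): a driver opting into softirq
 * completion.  The hard-irq handler just queues the request; the
 * softirq_done_fn registered with blk_queue_softirq_done() does the real
 * end-of-request work.  example_fetch_completed() stands in for reading
 * the completed request back from the hardware.
 */
#if 0
static void example_softirq_done(struct request *rq)
{
	blk_end_request(rq, 0, blk_rq_bytes(rq));
}

static irqreturn_t example_isr(int irq, void *data)
{
	struct request *rq = example_fetch_completed(data);

	blk_complete_request(rq);	/* raises BLOCK_SOFTIRQ */
	return IRQ_HANDLED;
}
#endif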

/*
 * queue lock must be held
 */
static void end_that_request_last(struct request *req, int error)
{
	struct gendisk *disk = req->rq_disk;

	if (blk_rq_tagged(req))
		blk_queue_end_tag(req->q, req);

	if (blk_queued_rq(req))
		blkdev_dequeue_request(req);

	if (unlikely(laptop_mode) && blk_fs_request(req))
		laptop_io_completion();

	/*
	 * Account IO completion.  bar_rq isn't accounted as a normal
	 * IO on queueing nor completion.  Accounting the containing
	 * request is enough.
	 */
	if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
		unsigned long duration = jiffies - req->start_time;
		const int rw = rq_data_dir(req);
		struct hd_struct *part = get_part(disk, req->sector);

		__all_stat_inc(disk, part, ios[rw], req->sector);
		__all_stat_add(disk, part, ticks[rw], duration, req->sector);
		disk_round_stats(disk);
		disk->in_flight--;
		if (part) {
			part_round_stats(part);
			part->in_flight--;
		}
	}

	if (req->end_io)
		req->end_io(req, error);
	else {
		if (blk_bidi_rq(req))
			__blk_put_request(req->next_rq->q, req->next_rq);

		__blk_put_request(req->q, req);
	}
}

static inline void __end_request(struct request *rq, int uptodate,
				 unsigned int nr_bytes)
{
	int error = 0;

	if (uptodate <= 0)
		error = uptodate ? uptodate : -EIO;

	__blk_end_request(rq, error, nr_bytes);
}

/**
 * blk_rq_bytes - Returns bytes left to complete in the entire request
 * @rq: the request being processed
 **/
unsigned int blk_rq_bytes(struct request *rq)
{
	if (blk_fs_request(rq))
		return rq->hard_nr_sectors << 9;

	return rq->data_len;
}
EXPORT_SYMBOL_GPL(blk_rq_bytes);

/**
 * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
 * @rq: the request being processed
 **/
unsigned int blk_rq_cur_bytes(struct request *rq)
{
	if (blk_fs_request(rq))
		return rq->current_nr_sectors << 9;

	if (rq->bio)
		return rq->bio->bi_size;

	return rq->data_len;
}
EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);

/**
 * end_queued_request - end all I/O on a queued request
 * @rq:		the request being processed
 * @uptodate:	error value or 0/1 uptodate flag
 *
 * Description:
 *     Ends all I/O on a request, and removes it from the block layer queues.
 *     Not suitable for normal IO completion, unless the driver still has
 *     the request attached to the block layer.
 *
 **/
void end_queued_request(struct request *rq, int uptodate)
{
	__end_request(rq, uptodate, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(end_queued_request);

/**
 * end_dequeued_request - end all I/O on a dequeued request
 * @rq:		the request being processed
 * @uptodate:	error value or 0/1 uptodate flag
 *
 * Description:
 *     Ends all I/O on a request. The request must already have been
 *     dequeued using blkdev_dequeue_request(), as is normally the case
 *     for most drivers.
 *
 **/
void end_dequeued_request(struct request *rq, int uptodate)
{
	__end_request(rq, uptodate, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(end_dequeued_request);

/**
 * end_request - end I/O on the current segment of the request
 * @req:	the request being processed
 * @uptodate:	error value or 0/1 uptodate flag
 *
 * Description:
 *     Ends I/O on the current segment of a request. If that is the only
 *     remaining segment, the request is also completed and freed.
 *
 *     This is a remnant of how older block drivers handled IO completions.
 *     Modern drivers typically end IO on the full request in one go, unless
 *     they have a residual value to account for. For that case this function
 *     isn't really useful, unless the residual just happens to be the
 *     full current segment. In other words, don't use this function in new
 *     code. Use blk_end_request() or __blk_end_request() for partial
 *     completions, or end_queued_request()/end_dequeued_request() to end
 *     all I/O on a request.
 *
 **/
void end_request(struct request *req, int uptodate)
{
	__end_request(req, uptodate, req->hard_cur_sectors << 9);
}
EXPORT_SYMBOL(end_request);

/**
 * blk_end_io - Generic end_io function to complete a request.
 * @rq:           the request being processed
 * @error:        0 for success, < 0 for error
 * @nr_bytes:     number of bytes to complete @rq
 * @bidi_bytes:   number of bytes to complete @rq->next_rq
 * @drv_callback: function called between completion of bios in the request
 *                and completion of the request.
 *                If the callback returns non 0, this helper returns without
 *                completion of the request.
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
 *     If @rq has leftover, sets it up for the next range of segments.
 *
 * Return:
 *     0 - we are done with this request
 *     1 - this request is not freed yet, it still has pending buffers.
 **/
static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
		      unsigned int bidi_bytes,
		      int (drv_callback)(struct request *))
{
	struct request_queue *q = rq->q;
	unsigned long flags = 0UL;

	if (blk_fs_request(rq) || blk_pc_request(rq)) {
		if (__end_that_request_first(rq, error, nr_bytes))
			return 1;

		/* Bidi request must be completed as a whole */
		if (blk_bidi_rq(rq) &&
		    __end_that_request_first(rq->next_rq, error, bidi_bytes))
			return 1;
	}

	/* Special feature for tricky drivers */
	if (drv_callback && drv_callback(rq))
		return 1;

	add_disk_randomness(rq->rq_disk);

	spin_lock_irqsave(q->queue_lock, flags);
	end_that_request_last(rq, error);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}

/**
 * blk_end_request - Helper function for drivers to complete the request.
 * @rq:       the request being processed
 * @error:    0 for success, < 0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq.
 *     If @rq has leftover, sets it up for the next range of segments.
 *
 * Return:
 *     0 - we are done with this request
 *     1 - still buffers pending for this request
 **/
int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
	return blk_end_io(rq, error, nr_bytes, 0, NULL);
}
EXPORT_SYMBOL_GPL(blk_end_request);

/**
 * __blk_end_request - Helper function for drivers to complete the request.
 * @rq:       the request being processed
 * @error:    0 for success, < 0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Must be called with queue lock held unlike blk_end_request().
 *
 * Return:
 *     0 - we are done with this request
 *     1 - still buffers pending for this request
 **/
int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
	if (blk_fs_request(rq) || blk_pc_request(rq)) {
		if (__end_that_request_first(rq, error, nr_bytes))
			return 1;
	}

	add_disk_randomness(rq->rq_disk);

	end_that_request_last(rq, error);

	return 0;
}
EXPORT_SYMBOL_GPL(__blk_end_request);
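
/*
 * Sketch (illustrative, hypothetical names): typical driver completion
 * paths.  From a context that does not hold the queue lock, use
 * blk_end_request(); from the request_fn or anywhere else that already
 * holds q->queue_lock, use __blk_end_request().
 */
#if 0
static void example_complete_whole(struct request *rq, int error)
{
	/* the whole request is handed in, so nothing may remain */
	if (blk_end_request(rq, error, blk_rq_bytes(rq)))
		BUG();
}

static void example_complete_partial(struct request *rq, unsigned int bytes)
{
	if (blk_end_request(rq, 0, bytes))
		return;	/* more segments pending, rq is still alive */
	/* rq was fully completed and freed at this point */
}
#endif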

/**
 * blk_end_bidi_request - Helper function for drivers to complete bidi request.
 * @rq:         the bidi request being processed
 * @error:      0 for success, < 0 for error
 * @nr_bytes:   number of bytes to complete @rq
 * @bidi_bytes: number of bytes to complete @rq->next_rq
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
 *
 * Return:
 *     0 - we are done with this request
 *     1 - still buffers pending for this request
 **/
int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
			 unsigned int bidi_bytes)
{
	return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
}
EXPORT_SYMBOL_GPL(blk_end_bidi_request);

/**
 * blk_end_request_callback - Special helper function for tricky drivers
 * @rq:           the request being processed
 * @error:        0 for success, < 0 for error
 * @nr_bytes:     number of bytes to complete
 * @drv_callback: function called between completion of bios in the request
 *                and completion of the request.
 *                If the callback returns non 0, this helper returns without
 *                completion of the request.
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq.
 *     If @rq has leftover, sets it up for the next range of segments.
 *
 *     This special helper function is used only for existing tricky drivers.
 *     (e.g. cdrom_newpc_intr() of ide-cd)
 *     This interface will be removed when such drivers are rewritten.
 *     Don't use this interface in other places anymore.
 *
 * Return:
 *     0 - we are done with this request
 *     1 - this request is not freed yet.
 *         this request still has pending buffers or
 *         the driver doesn't want to finish this request yet.
 **/
int blk_end_request_callback(struct request *rq, int error,
			     unsigned int nr_bytes,
			     int (drv_callback)(struct request *))
{
	return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
}
EXPORT_SYMBOL_GPL(blk_end_request_callback);

void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
		     struct bio *bio)
{
	/* first two bits are identical in rq->cmd_flags and bio->bi_rw */
	rq->cmd_flags |= (bio->bi_rw & 3);

	rq->nr_phys_segments = bio_phys_segments(q, bio);
	rq->nr_hw_segments = bio_hw_segments(q, bio);
	rq->current_nr_sectors = bio_cur_sectors(bio);
	rq->hard_cur_sectors = rq->current_nr_sectors;
	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
	rq->buffer = bio_data(bio);
	rq->data_len = bio->bi_size;

	rq->bio = rq->biotail = bio;

	if (bio->bi_bdev)
		rq->rq_disk = bio->bi_bdev->bd_disk;
}

int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

void kblockd_flush_work(struct work_struct *work)
{
	cancel_work_sync(work);
}
EXPORT_SYMBOL(kblockd_flush_work);

int __init blk_dev_init(void)
{
	int i;

	kblockd_workqueue = create_workqueue("kblockd");
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	request_cachep = kmem_cache_create("blkdev_requests",
			sizeof(struct request), 0, SLAB_PANIC, NULL);

	blk_requestq_cachep = kmem_cache_create("blkdev_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	for_each_possible_cpu(i)
		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));

	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
	register_hotcpu_notifier(&blk_cpu_notifier);

	return 0;
}