1 /* 2 * Copyright (C) 1991, 1992 Linus Torvalds 3 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics 4 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE 5 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de> 6 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> 7 * - July2000 8 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001 9 */ 10 11 /* 12 * This handles all read/write requests to block devices 13 */ 14 #include <linux/kernel.h> 15 #include <linux/module.h> 16 #include <linux/backing-dev.h> 17 #include <linux/bio.h> 18 #include <linux/blkdev.h> 19 #include <linux/highmem.h> 20 #include <linux/mm.h> 21 #include <linux/kernel_stat.h> 22 #include <linux/string.h> 23 #include <linux/init.h> 24 #include <linux/completion.h> 25 #include <linux/slab.h> 26 #include <linux/swap.h> 27 #include <linux/writeback.h> 28 #include <linux/task_io_accounting_ops.h> 29 #include <linux/fault-inject.h> 30 #include <linux/list_sort.h> 31 #include <linux/delay.h> 32 33 #define CREATE_TRACE_POINTS 34 #include <trace/events/block.h> 35 36 #include "blk.h" 37 38 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap); 39 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); 40 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); 41 42 DEFINE_IDA(blk_queue_ida); 43 44 /* 45 * For the allocated request tables 46 */ 47 static struct kmem_cache *request_cachep; 48 49 /* 50 * For queue allocation 51 */ 52 struct kmem_cache *blk_requestq_cachep; 53 54 /* 55 * Controlling structure to kblockd 56 */ 57 static struct workqueue_struct *kblockd_workqueue; 58 59 static void drive_stat_acct(struct request *rq, int new_io) 60 { 61 struct hd_struct *part; 62 int rw = rq_data_dir(rq); 63 int cpu; 64 65 if (!blk_do_io_stat(rq)) 66 return; 67 68 cpu = part_stat_lock(); 69 70 if (!new_io) { 71 part = rq->part; 72 part_stat_inc(cpu, part, merges[rw]); 73 } else { 74 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); 75 if (!hd_struct_try_get(part)) { 76 /* 77 * The partition is already being removed, 78 * the request will be accounted on the disk only 79 * 80 * We take a reference on disk->part0 although that 81 * partition will never be deleted, so we can treat 82 * it as any other partition. 83 */ 84 part = &rq->rq_disk->part0; 85 hd_struct_get(part); 86 } 87 part_round_stats(cpu, part); 88 part_inc_in_flight(part, rw); 89 rq->part = part; 90 } 91 92 part_stat_unlock(); 93 } 94 95 void blk_queue_congestion_threshold(struct request_queue *q) 96 { 97 int nr; 98 99 nr = q->nr_requests - (q->nr_requests / 8) + 1; 100 if (nr > q->nr_requests) 101 nr = q->nr_requests; 102 q->nr_congestion_on = nr; 103 104 nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1; 105 if (nr < 1) 106 nr = 1; 107 q->nr_congestion_off = nr; 108 } 109 110 /** 111 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info 112 * @bdev: device 113 * 114 * Locates the passed device's request queue and returns the address of its 115 * backing_dev_info 116 * 117 * Will return NULL if the request queue cannot be located. 
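 *
 * An illustrative sketch (not part of the original file; @bdev is assumed
 * to be a valid, opened &struct block_device): a caller that wants to tune
 * readahead on the underlying device could use the helper like this.
 *
 *     struct backing_dev_info *bdi = blk_get_backing_dev_info(bdev);
 *
 *     if (bdi)
 *         bdi->ra_pages = (512 * 1024) / PAGE_CACHE_SIZE;
 *
 * The NULL check matters because the queue may already be gone while the
 * block_device itself is still reachable.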
118 */ 119 struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev) 120 { 121 struct backing_dev_info *ret = NULL; 122 struct request_queue *q = bdev_get_queue(bdev); 123 124 if (q) 125 ret = &q->backing_dev_info; 126 return ret; 127 } 128 EXPORT_SYMBOL(blk_get_backing_dev_info); 129 130 void blk_rq_init(struct request_queue *q, struct request *rq) 131 { 132 memset(rq, 0, sizeof(*rq)); 133 134 INIT_LIST_HEAD(&rq->queuelist); 135 INIT_LIST_HEAD(&rq->timeout_list); 136 rq->cpu = -1; 137 rq->q = q; 138 rq->__sector = (sector_t) -1; 139 INIT_HLIST_NODE(&rq->hash); 140 RB_CLEAR_NODE(&rq->rb_node); 141 rq->cmd = rq->__cmd; 142 rq->cmd_len = BLK_MAX_CDB; 143 rq->tag = -1; 144 rq->ref_count = 1; 145 rq->start_time = jiffies; 146 set_start_time_ns(rq); 147 rq->part = NULL; 148 } 149 EXPORT_SYMBOL(blk_rq_init); 150 151 static void req_bio_endio(struct request *rq, struct bio *bio, 152 unsigned int nbytes, int error) 153 { 154 if (error) 155 clear_bit(BIO_UPTODATE, &bio->bi_flags); 156 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) 157 error = -EIO; 158 159 if (unlikely(nbytes > bio->bi_size)) { 160 printk(KERN_ERR "%s: want %u bytes done, %u left\n", 161 __func__, nbytes, bio->bi_size); 162 nbytes = bio->bi_size; 163 } 164 165 if (unlikely(rq->cmd_flags & REQ_QUIET)) 166 set_bit(BIO_QUIET, &bio->bi_flags); 167 168 bio->bi_size -= nbytes; 169 bio->bi_sector += (nbytes >> 9); 170 171 if (bio_integrity(bio)) 172 bio_integrity_advance(bio, nbytes); 173 174 /* don't actually finish bio if it's part of flush sequence */ 175 if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) 176 bio_endio(bio, error); 177 } 178 179 void blk_dump_rq_flags(struct request *rq, char *msg) 180 { 181 int bit; 182 183 printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg, 184 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, 185 rq->cmd_flags); 186 187 printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n", 188 (unsigned long long)blk_rq_pos(rq), 189 blk_rq_sectors(rq), blk_rq_cur_sectors(rq)); 190 printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n", 191 rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq)); 192 193 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 194 printk(KERN_INFO " cdb: "); 195 for (bit = 0; bit < BLK_MAX_CDB; bit++) 196 printk("%02x ", rq->cmd[bit]); 197 printk("\n"); 198 } 199 } 200 EXPORT_SYMBOL(blk_dump_rq_flags); 201 202 static void blk_delay_work(struct work_struct *work) 203 { 204 struct request_queue *q; 205 206 q = container_of(work, struct request_queue, delay_work.work); 207 spin_lock_irq(q->queue_lock); 208 __blk_run_queue(q); 209 spin_unlock_irq(q->queue_lock); 210 } 211 212 /** 213 * blk_delay_queue - restart queueing after defined interval 214 * @q: The &struct request_queue in question 215 * @msecs: Delay in msecs 216 * 217 * Description: 218 * Sometimes queueing needs to be postponed for a little while, to allow 219 * resources to come back. This function will make sure that queueing is 220 * restarted around the specified time. 221 */ 222 void blk_delay_queue(struct request_queue *q, unsigned long msecs) 223 { 224 queue_delayed_work(kblockd_workqueue, &q->delay_work, 225 msecs_to_jiffies(msecs)); 226 } 227 EXPORT_SYMBOL(blk_delay_queue); 228 229 /** 230 * blk_start_queue - restart a previously stopped queue 231 * @q: The &struct request_queue in question 232 * 233 * Description: 234 * blk_start_queue() will clear the stop flag on the queue, and call 235 * the request_fn for the queue if it was in a stopped state when 236 * entered. Also see blk_stop_queue(). 
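 *
 * A minimal sketch of the intended pairing (illustration only; the mydrv_*
 * names are hypothetical): stop the queue when the hardware is full, and
 * restart it from the completion interrupt.
 *
 *     static void mydrv_request_fn(struct request_queue *q)
 *     {
 *         struct request *rq;
 *
 *         while ((rq = blk_peek_request(q)) != NULL) {
 *             if (mydrv_hw_full(q->queuedata)) {
 *                 blk_stop_queue(q);    // queue_lock already held here
 *                 break;
 *             }
 *             blk_start_request(rq);
 *             mydrv_queue_to_hw(rq);
 *         }
 *     }
 *
 *     // later, from the IRQ handler, with q->queue_lock taken:
 *     //     blk_start_queue(q);
 *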
 * Queue lock must be held.
 **/
void blk_start_queue(struct request_queue *q)
{
        WARN_ON(!irqs_disabled());

        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
        __blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q: The &struct request_queue in question
 *
 * Description:
 * The Linux block layer assumes that a block driver will consume all
 * entries on the request queue when the request_fn strategy is called.
 * Often this will not happen, because of hardware limitations (queue
 * depth settings). If a device driver gets a 'queue full' response,
 * or if it simply chooses not to queue more I/O at one point, it can
 * call this function to prevent the request_fn from being called until
 * the driver has signalled it is ready to go again. This happens by
 * calling blk_start_queue() to restart queue operations. Queue lock
 * must be held.
 **/
void blk_stop_queue(struct request_queue *q)
{
        __cancel_delayed_work(&q->delay_work);
        queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 * The block layer may perform asynchronous callback activity
 * on a queue, such as calling the unplug function after a timeout.
 * A block device may call blk_sync_queue to ensure that any
 * such activity is cancelled, thus allowing it to release resources
 * that the callbacks might use. The caller must already have made sure
 * that its ->make_request_fn will not re-add plugging prior to calling
 * this function.
 *
 * This function does not cancel any asynchronous activity arising
 * out of elevator or throttling code. That would require elevator_exit()
 * and blk_throtl_exit() to be called with the queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
        del_timer_sync(&q->timeout);
        cancel_delayed_work_sync(&q->delay_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * __blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 * See @blk_run_queue. This variant must be called with the queue lock
 * held and interrupts disabled.
 */
void __blk_run_queue(struct request_queue *q)
{
        if (unlikely(blk_queue_stopped(q)))
                return;

        q->request_fn(q);
}
EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q: The queue to run
 *
 * Description:
 * Tells kblockd to perform the equivalent of @blk_run_queue on behalf
 * of us.
 */
void blk_run_queue_async(struct request_queue *q)
{
        if (likely(!blk_queue_stopped(q))) {
                __cancel_delayed_work(&q->delay_work);
                queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
        }
}
EXPORT_SYMBOL(blk_run_queue_async);

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 * Invoke request handling on this queue, if it has pending work to do.
 * May be used to restart queueing when a request has completed.
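 *
 * An illustrative sketch (hypothetical mydrv_* names): a completion path
 * that has just freed a device slot can kick the queue again without
 * caring about the caller's lock state.
 *
 *     static void mydrv_irq_done(struct mydrv_dev *dev)
 *     {
 *         struct request *rq = dev->inflight_rq;
 *
 *         dev->inflight_rq = NULL;
 *         blk_end_request_all(rq, 0);    // takes queue_lock internally
 *         blk_run_queue(dev->queue);     // ditto, with IRQs saved/restored
 *     }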
334 */ 335 void blk_run_queue(struct request_queue *q) 336 { 337 unsigned long flags; 338 339 spin_lock_irqsave(q->queue_lock, flags); 340 __blk_run_queue(q); 341 spin_unlock_irqrestore(q->queue_lock, flags); 342 } 343 EXPORT_SYMBOL(blk_run_queue); 344 345 void blk_put_queue(struct request_queue *q) 346 { 347 kobject_put(&q->kobj); 348 } 349 EXPORT_SYMBOL(blk_put_queue); 350 351 /** 352 * blk_drain_queue - drain requests from request_queue 353 * @q: queue to drain 354 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV 355 * 356 * Drain requests from @q. If @drain_all is set, all requests are drained. 357 * If not, only ELVPRIV requests are drained. The caller is responsible 358 * for ensuring that no new requests which need to be drained are queued. 359 */ 360 void blk_drain_queue(struct request_queue *q, bool drain_all) 361 { 362 while (true) { 363 bool drain = false; 364 int i; 365 366 spin_lock_irq(q->queue_lock); 367 368 elv_drain_elevator(q); 369 if (drain_all) 370 blk_throtl_drain(q); 371 372 /* 373 * This function might be called on a queue which failed 374 * driver init after queue creation. Some drivers 375 * (e.g. fd) get unhappy in such cases. Kick queue iff 376 * dispatch queue has something on it. 377 */ 378 if (!list_empty(&q->queue_head)) 379 __blk_run_queue(q); 380 381 drain |= q->rq.elvpriv; 382 383 /* 384 * Unfortunately, requests are queued at and tracked from 385 * multiple places and there's no single counter which can 386 * be drained. Check all the queues and counters. 387 */ 388 if (drain_all) { 389 drain |= !list_empty(&q->queue_head); 390 for (i = 0; i < 2; i++) { 391 drain |= q->rq.count[i]; 392 drain |= q->in_flight[i]; 393 drain |= !list_empty(&q->flush_queue[i]); 394 } 395 } 396 397 spin_unlock_irq(q->queue_lock); 398 399 if (!drain) 400 break; 401 msleep(10); 402 } 403 } 404 405 /** 406 * blk_cleanup_queue - shutdown a request queue 407 * @q: request queue to shutdown 408 * 409 * Mark @q DEAD, drain all pending requests, destroy and put it. All 410 * future requests will be failed immediately with -ENODEV. 411 */ 412 void blk_cleanup_queue(struct request_queue *q) 413 { 414 spinlock_t *lock = q->queue_lock; 415 416 /* mark @q DEAD, no new request or merges will be allowed afterwards */ 417 mutex_lock(&q->sysfs_lock); 418 queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q); 419 420 spin_lock_irq(lock); 421 queue_flag_set(QUEUE_FLAG_NOMERGES, q); 422 queue_flag_set(QUEUE_FLAG_NOXMERGES, q); 423 queue_flag_set(QUEUE_FLAG_DEAD, q); 424 425 if (q->queue_lock != &q->__queue_lock) 426 q->queue_lock = &q->__queue_lock; 427 428 spin_unlock_irq(lock); 429 mutex_unlock(&q->sysfs_lock); 430 431 /* 432 * Drain all requests queued before DEAD marking. The caller might 433 * be trying to tear down @q before its elevator is initialized, in 434 * which case we don't want to call into draining. 
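 *
 * (For context, an illustrative caller-side sketch with hypothetical
 * mydrv_* names: the usual ordering at driver unload is
 *
 *     del_gendisk(mydrv->disk);
 *     blk_cleanup_queue(mydrv->queue);
 *
 * so that no new I/O can be issued while the draining below runs.)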
435 */ 436 if (q->elevator) 437 blk_drain_queue(q, true); 438 439 /* @q won't process any more request, flush async actions */ 440 del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer); 441 blk_sync_queue(q); 442 443 /* @q is and will stay empty, shutdown and put */ 444 blk_put_queue(q); 445 } 446 EXPORT_SYMBOL(blk_cleanup_queue); 447 448 static int blk_init_free_list(struct request_queue *q) 449 { 450 struct request_list *rl = &q->rq; 451 452 if (unlikely(rl->rq_pool)) 453 return 0; 454 455 rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; 456 rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; 457 rl->elvpriv = 0; 458 init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); 459 init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); 460 461 rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, 462 mempool_free_slab, request_cachep, q->node); 463 464 if (!rl->rq_pool) 465 return -ENOMEM; 466 467 return 0; 468 } 469 470 struct request_queue *blk_alloc_queue(gfp_t gfp_mask) 471 { 472 return blk_alloc_queue_node(gfp_mask, -1); 473 } 474 EXPORT_SYMBOL(blk_alloc_queue); 475 476 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) 477 { 478 struct request_queue *q; 479 int err; 480 481 q = kmem_cache_alloc_node(blk_requestq_cachep, 482 gfp_mask | __GFP_ZERO, node_id); 483 if (!q) 484 return NULL; 485 486 q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL); 487 if (q->id < 0) 488 goto fail_q; 489 490 q->backing_dev_info.ra_pages = 491 (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 492 q->backing_dev_info.state = 0; 493 q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; 494 q->backing_dev_info.name = "block"; 495 q->node = node_id; 496 497 err = bdi_init(&q->backing_dev_info); 498 if (err) 499 goto fail_id; 500 501 if (blk_throtl_init(q)) 502 goto fail_id; 503 504 setup_timer(&q->backing_dev_info.laptop_mode_wb_timer, 505 laptop_mode_timer_fn, (unsigned long) q); 506 setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); 507 INIT_LIST_HEAD(&q->timeout_list); 508 INIT_LIST_HEAD(&q->icq_list); 509 INIT_LIST_HEAD(&q->flush_queue[0]); 510 INIT_LIST_HEAD(&q->flush_queue[1]); 511 INIT_LIST_HEAD(&q->flush_data_in_flight); 512 INIT_DELAYED_WORK(&q->delay_work, blk_delay_work); 513 514 kobject_init(&q->kobj, &blk_queue_ktype); 515 516 mutex_init(&q->sysfs_lock); 517 spin_lock_init(&q->__queue_lock); 518 519 /* 520 * By default initialize queue_lock to internal lock and driver can 521 * override it later if need be. 522 */ 523 q->queue_lock = &q->__queue_lock; 524 525 return q; 526 527 fail_id: 528 ida_simple_remove(&blk_queue_ida, q->id); 529 fail_q: 530 kmem_cache_free(blk_requestq_cachep, q); 531 return NULL; 532 } 533 EXPORT_SYMBOL(blk_alloc_queue_node); 534 535 /** 536 * blk_init_queue - prepare a request queue for use with a block device 537 * @rfn: The function to be called to process requests that have been 538 * placed on the queue. 539 * @lock: Request queue spin lock 540 * 541 * Description: 542 * If a block device wishes to use the standard request handling procedures, 543 * which sorts requests and coalesces adjacent requests, then it must 544 * call blk_init_queue(). The function @rfn will be called when there 545 * are requests on the queue that need to be processed. If the device 546 * supports plugging, then @rfn may not be called immediately when requests 547 * are available on the queue, but may be called at some time later instead. 
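 *
 * (An illustrative registration sketch; the mydrv_* names are hypothetical
 * and error handling is trimmed:
 *
 *     spin_lock_init(&mydrv->lock);
 *     mydrv->queue = blk_init_queue(mydrv_request_fn, &mydrv->lock);
 *     if (!mydrv->queue)
 *         return -ENOMEM;
 *     blk_queue_max_hw_sectors(mydrv->queue, 256);
 *     mydrv->disk->queue = mydrv->queue;
 *     add_disk(mydrv->disk);
 *
 * The pairing blk_cleanup_queue() call is described in the Note below.)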
548 * Plugged queues are generally unplugged when a buffer belonging to one 549 * of the requests on the queue is needed, or due to memory pressure. 550 * 551 * @rfn is not required, or even expected, to remove all requests off the 552 * queue, but only as many as it can handle at a time. If it does leave 553 * requests on the queue, it is responsible for arranging that the requests 554 * get dealt with eventually. 555 * 556 * The queue spin lock must be held while manipulating the requests on the 557 * request queue; this lock will be taken also from interrupt context, so irq 558 * disabling is needed for it. 559 * 560 * Function returns a pointer to the initialized request queue, or %NULL if 561 * it didn't succeed. 562 * 563 * Note: 564 * blk_init_queue() must be paired with a blk_cleanup_queue() call 565 * when the block device is deactivated (such as at module unload). 566 **/ 567 568 struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock) 569 { 570 return blk_init_queue_node(rfn, lock, -1); 571 } 572 EXPORT_SYMBOL(blk_init_queue); 573 574 struct request_queue * 575 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) 576 { 577 struct request_queue *uninit_q, *q; 578 579 uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id); 580 if (!uninit_q) 581 return NULL; 582 583 q = blk_init_allocated_queue(uninit_q, rfn, lock); 584 if (!q) 585 blk_cleanup_queue(uninit_q); 586 587 return q; 588 } 589 EXPORT_SYMBOL(blk_init_queue_node); 590 591 struct request_queue * 592 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, 593 spinlock_t *lock) 594 { 595 if (!q) 596 return NULL; 597 598 if (blk_init_free_list(q)) 599 return NULL; 600 601 q->request_fn = rfn; 602 q->prep_rq_fn = NULL; 603 q->unprep_rq_fn = NULL; 604 q->queue_flags = QUEUE_FLAG_DEFAULT; 605 606 /* Override internal queue lock with supplied lock pointer */ 607 if (lock) 608 q->queue_lock = lock; 609 610 /* 611 * This also sets hw/phys segments, boundary and size 612 */ 613 blk_queue_make_request(q, blk_queue_bio); 614 615 q->sg_reserved_size = INT_MAX; 616 617 /* 618 * all done 619 */ 620 if (!elevator_init(q, NULL)) { 621 blk_queue_congestion_threshold(q); 622 return q; 623 } 624 625 return NULL; 626 } 627 EXPORT_SYMBOL(blk_init_allocated_queue); 628 629 bool blk_get_queue(struct request_queue *q) 630 { 631 if (likely(!blk_queue_dead(q))) { 632 __blk_get_queue(q); 633 return true; 634 } 635 636 return false; 637 } 638 EXPORT_SYMBOL(blk_get_queue); 639 640 static inline void blk_free_request(struct request_queue *q, struct request *rq) 641 { 642 if (rq->cmd_flags & REQ_ELVPRIV) { 643 elv_put_request(q, rq); 644 if (rq->elv.icq) 645 put_io_context(rq->elv.icq->ioc); 646 } 647 648 mempool_free(rq, q->rq.rq_pool); 649 } 650 651 static struct request * 652 blk_alloc_request(struct request_queue *q, struct io_cq *icq, 653 unsigned int flags, gfp_t gfp_mask) 654 { 655 struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); 656 657 if (!rq) 658 return NULL; 659 660 blk_rq_init(q, rq); 661 662 rq->cmd_flags = flags | REQ_ALLOCED; 663 664 if (flags & REQ_ELVPRIV) { 665 rq->elv.icq = icq; 666 if (unlikely(elv_set_request(q, rq, gfp_mask))) { 667 mempool_free(rq, q->rq.rq_pool); 668 return NULL; 669 } 670 /* @rq->elv.icq holds on to io_context until @rq is freed */ 671 if (icq) 672 get_io_context(icq->ioc); 673 } 674 675 return rq; 676 } 677 678 /* 679 * ioc_batching returns true if the ioc is a valid batching request and 680 * should be given priority access to a request. 
681 */ 682 static inline int ioc_batching(struct request_queue *q, struct io_context *ioc) 683 { 684 if (!ioc) 685 return 0; 686 687 /* 688 * Make sure the process is able to allocate at least 1 request 689 * even if the batch times out, otherwise we could theoretically 690 * lose wakeups. 691 */ 692 return ioc->nr_batch_requests == q->nr_batching || 693 (ioc->nr_batch_requests > 0 694 && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME)); 695 } 696 697 /* 698 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This 699 * will cause the process to be a "batcher" on all queues in the system. This 700 * is the behaviour we want though - once it gets a wakeup it should be given 701 * a nice run. 702 */ 703 static void ioc_set_batching(struct request_queue *q, struct io_context *ioc) 704 { 705 if (!ioc || ioc_batching(q, ioc)) 706 return; 707 708 ioc->nr_batch_requests = q->nr_batching; 709 ioc->last_waited = jiffies; 710 } 711 712 static void __freed_request(struct request_queue *q, int sync) 713 { 714 struct request_list *rl = &q->rq; 715 716 if (rl->count[sync] < queue_congestion_off_threshold(q)) 717 blk_clear_queue_congested(q, sync); 718 719 if (rl->count[sync] + 1 <= q->nr_requests) { 720 if (waitqueue_active(&rl->wait[sync])) 721 wake_up(&rl->wait[sync]); 722 723 blk_clear_queue_full(q, sync); 724 } 725 } 726 727 /* 728 * A request has just been released. Account for it, update the full and 729 * congestion status, wake up any waiters. Called under q->queue_lock. 730 */ 731 static void freed_request(struct request_queue *q, unsigned int flags) 732 { 733 struct request_list *rl = &q->rq; 734 int sync = rw_is_sync(flags); 735 736 rl->count[sync]--; 737 if (flags & REQ_ELVPRIV) 738 rl->elvpriv--; 739 740 __freed_request(q, sync); 741 742 if (unlikely(rl->starved[sync ^ 1])) 743 __freed_request(q, sync ^ 1); 744 } 745 746 /* 747 * Determine if elevator data should be initialized when allocating the 748 * request associated with @bio. 749 */ 750 static bool blk_rq_should_init_elevator(struct bio *bio) 751 { 752 if (!bio) 753 return true; 754 755 /* 756 * Flush requests do not use the elevator so skip initialization. 757 * This allows a request to share the flush and elevator data. 758 */ 759 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) 760 return false; 761 762 return true; 763 } 764 765 /** 766 * get_request - get a free request 767 * @q: request_queue to allocate request from 768 * @rw_flags: RW and SYNC flags 769 * @bio: bio to allocate request for (can be %NULL) 770 * @gfp_mask: allocation mask 771 * 772 * Get a free request from @q. This function may fail under memory 773 * pressure or if @q is dead. 774 * 775 * Must be callled with @q->queue_lock held and, 776 * Returns %NULL on failure, with @q->queue_lock held. 777 * Returns !%NULL on success, with @q->queue_lock *not held*. 
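 *
 * The asymmetric locking is easiest to see from the caller's side; this is
 * essentially what blk_get_request() below does (illustration only):
 *
 *     spin_lock_irq(q->queue_lock);
 *     rq = get_request(q, rw_flags, NULL, gfp_mask);
 *     if (!rq)
 *         spin_unlock_irq(q->queue_lock);    // failure: still locked
 *     // on success, get_request() has already dropped the lock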
778 */ 779 static struct request *get_request(struct request_queue *q, int rw_flags, 780 struct bio *bio, gfp_t gfp_mask) 781 { 782 struct request *rq = NULL; 783 struct request_list *rl = &q->rq; 784 struct elevator_type *et; 785 struct io_context *ioc; 786 struct io_cq *icq = NULL; 787 const bool is_sync = rw_is_sync(rw_flags) != 0; 788 bool retried = false; 789 int may_queue; 790 retry: 791 et = q->elevator->type; 792 ioc = current->io_context; 793 794 if (unlikely(blk_queue_dead(q))) 795 return NULL; 796 797 may_queue = elv_may_queue(q, rw_flags); 798 if (may_queue == ELV_MQUEUE_NO) 799 goto rq_starved; 800 801 if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { 802 if (rl->count[is_sync]+1 >= q->nr_requests) { 803 /* 804 * We want ioc to record batching state. If it's 805 * not already there, creating a new one requires 806 * dropping queue_lock, which in turn requires 807 * retesting conditions to avoid queue hang. 808 */ 809 if (!ioc && !retried) { 810 spin_unlock_irq(q->queue_lock); 811 create_io_context(current, gfp_mask, q->node); 812 spin_lock_irq(q->queue_lock); 813 retried = true; 814 goto retry; 815 } 816 817 /* 818 * The queue will fill after this allocation, so set 819 * it as full, and mark this process as "batching". 820 * This process will be allowed to complete a batch of 821 * requests, others will be blocked. 822 */ 823 if (!blk_queue_full(q, is_sync)) { 824 ioc_set_batching(q, ioc); 825 blk_set_queue_full(q, is_sync); 826 } else { 827 if (may_queue != ELV_MQUEUE_MUST 828 && !ioc_batching(q, ioc)) { 829 /* 830 * The queue is full and the allocating 831 * process is not a "batcher", and not 832 * exempted by the IO scheduler 833 */ 834 goto out; 835 } 836 } 837 } 838 blk_set_queue_congested(q, is_sync); 839 } 840 841 /* 842 * Only allow batching queuers to allocate up to 50% over the defined 843 * limit of requests, otherwise we could have thousands of requests 844 * allocated with any setting of ->nr_requests 845 */ 846 if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) 847 goto out; 848 849 rl->count[is_sync]++; 850 rl->starved[is_sync] = 0; 851 852 /* 853 * Decide whether the new request will be managed by elevator. If 854 * so, mark @rw_flags and increment elvpriv. Non-zero elvpriv will 855 * prevent the current elevator from being destroyed until the new 856 * request is freed. This guarantees icq's won't be destroyed and 857 * makes creating new ones safe. 858 * 859 * Also, lookup icq while holding queue_lock. If it doesn't exist, 860 * it will be created after releasing queue_lock. 861 */ 862 if (blk_rq_should_init_elevator(bio) && 863 !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) { 864 rw_flags |= REQ_ELVPRIV; 865 rl->elvpriv++; 866 if (et->icq_cache && ioc) 867 icq = ioc_lookup_icq(ioc, q); 868 } 869 870 if (blk_queue_io_stat(q)) 871 rw_flags |= REQ_IO_STAT; 872 spin_unlock_irq(q->queue_lock); 873 874 /* create icq if missing */ 875 if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) { 876 icq = ioc_create_icq(q, gfp_mask); 877 if (!icq) 878 goto fail_icq; 879 } 880 881 rq = blk_alloc_request(q, icq, rw_flags, gfp_mask); 882 883 fail_icq: 884 if (unlikely(!rq)) { 885 /* 886 * Allocation failed presumably due to memory. Undo anything 887 * we might have messed up. 888 * 889 * Allocating task should really be put onto the front of the 890 * wait queue, but this is pretty rare. 
891 */ 892 spin_lock_irq(q->queue_lock); 893 freed_request(q, rw_flags); 894 895 /* 896 * in the very unlikely event that allocation failed and no 897 * requests for this direction was pending, mark us starved 898 * so that freeing of a request in the other direction will 899 * notice us. another possible fix would be to split the 900 * rq mempool into READ and WRITE 901 */ 902 rq_starved: 903 if (unlikely(rl->count[is_sync] == 0)) 904 rl->starved[is_sync] = 1; 905 906 goto out; 907 } 908 909 /* 910 * ioc may be NULL here, and ioc_batching will be false. That's 911 * OK, if the queue is under the request limit then requests need 912 * not count toward the nr_batch_requests limit. There will always 913 * be some limit enforced by BLK_BATCH_TIME. 914 */ 915 if (ioc_batching(q, ioc)) 916 ioc->nr_batch_requests--; 917 918 trace_block_getrq(q, bio, rw_flags & 1); 919 out: 920 return rq; 921 } 922 923 /** 924 * get_request_wait - get a free request with retry 925 * @q: request_queue to allocate request from 926 * @rw_flags: RW and SYNC flags 927 * @bio: bio to allocate request for (can be %NULL) 928 * 929 * Get a free request from @q. This function keeps retrying under memory 930 * pressure and fails iff @q is dead. 931 * 932 * Must be callled with @q->queue_lock held and, 933 * Returns %NULL on failure, with @q->queue_lock held. 934 * Returns !%NULL on success, with @q->queue_lock *not held*. 935 */ 936 static struct request *get_request_wait(struct request_queue *q, int rw_flags, 937 struct bio *bio) 938 { 939 const bool is_sync = rw_is_sync(rw_flags) != 0; 940 struct request *rq; 941 942 rq = get_request(q, rw_flags, bio, GFP_NOIO); 943 while (!rq) { 944 DEFINE_WAIT(wait); 945 struct request_list *rl = &q->rq; 946 947 if (unlikely(blk_queue_dead(q))) 948 return NULL; 949 950 prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, 951 TASK_UNINTERRUPTIBLE); 952 953 trace_block_sleeprq(q, bio, rw_flags & 1); 954 955 spin_unlock_irq(q->queue_lock); 956 io_schedule(); 957 958 /* 959 * After sleeping, we become a "batching" process and 960 * will be able to allocate at least one request, and 961 * up to a big batch of them for a small period time. 962 * See ioc_batching, ioc_set_batching 963 */ 964 create_io_context(current, GFP_NOIO, q->node); 965 ioc_set_batching(q, current->io_context); 966 967 spin_lock_irq(q->queue_lock); 968 finish_wait(&rl->wait[is_sync], &wait); 969 970 rq = get_request(q, rw_flags, bio, GFP_NOIO); 971 }; 972 973 return rq; 974 } 975 976 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) 977 { 978 struct request *rq; 979 980 BUG_ON(rw != READ && rw != WRITE); 981 982 spin_lock_irq(q->queue_lock); 983 if (gfp_mask & __GFP_WAIT) 984 rq = get_request_wait(q, rw, NULL); 985 else 986 rq = get_request(q, rw, NULL, gfp_mask); 987 if (!rq) 988 spin_unlock_irq(q->queue_lock); 989 /* q->queue_lock is unlocked at this point */ 990 991 return rq; 992 } 993 EXPORT_SYMBOL(blk_get_request); 994 995 /** 996 * blk_make_request - given a bio, allocate a corresponding struct request. 997 * @q: target request queue 998 * @bio: The bio describing the memory mappings that will be submitted for IO. 999 * It may be a chained-bio properly constructed by block/bio layer. 1000 * @gfp_mask: gfp flags to be used for memory allocation 1001 * 1002 * blk_make_request is the parallel of generic_make_request for BLOCK_PC 1003 * type commands. Where the struct request needs to be farther initialized by 1004 * the caller. 
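 *
 * An illustrative sketch (hypothetical variables, error handling trimmed):
 * a typical caller builds the bio chain, turns it into a BLOCK_PC request,
 * fills in the command bytes and executes it.
 *
 *     rq = blk_make_request(q, bio, GFP_KERNEL);
 *     if (IS_ERR(rq))
 *         return PTR_ERR(rq);
 *     rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *     rq->cmd_len = COMMAND_SIZE(cmd[0]);
 *     memcpy(rq->cmd, cmd, rq->cmd_len);
 *     rq->timeout = 60 * HZ;
 *     blk_execute_rq(q, NULL, rq, 0);
 *     blk_put_request(rq);
 *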
It is passed a &struct bio, which describes the memory info of 1005 * the I/O transfer. 1006 * 1007 * The caller of blk_make_request must make sure that bi_io_vec 1008 * are set to describe the memory buffers. That bio_data_dir() will return 1009 * the needed direction of the request. (And all bio's in the passed bio-chain 1010 * are properly set accordingly) 1011 * 1012 * If called under none-sleepable conditions, mapped bio buffers must not 1013 * need bouncing, by calling the appropriate masked or flagged allocator, 1014 * suitable for the target device. Otherwise the call to blk_queue_bounce will 1015 * BUG. 1016 * 1017 * WARNING: When allocating/cloning a bio-chain, careful consideration should be 1018 * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for 1019 * anything but the first bio in the chain. Otherwise you risk waiting for IO 1020 * completion of a bio that hasn't been submitted yet, thus resulting in a 1021 * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead 1022 * of bio_alloc(), as that avoids the mempool deadlock. 1023 * If possible a big IO should be split into smaller parts when allocation 1024 * fails. Partial allocation should not be an error, or you risk a live-lock. 1025 */ 1026 struct request *blk_make_request(struct request_queue *q, struct bio *bio, 1027 gfp_t gfp_mask) 1028 { 1029 struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask); 1030 1031 if (unlikely(!rq)) 1032 return ERR_PTR(-ENOMEM); 1033 1034 for_each_bio(bio) { 1035 struct bio *bounce_bio = bio; 1036 int ret; 1037 1038 blk_queue_bounce(q, &bounce_bio); 1039 ret = blk_rq_append_bio(q, rq, bounce_bio); 1040 if (unlikely(ret)) { 1041 blk_put_request(rq); 1042 return ERR_PTR(ret); 1043 } 1044 } 1045 1046 return rq; 1047 } 1048 EXPORT_SYMBOL(blk_make_request); 1049 1050 /** 1051 * blk_requeue_request - put a request back on queue 1052 * @q: request queue where request should be inserted 1053 * @rq: request to be inserted 1054 * 1055 * Description: 1056 * Drivers often keep queueing requests until the hardware cannot accept 1057 * more, when that condition happens we need to put the request back 1058 * on the queue. Must be called with queue lock held. 1059 */ 1060 void blk_requeue_request(struct request_queue *q, struct request *rq) 1061 { 1062 blk_delete_timer(rq); 1063 blk_clear_rq_complete(rq); 1064 trace_block_rq_requeue(q, rq); 1065 1066 if (blk_rq_tagged(rq)) 1067 blk_queue_end_tag(q, rq); 1068 1069 BUG_ON(blk_queued_rq(rq)); 1070 1071 elv_requeue_request(q, rq); 1072 } 1073 EXPORT_SYMBOL(blk_requeue_request); 1074 1075 static void add_acct_request(struct request_queue *q, struct request *rq, 1076 int where) 1077 { 1078 drive_stat_acct(rq, 1); 1079 __elv_add_request(q, rq, where); 1080 } 1081 1082 static void part_round_stats_single(int cpu, struct hd_struct *part, 1083 unsigned long now) 1084 { 1085 if (now == part->stamp) 1086 return; 1087 1088 if (part_in_flight(part)) { 1089 __part_stat_add(cpu, part, time_in_queue, 1090 part_in_flight(part) * (now - part->stamp)); 1091 __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); 1092 } 1093 part->stamp = now; 1094 } 1095 1096 /** 1097 * part_round_stats() - Round off the performance stats on a struct disk_stats. 
1098 * @cpu: cpu number for stats access 1099 * @part: target partition 1100 * 1101 * The average IO queue length and utilisation statistics are maintained 1102 * by observing the current state of the queue length and the amount of 1103 * time it has been in this state for. 1104 * 1105 * Normally, that accounting is done on IO completion, but that can result 1106 * in more than a second's worth of IO being accounted for within any one 1107 * second, leading to >100% utilisation. To deal with that, we call this 1108 * function to do a round-off before returning the results when reading 1109 * /proc/diskstats. This accounts immediately for all queue usage up to 1110 * the current jiffies and restarts the counters again. 1111 */ 1112 void part_round_stats(int cpu, struct hd_struct *part) 1113 { 1114 unsigned long now = jiffies; 1115 1116 if (part->partno) 1117 part_round_stats_single(cpu, &part_to_disk(part)->part0, now); 1118 part_round_stats_single(cpu, part, now); 1119 } 1120 EXPORT_SYMBOL_GPL(part_round_stats); 1121 1122 /* 1123 * queue lock must be held 1124 */ 1125 void __blk_put_request(struct request_queue *q, struct request *req) 1126 { 1127 if (unlikely(!q)) 1128 return; 1129 if (unlikely(--req->ref_count)) 1130 return; 1131 1132 elv_completed_request(q, req); 1133 1134 /* this is a bio leak */ 1135 WARN_ON(req->bio != NULL); 1136 1137 /* 1138 * Request may not have originated from ll_rw_blk. if not, 1139 * it didn't come out of our reserved rq pools 1140 */ 1141 if (req->cmd_flags & REQ_ALLOCED) { 1142 unsigned int flags = req->cmd_flags; 1143 1144 BUG_ON(!list_empty(&req->queuelist)); 1145 BUG_ON(!hlist_unhashed(&req->hash)); 1146 1147 blk_free_request(q, req); 1148 freed_request(q, flags); 1149 } 1150 } 1151 EXPORT_SYMBOL_GPL(__blk_put_request); 1152 1153 void blk_put_request(struct request *req) 1154 { 1155 unsigned long flags; 1156 struct request_queue *q = req->q; 1157 1158 spin_lock_irqsave(q->queue_lock, flags); 1159 __blk_put_request(q, req); 1160 spin_unlock_irqrestore(q->queue_lock, flags); 1161 } 1162 EXPORT_SYMBOL(blk_put_request); 1163 1164 /** 1165 * blk_add_request_payload - add a payload to a request 1166 * @rq: request to update 1167 * @page: page backing the payload 1168 * @len: length of the payload. 1169 * 1170 * This allows to later add a payload to an already submitted request by 1171 * a block driver. The driver needs to take care of freeing the payload 1172 * itself. 1173 * 1174 * Note that this is a quite horrible hack and nothing but handling of 1175 * discard requests should ever use it. 
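 *
 * Roughly how a discard implementation might use it (an illustrative
 * sketch with hypothetical mydrv_* names; the driver owns @page and must
 * free it once the request completes):
 *
 *     static int mydrv_prep_discard(struct request_queue *q, struct request *rq)
 *     {
 *         struct page *page = alloc_page(GFP_ATOMIC);
 *
 *         if (!page)
 *             return BLKPREP_DEFER;
 *         mydrv_fill_trim_payload(page_address(page), rq);
 *         blk_add_request_payload(rq, page, mydrv_trim_len(rq));
 *         return BLKPREP_OK;
 *     }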
1176 */ 1177 void blk_add_request_payload(struct request *rq, struct page *page, 1178 unsigned int len) 1179 { 1180 struct bio *bio = rq->bio; 1181 1182 bio->bi_io_vec->bv_page = page; 1183 bio->bi_io_vec->bv_offset = 0; 1184 bio->bi_io_vec->bv_len = len; 1185 1186 bio->bi_size = len; 1187 bio->bi_vcnt = 1; 1188 bio->bi_phys_segments = 1; 1189 1190 rq->__data_len = rq->resid_len = len; 1191 rq->nr_phys_segments = 1; 1192 rq->buffer = bio_data(bio); 1193 } 1194 EXPORT_SYMBOL_GPL(blk_add_request_payload); 1195 1196 static bool bio_attempt_back_merge(struct request_queue *q, struct request *req, 1197 struct bio *bio) 1198 { 1199 const int ff = bio->bi_rw & REQ_FAILFAST_MASK; 1200 1201 if (!ll_back_merge_fn(q, req, bio)) 1202 return false; 1203 1204 trace_block_bio_backmerge(q, bio); 1205 1206 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) 1207 blk_rq_set_mixed_merge(req); 1208 1209 req->biotail->bi_next = bio; 1210 req->biotail = bio; 1211 req->__data_len += bio->bi_size; 1212 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); 1213 1214 drive_stat_acct(req, 0); 1215 return true; 1216 } 1217 1218 static bool bio_attempt_front_merge(struct request_queue *q, 1219 struct request *req, struct bio *bio) 1220 { 1221 const int ff = bio->bi_rw & REQ_FAILFAST_MASK; 1222 1223 if (!ll_front_merge_fn(q, req, bio)) 1224 return false; 1225 1226 trace_block_bio_frontmerge(q, bio); 1227 1228 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) 1229 blk_rq_set_mixed_merge(req); 1230 1231 bio->bi_next = req->bio; 1232 req->bio = bio; 1233 1234 /* 1235 * may not be valid. if the low level driver said 1236 * it didn't need a bounce buffer then it better 1237 * not touch req->buffer either... 1238 */ 1239 req->buffer = bio_data(bio); 1240 req->__sector = bio->bi_sector; 1241 req->__data_len += bio->bi_size; 1242 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); 1243 1244 drive_stat_acct(req, 0); 1245 return true; 1246 } 1247 1248 /** 1249 * attempt_plug_merge - try to merge with %current's plugged list 1250 * @q: request_queue new bio is being queued at 1251 * @bio: new bio being queued 1252 * @request_count: out parameter for number of traversed plugged requests 1253 * 1254 * Determine whether @bio being queued on @q can be merged with a request 1255 * on %current's plugged list. Returns %true if merge was successful, 1256 * otherwise %false. 1257 * 1258 * Plugging coalesces IOs from the same issuer for the same purpose without 1259 * going through @q->queue_lock. As such it's more of an issuing mechanism 1260 * than scheduling, and the request, while may have elvpriv data, is not 1261 * added on the elevator at this point. In addition, we don't have 1262 * reliable access to the elevator outside queue lock. Only check basic 1263 * merging parameters without querying the elevator. 
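 *
 * For context (illustration only): the plugged list scanned here is the
 * one a submitter accumulates between blk_start_plug() and
 * blk_finish_plug(), e.g.:
 *
 *     struct blk_plug plug;
 *
 *     blk_start_plug(&plug);
 *     for (i = 0; i < nr; i++)
 *         submit_bio(WRITE, bios[i]);    // may merge on current->plug
 *     blk_finish_plug(&plug);            // flushed to the queue(s) here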
1264 */ 1265 static bool attempt_plug_merge(struct request_queue *q, struct bio *bio, 1266 unsigned int *request_count) 1267 { 1268 struct blk_plug *plug; 1269 struct request *rq; 1270 bool ret = false; 1271 1272 plug = current->plug; 1273 if (!plug) 1274 goto out; 1275 *request_count = 0; 1276 1277 list_for_each_entry_reverse(rq, &plug->list, queuelist) { 1278 int el_ret; 1279 1280 (*request_count)++; 1281 1282 if (rq->q != q || !blk_rq_merge_ok(rq, bio)) 1283 continue; 1284 1285 el_ret = blk_try_merge(rq, bio); 1286 if (el_ret == ELEVATOR_BACK_MERGE) { 1287 ret = bio_attempt_back_merge(q, rq, bio); 1288 if (ret) 1289 break; 1290 } else if (el_ret == ELEVATOR_FRONT_MERGE) { 1291 ret = bio_attempt_front_merge(q, rq, bio); 1292 if (ret) 1293 break; 1294 } 1295 } 1296 out: 1297 return ret; 1298 } 1299 1300 void init_request_from_bio(struct request *req, struct bio *bio) 1301 { 1302 req->cmd_type = REQ_TYPE_FS; 1303 1304 req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK; 1305 if (bio->bi_rw & REQ_RAHEAD) 1306 req->cmd_flags |= REQ_FAILFAST_MASK; 1307 1308 req->errors = 0; 1309 req->__sector = bio->bi_sector; 1310 req->ioprio = bio_prio(bio); 1311 blk_rq_bio_prep(req->q, req, bio); 1312 } 1313 1314 void blk_queue_bio(struct request_queue *q, struct bio *bio) 1315 { 1316 const bool sync = !!(bio->bi_rw & REQ_SYNC); 1317 struct blk_plug *plug; 1318 int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT; 1319 struct request *req; 1320 unsigned int request_count = 0; 1321 1322 /* 1323 * low level driver can indicate that it wants pages above a 1324 * certain limit bounced to low memory (ie for highmem, or even 1325 * ISA dma in theory) 1326 */ 1327 blk_queue_bounce(q, &bio); 1328 1329 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) { 1330 spin_lock_irq(q->queue_lock); 1331 where = ELEVATOR_INSERT_FLUSH; 1332 goto get_rq; 1333 } 1334 1335 /* 1336 * Check if we can merge with the plugged list before grabbing 1337 * any locks. 1338 */ 1339 if (attempt_plug_merge(q, bio, &request_count)) 1340 return; 1341 1342 spin_lock_irq(q->queue_lock); 1343 1344 el_ret = elv_merge(q, &req, bio); 1345 if (el_ret == ELEVATOR_BACK_MERGE) { 1346 if (bio_attempt_back_merge(q, req, bio)) { 1347 elv_bio_merged(q, req, bio); 1348 if (!attempt_back_merge(q, req)) 1349 elv_merged_request(q, req, el_ret); 1350 goto out_unlock; 1351 } 1352 } else if (el_ret == ELEVATOR_FRONT_MERGE) { 1353 if (bio_attempt_front_merge(q, req, bio)) { 1354 elv_bio_merged(q, req, bio); 1355 if (!attempt_front_merge(q, req)) 1356 elv_merged_request(q, req, el_ret); 1357 goto out_unlock; 1358 } 1359 } 1360 1361 get_rq: 1362 /* 1363 * This sync check and mask will be re-done in init_request_from_bio(), 1364 * but we need to set it earlier to expose the sync flag to the 1365 * rq allocator and io schedulers. 1366 */ 1367 rw_flags = bio_data_dir(bio); 1368 if (sync) 1369 rw_flags |= REQ_SYNC; 1370 1371 /* 1372 * Grab a free request. This is might sleep but can not fail. 1373 * Returns with the queue unlocked. 1374 */ 1375 req = get_request_wait(q, rw_flags, bio); 1376 if (unlikely(!req)) { 1377 bio_endio(bio, -ENODEV); /* @q is dead */ 1378 goto out_unlock; 1379 } 1380 1381 /* 1382 * After dropping the lock and possibly sleeping here, our request 1383 * may now be mergeable after it had proven unmergeable (above). 1384 * We don't worry about that case for efficiency. It won't happen 1385 * often, and the elevators are able to handle it. 
1386 */ 1387 init_request_from_bio(req, bio); 1388 1389 if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) 1390 req->cpu = raw_smp_processor_id(); 1391 1392 plug = current->plug; 1393 if (plug) { 1394 /* 1395 * If this is the first request added after a plug, fire 1396 * of a plug trace. If others have been added before, check 1397 * if we have multiple devices in this plug. If so, make a 1398 * note to sort the list before dispatch. 1399 */ 1400 if (list_empty(&plug->list)) 1401 trace_block_plug(q); 1402 else { 1403 if (!plug->should_sort) { 1404 struct request *__rq; 1405 1406 __rq = list_entry_rq(plug->list.prev); 1407 if (__rq->q != q) 1408 plug->should_sort = 1; 1409 } 1410 if (request_count >= BLK_MAX_REQUEST_COUNT) { 1411 blk_flush_plug_list(plug, false); 1412 trace_block_plug(q); 1413 } 1414 } 1415 list_add_tail(&req->queuelist, &plug->list); 1416 drive_stat_acct(req, 1); 1417 } else { 1418 spin_lock_irq(q->queue_lock); 1419 add_acct_request(q, req, where); 1420 __blk_run_queue(q); 1421 out_unlock: 1422 spin_unlock_irq(q->queue_lock); 1423 } 1424 } 1425 EXPORT_SYMBOL_GPL(blk_queue_bio); /* for device mapper only */ 1426 1427 /* 1428 * If bio->bi_dev is a partition, remap the location 1429 */ 1430 static inline void blk_partition_remap(struct bio *bio) 1431 { 1432 struct block_device *bdev = bio->bi_bdev; 1433 1434 if (bio_sectors(bio) && bdev != bdev->bd_contains) { 1435 struct hd_struct *p = bdev->bd_part; 1436 1437 bio->bi_sector += p->start_sect; 1438 bio->bi_bdev = bdev->bd_contains; 1439 1440 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio, 1441 bdev->bd_dev, 1442 bio->bi_sector - p->start_sect); 1443 } 1444 } 1445 1446 static void handle_bad_sector(struct bio *bio) 1447 { 1448 char b[BDEVNAME_SIZE]; 1449 1450 printk(KERN_INFO "attempt to access beyond end of device\n"); 1451 printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n", 1452 bdevname(bio->bi_bdev, b), 1453 bio->bi_rw, 1454 (unsigned long long)bio->bi_sector + bio_sectors(bio), 1455 (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9)); 1456 1457 set_bit(BIO_EOF, &bio->bi_flags); 1458 } 1459 1460 #ifdef CONFIG_FAIL_MAKE_REQUEST 1461 1462 static DECLARE_FAULT_ATTR(fail_make_request); 1463 1464 static int __init setup_fail_make_request(char *str) 1465 { 1466 return setup_fault_attr(&fail_make_request, str); 1467 } 1468 __setup("fail_make_request=", setup_fail_make_request); 1469 1470 static bool should_fail_request(struct hd_struct *part, unsigned int bytes) 1471 { 1472 return part->make_it_fail && should_fail(&fail_make_request, bytes); 1473 } 1474 1475 static int __init fail_make_request_debugfs(void) 1476 { 1477 struct dentry *dir = fault_create_debugfs_attr("fail_make_request", 1478 NULL, &fail_make_request); 1479 1480 return IS_ERR(dir) ? PTR_ERR(dir) : 0; 1481 } 1482 1483 late_initcall(fail_make_request_debugfs); 1484 1485 #else /* CONFIG_FAIL_MAKE_REQUEST */ 1486 1487 static inline bool should_fail_request(struct hd_struct *part, 1488 unsigned int bytes) 1489 { 1490 return false; 1491 } 1492 1493 #endif /* CONFIG_FAIL_MAKE_REQUEST */ 1494 1495 /* 1496 * Check whether this bio extends beyond the end of the device. 1497 */ 1498 static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) 1499 { 1500 sector_t maxsector; 1501 1502 if (!nr_sectors) 1503 return 0; 1504 1505 /* Test device or partition size, when known. 
*/ 1506 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; 1507 if (maxsector) { 1508 sector_t sector = bio->bi_sector; 1509 1510 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { 1511 /* 1512 * This may well happen - the kernel calls bread() 1513 * without checking the size of the device, e.g., when 1514 * mounting a device. 1515 */ 1516 handle_bad_sector(bio); 1517 return 1; 1518 } 1519 } 1520 1521 return 0; 1522 } 1523 1524 static noinline_for_stack bool 1525 generic_make_request_checks(struct bio *bio) 1526 { 1527 struct request_queue *q; 1528 int nr_sectors = bio_sectors(bio); 1529 int err = -EIO; 1530 char b[BDEVNAME_SIZE]; 1531 struct hd_struct *part; 1532 1533 might_sleep(); 1534 1535 if (bio_check_eod(bio, nr_sectors)) 1536 goto end_io; 1537 1538 q = bdev_get_queue(bio->bi_bdev); 1539 if (unlikely(!q)) { 1540 printk(KERN_ERR 1541 "generic_make_request: Trying to access " 1542 "nonexistent block-device %s (%Lu)\n", 1543 bdevname(bio->bi_bdev, b), 1544 (long long) bio->bi_sector); 1545 goto end_io; 1546 } 1547 1548 if (unlikely(!(bio->bi_rw & REQ_DISCARD) && 1549 nr_sectors > queue_max_hw_sectors(q))) { 1550 printk(KERN_ERR "bio too big device %s (%u > %u)\n", 1551 bdevname(bio->bi_bdev, b), 1552 bio_sectors(bio), 1553 queue_max_hw_sectors(q)); 1554 goto end_io; 1555 } 1556 1557 part = bio->bi_bdev->bd_part; 1558 if (should_fail_request(part, bio->bi_size) || 1559 should_fail_request(&part_to_disk(part)->part0, 1560 bio->bi_size)) 1561 goto end_io; 1562 1563 /* 1564 * If this device has partitions, remap block n 1565 * of partition p to block n+start(p) of the disk. 1566 */ 1567 blk_partition_remap(bio); 1568 1569 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) 1570 goto end_io; 1571 1572 if (bio_check_eod(bio, nr_sectors)) 1573 goto end_io; 1574 1575 /* 1576 * Filter flush bio's early so that make_request based 1577 * drivers without flush support don't have to worry 1578 * about them. 1579 */ 1580 if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) { 1581 bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA); 1582 if (!nr_sectors) { 1583 err = 0; 1584 goto end_io; 1585 } 1586 } 1587 1588 if ((bio->bi_rw & REQ_DISCARD) && 1589 (!blk_queue_discard(q) || 1590 ((bio->bi_rw & REQ_SECURE) && 1591 !blk_queue_secdiscard(q)))) { 1592 err = -EOPNOTSUPP; 1593 goto end_io; 1594 } 1595 1596 if (blk_throtl_bio(q, bio)) 1597 return false; /* throttled, will be resubmitted later */ 1598 1599 trace_block_bio_queue(q, bio); 1600 return true; 1601 1602 end_io: 1603 bio_endio(bio, err); 1604 return false; 1605 } 1606 1607 /** 1608 * generic_make_request - hand a buffer to its device driver for I/O 1609 * @bio: The bio describing the location in memory and on the device. 1610 * 1611 * generic_make_request() is used to make I/O requests of block 1612 * devices. It is passed a &struct bio, which describes the I/O that needs 1613 * to be done. 1614 * 1615 * generic_make_request() does not return any status. The 1616 * success/failure status of the request, along with notification of 1617 * completion, is delivered asynchronously through the bio->bi_end_io 1618 * function described (one day) else where. 1619 * 1620 * The caller of generic_make_request must make sure that bi_io_vec 1621 * are set to describe the memory buffer, and that bi_dev and bi_sector are 1622 * set to describe the device address, and the 1623 * bi_end_io and optionally bi_private are set to describe how 1624 * completion notification should be signaled. 
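 *
 * A minimal submitter, for illustration (assumes bdev is an opened block
 * device, page holds the data, and my_end_io/my_cookie are the caller's
 * hypothetical completion hook and context):
 *
 *     struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *     bio->bi_bdev = bdev;
 *     bio->bi_sector = sector;
 *     bio->bi_end_io = my_end_io;
 *     bio->bi_private = my_cookie;
 *     bio_add_page(bio, page, PAGE_SIZE, 0);
 *     submit_bio(READ, bio);    // ends up in generic_make_request()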
 *
 * generic_make_request and the drivers it calls may use bi_next if this
 * bio happens to be merged with someone else, and may resubmit the bio to
 * a lower device by calling into generic_make_request recursively, which
 * means the bio should NOT be touched after the call to ->make_request_fn.
 */
void generic_make_request(struct bio *bio)
{
        struct bio_list bio_list_on_stack;

        if (!generic_make_request_checks(bio))
                return;

        /*
         * We only want one ->make_request_fn to be active at a time, else
         * stack usage with stacked devices could be a problem. So use
         * current->bio_list to keep a list of requests submitted by a
         * make_request_fn function. current->bio_list is also used as a
         * flag to say if generic_make_request is currently active in this
         * task or not. If it is NULL, then no make_request is active. If
         * it is non-NULL, then a make_request is active, and new requests
         * should be added at the tail.
         */
        if (current->bio_list) {
                bio_list_add(current->bio_list, bio);
                return;
        }

        /*
         * The following loop may be a bit non-obvious, and so deserves some
         * explanation.
         * Before entering the loop, bio->bi_next is NULL (as all callers
         * ensure that), so we have a list with a single bio.
         * We pretend that we have just taken it off a longer list, so
         * we assign bio_list to a pointer to the bio_list_on_stack,
         * thus initialising the bio_list of new bios to be
         * added. ->make_request() may indeed add some more bios
         * through a recursive call to generic_make_request. If it
         * did, we find a non-NULL value in bio_list and re-enter the loop
         * from the top. In this case we really did just take the bio
         * off the top of the list (no pretending) and so remove it from
         * bio_list, and call into ->make_request() again.
         */
        BUG_ON(bio->bi_next);
        bio_list_init(&bio_list_on_stack);
        current->bio_list = &bio_list_on_stack;
        do {
                struct request_queue *q = bdev_get_queue(bio->bi_bdev);

                q->make_request_fn(q, bio);

                bio = bio_list_pop(current->bio_list);
        } while (bio);
        current->bio_list = NULL; /* deactivate */
}
EXPORT_SYMBOL(generic_make_request);

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is very similar in purpose to generic_make_request(), and
 * uses that function to do most of the work. Both are fairly rough
 * interfaces; @bio must be pre-set up and ready for I/O.
 *
 */
void submit_bio(int rw, struct bio *bio)
{
        int count = bio_sectors(bio);

        bio->bi_rw |= rw;

        /*
         * If it's a regular read/write or a barrier with data attached,
         * go through the normal accounting stuff before submission.
         */
        if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
                if (rw & WRITE) {
                        count_vm_events(PGPGOUT, count);
                } else {
                        task_io_account_read(bio->bi_size);
                        count_vm_events(PGPGIN, count);
                }

                if (unlikely(block_dump)) {
                        char b[BDEVNAME_SIZE];
                        printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
                               current->comm, task_pid_nr(current),
                               (rw & WRITE) ?
"WRITE" : "READ", 1714 (unsigned long long)bio->bi_sector, 1715 bdevname(bio->bi_bdev, b), 1716 count); 1717 } 1718 } 1719 1720 generic_make_request(bio); 1721 } 1722 EXPORT_SYMBOL(submit_bio); 1723 1724 /** 1725 * blk_rq_check_limits - Helper function to check a request for the queue limit 1726 * @q: the queue 1727 * @rq: the request being checked 1728 * 1729 * Description: 1730 * @rq may have been made based on weaker limitations of upper-level queues 1731 * in request stacking drivers, and it may violate the limitation of @q. 1732 * Since the block layer and the underlying device driver trust @rq 1733 * after it is inserted to @q, it should be checked against @q before 1734 * the insertion using this generic function. 1735 * 1736 * This function should also be useful for request stacking drivers 1737 * in some cases below, so export this function. 1738 * Request stacking drivers like request-based dm may change the queue 1739 * limits while requests are in the queue (e.g. dm's table swapping). 1740 * Such request stacking drivers should check those requests agaist 1741 * the new queue limits again when they dispatch those requests, 1742 * although such checkings are also done against the old queue limits 1743 * when submitting requests. 1744 */ 1745 int blk_rq_check_limits(struct request_queue *q, struct request *rq) 1746 { 1747 if (rq->cmd_flags & REQ_DISCARD) 1748 return 0; 1749 1750 if (blk_rq_sectors(rq) > queue_max_sectors(q) || 1751 blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) { 1752 printk(KERN_ERR "%s: over max size limit.\n", __func__); 1753 return -EIO; 1754 } 1755 1756 /* 1757 * queue's settings related to segment counting like q->bounce_pfn 1758 * may differ from that of other stacking queues. 1759 * Recalculate it to check the request correctly on this queue's 1760 * limitation. 1761 */ 1762 blk_recalc_rq_segments(rq); 1763 if (rq->nr_phys_segments > queue_max_segments(q)) { 1764 printk(KERN_ERR "%s: over max segments limit.\n", __func__); 1765 return -EIO; 1766 } 1767 1768 return 0; 1769 } 1770 EXPORT_SYMBOL_GPL(blk_rq_check_limits); 1771 1772 /** 1773 * blk_insert_cloned_request - Helper for stacking drivers to submit a request 1774 * @q: the queue to submit the request 1775 * @rq: the request being queued 1776 */ 1777 int blk_insert_cloned_request(struct request_queue *q, struct request *rq) 1778 { 1779 unsigned long flags; 1780 int where = ELEVATOR_INSERT_BACK; 1781 1782 if (blk_rq_check_limits(q, rq)) 1783 return -EIO; 1784 1785 if (rq->rq_disk && 1786 should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq))) 1787 return -EIO; 1788 1789 spin_lock_irqsave(q->queue_lock, flags); 1790 if (unlikely(blk_queue_dead(q))) { 1791 spin_unlock_irqrestore(q->queue_lock, flags); 1792 return -ENODEV; 1793 } 1794 1795 /* 1796 * Submitting request must be dequeued before calling this function 1797 * because it will be linked to another request_queue 1798 */ 1799 BUG_ON(blk_queued_rq(rq)); 1800 1801 if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA)) 1802 where = ELEVATOR_INSERT_FLUSH; 1803 1804 add_acct_request(q, rq, where); 1805 if (where == ELEVATOR_INSERT_FLUSH) 1806 __blk_run_queue(q); 1807 spin_unlock_irqrestore(q->queue_lock, flags); 1808 1809 return 0; 1810 } 1811 EXPORT_SYMBOL_GPL(blk_insert_cloned_request); 1812 1813 /** 1814 * blk_rq_err_bytes - determine number of bytes till the next failure boundary 1815 * @rq: request to examine 1816 * 1817 * Description: 1818 * A request could be merge of IOs which require different failure 1819 * handling. 
This function determines the number of bytes which 1820 * can be failed from the beginning of the request without 1821 * crossing into area which need to be retried further. 1822 * 1823 * Return: 1824 * The number of bytes to fail. 1825 * 1826 * Context: 1827 * queue_lock must be held. 1828 */ 1829 unsigned int blk_rq_err_bytes(const struct request *rq) 1830 { 1831 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK; 1832 unsigned int bytes = 0; 1833 struct bio *bio; 1834 1835 if (!(rq->cmd_flags & REQ_MIXED_MERGE)) 1836 return blk_rq_bytes(rq); 1837 1838 /* 1839 * Currently the only 'mixing' which can happen is between 1840 * different fastfail types. We can safely fail portions 1841 * which have all the failfast bits that the first one has - 1842 * the ones which are at least as eager to fail as the first 1843 * one. 1844 */ 1845 for (bio = rq->bio; bio; bio = bio->bi_next) { 1846 if ((bio->bi_rw & ff) != ff) 1847 break; 1848 bytes += bio->bi_size; 1849 } 1850 1851 /* this could lead to infinite loop */ 1852 BUG_ON(blk_rq_bytes(rq) && !bytes); 1853 return bytes; 1854 } 1855 EXPORT_SYMBOL_GPL(blk_rq_err_bytes); 1856 1857 static void blk_account_io_completion(struct request *req, unsigned int bytes) 1858 { 1859 if (blk_do_io_stat(req)) { 1860 const int rw = rq_data_dir(req); 1861 struct hd_struct *part; 1862 int cpu; 1863 1864 cpu = part_stat_lock(); 1865 part = req->part; 1866 part_stat_add(cpu, part, sectors[rw], bytes >> 9); 1867 part_stat_unlock(); 1868 } 1869 } 1870 1871 static void blk_account_io_done(struct request *req) 1872 { 1873 /* 1874 * Account IO completion. flush_rq isn't accounted as a 1875 * normal IO on queueing nor completion. Accounting the 1876 * containing request is enough. 1877 */ 1878 if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) { 1879 unsigned long duration = jiffies - req->start_time; 1880 const int rw = rq_data_dir(req); 1881 struct hd_struct *part; 1882 int cpu; 1883 1884 cpu = part_stat_lock(); 1885 part = req->part; 1886 1887 part_stat_inc(cpu, part, ios[rw]); 1888 part_stat_add(cpu, part, ticks[rw], duration); 1889 part_round_stats(cpu, part); 1890 part_dec_in_flight(part, rw); 1891 1892 hd_struct_put(part); 1893 part_stat_unlock(); 1894 } 1895 } 1896 1897 /** 1898 * blk_peek_request - peek at the top of a request queue 1899 * @q: request queue to peek at 1900 * 1901 * Description: 1902 * Return the request at the top of @q. The returned request 1903 * should be started using blk_start_request() before LLD starts 1904 * processing it. 1905 * 1906 * Return: 1907 * Pointer to the request at the top of @q if available. Null 1908 * otherwise. 1909 * 1910 * Context: 1911 * queue_lock must be held. 1912 */ 1913 struct request *blk_peek_request(struct request_queue *q) 1914 { 1915 struct request *rq; 1916 int ret; 1917 1918 while ((rq = __elv_next_request(q)) != NULL) { 1919 if (!(rq->cmd_flags & REQ_STARTED)) { 1920 /* 1921 * This is the first time the device driver 1922 * sees this request (possibly after 1923 * requeueing). Notify IO scheduler. 
static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (blk_do_io_stat(req)) {
		const int rw = rq_data_dir(req);
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;
		part_stat_add(cpu, part, sectors[rw], bytes >> 9);
		part_stat_unlock();
	}
}

static void blk_account_io_done(struct request *req)
{
	/*
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
	 */
	if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) {
		unsigned long duration = jiffies - req->start_time;
		const int rw = rq_data_dir(req);
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_stat_inc(cpu, part, ios[rw]);
		part_stat_add(cpu, part, ticks[rw], duration);
		part_round_stats(cpu, part);
		part_dec_in_flight(part, rw);

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/**
 * blk_peek_request - peek at the top of a request queue
 * @q: request queue to peek at
 *
 * Description:
 *     Return the request at the top of @q.  The returned request
 *     should be started using blk_start_request() before LLD starts
 *     processing it.
 *
 * Return:
 *     Pointer to the request at the top of @q if available.  Null
 *     otherwise.
 *
 * Context:
 *     queue_lock must be held.
 */
struct request *blk_peek_request(struct request_queue *q)
{
	struct request *rq;
	int ret;

	while ((rq = __elv_next_request(q)) != NULL) {
		if (!(rq->cmd_flags & REQ_STARTED)) {
			/*
			 * This is the first time the device driver
			 * sees this request (possibly after
			 * requeueing).  Notify IO scheduler.
			 */
			if (rq->cmd_flags & REQ_SORTED)
				elv_activate_rq(q, rq);

			/*
			 * just mark as started even if we don't start
			 * it, a request that has been delayed should
			 * not be passed by new incoming requests
			 */
			rq->cmd_flags |= REQ_STARTED;
			trace_block_rq_issue(q, rq);
		}

		if (!q->boundary_rq || q->boundary_rq == rq) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = NULL;
		}

		if (rq->cmd_flags & REQ_DONTPREP)
			break;

		if (q->dma_drain_size && blk_rq_bytes(rq)) {
			/*
			 * make sure space for the drain appears.  We
			 * know we can do this because max_hw_segments
			 * has been adjusted to be one fewer than the
			 * device can handle
			 */
			rq->nr_phys_segments++;
		}

		if (!q->prep_rq_fn)
			break;

		ret = q->prep_rq_fn(q, rq);
		if (ret == BLKPREP_OK) {
			break;
		} else if (ret == BLKPREP_DEFER) {
			/*
			 * the request may have been (partially) prepped.
			 * we need to keep this request in the front to
			 * avoid resource deadlock.  REQ_STARTED will
			 * prevent other fs requests from passing this one.
			 */
			if (q->dma_drain_size && blk_rq_bytes(rq) &&
			    !(rq->cmd_flags & REQ_DONTPREP)) {
				/*
				 * remove the space for the drain we added
				 * so that we don't add it again
				 */
				--rq->nr_phys_segments;
			}

			rq = NULL;
			break;
		} else if (ret == BLKPREP_KILL) {
			rq->cmd_flags |= REQ_QUIET;
			/*
			 * Mark this request as started so we don't trigger
			 * any debug logic in the end I/O path.
			 */
			blk_start_request(rq);
			__blk_end_request_all(rq, -EIO);
		} else {
			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
			break;
		}
	}

	return rq;
}
EXPORT_SYMBOL(blk_peek_request);

void blk_dequeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	BUG_ON(list_empty(&rq->queuelist));
	BUG_ON(ELV_ON_HASH(rq));

	list_del_init(&rq->queuelist);

	/*
	 * the time frame between a request being removed from the lists
	 * and when it is freed is accounted as io that is in progress at
	 * the driver side.
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]++;
		set_io_start_time_ns(rq);
	}
}

/**
 * blk_start_request - start request processing on the driver
 * @req: request to dequeue
 *
 * Description:
 *     Dequeue @req and start timeout timer on it.  This hands off the
 *     request to the driver.
 *
 *     Block internal functions which don't want to start the timer should
 *     call blk_dequeue_request().
 *
 * Context:
 *     queue_lock must be held.
 */
void blk_start_request(struct request *req)
{
	blk_dequeue_request(req);

	/*
	 * We are now handing the request to the hardware, initialize
	 * resid_len to full count and add the timeout handler.
	 */
	req->resid_len = blk_rq_bytes(req);
	if (unlikely(blk_bidi_rq(req)))
		req->next_rq->resid_len = blk_rq_bytes(req->next_rq);

	blk_add_timer(req);
}
EXPORT_SYMBOL(blk_start_request);

/**
 * blk_fetch_request - fetch a request from a request queue
 * @q: request queue to fetch a request from
 *
 * Description:
 *     Return the request at the top of @q.  The request is started on
 *     return and LLD can start processing it immediately.
 *
 * Return:
 *     Pointer to the request at the top of @q if available.  Null
 *     otherwise.
 *
 * Context:
 *     queue_lock must be held.
 */
struct request *blk_fetch_request(struct request_queue *q)
{
	struct request *rq;

	rq = blk_peek_request(q);
	if (rq)
		blk_start_request(rq);
	return rq;
}
EXPORT_SYMBOL(blk_fetch_request);
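
/*
 * Editor's illustration (not part of the original source, compiled out):
 * a minimal request_fn for a simple driver that services requests
 * synchronously.  request_fn is always called with the queue lock held,
 * so the lock-held completion helpers are used.  "example_request_fn"
 * and the elided transfer step are hypothetical.
 */
#if 0
static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if (rq->cmd_type != REQ_TYPE_FS) {
			/* this sketch only handles fs requests */
			__blk_end_request_all(rq, -EIO);
			continue;
		}

		/*
		 * ... transfer blk_rq_cur_bytes(rq) bytes starting at
		 * sector blk_rq_pos(rq) here ...
		 */
		__blk_end_request_all(rq, 0);
	}
}
#endif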
/**
 * blk_update_request - Special helper function for request stacking drivers
 * @req:      the request being processed
 * @error:    %0 for success, < %0 for error
 * @nr_bytes: number of bytes to complete @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     This special helper function is only for request stacking drivers
 *     (e.g. request-based dm) so that they can handle partial completion.
 *     Actual device drivers should use blk_end_request instead.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
{
	int total_bytes, bio_nbytes, next_idx = 0;
	struct bio *bio;

	if (!req->bio)
		return false;

	trace_block_rq_complete(req->q, req);

	/*
	 * For fs requests, rq is just a carrier of independent bios
	 * and each partial completion should be handled separately.
	 * Reset per-request error on each partial completion.
	 *
	 * TODO: tj: This is too subtle.  It would be better to let
	 * low level drivers do what they see fit.
	 */
	if (req->cmd_type == REQ_TYPE_FS)
		req->errors = 0;

	if (error && req->cmd_type == REQ_TYPE_FS &&
	    !(req->cmd_flags & REQ_QUIET)) {
		char *error_type;

		switch (error) {
		case -ENOLINK:
			error_type = "recoverable transport";
			break;
		case -EREMOTEIO:
			error_type = "critical target";
			break;
		case -EBADE:
			error_type = "critical nexus";
			break;
		case -EIO:
		default:
			error_type = "I/O";
			break;
		}
		printk(KERN_ERR "end_request: %s error, dev %s, sector %llu\n",
		       error_type, req->rq_disk ?
		       req->rq_disk->disk_name : "?",
		       (unsigned long long)blk_rq_pos(req));
	}

	blk_account_io_completion(req, nr_bytes);

	total_bytes = bio_nbytes = 0;
	while ((bio = req->bio) != NULL) {
		int nbytes;

		if (nr_bytes >= bio->bi_size) {
			req->bio = bio->bi_next;
			nbytes = bio->bi_size;
			req_bio_endio(req, bio, nbytes, error);
			next_idx = 0;
			bio_nbytes = 0;
		} else {
			int idx = bio->bi_idx + next_idx;

			if (unlikely(idx >= bio->bi_vcnt)) {
				blk_dump_rq_flags(req, "__end_that");
				printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
				       __func__, idx, bio->bi_vcnt);
				break;
			}

			nbytes = bio_iovec_idx(bio, idx)->bv_len;
			BIO_BUG_ON(nbytes > bio->bi_size);

			/*
			 * not a complete bvec done
			 */
			if (unlikely(nbytes > nr_bytes)) {
				bio_nbytes += nr_bytes;
				total_bytes += nr_bytes;
				break;
			}

			/*
			 * advance to the next vector
			 */
			next_idx++;
			bio_nbytes += nbytes;
		}

		total_bytes += nbytes;
		nr_bytes -= nbytes;

		bio = req->bio;
		if (bio) {
			/*
			 * end more in this run, or just return 'not-done'
			 */
			if (unlikely(nr_bytes <= 0))
				break;
		}
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	/*
	 * if the request wasn't completed, update state
	 */
	if (bio_nbytes) {
		req_bio_endio(req, bio, bio_nbytes, error);
		bio->bi_idx += next_idx;
		bio_iovec(bio)->bv_offset += nr_bytes;
		bio_iovec(bio)->bv_len -= nr_bytes;
	}

	req->__data_len -= total_bytes;
	req->buffer = bio_data(req->bio);

	/* update sector only for requests with clear definition of sector */
	if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->cmd_flags & REQ_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
	}

	/*
	 * If total number of sectors is less than the first segment
	 * size, something has gone terribly wrong.
	 */
	if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
		blk_dump_rq_flags(req, "request botched");
		req->__data_len = blk_rq_cur_bytes(req);
	}

	/* recalculate the number of segments */
	blk_recalc_rq_segments(req);

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);
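
/*
 * Editor's illustration (not part of the original source, compiled out):
 * one way a request-based stacking driver might propagate a partial
 * completion of a clone back to the original request without freeing it.
 * "example_stacked_partial_done" and @good_bytes are hypothetical.
 */
#if 0
static void example_stacked_partial_done(struct request *orig, int error,
					 unsigned int good_bytes)
{
	if (blk_update_request(orig, error, good_bytes)) {
		/*
		 * @orig still has data left and now describes the
		 * remaining range of segments; requeue or retry it.
		 */
		return;
	}

	/* no data left: finish @orig through the driver's normal path */
}
#endif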
static bool blk_update_bidi_request(struct request *rq, int error,
				    unsigned int nr_bytes,
				    unsigned int bidi_bytes)
{
	if (blk_update_request(rq, error, nr_bytes))
		return true;

	/* Bidi request must be completed as a whole */
	if (unlikely(blk_bidi_rq(rq)) &&
	    blk_update_request(rq->next_rq, error, bidi_bytes))
		return true;

	if (blk_queue_add_random(rq->q))
		add_disk_randomness(rq->rq_disk);

	return false;
}

/**
 * blk_unprep_request - unprepare a request
 * @req: the request
 *
 * This function makes a request ready for complete resubmission (or
 * completion).  It happens only after all error handling is complete,
 * so it represents the appropriate moment to deallocate any resources
 * that were allocated to the request in the prep_rq_fn.  The queue
 * lock is held when calling this.
 */
void blk_unprep_request(struct request *req)
{
	struct request_queue *q = req->q;

	req->cmd_flags &= ~REQ_DONTPREP;
	if (q->unprep_rq_fn)
		q->unprep_rq_fn(q, req);
}
EXPORT_SYMBOL_GPL(blk_unprep_request);

/*
 * queue lock must be held
 */
static void blk_finish_request(struct request *req, int error)
{
	if (blk_rq_tagged(req))
		blk_queue_end_tag(req->q, req);

	BUG_ON(blk_queued_rq(req));

	if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
		laptop_io_completion(&req->q->backing_dev_info);

	blk_delete_timer(req);

	if (req->cmd_flags & REQ_DONTPREP)
		blk_unprep_request(req);

	blk_account_io_done(req);

	if (req->end_io)
		req->end_io(req, error);
	else {
		if (blk_bidi_rq(req))
			__blk_put_request(req->next_rq->q, req->next_rq);

		__blk_put_request(req->q, req);
	}
}

/**
 * blk_end_bidi_request - Complete a bidi request
 * @rq:         the request to complete
 * @error:      %0 for success, < %0 for error
 * @nr_bytes:   number of bytes to complete @rq
 * @bidi_bytes: number of bytes to complete @rq->next_rq
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
 *     Drivers that support bidi can safely call this function for any
 *     type of request, bidi or uni.  In the latter case @bidi_bytes is
 *     just ignored.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 **/
static bool blk_end_bidi_request(struct request *rq, int error,
				 unsigned int nr_bytes, unsigned int bidi_bytes)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
		return true;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_finish_request(rq, error);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return false;
}

/**
 * __blk_end_bidi_request - Complete a bidi request with queue lock held
 * @rq:         the request to complete
 * @error:      %0 for success, < %0 for error
 * @nr_bytes:   number of bytes to complete @rq
 * @bidi_bytes: number of bytes to complete @rq->next_rq
 *
 * Description:
 *     Identical to blk_end_bidi_request() except that queue lock is
 *     assumed to be locked on entry and remains so on return.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 **/
bool __blk_end_bidi_request(struct request *rq, int error,
			    unsigned int nr_bytes, unsigned int bidi_bytes)
{
	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
		return true;

	blk_finish_request(rq, error);

	return false;
}

/**
 * blk_end_request - Helper function for drivers to complete the request.
 * @rq:       the request being processed
 * @error:    %0 for success, < %0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq.
 *     If @rq has leftover, sets it up for the next range of segments.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 **/
bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
	return blk_end_bidi_request(rq, error, nr_bytes, 0);
}
EXPORT_SYMBOL(blk_end_request);

/**
 * blk_end_request_all - Helper function for drivers to finish the request.
 * @rq:    the request to finish
 * @error: %0 for success, < %0 for error
 *
 * Description:
 *     Completely finish @rq.
 */
void blk_end_request_all(struct request *rq, int error)
{
	bool pending;
	unsigned int bidi_bytes = 0;

	if (unlikely(blk_bidi_rq(rq)))
		bidi_bytes = blk_rq_bytes(rq->next_rq);

	pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
	BUG_ON(pending);
}
EXPORT_SYMBOL(blk_end_request_all);

/**
 * blk_end_request_cur - Helper function to finish the current request chunk.
 * @rq:    the request to finish the current chunk for
 * @error: %0 for success, < %0 for error
 *
 * Description:
 *     Complete the current consecutively mapped chunk from @rq.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 */
bool blk_end_request_cur(struct request *rq, int error)
{
	return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
}
EXPORT_SYMBOL(blk_end_request_cur);

/**
 * blk_end_request_err - Finish a request till the next failure boundary.
 * @rq:    the request to finish till the next failure boundary for
 * @error: must be negative errno
 *
 * Description:
 *     Complete @rq till the next failure boundary.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 */
bool blk_end_request_err(struct request *rq, int error)
{
	WARN_ON(error >= 0);
	return blk_end_request(rq, error, blk_rq_err_bytes(rq));
}
EXPORT_SYMBOL_GPL(blk_end_request_err);

/**
 * __blk_end_request - Helper function for drivers to complete the request.
 * @rq:       the request being processed
 * @error:    %0 for success, < %0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Must be called with queue lock held unlike blk_end_request().
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 **/
bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
	return __blk_end_bidi_request(rq, error, nr_bytes, 0);
}
EXPORT_SYMBOL(__blk_end_request);

/**
 * __blk_end_request_all - Helper function for drivers to finish the request.
 * @rq:    the request to finish
 * @error: %0 for success, < %0 for error
 *
 * Description:
 *     Completely finish @rq.  Must be called with queue lock held.
 */
void __blk_end_request_all(struct request *rq, int error)
{
	bool pending;
	unsigned int bidi_bytes = 0;

	if (unlikely(blk_bidi_rq(rq)))
		bidi_bytes = blk_rq_bytes(rq->next_rq);

	pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
	BUG_ON(pending);
}
EXPORT_SYMBOL(__blk_end_request_all);
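
/*
 * Editor's illustration (not part of the original source, compiled out):
 * a completion path that ends one consecutively mapped chunk per device
 * interrupt.  The unprefixed blk_end_request*() helpers take the queue
 * lock themselves, so this runs without it held.  "example_chunk_done"
 * is a hypothetical name.
 */
#if 0
static void example_chunk_done(struct request *rq, int error)
{
	if (blk_end_request_cur(rq, error)) {
		/* more chunks pending; the device will interrupt again */
		return;
	}

	/* the whole request has been completed and freed */
}
#endif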
/**
 * __blk_end_request_cur - Helper function to finish the current request chunk.
 * @rq:    the request to finish the current chunk for
 * @error: %0 for success, < %0 for error
 *
 * Description:
 *     Complete the current consecutively mapped chunk from @rq.  Must
 *     be called with queue lock held.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 */
bool __blk_end_request_cur(struct request *rq, int error)
{
	return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
}
EXPORT_SYMBOL(__blk_end_request_cur);

/**
 * __blk_end_request_err - Finish a request till the next failure boundary.
 * @rq:    the request to finish till the next failure boundary for
 * @error: must be negative errno
 *
 * Description:
 *     Complete @rq till the next failure boundary.  Must be called
 *     with queue lock held.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 */
bool __blk_end_request_err(struct request *rq, int error)
{
	WARN_ON(error >= 0);
	return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
}
EXPORT_SYMBOL_GPL(__blk_end_request_err);

void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
		     struct bio *bio)
{
	/* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
	rq->cmd_flags |= bio->bi_rw & REQ_WRITE;

	if (bio_has_data(bio)) {
		rq->nr_phys_segments = bio_phys_segments(q, bio);
		rq->buffer = bio_data(bio);
	}
	rq->__data_len = bio->bi_size;
	rq->bio = rq->biotail = bio;

	if (bio->bi_bdev)
		rq->rq_disk = bio->bi_bdev->bd_disk;
}

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
/**
 * rq_flush_dcache_pages - Helper function to flush all pages in a request
 * @rq: the request to be flushed
 *
 * Description:
 *     Flush all pages in @rq.
 */
void rq_flush_dcache_pages(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec *bvec;

	rq_for_each_segment(bvec, rq, iter)
		flush_dcache_page(bvec->bv_page);
}
EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
#endif

/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *     Check if underlying low-level drivers of a device are busy.
 *     If the drivers want to export their busy state, they must set their
 *     own exporting function using blk_queue_lld_busy() first.
 *
 *     Basically, this function is used only by request stacking drivers
 *     to stop dispatching requests to underlying devices when underlying
 *     devices are busy.  This behavior helps more I/O merging on the queue
 *     of the request stacking driver and prevents I/O throughput regression
 *     under bursty I/O load.
 *
 * Return:
 *     0 - Not busy (The request stacking driver should dispatch request)
 *     1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
	if (q->lld_busy_fn)
		return q->lld_busy_fn(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);
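
/*
 * Editor's illustration (not part of the original source, compiled out):
 * a lower-level driver exporting its busy state so that a stacking
 * driver can back off via blk_lld_busy() instead of dispatching into a
 * congested device.  "struct example_dev" and its fields are
 * hypothetical; only blk_queue_lld_busy() and ->queuedata are real.
 */
#if 0
struct example_dev {
	unsigned int	outstanding;
	unsigned int	queue_depth;
};

static int example_lld_busy(struct request_queue *q)
{
	struct example_dev *dev = q->queuedata;

	return dev->outstanding >= dev->queue_depth;
}

/* during device setup:  blk_queue_lld_busy(q, example_lld_busy); */
#endif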
/**
 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
 * @rq: the clone request to be cleaned up
 *
 * Description:
 *     Free all bios in @rq for a cloned request.
 */
void blk_rq_unprep_clone(struct request *rq)
{
	struct bio *bio;

	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;

		bio_put(bio);
	}
}
EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);

/*
 * Copy attributes of the original request to the clone request.
 * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied.
 */
static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{
	dst->cpu = src->cpu;
	dst->cmd_flags = (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE;
	dst->cmd_type = src->cmd_type;
	dst->__sector = blk_rq_pos(src);
	dst->__data_len = blk_rq_bytes(src);
	dst->nr_phys_segments = src->nr_phys_segments;
	dst->ioprio = src->ioprio;
	dst->extra_len = src->extra_len;
}
/**
 * blk_rq_prep_clone - Helper function to setup clone request
 * @rq:       the request to be setup
 * @rq_src:   original request to be cloned
 * @bs:       bio_set that bios for clone are allocated from
 * @gfp_mask: memory allocation mask for bio
 * @bio_ctr:  setup function to be called for each clone bio.
 *            Returns %0 for success, non %0 for failure.
 * @data:     private data to be passed to @bio_ctr
 *
 * Description:
 *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
 *     The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense)
 *     are not copied, and copying such parts is the caller's responsibility.
 *     Also, pages which the original bios are pointing to are not copied
 *     and the cloned bios just point to the same pages.
 *     So cloned bios must be completed before original bios, which means
 *     the caller must complete @rq before @rq_src.
 */
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		      struct bio_set *bs, gfp_t gfp_mask,
		      int (*bio_ctr)(struct bio *, struct bio *, void *),
		      void *data)
{
	struct bio *bio, *bio_src;

	if (!bs)
		bs = fs_bio_set;

	blk_rq_init(NULL, rq);

	__rq_for_each_bio(bio_src, rq_src) {
		bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs);
		if (!bio)
			goto free_and_out;

		__bio_clone(bio, bio_src);

		if (bio_integrity(bio_src) &&
		    bio_integrity_clone(bio, bio_src, gfp_mask, bs))
			goto free_and_out;

		if (bio_ctr && bio_ctr(bio, bio_src, data))
			goto free_and_out;

		if (rq->bio) {
			rq->biotail->bi_next = bio;
			rq->biotail = bio;
		} else
			rq->bio = rq->biotail = bio;
	}

	__blk_rq_prep_clone(rq, rq_src);

	return 0;

free_and_out:
	if (bio)
		bio_free(bio, bs);
	blk_rq_unprep_clone(rq);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
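
/*
 * Editor's illustration (not part of the original source, compiled out):
 * how a request-based stacking driver might build a clone that shares
 * the original bio pages.  Passing a NULL bio_set falls back to
 * fs_bio_set, and NULL for @bio_ctr/@data skips per-bio setup.
 * "example_setup_clone" is a hypothetical name.
 */
#if 0
static int example_setup_clone(struct request *clone, struct request *rq,
			       gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, NULL, gfp_mask, NULL, NULL);
	if (r)
		return r;	/* -ENOMEM: partially built clones were freed */

	/*
	 * The clone must be completed before @rq; a real driver would
	 * install its own ->end_io hook here to arrange that.
	 */
	return 0;
}
#endif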
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_schedule_delayed_work(struct request_queue *q,
				  struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_schedule_delayed_work);

#define PLUG_MAGIC	0x91827364

/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug:	The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   Tracking blk_plug inside the task_struct will help with auto-flushing the
 *   pending I/O should the task end up blocking between blk_start_plug() and
 *   blk_finish_plug().  This is important from a performance perspective, but
 *   also ensures that we don't deadlock.  For instance, if the task is blocking
 *   for a memory allocation, memory reclaim could end up wanting to free a
 *   page belonging to that request that is currently residing in our private
 *   plug.  By flushing the pending I/O when the process goes to sleep, we avoid
 *   this kind of deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
	struct task_struct *tsk = current;

	plug->magic = PLUG_MAGIC;
	INIT_LIST_HEAD(&plug->list);
	INIT_LIST_HEAD(&plug->cb_list);
	plug->should_sort = 0;

	/*
	 * If this is a nested plug, don't actually assign it.  It will be
	 * flushed on its own.
	 */
	if (!tsk->plug) {
		/*
		 * Store ordering should not be needed here, since a potential
		 * preempt will imply a full memory barrier
		 */
		tsk->plug = plug;
	}
}
EXPORT_SYMBOL(blk_start_plug);

static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return !(rqa->q <= rqb->q);
}

/*
 * If 'from_schedule' is true, then postpone the dispatch of requests
 * until a safe kblockd context.  We do this to avoid accidental big
 * additional stack usage in driver dispatch, in places where the original
 * plugger did not intend it.
 */
static void queue_unplugged(struct request_queue *q, unsigned int depth,
			    bool from_schedule)
	__releases(q->queue_lock)
{
	trace_block_unplug(q, depth, !from_schedule);

	/*
	 * Don't mess with a dead queue.
	 */
	if (unlikely(blk_queue_dead(q))) {
		spin_unlock(q->queue_lock);
		return;
	}

	/*
	 * If we are punting this to kblockd, then we can safely drop
	 * the queue_lock before waking kblockd (which needs to take
	 * this lock).
	 */
	if (from_schedule) {
		spin_unlock(q->queue_lock);
		blk_run_queue_async(q);
	} else {
		__blk_run_queue(q);
		spin_unlock(q->queue_lock);
	}
}

static void flush_plug_callbacks(struct blk_plug *plug)
{
	LIST_HEAD(callbacks);

	if (list_empty(&plug->cb_list))
		return;

	list_splice_init(&plug->cb_list, &callbacks);

	while (!list_empty(&callbacks)) {
		struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
		list_del(&cb->list);
		cb->callback(cb);
	}
}

void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	struct request_queue *q;
	unsigned long flags;
	struct request *rq;
	LIST_HEAD(list);
	unsigned int depth;

	BUG_ON(plug->magic != PLUG_MAGIC);

	flush_plug_callbacks(plug);
	if (list_empty(&plug->list))
		return;

	list_splice_init(&plug->list, &list);

	if (plug->should_sort) {
		list_sort(NULL, &list, plug_rq_cmp);
		plug->should_sort = 0;
	}

	q = NULL;
	depth = 0;

	/*
	 * Save and disable interrupts here, to avoid doing it for every
	 * queue lock we have to take.
	 */
	local_irq_save(flags);
	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
		list_del_init(&rq->queuelist);
		BUG_ON(!rq->q);
		if (rq->q != q) {
			/*
			 * This drops the queue lock
			 */
			if (q)
				queue_unplugged(q, depth, from_schedule);
			q = rq->q;
			depth = 0;
			spin_lock(q->queue_lock);
		}

		/*
		 * Short-circuit if @q is dead
		 */
		if (unlikely(blk_queue_dead(q))) {
			__blk_end_request_all(rq, -ENODEV);
			continue;
		}

		/*
		 * rq is already accounted, so use raw insert
		 */
		if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA))
			__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
		else
			__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);

		depth++;
	}

	/*
	 * This drops the queue lock
	 */
	if (q)
		queue_unplugged(q, depth, from_schedule);

	local_irq_restore(flags);
}

void blk_finish_plug(struct blk_plug *plug)
{
	blk_flush_plug_list(plug, false);

	if (plug == current->plug)
		current->plug = NULL;
}
EXPORT_SYMBOL(blk_finish_plug);
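
/*
 * Editor's illustration (not part of the original source, compiled out):
 * a submission path that batches several bios under one on-stack plug.
 * Requests queued between blk_start_plug() and blk_finish_plug() sit in
 * the task's plug list and are dispatched as one sorted batch, and the
 * scheduler flushes the plug automatically if the task sleeps.
 * "example_submit_batch" is a hypothetical helper.
 */
#if 0
static void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]->bi_rw, bios[i]);
	blk_finish_plug(&plug);
}
#endif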
int __init blk_dev_init(void)
{
	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
			sizeof(((struct request *)0)->cmd_flags));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	request_cachep = kmem_cache_create("blkdev_requests",
			sizeof(struct request), 0, SLAB_PANIC, NULL);

	blk_requestq_cachep = kmem_cache_create("blkdev_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	return 0;
}