/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

DEFINE_IDA(blk_queue_ida);

/*
 * For the allocated request tables
 */
struct kmem_cache *request_cachep = NULL;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

static void blk_clear_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
	clear_wb_congested(rl->blkg->wb_congested, sync);
#else
	/*
	 * If !CGROUP_WRITEBACK, all blkg's map to bdi->wb and we shouldn't
	 * flip its congestion state for events on other blkcgs.
	 */
	if (rl == &rl->q->root_rl)
		clear_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
#endif
}

static void blk_set_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
	set_wb_congested(rl->blkg->wb_congested, sync);
#else
	/* see blk_clear_congested() */
	if (rl == &rl->q->root_rl)
		set_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
#endif
}

void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}
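
/*
 * Worked example (illustrative, assuming the default nr_requests of 128,
 * i.e. BLKDEV_MAX_RQ): the thresholds computed above work out to
 *
 *	nr_congestion_on  = 128 - 128/8 + 1          = 113
 *	nr_congestion_off = 128 - 128/8 - 128/16 - 1 = 103
 *
 * so a request list is flagged congested once it holds 113 or more
 * requests, and the flag is only cleared again when the count drops
 * below 103, giving the congestion state some hysteresis.
 */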

/**
 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
 * @bdev:	device
 *
 * Locates the passed device's request queue and returns the address of its
 * backing_dev_info. This function can only be called if @bdev is opened
 * and the return value is never NULL.
 */
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	return &q->backing_dev_info;
}
EXPORT_SYMBOL(blk_get_backing_dev_info);

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->timeout_list);
	rq->cpu = -1;
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->cmd = rq->__cmd;
	rq->cmd_len = BLK_MAX_CDB;
	rq->tag = -1;
	rq->start_time = jiffies;
	set_start_time_ns(rq);
	rq->part = NULL;
}
EXPORT_SYMBOL(blk_rq_init);

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, int error)
{
	if (error)
		bio->bi_error = error;

	if (unlikely(rq->cmd_flags & REQ_QUIET))
		bio_set_flag(bio, BIO_QUIET);

	bio_advance(bio, nbytes);

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
		bio_endio(bio);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	int bit;

	printk(KERN_INFO "%s: dev %s: type=%x, flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		printk(KERN_INFO "  cdb: ");
		for (bit = 0; bit < BLK_MAX_CDB; bit++)
			printk("%02x ", rq->cmd[bit]);
		printk("\n");
	}
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void blk_delay_work(struct work_struct *work)
{
	struct request_queue *q;

	q = container_of(work, struct request_queue, delay_work.work);
	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_delay_queue - restart queueing after defined interval
 * @q:		The &struct request_queue in question
 * @msecs:	Delay in msecs
 *
 * Description:
 *   Sometimes queueing needs to be postponed for a little while, to allow
 *   resources to come back. This function will make sure that queueing is
 *   restarted around the specified time. Queue lock must be held.
 */
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
	if (likely(!blk_queue_dead(q)))
		queue_delayed_work(kblockd_workqueue, &q->delay_work,
				   msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);
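
/*
 * Example (illustrative sketch): a legacy request_fn based driver might use
 * blk_delay_queue() to back off briefly when its hardware cannot accept more
 * work.  my_hw_can_accept() and my_hw_issue() are placeholders for
 * driver-specific logic, not functions defined in this file.
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			if (!my_hw_can_accept(rq)) {
 *				blk_requeue_request(q, rq);
 *				blk_delay_queue(q, 3);	retry in ~3 ms
 *				return;
 *			}
 *			my_hw_issue(rq);
 *		}
 *	}
 */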

/**
 * blk_start_queue_async - asynchronously restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue_async() will clear the stop flag on the queue, and
 *   ensure that the request_fn for the queue is run from an async
 *   context.
 **/
void blk_start_queue_async(struct request_queue *q)
{
	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	blk_run_queue_async(q);
}
EXPORT_SYMBOL(blk_start_queue_async);

/**
 * blk_start_queue - restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue(). Queue lock must be held.
 **/
void blk_start_queue(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	__blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations. Queue lock must be held.
 **/
void blk_stop_queue(struct request_queue *q)
{
	cancel_delayed_work(&q->delay_work);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);

	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;
		int i;

		queue_for_each_hw_ctx(q, hctx, i) {
			cancel_delayed_work_sync(&hctx->run_work);
			cancel_delayed_work_sync(&hctx->delay_work);
		}
	} else {
		cancel_delayed_work_sync(&q->delay_work);
	}
}
EXPORT_SYMBOL(blk_sync_queue);
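
/*
 * Example (illustrative sketch): the typical stop/start pairing described
 * above.  hw_queue_full() is a placeholder for driver-specific state.
 *
 *	Inside the request_fn, with q->queue_lock held:
 *
 *		if (hw_queue_full(dev)) {
 *			blk_requeue_request(q, rq);
 *			blk_stop_queue(q);
 *			return;
 *		}
 *
 *	Later, e.g. from the command-completion interrupt handler:
 *
 *		spin_lock_irqsave(q->queue_lock, flags);
 *		blk_start_queue(q);
 *		spin_unlock_irqrestore(q->queue_lock, flags);
 */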

/**
 * __blk_run_queue_uncond - run a queue whether or not it has been stopped
 * @q:	The queue to run
 *
 * Description:
 *    Invoke request handling on a queue if there are any pending requests.
 *    May be used to restart request handling after a request has completed.
 *    This variant runs the queue whether or not the queue has been
 *    stopped. Must be called with the queue lock held and interrupts
 *    disabled. See also @blk_run_queue.
 */
inline void __blk_run_queue_uncond(struct request_queue *q)
{
	if (unlikely(blk_queue_dead(q)))
		return;

	/*
	 * Some request_fn implementations, e.g. scsi_request_fn(), unlock
	 * the queue lock internally. As a result multiple threads may be
	 * running such a request function concurrently. Keep track of the
	 * number of active request_fn invocations such that blk_drain_queue()
	 * can wait until all these request_fn calls have finished.
	 */
	q->request_fn_active++;
	q->request_fn(q);
	q->request_fn_active--;
}
EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);

/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 *
 * Description:
 *    See @blk_run_queue. This variant must be called with the queue lock
 *    held and interrupts disabled.
 */
void __blk_run_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;

	__blk_run_queue_uncond(q);
}
EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q:	The queue to run
 *
 * Description:
 *    Tells kblockd to perform the equivalent of @blk_run_queue on our
 *    behalf. The caller must hold the queue lock.
 */
void blk_run_queue_async(struct request_queue *q)
{
	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
EXPORT_SYMBOL(blk_run_queue_async);

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on this queue, if it has pending work to do.
 *    May be used to restart queueing when a request has completed.
 */
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);

void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);
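
/*
 * Example (illustrative): which "run the queue" variant a caller picks
 * depends on the locking context it is already in.
 *
 *	Without the queue lock held (e.g. from process or workqueue context):
 *		blk_run_queue(q);
 *
 *	With q->queue_lock already held, deferring to kblockd instead of
 *	recursing into the request_fn:
 *		blk_run_queue_async(q);
 *
 *	With the lock held, interrupts disabled, and recursion acceptable:
 *		__blk_run_queue(q);
 */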

/**
 * __blk_drain_queue - drain requests from request_queue
 * @q: queue to drain
 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
 *
 * Drain requests from @q.  If @drain_all is set, all requests are drained.
 * If not, only ELVPRIV requests are drained.  The caller is responsible
 * for ensuring that no new requests which need to be drained are queued.
 */
static void __blk_drain_queue(struct request_queue *q, bool drain_all)
	__releases(q->queue_lock)
	__acquires(q->queue_lock)
{
	int i;

	lockdep_assert_held(q->queue_lock);

	while (true) {
		bool drain = false;

		/*
		 * The caller might be trying to drain @q before its
		 * elevator is initialized.
		 */
		if (q->elevator)
			elv_drain_elevator(q);

		blkcg_drain_queue(q);

		/*
		 * This function might be called on a queue which failed
		 * driver init after queue creation or is not yet fully
		 * active.  Some drivers (e.g. fd and loop) get unhappy in
		 * such cases.  Kick queue iff dispatch queue has
		 * something on it and @q has request_fn set.
		 */
		if (!list_empty(&q->queue_head) && q->request_fn)
			__blk_run_queue(q);

		drain |= q->nr_rqs_elvpriv;
		drain |= q->request_fn_active;

		/*
		 * Unfortunately, requests are queued at and tracked from
		 * multiple places and there's no single counter which can
		 * be drained.  Check all the queues and counters.
		 */
		if (drain_all) {
			struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
			drain |= !list_empty(&q->queue_head);
			for (i = 0; i < 2; i++) {
				drain |= q->nr_rqs[i];
				drain |= q->in_flight[i];
				if (fq)
					drain |= !list_empty(&fq->flush_queue[i]);
			}
		}

		if (!drain)
			break;

		spin_unlock_irq(q->queue_lock);

		msleep(10);

		spin_lock_irq(q->queue_lock);
	}

	/*
	 * With queue marked dead, any woken up waiter will fail the
	 * allocation path, so the wakeup chaining is lost and we're
	 * left with hung waiters. We need to wake up those waiters.
	 */
	if (q->request_fn) {
		struct request_list *rl;

		blk_queue_for_each_rl(rl, q)
			for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
				wake_up_all(&rl->wait[i]);
	}
}

/**
 * blk_queue_bypass_start - enter queue bypass mode
 * @q: queue of interest
 *
 * In bypass mode, only the dispatch FIFO queue of @q is used.  This
 * function makes @q enter bypass mode and drains all requests which were
 * throttled or issued before.  On return, it's guaranteed that no request
 * is being throttled or has ELVPRIV set and that blk_queue_bypass() returns
 * %true inside queue or RCU read lock.
 */
void blk_queue_bypass_start(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);
	spin_unlock_irq(q->queue_lock);

	/*
	 * Queues start drained.  Skip actual draining till init is
	 * complete.  This avoids lengthy delays during queue init which
	 * can happen many times during boot.
	 */
	if (blk_queue_init_done(q)) {
		spin_lock_irq(q->queue_lock);
		__blk_drain_queue(q, false);
		spin_unlock_irq(q->queue_lock);

		/* ensure blk_queue_bypass() is %true inside RCU read lock */
		synchronize_rcu();
	}
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_start);

/**
 * blk_queue_bypass_end - leave queue bypass mode
 * @q: queue of interest
 *
 * Leave bypass mode and restore the normal queueing behavior.
 */
void blk_queue_bypass_end(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	if (!--q->bypass_depth)
		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
	WARN_ON_ONCE(q->bypass_depth < 0);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_end);

void blk_set_queue_dying(struct request_queue *q)
{
	queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);

	if (q->mq_ops)
		blk_mq_wake_waiters(q);
	else {
		struct request_list *rl;

		blk_queue_for_each_rl(rl, q) {
			if (rl->rq_pool) {
				wake_up(&rl->wait[BLK_RW_SYNC]);
				wake_up(&rl->wait[BLK_RW_ASYNC]);
			}
		}
	}
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 */
void blk_cleanup_queue(struct request_queue *q)
{
	spinlock_t *lock = q->queue_lock;

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	mutex_lock(&q->sysfs_lock);
	blk_set_queue_dying(q);
	spin_lock_irq(lock);

	/*
	 * A dying queue is permanently in bypass mode till released.  Note
	 * that, unlike blk_queue_bypass_start(), we aren't performing
	 * synchronize_rcu() after entering bypass mode to avoid the delay
	 * as some drivers create and destroy a lot of queues while
	 * probing.  This is still safe because blk_release_queue() will be
	 * called only after the queue refcnt drops to zero and nothing,
	 * RCU or not, would be traversing the queue by then.
	 */
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);

	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	queue_flag_set(QUEUE_FLAG_DYING, q);
	spin_unlock_irq(lock);
	mutex_unlock(&q->sysfs_lock);

	/*
	 * Drain all requests queued before DYING marking.  Set the DEAD flag
	 * to prevent q->request_fn() from being invoked after draining has
	 * finished.
	 */
	blk_freeze_queue(q);
	spin_lock_irq(lock);
	if (!q->mq_ops)
		__blk_drain_queue(q, true);
	queue_flag_set(QUEUE_FLAG_DEAD, q);
	spin_unlock_irq(lock);

	/* for synchronous bio-based driver finish in-flight integrity i/o */
	blk_flush_integrity();

	/* @q won't process any more requests, flush async actions */
	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
	blk_sync_queue(q);

	if (q->mq_ops)
		blk_mq_free_queue(q);
	percpu_ref_exit(&q->q_usage_counter);

	spin_lock_irq(lock);
	if (q->queue_lock != &q->__queue_lock)
		q->queue_lock = &q->__queue_lock;
	spin_unlock_irq(lock);

	bdi_unregister(&q->backing_dev_info);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

/* Allocate memory local to the request queue */
static void *alloc_request_struct(gfp_t gfp_mask, void *data)
{
	int nid = (int)(long)data;
	return kmem_cache_alloc_node(request_cachep, gfp_mask, nid);
}

static void free_request_struct(void *element, void *unused)
{
	kmem_cache_free(request_cachep, element);
}

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask)
{
	if (unlikely(rl->rq_pool))
		return 0;

	rl->q = q;
	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, alloc_request_struct,
					  free_request_struct,
					  (void *)(long)q->node, gfp_mask,
					  q->node);
	if (!rl->rq_pool)
		return -ENOMEM;

	return 0;
}

void blk_exit_rl(struct request_list *rl)
{
	if (rl->rq_pool)
		mempool_destroy(rl->rq_pool);
}

struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_alloc_queue);

int blk_queue_enter(struct request_queue *q, gfp_t gfp)
{
	while (true) {
		int ret;

		if (percpu_ref_tryget_live(&q->q_usage_counter))
			return 0;

		if (!gfpflags_allow_blocking(gfp))
			return -EBUSY;

		ret = wait_event_interruptible(q->mq_freeze_wq,
				!atomic_read(&q->mq_freeze_depth) ||
				blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
		if (ret)
			return ret;
	}
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
	struct request_queue *q;
	int err;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
	if (q->id < 0)
		goto fail_q;

	q->bio_split = bioset_create(BIO_POOL_SIZE, 0);
	if (!q->bio_split)
		goto fail_id;

	q->backing_dev_info.ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
	q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
	q->backing_dev_info.name = "block";
	q->node = node_id;

	err = bdi_init(&q->backing_dev_info);
	if (err)
		goto fail_split;

	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
		    laptop_mode_timer_fn, (unsigned long) q);
	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
	INIT_LIST_HEAD(&q->queue_head);
	INIT_LIST_HEAD(&q->timeout_list);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif
	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->sysfs_lock);
	spin_lock_init(&q->__queue_lock);

	/*
	 * By default initialize queue_lock to internal lock and driver can
	 * override it later if need be.
	 */
	q->queue_lock = &q->__queue_lock;

	/*
	 * A queue starts its life with bypass turned on to avoid
	 * unnecessary bypass on/off overhead and nasty surprises during
	 * init.  The initial bypass will be finished when the queue is
	 * registered by blk_register_queue().
	 */
	q->bypass_depth = 1;
	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);

	init_waitqueue_head(&q->mq_freeze_wq);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_bdi;

	if (blkcg_init_queue(q))
		goto fail_ref;

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
	bdi_destroy(&q->backing_dev_info);
fail_split:
	bioset_free(q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue_node);

/**
 * blk_init_queue - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sorts requests and coalesces adjacent requests, then it must
 *    call blk_init_queue().  The function @rfn will be called when there
 *    are requests on the queue that need to be processed.  If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time.  If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will be taken also from interrupt context, so irq
 *    disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or %NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/

struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_init_queue);

struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	struct request_queue *uninit_q, *q;

	uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
	if (!uninit_q)
		return NULL;

	q = blk_init_allocated_queue(uninit_q, rfn, lock);
	if (!q)
		blk_cleanup_queue(uninit_q);

	return q;
}
EXPORT_SYMBOL(blk_init_queue_node);

static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);

struct request_queue *
blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
			 spinlock_t *lock)
{
	if (!q)
		return NULL;

	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, 0);
	if (!q->fq)
		return NULL;

	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
		goto fail;

	q->request_fn		= rfn;
	q->prep_rq_fn		= NULL;
	q->unprep_rq_fn		= NULL;
	q->queue_flags		|= QUEUE_FLAG_DEFAULT;

	/* Override internal queue lock with supplied lock pointer */
	if (lock)
		q->queue_lock	= lock;

	/*
	 * This also sets hw/phys segments, boundary and size
	 */
	blk_queue_make_request(q, blk_queue_bio);

	q->sg_reserved_size = INT_MAX;

	/* Protect q->elevator from elevator_change */
	mutex_lock(&q->sysfs_lock);

	/* init elevator */
	if (elevator_init(q, NULL)) {
		mutex_unlock(&q->sysfs_lock);
		goto fail;
	}

	mutex_unlock(&q->sysfs_lock);

	return q;

fail:
	blk_free_flush_queue(q->fq);
	return NULL;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
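
/*
 * Example (illustrative sketch): a minimal request_fn based driver pairing
 * blk_init_queue() with blk_cleanup_queue() as the Note above requires.
 * my_request_fn, my_lock and my_disk are hypothetical driver state.
 *
 *	On probe:
 *
 *		spin_lock_init(&my_lock);
 *		q = blk_init_queue(my_request_fn, &my_lock);
 *		if (!q)
 *			return -ENOMEM;
 *		blk_queue_logical_block_size(q, 512);
 *		my_disk->queue = q;
 *		add_disk(my_disk);
 *
 *	On removal (e.g. module unload):
 *
 *		del_gendisk(my_disk);
 *		blk_cleanup_queue(q);
 */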

bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

static inline void blk_free_request(struct request_list *rl, struct request *rq)
{
	if (rq->cmd_flags & REQ_ELVPRIV) {
		elv_put_request(rl->q, rq);
		if (rq->elv.icq)
			put_io_context(rq->elv.icq->ioc);
	}

	mempool_free(rq, rl->rq_pool);
}

/*
 * ioc_batching returns true if the ioc is a valid batching request and
 * should be given priority access to a request.
 */
static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc)
		return 0;

	/*
	 * Make sure the process is able to allocate at least 1 request
	 * even if the batch times out, otherwise we could theoretically
	 * lose wakeups.
	 */
	return ioc->nr_batch_requests == q->nr_batching ||
		(ioc->nr_batch_requests > 0
		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}

/*
 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 * will cause the process to be a "batcher" on all queues in the system. This
 * is the behaviour we want though - once it gets a wakeup it should be given
 * a nice run.
 */
static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc || ioc_batching(q, ioc))
		return;

	ioc->nr_batch_requests = q->nr_batching;
	ioc->last_waited = jiffies;
}

static void __freed_request(struct request_list *rl, int sync)
{
	struct request_queue *q = rl->q;

	if (rl->count[sync] < queue_congestion_off_threshold(q))
		blk_clear_congested(rl, sync);

	if (rl->count[sync] + 1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[sync]))
			wake_up(&rl->wait[sync]);

		blk_clear_rl_full(rl, sync);
	}
}

/*
 * A request has just been released.  Account for it, update the full and
 * congestion status, wake up any waiters.  Called under q->queue_lock.
 */
static void freed_request(struct request_list *rl, unsigned int flags)
{
	struct request_queue *q = rl->q;
	int sync = rw_is_sync(flags);

	q->nr_rqs[sync]--;
	rl->count[sync]--;
	if (flags & REQ_ELVPRIV)
		q->nr_rqs_elvpriv--;

	__freed_request(rl, sync);

	if (unlikely(rl->starved[sync ^ 1]))
		__freed_request(rl, sync ^ 1);
}

int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct request_list *rl;
	int on_thresh, off_thresh;

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);
	on_thresh = queue_congestion_on_threshold(q);
	off_thresh = queue_congestion_off_threshold(q);

	blk_queue_for_each_rl(rl, q) {
		if (rl->count[BLK_RW_SYNC] >= on_thresh)
			blk_set_congested(rl, BLK_RW_SYNC);
		else if (rl->count[BLK_RW_SYNC] < off_thresh)
			blk_clear_congested(rl, BLK_RW_SYNC);

		if (rl->count[BLK_RW_ASYNC] >= on_thresh)
			blk_set_congested(rl, BLK_RW_ASYNC);
		else if (rl->count[BLK_RW_ASYNC] < off_thresh)
			blk_clear_congested(rl, BLK_RW_ASYNC);

		if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
			blk_set_rl_full(rl, BLK_RW_SYNC);
		} else {
			blk_clear_rl_full(rl, BLK_RW_SYNC);
			wake_up(&rl->wait[BLK_RW_SYNC]);
		}

		if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
			blk_set_rl_full(rl, BLK_RW_ASYNC);
		} else {
			blk_clear_rl_full(rl, BLK_RW_ASYNC);
			wake_up(&rl->wait[BLK_RW_ASYNC]);
		}
	}

	spin_unlock_irq(q->queue_lock);
	return 0;
}

/*
 * Determine if elevator data should be initialized when allocating the
 * request associated with @bio.
 */
static bool blk_rq_should_init_elevator(struct bio *bio)
{
	if (!bio)
		return true;

	/*
	 * Flush requests do not use the elevator so skip initialization.
	 * This allows a request to share the flush and elevator data.
	 */
	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
		return false;

	return true;
}

/**
 * rq_ioc - determine io_context for request allocation
 * @bio: request being allocated is for this bio (can be %NULL)
 *
 * Determine io_context to use for request allocation for @bio.  May return
 * %NULL if %current->io_context doesn't exist.
 */
static struct io_context *rq_ioc(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio && bio->bi_ioc)
		return bio->bi_ioc;
#endif
	return current->io_context;
}

/**
 * __get_request - get a free request
 * @rl: request list to allocate from
 * @rw_flags: RW and SYNC flags
 * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
 * Get a free request from @q.  This function may fail under memory
 * pressure or if @q is dead.
 *
 * Must be called with @q->queue_lock held and,
 * Returns ERR_PTR on failure, with @q->queue_lock held.
 * Returns request pointer on success, with @q->queue_lock *not held*.
 */
static struct request *__get_request(struct request_list *rl, int rw_flags,
				     struct bio *bio, gfp_t gfp_mask)
{
	struct request_queue *q = rl->q;
	struct request *rq;
	struct elevator_type *et = q->elevator->type;
	struct io_context *ioc = rq_ioc(bio);
	struct io_cq *icq = NULL;
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	int may_queue;

	if (unlikely(blk_queue_dying(q)))
		return ERR_PTR(-ENODEV);

	may_queue = elv_may_queue(q, rw_flags);
	if (may_queue == ELV_MQUEUE_NO)
		goto rq_starved;

	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
		if (rl->count[is_sync]+1 >= q->nr_requests) {
			/*
			 * The queue will fill after this allocation, so set
			 * it as full, and mark this process as "batching".
			 * This process will be allowed to complete a batch of
			 * requests, others will be blocked.
			 */
			if (!blk_rl_full(rl, is_sync)) {
				ioc_set_batching(q, ioc);
				blk_set_rl_full(rl, is_sync);
			} else {
				if (may_queue != ELV_MQUEUE_MUST
						&& !ioc_batching(q, ioc)) {
					/*
					 * The queue is full and the allocating
					 * process is not a "batcher", and not
					 * exempted by the IO scheduler
					 */
					return ERR_PTR(-ENOMEM);
				}
			}
		}
		blk_set_congested(rl, is_sync);
	}

	/*
	 * Only allow batching queuers to allocate up to 50% over the defined
	 * limit of requests, otherwise we could have thousands of requests
	 * allocated with any setting of ->nr_requests
	 */
	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
		return ERR_PTR(-ENOMEM);

	q->nr_rqs[is_sync]++;
	rl->count[is_sync]++;
	rl->starved[is_sync] = 0;

	/*
	 * Decide whether the new request will be managed by elevator.  If
	 * so, mark @rw_flags and increment elvpriv.  Non-zero elvpriv will
	 * prevent the current elevator from being destroyed until the new
	 * request is freed.  This guarantees icq's won't be destroyed and
	 * makes creating new ones safe.
	 *
	 * Also, lookup icq while holding queue_lock.  If it doesn't exist,
	 * it will be created after releasing queue_lock.
	 */
	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
		rw_flags |= REQ_ELVPRIV;
		q->nr_rqs_elvpriv++;
		if (et->icq_cache && ioc)
			icq = ioc_lookup_icq(ioc, q);
	}

	if (blk_queue_io_stat(q))
		rw_flags |= REQ_IO_STAT;
	spin_unlock_irq(q->queue_lock);

	/* allocate and init request */
	rq = mempool_alloc(rl->rq_pool, gfp_mask);
	if (!rq)
		goto fail_alloc;

	blk_rq_init(q, rq);
	blk_rq_set_rl(rq, rl);
	rq->cmd_flags = rw_flags | REQ_ALLOCED;

	/* init elvpriv */
	if (rw_flags & REQ_ELVPRIV) {
		if (unlikely(et->icq_cache && !icq)) {
			if (ioc)
				icq = ioc_create_icq(ioc, q, gfp_mask);
			if (!icq)
				goto fail_elvpriv;
		}

		rq->elv.icq = icq;
		if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
			goto fail_elvpriv;

		/* @rq->elv.icq holds io_context until @rq is freed */
		if (icq)
			get_io_context(icq->ioc);
	}
out:
	/*
	 * ioc may be NULL here, and ioc_batching will be false. That's
	 * OK, if the queue is under the request limit then requests need
	 * not count toward the nr_batch_requests limit. There will always
	 * be some limit enforced by BLK_BATCH_TIME.
	 */
	if (ioc_batching(q, ioc))
		ioc->nr_batch_requests--;

	trace_block_getrq(q, bio, rw_flags & 1);
	return rq;

fail_elvpriv:
	/*
	 * elvpriv init failed.  ioc, icq and elvpriv aren't mempool backed
	 * and may fail indefinitely under memory pressure and thus
	 * shouldn't stall IO.  Treat this request as !elvpriv.  This will
	 * disturb iosched and blkcg but weird is better than dead.
	 */
	printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
			   __func__, dev_name(q->backing_dev_info.dev));

	rq->cmd_flags &= ~REQ_ELVPRIV;
	rq->elv.icq = NULL;

	spin_lock_irq(q->queue_lock);
	q->nr_rqs_elvpriv--;
	spin_unlock_irq(q->queue_lock);
	goto out;

fail_alloc:
	/*
	 * Allocation failed presumably due to memory. Undo anything we
	 * might have messed up.
	 *
	 * Allocating task should really be put onto the front of the wait
	 * queue, but this is pretty rare.
	 */
	spin_lock_irq(q->queue_lock);
	freed_request(rl, rw_flags);

	/*
	 * in the very unlikely event that allocation failed and no
	 * requests for this direction was pending, mark us starved so that
	 * freeing of a request in the other direction will notice
	 * us. another possible fix would be to split the rq mempool into
	 * READ and WRITE
	 */
rq_starved:
	if (unlikely(rl->count[is_sync] == 0))
		rl->starved[is_sync] = 1;
	return ERR_PTR(-ENOMEM);
}

/**
 * get_request - get a free request
 * @q: request_queue to allocate request from
 * @rw_flags: RW and SYNC flags
 * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
 * Get a free request from @q.  If %__GFP_DIRECT_RECLAIM is set in @gfp_mask,
 * this function keeps retrying under memory pressure and fails iff @q is dead.
 *
 * Must be called with @q->queue_lock held and,
 * Returns ERR_PTR on failure, with @q->queue_lock held.
 * Returns request pointer on success, with @q->queue_lock *not held*.
 */
static struct request *get_request(struct request_queue *q, int rw_flags,
				   struct bio *bio, gfp_t gfp_mask)
{
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	DEFINE_WAIT(wait);
	struct request_list *rl;
	struct request *rq;

	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
retry:
	rq = __get_request(rl, rw_flags, bio, gfp_mask);
	if (!IS_ERR(rq))
		return rq;

	if (!gfpflags_allow_blocking(gfp_mask) || unlikely(blk_queue_dying(q))) {
		blk_put_rl(rl);
		return rq;
	}

	/* wait on @rl and retry */
	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
				  TASK_UNINTERRUPTIBLE);

	trace_block_sleeprq(q, bio, rw_flags & 1);

	spin_unlock_irq(q->queue_lock);
	io_schedule();

	/*
	 * After sleeping, we become a "batching" process and will be able
	 * to allocate at least one request, and up to a big batch of them
	 * for a small period time.  See ioc_batching, ioc_set_batching
	 */
	ioc_set_batching(q, current->io_context);

	spin_lock_irq(q->queue_lock);
	finish_wait(&rl->wait[is_sync], &wait);

	goto retry;
}

static struct request *blk_old_get_request(struct request_queue *q, int rw,
		gfp_t gfp_mask)
{
	struct request *rq;

	BUG_ON(rw != READ && rw != WRITE);

	/* create ioc upfront */
	create_io_context(gfp_mask, q->node);

	spin_lock_irq(q->queue_lock);
	rq = get_request(q, rw, NULL, gfp_mask);
	if (IS_ERR(rq))
		spin_unlock_irq(q->queue_lock);
	/* q->queue_lock is unlocked at this point */

	return rq;
}

struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
	if (q->mq_ops)
		return blk_mq_alloc_request(q, rw, gfp_mask, false);
	else
		return blk_old_get_request(q, rw, gfp_mask);
}
EXPORT_SYMBOL(blk_get_request);
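
/*
 * Example (illustrative sketch): a caller might use blk_get_request() to
 * issue a BLOCK_PC style command and wait for it with blk_execute_rq().
 * The SCSI opcode and timeout below are arbitrary illustration values.
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	blk_rq_set_block_pc(rq);
 *	rq->cmd[0] = 0x00;		(TEST UNIT READY)
 *	rq->cmd_len = 6;
 *	rq->timeout = 10 * HZ;
 *
 *	blk_execute_rq(q, NULL, rq, 0);
 *	err = rq->errors ? -EIO : 0;
 *	blk_put_request(rq);
 */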

/**
 * blk_make_request - given a bio, allocate a corresponding struct request.
 * @q: target request queue
 * @bio:  The bio describing the memory mappings that will be submitted for IO.
 *        It may be a chained-bio properly constructed by block/bio layer.
 * @gfp_mask: gfp flags to be used for memory allocation
 *
 * blk_make_request is the parallel of generic_make_request for BLOCK_PC
 * type commands. Where the struct request needs to be further initialized by
 * the caller. It is passed a &struct bio, which describes the memory info of
 * the I/O transfer.
 *
 * The caller of blk_make_request must make sure that bi_io_vec
 * are set to describe the memory buffers, so that bio_data_dir() will return
 * the needed direction of the request. (And all bio's in the passed bio-chain
 * are properly set accordingly)
 *
 * If called under non-sleepable conditions, mapped bio buffers must not
 * need bouncing, by calling the appropriate masked or flagged allocator,
 * suitable for the target device. Otherwise the call to blk_queue_bounce will
 * BUG.
 *
 * WARNING: When allocating/cloning a bio-chain, careful consideration should be
 * given to how you allocate bios. In particular, you cannot use
 * __GFP_DIRECT_RECLAIM for anything but the first bio in the chain. Otherwise
 * you risk waiting for IO completion of a bio that hasn't been submitted yet,
 * thus resulting in a deadlock. Alternatively bios should be allocated using
 * bio_kmalloc() instead of bio_alloc(), as that avoids the mempool deadlock.
 * If possible a big IO should be split into smaller parts when allocation
 * fails. Partial allocation should not be an error, or you risk a live-lock.
 */
struct request *blk_make_request(struct request_queue *q, struct bio *bio,
				 gfp_t gfp_mask)
{
	struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);

	if (IS_ERR(rq))
		return rq;

	blk_rq_set_block_pc(rq);

	for_each_bio(bio) {
		struct bio *bounce_bio = bio;
		int ret;

		blk_queue_bounce(q, &bounce_bio);
		ret = blk_rq_append_bio(q, rq, bounce_bio);
		if (unlikely(ret)) {
			blk_put_request(rq);
			return ERR_PTR(ret);
		}
	}

	return rq;
}
EXPORT_SYMBOL(blk_make_request);

/**
 * blk_rq_set_block_pc - initialize a request to type BLOCK_PC
 * @rq:		request to be initialized
 *
 */
void blk_rq_set_block_pc(struct request *rq)
{
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	memset(rq->__cmd, 0, sizeof(rq->__cmd));
}
EXPORT_SYMBOL(blk_rq_set_block_pc);

/**
 * blk_requeue_request - put a request back on queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 *
 * Description:
 *    Drivers often keep queueing requests until the hardware cannot accept
 *    more, when that condition happens we need to put the request back
 *    on the queue. Must be called with queue lock held.
 */
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
	blk_delete_timer(rq);
	blk_clear_rq_complete(rq);
	trace_block_rq_requeue(q, rq);

	if (rq->cmd_flags & REQ_QUEUED)
		blk_queue_end_tag(q, rq);

	BUG_ON(blk_queued_rq(rq));

	elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);

static void add_acct_request(struct request_queue *q, struct request *rq,
			     int where)
{
	blk_account_io_start(rq, true);
	__elv_add_request(q, rq, where);
}

static void part_round_stats_single(int cpu, struct hd_struct *part,
				    unsigned long now)
{
	int inflight;

	if (now == part->stamp)
		return;

	inflight = part_in_flight(part);
	if (inflight) {
		__part_stat_add(cpu, part, time_in_queue,
				inflight * (now - part->stamp));
		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
	}
	part->stamp = now;
}

/**
 * part_round_stats() - Round off the performance stats on a struct disk_stats.
 * @cpu: cpu number for stats access
 * @part: target partition
 *
 * The average IO queue length and utilisation statistics are maintained
 * by observing the current state of the queue length and the amount of
 * time it has been in this state for.
 *
 * Normally, that accounting is done on IO completion, but that can result
 * in more than a second's worth of IO being accounted for within any one
 * second, leading to >100% utilisation.  To deal with that, we call this
 * function to do a round-off before returning the results when reading
 * /proc/diskstats.  This accounts immediately for all queue usage up to
 * the current jiffies and restarts the counters again.
 */
void part_round_stats(int cpu, struct hd_struct *part)
{
	unsigned long now = jiffies;

	if (part->partno)
		part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
	part_round_stats_single(cpu, part, now);
}
EXPORT_SYMBOL_GPL(part_round_stats);

#ifdef CONFIG_PM
static void blk_pm_put_request(struct request *rq)
{
	if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending)
		pm_runtime_mark_last_busy(rq->q->dev);
}
#else
static inline void blk_pm_put_request(struct request *rq) {}
#endif

/*
 * queue lock must be held
 */
void __blk_put_request(struct request_queue *q, struct request *req)
{
	if (unlikely(!q))
		return;

	if (q->mq_ops) {
		blk_mq_free_request(req);
		return;
	}

	blk_pm_put_request(req);

	elv_completed_request(q, req);

	/* this is a bio leak */
	WARN_ON(req->bio != NULL);

	/*
	 * Request may not have originated from ll_rw_blk. If not,
	 * it didn't come out of our reserved rq pools
	 */
	if (req->cmd_flags & REQ_ALLOCED) {
		unsigned int flags = req->cmd_flags;
		struct request_list *rl = blk_rq_rl(req);

		BUG_ON(!list_empty(&req->queuelist));
		BUG_ON(ELV_ON_HASH(req));

		blk_free_request(rl, req);
		freed_request(rl, flags);
		blk_put_rl(rl);
	}
}
EXPORT_SYMBOL_GPL(__blk_put_request);

void blk_put_request(struct request *req)
{
	struct request_queue *q = req->q;

	if (q->mq_ops)
		blk_mq_free_request(req);
	else {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		__blk_put_request(q, req);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
EXPORT_SYMBOL(blk_put_request);

/**
 * blk_add_request_payload - add a payload to a request
 * @rq: request to update
 * @page: page backing the payload
 * @len: length of the payload.
 *
 * This allows a block driver to add a payload to an already submitted
 * request later on.  The driver needs to take care of freeing the payload
 * itself.
 *
 * Note that this is a quite horrible hack and nothing but handling of
 * discard requests should ever use it.
 */
void blk_add_request_payload(struct request *rq, struct page *page,
		unsigned int len)
{
	struct bio *bio = rq->bio;

	bio->bi_io_vec->bv_page = page;
	bio->bi_io_vec->bv_offset = 0;
	bio->bi_io_vec->bv_len = len;

	bio->bi_iter.bi_size = len;
	bio->bi_vcnt = 1;
	bio->bi_phys_segments = 1;

	rq->__data_len = rq->resid_len = len;
	rq->nr_phys_segments = 1;
}
EXPORT_SYMBOL_GPL(blk_add_request_payload);

bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
			    struct bio *bio)
{
	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;

	if (!ll_back_merge_fn(q, req, bio))
		return false;

	trace_block_bio_backmerge(q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

	blk_account_io_start(req, false);
	return true;
}

bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
			     struct bio *bio)
{
	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;

	if (!ll_front_merge_fn(q, req, bio))
		return false;

	trace_block_bio_frontmerge(q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	bio->bi_next = req->bio;
	req->bio = bio;

	req->__sector = bio->bi_iter.bi_sector;
	req->__data_len += bio->bi_iter.bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

	blk_account_io_start(req, false);
	return true;
}

/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @request_count: out parameter for number of traversed plugged requests
 * @same_queue_rq: pointer to &struct request that gets filled in when
 * another request associated with @q is found on the plug list
 * (optional, may be %NULL)
 *
 * Determine whether @bio being queued on @q can be merged with a request
 * on %current's plugged list.  Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock.  As such it's more of an issuing mechanism
 * than scheduling, and the request, while it may have elvpriv data, is not
 * added on the elevator at this point.  In addition, we don't have
 * reliable access to the elevator outside queue lock.  Only check basic
 * merging parameters without querying the elevator.
 *
 * Caller must ensure !blk_queue_nomerges(q) beforehand.
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
			    unsigned int *request_count,
			    struct request **same_queue_rq)
{
	struct blk_plug *plug;
	struct request *rq;
	bool ret = false;
	struct list_head *plug_list;

	plug = current->plug;
	if (!plug)
		goto out;
	*request_count = 0;

	if (q->mq_ops)
		plug_list = &plug->mq_list;
	else
		plug_list = &plug->list;

	list_for_each_entry_reverse(rq, plug_list, queuelist) {
		int el_ret;

		if (rq->q == q) {
			(*request_count)++;
			/*
			 * Only the blk-mq multiple hardware queues case
			 * checks for an rq in the same queue; there should
			 * be only one such rq in a queue.
			 */
			if (same_queue_rq)
				*same_queue_rq = rq;
		}

		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
			continue;

		el_ret = blk_try_merge(rq, bio);
		if (el_ret == ELEVATOR_BACK_MERGE) {
			ret = bio_attempt_back_merge(q, rq, bio);
			if (ret)
				break;
		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
			ret = bio_attempt_front_merge(q, rq, bio);
			if (ret)
				break;
		}
	}
out:
	return ret;
}

unsigned int blk_plug_queued_count(struct request_queue *q)
{
	struct blk_plug *plug;
	struct request *rq;
	struct list_head *plug_list;
	unsigned int ret = 0;

	plug = current->plug;
	if (!plug)
		goto out;

	if (q->mq_ops)
		plug_list = &plug->mq_list;
	else
		plug_list = &plug->list;

	list_for_each_entry(rq, plug_list, queuelist) {
		if (rq->q == q)
			ret++;
	}
out:
	return ret;
}
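
/*
 * Example (illustrative sketch): plugging as seen from a submitter.  A
 * caller batches several submissions under one plug so that the requests
 * can be merged and issued together when the plug is finished.  bios[] and
 * nr_bios are hypothetical caller state.
 *
 *	struct blk_plug plug;
 *	int i;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_bio(WRITE, bios[i]);
 *	blk_finish_plug(&plug);
 */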

void init_request_from_bio(struct request *req, struct bio *bio)
{
	req->cmd_type = REQ_TYPE_FS;

	req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
	if (bio->bi_rw & REQ_RAHEAD)
		req->cmd_flags |= REQ_FAILFAST_MASK;

	req->errors = 0;
	req->__sector = bio->bi_iter.bi_sector;
	req->ioprio = bio_prio(bio);
	blk_rq_bio_prep(req->q, req, bio);
}

static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
{
	const bool sync = !!(bio->bi_rw & REQ_SYNC);
	struct blk_plug *plug;
	int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
	struct request *req;
	unsigned int request_count = 0;

	/*
	 * low level driver can indicate that it wants pages above a
	 * certain limit bounced to low memory (ie for highmem, or even
	 * ISA dma in theory)
	 */
	blk_queue_bounce(q, &bio);

	blk_queue_split(q, &bio, q->bio_split);

	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		bio->bi_error = -EIO;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
		spin_lock_irq(q->queue_lock);
		where = ELEVATOR_INSERT_FLUSH;
		goto get_rq;
	}

	/*
	 * Check if we can merge with the plugged list before grabbing
	 * any locks.
	 */
	if (!blk_queue_nomerges(q)) {
		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
			return BLK_QC_T_NONE;
	} else
		request_count = blk_plug_queued_count(q);

	spin_lock_irq(q->queue_lock);

	el_ret = elv_merge(q, &req, bio);
	if (el_ret == ELEVATOR_BACK_MERGE) {
		if (bio_attempt_back_merge(q, req, bio)) {
			elv_bio_merged(q, req, bio);
			if (!attempt_back_merge(q, req))
				elv_merged_request(q, req, el_ret);
			goto out_unlock;
		}
	} else if (el_ret == ELEVATOR_FRONT_MERGE) {
		if (bio_attempt_front_merge(q, req, bio)) {
			elv_bio_merged(q, req, bio);
			if (!attempt_front_merge(q, req))
				elv_merged_request(q, req, el_ret);
			goto out_unlock;
		}
	}

get_rq:
	/*
	 * This sync check and mask will be re-done in init_request_from_bio(),
	 * but we need to set it earlier to expose the sync flag to the
	 * rq allocator and io schedulers.
	 */
	rw_flags = bio_data_dir(bio);
	if (sync)
		rw_flags |= REQ_SYNC;

	/*
	 * Grab a free request. This might sleep but can not fail.
	 * Returns with the queue unlocked.
	 */
	req = get_request(q, rw_flags, bio, GFP_NOIO);
	if (IS_ERR(req)) {
		bio->bi_error = PTR_ERR(req);
		bio_endio(bio);
		goto out_unlock;
	}

	/*
	 * After dropping the lock and possibly sleeping here, our request
	 * may now be mergeable after it had proven unmergeable (above).
	 * We don't worry about that case for efficiency. It won't happen
	 * often, and the elevators are able to handle it.
	 */
	init_request_from_bio(req, bio);

	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
		req->cpu = raw_smp_processor_id();

	plug = current->plug;
	if (plug) {
		/*
		 * If this is the first request added after a plug, fire
1795 */ 1796 if (!request_count) 1797 trace_block_plug(q); 1798 else { 1799 if (request_count >= BLK_MAX_REQUEST_COUNT) { 1800 blk_flush_plug_list(plug, false); 1801 trace_block_plug(q); 1802 } 1803 } 1804 list_add_tail(&req->queuelist, &plug->list); 1805 blk_account_io_start(req, true); 1806 } else { 1807 spin_lock_irq(q->queue_lock); 1808 add_acct_request(q, req, where); 1809 __blk_run_queue(q); 1810 out_unlock: 1811 spin_unlock_irq(q->queue_lock); 1812 } 1813 1814 return BLK_QC_T_NONE; 1815 } 1816 1817 /* 1818 * If bio->bi_dev is a partition, remap the location 1819 */ 1820 static inline void blk_partition_remap(struct bio *bio) 1821 { 1822 struct block_device *bdev = bio->bi_bdev; 1823 1824 if (bio_sectors(bio) && bdev != bdev->bd_contains) { 1825 struct hd_struct *p = bdev->bd_part; 1826 1827 bio->bi_iter.bi_sector += p->start_sect; 1828 bio->bi_bdev = bdev->bd_contains; 1829 1830 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio, 1831 bdev->bd_dev, 1832 bio->bi_iter.bi_sector - p->start_sect); 1833 } 1834 } 1835 1836 static void handle_bad_sector(struct bio *bio) 1837 { 1838 char b[BDEVNAME_SIZE]; 1839 1840 printk(KERN_INFO "attempt to access beyond end of device\n"); 1841 printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n", 1842 bdevname(bio->bi_bdev, b), 1843 bio->bi_rw, 1844 (unsigned long long)bio_end_sector(bio), 1845 (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9)); 1846 } 1847 1848 #ifdef CONFIG_FAIL_MAKE_REQUEST 1849 1850 static DECLARE_FAULT_ATTR(fail_make_request); 1851 1852 static int __init setup_fail_make_request(char *str) 1853 { 1854 return setup_fault_attr(&fail_make_request, str); 1855 } 1856 __setup("fail_make_request=", setup_fail_make_request); 1857 1858 static bool should_fail_request(struct hd_struct *part, unsigned int bytes) 1859 { 1860 return part->make_it_fail && should_fail(&fail_make_request, bytes); 1861 } 1862 1863 static int __init fail_make_request_debugfs(void) 1864 { 1865 struct dentry *dir = fault_create_debugfs_attr("fail_make_request", 1866 NULL, &fail_make_request); 1867 1868 return PTR_ERR_OR_ZERO(dir); 1869 } 1870 1871 late_initcall(fail_make_request_debugfs); 1872 1873 #else /* CONFIG_FAIL_MAKE_REQUEST */ 1874 1875 static inline bool should_fail_request(struct hd_struct *part, 1876 unsigned int bytes) 1877 { 1878 return false; 1879 } 1880 1881 #endif /* CONFIG_FAIL_MAKE_REQUEST */ 1882 1883 /* 1884 * Check whether this bio extends beyond the end of the device. 1885 */ 1886 static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) 1887 { 1888 sector_t maxsector; 1889 1890 if (!nr_sectors) 1891 return 0; 1892 1893 /* Test device or partition size, when known. */ 1894 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; 1895 if (maxsector) { 1896 sector_t sector = bio->bi_iter.bi_sector; 1897 1898 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { 1899 /* 1900 * This may well happen - the kernel calls bread() 1901 * without checking the size of the device, e.g., when 1902 * mounting a device. 
1903 */ 1904 handle_bad_sector(bio); 1905 return 1; 1906 } 1907 } 1908 1909 return 0; 1910 } 1911 1912 static noinline_for_stack bool 1913 generic_make_request_checks(struct bio *bio) 1914 { 1915 struct request_queue *q; 1916 int nr_sectors = bio_sectors(bio); 1917 int err = -EIO; 1918 char b[BDEVNAME_SIZE]; 1919 struct hd_struct *part; 1920 1921 might_sleep(); 1922 1923 if (bio_check_eod(bio, nr_sectors)) 1924 goto end_io; 1925 1926 q = bdev_get_queue(bio->bi_bdev); 1927 if (unlikely(!q)) { 1928 printk(KERN_ERR 1929 "generic_make_request: Trying to access " 1930 "nonexistent block-device %s (%Lu)\n", 1931 bdevname(bio->bi_bdev, b), 1932 (long long) bio->bi_iter.bi_sector); 1933 goto end_io; 1934 } 1935 1936 part = bio->bi_bdev->bd_part; 1937 if (should_fail_request(part, bio->bi_iter.bi_size) || 1938 should_fail_request(&part_to_disk(part)->part0, 1939 bio->bi_iter.bi_size)) 1940 goto end_io; 1941 1942 /* 1943 * If this device has partitions, remap block n 1944 * of partition p to block n+start(p) of the disk. 1945 */ 1946 blk_partition_remap(bio); 1947 1948 if (bio_check_eod(bio, nr_sectors)) 1949 goto end_io; 1950 1951 /* 1952 * Filter flush bio's early so that make_request based 1953 * drivers without flush support don't have to worry 1954 * about them. 1955 */ 1956 if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) { 1957 bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA); 1958 if (!nr_sectors) { 1959 err = 0; 1960 goto end_io; 1961 } 1962 } 1963 1964 if ((bio->bi_rw & REQ_DISCARD) && 1965 (!blk_queue_discard(q) || 1966 ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) { 1967 err = -EOPNOTSUPP; 1968 goto end_io; 1969 } 1970 1971 if (bio->bi_rw & REQ_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) { 1972 err = -EOPNOTSUPP; 1973 goto end_io; 1974 } 1975 1976 /* 1977 * Various block parts want %current->io_context and lazy ioc 1978 * allocation ends up trading a lot of pain for a small amount of 1979 * memory. Just allocate it upfront. This may fail and block 1980 * layer knows how to live with it. 1981 */ 1982 create_io_context(GFP_ATOMIC, q->node); 1983 1984 if (!blkcg_bio_issue_check(q, bio)) 1985 return false; 1986 1987 trace_block_bio_queue(q, bio); 1988 return true; 1989 1990 end_io: 1991 bio->bi_error = err; 1992 bio_endio(bio); 1993 return false; 1994 } 1995 1996 /** 1997 * generic_make_request - hand a buffer to its device driver for I/O 1998 * @bio: The bio describing the location in memory and on the device. 1999 * 2000 * generic_make_request() is used to make I/O requests of block 2001 * devices. It is passed a &struct bio, which describes the I/O that needs 2002 * to be done. 2003 * 2004 * generic_make_request() does not return any status. The 2005 * success/failure status of the request, along with notification of 2006 * completion, is delivered asynchronously through the bio->bi_end_io 2007 * function described (one day) else where. 2008 * 2009 * The caller of generic_make_request must make sure that bi_io_vec 2010 * are set to describe the memory buffer, and that bi_dev and bi_sector are 2011 * set to describe the device address, and the 2012 * bi_end_io and optionally bi_private are set to describe how 2013 * completion notification should be signaled. 2014 * 2015 * generic_make_request and the drivers it calls may use bi_next if this 2016 * bio happens to be merged with someone else, and may resubmit the bio to 2017 * a lower device by calling into generic_make_request recursively, which 2018 * means the bio should NOT be touched after the call to ->make_request_fn. 
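 *
 * A minimal submission sketch (illustrative only, not taken from this file;
 * the end_io handler and the surrounding variables are hypothetical):
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_iter.bi_sector = first_sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;
 *	bio->bi_private = my_context;
 *	bio->bi_rw = WRITE;
 *	generic_make_request(bio);
 *
 * Completion is then reported by calling my_end_io(bio), possibly from
 * interrupt context.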
2019 */ 2020 blk_qc_t generic_make_request(struct bio *bio) 2021 { 2022 struct bio_list bio_list_on_stack; 2023 blk_qc_t ret = BLK_QC_T_NONE; 2024 2025 if (!generic_make_request_checks(bio)) 2026 goto out; 2027 2028 /* 2029 * We only want one ->make_request_fn to be active at a time, else 2030 * stack usage with stacked devices could be a problem. So use 2031 * current->bio_list to keep a list of requests submitted by a 2032 * make_request_fn function. current->bio_list is also used as a 2033 * flag to say if generic_make_request is currently active in this 2034 * task or not. If it is NULL, then no make_request is active. If 2035 * it is non-NULL, then a make_request is active, and new requests 2036 * should be added at the tail. 2037 */ 2038 if (current->bio_list) { 2039 bio_list_add(current->bio_list, bio); 2040 goto out; 2041 } 2042 2043 /* The following loop may be a bit non-obvious, and so deserves some 2044 * explanation. 2045 * Before entering the loop, bio->bi_next is NULL (as all callers 2046 * ensure that) so we have a list with a single bio. 2047 * We pretend that we have just taken it off a longer list, so 2048 * we assign bio_list to a pointer to the bio_list_on_stack, 2049 * thus initialising the bio_list of new bios to be 2050 * added. ->make_request() may indeed add some more bios 2051 * through a recursive call to generic_make_request. If it 2052 * did, we find a non-NULL value in bio_list and re-enter the loop 2053 * from the top. In this case we really did just take the bio 2054 * off the top of the list (no pretending) and so remove it from 2055 * bio_list, and call into ->make_request() again. 2056 */ 2057 BUG_ON(bio->bi_next); 2058 bio_list_init(&bio_list_on_stack); 2059 current->bio_list = &bio_list_on_stack; 2060 do { 2061 struct request_queue *q = bdev_get_queue(bio->bi_bdev); 2062 2063 if (likely(blk_queue_enter(q, __GFP_DIRECT_RECLAIM) == 0)) { 2064 2065 ret = q->make_request_fn(q, bio); 2066 2067 blk_queue_exit(q); 2068 2069 bio = bio_list_pop(current->bio_list); 2070 } else { 2071 struct bio *bio_next = bio_list_pop(current->bio_list); 2072 2073 bio_io_error(bio); 2074 bio = bio_next; 2075 } 2076 } while (bio); 2077 current->bio_list = NULL; /* deactivate */ 2078 2079 out: 2080 return ret; 2081 } 2082 EXPORT_SYMBOL(generic_make_request); 2083 2084 /** 2085 * submit_bio - submit a bio to the block device layer for I/O 2086 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead) 2087 * @bio: The &struct bio which describes the I/O 2088 * 2089 * submit_bio() is very similar in purpose to generic_make_request(), and 2090 * uses that function to do most of the work. Both are fairly rough 2091 * interfaces; @bio must be set up and ready for I/O. 2092 * 2093 */ 2094 blk_qc_t submit_bio(int rw, struct bio *bio) 2095 { 2096 bio->bi_rw |= rw; 2097 2098 /* 2099 * If it's a regular read/write or a barrier with data attached, 2100 * go through the normal accounting stuff before submission. 2101 */ 2102 if (bio_has_data(bio)) { 2103 unsigned int count; 2104 2105 if (unlikely(rw & REQ_WRITE_SAME)) 2106 count = bdev_logical_block_size(bio->bi_bdev) >> 9; 2107 else 2108 count = bio_sectors(bio); 2109 2110 if (rw & WRITE) { 2111 count_vm_events(PGPGOUT, count); 2112 } else { 2113 task_io_account_read(bio->bi_iter.bi_size); 2114 count_vm_events(PGPGIN, count); 2115 } 2116 2117 if (unlikely(block_dump)) { 2118 char b[BDEVNAME_SIZE]; 2119 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n", 2120 current->comm, task_pid_nr(current), 2121 (rw & WRITE) ?
"WRITE" : "READ", 2122 (unsigned long long)bio->bi_iter.bi_sector, 2123 bdevname(bio->bi_bdev, b), 2124 count); 2125 } 2126 } 2127 2128 return generic_make_request(bio); 2129 } 2130 EXPORT_SYMBOL(submit_bio); 2131 2132 /** 2133 * blk_cloned_rq_check_limits - Helper function to check a cloned request 2134 * against the new queue limits 2135 * @q: the queue 2136 * @rq: the request being checked 2137 * 2138 * Description: 2139 * @rq may have been made based on weaker limitations of upper-level queues 2140 * in request stacking drivers, and it may violate the limitation of @q. 2141 * Since the block layer and the underlying device driver trust @rq 2142 * after it is inserted to @q, it should be checked against @q before 2143 * the insertion using this generic function. 2144 * 2145 * Request stacking drivers like request-based dm may change the queue 2146 * limits when retrying requests on other queues. Those requests need 2147 * to be checked against the new queue limits again during dispatch. 2148 */ 2149 static int blk_cloned_rq_check_limits(struct request_queue *q, 2150 struct request *rq) 2151 { 2152 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) { 2153 printk(KERN_ERR "%s: over max size limit.\n", __func__); 2154 return -EIO; 2155 } 2156 2157 /* 2158 * queue's settings related to segment counting like q->bounce_pfn 2159 * may differ from those of other stacking queues. 2160 * Recalculate it to check the request correctly on this queue's 2161 * limitation. 2162 */ 2163 blk_recalc_rq_segments(rq); 2164 if (rq->nr_phys_segments > queue_max_segments(q)) { 2165 printk(KERN_ERR "%s: over max segments limit.\n", __func__); 2166 return -EIO; 2167 } 2168 2169 return 0; 2170 } 2171 2172 /** 2173 * blk_insert_cloned_request - Helper for stacking drivers to submit a request 2174 * @q: the queue to submit the request 2175 * @rq: the request being queued 2176 */ 2177 int blk_insert_cloned_request(struct request_queue *q, struct request *rq) 2178 { 2179 unsigned long flags; 2180 int where = ELEVATOR_INSERT_BACK; 2181 2182 if (blk_cloned_rq_check_limits(q, rq)) 2183 return -EIO; 2184 2185 if (rq->rq_disk && 2186 should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq))) 2187 return -EIO; 2188 2189 if (q->mq_ops) { 2190 if (blk_queue_io_stat(q)) 2191 blk_account_io_start(rq, true); 2192 blk_mq_insert_request(rq, false, true, true); 2193 return 0; 2194 } 2195 2196 spin_lock_irqsave(q->queue_lock, flags); 2197 if (unlikely(blk_queue_dying(q))) { 2198 spin_unlock_irqrestore(q->queue_lock, flags); 2199 return -ENODEV; 2200 } 2201 2202 /* 2203 * The request being submitted must be dequeued before calling this function 2204 * because it will be linked to another request_queue 2205 */ 2206 BUG_ON(blk_queued_rq(rq)); 2207 2208 if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA)) 2209 where = ELEVATOR_INSERT_FLUSH; 2210 2211 add_acct_request(q, rq, where); 2212 if (where == ELEVATOR_INSERT_FLUSH) 2213 __blk_run_queue(q); 2214 spin_unlock_irqrestore(q->queue_lock, flags); 2215 2216 return 0; 2217 } 2218 EXPORT_SYMBOL_GPL(blk_insert_cloned_request); 2219 2220 /** 2221 * blk_rq_err_bytes - determine number of bytes till the next failure boundary 2222 * @rq: request to examine 2223 * 2224 * Description: 2225 * A request could be a merge of IOs which require different failure 2226 * handling. This function determines the number of bytes which 2227 * can be failed from the beginning of the request without 2228 * crossing into areas which need to be retried further. 2229 * 2230 * Return: 2231 * The number of bytes to fail.
2232 * 2233 * Context: 2234 * queue_lock must be held. 2235 */ 2236 unsigned int blk_rq_err_bytes(const struct request *rq) 2237 { 2238 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK; 2239 unsigned int bytes = 0; 2240 struct bio *bio; 2241 2242 if (!(rq->cmd_flags & REQ_MIXED_MERGE)) 2243 return blk_rq_bytes(rq); 2244 2245 /* 2246 * Currently the only 'mixing' which can happen is between 2247 * different fastfail types. We can safely fail portions 2248 * which have all the failfast bits that the first one has - 2249 * the ones which are at least as eager to fail as the first 2250 * one. 2251 */ 2252 for (bio = rq->bio; bio; bio = bio->bi_next) { 2253 if ((bio->bi_rw & ff) != ff) 2254 break; 2255 bytes += bio->bi_iter.bi_size; 2256 } 2257 2258 /* this could lead to infinite loop */ 2259 BUG_ON(blk_rq_bytes(rq) && !bytes); 2260 return bytes; 2261 } 2262 EXPORT_SYMBOL_GPL(blk_rq_err_bytes); 2263 2264 void blk_account_io_completion(struct request *req, unsigned int bytes) 2265 { 2266 if (blk_do_io_stat(req)) { 2267 const int rw = rq_data_dir(req); 2268 struct hd_struct *part; 2269 int cpu; 2270 2271 cpu = part_stat_lock(); 2272 part = req->part; 2273 part_stat_add(cpu, part, sectors[rw], bytes >> 9); 2274 part_stat_unlock(); 2275 } 2276 } 2277 2278 void blk_account_io_done(struct request *req) 2279 { 2280 /* 2281 * Account IO completion. flush_rq isn't accounted as a 2282 * normal IO on queueing nor completion. Accounting the 2283 * containing request is enough. 2284 */ 2285 if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) { 2286 unsigned long duration = jiffies - req->start_time; 2287 const int rw = rq_data_dir(req); 2288 struct hd_struct *part; 2289 int cpu; 2290 2291 cpu = part_stat_lock(); 2292 part = req->part; 2293 2294 part_stat_inc(cpu, part, ios[rw]); 2295 part_stat_add(cpu, part, ticks[rw], duration); 2296 part_round_stats(cpu, part); 2297 part_dec_in_flight(part, rw); 2298 2299 hd_struct_put(part); 2300 part_stat_unlock(); 2301 } 2302 } 2303 2304 #ifdef CONFIG_PM 2305 /* 2306 * Don't process normal requests when queue is suspended 2307 * or in the process of suspending/resuming 2308 */ 2309 static struct request *blk_pm_peek_request(struct request_queue *q, 2310 struct request *rq) 2311 { 2312 if (q->dev && (q->rpm_status == RPM_SUSPENDED || 2313 (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM)))) 2314 return NULL; 2315 else 2316 return rq; 2317 } 2318 #else 2319 static inline struct request *blk_pm_peek_request(struct request_queue *q, 2320 struct request *rq) 2321 { 2322 return rq; 2323 } 2324 #endif 2325 2326 void blk_account_io_start(struct request *rq, bool new_io) 2327 { 2328 struct hd_struct *part; 2329 int rw = rq_data_dir(rq); 2330 int cpu; 2331 2332 if (!blk_do_io_stat(rq)) 2333 return; 2334 2335 cpu = part_stat_lock(); 2336 2337 if (!new_io) { 2338 part = rq->part; 2339 part_stat_inc(cpu, part, merges[rw]); 2340 } else { 2341 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); 2342 if (!hd_struct_try_get(part)) { 2343 /* 2344 * The partition is already being removed, 2345 * the request will be accounted on the disk only 2346 * 2347 * We take a reference on disk->part0 although that 2348 * partition will never be deleted, so we can treat 2349 * it as any other partition. 
2350 */ 2351 part = &rq->rq_disk->part0; 2352 hd_struct_get(part); 2353 } 2354 part_round_stats(cpu, part); 2355 part_inc_in_flight(part, rw); 2356 rq->part = part; 2357 } 2358 2359 part_stat_unlock(); 2360 } 2361 2362 /** 2363 * blk_peek_request - peek at the top of a request queue 2364 * @q: request queue to peek at 2365 * 2366 * Description: 2367 * Return the request at the top of @q. The returned request 2368 * should be started using blk_start_request() before LLD starts 2369 * processing it. 2370 * 2371 * Return: 2372 * Pointer to the request at the top of @q if available. Null 2373 * otherwise. 2374 * 2375 * Context: 2376 * queue_lock must be held. 2377 */ 2378 struct request *blk_peek_request(struct request_queue *q) 2379 { 2380 struct request *rq; 2381 int ret; 2382 2383 while ((rq = __elv_next_request(q)) != NULL) { 2384 2385 rq = blk_pm_peek_request(q, rq); 2386 if (!rq) 2387 break; 2388 2389 if (!(rq->cmd_flags & REQ_STARTED)) { 2390 /* 2391 * This is the first time the device driver 2392 * sees this request (possibly after 2393 * requeueing). Notify IO scheduler. 2394 */ 2395 if (rq->cmd_flags & REQ_SORTED) 2396 elv_activate_rq(q, rq); 2397 2398 /* 2399 * just mark as started even if we don't start 2400 * it, a request that has been delayed should 2401 * not be passed by new incoming requests 2402 */ 2403 rq->cmd_flags |= REQ_STARTED; 2404 trace_block_rq_issue(q, rq); 2405 } 2406 2407 if (!q->boundary_rq || q->boundary_rq == rq) { 2408 q->end_sector = rq_end_sector(rq); 2409 q->boundary_rq = NULL; 2410 } 2411 2412 if (rq->cmd_flags & REQ_DONTPREP) 2413 break; 2414 2415 if (q->dma_drain_size && blk_rq_bytes(rq)) { 2416 /* 2417 * make sure space for the drain appears we 2418 * know we can do this because max_hw_segments 2419 * has been adjusted to be one fewer than the 2420 * device can handle 2421 */ 2422 rq->nr_phys_segments++; 2423 } 2424 2425 if (!q->prep_rq_fn) 2426 break; 2427 2428 ret = q->prep_rq_fn(q, rq); 2429 if (ret == BLKPREP_OK) { 2430 break; 2431 } else if (ret == BLKPREP_DEFER) { 2432 /* 2433 * the request may have been (partially) prepped. 2434 * we need to keep this request in the front to 2435 * avoid resource deadlock. REQ_STARTED will 2436 * prevent other fs requests from passing this one. 2437 */ 2438 if (q->dma_drain_size && blk_rq_bytes(rq) && 2439 !(rq->cmd_flags & REQ_DONTPREP)) { 2440 /* 2441 * remove the space for the drain we added 2442 * so that we don't add it again 2443 */ 2444 --rq->nr_phys_segments; 2445 } 2446 2447 rq = NULL; 2448 break; 2449 } else if (ret == BLKPREP_KILL) { 2450 rq->cmd_flags |= REQ_QUIET; 2451 /* 2452 * Mark this request as started so we don't trigger 2453 * any debug logic in the end I/O path. 2454 */ 2455 blk_start_request(rq); 2456 __blk_end_request_all(rq, -EIO); 2457 } else { 2458 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); 2459 break; 2460 } 2461 } 2462 2463 return rq; 2464 } 2465 EXPORT_SYMBOL(blk_peek_request); 2466 2467 void blk_dequeue_request(struct request *rq) 2468 { 2469 struct request_queue *q = rq->q; 2470 2471 BUG_ON(list_empty(&rq->queuelist)); 2472 BUG_ON(ELV_ON_HASH(rq)); 2473 2474 list_del_init(&rq->queuelist); 2475 2476 /* 2477 * the time frame between a request being removed from the lists 2478 * and to it is freed is accounted as io that is in progress at 2479 * the driver side. 
2480 */ 2481 if (blk_account_rq(rq)) { 2482 q->in_flight[rq_is_sync(rq)]++; 2483 set_io_start_time_ns(rq); 2484 } 2485 } 2486 2487 /** 2488 * blk_start_request - start request processing on the driver 2489 * @req: request to dequeue 2490 * 2491 * Description: 2492 * Dequeue @req and start timeout timer on it. This hands off the 2493 * request to the driver. 2494 * 2495 * Block internal functions which don't want to start timer should 2496 * call blk_dequeue_request(). 2497 * 2498 * Context: 2499 * queue_lock must be held. 2500 */ 2501 void blk_start_request(struct request *req) 2502 { 2503 blk_dequeue_request(req); 2504 2505 /* 2506 * We are now handing the request to the hardware, initialize 2507 * resid_len to full count and add the timeout handler. 2508 */ 2509 req->resid_len = blk_rq_bytes(req); 2510 if (unlikely(blk_bidi_rq(req))) 2511 req->next_rq->resid_len = blk_rq_bytes(req->next_rq); 2512 2513 BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags)); 2514 blk_add_timer(req); 2515 } 2516 EXPORT_SYMBOL(blk_start_request); 2517 2518 /** 2519 * blk_fetch_request - fetch a request from a request queue 2520 * @q: request queue to fetch a request from 2521 * 2522 * Description: 2523 * Return the request at the top of @q. The request is started on 2524 * return and LLD can start processing it immediately. 2525 * 2526 * Return: 2527 * Pointer to the request at the top of @q if available. Null 2528 * otherwise. 2529 * 2530 * Context: 2531 * queue_lock must be held. 2532 */ 2533 struct request *blk_fetch_request(struct request_queue *q) 2534 { 2535 struct request *rq; 2536 2537 rq = blk_peek_request(q); 2538 if (rq) 2539 blk_start_request(rq); 2540 return rq; 2541 } 2542 EXPORT_SYMBOL(blk_fetch_request); 2543 2544 /** 2545 * blk_update_request - Special helper function for request stacking drivers 2546 * @req: the request being processed 2547 * @error: %0 for success, < %0 for error 2548 * @nr_bytes: number of bytes to complete @req 2549 * 2550 * Description: 2551 * Ends I/O on a number of bytes attached to @req, but doesn't complete 2552 * the request structure even if @req doesn't have leftover. 2553 * If @req has leftover, sets it up for the next range of segments. 2554 * 2555 * This special helper function is only for request stacking drivers 2556 * (e.g. request-based dm) so that they can handle partial completion. 2557 * Actual device drivers should use blk_end_request instead. 2558 * 2559 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees 2560 * %false return from this function. 2561 * 2562 * Return: 2563 * %false - this request doesn't have any more data 2564 * %true - this request has more data 2565 **/ 2566 bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) 2567 { 2568 int total_bytes; 2569 2570 trace_block_rq_complete(req->q, req, nr_bytes); 2571 2572 if (!req->bio) 2573 return false; 2574 2575 /* 2576 * For fs requests, rq is just carrier of independent bio's 2577 * and each partial completion should be handled separately. 2578 * Reset per-request error on each partial completion. 2579 * 2580 * TODO: tj: This is too subtle. It would be better to let 2581 * low level drivers do what they see fit. 
2582 */ 2583 if (req->cmd_type == REQ_TYPE_FS) 2584 req->errors = 0; 2585 2586 if (error && req->cmd_type == REQ_TYPE_FS && 2587 !(req->cmd_flags & REQ_QUIET)) { 2588 char *error_type; 2589 2590 switch (error) { 2591 case -ENOLINK: 2592 error_type = "recoverable transport"; 2593 break; 2594 case -EREMOTEIO: 2595 error_type = "critical target"; 2596 break; 2597 case -EBADE: 2598 error_type = "critical nexus"; 2599 break; 2600 case -ETIMEDOUT: 2601 error_type = "timeout"; 2602 break; 2603 case -ENOSPC: 2604 error_type = "critical space allocation"; 2605 break; 2606 case -ENODATA: 2607 error_type = "critical medium"; 2608 break; 2609 case -EIO: 2610 default: 2611 error_type = "I/O"; 2612 break; 2613 } 2614 printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n", 2615 __func__, error_type, req->rq_disk ? 2616 req->rq_disk->disk_name : "?", 2617 (unsigned long long)blk_rq_pos(req)); 2618 2619 } 2620 2621 blk_account_io_completion(req, nr_bytes); 2622 2623 total_bytes = 0; 2624 while (req->bio) { 2625 struct bio *bio = req->bio; 2626 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); 2627 2628 if (bio_bytes == bio->bi_iter.bi_size) 2629 req->bio = bio->bi_next; 2630 2631 req_bio_endio(req, bio, bio_bytes, error); 2632 2633 total_bytes += bio_bytes; 2634 nr_bytes -= bio_bytes; 2635 2636 if (!nr_bytes) 2637 break; 2638 } 2639 2640 /* 2641 * completely done 2642 */ 2643 if (!req->bio) { 2644 /* 2645 * Reset counters so that the request stacking driver 2646 * can find how many bytes remain in the request 2647 * later. 2648 */ 2649 req->__data_len = 0; 2650 return false; 2651 } 2652 2653 req->__data_len -= total_bytes; 2654 2655 /* update sector only for requests with clear definition of sector */ 2656 if (req->cmd_type == REQ_TYPE_FS) 2657 req->__sector += total_bytes >> 9; 2658 2659 /* mixed attributes always follow the first bio */ 2660 if (req->cmd_flags & REQ_MIXED_MERGE) { 2661 req->cmd_flags &= ~REQ_FAILFAST_MASK; 2662 req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK; 2663 } 2664 2665 /* 2666 * If total number of sectors is less than the first segment 2667 * size, something has gone terribly wrong. 2668 */ 2669 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { 2670 blk_dump_rq_flags(req, "request botched"); 2671 req->__data_len = blk_rq_cur_bytes(req); 2672 } 2673 2674 /* recalculate the number of segments */ 2675 blk_recalc_rq_segments(req); 2676 2677 return true; 2678 } 2679 EXPORT_SYMBOL_GPL(blk_update_request); 2680 2681 static bool blk_update_bidi_request(struct request *rq, int error, 2682 unsigned int nr_bytes, 2683 unsigned int bidi_bytes) 2684 { 2685 if (blk_update_request(rq, error, nr_bytes)) 2686 return true; 2687 2688 /* Bidi request must be completed as a whole */ 2689 if (unlikely(blk_bidi_rq(rq)) && 2690 blk_update_request(rq->next_rq, error, bidi_bytes)) 2691 return true; 2692 2693 if (blk_queue_add_random(rq->q)) 2694 add_disk_randomness(rq->rq_disk); 2695 2696 return false; 2697 } 2698 2699 /** 2700 * blk_unprep_request - unprepare a request 2701 * @req: the request 2702 * 2703 * This function makes a request ready for complete resubmission (or 2704 * completion). It happens only after all error handling is complete, 2705 * so represents the appropriate moment to deallocate any resources 2706 * that were allocated to the request in the prep_rq_fn. The queue 2707 * lock is held when calling this. 
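 *
 * A sketch of the usual pairing (illustrative only; the per-command
 * structure, the helper names and the use of rq->special below are
 * hypothetical):
 *
 *	static int my_prep_rq_fn(struct request_queue *q, struct request *rq)
 *	{
 *		rq->special = kzalloc(sizeof(struct my_cmd), GFP_ATOMIC);
 *		if (!rq->special)
 *			return BLKPREP_DEFER;
 *		rq->cmd_flags |= REQ_DONTPREP;
 *		return BLKPREP_OK;
 *	}
 *
 *	static void my_unprep_rq_fn(struct request_queue *q, struct request *rq)
 *	{
 *		kfree(rq->special);
 *		rq->special = NULL;
 *	}
 *
 * registered with blk_queue_prep_rq() and blk_queue_unprep_rq() respectively.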
2708 */ 2709 void blk_unprep_request(struct request *req) 2710 { 2711 struct request_queue *q = req->q; 2712 2713 req->cmd_flags &= ~REQ_DONTPREP; 2714 if (q->unprep_rq_fn) 2715 q->unprep_rq_fn(q, req); 2716 } 2717 EXPORT_SYMBOL_GPL(blk_unprep_request); 2718 2719 /* 2720 * queue lock must be held 2721 */ 2722 void blk_finish_request(struct request *req, int error) 2723 { 2724 if (req->cmd_flags & REQ_QUEUED) 2725 blk_queue_end_tag(req->q, req); 2726 2727 BUG_ON(blk_queued_rq(req)); 2728 2729 if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS) 2730 laptop_io_completion(&req->q->backing_dev_info); 2731 2732 blk_delete_timer(req); 2733 2734 if (req->cmd_flags & REQ_DONTPREP) 2735 blk_unprep_request(req); 2736 2737 blk_account_io_done(req); 2738 2739 if (req->end_io) 2740 req->end_io(req, error); 2741 else { 2742 if (blk_bidi_rq(req)) 2743 __blk_put_request(req->next_rq->q, req->next_rq); 2744 2745 __blk_put_request(req->q, req); 2746 } 2747 } 2748 EXPORT_SYMBOL(blk_finish_request); 2749 2750 /** 2751 * blk_end_bidi_request - Complete a bidi request 2752 * @rq: the request to complete 2753 * @error: %0 for success, < %0 for error 2754 * @nr_bytes: number of bytes to complete @rq 2755 * @bidi_bytes: number of bytes to complete @rq->next_rq 2756 * 2757 * Description: 2758 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. 2759 * Drivers that support bidi can safely call this function for any 2760 * type of request, bidi or uni. In the latter case @bidi_bytes is 2761 * just ignored. 2762 * 2763 * Return: 2764 * %false - we are done with this request 2765 * %true - still buffers pending for this request 2766 **/ 2767 static bool blk_end_bidi_request(struct request *rq, int error, 2768 unsigned int nr_bytes, unsigned int bidi_bytes) 2769 { 2770 struct request_queue *q = rq->q; 2771 unsigned long flags; 2772 2773 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) 2774 return true; 2775 2776 spin_lock_irqsave(q->queue_lock, flags); 2777 blk_finish_request(rq, error); 2778 spin_unlock_irqrestore(q->queue_lock, flags); 2779 2780 return false; 2781 } 2782 2783 /** 2784 * __blk_end_bidi_request - Complete a bidi request with queue lock held 2785 * @rq: the request to complete 2786 * @error: %0 for success, < %0 for error 2787 * @nr_bytes: number of bytes to complete @rq 2788 * @bidi_bytes: number of bytes to complete @rq->next_rq 2789 * 2790 * Description: 2791 * Identical to blk_end_bidi_request() except that queue lock is 2792 * assumed to be locked on entry and remains so on return. 2793 * 2794 * Return: 2795 * %false - we are done with this request 2796 * %true - still buffers pending for this request 2797 **/ 2798 bool __blk_end_bidi_request(struct request *rq, int error, 2799 unsigned int nr_bytes, unsigned int bidi_bytes) 2800 { 2801 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) 2802 return true; 2803 2804 blk_finish_request(rq, error); 2805 2806 return false; 2807 } 2808 2809 /** 2810 * blk_end_request - Helper function for drivers to complete the request. 2811 * @rq: the request being processed 2812 * @error: %0 for success, < %0 for error 2813 * @nr_bytes: number of bytes to complete 2814 * 2815 * Description: 2816 * Ends I/O on a number of bytes attached to @rq. 2817 * If @rq has leftover, sets it up for the next range of segments.
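 *
 * A typical driver completion path (sketch only; the handler name and its
 * arguments are hypothetical):
 *
 *	static void my_complete_rq(struct request *rq, int error,
 *				   unsigned int bytes_done)
 *	{
 *		if (blk_end_request(rq, error, bytes_done))
 *			return;
 *	}
 *
 * When this returns %false the request has been fully completed and
 * released, and must not be touched again by the driver.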
2818 * 2819 * Return: 2820 * %false - we are done with this request 2821 * %true - still buffers pending for this request 2822 **/ 2823 bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 2824 { 2825 return blk_end_bidi_request(rq, error, nr_bytes, 0); 2826 } 2827 EXPORT_SYMBOL(blk_end_request); 2828 2829 /** 2830 * blk_end_request_all - Helper function for drivers to finish the request. 2831 * @rq: the request to finish 2832 * @error: %0 for success, < %0 for error 2833 * 2834 * Description: 2835 * Completely finish @rq. 2836 */ 2837 void blk_end_request_all(struct request *rq, int error) 2838 { 2839 bool pending; 2840 unsigned int bidi_bytes = 0; 2841 2842 if (unlikely(blk_bidi_rq(rq))) 2843 bidi_bytes = blk_rq_bytes(rq->next_rq); 2844 2845 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); 2846 BUG_ON(pending); 2847 } 2848 EXPORT_SYMBOL(blk_end_request_all); 2849 2850 /** 2851 * blk_end_request_cur - Helper function to finish the current request chunk. 2852 * @rq: the request to finish the current chunk for 2853 * @error: %0 for success, < %0 for error 2854 * 2855 * Description: 2856 * Complete the current consecutively mapped chunk from @rq. 2857 * 2858 * Return: 2859 * %false - we are done with this request 2860 * %true - still buffers pending for this request 2861 */ 2862 bool blk_end_request_cur(struct request *rq, int error) 2863 { 2864 return blk_end_request(rq, error, blk_rq_cur_bytes(rq)); 2865 } 2866 EXPORT_SYMBOL(blk_end_request_cur); 2867 2868 /** 2869 * blk_end_request_err - Finish a request till the next failure boundary. 2870 * @rq: the request to finish till the next failure boundary for 2871 * @error: must be negative errno 2872 * 2873 * Description: 2874 * Complete @rq till the next failure boundary. 2875 * 2876 * Return: 2877 * %false - we are done with this request 2878 * %true - still buffers pending for this request 2879 */ 2880 bool blk_end_request_err(struct request *rq, int error) 2881 { 2882 WARN_ON(error >= 0); 2883 return blk_end_request(rq, error, blk_rq_err_bytes(rq)); 2884 } 2885 EXPORT_SYMBOL_GPL(blk_end_request_err); 2886 2887 /** 2888 * __blk_end_request - Helper function for drivers to complete the request. 2889 * @rq: the request being processed 2890 * @error: %0 for success, < %0 for error 2891 * @nr_bytes: number of bytes to complete 2892 * 2893 * Description: 2894 * Must be called with queue lock held unlike blk_end_request(). 2895 * 2896 * Return: 2897 * %false - we are done with this request 2898 * %true - still buffers pending for this request 2899 **/ 2900 bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 2901 { 2902 return __blk_end_bidi_request(rq, error, nr_bytes, 0); 2903 } 2904 EXPORT_SYMBOL(__blk_end_request); 2905 2906 /** 2907 * __blk_end_request_all - Helper function for drivers to finish the request. 2908 * @rq: the request to finish 2909 * @error: %0 for success, < %0 for error 2910 * 2911 * Description: 2912 * Completely finish @rq. Must be called with queue lock held. 2913 */ 2914 void __blk_end_request_all(struct request *rq, int error) 2915 { 2916 bool pending; 2917 unsigned int bidi_bytes = 0; 2918 2919 if (unlikely(blk_bidi_rq(rq))) 2920 bidi_bytes = blk_rq_bytes(rq->next_rq); 2921 2922 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); 2923 BUG_ON(pending); 2924 } 2925 EXPORT_SYMBOL(__blk_end_request_all); 2926 2927 /** 2928 * __blk_end_request_cur - Helper function to finish the current request chunk.
2929 * @rq: the request to finish the current chunk for 2930 * @error: %0 for success, < %0 for error 2931 * 2932 * Description: 2933 * Complete the current consecutively mapped chunk from @rq. Must 2934 * be called with queue lock held. 2935 * 2936 * Return: 2937 * %false - we are done with this request 2938 * %true - still buffers pending for this request 2939 */ 2940 bool __blk_end_request_cur(struct request *rq, int error) 2941 { 2942 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); 2943 } 2944 EXPORT_SYMBOL(__blk_end_request_cur); 2945 2946 /** 2947 * __blk_end_request_err - Finish a request till the next failure boundary. 2948 * @rq: the request to finish till the next failure boundary for 2949 * @error: must be negative errno 2950 * 2951 * Description: 2952 * Complete @rq till the next failure boundary. Must be called 2953 * with queue lock held. 2954 * 2955 * Return: 2956 * %false - we are done with this request 2957 * %true - still buffers pending for this request 2958 */ 2959 bool __blk_end_request_err(struct request *rq, int error) 2960 { 2961 WARN_ON(error >= 0); 2962 return __blk_end_request(rq, error, blk_rq_err_bytes(rq)); 2963 } 2964 EXPORT_SYMBOL_GPL(__blk_end_request_err); 2965 2966 void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 2967 struct bio *bio) 2968 { 2969 /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */ 2970 rq->cmd_flags |= bio->bi_rw & REQ_WRITE; 2971 2972 if (bio_has_data(bio)) 2973 rq->nr_phys_segments = bio_phys_segments(q, bio); 2974 2975 rq->__data_len = bio->bi_iter.bi_size; 2976 rq->bio = rq->biotail = bio; 2977 2978 if (bio->bi_bdev) 2979 rq->rq_disk = bio->bi_bdev->bd_disk; 2980 } 2981 2982 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 2983 /** 2984 * rq_flush_dcache_pages - Helper function to flush all pages in a request 2985 * @rq: the request to be flushed 2986 * 2987 * Description: 2988 * Flush all pages in @rq. 2989 */ 2990 void rq_flush_dcache_pages(struct request *rq) 2991 { 2992 struct req_iterator iter; 2993 struct bio_vec bvec; 2994 2995 rq_for_each_segment(bvec, rq, iter) 2996 flush_dcache_page(bvec.bv_page); 2997 } 2998 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); 2999 #endif 3000 3001 /** 3002 * blk_lld_busy - Check if underlying low-level drivers of a device are busy 3003 * @q : the queue of the device being checked 3004 * 3005 * Description: 3006 * Check if underlying low-level drivers of a device are busy. 3007 * If the drivers want to export their busy state, they must set own 3008 * exporting function using blk_queue_lld_busy() first. 3009 * 3010 * Basically, this function is used only by request stacking drivers 3011 * to stop dispatching requests to underlying devices when underlying 3012 * devices are busy. This behavior helps more I/O merging on the queue 3013 * of the request stacking driver and prevents I/O throughput regression 3014 * on burst I/O load. 3015 * 3016 * Return: 3017 * 0 - Not busy (The request stacking driver should dispatch request) 3018 * 1 - Busy (The request stacking driver should stop dispatching request) 3019 */ 3020 int blk_lld_busy(struct request_queue *q) 3021 { 3022 if (q->lld_busy_fn) 3023 return q->lld_busy_fn(q); 3024 3025 return 0; 3026 } 3027 EXPORT_SYMBOL_GPL(blk_lld_busy); 3028 3029 /** 3030 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request 3031 * @rq: the clone request to be cleaned up 3032 * 3033 * Description: 3034 * Free all bios in @rq for a cloned request. 
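 *
 * In a request-based stacking driver this is typically the teardown side
 * of blk_rq_prep_clone() (sketch only; @clone, @clone_q and the error
 * handling are hypothetical):
 *
 *	if (blk_rq_prep_clone(clone, rq, NULL, GFP_ATOMIC, NULL, NULL))
 *		goto fail;
 *	if (blk_insert_cloned_request(clone_q, clone))
 *		blk_rq_unprep_clone(clone);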
3035 */ 3036 void blk_rq_unprep_clone(struct request *rq) 3037 { 3038 struct bio *bio; 3039 3040 while ((bio = rq->bio) != NULL) { 3041 rq->bio = bio->bi_next; 3042 3043 bio_put(bio); 3044 } 3045 } 3046 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); 3047 3048 /* 3049 * Copy attributes of the original request to the clone request. 3050 * The actual data parts (e.g. ->cmd, ->sense) are not copied. 3051 */ 3052 static void __blk_rq_prep_clone(struct request *dst, struct request *src) 3053 { 3054 dst->cpu = src->cpu; 3055 dst->cmd_flags |= (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE; 3056 dst->cmd_type = src->cmd_type; 3057 dst->__sector = blk_rq_pos(src); 3058 dst->__data_len = blk_rq_bytes(src); 3059 dst->nr_phys_segments = src->nr_phys_segments; 3060 dst->ioprio = src->ioprio; 3061 dst->extra_len = src->extra_len; 3062 } 3063 3064 /** 3065 * blk_rq_prep_clone - Helper function to setup clone request 3066 * @rq: the request to be setup 3067 * @rq_src: original request to be cloned 3068 * @bs: bio_set that bios for clone are allocated from 3069 * @gfp_mask: memory allocation mask for bio 3070 * @bio_ctr: setup function to be called for each clone bio. 3071 * Returns %0 for success, non %0 for failure. 3072 * @data: private data to be passed to @bio_ctr 3073 * 3074 * Description: 3075 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. 3076 * The actual data parts of @rq_src (e.g. ->cmd, ->sense) 3077 * are not copied, and copying such parts is the caller's responsibility. 3078 * Also, pages which the original bios are pointing to are not copied 3079 * and the cloned bios just point same pages. 3080 * So cloned bios must be completed before original bios, which means 3081 * the caller must complete @rq before @rq_src. 3082 */ 3083 int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 3084 struct bio_set *bs, gfp_t gfp_mask, 3085 int (*bio_ctr)(struct bio *, struct bio *, void *), 3086 void *data) 3087 { 3088 struct bio *bio, *bio_src; 3089 3090 if (!bs) 3091 bs = fs_bio_set; 3092 3093 __rq_for_each_bio(bio_src, rq_src) { 3094 bio = bio_clone_fast(bio_src, gfp_mask, bs); 3095 if (!bio) 3096 goto free_and_out; 3097 3098 if (bio_ctr && bio_ctr(bio, bio_src, data)) 3099 goto free_and_out; 3100 3101 if (rq->bio) { 3102 rq->biotail->bi_next = bio; 3103 rq->biotail = bio; 3104 } else 3105 rq->bio = rq->biotail = bio; 3106 } 3107 3108 __blk_rq_prep_clone(rq, rq_src); 3109 3110 return 0; 3111 3112 free_and_out: 3113 if (bio) 3114 bio_put(bio); 3115 blk_rq_unprep_clone(rq); 3116 3117 return -ENOMEM; 3118 } 3119 EXPORT_SYMBOL_GPL(blk_rq_prep_clone); 3120 3121 int kblockd_schedule_work(struct work_struct *work) 3122 { 3123 return queue_work(kblockd_workqueue, work); 3124 } 3125 EXPORT_SYMBOL(kblockd_schedule_work); 3126 3127 int kblockd_schedule_delayed_work(struct delayed_work *dwork, 3128 unsigned long delay) 3129 { 3130 return queue_delayed_work(kblockd_workqueue, dwork, delay); 3131 } 3132 EXPORT_SYMBOL(kblockd_schedule_delayed_work); 3133 3134 int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, 3135 unsigned long delay) 3136 { 3137 return queue_delayed_work_on(cpu, kblockd_workqueue, dwork, delay); 3138 } 3139 EXPORT_SYMBOL(kblockd_schedule_delayed_work_on); 3140 3141 /** 3142 * blk_start_plug - initialize blk_plug and track it inside the task_struct 3143 * @plug: The &struct blk_plug that needs to be initialized 3144 * 3145 * Description: 3146 * Tracking blk_plug inside the task_struct will help with auto-flushing the 3147 * pending I/O should the 
task end up blocking between blk_start_plug() and 3148 * blk_finish_plug(). This is important from a performance perspective, but 3149 * also ensures that we don't deadlock. For instance, if the task is blocking 3150 * for a memory allocation, memory reclaim could end up wanting to free a 3151 * page belonging to that request that is currently residing in our private 3152 * plug. By flushing the pending I/O when the process goes to sleep, we avoid 3153 * this kind of deadlock. 3154 */ 3155 void blk_start_plug(struct blk_plug *plug) 3156 { 3157 struct task_struct *tsk = current; 3158 3159 /* 3160 * If this is a nested plug, don't actually assign it. 3161 */ 3162 if (tsk->plug) 3163 return; 3164 3165 INIT_LIST_HEAD(&plug->list); 3166 INIT_LIST_HEAD(&plug->mq_list); 3167 INIT_LIST_HEAD(&plug->cb_list); 3168 /* 3169 * Store ordering should not be needed here, since a potential 3170 * preempt will imply a full memory barrier 3171 */ 3172 tsk->plug = plug; 3173 } 3174 EXPORT_SYMBOL(blk_start_plug); 3175 3176 static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b) 3177 { 3178 struct request *rqa = container_of(a, struct request, queuelist); 3179 struct request *rqb = container_of(b, struct request, queuelist); 3180 3181 return !(rqa->q < rqb->q || 3182 (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb))); 3183 } 3184 3185 /* 3186 * If 'from_schedule' is true, then postpone the dispatch of requests 3187 * until a safe kblockd context. We do this to avoid accidental big 3188 * additional stack usage in driver dispatch, in places where the original 3189 * plugger did not intend it. 3190 */ 3191 static void queue_unplugged(struct request_queue *q, unsigned int depth, 3192 bool from_schedule) 3193 __releases(q->queue_lock) 3194 { 3195 trace_block_unplug(q, depth, !from_schedule); 3196 3197 if (from_schedule) 3198 blk_run_queue_async(q); 3199 else 3200 __blk_run_queue(q); 3201 spin_unlock(q->queue_lock); 3202 } 3203 3204 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) 3205 { 3206 LIST_HEAD(callbacks); 3207 3208 while (!list_empty(&plug->cb_list)) { 3209 list_splice_init(&plug->cb_list, &callbacks); 3210 3211 while (!list_empty(&callbacks)) { 3212 struct blk_plug_cb *cb = list_first_entry(&callbacks, 3213 struct blk_plug_cb, 3214 list); 3215 list_del(&cb->list); 3216 cb->callback(cb, from_schedule); 3217 } 3218 } 3219 } 3220 3221 struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data, 3222 int size) 3223 { 3224 struct blk_plug *plug = current->plug; 3225 struct blk_plug_cb *cb; 3226 3227 if (!plug) 3228 return NULL; 3229 3230 list_for_each_entry(cb, &plug->cb_list, list) 3231 if (cb->callback == unplug && cb->data == data) 3232 return cb; 3233 3234 /* Not currently on the callback list */ 3235 BUG_ON(size < sizeof(*cb)); 3236 cb = kzalloc(size, GFP_ATOMIC); 3237 if (cb) { 3238 cb->data = data; 3239 cb->callback = unplug; 3240 list_add(&cb->list, &plug->cb_list); 3241 } 3242 return cb; 3243 } 3244 EXPORT_SYMBOL(blk_check_plugged); 3245 3246 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) 3247 { 3248 struct request_queue *q; 3249 unsigned long flags; 3250 struct request *rq; 3251 LIST_HEAD(list); 3252 unsigned int depth; 3253 3254 flush_plug_callbacks(plug, from_schedule); 3255 3256 if (!list_empty(&plug->mq_list)) 3257 blk_mq_flush_plug_list(plug, from_schedule); 3258 3259 if (list_empty(&plug->list)) 3260 return; 3261 3262 list_splice_init(&plug->list, &list); 3263 3264 list_sort(NULL, &list, plug_rq_cmp); 3265
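	/*
	 * The list is now sorted by queue (and by sector within a queue), so
	 * requests for the same queue are dispatched back to back below: the
	 * queue lock is taken once when we switch to a new queue and dropped
	 * again by queue_unplugged().
	 */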
3266 q = NULL; 3267 depth = 0; 3268 3269 /* 3270 * Save and disable interrupts here, to avoid doing it for every 3271 * queue lock we have to take. 3272 */ 3273 local_irq_save(flags); 3274 while (!list_empty(&list)) { 3275 rq = list_entry_rq(list.next); 3276 list_del_init(&rq->queuelist); 3277 BUG_ON(!rq->q); 3278 if (rq->q != q) { 3279 /* 3280 * This drops the queue lock 3281 */ 3282 if (q) 3283 queue_unplugged(q, depth, from_schedule); 3284 q = rq->q; 3285 depth = 0; 3286 spin_lock(q->queue_lock); 3287 } 3288 3289 /* 3290 * Short-circuit if @q is dead 3291 */ 3292 if (unlikely(blk_queue_dying(q))) { 3293 __blk_end_request_all(rq, -ENODEV); 3294 continue; 3295 } 3296 3297 /* 3298 * rq is already accounted, so use raw insert 3299 */ 3300 if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) 3301 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); 3302 else 3303 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); 3304 3305 depth++; 3306 } 3307 3308 /* 3309 * This drops the queue lock 3310 */ 3311 if (q) 3312 queue_unplugged(q, depth, from_schedule); 3313 3314 local_irq_restore(flags); 3315 } 3316 3317 void blk_finish_plug(struct blk_plug *plug) 3318 { 3319 if (plug != current->plug) 3320 return; 3321 blk_flush_plug_list(plug, false); 3322 3323 current->plug = NULL; 3324 } 3325 EXPORT_SYMBOL(blk_finish_plug); 3326 3327 bool blk_poll(struct request_queue *q, blk_qc_t cookie) 3328 { 3329 struct blk_plug *plug; 3330 long state; 3331 3332 if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) || 3333 !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) 3334 return false; 3335 3336 plug = current->plug; 3337 if (plug) 3338 blk_flush_plug_list(plug, false); 3339 3340 state = current->state; 3341 while (!need_resched()) { 3342 unsigned int queue_num = blk_qc_t_to_queue_num(cookie); 3343 struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[queue_num]; 3344 int ret; 3345 3346 hctx->poll_invoked++; 3347 3348 ret = q->mq_ops->poll(hctx, blk_qc_t_to_tag(cookie)); 3349 if (ret > 0) { 3350 hctx->poll_success++; 3351 set_current_state(TASK_RUNNING); 3352 return true; 3353 } 3354 3355 if (signal_pending_state(state, current)) 3356 set_current_state(TASK_RUNNING); 3357 3358 if (current->state == TASK_RUNNING) 3359 return true; 3360 if (ret < 0) 3361 break; 3362 cpu_relax(); 3363 } 3364 3365 return false; 3366 } 3367 3368 #ifdef CONFIG_PM 3369 /** 3370 * blk_pm_runtime_init - Block layer runtime PM initialization routine 3371 * @q: the queue of the device 3372 * @dev: the device the queue belongs to 3373 * 3374 * Description: 3375 * Initialize runtime-PM-related fields for @q and start auto suspend for 3376 * @dev. Drivers that want to take advantage of request-based runtime PM 3377 * should call this function after @dev has been initialized, and its 3378 * request queue @q has been allocated, and runtime PM for it can not happen 3379 * yet(either due to disabled/forbidden or its usage_count > 0). In most 3380 * cases, driver should call this function before any I/O has taken place. 3381 * 3382 * This function takes care of setting up using auto suspend for the device, 3383 * the autosuspend delay is set to -1 to make runtime suspend impossible 3384 * until an updated value is either set by user or by driver. Drivers do 3385 * not need to touch other autosuspend settings. 3386 * 3387 * The block layer runtime PM is request based, so only works for drivers 3388 * that use request as their IO unit instead of those directly use bio's. 
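 *
 * A sketch of how a driver might use this (illustrative only; the helper
 * names are hypothetical). blk_pm_runtime_init() is called once from the
 * driver's probe routine, and the runtime PM callbacks then bracket the
 * hardware work with the blk_{pre,post}_runtime_* helpers:
 *
 *	static int my_runtime_suspend(struct device *dev)
 *	{
 *		struct request_queue *q = my_dev_to_queue(dev);
 *		int err;
 *
 *		err = blk_pre_runtime_suspend(q);
 *		if (err)
 *			return err;
 *		err = my_quiesce_hardware(dev);
 *		blk_post_runtime_suspend(q, err);
 *		return err;
 *	}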
3389 */ 3390 void blk_pm_runtime_init(struct request_queue *q, struct device *dev) 3391 { 3392 q->dev = dev; 3393 q->rpm_status = RPM_ACTIVE; 3394 pm_runtime_set_autosuspend_delay(q->dev, -1); 3395 pm_runtime_use_autosuspend(q->dev); 3396 } 3397 EXPORT_SYMBOL(blk_pm_runtime_init); 3398 3399 /** 3400 * blk_pre_runtime_suspend - Pre runtime suspend check 3401 * @q: the queue of the device 3402 * 3403 * Description: 3404 * This function will check if runtime suspend is allowed for the device 3405 * by examining if there are any requests pending in the queue. If there 3406 * are requests pending, the device can not be runtime suspended; otherwise, 3407 * the queue's status will be updated to SUSPENDING and the driver can 3408 * proceed to suspend the device. 3409 * 3410 * For the not allowed case, we mark last busy for the device so that 3411 * runtime PM core will try to autosuspend it some time later. 3412 * 3413 * This function should be called near the start of the device's 3414 * runtime_suspend callback. 3415 * 3416 * Return: 3417 * 0 - OK to runtime suspend the device 3418 * -EBUSY - Device should not be runtime suspended 3419 */ 3420 int blk_pre_runtime_suspend(struct request_queue *q) 3421 { 3422 int ret = 0; 3423 3424 if (!q->dev) 3425 return ret; 3426 3427 spin_lock_irq(q->queue_lock); 3428 if (q->nr_pending) { 3429 ret = -EBUSY; 3430 pm_runtime_mark_last_busy(q->dev); 3431 } else { 3432 q->rpm_status = RPM_SUSPENDING; 3433 } 3434 spin_unlock_irq(q->queue_lock); 3435 return ret; 3436 } 3437 EXPORT_SYMBOL(blk_pre_runtime_suspend); 3438 3439 /** 3440 * blk_post_runtime_suspend - Post runtime suspend processing 3441 * @q: the queue of the device 3442 * @err: return value of the device's runtime_suspend function 3443 * 3444 * Description: 3445 * Update the queue's runtime status according to the return value of the 3446 * device's runtime suspend function and mark last busy for the device so 3447 * that PM core will try to auto suspend the device at a later time. 3448 * 3449 * This function should be called near the end of the device's 3450 * runtime_suspend callback. 3451 */ 3452 void blk_post_runtime_suspend(struct request_queue *q, int err) 3453 { 3454 if (!q->dev) 3455 return; 3456 3457 spin_lock_irq(q->queue_lock); 3458 if (!err) { 3459 q->rpm_status = RPM_SUSPENDED; 3460 } else { 3461 q->rpm_status = RPM_ACTIVE; 3462 pm_runtime_mark_last_busy(q->dev); 3463 } 3464 spin_unlock_irq(q->queue_lock); 3465 } 3466 EXPORT_SYMBOL(blk_post_runtime_suspend); 3467 3468 /** 3469 * blk_pre_runtime_resume - Pre runtime resume processing 3470 * @q: the queue of the device 3471 * 3472 * Description: 3473 * Update the queue's runtime status to RESUMING in preparation for the 3474 * runtime resume of the device. 3475 * 3476 * This function should be called near the start of the device's 3477 * runtime_resume callback. 3478 */ 3479 void blk_pre_runtime_resume(struct request_queue *q) 3480 { 3481 if (!q->dev) 3482 return; 3483 3484 spin_lock_irq(q->queue_lock); 3485 q->rpm_status = RPM_RESUMING; 3486 spin_unlock_irq(q->queue_lock); 3487 } 3488 EXPORT_SYMBOL(blk_pre_runtime_resume); 3489 3490 /** 3491 * blk_post_runtime_resume - Post runtime resume processing 3492 * @q: the queue of the device 3493 * @err: return value of the device's runtime_resume function 3494 * 3495 * Description: 3496 * Update the queue's runtime status according to the return value of the 3497 * device's runtime_resume function. 
If it is successfully resumed, process 3498 * the requests that are queued into the device's queue when it is resuming 3499 * and then mark last busy and initiate autosuspend for it. 3500 * 3501 * This function should be called near the end of the device's 3502 * runtime_resume callback. 3503 */ 3504 void blk_post_runtime_resume(struct request_queue *q, int err) 3505 { 3506 if (!q->dev) 3507 return; 3508 3509 spin_lock_irq(q->queue_lock); 3510 if (!err) { 3511 q->rpm_status = RPM_ACTIVE; 3512 __blk_run_queue(q); 3513 pm_runtime_mark_last_busy(q->dev); 3514 pm_request_autosuspend(q->dev); 3515 } else { 3516 q->rpm_status = RPM_SUSPENDED; 3517 } 3518 spin_unlock_irq(q->queue_lock); 3519 } 3520 EXPORT_SYMBOL(blk_post_runtime_resume); 3521 #endif 3522 3523 int __init blk_dev_init(void) 3524 { 3525 BUILD_BUG_ON(__REQ_NR_BITS > 8 * 3526 FIELD_SIZEOF(struct request, cmd_flags)); 3527 3528 /* used for unplugging and affects IO latency/throughput - HIGHPRI */ 3529 kblockd_workqueue = alloc_workqueue("kblockd", 3530 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); 3531 if (!kblockd_workqueue) 3532 panic("Failed to create kblockd\n"); 3533 3534 request_cachep = kmem_cache_create("blkdev_requests", 3535 sizeof(struct request), 0, SLAB_PANIC, NULL); 3536 3537 blk_requestq_cachep = kmem_cache_create("blkdev_queue", 3538 sizeof(struct request_queue), 0, SLAB_PANIC, NULL); 3539 3540 return 0; 3541 } 3542