/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

#ifdef CONFIG_DEBUG_FS
struct dentry *blk_debugfs_root;
#endif

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

DEFINE_IDA(blk_queue_ida);

/*
 * For the allocated request tables
 */
struct kmem_cache *request_cachep;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_set(flag, q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_clear(flag, q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);
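
/*
 * Illustrative sketch (not taken from a specific caller): drivers and the
 * block core use the helpers above to flip queue flags without open-coding
 * the queue_lock dance, e.g.:
 *
 *	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 *	...
 *	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
 *
 * QUEUE_FLAG_NOMERGES is only one possible flag; any QUEUE_FLAG_* value
 * handled by queue_flag_set()/queue_flag_clear() works the same way.
 */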

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	unsigned long flags;
	bool res;

	spin_lock_irqsave(q->queue_lock, flags);
	res = queue_flag_test_and_set(flag, q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return res;
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);

/**
 * blk_queue_flag_test_and_clear - atomically test and clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was set.
 */
bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q)
{
	unsigned long flags;
	bool res;

	spin_lock_irqsave(q->queue_lock, flags);
	res = queue_flag_test_and_clear(flag, q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return res;
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_clear);

static void blk_clear_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
	clear_wb_congested(rl->blkg->wb_congested, sync);
#else
	/*
	 * If !CGROUP_WRITEBACK, all blkg's map to bdi->wb and we shouldn't
	 * flip its congestion state for events on other blkcgs.
	 */
	if (rl == &rl->q->root_rl)
		clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}

static void blk_set_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
	set_wb_congested(rl->blkg->wb_congested, sync);
#else
	/* see blk_clear_congested() */
	if (rl == &rl->q->root_rl)
		set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}

void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->timeout_list);
	rq->cpu = -1;
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = -1;
	rq->internal_tag = -1;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
}
EXPORT_SYMBOL(blk_rq_init);

static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG,	"dm internal retry" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};
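
/*
 * Illustrative sketch: the errno_to_blk_status()/blk_status_to_errno()
 * helpers below simply walk or index blk_errors[], so for example
 * errno_to_blk_status(-ENOSPC) yields BLK_STS_NOSPC and
 * blk_status_to_errno(BLK_STS_NOSPC) yields -ENOSPC, while any errno
 * without an entry in the table falls back to BLK_STS_IOERR.
 */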

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);

static void print_req_error(struct request *req, blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return;

	printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
			   __func__, blk_errors[idx].name, req->rq_disk ?
			   req->rq_disk->disk_name : "?",
			   (unsigned long long)blk_rq_pos(req));
}

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (error)
		bio->bi_status = error;

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);

	bio_advance(bio, nbytes);

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void blk_delay_work(struct work_struct *work)
{
	struct request_queue *q;

	q = container_of(work, struct request_queue, delay_work.work);
	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_delay_queue - restart queueing after defined interval
 * @q:		The &struct request_queue in question
 * @msecs:	Delay in msecs
 *
 * Description:
 *   Sometimes queueing needs to be postponed for a little while, to allow
 *   resources to come back. This function will make sure that queueing is
 *   restarted around the specified time.
 */
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (likely(!blk_queue_dead(q)))
		queue_delayed_work(kblockd_workqueue, &q->delay_work,
				   msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);
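
/*
 * Illustrative sketch (hypothetical legacy driver): a request_fn that runs
 * out of device resources can ask kblockd to rerun the queue a bit later
 * instead of busy-looping. q->queue_lock is already held inside request_fn,
 * which is what blk_delay_queue() expects:
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		if (my_device_busy()) {
 *			blk_delay_queue(q, 3);	(rerun the queue in ~3 msecs)
 *			return;
 *		}
 *		...
 *	}
 *
 * my_request_fn() and my_device_busy() are made-up names; the delay value
 * is arbitrary and is only converted with msecs_to_jiffies().
 */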

/**
 * blk_start_queue_async - asynchronously restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *    blk_start_queue_async() will clear the stop flag on the queue, and
 *    ensure that the request_fn for the queue is run from an async
 *    context.
 **/
void blk_start_queue_async(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	blk_run_queue_async(q);
}
EXPORT_SYMBOL(blk_start_queue_async);

/**
 * blk_start_queue - restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *    blk_start_queue() will clear the stop flag on the queue, and call
 *    the request_fn for the queue if it was in a stopped state when
 *    entered. Also see blk_stop_queue().
 **/
void blk_start_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	__blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations.
 **/
void blk_stop_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	cancel_delayed_work(&q->delay_work);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);

	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;
		int i;

		cancel_delayed_work_sync(&q->requeue_work);
		queue_for_each_hw_ctx(q, hctx, i)
			cancel_delayed_work_sync(&hctx->run_work);
	} else {
		cancel_delayed_work_sync(&q->delay_work);
	}
}
EXPORT_SYMBOL(blk_sync_queue);
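
/*
 * Illustrative sketch (hypothetical driver, not from this file): pairing
 * blk_stop_queue() with blk_start_queue() as the blk_stop_queue()
 * documentation above describes. Both calls expect q->queue_lock to be
 * held:
 *
 *	(on a 'queue full' response from the hardware)
 *	blk_stop_queue(q);
 *
 *	(later, from the completion path, once there is room again)
 *	blk_start_queue(q);
 */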

/**
 * blk_set_preempt_only - set QUEUE_FLAG_PREEMPT_ONLY
 * @q: request queue pointer
 *
 * Returns the previous value of the PREEMPT_ONLY flag - 0 if the flag was not
 * set and 1 if the flag was already set.
 */
int blk_set_preempt_only(struct request_queue *q)
{
	return blk_queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
}
EXPORT_SYMBOL_GPL(blk_set_preempt_only);

void blk_clear_preempt_only(struct request_queue *q)
{
	blk_queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
	wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_preempt_only);

/**
 * __blk_run_queue_uncond - run a queue whether or not it has been stopped
 * @q:	The queue to run
 *
 * Description:
 *    Invoke request handling on a queue if there are any pending requests.
 *    May be used to restart request handling after a request has completed.
 *    This variant runs the queue whether or not the queue has been
 *    stopped. Must be called with the queue lock held and interrupts
 *    disabled. See also @blk_run_queue.
 */
inline void __blk_run_queue_uncond(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (unlikely(blk_queue_dead(q)))
		return;

	/*
	 * Some request_fn implementations, e.g. scsi_request_fn(), unlock
	 * the queue lock internally. As a result multiple threads may be
	 * running such a request function concurrently. Keep track of the
	 * number of active request_fn invocations such that blk_drain_queue()
	 * can wait until all these request_fn calls have finished.
	 */
	q->request_fn_active++;
	q->request_fn(q);
	q->request_fn_active--;
}
EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);

/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 *
 * Description:
 *    See @blk_run_queue.
 */
void __blk_run_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (unlikely(blk_queue_stopped(q)))
		return;

	__blk_run_queue_uncond(q);
}
EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q:	The queue to run
 *
 * Description:
 *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
 *    of us.
 *
 * Note:
 *    Since it is not allowed to run q->delay_work after blk_cleanup_queue()
 *    has canceled q->delay_work, callers must hold the queue lock to avoid
 *    race conditions between blk_cleanup_queue() and blk_run_queue_async().
 */
void blk_run_queue_async(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
EXPORT_SYMBOL(blk_run_queue_async);

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on this queue, if it has pending work to do.
 *    May be used to restart queueing when a request has completed.
 */
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	WARN_ON_ONCE(q->mq_ops);

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);

void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

/**
 * __blk_drain_queue - drain requests from request_queue
 * @q: queue to drain
 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
 *
 * Drain requests from @q.  If @drain_all is set, all requests are drained.
 * If not, only ELVPRIV requests are drained.  The caller is responsible
 * for ensuring that no new requests which need to be drained are queued.
 */
static void __blk_drain_queue(struct request_queue *q, bool drain_all)
	__releases(q->queue_lock)
	__acquires(q->queue_lock)
{
	int i;

	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	while (true) {
		bool drain = false;

		/*
		 * The caller might be trying to drain @q before its
		 * elevator is initialized.
		 */
		if (q->elevator)
			elv_drain_elevator(q);

		blkcg_drain_queue(q);

		/*
		 * This function might be called on a queue which failed
		 * driver init after queue creation or is not yet fully
		 * active. Some drivers (e.g. fd and loop) get unhappy
		 * in such cases. Kick queue iff dispatch queue has
		 * something on it and @q has request_fn set.
		 */
		if (!list_empty(&q->queue_head) && q->request_fn)
			__blk_run_queue(q);

		drain |= q->nr_rqs_elvpriv;
		drain |= q->request_fn_active;

		/*
		 * Unfortunately, requests are queued at and tracked from
		 * multiple places and there's no single counter which can
		 * be drained. Check all the queues and counters.
		 */
		if (drain_all) {
			struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
			drain |= !list_empty(&q->queue_head);
			for (i = 0; i < 2; i++) {
				drain |= q->nr_rqs[i];
				drain |= q->in_flight[i];
				if (fq)
					drain |= !list_empty(&fq->flush_queue[i]);
			}
		}

		if (!drain)
			break;

		spin_unlock_irq(q->queue_lock);

		msleep(10);

		spin_lock_irq(q->queue_lock);
	}

	/*
	 * With queue marked dead, any woken up waiter will fail the
	 * allocation path, so the wakeup chaining is lost and we're
	 * left with hung waiters. We need to wake up those waiters.
	 */
	if (q->request_fn) {
		struct request_list *rl;

		blk_queue_for_each_rl(rl, q)
			for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
				wake_up_all(&rl->wait[i]);
	}
}

void blk_drain_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	__blk_drain_queue(q, true);
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_queue_bypass_start - enter queue bypass mode
 * @q: queue of interest
 *
 * In bypass mode, only the dispatch FIFO queue of @q is used.  This
 * function makes @q enter bypass mode and drains all requests which were
 * throttled or issued before.  On return, it's guaranteed that no request
 * is being throttled or has ELVPRIV set and blk_queue_bypass() %true
 * inside queue or RCU read lock.
 */
void blk_queue_bypass_start(struct request_queue *q)
{
	WARN_ON_ONCE(q->mq_ops);

	spin_lock_irq(q->queue_lock);
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);
	spin_unlock_irq(q->queue_lock);

	/*
	 * Queues start drained.  Skip actual draining till init is
	 * complete.  This avoids lengthy delays during queue init which
	 * can happen many times during boot.
	 */
	if (blk_queue_init_done(q)) {
		spin_lock_irq(q->queue_lock);
		__blk_drain_queue(q, false);
		spin_unlock_irq(q->queue_lock);

		/* ensure blk_queue_bypass() is %true inside RCU read lock */
		synchronize_rcu();
	}
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_start);

/**
 * blk_queue_bypass_end - leave queue bypass mode
 * @q: queue of interest
 *
 * Leave bypass mode and restore the normal queueing behavior.
 *
 * Note: although blk_queue_bypass_start() is only called for blk-sq queues,
 * this function is called for both blk-sq and blk-mq queues.
 */
void blk_queue_bypass_end(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	if (!--q->bypass_depth)
		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
	WARN_ON_ONCE(q->bypass_depth < 0);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_end);

void blk_set_queue_dying(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);

	/*
	 * When queue DYING flag is set, we need to block new req
	 * entering queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);

	if (q->mq_ops)
		blk_mq_wake_waiters(q);
	else {
		struct request_list *rl;

		spin_lock_irq(q->queue_lock);
		blk_queue_for_each_rl(rl, q) {
			if (rl->rq_pool) {
				wake_up_all(&rl->wait[BLK_RW_SYNC]);
				wake_up_all(&rl->wait[BLK_RW_ASYNC]);
			}
		}
		spin_unlock_irq(q->queue_lock);
	}

	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
void blk_exit_queue(struct request_queue *q)
{
	/*
	 * Since the I/O scheduler exit code may access cgroup information,
	 * perform I/O scheduler exit before disassociating from the block
	 * cgroup controller.
	 */
	if (q->elevator) {
		ioc_clear_queue(q);
		elevator_exit(q, q->elevator);
		q->elevator = NULL;
	}

	/*
	 * Remove all references to @q from the block cgroup controller before
	 * restoring @q->queue_lock to avoid that restoring this pointer causes
	 * e.g. blkcg_print_blkgs() to crash.
	 */
	blkcg_exit_queue(q);

	/*
	 * Since the cgroup code may dereference the @q->backing_dev_info
	 * pointer, only decrease its reference count after having removed the
	 * association with the block cgroup controller.
	 */
	bdi_put(q->backing_dev_info);
}

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 */
void blk_cleanup_queue(struct request_queue *q)
{
	spinlock_t *lock = q->queue_lock;

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	mutex_lock(&q->sysfs_lock);
	blk_set_queue_dying(q);
	spin_lock_irq(lock);

	/*
	 * A dying queue is permanently in bypass mode till released.  Note
	 * that, unlike blk_queue_bypass_start(), we aren't performing
	 * synchronize_rcu() after entering bypass mode to avoid the delay
	 * as some drivers create and destroy a lot of queues while
	 * probing.  This is still safe because blk_release_queue() will be
	 * called only after the queue refcnt drops to zero and nothing,
	 * RCU or not, would be traversing the queue by then.
	 */
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);

	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	queue_flag_set(QUEUE_FLAG_DYING, q);
	spin_unlock_irq(lock);
	mutex_unlock(&q->sysfs_lock);

	/*
	 * Drain all requests queued before DYING marking. Set DEAD flag to
	 * prevent that q->request_fn() gets invoked after draining finished.
	 */
	blk_freeze_queue(q);
	spin_lock_irq(lock);
	queue_flag_set(QUEUE_FLAG_DEAD, q);
	spin_unlock_irq(lock);

	/*
	 * make sure all in-progress dispatches are completed because
	 * blk_freeze_queue() can only complete all requests, and
	 * dispatch may still be in progress since we dispatch requests
	 * from more than one context.
	 *
	 * No need to quiesce queue if it isn't initialized yet since
	 * blk_freeze_queue() should be enough for cases of passthrough
	 * request.
	 */
	if (q->mq_ops && blk_queue_init_done(q))
		blk_mq_quiesce_queue(q);

	/* for synchronous bio-based driver finish in-flight integrity i/o */
	blk_flush_integrity();

	/* @q won't process any more request, flush async actions */
	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
	blk_sync_queue(q);

	/*
	 * I/O scheduler exit is only safe after the sysfs scheduler attribute
	 * has been removed.
	 */
	WARN_ON_ONCE(q->kobj.state_in_sysfs);

	blk_exit_queue(q);

	if (q->mq_ops)
		blk_mq_free_queue(q);
	percpu_ref_exit(&q->q_usage_counter);

	spin_lock_irq(lock);
	if (q->queue_lock != &q->__queue_lock)
		q->queue_lock = &q->__queue_lock;
	spin_unlock_irq(lock);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

/* Allocate memory local to the request queue */
static void *alloc_request_simple(gfp_t gfp_mask, void *data)
{
	struct request_queue *q = data;

	return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node);
}

static void free_request_simple(void *element, void *data)
{
	kmem_cache_free(request_cachep, element);
}

static void *alloc_request_size(gfp_t gfp_mask, void *data)
{
	struct request_queue *q = data;
	struct request *rq;

	rq = kmalloc_node(sizeof(struct request) + q->cmd_size, gfp_mask,
			q->node);
	if (rq && q->init_rq_fn && q->init_rq_fn(q, rq, gfp_mask) < 0) {
		kfree(rq);
		rq = NULL;
	}
	return rq;
}

static void free_request_size(void *element, void *data)
{
	struct request_queue *q = data;

	if (q->exit_rq_fn)
		q->exit_rq_fn(q, element);
	kfree(element);
}

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask)
{
	if (unlikely(rl->rq_pool) || q->mq_ops)
		return 0;

	rl->q = q;
	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

	if (q->cmd_size) {
		rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
				alloc_request_size, free_request_size,
				q, gfp_mask, q->node);
	} else {
		rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
				alloc_request_simple, free_request_simple,
				q, gfp_mask, q->node);
	}
	if (!rl->rq_pool)
		return -ENOMEM;

	if (rl != &q->root_rl)
		WARN_ON_ONCE(!blk_get_queue(q));

	return 0;
}

void blk_exit_rl(struct request_queue *q, struct request_list *rl)
{
	if (rl->rq_pool) {
		mempool_destroy(rl->rq_pool);
		if (rl != &q->root_rl)
			blk_put_queue(q);
	}
}

struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE, NULL);
}
EXPORT_SYMBOL(blk_alloc_queue);

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PREEMPT
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool preempt = flags & BLK_MQ_REQ_PREEMPT;

	while (true) {
		bool success = false;

		rcu_read_lock();
		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
			/*
			 * The code that sets the PREEMPT_ONLY flag is
			 * responsible for ensuring that that flag is globally
			 * visible before the queue is unfrozen.
			 */
			if (preempt || !blk_queue_preempt_only(q)) {
				success = true;
			} else {
				percpu_ref_put(&q->q_usage_counter);
			}
		}
		rcu_read_unlock();

		if (success)
			return 0;

		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/*
		 * read pair of barrier in blk_freeze_queue_start(),
		 * we need to order reading __PERCPU_REF_DEAD flag of
		 * .q_usage_counter and reading .mq_freeze_depth or
		 * queue dying flag, otherwise the following wait may
		 * never return if the two reads are reordered.
		 */
		smp_rmb();

		wait_event(q->mq_freeze_wq,
			   (atomic_read(&q->mq_freeze_depth) == 0 &&
			    (preempt || !blk_queue_preempt_only(q))) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}
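
/*
 * Illustrative sketch: submitters bracket work on a queue with
 * blk_queue_enter()/blk_queue_exit() so that queue freezing can wait for
 * them, roughly:
 *
 *	if (blk_queue_enter(q, 0) == 0) {
 *		... issue work against q ...
 *		blk_queue_exit(q);
 *	}
 *
 * Passing BLK_MQ_REQ_NOWAIT instead of 0 makes blk_queue_enter() return
 * -EBUSY rather than wait for a frozen queue.
 */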

/**
 * blk_alloc_queue_node - allocate a request queue
 * @gfp_mask: memory allocation flags
 * @node_id: NUMA node to allocate memory from
 * @lock: For legacy queues, pointer to a spinlock that will be used to e.g.
 *	  serialize calls to the legacy .request_fn() callback. Ignored for
 *	  blk-mq request queues.
 *
 * Note: pass the queue lock as the third argument to this function instead of
 * setting the queue lock pointer explicitly to avoid triggering a sporadic
 * crash in the blkcg code. This function namely calls blkcg_init_queue() and
 * the queue lock pointer must be set before blkcg_init_queue() is called.
 */
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
					   spinlock_t *lock)
{
	struct request_queue *q;
	int ret;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
	if (q->id < 0)
		goto fail_q;

	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto fail_id;

	q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
	if (!q->backing_dev_info)
		goto fail_split;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_stats;

	q->backing_dev_info->ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
	q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
	q->backing_dev_info->name = "block";
	q->node = node_id;

	timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
		    laptop_mode_timer_fn, 0);
	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, NULL);
	INIT_LIST_HEAD(&q->queue_head);
	INIT_LIST_HEAD(&q->timeout_list);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif
	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);

	kobject_init(&q->kobj, &blk_queue_ktype);

#ifdef CONFIG_BLK_DEV_IO_TRACE
	mutex_init(&q->blk_trace_mutex);
#endif
	mutex_init(&q->sysfs_lock);
	spin_lock_init(&q->__queue_lock);

	if (!q->mq_ops)
		q->queue_lock = lock ? : &q->__queue_lock;

	/*
	 * A queue starts its life with bypass turned on to avoid
	 * unnecessary bypass on/off overhead and nasty surprises during
	 * init.  The initial bypass will be finished when the queue is
	 * registered by blk_register_queue().
	 */
	q->bypass_depth = 1;
	queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q);

	init_waitqueue_head(&q->mq_freeze_wq);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_bdi;

	if (blkcg_init_queue(q))
		goto fail_ref;

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
	blk_free_queue_stats(q->stats);
fail_stats:
	bdi_put(q->backing_dev_info);
fail_split:
	bioset_exit(&q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue_node);

/**
 * blk_init_queue - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sorts requests and coalesces adjacent requests, then it must
 *    call blk_init_queue().  The function @rfn will be called when there
 *    are requests on the queue that need to be processed.
 *    If the device supports plugging, then @rfn may not be called immediately
 *    when requests are available on the queue, but may be called at some time
 *    later instead.  Plugged queues are generally unplugged when a buffer
 *    belonging to one of the requests on the queue is needed, or due to
 *    memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time.  If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will be taken also from interrupt context, so irq
 *    disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or %NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/

struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_init_queue);

struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	struct request_queue *q;

	q = blk_alloc_queue_node(GFP_KERNEL, node_id, lock);
	if (!q)
		return NULL;

	q->request_fn = rfn;
	if (blk_init_allocated_queue(q) < 0) {
		blk_cleanup_queue(q);
		return NULL;
	}

	return q;
}
EXPORT_SYMBOL(blk_init_queue_node);

static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);


int blk_init_allocated_queue(struct request_queue *q)
{
	WARN_ON_ONCE(q->mq_ops);

	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
	if (!q->fq)
		return -ENOMEM;

	if (q->init_rq_fn && q->init_rq_fn(q, q->fq->flush_rq, GFP_KERNEL))
		goto out_free_flush_queue;

	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
		goto out_exit_flush_rq;

	INIT_WORK(&q->timeout_work, blk_timeout_work);
	q->queue_flags		|= QUEUE_FLAG_DEFAULT;

	/*
	 * This also sets hw/phys segments, boundary and size
	 */
	blk_queue_make_request(q, blk_queue_bio);

	q->sg_reserved_size = INT_MAX;

	if (elevator_init(q))
		goto out_exit_flush_rq;
	return 0;

out_exit_flush_rq:
	if (q->exit_rq_fn)
		q->exit_rq_fn(q, q->fq->flush_rq);
out_free_flush_queue:
	blk_free_flush_queue(q->fq);
	q->fq = NULL;
	return -ENOMEM;
}
EXPORT_SYMBOL(blk_init_allocated_queue);

bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

static inline void blk_free_request(struct request_list *rl, struct request *rq)
{
	if (rq->rq_flags & RQF_ELVPRIV) {
		elv_put_request(rl->q, rq);
		if (rq->elv.icq)
			put_io_context(rq->elv.icq->ioc);
	}

	mempool_free(rq, rl->rq_pool);
}
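
/*
 * Illustrative sketch (hypothetical legacy request_fn driver): queue setup
 * and teardown are paired as the blk_init_queue() documentation above
 * requires. my_request_fn and my_lock are made-up driver symbols:
 *
 *	q = blk_init_queue(my_request_fn, &my_lock);
 *	if (!q)
 *		return -ENOMEM;
 *	...
 *	blk_cleanup_queue(q);
 */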

/*
 * ioc_batching returns true if the ioc is a valid batching request and
 * should be given priority access to a request.
 */
static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc)
		return 0;

	/*
	 * Make sure the process is able to allocate at least 1 request
	 * even if the batch times out, otherwise we could theoretically
	 * lose wakeups.
	 */
	return ioc->nr_batch_requests == q->nr_batching ||
		(ioc->nr_batch_requests > 0
		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}

/*
 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 * will cause the process to be a "batcher" on all queues in the system. This
 * is the behaviour we want though - once it gets a wakeup it should be given
 * a nice run.
 */
static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc || ioc_batching(q, ioc))
		return;

	ioc->nr_batch_requests = q->nr_batching;
	ioc->last_waited = jiffies;
}

static void __freed_request(struct request_list *rl, int sync)
{
	struct request_queue *q = rl->q;

	if (rl->count[sync] < queue_congestion_off_threshold(q))
		blk_clear_congested(rl, sync);

	if (rl->count[sync] + 1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[sync]))
			wake_up(&rl->wait[sync]);

		blk_clear_rl_full(rl, sync);
	}
}

/*
 * A request has just been released.  Account for it, update the full and
 * congestion status, wake up any waiters.  Called under q->queue_lock.
 */
static void freed_request(struct request_list *rl, bool sync,
		req_flags_t rq_flags)
{
	struct request_queue *q = rl->q;

	q->nr_rqs[sync]--;
	rl->count[sync]--;
	if (rq_flags & RQF_ELVPRIV)
		q->nr_rqs_elvpriv--;

	__freed_request(rl, sync);

	if (unlikely(rl->starved[sync ^ 1]))
		__freed_request(rl, sync ^ 1);
}

int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct request_list *rl;
	int on_thresh, off_thresh;

	WARN_ON_ONCE(q->mq_ops);

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);
	on_thresh = queue_congestion_on_threshold(q);
	off_thresh = queue_congestion_off_threshold(q);

	blk_queue_for_each_rl(rl, q) {
		if (rl->count[BLK_RW_SYNC] >= on_thresh)
			blk_set_congested(rl, BLK_RW_SYNC);
		else if (rl->count[BLK_RW_SYNC] < off_thresh)
			blk_clear_congested(rl, BLK_RW_SYNC);

		if (rl->count[BLK_RW_ASYNC] >= on_thresh)
			blk_set_congested(rl, BLK_RW_ASYNC);
		else if (rl->count[BLK_RW_ASYNC] < off_thresh)
			blk_clear_congested(rl, BLK_RW_ASYNC);

		if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
			blk_set_rl_full(rl, BLK_RW_SYNC);
		} else {
			blk_clear_rl_full(rl, BLK_RW_SYNC);
			wake_up(&rl->wait[BLK_RW_SYNC]);
		}

		if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
			blk_set_rl_full(rl, BLK_RW_ASYNC);
		} else {
			blk_clear_rl_full(rl, BLK_RW_ASYNC);
			wake_up(&rl->wait[BLK_RW_ASYNC]);
		}
	}

	spin_unlock_irq(q->queue_lock);
	return 0;
}

/**
 * __get_request - get a free request
 * @rl: request list to allocate from
 * @op: operation and flags
 * @bio: bio to allocate request for (can be %NULL)
 * @flags: BLK_MQ_REQ_* flags
 * @gfp_mask: allocator flags
 *
 * Get a free request from @q.  This function may fail under memory
 * pressure or if @q is dead.
 *
 * Must be called with @q->queue_lock held and,
 * Returns ERR_PTR on failure, with @q->queue_lock held.
 * Returns request pointer on success, with @q->queue_lock *not held*.
 */
static struct request *__get_request(struct request_list *rl, unsigned int op,
		struct bio *bio, blk_mq_req_flags_t flags, gfp_t gfp_mask)
{
	struct request_queue *q = rl->q;
	struct request *rq;
	struct elevator_type *et = q->elevator->type;
	struct io_context *ioc = rq_ioc(bio);
	struct io_cq *icq = NULL;
	const bool is_sync = op_is_sync(op);
	int may_queue;
	req_flags_t rq_flags = RQF_ALLOCED;

	lockdep_assert_held(q->queue_lock);

	if (unlikely(blk_queue_dying(q)))
		return ERR_PTR(-ENODEV);

	may_queue = elv_may_queue(q, op);
	if (may_queue == ELV_MQUEUE_NO)
		goto rq_starved;

	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
		if (rl->count[is_sync]+1 >= q->nr_requests) {
			/*
			 * The queue will fill after this allocation, so set
			 * it as full, and mark this process as "batching".
			 * This process will be allowed to complete a batch of
			 * requests, others will be blocked.
			 */
			if (!blk_rl_full(rl, is_sync)) {
				ioc_set_batching(q, ioc);
				blk_set_rl_full(rl, is_sync);
			} else {
				if (may_queue != ELV_MQUEUE_MUST
						&& !ioc_batching(q, ioc)) {
					/*
					 * The queue is full and the allocating
					 * process is not a "batcher", and not
					 * exempted by the IO scheduler
					 */
					return ERR_PTR(-ENOMEM);
				}
			}
		}
		blk_set_congested(rl, is_sync);
	}

	/*
	 * Only allow batching queuers to allocate up to 50% over the defined
	 * limit of requests, otherwise we could have thousands of requests
	 * allocated with any setting of ->nr_requests
	 */
	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
		return ERR_PTR(-ENOMEM);

	q->nr_rqs[is_sync]++;
	rl->count[is_sync]++;
	rl->starved[is_sync] = 0;

	/*
	 * Decide whether the new request will be managed by elevator.  If
	 * so, mark @rq_flags and increment elvpriv.  Non-zero elvpriv will
	 * prevent the current elevator from being destroyed until the new
	 * request is freed.  This guarantees icq's won't be destroyed and
	 * makes creating new ones safe.
	 *
	 * Flush requests do not use the elevator so skip initialization.
	 * This allows a request to share the flush and elevator data.
	 *
	 * Also, lookup icq while holding queue_lock.  If it doesn't exist,
	 * it will be created after releasing queue_lock.
	 */
	if (!op_is_flush(op) && !blk_queue_bypass(q)) {
		rq_flags |= RQF_ELVPRIV;
		q->nr_rqs_elvpriv++;
		if (et->icq_cache && ioc)
			icq = ioc_lookup_icq(ioc, q);
	}

	if (blk_queue_io_stat(q))
		rq_flags |= RQF_IO_STAT;
	spin_unlock_irq(q->queue_lock);

	/* allocate and init request */
	rq = mempool_alloc(rl->rq_pool, gfp_mask);
	if (!rq)
		goto fail_alloc;

	blk_rq_init(q, rq);
	blk_rq_set_rl(rq, rl);
	rq->cmd_flags = op;
	rq->rq_flags = rq_flags;
	if (flags & BLK_MQ_REQ_PREEMPT)
		rq->rq_flags |= RQF_PREEMPT;

	/* init elvpriv */
	if (rq_flags & RQF_ELVPRIV) {
		if (unlikely(et->icq_cache && !icq)) {
			if (ioc)
				icq = ioc_create_icq(ioc, q, gfp_mask);
			if (!icq)
				goto fail_elvpriv;
		}

		rq->elv.icq = icq;
		if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
			goto fail_elvpriv;

		/* @rq->elv.icq holds io_context until @rq is freed */
		if (icq)
			get_io_context(icq->ioc);
	}
out:
	/*
	 * ioc may be NULL here, and ioc_batching will be false. That's
	 * OK, if the queue is under the request limit then requests need
	 * not count toward the nr_batch_requests limit. There will always
	 * be some limit enforced by BLK_BATCH_TIME.
	 */
	if (ioc_batching(q, ioc))
		ioc->nr_batch_requests--;

	trace_block_getrq(q, bio, op);
	return rq;

fail_elvpriv:
	/*
	 * elvpriv init failed.  ioc, icq and elvpriv aren't mempool backed
	 * and may fail indefinitely under memory pressure and thus
	 * shouldn't stall IO.  Treat this request as !elvpriv.  This will
	 * disturb iosched and blkcg but weird is better than dead.
	 */
	printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
			   __func__, dev_name(q->backing_dev_info->dev));

	rq->rq_flags &= ~RQF_ELVPRIV;
	rq->elv.icq = NULL;

	spin_lock_irq(q->queue_lock);
	q->nr_rqs_elvpriv--;
	spin_unlock_irq(q->queue_lock);
	goto out;

fail_alloc:
	/*
	 * Allocation failed presumably due to memory. Undo anything we
	 * might have messed up.
	 *
	 * Allocating task should really be put onto the front of the wait
	 * queue, but this is pretty rare.
	 */
	spin_lock_irq(q->queue_lock);
	freed_request(rl, is_sync, rq_flags);

	/*
	 * in the very unlikely event that allocation failed and no
	 * requests for this direction were pending, mark us starved so that
	 * freeing of a request in the other direction will notice
	 * us. another possible fix would be to split the rq mempool into
	 * READ and WRITE
	 */
rq_starved:
	if (unlikely(rl->count[is_sync] == 0))
		rl->starved[is_sync] = 1;
	return ERR_PTR(-ENOMEM);
}

/**
 * get_request - get a free request
 * @q: request_queue to allocate request from
 * @op: operation and flags
 * @bio: bio to allocate request for (can be %NULL)
 * @flags: BLK_MQ_REQ_* flags.
 * @gfp: allocator flags
 *
 * Get a free request from @q.  Unless %BLK_MQ_REQ_NOWAIT is set in @flags,
 * this function keeps retrying under memory pressure and fails iff @q is dead.
 *
 * Must be called with @q->queue_lock held and,
 * Returns ERR_PTR on failure, with @q->queue_lock held.
 * Returns request pointer on success, with @q->queue_lock *not held*.
 */
static struct request *get_request(struct request_queue *q, unsigned int op,
		struct bio *bio, blk_mq_req_flags_t flags, gfp_t gfp)
{
	const bool is_sync = op_is_sync(op);
	DEFINE_WAIT(wait);
	struct request_list *rl;
	struct request *rq;

	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
retry:
	rq = __get_request(rl, op, bio, flags, gfp);
	if (!IS_ERR(rq))
		return rq;

	if (op & REQ_NOWAIT) {
		blk_put_rl(rl);
		return ERR_PTR(-EAGAIN);
	}

	if ((flags & BLK_MQ_REQ_NOWAIT) || unlikely(blk_queue_dying(q))) {
		blk_put_rl(rl);
		return rq;
	}

	/* wait on @rl and retry */
	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
				  TASK_UNINTERRUPTIBLE);

	trace_block_sleeprq(q, bio, op);

	spin_unlock_irq(q->queue_lock);
	io_schedule();

	/*
	 * After sleeping, we become a "batching" process and will be able
	 * to allocate at least one request, and up to a big batch of them
	 * for a small period of time.  See ioc_batching, ioc_set_batching
	 */
	ioc_set_batching(q, current->io_context);

	spin_lock_irq(q->queue_lock);
	finish_wait(&rl->wait[is_sync], &wait);

	goto retry;
}

/* flags: BLK_MQ_REQ_PREEMPT and/or BLK_MQ_REQ_NOWAIT. */
static struct request *blk_old_get_request(struct request_queue *q,
				unsigned int op, blk_mq_req_flags_t flags)
{
	struct request *rq;
	gfp_t gfp_mask = flags & BLK_MQ_REQ_NOWAIT ? GFP_ATOMIC : GFP_NOIO;
	int ret = 0;

	WARN_ON_ONCE(q->mq_ops);

	/* create ioc upfront */
	create_io_context(gfp_mask, q->node);

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);
	spin_lock_irq(q->queue_lock);
	rq = get_request(q, op, NULL, flags, gfp_mask);
	if (IS_ERR(rq)) {
		spin_unlock_irq(q->queue_lock);
		blk_queue_exit(q);
		return rq;
	}

	/* q->queue_lock is unlocked at this point */
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
}

/**
 * blk_get_request - allocate a request
 * @q: request queue to allocate a request for
 * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
 * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
 */
struct request *blk_get_request(struct request_queue *q, unsigned int op,
				blk_mq_req_flags_t flags)
{
	struct request *req;

	WARN_ON_ONCE(op & REQ_NOWAIT);
	WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT));

	if (q->mq_ops) {
		req = blk_mq_alloc_request(q, op, flags);
		if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
			q->mq_ops->initialize_rq_fn(req);
	} else {
		req = blk_old_get_request(q, op, flags);
		if (!IS_ERR(req) && q->initialize_rq_fn)
			q->initialize_rq_fn(req);
	}

	return req;
}
EXPORT_SYMBOL(blk_get_request);
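
/*
 * Illustrative sketch: blk_get_request() is paired with blk_put_request()
 * once the caller is done with the request, e.g. for a passthrough command:
 *
 *	rq = blk_get_request(q, REQ_OP_DRV_OUT, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	...
 *	blk_put_request(rq);
 *
 * REQ_OP_DRV_OUT is just one possible operation; the op and flags arguments
 * are constrained only by the WARN_ON_ONCE() checks above.
 */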

/**
 * blk_requeue_request - put a request back on queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 *
 * Description:
 *    Drivers often keep queueing requests until the hardware cannot accept
 *    more, when that condition happens we need to put the request back
 *    on the queue. Must be called with queue lock held.
 */
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	blk_delete_timer(rq);
	blk_clear_rq_complete(rq);
	trace_block_rq_requeue(q, rq);
	rq_qos_requeue(q, rq);

	if (rq->rq_flags & RQF_QUEUED)
		blk_queue_end_tag(q, rq);

	BUG_ON(blk_queued_rq(rq));

	elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);

static void add_acct_request(struct request_queue *q, struct request *rq,
			     int where)
{
	blk_account_io_start(rq, true);
	__elv_add_request(q, rq, where);
}

static void part_round_stats_single(struct request_queue *q, int cpu,
				    struct hd_struct *part, unsigned long now,
				    unsigned int inflight)
{
	if (inflight) {
		__part_stat_add(cpu, part, time_in_queue,
				inflight * (now - part->stamp));
		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
	}
	part->stamp = now;
}

/**
 * part_round_stats() - Round off the performance stats on a struct disk_stats.
 * @q: target block queue
 * @cpu: cpu number for stats access
 * @part: target partition
 *
 * The average IO queue length and utilisation statistics are maintained
 * by observing the current state of the queue length and the amount of
 * time it has been in this state for.
 *
 * Normally, that accounting is done on IO completion, but that can result
 * in more than a second's worth of IO being accounted for within any one
 * second, leading to >100% utilisation.  To deal with that, we call this
 * function to do a round-off before returning the results when reading
 * /proc/diskstats.  This accounts immediately for all queue usage up to
 * the current jiffies and restarts the counters again.
 */
void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part)
{
	struct hd_struct *part2 = NULL;
	unsigned long now = jiffies;
	unsigned int inflight[2];
	int stats = 0;

	if (part->stamp != now)
		stats |= 1;

	if (part->partno) {
		part2 = &part_to_disk(part)->part0;
		if (part2->stamp != now)
			stats |= 2;
	}

	if (!stats)
		return;

	part_in_flight(q, part, inflight);

	if (stats & 2)
		part_round_stats_single(q, cpu, part2, now, inflight[1]);
	if (stats & 1)
		part_round_stats_single(q, cpu, part, now, inflight[0]);
}
EXPORT_SYMBOL_GPL(part_round_stats);

#ifdef CONFIG_PM
static void blk_pm_put_request(struct request *rq)
{
	if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending)
		pm_runtime_mark_last_busy(rq->q->dev);
}
#else
static inline void blk_pm_put_request(struct request *rq) {}
#endif

void __blk_put_request(struct request_queue *q, struct request *req)
{
	req_flags_t rq_flags = req->rq_flags;

	if (unlikely(!q))
		return;

	if (q->mq_ops) {
		blk_mq_free_request(req);
		return;
	}

	lockdep_assert_held(q->queue_lock);

	blk_req_zone_write_unlock(req);
	blk_pm_put_request(req);

	elv_completed_request(q, req);

	/* this is a bio leak */
	WARN_ON(req->bio != NULL);

	rq_qos_done(q, req);

	/*
	 * Request may not have originated from ll_rw_blk. If not,
	 * it didn't come out of our reserved rq pools
	 */
	if (rq_flags & RQF_ALLOCED) {
		struct request_list *rl = blk_rq_rl(req);
		bool sync = op_is_sync(req->cmd_flags);

		BUG_ON(!list_empty(&req->queuelist));
		BUG_ON(ELV_ON_HASH(req));

		blk_free_request(rl, req);
		freed_request(rl, sync, rq_flags);
		blk_put_rl(rl);
		blk_queue_exit(q);
	}
}
EXPORT_SYMBOL_GPL(__blk_put_request);

void blk_put_request(struct request *req)
{
	struct request_queue *q = req->q;

	if (q->mq_ops)
		blk_mq_free_request(req);
	else {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		__blk_put_request(q, req);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
EXPORT_SYMBOL(blk_put_request);

bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
			    struct bio *bio)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_back_merge_fn(q, req, bio))
		return false;

	trace_block_bio_backmerge(q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

	blk_account_io_start(req, false);
	return true;
}

bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
			     struct bio *bio)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_front_merge_fn(q, req, bio))
		return false;

	trace_block_bio_frontmerge(q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	bio->bi_next = req->bio;
	req->bio = bio;

	req->__sector = bio->bi_iter.bi_sector;
	req->__data_len += bio->bi_iter.bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

	blk_account_io_start(req, false);
	return true;
}

bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct bio *bio)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
	req->nr_phys_segments = segments + 1;

	blk_account_io_start(req, false);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @request_count: out parameter for number of traversed plugged requests
 * @same_queue_rq: pointer to &struct request that gets filled in when
 *		another request associated with @q is found on the plug list
 *		(optional, may be %NULL)
 *
 * Determine whether @bio being queued on @q can be merged with a request
 * on %current's plugged list.  Returns %true if merge was successful,
 * otherwise %false.
1881 * 1882 * Plugging coalesces IOs from the same issuer for the same purpose without 1883 * going through @q->queue_lock. As such it's more of an issuing mechanism 1884 * than scheduling, and the request, while may have elvpriv data, is not 1885 * added on the elevator at this point. In addition, we don't have 1886 * reliable access to the elevator outside queue lock. Only check basic 1887 * merging parameters without querying the elevator. 1888 * 1889 * Caller must ensure !blk_queue_nomerges(q) beforehand. 1890 */ 1891 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, 1892 unsigned int *request_count, 1893 struct request **same_queue_rq) 1894 { 1895 struct blk_plug *plug; 1896 struct request *rq; 1897 struct list_head *plug_list; 1898 1899 plug = current->plug; 1900 if (!plug) 1901 return false; 1902 *request_count = 0; 1903 1904 if (q->mq_ops) 1905 plug_list = &plug->mq_list; 1906 else 1907 plug_list = &plug->list; 1908 1909 list_for_each_entry_reverse(rq, plug_list, queuelist) { 1910 bool merged = false; 1911 1912 if (rq->q == q) { 1913 (*request_count)++; 1914 /* 1915 * Only blk-mq multiple hardware queues case checks the 1916 * rq in the same queue, there should be only one such 1917 * rq in a queue 1918 **/ 1919 if (same_queue_rq) 1920 *same_queue_rq = rq; 1921 } 1922 1923 if (rq->q != q || !blk_rq_merge_ok(rq, bio)) 1924 continue; 1925 1926 switch (blk_try_merge(rq, bio)) { 1927 case ELEVATOR_BACK_MERGE: 1928 merged = bio_attempt_back_merge(q, rq, bio); 1929 break; 1930 case ELEVATOR_FRONT_MERGE: 1931 merged = bio_attempt_front_merge(q, rq, bio); 1932 break; 1933 case ELEVATOR_DISCARD_MERGE: 1934 merged = bio_attempt_discard_merge(q, rq, bio); 1935 break; 1936 default: 1937 break; 1938 } 1939 1940 if (merged) 1941 return true; 1942 } 1943 1944 return false; 1945 } 1946 1947 unsigned int blk_plug_queued_count(struct request_queue *q) 1948 { 1949 struct blk_plug *plug; 1950 struct request *rq; 1951 struct list_head *plug_list; 1952 unsigned int ret = 0; 1953 1954 plug = current->plug; 1955 if (!plug) 1956 goto out; 1957 1958 if (q->mq_ops) 1959 plug_list = &plug->mq_list; 1960 else 1961 plug_list = &plug->list; 1962 1963 list_for_each_entry(rq, plug_list, queuelist) { 1964 if (rq->q == q) 1965 ret++; 1966 } 1967 out: 1968 return ret; 1969 } 1970 1971 void blk_init_request_from_bio(struct request *req, struct bio *bio) 1972 { 1973 struct io_context *ioc = rq_ioc(bio); 1974 1975 if (bio->bi_opf & REQ_RAHEAD) 1976 req->cmd_flags |= REQ_FAILFAST_MASK; 1977 1978 req->__sector = bio->bi_iter.bi_sector; 1979 if (ioprio_valid(bio_prio(bio))) 1980 req->ioprio = bio_prio(bio); 1981 else if (ioc) 1982 req->ioprio = ioc->ioprio; 1983 else 1984 req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0); 1985 req->write_hint = bio->bi_write_hint; 1986 blk_rq_bio_prep(req->q, req, bio); 1987 } 1988 EXPORT_SYMBOL_GPL(blk_init_request_from_bio); 1989 1990 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio) 1991 { 1992 struct blk_plug *plug; 1993 int where = ELEVATOR_INSERT_SORT; 1994 struct request *req, *free; 1995 unsigned int request_count = 0; 1996 1997 /* 1998 * low level driver can indicate that it wants pages above a 1999 * certain limit bounced to low memory (ie for highmem, or even 2000 * ISA dma in theory) 2001 */ 2002 blk_queue_bounce(q, &bio); 2003 2004 blk_queue_split(q, &bio); 2005 2006 if (!bio_integrity_prep(bio)) 2007 return BLK_QC_T_NONE; 2008 2009 if (op_is_flush(bio->bi_opf)) { 2010 spin_lock_irq(q->queue_lock); 2011 where = 
ELEVATOR_INSERT_FLUSH; 2012 goto get_rq; 2013 } 2014 2015 /* 2016 * Check if we can merge with the plugged list before grabbing 2017 * any locks. 2018 */ 2019 if (!blk_queue_nomerges(q)) { 2020 if (blk_attempt_plug_merge(q, bio, &request_count, NULL)) 2021 return BLK_QC_T_NONE; 2022 } else 2023 request_count = blk_plug_queued_count(q); 2024 2025 spin_lock_irq(q->queue_lock); 2026 2027 switch (elv_merge(q, &req, bio)) { 2028 case ELEVATOR_BACK_MERGE: 2029 if (!bio_attempt_back_merge(q, req, bio)) 2030 break; 2031 elv_bio_merged(q, req, bio); 2032 free = attempt_back_merge(q, req); 2033 if (free) 2034 __blk_put_request(q, free); 2035 else 2036 elv_merged_request(q, req, ELEVATOR_BACK_MERGE); 2037 goto out_unlock; 2038 case ELEVATOR_FRONT_MERGE: 2039 if (!bio_attempt_front_merge(q, req, bio)) 2040 break; 2041 elv_bio_merged(q, req, bio); 2042 free = attempt_front_merge(q, req); 2043 if (free) 2044 __blk_put_request(q, free); 2045 else 2046 elv_merged_request(q, req, ELEVATOR_FRONT_MERGE); 2047 goto out_unlock; 2048 default: 2049 break; 2050 } 2051 2052 get_rq: 2053 rq_qos_throttle(q, bio, q->queue_lock); 2054 2055 /* 2056 * Grab a free request. This might sleep but cannot fail. 2057 * Returns with the queue unlocked. 2058 */ 2059 blk_queue_enter_live(q); 2060 req = get_request(q, bio->bi_opf, bio, 0, GFP_NOIO); 2061 if (IS_ERR(req)) { 2062 blk_queue_exit(q); 2063 rq_qos_cleanup(q, bio); 2064 if (PTR_ERR(req) == -ENOMEM) 2065 bio->bi_status = BLK_STS_RESOURCE; 2066 else 2067 bio->bi_status = BLK_STS_IOERR; 2068 bio_endio(bio); 2069 goto out_unlock; 2070 } 2071 2072 rq_qos_track(q, req, bio); 2073 2074 /* 2075 * After dropping the lock and possibly sleeping here, our request 2076 * may now be mergeable after it had proven unmergeable (above). 2077 * We don't worry about that case for efficiency. It won't happen 2078 * often, and the elevators are able to handle it. 2079 */ 2080 blk_init_request_from_bio(req, bio); 2081 2082 if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) 2083 req->cpu = raw_smp_processor_id(); 2084 2085 plug = current->plug; 2086 if (plug) { 2087 /* 2088 * If this is the first request added after a plug, fire 2089 * off a plug trace. 2090 * 2091 * @request_count may become stale because the task may have been 2092 * scheduled out, so check the plug list again.
2093 */ 2094 if (!request_count || list_empty(&plug->list)) 2095 trace_block_plug(q); 2096 else { 2097 struct request *last = list_entry_rq(plug->list.prev); 2098 if (request_count >= BLK_MAX_REQUEST_COUNT || 2099 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE) { 2100 blk_flush_plug_list(plug, false); 2101 trace_block_plug(q); 2102 } 2103 } 2104 list_add_tail(&req->queuelist, &plug->list); 2105 blk_account_io_start(req, true); 2106 } else { 2107 spin_lock_irq(q->queue_lock); 2108 add_acct_request(q, req, where); 2109 __blk_run_queue(q); 2110 out_unlock: 2111 spin_unlock_irq(q->queue_lock); 2112 } 2113 2114 return BLK_QC_T_NONE; 2115 } 2116 2117 static void handle_bad_sector(struct bio *bio, sector_t maxsector) 2118 { 2119 char b[BDEVNAME_SIZE]; 2120 2121 printk(KERN_INFO "attempt to access beyond end of device\n"); 2122 printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n", 2123 bio_devname(bio, b), bio->bi_opf, 2124 (unsigned long long)bio_end_sector(bio), 2125 (long long)maxsector); 2126 } 2127 2128 #ifdef CONFIG_FAIL_MAKE_REQUEST 2129 2130 static DECLARE_FAULT_ATTR(fail_make_request); 2131 2132 static int __init setup_fail_make_request(char *str) 2133 { 2134 return setup_fault_attr(&fail_make_request, str); 2135 } 2136 __setup("fail_make_request=", setup_fail_make_request); 2137 2138 static bool should_fail_request(struct hd_struct *part, unsigned int bytes) 2139 { 2140 return part->make_it_fail && should_fail(&fail_make_request, bytes); 2141 } 2142 2143 static int __init fail_make_request_debugfs(void) 2144 { 2145 struct dentry *dir = fault_create_debugfs_attr("fail_make_request", 2146 NULL, &fail_make_request); 2147 2148 return PTR_ERR_OR_ZERO(dir); 2149 } 2150 2151 late_initcall(fail_make_request_debugfs); 2152 2153 #else /* CONFIG_FAIL_MAKE_REQUEST */ 2154 2155 static inline bool should_fail_request(struct hd_struct *part, 2156 unsigned int bytes) 2157 { 2158 return false; 2159 } 2160 2161 #endif /* CONFIG_FAIL_MAKE_REQUEST */ 2162 2163 static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part) 2164 { 2165 if (part->policy && op_is_write(bio_op(bio))) { 2166 char b[BDEVNAME_SIZE]; 2167 2168 WARN_ONCE(1, 2169 "generic_make_request: Trying to write " 2170 "to read-only block-device %s (partno %d)\n", 2171 bio_devname(bio, b), part->partno); 2172 /* Older lvm-tools actually trigger this */ 2173 return false; 2174 } 2175 2176 return false; 2177 } 2178 2179 static noinline int should_fail_bio(struct bio *bio) 2180 { 2181 if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size)) 2182 return -EIO; 2183 return 0; 2184 } 2185 ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO); 2186 2187 /* 2188 * Check whether this bio extends beyond the end of the device or partition. 2189 * This may well happen - the kernel calls bread() without checking the size of 2190 * the device, e.g., when mounting a file system. 2191 */ 2192 static inline int bio_check_eod(struct bio *bio, sector_t maxsector) 2193 { 2194 unsigned int nr_sectors = bio_sectors(bio); 2195 2196 if (nr_sectors && maxsector && 2197 (nr_sectors > maxsector || 2198 bio->bi_iter.bi_sector > maxsector - nr_sectors)) { 2199 handle_bad_sector(bio, maxsector); 2200 return -EIO; 2201 } 2202 return 0; 2203 } 2204 2205 /* 2206 * Remap block n of partition p to block n+start(p) of the disk. 
2207 */ 2208 static inline int blk_partition_remap(struct bio *bio) 2209 { 2210 struct hd_struct *p; 2211 int ret = -EIO; 2212 2213 rcu_read_lock(); 2214 p = __disk_get_part(bio->bi_disk, bio->bi_partno); 2215 if (unlikely(!p)) 2216 goto out; 2217 if (unlikely(should_fail_request(p, bio->bi_iter.bi_size))) 2218 goto out; 2219 if (unlikely(bio_check_ro(bio, p))) 2220 goto out; 2221 2222 /* 2223 * Zone reset does not include bi_size so bio_sectors() is always 0. 2224 * Include a test for the reset op code and perform the remap if needed. 2225 */ 2226 if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET) { 2227 if (bio_check_eod(bio, part_nr_sects_read(p))) 2228 goto out; 2229 bio->bi_iter.bi_sector += p->start_sect; 2230 trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p), 2231 bio->bi_iter.bi_sector - p->start_sect); 2232 } 2233 bio->bi_partno = 0; 2234 ret = 0; 2235 out: 2236 rcu_read_unlock(); 2237 return ret; 2238 } 2239 2240 static noinline_for_stack bool 2241 generic_make_request_checks(struct bio *bio) 2242 { 2243 struct request_queue *q; 2244 int nr_sectors = bio_sectors(bio); 2245 blk_status_t status = BLK_STS_IOERR; 2246 char b[BDEVNAME_SIZE]; 2247 2248 might_sleep(); 2249 2250 q = bio->bi_disk->queue; 2251 if (unlikely(!q)) { 2252 printk(KERN_ERR 2253 "generic_make_request: Trying to access " 2254 "nonexistent block-device %s (%Lu)\n", 2255 bio_devname(bio, b), (long long)bio->bi_iter.bi_sector); 2256 goto end_io; 2257 } 2258 2259 /* 2260 * For a REQ_NOWAIT based request, return -EOPNOTSUPP 2261 * if queue is not a request based queue. 2262 */ 2263 if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q)) 2264 goto not_supported; 2265 2266 if (should_fail_bio(bio)) 2267 goto end_io; 2268 2269 if (bio->bi_partno) { 2270 if (unlikely(blk_partition_remap(bio))) 2271 goto end_io; 2272 } else { 2273 if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0))) 2274 goto end_io; 2275 if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk)))) 2276 goto end_io; 2277 } 2278 2279 /* 2280 * Filter flush bio's early so that make_request based 2281 * drivers without flush support don't have to worry 2282 * about them. 2283 */ 2284 if (op_is_flush(bio->bi_opf) && 2285 !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) { 2286 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA); 2287 if (!nr_sectors) { 2288 status = BLK_STS_OK; 2289 goto end_io; 2290 } 2291 } 2292 2293 switch (bio_op(bio)) { 2294 case REQ_OP_DISCARD: 2295 if (!blk_queue_discard(q)) 2296 goto not_supported; 2297 break; 2298 case REQ_OP_SECURE_ERASE: 2299 if (!blk_queue_secure_erase(q)) 2300 goto not_supported; 2301 break; 2302 case REQ_OP_WRITE_SAME: 2303 if (!q->limits.max_write_same_sectors) 2304 goto not_supported; 2305 break; 2306 case REQ_OP_ZONE_REPORT: 2307 case REQ_OP_ZONE_RESET: 2308 if (!blk_queue_is_zoned(q)) 2309 goto not_supported; 2310 break; 2311 case REQ_OP_WRITE_ZEROES: 2312 if (!q->limits.max_write_zeroes_sectors) 2313 goto not_supported; 2314 break; 2315 default: 2316 break; 2317 } 2318 2319 /* 2320 * Various block parts want %current->io_context and lazy ioc 2321 * allocation ends up trading a lot of pain for a small amount of 2322 * memory. Just allocate it upfront. This may fail and block 2323 * layer knows how to live with it. 
2324 */ 2325 create_io_context(GFP_ATOMIC, q->node); 2326 2327 if (!blkcg_bio_issue_check(q, bio)) 2328 return false; 2329 2330 if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) { 2331 trace_block_bio_queue(q, bio); 2332 /* Now that enqueuing has been traced, we need to trace 2333 * completion as well. 2334 */ 2335 bio_set_flag(bio, BIO_TRACE_COMPLETION); 2336 } 2337 return true; 2338 2339 not_supported: 2340 status = BLK_STS_NOTSUPP; 2341 end_io: 2342 bio->bi_status = status; 2343 bio_endio(bio); 2344 return false; 2345 } 2346 2347 /** 2348 * generic_make_request - hand a buffer to its device driver for I/O 2349 * @bio: The bio describing the location in memory and on the device. 2350 * 2351 * generic_make_request() is used to make I/O requests of block 2352 * devices. It is passed a &struct bio, which describes the I/O that needs 2353 * to be done. 2354 * 2355 * generic_make_request() does not return any status. The 2356 * success/failure status of the request, along with notification of 2357 * completion, is delivered asynchronously through the bio->bi_end_io 2358 * function described (one day) elsewhere. 2359 * 2360 * The caller of generic_make_request must make sure that bi_io_vec 2361 * are set to describe the memory buffer, and that bi_dev and bi_sector are 2362 * set to describe the device address, and the 2363 * bi_end_io and optionally bi_private are set to describe how 2364 * completion notification should be signaled. 2365 * 2366 * generic_make_request and the drivers it calls may use bi_next if this 2367 * bio happens to be merged with someone else, and may resubmit the bio to 2368 * a lower device by calling into generic_make_request recursively, which 2369 * means the bio should NOT be touched after the call to ->make_request_fn. 2370 */ 2371 blk_qc_t generic_make_request(struct bio *bio) 2372 { 2373 /* 2374 * bio_list_on_stack[0] contains bios submitted by the current 2375 * make_request_fn. 2376 * bio_list_on_stack[1] contains bios that were submitted before 2377 * the current make_request_fn, but that haven't been processed 2378 * yet. 2379 */ 2380 struct bio_list bio_list_on_stack[2]; 2381 blk_mq_req_flags_t flags = 0; 2382 struct request_queue *q = bio->bi_disk->queue; 2383 blk_qc_t ret = BLK_QC_T_NONE; 2384 2385 if (bio->bi_opf & REQ_NOWAIT) 2386 flags = BLK_MQ_REQ_NOWAIT; 2387 if (bio_flagged(bio, BIO_QUEUE_ENTERED)) 2388 blk_queue_enter_live(q); 2389 else if (blk_queue_enter(q, flags) < 0) { 2390 if (!blk_queue_dying(q) && (bio->bi_opf & REQ_NOWAIT)) 2391 bio_wouldblock_error(bio); 2392 else 2393 bio_io_error(bio); 2394 return ret; 2395 } 2396 2397 if (!generic_make_request_checks(bio)) 2398 goto out; 2399 2400 /* 2401 * We only want one ->make_request_fn to be active at a time, else 2402 * stack usage with stacked devices could be a problem. So use 2403 * current->bio_list to keep a list of requests submitted by a 2404 * make_request_fn function. current->bio_list is also used as a 2405 * flag to say if generic_make_request is currently active in this 2406 * task or not. If it is NULL, then no make_request is active. If 2407 * it is non-NULL, then a make_request is active, and new requests 2408 * should be added at the tail. 2409 */ 2410 if (current->bio_list) { 2411 bio_list_add(&current->bio_list[0], bio); 2412 goto out; 2413 } 2414 2415 /* following loop may be a bit non-obvious, and so deserves some 2416 * explanation. 2417 * Before entering the loop, bio->bi_next is NULL (as all callers 2418 * ensure that) so we have a list with a single bio.
2419 * We pretend that we have just taken it off a longer list, so 2420 * we assign bio_list to a pointer to the bio_list_on_stack, 2421 * thus initialising the bio_list of new bios to be 2422 * added. ->make_request() may indeed add some more bios 2423 * through a recursive call to generic_make_request. If it 2424 * did, we find a non-NULL value in bio_list and re-enter the loop 2425 * from the top. In this case we really did just take the bio 2426 * of the top of the list (no pretending) and so remove it from 2427 * bio_list, and call into ->make_request() again. 2428 */ 2429 BUG_ON(bio->bi_next); 2430 bio_list_init(&bio_list_on_stack[0]); 2431 current->bio_list = bio_list_on_stack; 2432 do { 2433 bool enter_succeeded = true; 2434 2435 if (unlikely(q != bio->bi_disk->queue)) { 2436 if (q) 2437 blk_queue_exit(q); 2438 q = bio->bi_disk->queue; 2439 flags = 0; 2440 if (bio->bi_opf & REQ_NOWAIT) 2441 flags = BLK_MQ_REQ_NOWAIT; 2442 if (blk_queue_enter(q, flags) < 0) { 2443 enter_succeeded = false; 2444 q = NULL; 2445 } 2446 } 2447 2448 if (enter_succeeded) { 2449 struct bio_list lower, same; 2450 2451 /* Create a fresh bio_list for all subordinate requests */ 2452 bio_list_on_stack[1] = bio_list_on_stack[0]; 2453 bio_list_init(&bio_list_on_stack[0]); 2454 ret = q->make_request_fn(q, bio); 2455 2456 /* sort new bios into those for a lower level 2457 * and those for the same level 2458 */ 2459 bio_list_init(&lower); 2460 bio_list_init(&same); 2461 while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL) 2462 if (q == bio->bi_disk->queue) 2463 bio_list_add(&same, bio); 2464 else 2465 bio_list_add(&lower, bio); 2466 /* now assemble so we handle the lowest level first */ 2467 bio_list_merge(&bio_list_on_stack[0], &lower); 2468 bio_list_merge(&bio_list_on_stack[0], &same); 2469 bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]); 2470 } else { 2471 if (unlikely(!blk_queue_dying(q) && 2472 (bio->bi_opf & REQ_NOWAIT))) 2473 bio_wouldblock_error(bio); 2474 else 2475 bio_io_error(bio); 2476 } 2477 bio = bio_list_pop(&bio_list_on_stack[0]); 2478 } while (bio); 2479 current->bio_list = NULL; /* deactivate */ 2480 2481 out: 2482 if (q) 2483 blk_queue_exit(q); 2484 return ret; 2485 } 2486 EXPORT_SYMBOL(generic_make_request); 2487 2488 /** 2489 * direct_make_request - hand a buffer directly to its device driver for I/O 2490 * @bio: The bio describing the location in memory and on the device. 2491 * 2492 * This function behaves like generic_make_request(), but does not protect 2493 * against recursion. Must only be used if the called driver is known 2494 * to not call generic_make_request (or direct_make_request) again from 2495 * its make_request function. (Calling direct_make_request again from 2496 * a workqueue is perfectly fine as that doesn't recurse). 2497 */ 2498 blk_qc_t direct_make_request(struct bio *bio) 2499 { 2500 struct request_queue *q = bio->bi_disk->queue; 2501 bool nowait = bio->bi_opf & REQ_NOWAIT; 2502 blk_qc_t ret; 2503 2504 if (!generic_make_request_checks(bio)) 2505 return BLK_QC_T_NONE; 2506 2507 if (unlikely(blk_queue_enter(q, nowait ? 
BLK_MQ_REQ_NOWAIT : 0))) { 2508 if (nowait && !blk_queue_dying(q)) 2509 bio->bi_status = BLK_STS_AGAIN; 2510 else 2511 bio->bi_status = BLK_STS_IOERR; 2512 bio_endio(bio); 2513 return BLK_QC_T_NONE; 2514 } 2515 2516 ret = q->make_request_fn(q, bio); 2517 blk_queue_exit(q); 2518 return ret; 2519 } 2520 EXPORT_SYMBOL_GPL(direct_make_request); 2521 2522 /** 2523 * submit_bio - submit a bio to the block device layer for I/O 2524 * @bio: The &struct bio which describes the I/O 2525 * 2526 * submit_bio() is very similar in purpose to generic_make_request(), and 2527 * uses that function to do most of the work. Both are fairly rough 2528 * interfaces; @bio must be presetup and ready for I/O. 2529 * 2530 */ 2531 blk_qc_t submit_bio(struct bio *bio) 2532 { 2533 /* 2534 * If it's a regular read/write or a barrier with data attached, 2535 * go through the normal accounting stuff before submission. 2536 */ 2537 if (bio_has_data(bio)) { 2538 unsigned int count; 2539 2540 if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) 2541 count = queue_logical_block_size(bio->bi_disk->queue) >> 9; 2542 else 2543 count = bio_sectors(bio); 2544 2545 if (op_is_write(bio_op(bio))) { 2546 count_vm_events(PGPGOUT, count); 2547 } else { 2548 task_io_account_read(bio->bi_iter.bi_size); 2549 count_vm_events(PGPGIN, count); 2550 } 2551 2552 if (unlikely(block_dump)) { 2553 char b[BDEVNAME_SIZE]; 2554 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n", 2555 current->comm, task_pid_nr(current), 2556 op_is_write(bio_op(bio)) ? "WRITE" : "READ", 2557 (unsigned long long)bio->bi_iter.bi_sector, 2558 bio_devname(bio, b), count); 2559 } 2560 } 2561 2562 return generic_make_request(bio); 2563 } 2564 EXPORT_SYMBOL(submit_bio); 2565 2566 bool blk_poll(struct request_queue *q, blk_qc_t cookie) 2567 { 2568 if (!q->poll_fn || !blk_qc_t_valid(cookie)) 2569 return false; 2570 2571 if (current->plug) 2572 blk_flush_plug_list(current->plug, false); 2573 return q->poll_fn(q, cookie); 2574 } 2575 EXPORT_SYMBOL_GPL(blk_poll); 2576 2577 /** 2578 * blk_cloned_rq_check_limits - Helper function to check a cloned request 2579 * for new the queue limits 2580 * @q: the queue 2581 * @rq: the request being checked 2582 * 2583 * Description: 2584 * @rq may have been made based on weaker limitations of upper-level queues 2585 * in request stacking drivers, and it may violate the limitation of @q. 2586 * Since the block layer and the underlying device driver trust @rq 2587 * after it is inserted to @q, it should be checked against @q before 2588 * the insertion using this generic function. 2589 * 2590 * Request stacking drivers like request-based dm may change the queue 2591 * limits when retrying requests on other queues. Those requests need 2592 * to be checked against the new queue limits again during dispatch. 2593 */ 2594 static int blk_cloned_rq_check_limits(struct request_queue *q, 2595 struct request *rq) 2596 { 2597 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) { 2598 printk(KERN_ERR "%s: over max size limit.\n", __func__); 2599 return -EIO; 2600 } 2601 2602 /* 2603 * queue's settings related to segment counting like q->bounce_pfn 2604 * may differ from that of other stacking queues. 2605 * Recalculate it to check the request correctly on this queue's 2606 * limitation. 
2607 */ 2608 blk_recalc_rq_segments(rq); 2609 if (rq->nr_phys_segments > queue_max_segments(q)) { 2610 printk(KERN_ERR "%s: over max segments limit.\n", __func__); 2611 return -EIO; 2612 } 2613 2614 return 0; 2615 } 2616 2617 /** 2618 * blk_insert_cloned_request - Helper for stacking drivers to submit a request 2619 * @q: the queue to submit the request 2620 * @rq: the request being queued 2621 */ 2622 blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq) 2623 { 2624 unsigned long flags; 2625 int where = ELEVATOR_INSERT_BACK; 2626 2627 if (blk_cloned_rq_check_limits(q, rq)) 2628 return BLK_STS_IOERR; 2629 2630 if (rq->rq_disk && 2631 should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq))) 2632 return BLK_STS_IOERR; 2633 2634 if (q->mq_ops) { 2635 if (blk_queue_io_stat(q)) 2636 blk_account_io_start(rq, true); 2637 /* 2638 * Since we have a scheduler attached on the top device, 2639 * bypass a potential scheduler on the bottom device for 2640 * insert. 2641 */ 2642 return blk_mq_request_issue_directly(rq); 2643 } 2644 2645 spin_lock_irqsave(q->queue_lock, flags); 2646 if (unlikely(blk_queue_dying(q))) { 2647 spin_unlock_irqrestore(q->queue_lock, flags); 2648 return BLK_STS_IOERR; 2649 } 2650 2651 /* 2652 * Submitting request must be dequeued before calling this function 2653 * because it will be linked to another request_queue 2654 */ 2655 BUG_ON(blk_queued_rq(rq)); 2656 2657 if (op_is_flush(rq->cmd_flags)) 2658 where = ELEVATOR_INSERT_FLUSH; 2659 2660 add_acct_request(q, rq, where); 2661 if (where == ELEVATOR_INSERT_FLUSH) 2662 __blk_run_queue(q); 2663 spin_unlock_irqrestore(q->queue_lock, flags); 2664 2665 return BLK_STS_OK; 2666 } 2667 EXPORT_SYMBOL_GPL(blk_insert_cloned_request); 2668 2669 /** 2670 * blk_rq_err_bytes - determine number of bytes till the next failure boundary 2671 * @rq: request to examine 2672 * 2673 * Description: 2674 * A request could be merge of IOs which require different failure 2675 * handling. This function determines the number of bytes which 2676 * can be failed from the beginning of the request without 2677 * crossing into area which need to be retried further. 2678 * 2679 * Return: 2680 * The number of bytes to fail. 2681 */ 2682 unsigned int blk_rq_err_bytes(const struct request *rq) 2683 { 2684 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK; 2685 unsigned int bytes = 0; 2686 struct bio *bio; 2687 2688 if (!(rq->rq_flags & RQF_MIXED_MERGE)) 2689 return blk_rq_bytes(rq); 2690 2691 /* 2692 * Currently the only 'mixing' which can happen is between 2693 * different fastfail types. We can safely fail portions 2694 * which have all the failfast bits that the first one has - 2695 * the ones which are at least as eager to fail as the first 2696 * one. 2697 */ 2698 for (bio = rq->bio; bio; bio = bio->bi_next) { 2699 if ((bio->bi_opf & ff) != ff) 2700 break; 2701 bytes += bio->bi_iter.bi_size; 2702 } 2703 2704 /* this could lead to infinite loop */ 2705 BUG_ON(blk_rq_bytes(rq) && !bytes); 2706 return bytes; 2707 } 2708 EXPORT_SYMBOL_GPL(blk_rq_err_bytes); 2709 2710 void blk_account_io_completion(struct request *req, unsigned int bytes) 2711 { 2712 if (blk_do_io_stat(req)) { 2713 const int sgrp = op_stat_group(req_op(req)); 2714 struct hd_struct *part; 2715 int cpu; 2716 2717 cpu = part_stat_lock(); 2718 part = req->part; 2719 part_stat_add(cpu, part, sectors[sgrp], bytes >> 9); 2720 part_stat_unlock(); 2721 } 2722 } 2723 2724 void blk_account_io_done(struct request *req, u64 now) 2725 { 2726 /* 2727 * Account IO completion. 
flush_rq isn't accounted as a 2728 * normal IO on queueing nor completion. Accounting the 2729 * containing request is enough. 2730 */ 2731 if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) { 2732 unsigned long duration; 2733 const int sgrp = op_stat_group(req_op(req)); 2734 struct hd_struct *part; 2735 int cpu; 2736 2737 duration = nsecs_to_jiffies(now - req->start_time_ns); 2738 cpu = part_stat_lock(); 2739 part = req->part; 2740 2741 part_stat_inc(cpu, part, ios[sgrp]); 2742 part_stat_add(cpu, part, ticks[sgrp], duration); 2743 part_round_stats(req->q, cpu, part); 2744 part_dec_in_flight(req->q, part, rq_data_dir(req)); 2745 2746 hd_struct_put(part); 2747 part_stat_unlock(); 2748 } 2749 } 2750 2751 #ifdef CONFIG_PM 2752 /* 2753 * Don't process normal requests when the queue is suspended 2754 * or in the process of suspending/resuming 2755 */ 2756 static bool blk_pm_allow_request(struct request *rq) 2757 { 2758 switch (rq->q->rpm_status) { 2759 case RPM_RESUMING: 2760 case RPM_SUSPENDING: 2761 return rq->rq_flags & RQF_PM; 2762 case RPM_SUSPENDED: 2763 return false; 2764 default: 2765 return true; 2766 } 2767 } 2768 #else 2769 static bool blk_pm_allow_request(struct request *rq) 2770 { 2771 return true; 2772 } 2773 #endif 2774 2775 void blk_account_io_start(struct request *rq, bool new_io) 2776 { 2777 struct hd_struct *part; 2778 int rw = rq_data_dir(rq); 2779 int cpu; 2780 2781 if (!blk_do_io_stat(rq)) 2782 return; 2783 2784 cpu = part_stat_lock(); 2785 2786 if (!new_io) { 2787 part = rq->part; 2788 part_stat_inc(cpu, part, merges[rw]); 2789 } else { 2790 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); 2791 if (!hd_struct_try_get(part)) { 2792 /* 2793 * The partition is already being removed, 2794 * the request will be accounted on the disk only 2795 * 2796 * We take a reference on disk->part0 although that 2797 * partition will never be deleted, so we can treat 2798 * it as any other partition. 2799 */ 2800 part = &rq->rq_disk->part0; 2801 hd_struct_get(part); 2802 } 2803 part_round_stats(rq->q, cpu, part); 2804 part_inc_in_flight(rq->q, part, rw); 2805 rq->part = part; 2806 } 2807 2808 part_stat_unlock(); 2809 } 2810 2811 static struct request *elv_next_request(struct request_queue *q) 2812 { 2813 struct request *rq; 2814 struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL); 2815 2816 WARN_ON_ONCE(q->mq_ops); 2817 2818 while (1) { 2819 list_for_each_entry(rq, &q->queue_head, queuelist) { 2820 if (blk_pm_allow_request(rq)) 2821 return rq; 2822 2823 if (rq->rq_flags & RQF_SOFTBARRIER) 2824 break; 2825 } 2826 2827 /* 2828 * If a flush request is running and the flush isn't queueable 2829 * in the drive, we can hold the queue until the flush request is 2830 * finished. Even if we don't do this, the driver can't dispatch the next 2831 * requests and will requeue them. Holding the queue can also improve 2832 * throughput. For example, we have requests flush1, write1, 2833 * flush2. flush1 is dispatched, then the queue is held and write1 2834 * isn't inserted into the queue. After flush1 finishes, flush2 2835 * will be dispatched. Since the disk cache is already clean, 2836 * flush2 will finish very soon, so it looks like flush2 has been 2837 * folded into flush1. 2838 * While the queue is held, a flag is set to indicate that the queue 2839 * should be restarted later. Please see flush_end_io() for 2840 * details.
2841 */ 2842 if (fq->flush_pending_idx != fq->flush_running_idx && 2843 !queue_flush_queueable(q)) { 2844 fq->flush_queue_delayed = 1; 2845 return NULL; 2846 } 2847 if (unlikely(blk_queue_bypass(q)) || 2848 !q->elevator->type->ops.sq.elevator_dispatch_fn(q, 0)) 2849 return NULL; 2850 } 2851 } 2852 2853 /** 2854 * blk_peek_request - peek at the top of a request queue 2855 * @q: request queue to peek at 2856 * 2857 * Description: 2858 * Return the request at the top of @q. The returned request 2859 * should be started using blk_start_request() before LLD starts 2860 * processing it. 2861 * 2862 * Return: 2863 * Pointer to the request at the top of @q if available. Null 2864 * otherwise. 2865 */ 2866 struct request *blk_peek_request(struct request_queue *q) 2867 { 2868 struct request *rq; 2869 int ret; 2870 2871 lockdep_assert_held(q->queue_lock); 2872 WARN_ON_ONCE(q->mq_ops); 2873 2874 while ((rq = elv_next_request(q)) != NULL) { 2875 if (!(rq->rq_flags & RQF_STARTED)) { 2876 /* 2877 * This is the first time the device driver 2878 * sees this request (possibly after 2879 * requeueing). Notify IO scheduler. 2880 */ 2881 if (rq->rq_flags & RQF_SORTED) 2882 elv_activate_rq(q, rq); 2883 2884 /* 2885 * just mark as started even if we don't start 2886 * it, a request that has been delayed should 2887 * not be passed by new incoming requests 2888 */ 2889 rq->rq_flags |= RQF_STARTED; 2890 trace_block_rq_issue(q, rq); 2891 } 2892 2893 if (!q->boundary_rq || q->boundary_rq == rq) { 2894 q->end_sector = rq_end_sector(rq); 2895 q->boundary_rq = NULL; 2896 } 2897 2898 if (rq->rq_flags & RQF_DONTPREP) 2899 break; 2900 2901 if (q->dma_drain_size && blk_rq_bytes(rq)) { 2902 /* 2903 * make sure space for the drain appears we 2904 * know we can do this because max_hw_segments 2905 * has been adjusted to be one fewer than the 2906 * device can handle 2907 */ 2908 rq->nr_phys_segments++; 2909 } 2910 2911 if (!q->prep_rq_fn) 2912 break; 2913 2914 ret = q->prep_rq_fn(q, rq); 2915 if (ret == BLKPREP_OK) { 2916 break; 2917 } else if (ret == BLKPREP_DEFER) { 2918 /* 2919 * the request may have been (partially) prepped. 2920 * we need to keep this request in the front to 2921 * avoid resource deadlock. RQF_STARTED will 2922 * prevent other fs requests from passing this one. 2923 */ 2924 if (q->dma_drain_size && blk_rq_bytes(rq) && 2925 !(rq->rq_flags & RQF_DONTPREP)) { 2926 /* 2927 * remove the space for the drain we added 2928 * so that we don't add it again 2929 */ 2930 --rq->nr_phys_segments; 2931 } 2932 2933 rq = NULL; 2934 break; 2935 } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) { 2936 rq->rq_flags |= RQF_QUIET; 2937 /* 2938 * Mark this request as started so we don't trigger 2939 * any debug logic in the end I/O path. 2940 */ 2941 blk_start_request(rq); 2942 __blk_end_request_all(rq, ret == BLKPREP_INVALID ? 2943 BLK_STS_TARGET : BLK_STS_IOERR); 2944 } else { 2945 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); 2946 break; 2947 } 2948 } 2949 2950 return rq; 2951 } 2952 EXPORT_SYMBOL(blk_peek_request); 2953 2954 static void blk_dequeue_request(struct request *rq) 2955 { 2956 struct request_queue *q = rq->q; 2957 2958 BUG_ON(list_empty(&rq->queuelist)); 2959 BUG_ON(ELV_ON_HASH(rq)); 2960 2961 list_del_init(&rq->queuelist); 2962 2963 /* 2964 * the time frame between a request being removed from the lists 2965 * and to it is freed is accounted as io that is in progress at 2966 * the driver side. 
2967 */ 2968 if (blk_account_rq(rq)) 2969 q->in_flight[rq_is_sync(rq)]++; 2970 } 2971 2972 /** 2973 * blk_start_request - start request processing on the driver 2974 * @req: request to dequeue 2975 * 2976 * Description: 2977 * Dequeue @req and start timeout timer on it. This hands off the 2978 * request to the driver. 2979 */ 2980 void blk_start_request(struct request *req) 2981 { 2982 lockdep_assert_held(req->q->queue_lock); 2983 WARN_ON_ONCE(req->q->mq_ops); 2984 2985 blk_dequeue_request(req); 2986 2987 if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) { 2988 req->io_start_time_ns = ktime_get_ns(); 2989 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW 2990 req->throtl_size = blk_rq_sectors(req); 2991 #endif 2992 req->rq_flags |= RQF_STATS; 2993 rq_qos_issue(req->q, req); 2994 } 2995 2996 BUG_ON(blk_rq_is_complete(req)); 2997 blk_add_timer(req); 2998 } 2999 EXPORT_SYMBOL(blk_start_request); 3000 3001 /** 3002 * blk_fetch_request - fetch a request from a request queue 3003 * @q: request queue to fetch a request from 3004 * 3005 * Description: 3006 * Return the request at the top of @q. The request is started on 3007 * return and LLD can start processing it immediately. 3008 * 3009 * Return: 3010 * Pointer to the request at the top of @q if available. Null 3011 * otherwise. 3012 */ 3013 struct request *blk_fetch_request(struct request_queue *q) 3014 { 3015 struct request *rq; 3016 3017 lockdep_assert_held(q->queue_lock); 3018 WARN_ON_ONCE(q->mq_ops); 3019 3020 rq = blk_peek_request(q); 3021 if (rq) 3022 blk_start_request(rq); 3023 return rq; 3024 } 3025 EXPORT_SYMBOL(blk_fetch_request); 3026 3027 /* 3028 * Steal bios from a request and add them to a bio list. 3029 * The request must not have been partially completed before. 3030 */ 3031 void blk_steal_bios(struct bio_list *list, struct request *rq) 3032 { 3033 if (rq->bio) { 3034 if (list->tail) 3035 list->tail->bi_next = rq->bio; 3036 else 3037 list->head = rq->bio; 3038 list->tail = rq->biotail; 3039 3040 rq->bio = NULL; 3041 rq->biotail = NULL; 3042 } 3043 3044 rq->__data_len = 0; 3045 } 3046 EXPORT_SYMBOL_GPL(blk_steal_bios); 3047 3048 /** 3049 * blk_update_request - Special helper function for request stacking drivers 3050 * @req: the request being processed 3051 * @error: block status code 3052 * @nr_bytes: number of bytes to complete @req 3053 * 3054 * Description: 3055 * Ends I/O on a number of bytes attached to @req, but doesn't complete 3056 * the request structure even if @req doesn't have leftover. 3057 * If @req has leftover, sets it up for the next range of segments. 3058 * 3059 * This special helper function is only for request stacking drivers 3060 * (e.g. request-based dm) so that they can handle partial completion. 3061 * Actual device drivers should use blk_end_request instead. 3062 * 3063 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees 3064 * %false return from this function. 3065 * 3066 * Note: 3067 * The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in both 3068 * blk_rq_bytes() and in blk_update_request(). 
3069 * 3070 * Return: 3071 * %false - this request doesn't have any more data 3072 * %true - this request has more data 3073 **/ 3074 bool blk_update_request(struct request *req, blk_status_t error, 3075 unsigned int nr_bytes) 3076 { 3077 int total_bytes; 3078 3079 trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes); 3080 3081 if (!req->bio) 3082 return false; 3083 3084 if (unlikely(error && !blk_rq_is_passthrough(req) && 3085 !(req->rq_flags & RQF_QUIET))) 3086 print_req_error(req, error); 3087 3088 blk_account_io_completion(req, nr_bytes); 3089 3090 total_bytes = 0; 3091 while (req->bio) { 3092 struct bio *bio = req->bio; 3093 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); 3094 3095 if (bio_bytes == bio->bi_iter.bi_size) 3096 req->bio = bio->bi_next; 3097 3098 /* Completion has already been traced */ 3099 bio_clear_flag(bio, BIO_TRACE_COMPLETION); 3100 req_bio_endio(req, bio, bio_bytes, error); 3101 3102 total_bytes += bio_bytes; 3103 nr_bytes -= bio_bytes; 3104 3105 if (!nr_bytes) 3106 break; 3107 } 3108 3109 /* 3110 * completely done 3111 */ 3112 if (!req->bio) { 3113 /* 3114 * Reset counters so that the request stacking driver 3115 * can find how many bytes remain in the request 3116 * later. 3117 */ 3118 req->__data_len = 0; 3119 return false; 3120 } 3121 3122 req->__data_len -= total_bytes; 3123 3124 /* update sector only for requests with clear definition of sector */ 3125 if (!blk_rq_is_passthrough(req)) 3126 req->__sector += total_bytes >> 9; 3127 3128 /* mixed attributes always follow the first bio */ 3129 if (req->rq_flags & RQF_MIXED_MERGE) { 3130 req->cmd_flags &= ~REQ_FAILFAST_MASK; 3131 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK; 3132 } 3133 3134 if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) { 3135 /* 3136 * If total number of sectors is less than the first segment 3137 * size, something has gone terribly wrong. 3138 */ 3139 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { 3140 blk_dump_rq_flags(req, "request botched"); 3141 req->__data_len = blk_rq_cur_bytes(req); 3142 } 3143 3144 /* recalculate the number of segments */ 3145 blk_recalc_rq_segments(req); 3146 } 3147 3148 return true; 3149 } 3150 EXPORT_SYMBOL_GPL(blk_update_request); 3151 3152 static bool blk_update_bidi_request(struct request *rq, blk_status_t error, 3153 unsigned int nr_bytes, 3154 unsigned int bidi_bytes) 3155 { 3156 if (blk_update_request(rq, error, nr_bytes)) 3157 return true; 3158 3159 /* Bidi request must be completed as a whole */ 3160 if (unlikely(blk_bidi_rq(rq)) && 3161 blk_update_request(rq->next_rq, error, bidi_bytes)) 3162 return true; 3163 3164 if (blk_queue_add_random(rq->q)) 3165 add_disk_randomness(rq->rq_disk); 3166 3167 return false; 3168 } 3169 3170 /** 3171 * blk_unprep_request - unprepare a request 3172 * @req: the request 3173 * 3174 * This function makes a request ready for complete resubmission (or 3175 * completion). It happens only after all error handling is complete, 3176 * so represents the appropriate moment to deallocate any resources 3177 * that were allocated to the request in the prep_rq_fn. The queue 3178 * lock is held when calling this. 
3179 */ 3180 void blk_unprep_request(struct request *req) 3181 { 3182 struct request_queue *q = req->q; 3183 3184 req->rq_flags &= ~RQF_DONTPREP; 3185 if (q->unprep_rq_fn) 3186 q->unprep_rq_fn(q, req); 3187 } 3188 EXPORT_SYMBOL_GPL(blk_unprep_request); 3189 3190 void blk_finish_request(struct request *req, blk_status_t error) 3191 { 3192 struct request_queue *q = req->q; 3193 u64 now = ktime_get_ns(); 3194 3195 lockdep_assert_held(req->q->queue_lock); 3196 WARN_ON_ONCE(q->mq_ops); 3197 3198 if (req->rq_flags & RQF_STATS) 3199 blk_stat_add(req, now); 3200 3201 if (req->rq_flags & RQF_QUEUED) 3202 blk_queue_end_tag(q, req); 3203 3204 BUG_ON(blk_queued_rq(req)); 3205 3206 if (unlikely(laptop_mode) && !blk_rq_is_passthrough(req)) 3207 laptop_io_completion(req->q->backing_dev_info); 3208 3209 blk_delete_timer(req); 3210 3211 if (req->rq_flags & RQF_DONTPREP) 3212 blk_unprep_request(req); 3213 3214 blk_account_io_done(req, now); 3215 3216 if (req->end_io) { 3217 rq_qos_done(q, req); 3218 req->end_io(req, error); 3219 } else { 3220 if (blk_bidi_rq(req)) 3221 __blk_put_request(req->next_rq->q, req->next_rq); 3222 3223 __blk_put_request(q, req); 3224 } 3225 } 3226 EXPORT_SYMBOL(blk_finish_request); 3227 3228 /** 3229 * blk_end_bidi_request - Complete a bidi request 3230 * @rq: the request to complete 3231 * @error: block status code 3232 * @nr_bytes: number of bytes to complete @rq 3233 * @bidi_bytes: number of bytes to complete @rq->next_rq 3234 * 3235 * Description: 3236 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. 3237 * Drivers that support bidi can safely call this member for any 3238 * type of request, bidi or uni. In the latter case @bidi_bytes is 3239 * just ignored. 3240 * 3241 * Return: 3242 * %false - we are done with this request 3243 * %true - still buffers pending for this request 3244 **/ 3245 static bool blk_end_bidi_request(struct request *rq, blk_status_t error, 3246 unsigned int nr_bytes, unsigned int bidi_bytes) 3247 { 3248 struct request_queue *q = rq->q; 3249 unsigned long flags; 3250 3251 WARN_ON_ONCE(q->mq_ops); 3252 3253 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) 3254 return true; 3255 3256 spin_lock_irqsave(q->queue_lock, flags); 3257 blk_finish_request(rq, error); 3258 spin_unlock_irqrestore(q->queue_lock, flags); 3259 3260 return false; 3261 } 3262 3263 /** 3264 * __blk_end_bidi_request - Complete a bidi request with queue lock held 3265 * @rq: the request to complete 3266 * @error: block status code 3267 * @nr_bytes: number of bytes to complete @rq 3268 * @bidi_bytes: number of bytes to complete @rq->next_rq 3269 * 3270 * Description: 3271 * Identical to blk_end_bidi_request() except that queue lock is 3272 * assumed to be locked on entry and remains so on return. 3273 * 3274 * Return: 3275 * %false - we are done with this request 3276 * %true - still buffers pending for this request 3277 **/ 3278 static bool __blk_end_bidi_request(struct request *rq, blk_status_t error, 3279 unsigned int nr_bytes, unsigned int bidi_bytes) 3280 { 3281 lockdep_assert_held(rq->q->queue_lock); 3282 WARN_ON_ONCE(rq->q->mq_ops); 3283 3284 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) 3285 return true; 3286 3287 blk_finish_request(rq, error); 3288 3289 return false; 3290 } 3291 3292 /** 3293 * blk_end_request - Helper function for drivers to complete the request.
3294 * @rq: the request being processed 3295 * @error: block status code 3296 * @nr_bytes: number of bytes to complete 3297 * 3298 * Description: 3299 * Ends I/O on a number of bytes attached to @rq. 3300 * If @rq has leftover, sets it up for the next range of segments. 3301 * 3302 * Return: 3303 * %false - we are done with this request 3304 * %true - still buffers pending for this request 3305 **/ 3306 bool blk_end_request(struct request *rq, blk_status_t error, 3307 unsigned int nr_bytes) 3308 { 3309 WARN_ON_ONCE(rq->q->mq_ops); 3310 return blk_end_bidi_request(rq, error, nr_bytes, 0); 3311 } 3312 EXPORT_SYMBOL(blk_end_request); 3313 3314 /** 3315 * blk_end_request_all - Helper function for drivers to finish the request. 3316 * @rq: the request to finish 3317 * @error: block status code 3318 * 3319 * Description: 3320 * Completely finish @rq. 3321 */ 3322 void blk_end_request_all(struct request *rq, blk_status_t error) 3323 { 3324 bool pending; 3325 unsigned int bidi_bytes = 0; 3326 3327 if (unlikely(blk_bidi_rq(rq))) 3328 bidi_bytes = blk_rq_bytes(rq->next_rq); 3329 3330 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); 3331 BUG_ON(pending); 3332 } 3333 EXPORT_SYMBOL(blk_end_request_all); 3334 3335 /** 3336 * __blk_end_request - Helper function for drivers to complete the request. 3337 * @rq: the request being processed 3338 * @error: block status code 3339 * @nr_bytes: number of bytes to complete 3340 * 3341 * Description: 3342 * Must be called with queue lock held unlike blk_end_request(). 3343 * 3344 * Return: 3345 * %false - we are done with this request 3346 * %true - still buffers pending for this request 3347 **/ 3348 bool __blk_end_request(struct request *rq, blk_status_t error, 3349 unsigned int nr_bytes) 3350 { 3351 lockdep_assert_held(rq->q->queue_lock); 3352 WARN_ON_ONCE(rq->q->mq_ops); 3353 3354 return __blk_end_bidi_request(rq, error, nr_bytes, 0); 3355 } 3356 EXPORT_SYMBOL(__blk_end_request); 3357 3358 /** 3359 * __blk_end_request_all - Helper function for drivers to finish the request. 3360 * @rq: the request to finish 3361 * @error: block status code 3362 * 3363 * Description: 3364 * Completely finish @rq. Must be called with queue lock held. 3365 */ 3366 void __blk_end_request_all(struct request *rq, blk_status_t error) 3367 { 3368 bool pending; 3369 unsigned int bidi_bytes = 0; 3370 3371 lockdep_assert_held(rq->q->queue_lock); 3372 WARN_ON_ONCE(rq->q->mq_ops); 3373 3374 if (unlikely(blk_bidi_rq(rq))) 3375 bidi_bytes = blk_rq_bytes(rq->next_rq); 3376 3377 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); 3378 BUG_ON(pending); 3379 } 3380 EXPORT_SYMBOL(__blk_end_request_all); 3381 3382 /** 3383 * __blk_end_request_cur - Helper function to finish the current request chunk. 3384 * @rq: the request to finish the current chunk for 3385 * @error: block status code 3386 * 3387 * Description: 3388 * Complete the current consecutively mapped chunk from @rq. Must 3389 * be called with queue lock held.
3390 * 3391 * Return: 3392 * %false - we are done with this request 3393 * %true - still buffers pending for this request 3394 */ 3395 bool __blk_end_request_cur(struct request *rq, blk_status_t error) 3396 { 3397 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); 3398 } 3399 EXPORT_SYMBOL(__blk_end_request_cur); 3400 3401 void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 3402 struct bio *bio) 3403 { 3404 if (bio_has_data(bio)) 3405 rq->nr_phys_segments = bio_phys_segments(q, bio); 3406 else if (bio_op(bio) == REQ_OP_DISCARD) 3407 rq->nr_phys_segments = 1; 3408 3409 rq->__data_len = bio->bi_iter.bi_size; 3410 rq->bio = rq->biotail = bio; 3411 3412 if (bio->bi_disk) 3413 rq->rq_disk = bio->bi_disk; 3414 } 3415 3416 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 3417 /** 3418 * rq_flush_dcache_pages - Helper function to flush all pages in a request 3419 * @rq: the request to be flushed 3420 * 3421 * Description: 3422 * Flush all pages in @rq. 3423 */ 3424 void rq_flush_dcache_pages(struct request *rq) 3425 { 3426 struct req_iterator iter; 3427 struct bio_vec bvec; 3428 3429 rq_for_each_segment(bvec, rq, iter) 3430 flush_dcache_page(bvec.bv_page); 3431 } 3432 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); 3433 #endif 3434 3435 /** 3436 * blk_lld_busy - Check if underlying low-level drivers of a device are busy 3437 * @q : the queue of the device being checked 3438 * 3439 * Description: 3440 * Check if underlying low-level drivers of a device are busy. 3441 * If the drivers want to export their busy state, they must set own 3442 * exporting function using blk_queue_lld_busy() first. 3443 * 3444 * Basically, this function is used only by request stacking drivers 3445 * to stop dispatching requests to underlying devices when underlying 3446 * devices are busy. This behavior helps more I/O merging on the queue 3447 * of the request stacking driver and prevents I/O throughput regression 3448 * on burst I/O load. 3449 * 3450 * Return: 3451 * 0 - Not busy (The request stacking driver should dispatch request) 3452 * 1 - Busy (The request stacking driver should stop dispatching request) 3453 */ 3454 int blk_lld_busy(struct request_queue *q) 3455 { 3456 if (q->lld_busy_fn) 3457 return q->lld_busy_fn(q); 3458 3459 return 0; 3460 } 3461 EXPORT_SYMBOL_GPL(blk_lld_busy); 3462 3463 /** 3464 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request 3465 * @rq: the clone request to be cleaned up 3466 * 3467 * Description: 3468 * Free all bios in @rq for a cloned request. 3469 */ 3470 void blk_rq_unprep_clone(struct request *rq) 3471 { 3472 struct bio *bio; 3473 3474 while ((bio = rq->bio) != NULL) { 3475 rq->bio = bio->bi_next; 3476 3477 bio_put(bio); 3478 } 3479 } 3480 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); 3481 3482 /* 3483 * Copy attributes of the original request to the clone request. 3484 * The actual data parts (e.g. ->cmd, ->sense) are not copied. 
3485 */ 3486 static void __blk_rq_prep_clone(struct request *dst, struct request *src) 3487 { 3488 dst->cpu = src->cpu; 3489 dst->__sector = blk_rq_pos(src); 3490 dst->__data_len = blk_rq_bytes(src); 3491 if (src->rq_flags & RQF_SPECIAL_PAYLOAD) { 3492 dst->rq_flags |= RQF_SPECIAL_PAYLOAD; 3493 dst->special_vec = src->special_vec; 3494 } 3495 dst->nr_phys_segments = src->nr_phys_segments; 3496 dst->ioprio = src->ioprio; 3497 dst->extra_len = src->extra_len; 3498 } 3499 3500 /** 3501 * blk_rq_prep_clone - Helper function to setup clone request 3502 * @rq: the request to be setup 3503 * @rq_src: original request to be cloned 3504 * @bs: bio_set that bios for clone are allocated from 3505 * @gfp_mask: memory allocation mask for bio 3506 * @bio_ctr: setup function to be called for each clone bio. 3507 * Returns %0 for success, non %0 for failure. 3508 * @data: private data to be passed to @bio_ctr 3509 * 3510 * Description: 3511 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. 3512 * The actual data parts of @rq_src (e.g. ->cmd, ->sense) 3513 * are not copied, and copying such parts is the caller's responsibility. 3514 * Also, pages which the original bios are pointing to are not copied 3515 * and the cloned bios just point same pages. 3516 * So cloned bios must be completed before original bios, which means 3517 * the caller must complete @rq before @rq_src. 3518 */ 3519 int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 3520 struct bio_set *bs, gfp_t gfp_mask, 3521 int (*bio_ctr)(struct bio *, struct bio *, void *), 3522 void *data) 3523 { 3524 struct bio *bio, *bio_src; 3525 3526 if (!bs) 3527 bs = &fs_bio_set; 3528 3529 __rq_for_each_bio(bio_src, rq_src) { 3530 bio = bio_clone_fast(bio_src, gfp_mask, bs); 3531 if (!bio) 3532 goto free_and_out; 3533 3534 if (bio_ctr && bio_ctr(bio, bio_src, data)) 3535 goto free_and_out; 3536 3537 if (rq->bio) { 3538 rq->biotail->bi_next = bio; 3539 rq->biotail = bio; 3540 } else 3541 rq->bio = rq->biotail = bio; 3542 } 3543 3544 __blk_rq_prep_clone(rq, rq_src); 3545 3546 return 0; 3547 3548 free_and_out: 3549 if (bio) 3550 bio_put(bio); 3551 blk_rq_unprep_clone(rq); 3552 3553 return -ENOMEM; 3554 } 3555 EXPORT_SYMBOL_GPL(blk_rq_prep_clone); 3556 3557 int kblockd_schedule_work(struct work_struct *work) 3558 { 3559 return queue_work(kblockd_workqueue, work); 3560 } 3561 EXPORT_SYMBOL(kblockd_schedule_work); 3562 3563 int kblockd_schedule_work_on(int cpu, struct work_struct *work) 3564 { 3565 return queue_work_on(cpu, kblockd_workqueue, work); 3566 } 3567 EXPORT_SYMBOL(kblockd_schedule_work_on); 3568 3569 int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, 3570 unsigned long delay) 3571 { 3572 return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay); 3573 } 3574 EXPORT_SYMBOL(kblockd_mod_delayed_work_on); 3575 3576 /** 3577 * blk_start_plug - initialize blk_plug and track it inside the task_struct 3578 * @plug: The &struct blk_plug that needs to be initialized 3579 * 3580 * Description: 3581 * Tracking blk_plug inside the task_struct will help with auto-flushing the 3582 * pending I/O should the task end up blocking between blk_start_plug() and 3583 * blk_finish_plug(). This is important from a performance perspective, but 3584 * also ensures that we don't deadlock. For instance, if the task is blocking 3585 * for a memory allocation, memory reclaim could end up wanting to free a 3586 * page belonging to that request that is currently residing in our private 3587 * plug. 
By flushing the pending I/O when the process goes to sleep, we avoid 3588 * this kind of deadlock. 3589 */ 3590 void blk_start_plug(struct blk_plug *plug) 3591 { 3592 struct task_struct *tsk = current; 3593 3594 /* 3595 * If this is a nested plug, don't actually assign it. 3596 */ 3597 if (tsk->plug) 3598 return; 3599 3600 INIT_LIST_HEAD(&plug->list); 3601 INIT_LIST_HEAD(&plug->mq_list); 3602 INIT_LIST_HEAD(&plug->cb_list); 3603 /* 3604 * Store ordering should not be needed here, since a potential 3605 * preempt will imply a full memory barrier 3606 */ 3607 tsk->plug = plug; 3608 } 3609 EXPORT_SYMBOL(blk_start_plug); 3610 3611 static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b) 3612 { 3613 struct request *rqa = container_of(a, struct request, queuelist); 3614 struct request *rqb = container_of(b, struct request, queuelist); 3615 3616 return !(rqa->q < rqb->q || 3617 (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb))); 3618 } 3619 3620 /* 3621 * If 'from_schedule' is true, then postpone the dispatch of requests 3622 * until a safe kblockd context. We do this to avoid accidentally large 3623 * additional stack usage in driver dispatch, in places where the original 3624 * plugger did not intend it. 3625 */ 3626 static void queue_unplugged(struct request_queue *q, unsigned int depth, 3627 bool from_schedule) 3628 __releases(q->queue_lock) 3629 { 3630 lockdep_assert_held(q->queue_lock); 3631 3632 trace_block_unplug(q, depth, !from_schedule); 3633 3634 if (from_schedule) 3635 blk_run_queue_async(q); 3636 else 3637 __blk_run_queue(q); 3638 spin_unlock_irq(q->queue_lock); 3639 } 3640 3641 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) 3642 { 3643 LIST_HEAD(callbacks); 3644 3645 while (!list_empty(&plug->cb_list)) { 3646 list_splice_init(&plug->cb_list, &callbacks); 3647 3648 while (!list_empty(&callbacks)) { 3649 struct blk_plug_cb *cb = list_first_entry(&callbacks, 3650 struct blk_plug_cb, 3651 list); 3652 list_del(&cb->list); 3653 cb->callback(cb, from_schedule); 3654 } 3655 } 3656 } 3657 3658 struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data, 3659 int size) 3660 { 3661 struct blk_plug *plug = current->plug; 3662 struct blk_plug_cb *cb; 3663 3664 if (!plug) 3665 return NULL; 3666 3667 list_for_each_entry(cb, &plug->cb_list, list) 3668 if (cb->callback == unplug && cb->data == data) 3669 return cb; 3670 3671 /* Not currently on the callback list */ 3672 BUG_ON(size < sizeof(*cb)); 3673 cb = kzalloc(size, GFP_ATOMIC); 3674 if (cb) { 3675 cb->data = data; 3676 cb->callback = unplug; 3677 list_add(&cb->list, &plug->cb_list); 3678 } 3679 return cb; 3680 } 3681 EXPORT_SYMBOL(blk_check_plugged); 3682 3683 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) 3684 { 3685 struct request_queue *q; 3686 struct request *rq; 3687 LIST_HEAD(list); 3688 unsigned int depth; 3689 3690 flush_plug_callbacks(plug, from_schedule); 3691 3692 if (!list_empty(&plug->mq_list)) 3693 blk_mq_flush_plug_list(plug, from_schedule); 3694 3695 if (list_empty(&plug->list)) 3696 return; 3697 3698 list_splice_init(&plug->list, &list); 3699 3700 list_sort(NULL, &list, plug_rq_cmp); 3701 3702 q = NULL; 3703 depth = 0; 3704 3705 while (!list_empty(&list)) { 3706 rq = list_entry_rq(list.next); 3707 list_del_init(&rq->queuelist); 3708 BUG_ON(!rq->q); 3709 if (rq->q != q) { 3710 /* 3711 * This drops the queue lock 3712 */ 3713 if (q) 3714 queue_unplugged(q, depth, from_schedule); 3715 q = rq->q; 3716 depth = 0; 3717
spin_lock_irq(q->queue_lock); 3718 } 3719 3720 /* 3721 * Short-circuit if @q is dead 3722 */ 3723 if (unlikely(blk_queue_dying(q))) { 3724 __blk_end_request_all(rq, BLK_STS_IOERR); 3725 continue; 3726 } 3727 3728 /* 3729 * rq is already accounted, so use raw insert 3730 */ 3731 if (op_is_flush(rq->cmd_flags)) 3732 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); 3733 else 3734 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); 3735 3736 depth++; 3737 } 3738 3739 /* 3740 * This drops the queue lock 3741 */ 3742 if (q) 3743 queue_unplugged(q, depth, from_schedule); 3744 } 3745 3746 void blk_finish_plug(struct blk_plug *plug) 3747 { 3748 if (plug != current->plug) 3749 return; 3750 blk_flush_plug_list(plug, false); 3751 3752 current->plug = NULL; 3753 } 3754 EXPORT_SYMBOL(blk_finish_plug); 3755 3756 #ifdef CONFIG_PM 3757 /** 3758 * blk_pm_runtime_init - Block layer runtime PM initialization routine 3759 * @q: the queue of the device 3760 * @dev: the device the queue belongs to 3761 * 3762 * Description: 3763 * Initialize runtime-PM-related fields for @q and start auto suspend for 3764 * @dev. Drivers that want to take advantage of request-based runtime PM 3765 * should call this function after @dev has been initialized, and its 3766 * request queue @q has been allocated, and while runtime PM for it cannot happen 3767 * yet (either because it is disabled/forbidden or because its usage_count > 0). In most 3768 * cases, the driver should call this function before any I/O has taken place. 3769 * 3770 * This function takes care of setting up autosuspend for the device; 3771 * the autosuspend delay is set to -1 to make runtime suspend impossible 3772 * until an updated value is set by either the user or the driver. Drivers do 3773 * not need to touch other autosuspend settings. 3774 * 3775 * The block layer runtime PM is request based, so it only works for drivers 3776 * that use requests as their IO unit instead of those that use bios directly. 3777 */ 3778 void blk_pm_runtime_init(struct request_queue *q, struct device *dev) 3779 { 3780 /* Don't enable runtime PM for blk-mq until it is ready */ 3781 if (q->mq_ops) { 3782 pm_runtime_disable(dev); 3783 return; 3784 } 3785 3786 q->dev = dev; 3787 q->rpm_status = RPM_ACTIVE; 3788 pm_runtime_set_autosuspend_delay(q->dev, -1); 3789 pm_runtime_use_autosuspend(q->dev); 3790 } 3791 EXPORT_SYMBOL(blk_pm_runtime_init); 3792 3793 /** 3794 * blk_pre_runtime_suspend - Pre runtime suspend check 3795 * @q: the queue of the device 3796 * 3797 * Description: 3798 * This function will check if runtime suspend is allowed for the device 3799 * by examining if there are any requests pending in the queue. If there 3800 * are requests pending, the device cannot be runtime suspended; otherwise, 3801 * the queue's status will be updated to SUSPENDING and the driver can 3802 * proceed to suspend the device. 3803 * 3804 * For the not-allowed case, we mark last busy for the device so that 3805 * the runtime PM core will try to autosuspend it some time later. 3806 * 3807 * This function should be called near the start of the device's 3808 * runtime_suspend callback.
 *
 * Return:
 *    0 - OK to runtime suspend the device
 *    -EBUSY - Device should not be runtime suspended
 */
int blk_pre_runtime_suspend(struct request_queue *q)
{
	int ret = 0;

	if (!q->dev)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (q->nr_pending) {
		ret = -EBUSY;
		pm_runtime_mark_last_busy(q->dev);
	} else {
		q->rpm_status = RPM_SUSPENDING;
	}
	spin_unlock_irq(q->queue_lock);
	return ret;
}
EXPORT_SYMBOL(blk_pre_runtime_suspend);

/**
 * blk_post_runtime_suspend - Post runtime suspend processing
 * @q: the queue of the device
 * @err: return value of the device's runtime_suspend function
 *
 * Description:
 *    Update the queue's runtime status according to the return value of the
 *    device's runtime_suspend function and mark last busy for the device so
 *    that the PM core will try to autosuspend the device at a later time.
 *
 *    This function should be called near the end of the device's
 *    runtime_suspend callback.
 */
void blk_post_runtime_suspend(struct request_queue *q, int err)
{
	if (!q->dev)
		return;

	spin_lock_irq(q->queue_lock);
	if (!err) {
		q->rpm_status = RPM_SUSPENDED;
	} else {
		q->rpm_status = RPM_ACTIVE;
		pm_runtime_mark_last_busy(q->dev);
	}
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(blk_post_runtime_suspend);

/**
 * blk_pre_runtime_resume - Pre runtime resume processing
 * @q: the queue of the device
 *
 * Description:
 *    Update the queue's runtime status to RESUMING in preparation for the
 *    runtime resume of the device.
 *
 *    This function should be called near the start of the device's
 *    runtime_resume callback.
 */
void blk_pre_runtime_resume(struct request_queue *q)
{
	if (!q->dev)
		return;

	spin_lock_irq(q->queue_lock);
	q->rpm_status = RPM_RESUMING;
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(blk_pre_runtime_resume);

/**
 * blk_post_runtime_resume - Post runtime resume processing
 * @q: the queue of the device
 * @err: return value of the device's runtime_resume function
 *
 * Description:
 *    Update the queue's runtime status according to the return value of the
 *    device's runtime_resume function. If the device was successfully
 *    resumed, process the requests that were queued while it was resuming,
 *    then mark last busy and initiate autosuspend for it.
 *
 *    This function should be called near the end of the device's
 *    runtime_resume callback.
 */
void blk_post_runtime_resume(struct request_queue *q, int err)
{
	if (!q->dev)
		return;

	spin_lock_irq(q->queue_lock);
	if (!err) {
		q->rpm_status = RPM_ACTIVE;
		__blk_run_queue(q);
		pm_runtime_mark_last_busy(q->dev);
		pm_request_autosuspend(q->dev);
	} else {
		q->rpm_status = RPM_SUSPENDED;
	}
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(blk_post_runtime_resume);

/**
 * blk_set_runtime_active - Force runtime status of the queue to be active
 * @q: the queue of the device
 *
 * If the device is left runtime suspended during system suspend, the resume
 * hook typically resumes the device and corrects its runtime status
 * accordingly.
 * However, that does not affect the queue's runtime PM status, which is
 * still "suspended". This prevents the requests in the queue from being
 * processed.
 *
 * This function can be used in a driver's resume hook to correct the queue's
 * runtime PM status and re-enable peeking requests from the queue. It should
 * be called before the first request is added to the queue.
 */
void blk_set_runtime_active(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	q->rpm_status = RPM_ACTIVE;
	pm_runtime_mark_last_busy(q->dev);
	pm_request_autosuspend(q->dev);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(blk_set_runtime_active);
#endif

int __init blk_dev_init(void)
{
	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			FIELD_SIZEOF(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			FIELD_SIZEOF(struct bio, bi_opf));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	request_cachep = kmem_cache_create("blkdev_requests",
			sizeof(struct request), 0, SLAB_PANIC, NULL);

	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

#ifdef CONFIG_DEBUG_FS
	blk_debugfs_root = debugfs_create_dir("block", NULL);
#endif

	return 0;
}
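
/*
 * Illustrative usage sketch for the plugging interface above. This comment
 * is an example only; "bios" and "nr_bios" are hypothetical. A submitter
 * batches several bios under one on-stack plug so the resulting requests
 * can be merged and dispatched together when the plug is finished, or when
 * the task goes to sleep and the scheduler flushes the plug list:
 *
 *	struct blk_plug plug;
 *	int i;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);
 */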