// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-pm.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-rq-qos.h"

struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);

DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
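 *
 * As an illustrative sketch (not taken from this file; do_once_setup() is a
 * hypothetical helper), a caller that must run some setup exactly once can
 * key off the returned value:
 *
 *	if (!blk_queue_flag_test_and_set(QUEUE_FLAG_REGISTERED, q))
 *		do_once_setup(q);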
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = BLK_MQ_NO_TAG;
	rq->internal_tag = BLK_MQ_NO_TAG;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
	blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

/**
 * blk_op_str - Return string XXX in the REQ_OP_XXX.
 * @op: REQ_OP_XXX.
 *
 * Description: Centralize block layer function to convert REQ_OP_XXX into
 * string format. Useful for debugging and tracing bios or requests. For an
 * invalid REQ_OP_XXX it returns the string "UNKNOWN".
 */
inline const char *blk_op_str(unsigned int op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);

static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* zone device specific errors */
	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);

static void print_req_error(struct request *req,
			    blk_status_t status, const char *caller)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return;

	printk_ratelimited(KERN_ERR
		"%s: %s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		caller, blk_errors[idx].name,
		req->rq_disk ? req->rq_disk->disk_name : "?",
		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
		req->cmd_flags & ~REQ_OP_MASK,
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req->ioprio));
}

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (error)
		bio->bi_status = error;

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);

	bio_advance(bio, nbytes);

	if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) {
		/*
		 * Partial zone append completions cannot be supported as the
		 * BIO fragments may end up not being written sequentially.
		 */
		if (bio->bi_iter.bi_size)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_iter.bi_sector = rq->__sector;
	}

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->submit_bio will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue kobject. When this reaches 0
 * we'll have blk_release_queue() called.
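 *
 * A purely illustrative pairing sketch (do_something() is a hypothetical
 * helper, not part of this file):
 *
 *	if (blk_get_queue(q)) {
 *		do_something(q);
 *		blk_put_queue(q);
 *	}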
 *
 * Context: Any context, but the last reference must not be dropped from
 * atomic context.
 */
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

void blk_set_queue_dying(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);

	/*
	 * When the queue DYING flag is set, we need to block new requests
	 * from entering the queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);

	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);

	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 *
 * Context: can sleep
 */
void blk_cleanup_queue(struct request_queue *q)
{
	/* cannot be called from atomic context */
	might_sleep();

	WARN_ON_ONCE(blk_queue_registered(q));

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	blk_set_queue_dying(q);

	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	/*
	 * Drain all requests queued before DYING marking. Set the DEAD flag
	 * to prevent blk_mq_run_hw_queues() from accessing the hardware
	 * queues after draining has finished.
	 */
	blk_freeze_queue(q);

	rq_qos_exit(q);

	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);

	/* for synchronous bio-based driver finish in-flight integrity i/o */
	blk_flush_integrity();

	/* @q won't process any more requests, flush async actions */
	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
	blk_sync_queue(q);

	if (queue_is_mq(q))
		blk_mq_exit_queue(q);

	/*
	 * In theory, the request pool of sched_tags belongs to the request
	 * queue. However, the current implementation requires the tag_set
	 * for freeing requests, so free the pool now.
	 *
	 * The queue has become frozen, there can't be any in-queue requests,
	 * so it is safe to free the requests now.
	 */
	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		blk_mq_sched_free_requests(q);
	mutex_unlock(&q->sysfs_lock);

	percpu_ref_exit(&q->q_usage_counter);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PM;

	while (true) {
		bool success = false;

		rcu_read_lock();
		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
			/*
			 * The code that increments the pm_only counter is
			 * responsible for ensuring that that counter is
			 * globally visible before the queue is unfrozen.
			 */
			if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
			    !blk_queue_pm_only(q)) {
				success = true;
			} else {
				percpu_ref_put(&q->q_usage_counter);
			}
		}
		rcu_read_unlock();

		if (success)
			return 0;

		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/*
		 * This is the read pair of the barrier in
		 * blk_freeze_queue_start(): we need to order reading the
		 * __PERCPU_REF_DEAD flag of .q_usage_counter against reading
		 * .mq_freeze_depth or the queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();

		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(pm, q)) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	bool nowait = bio->bi_opf & REQ_NOWAIT;
	int ret;

	ret = blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0);
	if (unlikely(ret)) {
		if (nowait && !blk_queue_dying(q))
			bio_wouldblock_error(bio);
		else
			bio_io_error(bio);
	}

	return ret;
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}

struct request_queue *blk_alloc_queue(int node_id)
{
	struct request_queue *q;
	int ret;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				GFP_KERNEL | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->last_merge = NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
	if (q->id < 0)
		goto fail_q;

	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, 0);
	if (ret)
		goto fail_id;

	q->backing_dev_info = bdi_alloc(node_id);
	if (!q->backing_dev_info)
		goto fail_split;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_stats;

	q->node = node_id;

	atomic_set(&q->nr_active_requests_shared_sbitmap, 0);

	timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
		    laptop_mode_timer_fn, 0);
	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->debugfs_mutex);
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->sysfs_dir_lock);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_bdi;

	if (blkcg_init_queue(q))
		goto fail_ref;

	blk_queue_dma_alignment(q, 511);
	blk_set_default_limits(&q->limits);
	q->nr_requests = BLKDEV_MAX_RQ;

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
	blk_free_queue_stats(q->stats);
fail_stats:
	bdi_put(q->backing_dev_info);
fail_split:
	bioset_exit(&q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}

/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.
 *
 * Context: Any context.
 */
bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

/**
 * blk_get_request - allocate a request
 * @q: request queue to allocate a request for
 * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
 * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
 */
struct request *blk_get_request(struct request_queue *q, unsigned int op,
				blk_mq_req_flags_t flags)
{
	struct request *req;

	WARN_ON_ONCE(op & REQ_NOWAIT);
	WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM));

	req = blk_mq_alloc_request(q, op, flags);
	if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
		q->mq_ops->initialize_rq_fn(req);

	return req;
}
EXPORT_SYMBOL(blk_get_request);

void blk_put_request(struct request *req)
{
	blk_mq_free_request(req);
}
EXPORT_SYMBOL(blk_put_request);

static void handle_bad_sector(struct bio *bio, sector_t maxsector)
{
	char b[BDEVNAME_SIZE];

	pr_info_ratelimited("attempt to access beyond end of device\n"
			    "%s: rw=%d, want=%llu, limit=%llu\n",
			    bio_devname(bio, b), bio->bi_opf,
			    bio_end_sector(bio), maxsector);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static bool should_fail_request(struct block_device *part, unsigned int bytes)
{
	return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool bio_check_ro(struct bio *bio)
{
	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
		char b[BDEVNAME_SIZE];

		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return false;

		WARN_ONCE(1,
		       "Trying to write to read-only block-device %s (partno %d)\n",
			bio_devname(bio, b), bio->bi_bdev->bd_partno);
		/* Older lvm-tools actually trigger this */
		return false;
	}

	return false;
}

static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		handle_bad_sector(bio, maxsector);
		return -EIO;
	}
	return 0;
}

/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static int blk_partition_remap(struct bio *bio)
{
	struct block_device *p = bio->bi_bdev;

	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		return -EIO;
	if (bio_sectors(bio)) {
		bio->bi_iter.bi_sector += p->bd_start_sect;
		trace_block_bio_remap(bio, p->bd_dev,
				      bio->bi_iter.bi_sector -
				      p->bd_start_sect);
	}
	bio_set_flag(bio, BIO_REMAPPED);
	return 0;
}

/*
 * Check write append to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	sector_t pos = bio->bi_iter.bi_sector;
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!blk_queue_is_zoned(q))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (pos & (blk_queue_zone_sectors(q) - 1) ||
	    !blk_queue_zone_is_seq(q, pos))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}

static noinline_for_stack bool submit_bio_checks(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct request_queue *q = bdev->bd_disk->queue;
	blk_status_t status = BLK_STS_IOERR;
	struct blk_plug *plug;

	might_sleep();

	plug = blk_mq_plug(q, bio);
	if (plug && plug->nowait)
		bio->bi_opf |= REQ_NOWAIT;

	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if the queue does not support NOWAIT.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;
	if (unlikely(bio_check_ro(bio)))
		goto end_io;
	if (!bio_flagged(bio, BIO_REMAPPED)) {
		if (unlikely(bio_check_eod(bio)))
			goto end_io;
		if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
			goto end_io;
	}

	/*
	 * Filter flush bios early so that bio based drivers without flush
	 * support don't have to worry about them.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!bio_sectors(bio)) {
			status = BLK_STS_OK;
			goto end_io;
		}
	}

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio->bi_opf &= ~REQ_HIPRI;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!blk_queue_discard(q))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!blk_queue_secure_erase(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_SAME:
		if (!q->limits.max_write_same_sectors)
			goto not_supported;
		break;
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
			goto end_io;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!blk_queue_is_zoned(q))
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET_ALL:
		if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

	/*
	 * Various block parts want %current->io_context, so allocate it up
	 * front rather than dealing with lots of pain to allocate it only
	 * where needed. This may fail and the block layer knows how to live
	 * with it.
	 */
	if (unlikely(!current->io_context))
		create_task_io_context(current, GFP_ATOMIC, q->node);

	if (blk_throtl_bio(bio)) {
		blkcg_bio_issue_init(bio);
		return false;
	}

	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(bio);
		/* Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}
	return true;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
	return false;
}

static blk_qc_t __submit_bio(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	blk_qc_t ret = BLK_QC_T_NONE;

	if (blk_crypto_bio_prep(&bio)) {
		if (!disk->fops->submit_bio)
			return blk_mq_submit_bio(bio);
		ret = disk->fops->submit_bio(bio);
	}
	blk_queue_exit(disk->queue);
	return ret;
}

/*
 * The loop in this function may be a bit non-obvious, and so deserves some
 * explanation:
 *
 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 *    that), so we have a list with a single bio.
 *  - We pretend that we have just taken it off a longer list, so we assign
 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 *    bio_list of new bios to be added.  ->submit_bio() may indeed add some more
 *    bios through a recursive call to submit_bio_noacct.  If it did, we find a
 *    non-NULL value in bio_list and re-enter the loop from the top.
 *  - In this case we really did just take the bio off the top of the list (no
 *    pretending) and so remove it from bio_list, and call into ->submit_bio()
 *    again.
 *
 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 * bio_list_on_stack[1] contains bios that were submitted before the current
 *	->submit_bio, but that haven't been processed yet.
 */
static blk_qc_t __submit_bio_noacct(struct bio *bio)
{
	struct bio_list bio_list_on_stack[2];
	blk_qc_t ret = BLK_QC_T_NONE;

	BUG_ON(bio->bi_next);

	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;

	do {
		struct request_queue *q = bio->bi_bdev->bd_disk->queue;
		struct bio_list lower, same;

		if (unlikely(bio_queue_enter(bio) != 0))
			continue;

		/*
		 * Create a fresh bio_list for all subordinate requests.
		 */
		bio_list_on_stack[1] = bio_list_on_stack[0];
		bio_list_init(&bio_list_on_stack[0]);

		ret = __submit_bio(bio);

		/*
		 * Sort new bios into those for a lower level and those for the
		 * same level.
		 */
		bio_list_init(&lower);
		bio_list_init(&same);
		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
			if (q == bio->bi_bdev->bd_disk->queue)
				bio_list_add(&same, bio);
			else
				bio_list_add(&lower, bio);

		/*
		 * Now assemble so we handle the lowest level first.
		 */
		bio_list_merge(&bio_list_on_stack[0], &lower);
		bio_list_merge(&bio_list_on_stack[0], &same);
		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));

	current->bio_list = NULL;
	return ret;
}

static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
{
	struct bio_list bio_list[2] = { };
	blk_qc_t ret = BLK_QC_T_NONE;

	current->bio_list = bio_list;

	do {
		struct gendisk *disk = bio->bi_bdev->bd_disk;

		if (unlikely(bio_queue_enter(bio) != 0))
			continue;

		if (!blk_crypto_bio_prep(&bio)) {
			blk_queue_exit(disk->queue);
			ret = BLK_QC_T_NONE;
			continue;
		}

		ret = blk_mq_submit_bio(bio);
	} while ((bio = bio_list_pop(&bio_list[0])));

	current->bio_list = NULL;
	return ret;
}

/**
 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers.  All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */
blk_qc_t submit_bio_noacct(struct bio *bio)
{
	if (!submit_bio_checks(bio))
		return BLK_QC_T_NONE;

	/*
	 * We only want one ->submit_bio to be active at a time, else stack
	 * usage with stacked devices could be a problem.  Use current->bio_list
	 * to collect a list of requests submitted by a ->submit_bio method
	 * while it is active, and then process them after it returned.
	 */
	if (current->bio_list) {
		bio_list_add(&current->bio_list[0], bio);
		return BLK_QC_T_NONE;
	}

	if (!bio->bi_bdev->bd_disk->fops->submit_bio)
		return __submit_bio_noacct_mq(bio);
	return __submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices.  It is passed a
 * fully set up &struct bio that describes the I/O that needs to be done.  The
 * bio will be sent to the device described by the bi_bdev field.
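 *
 * A minimal illustrative sketch of a caller (not taken from this file;
 * "bdev", "page", "sector" and the my_end_io() completion handler are
 * hypothetical) that issues a one-page read:
 *
 *	bio = bio_alloc(GFP_KERNEL, 1);
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;
 *	submit_bio(bio);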
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */
blk_qc_t submit_bio(struct bio *bio)
{
	if (blkcg_punt_bio_submit(bio))
		return BLK_QC_T_NONE;

	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (bio_has_data(bio)) {
		unsigned int count;

		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
			count = queue_logical_block_size(
					bio->bi_bdev->bd_disk->queue) >> 9;
		else
			count = bio_sectors(bio);

		if (op_is_write(bio_op(bio))) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_iter.bi_size);
			count_vm_events(PGPGIN, count);
		}
	}

	/*
	 * If we're reading data that is part of the userspace workingset, count
	 * submission time as memory stall.  When the device is congested, or
	 * the submitting cgroup is IO-throttled, submission can be a
	 * significant part of overall IO time.
	 */
	if (unlikely(bio_op(bio) == REQ_OP_READ &&
	    bio_flagged(bio, BIO_WORKINGSET))) {
		unsigned long pflags;
		blk_qc_t ret;

		psi_memstall_enter(&pflags);
		ret = submit_bio_noacct(bio);
		psi_memstall_leave(&pflags);

		return ret;
	}

	return submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);

/**
 * blk_cloned_rq_check_limits - Helper function to check a cloned request
 *                              for the new queue limits
 * @q:  the queue
 * @rq: the request being checked
 *
 * Description:
 *    @rq may have been made based on weaker limitations of upper-level queues
 *    in request stacking drivers, and it may violate the limitation of @q.
 *    Since the block layer and the underlying device driver trust @rq
 *    after it is inserted to @q, it should be checked against @q before
 *    the insertion using this generic function.
 *
 *    Request stacking drivers like request-based dm may change the queue
 *    limits when retrying requests on other queues.  Those requests need
 *    to be checked against the new queue limits again during dispatch.
 */
static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
					       struct request *rq)
{
	unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));

	if (blk_rq_sectors(rq) > max_sectors) {
		/*
		 * A SCSI device does not have a good way to return if
		 * Write Same/Zero is actually supported. If a device rejects
		 * a non-read/write command (discard, write same, etc.) the
		 * low-level device driver will set the relevant queue limit to
		 * 0 to prevent blk-lib from issuing more of the offending
		 * operations. Commands queued prior to the queue limit being
		 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
		 * errors being propagated to upper layers.
		 */
		if (max_sectors == 0)
			return BLK_STS_NOTSUPP;

		printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
			__func__, blk_rq_sectors(rq), max_sectors);
		return BLK_STS_IOERR;
	}

	/*
	 * The queue settings related to segment counting may differ from the
	 * original queue.
	 */
	rq->nr_phys_segments = blk_recalc_rq_segments(rq);
	if (rq->nr_phys_segments > queue_max_segments(q)) {
		printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
			__func__, rq->nr_phys_segments, queue_max_segments(q));
		return BLK_STS_IOERR;
	}

	return BLK_STS_OK;
}

/**
 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
 * @q:  the queue to submit the request
 * @rq: the request being queued
 */
blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
	blk_status_t ret;

	ret = blk_cloned_rq_check_limits(q, rq);
	if (ret != BLK_STS_OK)
		return ret;

	if (rq->rq_disk &&
	    should_fail_request(rq->rq_disk->part0, blk_rq_bytes(rq)))
		return BLK_STS_IOERR;

	if (blk_crypto_insert_cloned_request(rq))
		return BLK_STS_IOERR;

	if (blk_queue_io_stat(q))
		blk_account_io_start(rq);

	/*
	 * Since we have a scheduler attached on the top device,
	 * bypass a potential scheduler on the bottom device for
	 * insert.
	 */
	return blk_mq_request_issue_directly(rq, true);
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);

/**
 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
 * @rq: request to examine
 *
 * Description:
 *     A request could be a merge of IOs which require different failure
 *     handling.  This function determines the number of bytes which
 *     can be failed from the beginning of the request without
 *     crossing into areas which need to be retried further.
 *
 * Return:
 *     The number of bytes to fail.
 */
unsigned int blk_rq_err_bytes(const struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	unsigned int bytes = 0;
	struct bio *bio;

	if (!(rq->rq_flags & RQF_MIXED_MERGE))
		return blk_rq_bytes(rq);

	/*
	 * Currently the only 'mixing' which can happen is between
	 * different failfast types.  We can safely fail portions
	 * which have all the failfast bits that the first one has -
	 * the ones which are at least as eager to fail as the first
	 * one.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		if ((bio->bi_opf & ff) != ff)
			break;
		bytes += bio->bi_iter.bi_size;
	}

	/* this could lead to infinite loop */
	BUG_ON(blk_rq_bytes(rq) && !bytes);
	return bytes;
}
EXPORT_SYMBOL_GPL(blk_rq_err_bytes);

static void update_io_ticks(struct block_device *part, unsigned long now,
			    bool end)
{
	unsigned long stamp;
again:
	stamp = READ_ONCE(part->bd_stamp);
	if (unlikely(time_after(now, stamp))) {
		if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp))
			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
	}
	if (part->bd_partno) {
		part = bdev_whole(part);
		goto again;
	}
}

static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (req->part && blk_do_io_stat(req)) {
		const int sgrp = op_stat_group(req_op(req));

		part_stat_lock();
		part_stat_add(req->part, sectors[sgrp], bytes >> 9);
		part_stat_unlock();
	}
}

void blk_account_io_done(struct request *req, u64 now)
{
	/*
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.
	 * Accounting the containing request is enough.
	 */
	if (req->part && blk_do_io_stat(req) &&
	    !(req->rq_flags & RQF_FLUSH_SEQ)) {
		const int sgrp = op_stat_group(req_op(req));

		part_stat_lock();
		update_io_ticks(req->part, jiffies, true);
		part_stat_inc(req->part, ios[sgrp]);
		part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
		part_stat_unlock();
	}
}

void blk_account_io_start(struct request *rq)
{
	if (!blk_do_io_stat(rq))
		return;

	/* passthrough requests can hold bios that do not have ->bi_bdev set */
	if (rq->bio && rq->bio->bi_bdev)
		rq->part = rq->bio->bi_bdev;
	else
		rq->part = rq->rq_disk->part0;

	part_stat_lock();
	update_io_ticks(rq->part, jiffies, false);
	part_stat_unlock();
}

static unsigned long __part_start_io_acct(struct block_device *part,
					  unsigned int sectors, unsigned int op)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);

	part_stat_lock();
	update_io_ticks(part, now, false);
	part_stat_inc(part, ios[sgrp]);
	part_stat_add(part, sectors[sgrp], sectors);
	part_stat_local_inc(part, in_flight[op_is_write(op)]);
	part_stat_unlock();

	return now;
}

/**
 * bio_start_io_acct - start I/O accounting for bio based drivers
 * @bio:	bio to start account for
 *
 * Returns the start time that should be passed back to bio_end_io_acct().
 */
unsigned long bio_start_io_acct(struct bio *bio)
{
	return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio), bio_op(bio));
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);

unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
				 unsigned int op)
{
	return __part_start_io_acct(disk->part0, sectors, op);
}
EXPORT_SYMBOL(disk_start_io_acct);

static void __part_end_io_acct(struct block_device *part, unsigned int op,
			       unsigned long start_time)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);
	unsigned long duration = now - start_time;

	part_stat_lock();
	update_io_ticks(part, now, true);
	part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
	part_stat_local_dec(part, in_flight[op_is_write(op)]);
	part_stat_unlock();
}

void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
			      struct block_device *orig_bdev)
{
	__part_end_io_acct(orig_bdev, bio_op(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);

void disk_end_io_acct(struct gendisk *disk, unsigned int op,
		      unsigned long start_time)
{
	__part_end_io_acct(disk->part0, op, start_time);
}
EXPORT_SYMBOL(disk_end_io_acct);

/*
 * Steal bios from a request and add them to a bio list.
 * The request must not have been partially completed before.
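 *
 * An illustrative use (sketch only; resubmit_bio() is a hypothetical helper)
 * by a driver that wants to re-drive the bios itself:
 *
 *	struct bio_list list;
 *	struct bio *bio;
 *
 *	bio_list_init(&list);
 *	blk_steal_bios(&list, rq);
 *	while ((bio = bio_list_pop(&list)) != NULL)
 *		resubmit_bio(bio);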
 */
void blk_steal_bios(struct bio_list *list, struct request *rq)
{
	if (rq->bio) {
		if (list->tail)
			list->tail->bi_next = rq->bio;
		else
			list->head = rq->bio;
		list->tail = rq->biotail;

		rq->bio = NULL;
		rq->biotail = NULL;
	}

	rq->__data_len = 0;
}
EXPORT_SYMBOL_GPL(blk_steal_bios);

/**
 * blk_update_request - Complete multiple bytes without completing the request
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete for @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Note:
 *     The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
 *     except in the consistency check at the end of this function.
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, blk_status_t error,
		unsigned int nr_bytes)
{
	int total_bytes;

	trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);

	if (!req->bio)
		return false;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    error == BLK_STS_OK)
		req->q->integrity.profile->complete_fn(req, nr_bytes);
#endif

	if (unlikely(error && !blk_rq_is_passthrough(req) &&
		     !(req->rq_flags & RQF_QUIET)))
		print_req_error(req, error, __func__);

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

		if (bio_bytes == bio->bi_iter.bi_size)
			req->bio = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		req_bio_endio(req, bio, bio_bytes, error);

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	req->__data_len -= total_bytes;

	/* update sector only for requests with clear definition of sector */
	if (!blk_rq_is_passthrough(req))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->rq_flags & RQF_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
		/*
		 * If total number of sectors is less than the first segment
		 * size, something has gone terribly wrong.
		 */
		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
			blk_dump_rq_flags(req, "request botched");
			req->__data_len = blk_rq_cur_bytes(req);
		}

		/* recalculate the number of segments */
		req->nr_phys_segments = blk_recalc_rq_segments(req);
	}

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
/**
 * rq_flush_dcache_pages - Helper function to flush all pages in a request
 * @rq: the request to be flushed
 *
 * Description:
 *     Flush all pages in @rq.
 */
void rq_flush_dcache_pages(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;

	rq_for_each_segment(bvec, rq, iter)
		flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
#endif

/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must set their own
 *    exporting function using blk_queue_lld_busy() first.
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when underlying
 *    devices are busy.  This behavior helps more I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regression
 *    on burst I/O load.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
	if (queue_is_mq(q) && q->mq_ops->busy)
		return q->mq_ops->busy(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

/**
 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
 * @rq: the clone request to be cleaned up
 *
 * Description:
 *     Free all bios in @rq for a cloned request.
 */
void blk_rq_unprep_clone(struct request *rq)
{
	struct bio *bio;

	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;

		bio_put(bio);
	}
}
EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);

/**
 * blk_rq_prep_clone - Helper function to setup clone request
 * @rq: the request to be setup
 * @rq_src: original request to be cloned
 * @bs: bio_set that bios for clone are allocated from
 * @gfp_mask: memory allocation mask for bio
 * @bio_ctr: setup function to be called for each clone bio.
 *           Returns %0 for success, non %0 for failure.
 * @data: private data to be passed to @bio_ctr
 *
 * Description:
 *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
 *     Also, pages which the original bios are pointing to are not copied
 *     and the cloned bios just point to the same pages.
 *     So cloned bios must be completed before original bios, which means
 *     the caller must complete @rq before @rq_src.
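 *
 *     A rough sketch of how a request-based stacking driver might use this
 *     (illustrative only; "lower_q" and the error handling are hypothetical):
 *
 *	clone = blk_get_request(lower_q, rq->cmd_flags, BLK_MQ_REQ_NOWAIT);
 *	if (!IS_ERR(clone) &&
 *	    !blk_rq_prep_clone(clone, rq, NULL, GFP_ATOMIC, NULL, NULL))
 *		blk_insert_cloned_request(lower_q, clone);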
 */
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		      struct bio_set *bs, gfp_t gfp_mask,
		      int (*bio_ctr)(struct bio *, struct bio *, void *),
		      void *data)
{
	struct bio *bio, *bio_src;

	if (!bs)
		bs = &fs_bio_set;

	__rq_for_each_bio(bio_src, rq_src) {
		bio = bio_clone_fast(bio_src, gfp_mask, bs);
		if (!bio)
			goto free_and_out;

		if (bio_ctr && bio_ctr(bio, bio_src, data))
			goto free_and_out;

		if (rq->bio) {
			rq->biotail->bi_next = bio;
			rq->biotail = bio;
		} else {
			rq->bio = rq->biotail = bio;
		}
		bio = NULL;
	}

	/* Copy attributes of the original request to the clone request. */
	rq->__sector = blk_rq_pos(rq_src);
	rq->__data_len = blk_rq_bytes(rq_src);
	if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
		rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
		rq->special_vec = rq_src->special_vec;
	}
	rq->nr_phys_segments = rq_src->nr_phys_segments;
	rq->ioprio = rq_src->ioprio;

	if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
		goto free_and_out;

	return 0;

free_and_out:
	if (bio)
		bio_put(bio);
	blk_rq_unprep_clone(rq);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(blk_rq_prep_clone);

int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay)
{
	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);

/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug:	The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   blk_start_plug() indicates to the block layer an intent by the caller
 *   to submit multiple I/O requests in a batch.  The block layer may use
 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
 *   is called.  However, the block layer may choose to submit requests
 *   before a call to blk_finish_plug() if the number of queued I/Os
 *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
 *   %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be submitted early if
 *   the task schedules (see below).
 *
 * Tracking blk_plug inside the task_struct will help with auto-flushing the
 * pending I/O should the task end up blocking between blk_start_plug() and
 * blk_finish_plug().  This is important from a performance perspective, but
 * also ensures that we don't deadlock.  For instance, if the task is blocking
 * for a memory allocation, memory reclaim could end up wanting to free a
 * page belonging to that request that is currently residing in our private
 * plug.  By flushing the pending I/O when the process goes to sleep, we avoid
 * this kind of deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	INIT_LIST_HEAD(&plug->mq_list);
	INIT_LIST_HEAD(&plug->cb_list);
	plug->rq_count = 0;
	plug->multiple_queues = false;
	plug->nowait = false;

	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier
	 */
	tsk->plug = plug;
}
EXPORT_SYMBOL(blk_start_plug);

static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}

struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size)
{
	struct blk_plug *plug = current->plug;
	struct blk_plug_cb *cb;

	if (!plug)
		return NULL;

	list_for_each_entry(cb, &plug->cb_list, list)
		if (cb->callback == unplug && cb->data == data)
			return cb;

	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
		cb->data = data;
		cb->callback = unplug;
		list_add(&cb->list, &plug->cb_list);
	}
	return cb;
}
EXPORT_SYMBOL(blk_check_plugged);

void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	flush_plug_callbacks(plug, from_schedule);

	if (!list_empty(&plug->mq_list))
		blk_mq_flush_plug_list(plug, from_schedule);
}

/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug:	The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 * Indicate that a batch of I/O submissions is complete.  This function
 * must be paired with an initial call to blk_start_plug().  The intent
 * is to allow the block layer to optimize I/O submission.  See the
 * documentation for blk_start_plug() for more information.
 */
void blk_finish_plug(struct blk_plug *plug)
{
	if (plug != current->plug)
		return;
	blk_flush_plug_list(plug, false);

	current->plug = NULL;
}
EXPORT_SYMBOL(blk_finish_plug);

void blk_io_schedule(void)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		io_schedule_timeout(timeout);
	else
		io_schedule();
}
EXPORT_SYMBOL_GPL(blk_io_schedule);

int __init blk_dev_init(void)
{
	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct bio, bi_opf));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	blk_debugfs_root = debugfs_create_dir("block", NULL);

	return 0;
}