// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-pm.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-rq-qos.h"

struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
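
/*
 * Example (sketch, not from this file): a caller can use the helper above
 * to flip a flag and act only on the 0 -> 1 transition. QUEUE_FLAG_NOMERGES
 * is a real flag; the surrounding logic is hypothetical.
 *
 *	if (!blk_queue_flag_test_and_set(QUEUE_FLAG_NOMERGES, q))
 *		pr_debug("merging now disabled\n");
 */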

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = BLK_MQ_NO_TAG;
	rq->internal_tag = BLK_MQ_NO_TAG;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
	refcount_set(&rq->ref, 1);
	blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(SCSI_IN),
	REQ_OP_NAME(SCSI_OUT),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

/**
 * blk_op_str - return the string for the XXX part of a REQ_OP_XXX value
 * @op: REQ_OP_XXX.
 *
 * Description: Centralized block layer function to convert a REQ_OP_XXX value
 * into its string representation. Useful when debugging or tracing a bio or
 * request. For an invalid REQ_OP_XXX it returns the string "UNKNOWN".
 */
inline const char *blk_op_str(unsigned int op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);

static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* zone device specific errors */
	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);
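
/*
 * Example (sketch, not from this file): the two converters above are
 * inverses for every errno listed in blk_errors[]; anything unrecognized
 * collapses to BLK_STS_IOERR / -EIO.
 *
 *	blk_status_t sts = errno_to_blk_status(-ENOSPC);  (sts == BLK_STS_NOSPC)
 *	int err = blk_status_to_errno(sts);               (err == -ENOSPC)
 */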

static void print_req_error(struct request *req, blk_status_t status,
		const char *caller)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return;

	printk_ratelimited(KERN_ERR
		"%s: %s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		caller, blk_errors[idx].name,
		req->rq_disk ? req->rq_disk->disk_name : "?",
		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
		req->cmd_flags & ~REQ_OP_MASK,
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req->ioprio));
}

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (error)
		bio->bi_status = error;

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);

	bio_advance(bio, nbytes);

	if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) {
		/*
		 * Partial zone append completions cannot be supported as the
		 * BIO fragments may end up not being written sequentially.
		 */
		if (bio->bi_iter.bi_size)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_iter.bi_sector = rq->__sector;
	}

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->submit_bio will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with the queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue kobject. When this reaches 0
 * we'll have blk_release_queue() called.
 *
 * Context: Any context, but the last reference must not be dropped from
 * atomic context.
 */
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

void blk_set_queue_dying(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);

	/*
	 * When the queue DYING flag is set, we need to block new requests
	 * from entering the queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);

	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);

	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 *
 * Context: can sleep
 */
void blk_cleanup_queue(struct request_queue *q)
{
	/* cannot be called from atomic context */
	might_sleep();

	WARN_ON_ONCE(blk_queue_registered(q));

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	blk_set_queue_dying(q);

	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	/*
	 * Drain all requests queued before DYING marking. Set the DEAD flag to
	 * prevent blk_mq_run_hw_queues() from accessing the hardware queues
	 * after draining has finished.
	 */
	blk_freeze_queue(q);

	rq_qos_exit(q);

	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);

	/* for synchronous bio-based drivers, finish in-flight integrity I/O */
	blk_flush_integrity();

	/* @q won't process any more requests, flush async actions */
	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
	blk_sync_queue(q);

	if (queue_is_mq(q))
		blk_mq_exit_queue(q);

	/*
	 * In theory, the request pool of sched_tags belongs to the request
	 * queue. However, the current implementation requires the tag_set for
	 * freeing requests, so free the pool now.
	 *
	 * The queue has become frozen, there can't be any in-queue requests,
	 * so it is safe to free the requests now.
	 */
	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		blk_mq_sched_free_requests(q);
	mutex_unlock(&q->sysfs_lock);

	percpu_ref_exit(&q->q_usage_counter);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PM;

	while (true) {
		bool success = false;

		rcu_read_lock();
		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
			/*
			 * The code that increments the pm_only counter is
			 * responsible for ensuring that that counter is
			 * globally visible before the queue is unfrozen.
			 */
			if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
			    !blk_queue_pm_only(q)) {
				success = true;
			} else {
				percpu_ref_put(&q->q_usage_counter);
			}
		}
		rcu_read_unlock();

		if (success)
			return 0;

		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/*
		 * This is the read pair of the barrier in
		 * blk_freeze_queue_start(): we need to order reading the
		 * __PERCPU_REF_DEAD flag of .q_usage_counter against reading
		 * .mq_freeze_depth or the queue dying flag; otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();

		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(pm, q)) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}
}
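
/*
 * Example (sketch, not from this file): a non-blocking enter/exit pair, as
 * a caller that cannot sleep might use it. blk_queue_exit() is defined just
 * below.
 *
 *	if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
 *		return -EBUSY;		(frozen, pm_only or dying)
 *	...touch the queue...
 *	blk_queue_exit(q);
 */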

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	bool nowait = bio->bi_opf & REQ_NOWAIT;
	int ret;

	ret = blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0);
	if (unlikely(ret)) {
		if (nowait && !blk_queue_dying(q))
			bio_wouldblock_error(bio);
		else
			bio_io_error(bio);
	}

	return ret;
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}

struct request_queue *blk_alloc_queue(int node_id)
{
	struct request_queue *q;
	int ret;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				  GFP_KERNEL | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->last_merge = NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
	if (q->id < 0)
		goto fail_q;

	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, 0);
	if (ret)
		goto fail_id;

	q->backing_dev_info = bdi_alloc(node_id);
	if (!q->backing_dev_info)
		goto fail_split;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_stats;

	q->node = node_id;

	atomic_set(&q->nr_active_requests_shared_sbitmap, 0);

	timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
		    laptop_mode_timer_fn, 0);
	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->debugfs_mutex);
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->sysfs_dir_lock);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
			    blk_queue_usage_counter_release,
			    PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_bdi;

	if (blkcg_init_queue(q))
		goto fail_ref;

	blk_queue_dma_alignment(q, 511);
	blk_set_default_limits(&q->limits);
	q->nr_requests = BLKDEV_MAX_RQ;

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
	blk_free_queue_stats(q->stats);
fail_stats:
	bdi_put(q->backing_dev_info);
fail_split:
	bioset_exit(&q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue);

/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.
 *
 * Context: Any context.
 */
bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

/**
 * blk_get_request - allocate a request
 * @q: request queue to allocate a request for
 * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
 * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
 */
struct request *blk_get_request(struct request_queue *q, unsigned int op,
				blk_mq_req_flags_t flags)
{
	struct request *req;

	WARN_ON_ONCE(op & REQ_NOWAIT);
	WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM));

	req = blk_mq_alloc_request(q, op, flags);
	if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
		q->mq_ops->initialize_rq_fn(req);

	return req;
}
EXPORT_SYMBOL(blk_get_request);

void blk_put_request(struct request *req)
{
	blk_mq_free_request(req);
}
EXPORT_SYMBOL(blk_put_request);
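
/*
 * Example (sketch, not from this file): allocating and releasing a
 * passthrough-style request; error handling beyond the allocation is
 * elided and the surrounding driver context is hypothetical.
 *
 *	struct request *rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	...fill in and execute rq...
 *	blk_put_request(rq);
 */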

static void handle_bad_sector(struct bio *bio, sector_t maxsector)
{
	char b[BDEVNAME_SIZE];

	pr_info_ratelimited("attempt to access beyond end of device\n"
			    "%s: rw=%d, want=%llu, limit=%llu\n",
			    bio_devname(bio, b), bio->bi_opf,
			    bio_end_sector(bio), maxsector);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static bool should_fail_request(struct block_device *part, unsigned int bytes)
{
	return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool bio_check_ro(struct bio *bio)
{
	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
		char b[BDEVNAME_SIZE];

		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return false;

		WARN_ONCE(1,
		       "Trying to write to read-only block-device %s (partno %d)\n",
			bio_devname(bio, b), bio->bi_bdev->bd_partno);
		/* Older lvm-tools actually trigger this */
		return false;
	}

	return false;
}

static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		handle_bad_sector(bio, maxsector);
		return -EIO;
	}
	return 0;
}
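
/*
 * Worked example (sketch): with maxsector == 1000 and nr_sectors == 8, any
 * bi_sector > 992 trips the check above, since sector 992 + 8 == 1000 is
 * the first sector past the device. Writing the comparison as
 * bi_sector > maxsector - nr_sectors (rather than adding) avoids
 * overflowing sector_t for bios near the end of a huge device.
 */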

/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static int blk_partition_remap(struct bio *bio)
{
	struct block_device *p = bio->bi_bdev;

	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		return -EIO;
	if (bio_sectors(bio)) {
		bio->bi_iter.bi_sector += p->bd_start_sect;
		trace_block_bio_remap(bio, p->bd_dev,
				      bio->bi_iter.bi_sector -
				      p->bd_start_sect);
	}
	bio_set_flag(bio, BIO_REMAPPED);
	return 0;
}

/*
 * Check write append to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	sector_t pos = bio->bi_iter.bi_sector;
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!blk_queue_is_zoned(q))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (pos & (blk_queue_zone_sectors(q) - 1) ||
	    !blk_queue_zone_is_seq(q, pos))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}
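
/*
 * Worked example (sketch): for a zone size of 524288 sectors (256 MiB with
 * 512-byte sectors), a REQ_OP_ZONE_APPEND bio must start on a
 * 524288-sector boundary of a sequential zone, and bio_sectors() may
 * exceed neither q->limits.chunk_sectors nor
 * q->limits.max_zone_append_sectors, so the bio can never be split.
 */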

static noinline_for_stack bool submit_bio_checks(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct request_queue *q = bdev->bd_disk->queue;
	blk_status_t status = BLK_STS_IOERR;
	struct blk_plug *plug;

	might_sleep();

	plug = blk_mq_plug(q, bio);
	if (plug && plug->nowait)
		bio->bi_opf |= REQ_NOWAIT;

	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if queue does not support NOWAIT.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;
	if (unlikely(bio_check_ro(bio)))
		goto end_io;
	if (!bio_flagged(bio, BIO_REMAPPED)) {
		if (unlikely(bio_check_eod(bio)))
			goto end_io;
		if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
			goto end_io;
	}

	/*
	 * Filter flush bios early so that bio based drivers without flush
	 * support don't have to worry about them.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!bio_sectors(bio)) {
			status = BLK_STS_OK;
			goto end_io;
		}
	}

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio->bi_opf &= ~REQ_HIPRI;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!blk_queue_discard(q))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!blk_queue_secure_erase(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_SAME:
		if (!q->limits.max_write_same_sectors)
			goto not_supported;
		break;
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
			goto end_io;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!blk_queue_is_zoned(q))
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET_ALL:
		if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

	/*
	 * Various block parts want %current->io_context, so allocate it up
	 * front rather than dealing with lots of pain to allocate it only
	 * where needed. This may fail and the block layer knows how to live
	 * with it.
	 */
	if (unlikely(!current->io_context))
		create_task_io_context(current, GFP_ATOMIC, q->node);

	if (blk_throtl_bio(bio)) {
		blkcg_bio_issue_init(bio);
		return false;
	}

	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(bio);
		/* Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}
	return true;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
	return false;
}

static blk_qc_t __submit_bio(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	blk_qc_t ret = BLK_QC_T_NONE;

	if (blk_crypto_bio_prep(&bio)) {
		if (!disk->fops->submit_bio)
			return blk_mq_submit_bio(bio);
		ret = disk->fops->submit_bio(bio);
	}
	blk_queue_exit(disk->queue);
	return ret;
}

/*
 * The loop in this function may be a bit non-obvious, and so deserves some
 * explanation:
 *
 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 *    that), so we have a list with a single bio.
 *  - We pretend that we have just taken it off a longer list, so we assign
 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 *    bio_list of new bios to be added. ->submit_bio() may indeed add some more
 *    bios through a recursive call to submit_bio_noacct. If it did, we find a
 *    non-NULL value in bio_list and re-enter the loop from the top.
 *  - In this case we really did just take the bio off the top of the list (no
 *    pretending) and so remove it from bio_list, and call into ->submit_bio()
 *    again.
 *
 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 * bio_list_on_stack[1] contains bios that were submitted before the current
 *	->submit_bio, but that haven't been processed yet.
 */
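
/*
 * Example (sketch): consider a dm device stacked on two disks. A call to
 * submit_bio() on the dm device invokes its ->submit_bio(), which submits
 * one bio to each underlying disk. Both land in bio_list_on_stack[0]
 * instead of recursing; the loop below then pops and issues them one at a
 * time, lowest level first, so stack usage stays flat no matter how deep
 * the device stack is.
 */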

static blk_qc_t __submit_bio_noacct(struct bio *bio)
{
	struct bio_list bio_list_on_stack[2];
	blk_qc_t ret = BLK_QC_T_NONE;

	BUG_ON(bio->bi_next);

	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;

	do {
		struct request_queue *q = bio->bi_bdev->bd_disk->queue;
		struct bio_list lower, same;

		if (unlikely(bio_queue_enter(bio) != 0))
			continue;

		/*
		 * Create a fresh bio_list for all subordinate requests.
		 */
		bio_list_on_stack[1] = bio_list_on_stack[0];
		bio_list_init(&bio_list_on_stack[0]);

		ret = __submit_bio(bio);

		/*
		 * Sort new bios into those for a lower level and those for the
		 * same level.
		 */
		bio_list_init(&lower);
		bio_list_init(&same);
		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
			if (q == bio->bi_bdev->bd_disk->queue)
				bio_list_add(&same, bio);
			else
				bio_list_add(&lower, bio);

		/*
		 * Now assemble so we handle the lowest level first.
		 */
		bio_list_merge(&bio_list_on_stack[0], &lower);
		bio_list_merge(&bio_list_on_stack[0], &same);
		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));

	current->bio_list = NULL;
	return ret;
}

static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
{
	struct bio_list bio_list[2] = { };
	blk_qc_t ret = BLK_QC_T_NONE;

	current->bio_list = bio_list;

	do {
		struct gendisk *disk = bio->bi_bdev->bd_disk;

		if (unlikely(bio_queue_enter(bio) != 0))
			continue;

		if (!blk_crypto_bio_prep(&bio)) {
			blk_queue_exit(disk->queue);
			ret = BLK_QC_T_NONE;
			continue;
		}

		ret = blk_mq_submit_bio(bio);
	} while ((bio = bio_list_pop(&bio_list[0])));

	current->bio_list = NULL;
	return ret;
}

/**
 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 * @bio: The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers. All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */
blk_qc_t submit_bio_noacct(struct bio *bio)
{
	if (!submit_bio_checks(bio))
		return BLK_QC_T_NONE;

	/*
	 * We only want one ->submit_bio to be active at a time, else stack
	 * usage with stacked devices could be a problem. Use current->bio_list
	 * to collect a list of requests submitted by a ->submit_bio method
	 * while it is active, and then process them after it returned.
	 */
	if (current->bio_list) {
		bio_list_add(&current->bio_list[0], bio);
		return BLK_QC_T_NONE;
	}

	if (!bio->bi_bdev->bd_disk->fops->submit_bio)
		return __submit_bio_noacct_mq(bio);
	return __submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);
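
/*
 * Example (sketch, not from this file): a stacking driver remaps a bio to
 * its lower device and resubmits it with submit_bio_noacct() so the I/O is
 * not accounted twice; lower_bdev and offset are hypothetical.
 *
 *	bio_set_dev(bio, lower_bdev);
 *	bio->bi_iter.bi_sector += offset;
 *	submit_bio_noacct(bio);
 */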

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices. It is passed a
 * fully set up &struct bio that describes the I/O that needs to be done. The
 * bio will be sent to the device described by the bi_bdev field.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */
blk_qc_t submit_bio(struct bio *bio)
{
	if (blkcg_punt_bio_submit(bio))
		return BLK_QC_T_NONE;

	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (bio_has_data(bio)) {
		unsigned int count;

		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
			count = queue_logical_block_size(
					bio->bi_bdev->bd_disk->queue) >> 9;
		else
			count = bio_sectors(bio);

		if (op_is_write(bio_op(bio))) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_iter.bi_size);
			count_vm_events(PGPGIN, count);
		}

		if (unlikely(block_dump)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
				current->comm, task_pid_nr(current),
				op_is_write(bio_op(bio)) ? "WRITE" : "READ",
				(unsigned long long)bio->bi_iter.bi_sector,
				bio_devname(bio, b), count);
		}
	}

	/*
	 * If we're reading data that is part of the userspace workingset, count
	 * submission time as memory stall. When the device is congested, or
	 * the submitting cgroup IO-throttled, submission can be a significant
	 * part of overall IO time.
	 */
	if (unlikely(bio_op(bio) == REQ_OP_READ &&
	    bio_flagged(bio, BIO_WORKINGSET))) {
		unsigned long pflags;
		blk_qc_t ret;

		psi_memstall_enter(&pflags);
		ret = submit_bio_noacct(bio);
		psi_memstall_leave(&pflags);

		return ret;
	}

	return submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);
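
/*
 * Example (sketch, not from this file): a typical asynchronous submission;
 * bio allocation and my_end_io() are hypothetical caller-side setup.
 *
 *	bio->bi_opf = REQ_OP_READ;
 *	bio->bi_end_io = my_end_io;
 *	submit_bio(bio);
 *	...do not touch the bio until my_end_io() has run...
 */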

/**
 * blk_cloned_rq_check_limits - Helper function to check a cloned request
 *                              for the new queue limits
 * @q:  the queue
 * @rq: the request being checked
 *
 * Description:
 *    @rq may have been made based on weaker limitations of upper-level queues
 *    in request stacking drivers, and it may violate the limitation of @q.
 *    Since the block layer and the underlying device driver trust @rq
 *    after it is inserted to @q, it should be checked against @q before
 *    the insertion using this generic function.
 *
 *    Request stacking drivers like request-based dm may change the queue
 *    limits when retrying requests on other queues. Those requests need
 *    to be checked against the new queue limits again during dispatch.
 */
static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
				      struct request *rq)
{
	unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));

	if (blk_rq_sectors(rq) > max_sectors) {
		/*
		 * A SCSI device does not have a good way to return if
		 * Write Same/Zero is actually supported. If a device rejects
		 * a non-read/write command (discard, write same, etc.) the
		 * low-level device driver will set the relevant queue limit to
		 * 0 to prevent blk-lib from issuing more of the offending
		 * operations. Commands queued prior to the queue limit being
		 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
		 * errors being propagated to upper layers.
		 */
		if (max_sectors == 0)
			return BLK_STS_NOTSUPP;

		printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
			__func__, blk_rq_sectors(rq), max_sectors);
		return BLK_STS_IOERR;
	}

	/*
	 * The queue's settings related to segment counting, like
	 * q->bounce_pfn, may differ from those of other stacking queues.
	 * Recalculate it to check the request correctly on this queue's
	 * limitation.
	 */
	rq->nr_phys_segments = blk_recalc_rq_segments(rq);
	if (rq->nr_phys_segments > queue_max_segments(q)) {
		printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
			__func__, rq->nr_phys_segments, queue_max_segments(q));
		return BLK_STS_IOERR;
	}

	return BLK_STS_OK;
}

/**
 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
 * @q:  the queue to submit the request
 * @rq: the request being queued
 */
blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
	blk_status_t ret;

	ret = blk_cloned_rq_check_limits(q, rq);
	if (ret != BLK_STS_OK)
		return ret;

	if (rq->rq_disk &&
	    should_fail_request(rq->rq_disk->part0, blk_rq_bytes(rq)))
		return BLK_STS_IOERR;

	if (blk_crypto_insert_cloned_request(rq))
		return BLK_STS_IOERR;

	if (blk_queue_io_stat(q))
		blk_account_io_start(rq);

	/*
	 * Since we have a scheduler attached on the top device,
	 * bypass a potential scheduler on the bottom device for
	 * insert.
	 */
	return blk_mq_request_issue_directly(rq, true);
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);

/**
 * blk_rq_err_bytes - determine the number of bytes till the next failure boundary
 * @rq: request to examine
 *
 * Description:
 *     A request could be a merge of I/Os which require different failure
 *     handling. This function determines the number of bytes which
 *     can be failed from the beginning of the request without
 *     crossing into an area which needs to be retried further.
 *
 * Return:
 *     The number of bytes to fail.
 */
unsigned int blk_rq_err_bytes(const struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	unsigned int bytes = 0;
	struct bio *bio;

	if (!(rq->rq_flags & RQF_MIXED_MERGE))
		return blk_rq_bytes(rq);

	/*
	 * Currently the only 'mixing' which can happen is between
	 * different failfast types. We can safely fail portions
	 * which have all the failfast bits that the first one has -
	 * the ones which are at least as eager to fail as the first
	 * one.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		if ((bio->bi_opf & ff) != ff)
			break;
		bytes += bio->bi_iter.bi_size;
	}

	/* this could lead to infinite loop */
	BUG_ON(blk_rq_bytes(rq) && !bytes);
	return bytes;
}
EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
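
/*
 * Worked example (sketch): an RQF_MIXED_MERGE request built from three
 * bios whose failfast bits are {DEV|TRANSPORT}, {DEV|TRANSPORT} and {DEV},
 * with ff == REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT taken from the
 * request. The loop above sums the first two bios and stops at the third,
 * so only their bytes may be failed without a retry.
 */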

static void update_io_ticks(struct block_device *part, unsigned long now,
		bool end)
{
	unsigned long stamp;
again:
	stamp = READ_ONCE(part->bd_stamp);
	if (unlikely(stamp != now)) {
		if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp))
			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
	}
	if (part->bd_partno) {
		part = bdev_whole(part);
		goto again;
	}
}

static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (req->part && blk_do_io_stat(req)) {
		const int sgrp = op_stat_group(req_op(req));

		part_stat_lock();
		part_stat_add(req->part, sectors[sgrp], bytes >> 9);
		part_stat_unlock();
	}
}

void blk_account_io_done(struct request *req, u64 now)
{
	/*
	 * Account IO completion. flush_rq isn't accounted as a
	 * normal IO on queueing nor completion. Accounting the
	 * containing request is enough.
	 */
	if (req->part && blk_do_io_stat(req) &&
	    !(req->rq_flags & RQF_FLUSH_SEQ)) {
		const int sgrp = op_stat_group(req_op(req));

		part_stat_lock();
		update_io_ticks(req->part, jiffies, true);
		part_stat_inc(req->part, ios[sgrp]);
		part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
		part_stat_unlock();
	}
}

void blk_account_io_start(struct request *rq)
{
	if (!blk_do_io_stat(rq))
		return;

	/* passthrough requests can hold bios that do not have ->bi_bdev set */
	if (rq->bio && rq->bio->bi_bdev)
		rq->part = rq->bio->bi_bdev;
	else
		rq->part = rq->rq_disk->part0;

	part_stat_lock();
	update_io_ticks(rq->part, jiffies, false);
	part_stat_unlock();
}

static unsigned long __part_start_io_acct(struct block_device *part,
					  unsigned int sectors, unsigned int op)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);

	part_stat_lock();
	update_io_ticks(part, now, false);
	part_stat_inc(part, ios[sgrp]);
	part_stat_add(part, sectors[sgrp], sectors);
	part_stat_local_inc(part, in_flight[op_is_write(op)]);
	part_stat_unlock();

	return now;
}

/**
 * bio_start_io_acct - start I/O accounting for bio based drivers
 * @bio: bio to start account for
 *
 * Returns the start time that should be passed back to bio_end_io_acct().
 */
unsigned long bio_start_io_acct(struct bio *bio)
{
	return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio), bio_op(bio));
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);
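
/*
 * Example (sketch, not from this file): a bio based driver brackets each
 * bio with the accounting helpers; bio_end_io_acct() is the companion
 * helper declared in linux/blkdev.h, and the driver context is
 * hypothetical.
 *
 *	unsigned long start = bio_start_io_acct(bio);
 *	...drive the I/O...
 *	bio_end_io_acct(bio, start);
 */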

unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
				 unsigned int op)
{
	return __part_start_io_acct(disk->part0, sectors, op);
}
EXPORT_SYMBOL(disk_start_io_acct);

static void __part_end_io_acct(struct block_device *part, unsigned int op,
			       unsigned long start_time)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);
	unsigned long duration = now - start_time;

	part_stat_lock();
	update_io_ticks(part, now, true);
	part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
	part_stat_local_dec(part, in_flight[op_is_write(op)]);
	part_stat_unlock();
}

void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
		struct block_device *orig_bdev)
{
	__part_end_io_acct(orig_bdev, bio_op(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);

void disk_end_io_acct(struct gendisk *disk, unsigned int op,
		      unsigned long start_time)
{
	__part_end_io_acct(disk->part0, op, start_time);
}
EXPORT_SYMBOL(disk_end_io_acct);

/*
 * Steal bios from a request and add them to a bio list.
 * The request must not have been partially completed before.
 */
void blk_steal_bios(struct bio_list *list, struct request *rq)
{
	if (rq->bio) {
		if (list->tail)
			list->tail->bi_next = rq->bio;
		else
			list->head = rq->bio;
		list->tail = rq->biotail;

		rq->bio = NULL;
		rq->biotail = NULL;
	}

	rq->__data_len = 0;
}
EXPORT_SYMBOL_GPL(blk_steal_bios);

/**
 * blk_update_request - Special helper function for request stacking drivers
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     This special helper function is only for request stacking drivers
 *     (e.g. request-based dm) so that they can handle partial completion.
 *     Actual device drivers should use blk_mq_end_request instead.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Note:
 *	The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in both
 *	blk_rq_bytes() and in blk_update_request().
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, blk_status_t error,
		unsigned int nr_bytes)
{
	int total_bytes;

	trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);

	if (!req->bio)
		return false;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    error == BLK_STS_OK)
		req->q->integrity.profile->complete_fn(req, nr_bytes);
#endif

	if (unlikely(error && !blk_rq_is_passthrough(req) &&
		     !(req->rq_flags & RQF_QUIET)))
		print_req_error(req, error, __func__);

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

		if (bio_bytes == bio->bi_iter.bi_size)
			req->bio = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		req_bio_endio(req, bio, bio_bytes, error);

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	req->__data_len -= total_bytes;

	/* update sector only for requests with clear definition of sector */
	if (!blk_rq_is_passthrough(req))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->rq_flags & RQF_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
		/*
		 * If total number of sectors is less than the first segment
		 * size, something has gone terribly wrong.
		 */
		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
			blk_dump_rq_flags(req, "request botched");
			req->__data_len = blk_rq_cur_bytes(req);
		}

		/* recalculate the number of segments */
		req->nr_phys_segments = blk_recalc_rq_segments(req);
	}

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);
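
/*
 * Example (sketch, not from this file): how a request stacking driver
 * consumes a partial completion; blk_mq_end_request() is only called once
 * no data is left.
 *
 *	if (!blk_update_request(rq, error, bytes))
 *		blk_mq_end_request(rq, error);	(nothing left)
 *	else
 *		...requeue or continue with the remainder...
 */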

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
/**
 * rq_flush_dcache_pages - Helper function to flush all pages in a request
 * @rq: the request to be flushed
 *
 * Description:
 *     Flush all pages in @rq.
 */
void rq_flush_dcache_pages(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;

	rq_for_each_segment(bvec, rq, iter)
		flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
#endif

/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must set their own
 *    exporting function using blk_queue_lld_busy() first.
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when the underlying
 *    devices are busy. This behavior helps improve I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regression
 *    on burst I/O load.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
	if (queue_is_mq(q) && q->mq_ops->busy)
		return q->mq_ops->busy(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

/**
 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
 * @rq: the clone request to be cleaned up
 *
 * Description:
 *     Free all bios in @rq for a cloned request.
 */
void blk_rq_unprep_clone(struct request *rq)
{
	struct bio *bio;

	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;

		bio_put(bio);
	}
}
EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);

/**
 * blk_rq_prep_clone - Helper function to setup clone request
 * @rq: the request to be setup
 * @rq_src: original request to be cloned
 * @bs: bio_set that bios for clone are allocated from
 * @gfp_mask: memory allocation mask for bio
 * @bio_ctr: setup function to be called for each clone bio.
 *           Returns %0 for success, non %0 for failure.
 * @data: private data to be passed to @bio_ctr
 *
 * Description:
 *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
 *     Also, the pages which the original bios are pointing to are not copied;
 *     the cloned bios just point to the same pages.
 *     So the cloned bios must be completed before the original bios, which
 *     means the caller must complete @rq before @rq_src.
 */
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		      struct bio_set *bs, gfp_t gfp_mask,
		      int (*bio_ctr)(struct bio *, struct bio *, void *),
		      void *data)
{
	struct bio *bio, *bio_src;

	if (!bs)
		bs = &fs_bio_set;

	__rq_for_each_bio(bio_src, rq_src) {
		bio = bio_clone_fast(bio_src, gfp_mask, bs);
		if (!bio)
			goto free_and_out;

		if (bio_ctr && bio_ctr(bio, bio_src, data))
			goto free_and_out;

		if (rq->bio) {
			rq->biotail->bi_next = bio;
			rq->biotail = bio;
		} else {
			rq->bio = rq->biotail = bio;
		}
		bio = NULL;
	}

	/* Copy attributes of the original request to the clone request. */
	rq->__sector = blk_rq_pos(rq_src);
	rq->__data_len = blk_rq_bytes(rq_src);
	if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
		rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
		rq->special_vec = rq_src->special_vec;
	}
	rq->nr_phys_segments = rq_src->nr_phys_segments;
	rq->ioprio = rq_src->ioprio;

	if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
		goto free_and_out;

	return 0;

free_and_out:
	if (bio)
		bio_put(bio);
	blk_rq_unprep_clone(rq);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
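
/*
 * Example (sketch, not from this file): how a request based stacking
 * driver might use the clone helpers; clone, bs, lower_q and the requeue
 * path are hypothetical driver-side details.
 *
 *	if (blk_rq_prep_clone(clone, rq, bs, GFP_ATOMIC, NULL, NULL))
 *		return DM_MAPIO_REQUEUE;
 *	ret = blk_insert_cloned_request(lower_q, clone);
 *	...on teardown: blk_rq_unprep_clone(clone);...
 */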

int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay)
{
	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);

/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug: The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   blk_start_plug() indicates to the block layer an intent by the caller
 *   to submit multiple I/O requests in a batch.  The block layer may use
 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
 *   is called.  However, the block layer may choose to submit requests
 *   before a call to blk_finish_plug() if the number of queued I/Os
 *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
 *   %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be submitted early if
 *   the task schedules (see below).
 *
 *   Tracking blk_plug inside the task_struct will help with auto-flushing the
 *   pending I/O should the task end up blocking between blk_start_plug() and
 *   blk_finish_plug(). This is important from a performance perspective, but
 *   also ensures that we don't deadlock. For instance, if the task is blocking
 *   for a memory allocation, memory reclaim could end up wanting to free a
 *   page belonging to that request that is currently residing in our private
 *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
 *   this kind of deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	INIT_LIST_HEAD(&plug->mq_list);
	INIT_LIST_HEAD(&plug->cb_list);
	plug->rq_count = 0;
	plug->multiple_queues = false;
	plug->nowait = false;

	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier.
	 */
	tsk->plug = plug;
}
EXPORT_SYMBOL(blk_start_plug);
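
/*
 * Example (sketch, not from this file): batching a burst of submissions
 * under one plug; bios[] and nr are hypothetical.
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);
 */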

static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}

struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size)
{
	struct blk_plug *plug = current->plug;
	struct blk_plug_cb *cb;

	if (!plug)
		return NULL;

	list_for_each_entry(cb, &plug->cb_list, list)
		if (cb->callback == unplug && cb->data == data)
			return cb;

	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
		cb->data = data;
		cb->callback = unplug;
		list_add(&cb->list, &plug->cb_list);
	}
	return cb;
}
EXPORT_SYMBOL(blk_check_plugged);

void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	flush_plug_callbacks(plug, from_schedule);

	if (!list_empty(&plug->mq_list))
		blk_mq_flush_plug_list(plug, from_schedule);
}

/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug: The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 *   Indicate that a batch of I/O submissions is complete.  This function
 *   must be paired with an initial call to blk_start_plug().  The intent
 *   is to allow the block layer to optimize I/O submission.  See the
 *   documentation for blk_start_plug() for more information.
 */
void blk_finish_plug(struct blk_plug *plug)
{
	if (plug != current->plug)
		return;
	blk_flush_plug_list(plug, false);

	current->plug = NULL;
}
EXPORT_SYMBOL(blk_finish_plug);

void blk_io_schedule(void)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		io_schedule_timeout(timeout);
	else
		io_schedule();
}
EXPORT_SYMBOL_GPL(blk_io_schedule);

int __init blk_dev_init(void)
{
	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct bio, bi_opf));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	blk_debugfs_root = debugfs_create_dir("block", NULL);

	return 0;
}