// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-pm.h>
#include <linux/blk-integrity.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>
#include <linux/part_stat.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-throttle.h"

struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);

DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;
struct kmem_cache *blk_requestq_srcu_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

/**
 * blk_op_str - return the string XXX for a REQ_OP_XXX value
 * @op: REQ_OP_XXX.
 *
 * Description: Centralized block layer function to convert a REQ_OP_XXX value
 * into its string representation. Useful when debugging or tracing a bio or
 * request. For an invalid REQ_OP_XXX it returns the string "UNKNOWN".
 */
inline const char *blk_op_str(unsigned int op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);

static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* zone device specific errors */
	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);

const char *blk_status_to_str(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return "<null>";
	return blk_errors[idx].name;
}
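
/*
 * Minimal usage sketch of the status/errno helpers above (hypothetical
 * driver code, added for illustration; my_driver_complete() is not a real
 * function): a typical caller translates a blk_status_t back into an errno
 * at completion time.
 *
 *	static void my_driver_complete(struct request *rq, blk_status_t status)
 *	{
 *		int err = blk_status_to_errno(status);
 *
 *		if (err)
 *			pr_debug("%s request failed with error %d\n",
 *				 blk_op_str(req_op(rq)), err);
 *	}
 */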

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->submit_bio will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue kobject. When this reaches 0
 * we'll have blk_release_queue() called.
 *
 * Context: Any context, but the last reference must not be dropped from
 *          atomic context.
 */
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

void blk_queue_start_drain(struct request_queue *q)
{
	/*
	 * When the queue DYING flag is set, we need to block new requests
	 * from entering the queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);
	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);
	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 *
 * Context: can sleep
 */
void blk_cleanup_queue(struct request_queue *q)
{
	/* cannot be called from atomic context */
	might_sleep();

	WARN_ON_ONCE(blk_queue_registered(q));

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
	blk_queue_start_drain(q);

	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	/*
	 * Drain all requests queued before the DYING marking. Set the DEAD
	 * flag to prevent blk_mq_run_hw_queues() from accessing the hardware
	 * queues after draining has finished.
	 */
	blk_freeze_queue(q);

	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);

	blk_sync_queue(q);
	if (queue_is_mq(q)) {
		blk_mq_cancel_work_sync(q);
		blk_mq_exit_queue(q);
	}

	/*
	 * In theory, the sched_tags request pool belongs to the request queue.
	 * However, the current implementation requires the tag_set for freeing
	 * requests, so free the pool now.
	 *
	 * The queue has been frozen, so there can't be any in-queue requests
	 * and it is safe to free the requests now.
	 */
	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		blk_mq_sched_free_rqs(q);
	mutex_unlock(&q->sysfs_lock);

	percpu_ref_exit(&q->q_usage_counter);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PM;

	while (!blk_try_enter_queue(q, pm)) {
		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/*
		 * This read pairs with the barrier in blk_freeze_queue_start():
		 * we need to order reading the __PERCPU_REF_DEAD flag of
		 * .q_usage_counter against reading .mq_freeze_depth or the
		 * queue dying flag, otherwise the wait below may never return
		 * if the two reads are reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(pm, q)) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}

	return 0;
}
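
/*
 * Minimal usage sketch (hypothetical caller, added for illustration): code
 * that needs to touch a queue outside of normal bio submission is expected
 * to pair blk_queue_enter() with blk_queue_exit() so that queue freezing
 * and teardown can drain it.
 *
 *	static int my_peek_at_queue(struct request_queue *q)
 *	{
 *		int ret = blk_queue_enter(q, BLK_MQ_REQ_NOWAIT);
 *
 *		if (ret)
 *			return ret;	// queue is frozen or dying
 *		// ... the queue cannot be frozen or torn down here ...
 *		blk_queue_exit(q);
 *		return 0;
 *	}
 */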

int __bio_queue_enter(struct request_queue *q, struct bio *bio)
{
	while (!blk_try_enter_queue(q, false)) {
		struct gendisk *disk = bio->bi_bdev->bd_disk;

		if (bio->bi_opf & REQ_NOWAIT) {
			if (test_bit(GD_DEAD, &disk->state))
				goto dead;
			bio_wouldblock_error(bio);
			return -EBUSY;
		}

		/*
		 * This read pairs with the barrier in blk_freeze_queue_start():
		 * we need to order reading the __PERCPU_REF_DEAD flag of
		 * .q_usage_counter against reading .mq_freeze_depth or the
		 * queue dying flag, otherwise the wait below may never return
		 * if the two reads are reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(false, q)) ||
			   test_bit(GD_DEAD, &disk->state));
		if (test_bit(GD_DEAD, &disk->state))
			goto dead;
	}

	return 0;
dead:
	bio_io_error(bio);
	return -ENODEV;
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}

struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
{
	struct request_queue *q;
	int ret;

	q = kmem_cache_alloc_node(blk_get_queue_kmem_cache(alloc_srcu),
			GFP_KERNEL | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	if (alloc_srcu) {
		blk_queue_flag_set(QUEUE_FLAG_HAS_SRCU, q);
		if (init_srcu_struct(q->srcu) != 0)
			goto fail_q;
	}

	q->last_merge = NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
	if (q->id < 0)
		goto fail_srcu;

	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, 0);
	if (ret)
		goto fail_id;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_split;

	q->node = node_id;

	atomic_set(&q->nr_active_requests_shared_tags, 0);

	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->debugfs_mutex);
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->sysfs_dir_lock);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_stats;

	if (blkcg_init_queue(q))
		goto fail_ref;

	blk_queue_dma_alignment(q, 511);
	blk_set_default_limits(&q->limits);
	q->nr_requests = BLKDEV_DEFAULT_RQ;

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_stats:
	blk_free_queue_stats(q->stats);
fail_split:
	bioset_exit(&q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_srcu:
	if (alloc_srcu)
		cleanup_srcu_struct(q->srcu);
fail_q:
	kmem_cache_free(blk_get_queue_kmem_cache(alloc_srcu), q);
	return NULL;
}
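
/*
 * Brief usage sketch (hypothetical code, added for illustration): a caller
 * that wants to keep a request_queue alive beyond the reference it was
 * handed pairs blk_get_queue() with blk_put_queue():
 *
 *	if (blk_get_queue(q)) {
 *		// ... q cannot be released while we hold this reference ...
 *		blk_put_queue(q);
 *	}
 *
 * blk_get_queue() fails once the queue has been marked dying, see below.
 */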

/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.
 *
 * Context: Any context.
 */
bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

static void handle_bad_sector(struct bio *bio, sector_t maxsector)
{
	char b[BDEVNAME_SIZE];

	pr_info_ratelimited("%s: attempt to access beyond end of device\n"
			    "%s: rw=%d, want=%llu, limit=%llu\n",
			    current->comm,
			    bio_devname(bio, b), bio->bi_opf,
			    bio_end_sector(bio), maxsector);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

bool should_fail_request(struct block_device *part, unsigned int bytes)
{
	return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);
#endif /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool bio_check_ro(struct bio *bio)
{
	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
		char b[BDEVNAME_SIZE];

		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return false;

		WARN_ONCE(1,
		       "Trying to write to read-only block-device %s (partno %d)\n",
			bio_devname(bio, b), bio->bi_bdev->bd_partno);
		/* Older lvm-tools actually trigger this */
		return false;
	}

	return false;
}

static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		handle_bad_sector(bio, maxsector);
		return -EIO;
	}
	return 0;
}

/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static int blk_partition_remap(struct bio *bio)
{
	struct block_device *p = bio->bi_bdev;

	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		return -EIO;
	if (bio_sectors(bio)) {
		bio->bi_iter.bi_sector += p->bd_start_sect;
		trace_block_bio_remap(bio, p->bd_dev,
				      bio->bi_iter.bi_sector -
				      p->bd_start_sect);
	}
	bio_set_flag(bio, BIO_REMAPPED);
	return 0;
}

/*
 * Check write append to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	sector_t pos = bio->bi_iter.bi_sector;
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!blk_queue_is_zoned(q))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (pos & (blk_queue_zone_sectors(q) - 1) ||
	    !blk_queue_zone_is_seq(q, pos))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}

noinline_for_stack bool submit_bio_checks(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	blk_status_t status = BLK_STS_IOERR;
	struct blk_plug *plug;

	might_sleep();

	plug = blk_mq_plug(q, bio);
	if (plug && plug->nowait)
		bio->bi_opf |= REQ_NOWAIT;

	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if queue does not support NOWAIT.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;
	if (unlikely(bio_check_ro(bio)))
		goto end_io;
	if (!bio_flagged(bio, BIO_REMAPPED)) {
		if (unlikely(bio_check_eod(bio)))
			goto end_io;
		if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
			goto end_io;
	}

	/*
	 * Filter flush bios early so that bio based drivers without flush
	 * support don't have to worry about them.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!bio_sectors(bio)) {
			status = BLK_STS_OK;
			goto end_io;
		}
	}

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio_clear_polled(bio);

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!blk_queue_discard(q))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!blk_queue_secure_erase(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_SAME:
		if (!q->limits.max_write_same_sectors)
			goto not_supported;
		break;
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
			goto end_io;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!blk_queue_is_zoned(q))
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET_ALL:
		if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

	if (blk_throtl_bio(bio))
		return false;

	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(bio);
		/* Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}
	return true;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
	return false;
}

static void __submit_bio_fops(struct gendisk *disk, struct bio *bio)
{
	if (blk_crypto_bio_prep(&bio)) {
		if (likely(bio_queue_enter(bio) == 0)) {
			disk->fops->submit_bio(bio);
			blk_queue_exit(disk->queue);
		}
	}
}

static void __submit_bio(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;

	if (unlikely(!submit_bio_checks(bio)))
		return;

	if (!disk->fops->submit_bio)
		blk_mq_submit_bio(bio);
	else
		__submit_bio_fops(disk, bio);
}

/*
 * The loop in this function may be a bit non-obvious, and so deserves some
 * explanation:
 *
 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 *    that), so we have a list with a single bio.
 *  - We pretend that we have just taken it off a longer list, so we assign
 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 *    bio_list of new bios to be added.  ->submit_bio() may indeed add some more
 *    bios through a recursive call to submit_bio_noacct.  If it did, we find a
 *    non-NULL value in bio_list and re-enter the loop from the top.
 *  - In this case we really did just take the bio off the top of the list (no
 *    pretending) and so remove it from bio_list, and call into ->submit_bio()
 *    again.
 *
 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 * bio_list_on_stack[1] contains bios that were submitted before the current
 *	->submit_bio, but that haven't been processed yet.
 */
static void __submit_bio_noacct(struct bio *bio)
{
	struct bio_list bio_list_on_stack[2];

	BUG_ON(bio->bi_next);

	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;

	do {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
		struct bio_list lower, same;

		/*
		 * Create a fresh bio_list for all subordinate requests.
		 */
		bio_list_on_stack[1] = bio_list_on_stack[0];
		bio_list_init(&bio_list_on_stack[0]);

		__submit_bio(bio);

		/*
		 * Sort new bios into those for a lower level and those for the
		 * same level.
		 */
		bio_list_init(&lower);
		bio_list_init(&same);
		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
			if (q == bdev_get_queue(bio->bi_bdev))
				bio_list_add(&same, bio);
			else
				bio_list_add(&lower, bio);

		/*
		 * Now assemble so we handle the lowest level first.
		 */
		bio_list_merge(&bio_list_on_stack[0], &lower);
		bio_list_merge(&bio_list_on_stack[0], &same);
		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));

	current->bio_list = NULL;
}

static void __submit_bio_noacct_mq(struct bio *bio)
{
	struct bio_list bio_list[2] = { };

	current->bio_list = bio_list;

	do {
		__submit_bio(bio);
	} while ((bio = bio_list_pop(&bio_list[0])));

	current->bio_list = NULL;
}
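
/*
 * Illustrative sketch (hypothetical stacking driver, added for illustration):
 * the iterative loop above exists because a ->submit_bio method may itself
 * generate bios for lower devices.  Such a driver typically remaps the bio
 * and feeds it back through submit_bio_noacct(), which queues it on
 * current->bio_list instead of recursing:
 *
 *	static void my_stacked_submit_bio(struct bio *bio)
 *	{
 *		struct my_target *t = bio->bi_bdev->bd_disk->private_data;
 *
 *		bio_set_dev(bio, t->lower_bdev);
 *		bio->bi_iter.bi_sector += t->start_sector;
 *		submit_bio_noacct(bio);
 *	}
 */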

/**
 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 * @bio: The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers.  All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */
void submit_bio_noacct(struct bio *bio)
{
	/*
	 * We only want one ->submit_bio to be active at a time, else stack
	 * usage with stacked devices could be a problem.  Use current->bio_list
	 * to collect a list of requests submitted by a ->submit_bio method
	 * while it is active, and then process them after it returned.
	 */
	if (current->bio_list)
		bio_list_add(&current->bio_list[0], bio);
	else if (!bio->bi_bdev->bd_disk->fops->submit_bio)
		__submit_bio_noacct_mq(bio);
	else
		__submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices.  It is passed a
 * fully set up &struct bio that describes the I/O that needs to be done.  The
 * bio will be sent to the device described by the bi_bdev field.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */
void submit_bio(struct bio *bio)
{
	if (blkcg_punt_bio_submit(bio))
		return;

	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (bio_has_data(bio)) {
		unsigned int count;

		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
			count = queue_logical_block_size(
					bdev_get_queue(bio->bi_bdev)) >> 9;
		else
			count = bio_sectors(bio);

		if (op_is_write(bio_op(bio))) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_iter.bi_size);
			count_vm_events(PGPGIN, count);
		}
	}

	/*
	 * If we're reading data that is part of the userspace workingset, count
	 * submission time as memory stall.  When the device is congested, or
	 * the submitting cgroup IO-throttled, submission can be a significant
	 * part of overall IO time.
	 */
	if (unlikely(bio_op(bio) == REQ_OP_READ &&
	    bio_flagged(bio, BIO_WORKINGSET))) {
		unsigned long pflags;

		psi_memstall_enter(&pflags);
		submit_bio_noacct(bio);
		psi_memstall_leave(&pflags);
		return;
	}

	submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);
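
/*
 * Minimal submission sketch (hypothetical caller, added for illustration;
 * the exact bio_alloc() signature varies between kernel versions): read one
 * page at @sector from @bdev and wait for it to complete.
 *
 *	static void my_read_end_io(struct bio *bio)
 *	{
 *		// bio->bi_status holds the completion status
 *		complete(bio->bi_private);
 *		bio_put(bio);
 *	}
 *
 *	bio = bio_alloc(GFP_KERNEL, 1);
 *	bio_set_dev(bio, bdev);
 *	bio->bi_opf = REQ_OP_READ;
 *	bio->bi_iter.bi_sector = sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_private = &done;
 *	bio->bi_end_io = my_read_end_io;
 *	submit_bio(bio);
 *	wait_for_completion(&done);
 */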

/**
 * bio_poll - poll for BIO completions
 * @bio: bio to poll for
 * @iob: batches of IO
 * @flags: BLK_POLL_* flags that control the behavior
 *
 * Poll for completions on queue associated with the bio. Returns number of
 * completed entries found.
 *
 * Note: the caller must either be the context that submitted @bio, or
 * be in a RCU critical section to prevent freeing of @bio.
 */
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
	int ret;

	if (cookie == BLK_QC_T_NONE ||
	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return 0;

	if (current->plug)
		blk_flush_plug(current->plug, false);

	if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
		return 0;
	if (WARN_ON_ONCE(!queue_is_mq(q)))
		ret = 0;	/* not yet implemented, should not happen */
	else
		ret = blk_mq_poll(q, cookie, iob, flags);
	blk_queue_exit(q);
	return ret;
}
EXPORT_SYMBOL_GPL(bio_poll);

/*
 * Helper to implement file_operations.iopoll.  Requires the bio to be stored
 * in iocb->private, and cleared before freeing the bio.
 */
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
		    unsigned int flags)
{
	struct bio *bio;
	int ret = 0;

	/*
	 * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
	 * point to a freshly allocated bio at this point.  If that happens
	 * we have a few cases to consider:
	 *
	 *  1) the bio is being initialized and bi_bdev is NULL.  We can
	 *     simply do nothing in this case.
	 *  2) the bio points to a device that is not poll enabled.  bio_poll
	 *     will catch this and return 0.
	 *  3) the bio points to a poll capable device, including but not
	 *     limited to the one that the original bio pointed to.  In this
	 *     case we will call into the actual poll method and poll for I/O,
	 *     even if we don't need to, but it won't cause harm either.
	 *
	 * For cases 2) and 3) above the RCU grace period ensures that bi_bdev
	 * is still allocated. Because partitions hold a reference to the whole
	 * device bdev and thus disk, the disk is also still valid.  Grabbing
	 * a reference to the queue in bio_poll() ensures the hctxs and requests
	 * are still valid as well.
	 */
	rcu_read_lock();
	bio = READ_ONCE(kiocb->private);
	if (bio && bio->bi_bdev)
		ret = bio_poll(bio, iob, flags);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(iocb_bio_iopoll);
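
/*
 * Wiring sketch (hypothetical driver code, added for illustration): a bio
 * based driver that wants polled I/O stores the bio in iocb->private at
 * submission time and points its file_operations at the helper above:
 *
 *	// at submission time, before submit_bio()
 *	WRITE_ONCE(iocb->private, bio);
 *
 *	static const struct file_operations my_fops = {
 *		.read_iter	= my_read_iter,
 *		.write_iter	= my_write_iter,
 *		.iopoll		= iocb_bio_iopoll,
 *	};
 *
 * and clears iocb->private again before the bio is freed.
 */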

void update_io_ticks(struct block_device *part, unsigned long now, bool end)
{
	unsigned long stamp;
again:
	stamp = READ_ONCE(part->bd_stamp);
	if (unlikely(time_after(now, stamp))) {
		if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp))
			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
	}
	if (part->bd_partno) {
		part = bdev_whole(part);
		goto again;
	}
}

static unsigned long __part_start_io_acct(struct block_device *part,
					  unsigned int sectors, unsigned int op,
					  unsigned long start_time)
{
	const int sgrp = op_stat_group(op);

	part_stat_lock();
	update_io_ticks(part, start_time, false);
	part_stat_inc(part, ios[sgrp]);
	part_stat_add(part, sectors[sgrp], sectors);
	part_stat_local_inc(part, in_flight[op_is_write(op)]);
	part_stat_unlock();

	return start_time;
}

/**
 * bio_start_io_acct_time - start I/O accounting for bio based drivers
 * @bio: bio to start accounting for
 * @start_time: start time that should be passed back to bio_end_io_acct().
 */
void bio_start_io_acct_time(struct bio *bio, unsigned long start_time)
{
	__part_start_io_acct(bio->bi_bdev, bio_sectors(bio),
			     bio_op(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct_time);

/**
 * bio_start_io_acct - start I/O accounting for bio based drivers
 * @bio: bio to start accounting for
 *
 * Returns the start time that should be passed back to bio_end_io_acct().
 */
unsigned long bio_start_io_acct(struct bio *bio)
{
	return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio),
				    bio_op(bio), jiffies);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);

unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
				 unsigned int op)
{
	return __part_start_io_acct(disk->part0, sectors, op, jiffies);
}
EXPORT_SYMBOL(disk_start_io_acct);

static void __part_end_io_acct(struct block_device *part, unsigned int op,
			       unsigned long start_time)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);
	unsigned long duration = now - start_time;

	part_stat_lock();
	update_io_ticks(part, now, true);
	part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
	part_stat_local_dec(part, in_flight[op_is_write(op)]);
	part_stat_unlock();
}

void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
			      struct block_device *orig_bdev)
{
	__part_end_io_acct(orig_bdev, bio_op(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);

void disk_end_io_acct(struct gendisk *disk, unsigned int op,
		      unsigned long start_time)
{
	__part_end_io_acct(disk->part0, op, start_time);
}
EXPORT_SYMBOL(disk_end_io_acct);
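
/*
 * Accounting sketch (hypothetical bio based driver, added for illustration):
 * a driver that completes bios itself brackets each bio with the start/end
 * accounting helpers so that it shows up in /proc/diskstats:
 *
 *	// at submission
 *	start = bio_start_io_acct(bio);
 *
 *	// at completion, before bio_endio()
 *	bio_end_io_acct(bio, start);
 *
 * bio_end_io_acct() is a small wrapper around bio_end_io_acct_remapped()
 * provided by the block layer headers.
 */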

/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must set their own
 *    exporting function using blk_queue_lld_busy() first.
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when underlying
 *    devices are busy.  This behavior helps more I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regression
 *    on burst I/O load.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
	if (queue_is_mq(q) && q->mq_ops->busy)
		return q->mq_ops->busy(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay)
{
	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);

void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
{
	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	plug->mq_list = NULL;
	plug->cached_rq = NULL;
	plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
	plug->rq_count = 0;
	plug->multiple_queues = false;
	plug->has_elevator = false;
	plug->nowait = false;
	INIT_LIST_HEAD(&plug->cb_list);

	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier.
	 */
	tsk->plug = plug;
}

/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug: The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   blk_start_plug() indicates to the block layer an intent by the caller
 *   to submit multiple I/O requests in a batch.  The block layer may use
 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
 *   is called.  However, the block layer may choose to submit requests
 *   before a call to blk_finish_plug() if the number of queued I/Os
 *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
 *   %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be submitted early if
 *   the task schedules (see below).
 *
 * Tracking blk_plug inside the task_struct will help with auto-flushing the
 * pending I/O should the task end up blocking between blk_start_plug() and
 * blk_finish_plug().  This is important from a performance perspective, but
 * also ensures that we don't deadlock.  For instance, if the task is blocking
 * for a memory allocation, memory reclaim could end up wanting to free a
 * page belonging to that request that is currently residing in our private
 * plug.  By flushing the pending I/O when the process goes to sleep, we avoid
 * this kind of deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
	blk_start_plug_nr_ios(plug, 1);
}
EXPORT_SYMBOL(blk_start_plug);

static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}

struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size)
{
	struct blk_plug *plug = current->plug;
	struct blk_plug_cb *cb;

	if (!plug)
		return NULL;

	list_for_each_entry(cb, &plug->cb_list, list)
		if (cb->callback == unplug && cb->data == data)
			return cb;

	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
		cb->data = data;
		cb->callback = unplug;
		list_add(&cb->list, &plug->cb_list);
	}
	return cb;
}
EXPORT_SYMBOL(blk_check_plugged);
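
/*
 * Plugging sketch (hypothetical caller, added for illustration): a submitter
 * that knows it is about to issue several bios brackets them with a plug so
 * the block layer can batch them:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);
 *
 * If the task blocks in between, the scheduler flushes the plug on its
 * behalf (see blk_flush_plug() below), so no I/O is left stranded.
 */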

void blk_flush_plug(struct blk_plug *plug, bool from_schedule)
{
	if (!list_empty(&plug->cb_list))
		flush_plug_callbacks(plug, from_schedule);
	if (!rq_list_empty(plug->mq_list))
		blk_mq_flush_plug_list(plug, from_schedule);
	/*
	 * Unconditionally flush out cached requests, even if the unplug
	 * event came from schedule.  Since we hold references to the
	 * queue for cached requests, we don't want a blocked task holding
	 * up a queue freeze/quiesce event.
	 */
	if (unlikely(!rq_list_empty(plug->cached_rq)))
		blk_mq_free_plug_rqs(plug);
}

/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug: The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 * Indicate that a batch of I/O submissions is complete.  This function
 * must be paired with an initial call to blk_start_plug().  The intent
 * is to allow the block layer to optimize I/O submission.  See the
 * documentation for blk_start_plug() for more information.
 */
void blk_finish_plug(struct blk_plug *plug)
{
	if (plug == current->plug) {
		blk_flush_plug(plug, false);
		current->plug = NULL;
	}
}
EXPORT_SYMBOL(blk_finish_plug);

void blk_io_schedule(void)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		io_schedule_timeout(timeout);
	else
		io_schedule();
}
EXPORT_SYMBOL_GPL(blk_io_schedule);

int __init blk_dev_init(void)
{
	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct bio, bi_opf));
	BUILD_BUG_ON(ALIGN(offsetof(struct request_queue, srcu),
			   __alignof__(struct request_queue)) !=
		     sizeof(struct request_queue));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	blk_requestq_srcu_cachep = kmem_cache_create("request_queue_srcu",
			sizeof(struct request_queue) +
			sizeof(struct srcu_struct), 0, SLAB_PANIC, NULL);

	blk_debugfs_root = debugfs_create_dir("block", NULL);

	return 0;
}