// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2016 - 2020 Christoph Hellwig
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/part_stat.h>
#include <linux/uaccess.h>
#include <linux/stat.h>
#include "../fs/internal.h"
#include "blk.h"

struct bdev_inode {
        struct block_device bdev;
        struct inode vfs_inode;
};

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
        return container_of(inode, struct bdev_inode, vfs_inode);
}

struct block_device *I_BDEV(struct inode *inode)
{
        return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);

static void bdev_write_inode(struct block_device *bdev)
{
        struct inode *inode = bdev->bd_inode;
        int ret;

        spin_lock(&inode->i_lock);
        while (inode->i_state & I_DIRTY) {
                spin_unlock(&inode->i_lock);
                ret = write_inode_now(inode, true);
                if (ret)
                        pr_warn_ratelimited(
        "VFS: Dirty inode writeback failed for block device %pg (err=%d).\n",
                                bdev, ret);
                spin_lock(&inode->i_lock);
        }
        spin_unlock(&inode->i_lock);
}

/* Kill _all_ buffers and pagecache, dirty or not. */
static void kill_bdev(struct block_device *bdev)
{
        struct address_space *mapping = bdev->bd_inode->i_mapping;

        if (mapping_empty(mapping))
                return;

        invalidate_bh_lrus();
        truncate_inode_pages(mapping, 0);
}

/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
        struct address_space *mapping = bdev->bd_inode->i_mapping;

        if (mapping->nrpages) {
                invalidate_bh_lrus();
                lru_add_drain_all();    /* make sure all lru add caches are flushed */
                invalidate_mapping_pages(mapping, 0, -1);
        }
}
EXPORT_SYMBOL(invalidate_bdev);

/*
 * Drop all buffers & page cache for the given bdev range. This function bails
 * out with an error if the bdev has some other exclusive owner (such as a
 * filesystem).
 */
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
                        loff_t lstart, loff_t lend)
{
        /*
         * If we don't hold an exclusive handle for the device, upgrade to it
         * while we discard the buffer cache to avoid discarding buffers
         * under a live filesystem.
         */
        if (!(mode & BLK_OPEN_EXCL)) {
                int err = bd_prepare_to_claim(bdev, truncate_bdev_range, NULL);
                if (err)
                        goto invalidate;
        }

        truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
        if (!(mode & BLK_OPEN_EXCL))
                bd_abort_claiming(bdev, truncate_bdev_range);
        return 0;

invalidate:
        /*
         * Someone else has the handle exclusively open. Try invalidating
         * instead. The 'end' argument is inclusive so the rounding is safe.
         */
        return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
                                             lstart >> PAGE_SHIFT,
                                             lend >> PAGE_SHIFT);
}
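
/*
 * Illustrative sketch (not part of this file's API surface): a discard-style
 * ioctl path would typically drop the page cache over the affected byte range
 * before issuing the device operation, roughly as below.  The "start"/"len"
 * variables and the surrounding error handling are hypothetical.
 *
 *	loff_t end = start + len - 1;
 *	int err;
 *
 *	filemap_invalidate_lock(bdev->bd_inode->i_mapping);
 *	err = truncate_bdev_range(bdev, mode, start, end);
 *	if (!err)
 *		err = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
 *					   len >> SECTOR_SHIFT, GFP_KERNEL);
 *	filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
 */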

static void set_init_blocksize(struct block_device *bdev)
{
        unsigned int bsize = bdev_logical_block_size(bdev);
        loff_t size = i_size_read(bdev->bd_inode);

        while (bsize < PAGE_SIZE) {
                if (size & bsize)
                        break;
                bsize <<= 1;
        }
        bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}

int set_blocksize(struct block_device *bdev, int size)
{
        /* Size must be a power of two, and between 512 and PAGE_SIZE */
        if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
                return -EINVAL;

        /* Size cannot be smaller than the size supported by the device */
        if (size < bdev_logical_block_size(bdev))
                return -EINVAL;

        /* Don't change the size if it is the same as the current one */
        if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
                sync_blockdev(bdev);
                bdev->bd_inode->i_blkbits = blksize_bits(size);
                kill_bdev(bdev);
        }
        return 0;
}

EXPORT_SYMBOL(set_blocksize);

int sb_set_blocksize(struct super_block *sb, int size)
{
        if (set_blocksize(sb->s_bdev, size))
                return 0;
        /*
         * If we get here, we know size is a power of two and its value is
         * between 512 and PAGE_SIZE.
         */
        sb->s_blocksize = size;
        sb->s_blocksize_bits = blksize_bits(size);
        return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int sb_min_blocksize(struct super_block *sb, int size)
{
        int minsize = bdev_logical_block_size(sb->s_bdev);
        if (size < minsize)
                size = minsize;
        return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);
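
/*
 * Illustrative sketch (not taken from this file): a filesystem's fill_super
 * callback typically picks its block size through the helpers above instead
 * of touching i_blkbits directly.  MYFS_BLOCK_SIZE and the myfs_* names are
 * hypothetical.
 *
 *	static int myfs_fill_super(struct super_block *sb, void *data, int silent)
 *	{
 *		if (!sb_min_blocksize(sb, MYFS_BLOCK_SIZE))
 *			return -EINVAL;
 *		...
 *		return 0;
 *	}
 *
 * sb_min_blocksize() clamps the request up to the device's logical block
 * size, and a zero return means the block size could not be set.
 */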

int sync_blockdev_nowait(struct block_device *bdev)
{
        if (!bdev)
                return 0;
        return filemap_flush(bdev->bd_inode->i_mapping);
}
EXPORT_SYMBOL_GPL(sync_blockdev_nowait);

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
        if (!bdev)
                return 0;
        return filemap_write_and_wait(bdev->bd_inode->i_mapping);
}
EXPORT_SYMBOL(sync_blockdev);

int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend)
{
        return filemap_write_and_wait_range(bdev->bd_inode->i_mapping,
                        lstart, lend);
}
EXPORT_SYMBOL(sync_blockdev_range);

/**
 * freeze_bdev - lock a filesystem and force it into a consistent state
 * @bdev: blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can actually unfreeze the frozen filesystem when multiple
 * freeze requests arrive simultaneously.  It counts up in freeze_bdev() and
 * down in thaw_bdev().  When it becomes 0, thaw_bdev() actually unfreezes.
 */
int freeze_bdev(struct block_device *bdev)
{
        struct super_block *sb;
        int error = 0;

        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (++bdev->bd_fsfreeze_count > 1)
                goto done;

        sb = get_active_super(bdev);
        if (!sb)
                goto sync;
        if (sb->s_op->freeze_super)
                error = sb->s_op->freeze_super(sb, FREEZE_HOLDER_USERSPACE);
        else
                error = freeze_super(sb, FREEZE_HOLDER_USERSPACE);
        deactivate_super(sb);

        if (error) {
                bdev->bd_fsfreeze_count--;
                goto done;
        }
        bdev->bd_fsfreeze_sb = sb;

sync:
        sync_blockdev(bdev);
done:
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        return error;
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev - unlock filesystem
 * @bdev: blockdevice to unlock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev)
{
        struct super_block *sb;
        int error = -EINVAL;

        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (!bdev->bd_fsfreeze_count)
                goto out;

        error = 0;
        if (--bdev->bd_fsfreeze_count > 0)
                goto out;

        sb = bdev->bd_fsfreeze_sb;
        if (!sb)
                goto out;

        if (sb->s_op->thaw_super)
                error = sb->s_op->thaw_super(sb, FREEZE_HOLDER_USERSPACE);
        else
                error = thaw_super(sb, FREEZE_HOLDER_USERSPACE);
        if (error)
                bdev->bd_fsfreeze_count++;
        else
                bdev->bd_fsfreeze_sb = NULL;
out:
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        return error;
}
EXPORT_SYMBOL(thaw_bdev);
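
/*
 * Illustrative sketch (not taken from this file): a snapshot implementation,
 * e.g. a device-mapper style target, would bracket its metadata copy with the
 * two helpers above.  The "origin_bdev" variable and take_snapshot() are
 * hypothetical.
 *
 *	int err = freeze_bdev(origin_bdev);
 *
 *	if (err)
 *		return err;
 *	err = take_snapshot(origin_bdev);
 *	thaw_bdev(origin_bdev);
 *	return err;
 *
 * Freezes nest: only the final thaw_bdev() call actually unfreezes the
 * filesystem, matching the bd_fsfreeze_count behaviour described above.
 */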

/*
 * pseudo-fs
 */

static __cacheline_aligned_in_smp DEFINE_MUTEX(bdev_lock);
static struct kmem_cache *bdev_cachep __read_mostly;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
        struct bdev_inode *ei = alloc_inode_sb(sb, bdev_cachep, GFP_KERNEL);

        if (!ei)
                return NULL;
        memset(&ei->bdev, 0, sizeof(ei->bdev));
        return &ei->vfs_inode;
}

static void bdev_free_inode(struct inode *inode)
{
        struct block_device *bdev = I_BDEV(inode);

        free_percpu(bdev->bd_stats);
        kfree(bdev->bd_meta_info);

        if (!bdev_is_partition(bdev)) {
                if (bdev->bd_disk && bdev->bd_disk->bdi)
                        bdi_put(bdev->bd_disk->bdi);
                kfree(bdev->bd_disk);
        }

        if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
                blk_free_ext_minor(MINOR(bdev->bd_dev));

        kmem_cache_free(bdev_cachep, BDEV_I(inode));
}

static void init_once(void *data)
{
        struct bdev_inode *ei = data;

        inode_init_once(&ei->vfs_inode);
}

static void bdev_evict_inode(struct inode *inode)
{
        truncate_inode_pages_final(&inode->i_data);
        invalidate_inode_buffers(inode); /* is it needed here? */
        clear_inode(inode);
}

static const struct super_operations bdev_sops = {
        .statfs = simple_statfs,
        .alloc_inode = bdev_alloc_inode,
        .free_inode = bdev_free_inode,
        .drop_inode = generic_delete_inode,
        .evict_inode = bdev_evict_inode,
};

static int bd_init_fs_context(struct fs_context *fc)
{
        struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC);
        if (!ctx)
                return -ENOMEM;
        fc->s_iflags |= SB_I_CGROUPWB;
        ctx->ops = &bdev_sops;
        return 0;
}

static struct file_system_type bd_type = {
        .name = "bdev",
        .init_fs_context = bd_init_fs_context,
        .kill_sb = kill_anon_super,
};

struct super_block *blockdev_superblock __read_mostly;
EXPORT_SYMBOL_GPL(blockdev_superblock);

void __init bdev_cache_init(void)
{
        int err;
        static struct vfsmount *bd_mnt;

        bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
                        0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
                                SLAB_MEM_SPREAD|SLAB_ACCOUNT|SLAB_PANIC),
                        init_once);
        err = register_filesystem(&bd_type);
        if (err)
                panic("Cannot register bdev pseudo-fs");
        bd_mnt = kern_mount(&bd_type);
        if (IS_ERR(bd_mnt))
                panic("Cannot create bdev pseudo-fs");
        blockdev_superblock = bd_mnt->mnt_sb;   /* For writeback */
}

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
{
        struct block_device *bdev;
        struct inode *inode;

        inode = new_inode(blockdev_superblock);
        if (!inode)
                return NULL;
        inode->i_mode = S_IFBLK;
        inode->i_rdev = 0;
        inode->i_data.a_ops = &def_blk_aops;
        mapping_set_gfp_mask(&inode->i_data, GFP_USER);

        bdev = I_BDEV(inode);
        mutex_init(&bdev->bd_fsfreeze_mutex);
        spin_lock_init(&bdev->bd_size_lock);
        mutex_init(&bdev->bd_holder_lock);
        bdev->bd_partno = partno;
        bdev->bd_inode = inode;
        bdev->bd_queue = disk->queue;
        if (partno)
                bdev->bd_has_submit_bio = disk->part0->bd_has_submit_bio;
        else
                bdev->bd_has_submit_bio = false;
        bdev->bd_stats = alloc_percpu(struct disk_stats);
        if (!bdev->bd_stats) {
                iput(inode);
                return NULL;
        }
        bdev->bd_disk = disk;
        return bdev;
}

void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
{
        spin_lock(&bdev->bd_size_lock);
        i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
        bdev->bd_nr_sectors = sectors;
        spin_unlock(&bdev->bd_size_lock);
}

void bdev_add(struct block_device *bdev, dev_t dev)
{
        if (bdev_stable_writes(bdev))
                mapping_set_stable_writes(bdev->bd_inode->i_mapping);
        bdev->bd_dev = dev;
        bdev->bd_inode->i_rdev = dev;
        bdev->bd_inode->i_ino = dev;
        insert_inode_hash(bdev->bd_inode);
}

long nr_blockdev_pages(void)
{
        struct inode *inode;
        long ret = 0;

        spin_lock(&blockdev_superblock->s_inode_list_lock);
        list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list)
                ret += inode->i_mapping->nrpages;
        spin_unlock(&blockdev_superblock->s_inode_list_lock);

        return ret;
}
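
/*
 * Illustrative note (an assumption, not taken from this file):
 * bdev_set_nr_sectors() is what ultimately backs a driver's set_capacity()
 * or set_capacity_and_notify() call, so the bdev inode size and
 * bd_nr_sectors always describe the same capacity in different units.  A
 * reader can use either view; "bdev" below is assumed to be an open device.
 *
 *	sector_t sectors = bdev_nr_sectors(bdev);
 *	loff_t bytes = bdev_nr_bytes(bdev);
 *
 *	WARN_ON(bytes != (loff_t)sectors << SECTOR_SHIFT);
 */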

/**
 * bd_may_claim - test whether a block device can be claimed
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 * @hops: holder ops
 *
 * Test whether @bdev can be claimed by @holder.
 *
 * RETURNS:
 * %true if @bdev can be claimed, %false otherwise.
 */
static bool bd_may_claim(struct block_device *bdev, void *holder,
                const struct blk_holder_ops *hops)
{
        struct block_device *whole = bdev_whole(bdev);

        lockdep_assert_held(&bdev_lock);

        if (bdev->bd_holder) {
                /*
                 * The same holder can always re-claim.
                 */
                if (bdev->bd_holder == holder) {
                        if (WARN_ON_ONCE(bdev->bd_holder_ops != hops))
                                return false;
                        return true;
                }
                return false;
        }

        /*
         * If the whole device's holder is set to bd_may_claim, a partition on
         * the device is claimed, but not the whole device.
         */
        if (whole != bdev &&
            whole->bd_holder && whole->bd_holder != bd_may_claim)
                return false;
        return true;
}

/**
 * bd_prepare_to_claim - claim a block device
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 * @hops: holder ops.
 *
 * Claim @bdev.  This function fails if @bdev is already claimed by another
 * holder and waits if another claiming is in progress.  On successful return,
 * the caller has ownership of bd_claiming and bd_holder[s].
 *
 * RETURNS:
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
                const struct blk_holder_ops *hops)
{
        struct block_device *whole = bdev_whole(bdev);

        if (WARN_ON_ONCE(!holder))
                return -EINVAL;
retry:
        mutex_lock(&bdev_lock);
        /* if someone else claimed, fail */
        if (!bd_may_claim(bdev, holder, hops)) {
                mutex_unlock(&bdev_lock);
                return -EBUSY;
        }

        /* if claiming is already in progress, wait for it to finish */
        if (whole->bd_claiming) {
                wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
                DEFINE_WAIT(wait);

                prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
                mutex_unlock(&bdev_lock);
                schedule();
                finish_wait(wq, &wait);
                goto retry;
        }

        /* yay, all mine */
        whole->bd_claiming = holder;
        mutex_unlock(&bdev_lock);
        return 0;
}
EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */
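
/*
 * Illustrative sketch (an assumption, not this file's code): a caller that
 * only needs to keep other exclusive openers away for a while, without ever
 * finishing the claim, pairs the exported helper above with
 * bd_abort_claiming(), as truncate_bdev_range() does.  "my_claim_token" and
 * reconfigure_device() are hypothetical.
 *
 *	if (bd_prepare_to_claim(bdev, my_claim_token, NULL))
 *		return -EBUSY;
 *	reconfigure_device(bdev);
 *	bd_abort_claiming(bdev, my_claim_token);
 */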

static void bd_clear_claiming(struct block_device *whole, void *holder)
{
        lockdep_assert_held(&bdev_lock);
        /* tell others that we're done */
        BUG_ON(whole->bd_claiming != holder);
        whole->bd_claiming = NULL;
        wake_up_bit(&whole->bd_claiming, 0);
}

/**
 * bd_finish_claiming - finish claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 * @hops: block device holder operations
 *
 * Finish exclusive open of a block device.  Mark the device as exclusively
 * open by the holder and wake up all waiters for exclusive open to finish.
 */
static void bd_finish_claiming(struct block_device *bdev, void *holder,
                const struct blk_holder_ops *hops)
{
        struct block_device *whole = bdev_whole(bdev);

        mutex_lock(&bdev_lock);
        BUG_ON(!bd_may_claim(bdev, holder, hops));
        /*
         * Note that for a whole device bd_holders will be incremented twice,
         * and bd_holder will be set to bd_may_claim before being set to holder
         */
        whole->bd_holders++;
        whole->bd_holder = bd_may_claim;
        bdev->bd_holders++;
        mutex_lock(&bdev->bd_holder_lock);
        bdev->bd_holder = holder;
        bdev->bd_holder_ops = hops;
        mutex_unlock(&bdev->bd_holder_lock);
        bd_clear_claiming(whole, holder);
        mutex_unlock(&bdev_lock);
}

/**
 * bd_abort_claiming - abort claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 *
 * Abort claiming of a block device when the exclusive open failed.  This can
 * also be used when the exclusive open is not actually desired and we just
 * needed to block other exclusive openers for a while.
 */
void bd_abort_claiming(struct block_device *bdev, void *holder)
{
        mutex_lock(&bdev_lock);
        bd_clear_claiming(bdev_whole(bdev), holder);
        mutex_unlock(&bdev_lock);
}
EXPORT_SYMBOL(bd_abort_claiming);

static void bd_end_claim(struct block_device *bdev, void *holder)
{
        struct block_device *whole = bdev_whole(bdev);
        bool unblock = false;

        /*
         * Release a claim on the device.  The holder fields are protected with
         * bdev_lock.  open_mutex is used to synchronize disk_holder unlinking.
         */
        mutex_lock(&bdev_lock);
        WARN_ON_ONCE(bdev->bd_holder != holder);
        WARN_ON_ONCE(--bdev->bd_holders < 0);
        WARN_ON_ONCE(--whole->bd_holders < 0);
        if (!bdev->bd_holders) {
                mutex_lock(&bdev->bd_holder_lock);
                bdev->bd_holder = NULL;
                bdev->bd_holder_ops = NULL;
                mutex_unlock(&bdev->bd_holder_lock);
                if (bdev->bd_write_holder)
                        unblock = true;
        }
        if (!whole->bd_holders)
                whole->bd_holder = NULL;
        mutex_unlock(&bdev_lock);

        /*
         * If this was the last claim, remove the holder link and unblock
         * event polling if it was a write holder.
         */
        if (unblock) {
                disk_unblock_events(bdev->bd_disk);
                bdev->bd_write_holder = false;
        }
}

static void blkdev_flush_mapping(struct block_device *bdev)
{
        WARN_ON_ONCE(bdev->bd_holders);
        sync_blockdev(bdev);
        kill_bdev(bdev);
        bdev_write_inode(bdev);
}

static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode)
{
        struct gendisk *disk = bdev->bd_disk;
        int ret;

        if (disk->fops->open) {
                ret = disk->fops->open(disk, mode);
                if (ret) {
                        /* avoid ghost partitions on a removed medium */
                        if (ret == -ENOMEDIUM &&
                            test_bit(GD_NEED_PART_SCAN, &disk->state))
                                bdev_disk_changed(disk, true);
                        return ret;
                }
        }

        if (!atomic_read(&bdev->bd_openers))
                set_init_blocksize(bdev);
        if (test_bit(GD_NEED_PART_SCAN, &disk->state))
                bdev_disk_changed(disk, false);
        atomic_inc(&bdev->bd_openers);
        return 0;
}

static void blkdev_put_whole(struct block_device *bdev)
{
        if (atomic_dec_and_test(&bdev->bd_openers))
                blkdev_flush_mapping(bdev);
        if (bdev->bd_disk->fops->release)
                bdev->bd_disk->fops->release(bdev->bd_disk);
}

static int blkdev_get_part(struct block_device *part, blk_mode_t mode)
{
        struct gendisk *disk = part->bd_disk;
        int ret;

        ret = blkdev_get_whole(bdev_whole(part), mode);
        if (ret)
                return ret;

        ret = -ENXIO;
        if (!bdev_nr_sectors(part))
                goto out_blkdev_put;

        if (!atomic_read(&part->bd_openers)) {
                disk->open_partitions++;
                set_init_blocksize(part);
        }
        atomic_inc(&part->bd_openers);
        return 0;

out_blkdev_put:
        blkdev_put_whole(bdev_whole(part));
        return ret;
}

static void blkdev_put_part(struct block_device *part)
{
        struct block_device *whole = bdev_whole(part);

        if (atomic_dec_and_test(&part->bd_openers)) {
                blkdev_flush_mapping(part);
                whole->bd_disk->open_partitions--;
        }
        blkdev_put_whole(whole);
}

struct block_device *blkdev_get_no_open(dev_t dev)
{
        struct block_device *bdev;
        struct inode *inode;

        inode = ilookup(blockdev_superblock, dev);
        if (!inode && IS_ENABLED(CONFIG_BLOCK_LEGACY_AUTOLOAD)) {
                blk_request_module(dev);
                inode = ilookup(blockdev_superblock, dev);
                if (inode)
                        pr_warn_ratelimited(
"block device autoloading is deprecated and will be removed.\n");
        }
        if (!inode)
                return NULL;

        /* switch from the inode reference to a device model one: */
        bdev = &BDEV_I(inode)->bdev;
        if (!kobject_get_unless_zero(&bdev->bd_device.kobj))
                bdev = NULL;
        iput(inode);
        return bdev;
}

void blkdev_put_no_open(struct block_device *bdev)
{
        put_device(&bdev->bd_device);
}

/**
 * blkdev_get_by_dev - open a block device by device number
 * @dev: device number of block device to open
 * @mode: open mode (BLK_OPEN_*)
 * @holder: exclusive holder identifier
 * @hops: holder operations
 *
 * Open the block device described by device number @dev.  If @holder is not
 * %NULL, the block device is opened with exclusive access.  Exclusive opens
 * may nest for the same @holder.
 *
 * Use this interface ONLY if you really do not have anything better - i.e. when
 * you are behind a truly sucky interface and all you are given is a device
 * number.  Everything else should use blkdev_get_by_path().
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Reference to the block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_dev(dev_t dev, blk_mode_t mode, void *holder,
                const struct blk_holder_ops *hops)
{
        bool unblock_events = true;
        struct block_device *bdev;
        struct gendisk *disk;
        int ret;

        ret = devcgroup_check_permission(DEVCG_DEV_BLOCK,
                        MAJOR(dev), MINOR(dev),
                        ((mode & BLK_OPEN_READ) ? DEVCG_ACC_READ : 0) |
                        ((mode & BLK_OPEN_WRITE) ? DEVCG_ACC_WRITE : 0));
        if (ret)
                return ERR_PTR(ret);

        bdev = blkdev_get_no_open(dev);
        if (!bdev)
                return ERR_PTR(-ENXIO);
        disk = bdev->bd_disk;

        if (holder) {
                mode |= BLK_OPEN_EXCL;
                ret = bd_prepare_to_claim(bdev, holder, hops);
                if (ret)
                        goto put_blkdev;
        } else {
                if (WARN_ON_ONCE(mode & BLK_OPEN_EXCL)) {
                        ret = -EIO;
                        goto put_blkdev;
                }
        }

        disk_block_events(disk);

        mutex_lock(&disk->open_mutex);
        ret = -ENXIO;
        if (!disk_live(disk))
                goto abort_claiming;
        if (!try_module_get(disk->fops->owner))
                goto abort_claiming;
        if (bdev_is_partition(bdev))
                ret = blkdev_get_part(bdev, mode);
        else
                ret = blkdev_get_whole(bdev, mode);
        if (ret)
                goto put_module;
        if (holder) {
                bd_finish_claiming(bdev, holder, hops);

                /*
                 * Block event polling for write claims if requested.  Any write
                 * holder makes the write_holder state stick until all are
                 * released.  This is good enough and tracking individual
                 * writeable reference is too fragile given the way @mode is
                 * used in blkdev_get/put().
                 */
                if ((mode & BLK_OPEN_WRITE) && !bdev->bd_write_holder &&
                    (disk->event_flags & DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE)) {
                        bdev->bd_write_holder = true;
                        unblock_events = false;
                }
        }
        mutex_unlock(&disk->open_mutex);

        if (unblock_events)
                disk_unblock_events(disk);
        return bdev;
put_module:
        module_put(disk->fops->owner);
abort_claiming:
        if (holder)
                bd_abort_claiming(bdev, holder);
        mutex_unlock(&disk->open_mutex);
        disk_unblock_events(disk);
put_blkdev:
        blkdev_put_no_open(bdev);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(blkdev_get_by_dev);

struct bdev_handle *bdev_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
                                     const struct blk_holder_ops *hops)
{
        struct bdev_handle *handle = kmalloc(sizeof(*handle), GFP_KERNEL);
        struct block_device *bdev;

        if (!handle)
                return ERR_PTR(-ENOMEM);
        bdev = blkdev_get_by_dev(dev, mode, holder, hops);
        if (IS_ERR(bdev)) {
                kfree(handle);
                return ERR_CAST(bdev);
        }
        handle->bdev = bdev;
        handle->holder = holder;
        return handle;
}
EXPORT_SYMBOL(bdev_open_by_dev);
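
/*
 * Illustrative sketch (not part of this file): a consumer that only has a
 * dev_t, e.g. one parsed from a "major:minor" string, opens and later
 * releases the device through the handle-based API above.  The "my_ctx"
 * holder cookie is hypothetical.
 *
 *	struct bdev_handle *handle;
 *
 *	handle = bdev_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
 *				  my_ctx, NULL);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	bdev_release(handle);
 *
 * Passing a non-NULL holder makes the open exclusive; a NULL holder opens
 * the device shared.
 */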

/**
 * blkdev_get_by_path - open a block device by name
 * @path: path to the block device to open
 * @mode: open mode (BLK_OPEN_*)
 * @holder: exclusive holder identifier
 * @hops: holder operations
 *
 * Open the block device described by the device file at @path.  If @holder is
 * not %NULL, the block device is opened with exclusive access.  Exclusive
 * opens may nest for the same @holder.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Reference to the block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_path(const char *path, blk_mode_t mode,
                void *holder, const struct blk_holder_ops *hops)
{
        struct block_device *bdev;
        dev_t dev;
        int error;

        error = lookup_bdev(path, &dev);
        if (error)
                return ERR_PTR(error);

        bdev = blkdev_get_by_dev(dev, mode, holder, hops);
        if (!IS_ERR(bdev) && (mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
                blkdev_put(bdev, holder);
                return ERR_PTR(-EACCES);
        }

        return bdev;
}
EXPORT_SYMBOL(blkdev_get_by_path);

struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode,
                void *holder, const struct blk_holder_ops *hops)
{
        struct bdev_handle *handle;
        dev_t dev;
        int error;

        error = lookup_bdev(path, &dev);
        if (error)
                return ERR_PTR(error);

        handle = bdev_open_by_dev(dev, mode, holder, hops);
        if (!IS_ERR(handle) && (mode & BLK_OPEN_WRITE) &&
            bdev_read_only(handle->bdev)) {
                bdev_release(handle);
                return ERR_PTR(-EACCES);
        }

        return handle;
}
EXPORT_SYMBOL(bdev_open_by_path);

void blkdev_put(struct block_device *bdev, void *holder)
{
        struct gendisk *disk = bdev->bd_disk;

        /*
         * Sync early if it looks like we're the last one.  If someone else
         * opens the block device between now and the decrement of bd_openers
         * then we did a sync that we didn't need to, but that's not the end
         * of the world and we want to avoid long (could be several minutes)
         * syncs while holding the mutex.
         */
        if (atomic_read(&bdev->bd_openers) == 1)
                sync_blockdev(bdev);

        mutex_lock(&disk->open_mutex);
        if (holder)
                bd_end_claim(bdev, holder);

        /*
         * Trigger event checking and tell drivers to flush MEDIA_CHANGE
         * event.  This is to ensure detection of media removal commanded
         * from userland - e.g. eject(1).
         */
        disk_flush_events(disk, DISK_EVENT_MEDIA_CHANGE);

        if (bdev_is_partition(bdev))
                blkdev_put_part(bdev);
        else
                blkdev_put_whole(bdev);
        mutex_unlock(&disk->open_mutex);

        module_put(disk->fops->owner);
        blkdev_put_no_open(bdev);
}
EXPORT_SYMBOL(blkdev_put);

void bdev_release(struct bdev_handle *handle)
{
        blkdev_put(handle->bdev, handle->holder);
        kfree(handle);
}
EXPORT_SYMBOL(bdev_release);
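
/*
 * Illustrative sketch (assumptions, not this file's code): a filesystem or
 * stacking driver that is handed a path string, e.g. "/dev/sdb1", typically
 * opens its backing device like this; using "sb" as the holder and this
 * particular mode are hypothetical choices.
 *
 *	struct bdev_handle *handle;
 *
 *	handle = bdev_open_by_path("/dev/sdb1",
 *				   BLK_OPEN_READ | BLK_OPEN_WRITE, sb, NULL);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *
 * A write open of a read-only device fails with -EACCES before the caller
 * ever sees the handle, as implemented above.
 */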

/**
 * lookup_bdev() - Look up a struct block_device by name.
 * @pathname: Name of the block device in the filesystem.
 * @dev: Pointer to the block device's dev_t, if found.
 *
 * Lookup the block device's dev_t at @pathname in the current
 * namespace if possible and return it in @dev.
 *
 * Context: May sleep.
 * Return: 0 if succeeded, negative errno otherwise.
 */
int lookup_bdev(const char *pathname, dev_t *dev)
{
        struct inode *inode;
        struct path path;
        int error;

        if (!pathname || !*pathname)
                return -EINVAL;

        error = kern_path(pathname, LOOKUP_FOLLOW, &path);
        if (error)
                return error;

        inode = d_backing_inode(path.dentry);
        error = -ENOTBLK;
        if (!S_ISBLK(inode->i_mode))
                goto out_path_put;
        error = -EACCES;
        if (!may_open_dev(&path))
                goto out_path_put;

        *dev = inode->i_rdev;
        error = 0;
out_path_put:
        path_put(&path);
        return error;
}
EXPORT_SYMBOL(lookup_bdev);

/**
 * bdev_mark_dead - mark a block device as dead
 * @bdev: block device to operate on
 * @surprise: indicate a surprise removal
 *
 * Tell the file system that this device or media is dead.  If @surprise is
 * set to %true the device or media is already gone, if not we are preparing
 * for an orderly removal.
 *
 * This calls into the file system, which then typically syncs out all dirty
 * data, writes back inodes and then invalidates any cached data in the
 * inodes on the file system.  In addition we also invalidate the block
 * device mapping.
 */
void bdev_mark_dead(struct block_device *bdev, bool surprise)
{
        mutex_lock(&bdev->bd_holder_lock);
        if (bdev->bd_holder_ops && bdev->bd_holder_ops->mark_dead)
                bdev->bd_holder_ops->mark_dead(bdev, surprise);
        else
                sync_blockdev(bdev);
        mutex_unlock(&bdev->bd_holder_lock);

        invalidate_bdev(bdev);
}
#ifdef CONFIG_DASD_MODULE
/*
 * Drivers should not use this directly, but the DASD driver has historically
 * had a shutdown to offline mode that doesn't actually remove the gendisk
 * that otherwise looks a lot like a safe device removal.
 */
EXPORT_SYMBOL_GPL(bdev_mark_dead);
#endif
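
/*
 * Illustrative sketch (not taken from this file): a holder that wants the
 * mark_dead notification above registers blk_holder_ops when it opens the
 * device.  The my_mark_dead() callback, my_hops and my_holder names are
 * hypothetical.
 *
 *	static void my_mark_dead(struct block_device *bdev, bool surprise)
 *	{
 *		... shut down I/O to the dead device ...
 *	}
 *
 *	static const struct blk_holder_ops my_hops = {
 *		.mark_dead	= my_mark_dead,
 *	};
 *
 *	handle = bdev_open_by_dev(dev, mode, my_holder, &my_hops);
 *
 * When the device goes away, del_gendisk() ends up calling bdev_mark_dead(),
 * which invokes the callback under bd_holder_lock.
 */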

void sync_bdevs(bool wait)
{
        struct inode *inode, *old_inode = NULL;

        spin_lock(&blockdev_superblock->s_inode_list_lock);
        list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
                struct address_space *mapping = inode->i_mapping;
                struct block_device *bdev;

                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
                    mapping->nrpages == 0) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                __iget(inode);
                spin_unlock(&inode->i_lock);
                spin_unlock(&blockdev_superblock->s_inode_list_lock);
                /*
                 * We hold a reference to 'inode' so it couldn't have been
                 * removed from the s_inodes list while we dropped the
                 * s_inode_list_lock.  We cannot iput the inode now as we can
                 * be holding the last reference and we cannot iput it under
                 * s_inode_list_lock.  So we keep the reference and iput it
                 * later.
                 */
                iput(old_inode);
                old_inode = inode;
                bdev = I_BDEV(inode);

                mutex_lock(&bdev->bd_disk->open_mutex);
                if (!atomic_read(&bdev->bd_openers)) {
                        ; /* skip */
                } else if (wait) {
                        /*
                         * We keep the error status of individual mappings so
                         * that applications can catch the writeback error
                         * using fsync(2).  See filemap_fdatawait_keep_errors()
                         * for details.
                         */
                        filemap_fdatawait_keep_errors(inode->i_mapping);
                } else {
                        filemap_fdatawrite(inode->i_mapping);
                }
                mutex_unlock(&bdev->bd_disk->open_mutex);

                spin_lock(&blockdev_superblock->s_inode_list_lock);
        }
        spin_unlock(&blockdev_superblock->s_inode_list_lock);
        iput(old_inode);
}

/*
 * Handle STATX_DIOALIGN for block devices.
 *
 * Note that the inode passed to this is the inode of a block device node file,
 * not the block device's internal inode.  Therefore it is *not* valid to use
 * I_BDEV() here; the block device has to be looked up by i_rdev instead.
 */
void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
{
        struct block_device *bdev;

        bdev = blkdev_get_no_open(inode->i_rdev);
        if (!bdev)
                return;

        stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
        stat->dio_offset_align = bdev_logical_block_size(bdev);
        stat->result_mask |= STATX_DIOALIGN;

        blkdev_put_no_open(bdev);
}
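
/*
 * Illustrative userspace-side sketch (not kernel code): the values filled in
 * above surface through statx(2) with the STATX_DIOALIGN mask, so an
 * application can size and align its O_DIRECT buffers accordingly.  The
 * device path and the buf_align/off_align variables are hypothetical.
 *
 *	struct statx stx;
 *
 *	if (statx(AT_FDCWD, "/dev/nvme0n1", 0, STATX_DIOALIGN, &stx) == 0 &&
 *	    (stx.stx_mask & STATX_DIOALIGN)) {
 *		buf_align = stx.stx_dio_mem_align;
 *		off_align = stx.stx_dio_offset_align;
 *	}
 */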