// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2016 - 2020 Christoph Hellwig
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/part_stat.h>
#include <linux/uaccess.h>
#include <linux/stat.h>
#include "../fs/internal.h"
#include "blk.h"

struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}

struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);

static void bdev_write_inode(struct block_device *bdev)
{
	struct inode *inode = bdev->bd_inode;
	int ret;

	spin_lock(&inode->i_lock);
	while (inode->i_state & I_DIRTY) {
		spin_unlock(&inode->i_lock);
		ret = write_inode_now(inode, true);
		if (ret)
			pr_warn_ratelimited(
	"VFS: Dirty inode writeback failed for block device %pg (err=%d).\n",
				bdev, ret);
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
}

/* Kill _all_ buffers and pagecache, dirty or not. */
static void kill_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping_empty(mapping))
		return;

	invalidate_bh_lrus();
	truncate_inode_pages(mapping, 0);
}

/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages) {
		invalidate_bh_lrus();
		lru_add_drain_all();	/* make sure all lru add caches are flushed */
		invalidate_mapping_pages(mapping, 0, -1);
	}
}
EXPORT_SYMBOL(invalidate_bdev);

/*
 * Drop all buffers & page cache for given bdev range. This function bails
 * with error if bdev has other exclusive owner (such as filesystem).
 */
int truncate_bdev_range(struct block_device *bdev, fmode_t mode,
			loff_t lstart, loff_t lend)
{
	/*
	 * If we don't hold exclusive handle for the device, upgrade to it
	 * while we discard the buffer cache to avoid discarding buffers
	 * under live filesystem.
	 */
	if (!(mode & FMODE_EXCL)) {
		int err = bd_prepare_to_claim(bdev, truncate_bdev_range, NULL);
		if (err)
			goto invalidate;
	}

	truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
	if (!(mode & FMODE_EXCL))
		bd_abort_claiming(bdev, truncate_bdev_range);
	return 0;

invalidate:
	/*
	 * Someone else has the handle exclusively open.  Try invalidating
	 * instead.  The 'end' argument is inclusive so the rounding is safe.
	 */
	return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
					     lstart >> PAGE_SHIFT,
					     lend >> PAGE_SHIFT);
}
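/*
 * Illustrative sketch (not part of this file's code): a discard/zeroing
 * ioctl path that does not already hold FMODE_EXCL would typically call
 * truncate_bdev_range() above like this; "start" and "len" are hypothetical
 * byte values supplied by the caller.
 *
 *	err = truncate_bdev_range(bdev, mode, start, start + len - 1);
 *	if (err)
 *		return err;	/ * may come from the invalidate fallback * /
 */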
static void set_init_blocksize(struct block_device *bdev)
{
	unsigned int bsize = bdev_logical_block_size(bdev);
	loff_t size = i_size_read(bdev->bd_inode);

	while (bsize < PAGE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}

int set_blocksize(struct block_device *bdev, int size)
{
	/* Size must be a power of two, and between 512 and PAGE_SIZE */
	if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (size < bdev_logical_block_size(bdev))
		return -EINVAL;

	/* Don't change the size if it is same as current */
	if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
		sync_blockdev(bdev);
		bdev->bd_inode->i_blkbits = blksize_bits(size);
		kill_bdev(bdev);
	}
	return 0;
}

EXPORT_SYMBOL(set_blocksize);

int sb_set_blocksize(struct super_block *sb, int size)
{
	if (set_blocksize(sb->s_bdev, size))
		return 0;
	/* If we get here, we know size is power of two
	 * and its value is between 512 and PAGE_SIZE */
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int sb_min_blocksize(struct super_block *sb, int size)
{
	int minsize = bdev_logical_block_size(sb->s_bdev);
	if (size < minsize)
		size = minsize;
	return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);
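/*
 * Illustrative sketch: a filesystem's fill_super() commonly uses the two
 * helpers above in tandem, first settling on a minimum block size so it can
 * read the on-disk super block, then switching to the size recorded there.
 * "fs_blocksize" is a hypothetical value read from disk.
 *
 *	blocksize = sb_min_blocksize(sb, 512);
 *	if (!blocksize)
 *		return -EINVAL;
 *	...read the on-disk superblock, extract fs_blocksize...
 *	if (sb->s_blocksize != fs_blocksize &&
 *	    !sb_set_blocksize(sb, fs_blocksize))
 *		return -EINVAL;
 */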
int sync_blockdev_nowait(struct block_device *bdev)
{
	if (!bdev)
		return 0;
	return filemap_flush(bdev->bd_inode->i_mapping);
}
EXPORT_SYMBOL_GPL(sync_blockdev_nowait);

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	if (!bdev)
		return 0;
	return filemap_write_and_wait(bdev->bd_inode->i_mapping);
}
EXPORT_SYMBOL(sync_blockdev);

int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend)
{
	return filemap_write_and_wait_range(bdev->bd_inode->i_mapping,
			lstart, lend);
}
EXPORT_SYMBOL(sync_blockdev_range);

/*
 * Write out and wait upon all dirty data associated with this
 * device.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = sync_filesystem(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}
EXPORT_SYMBOL(fsync_bdev);

/**
 * freeze_bdev - lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can actually unfreeze the frozen filesystem when multiple
 * freeze requests arrive simultaneously.  It counts up in freeze_bdev() and
 * down in thaw_bdev().  When it becomes 0, thaw_bdev() actually unfreezes
 * the filesystem.
 */
int freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (++bdev->bd_fsfreeze_count > 1)
		goto done;

	sb = get_active_super(bdev);
	if (!sb)
		goto sync;
	if (sb->s_op->freeze_super)
		error = sb->s_op->freeze_super(sb);
	else
		error = freeze_super(sb);
	deactivate_super(sb);

	if (error) {
		bdev->bd_fsfreeze_count--;
		goto done;
	}
	bdev->bd_fsfreeze_sb = sb;

sync:
	sync_blockdev(bdev);
done:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev - unlock filesystem
 * @bdev:	blockdevice to unlock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = -EINVAL;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (!bdev->bd_fsfreeze_count)
		goto out;

	error = 0;
	if (--bdev->bd_fsfreeze_count > 0)
		goto out;

	sb = bdev->bd_fsfreeze_sb;
	if (!sb)
		goto out;

	if (sb->s_op->thaw_super)
		error = sb->s_op->thaw_super(sb);
	else
		error = thaw_super(sb);
	if (error)
		bdev->bd_fsfreeze_count++;
	else
		bdev->bd_fsfreeze_sb = NULL;
out:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(thaw_bdev);
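/*
 * Illustrative sketch: a snapshot or backup driver pairs the two calls
 * above around the window in which it needs an on-disk consistent image;
 * nested freezes from several callers are fine because of the
 * bd_fsfreeze_count reference count.  Error handling of thaw is elided.
 *
 *	error = freeze_bdev(bdev);
 *	if (error)
 *		return error;
 *	...take the snapshot...
 *	thaw_bdev(bdev);
 */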
/*
 * pseudo-fs
 */

static  __cacheline_aligned_in_smp DEFINE_MUTEX(bdev_lock);
static struct kmem_cache * bdev_cachep __read_mostly;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
	struct bdev_inode *ei = alloc_inode_sb(sb, bdev_cachep, GFP_KERNEL);

	if (!ei)
		return NULL;
	memset(&ei->bdev, 0, sizeof(ei->bdev));
	return &ei->vfs_inode;
}

static void bdev_free_inode(struct inode *inode)
{
	struct block_device *bdev = I_BDEV(inode);

	free_percpu(bdev->bd_stats);
	kfree(bdev->bd_meta_info);

	if (!bdev_is_partition(bdev)) {
		if (bdev->bd_disk && bdev->bd_disk->bdi)
			bdi_put(bdev->bd_disk->bdi);
		kfree(bdev->bd_disk);
	}

	if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
		blk_free_ext_minor(MINOR(bdev->bd_dev));

	kmem_cache_free(bdev_cachep, BDEV_I(inode));
}

static void init_once(void *data)
{
	struct bdev_inode *ei = data;

	inode_init_once(&ei->vfs_inode);
}

static void bdev_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode); /* is it needed here? */
	clear_inode(inode);
}

static const struct super_operations bdev_sops = {
	.statfs = simple_statfs,
	.alloc_inode = bdev_alloc_inode,
	.free_inode = bdev_free_inode,
	.drop_inode = generic_delete_inode,
	.evict_inode = bdev_evict_inode,
};

static int bd_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	fc->s_iflags |= SB_I_CGROUPWB;
	ctx->ops = &bdev_sops;
	return 0;
}

static struct file_system_type bd_type = {
	.name		= "bdev",
	.init_fs_context = bd_init_fs_context,
	.kill_sb	= kill_anon_super,
};

struct super_block *blockdev_superblock __read_mostly;
EXPORT_SYMBOL_GPL(blockdev_superblock);

void __init bdev_cache_init(void)
{
	int err;
	static struct vfsmount *bd_mnt;

	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_ACCOUNT|SLAB_PANIC),
			init_once);
	err = register_filesystem(&bd_type);
	if (err)
		panic("Cannot register bdev pseudo-fs");
	bd_mnt = kern_mount(&bd_type);
	if (IS_ERR(bd_mnt))
		panic("Cannot create bdev pseudo-fs");
	blockdev_superblock = bd_mnt->mnt_sb;	/* For writeback */
}

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = new_inode(blockdev_superblock);
	if (!inode)
		return NULL;
	inode->i_mode = S_IFBLK;
	inode->i_rdev = 0;
	inode->i_data.a_ops = &def_blk_aops;
	mapping_set_gfp_mask(&inode->i_data, GFP_USER);

	bdev = I_BDEV(inode);
	mutex_init(&bdev->bd_fsfreeze_mutex);
	spin_lock_init(&bdev->bd_size_lock);
	mutex_init(&bdev->bd_holder_lock);
	bdev->bd_partno = partno;
	bdev->bd_inode = inode;
	bdev->bd_queue = disk->queue;
	if (partno)
		bdev->bd_has_submit_bio = disk->part0->bd_has_submit_bio;
	else
		bdev->bd_has_submit_bio = false;
	bdev->bd_stats = alloc_percpu(struct disk_stats);
	if (!bdev->bd_stats) {
		iput(inode);
		return NULL;
	}
	bdev->bd_disk = disk;
	return bdev;
}

void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
{
	spin_lock(&bdev->bd_size_lock);
	i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
	bdev->bd_nr_sectors = sectors;
	spin_unlock(&bdev->bd_size_lock);
}

void bdev_add(struct block_device *bdev, dev_t dev)
{
	bdev->bd_dev = dev;
	bdev->bd_inode->i_rdev = dev;
	bdev->bd_inode->i_ino = dev;
	insert_inode_hash(bdev->bd_inode);
}
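/*
 * Illustrative sketch: the gendisk/partition registration code (outside
 * this file) is the intended consumer of the three helpers above, roughly
 * in this order; "devt" and "length" are hypothetical values computed by
 * that code.
 *
 *	bdev = bdev_alloc(disk, partno);
 *	if (!bdev)
 *		return -ENOMEM;
 *	bdev_set_nr_sectors(bdev, length);
 *	bdev_add(bdev, devt);
 */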
long nr_blockdev_pages(void)
{
	struct inode *inode;
	long ret = 0;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list)
		ret += inode->i_mapping->nrpages;
	spin_unlock(&blockdev_superblock->s_inode_list_lock);

	return ret;
}

/**
 * bd_may_claim - test whether a block device can be claimed
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 * @hops: holder ops
 *
 * Test whether @bdev can be claimed by @holder.
 *
 * RETURNS:
 * %true if @bdev can be claimed, %false otherwise.
 */
static bool bd_may_claim(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops)
{
	struct block_device *whole = bdev_whole(bdev);

	lockdep_assert_held(&bdev_lock);

	if (bdev->bd_holder) {
		/*
		 * The same holder can always re-claim.
		 */
		if (bdev->bd_holder == holder) {
			if (WARN_ON_ONCE(bdev->bd_holder_ops != hops))
				return false;
			return true;
		}
		return false;
	}

	/*
	 * If the whole device's holder is set to bd_may_claim, a partition on
	 * the device is claimed, but not the whole device.
	 */
	if (whole != bdev &&
	    whole->bd_holder && whole->bd_holder != bd_may_claim)
		return false;
	return true;
}

/**
 * bd_prepare_to_claim - claim a block device
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 * @hops: holder ops.
 *
 * Claim @bdev.  This function fails if @bdev is already claimed by another
 * holder and waits if another claiming is in progress.  On successful return,
 * the caller has ownership of bd_claiming and bd_holder[s].
 *
 * RETURNS:
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops)
{
	struct block_device *whole = bdev_whole(bdev);

	if (WARN_ON_ONCE(!holder))
		return -EINVAL;
retry:
	mutex_lock(&bdev_lock);
	/* if someone else claimed, fail */
	if (!bd_may_claim(bdev, holder, hops)) {
		mutex_unlock(&bdev_lock);
		return -EBUSY;
	}

	/* if claiming is already in progress, wait for it to finish */
	if (whole->bd_claiming) {
		wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
		DEFINE_WAIT(wait);

		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&bdev_lock);
		schedule();
		finish_wait(wq, &wait);
		goto retry;
	}

	/* yay, all mine */
	whole->bd_claiming = holder;
	mutex_unlock(&bdev_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */

static void bd_clear_claiming(struct block_device *whole, void *holder)
{
	lockdep_assert_held(&bdev_lock);
	/* tell others that we're done */
	BUG_ON(whole->bd_claiming != holder);
	whole->bd_claiming = NULL;
	wake_up_bit(&whole->bd_claiming, 0);
}
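/*
 * Illustrative sketch: an exclusive opener (blkdev_get_by_dev() later in
 * this file is the canonical user) brackets the actual open with the
 * claiming helpers; on any open failure the claim is dropped with
 * bd_abort_claiming(), otherwise it is completed with bd_finish_claiming()
 * (both defined below).
 *
 *	ret = bd_prepare_to_claim(bdev, holder, hops);
 *	if (ret)
 *		return ret;
 *	ret = ...open the device...;
 *	if (ret)
 *		bd_abort_claiming(bdev, holder);
 *	else
 *		bd_finish_claiming(bdev, holder, hops);
 */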
/**
 * bd_finish_claiming - finish claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 * @hops: holder ops
 *
 * Finish exclusive open of a block device.  Mark the device as exclusively
 * open by the holder and wake up all waiters for exclusive open to finish.
 */
static void bd_finish_claiming(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops)
{
	struct block_device *whole = bdev_whole(bdev);

	mutex_lock(&bdev_lock);
	BUG_ON(!bd_may_claim(bdev, holder, hops));
	/*
	 * Note that for a whole device bd_holders will be incremented twice,
	 * and bd_holder will be set to bd_may_claim before being set to holder
	 */
	whole->bd_holders++;
	whole->bd_holder = bd_may_claim;
	bdev->bd_holders++;
	mutex_lock(&bdev->bd_holder_lock);
	bdev->bd_holder = holder;
	bdev->bd_holder_ops = hops;
	mutex_unlock(&bdev->bd_holder_lock);
	bd_clear_claiming(whole, holder);
	mutex_unlock(&bdev_lock);
}

/**
 * bd_abort_claiming - abort claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 *
 * Abort claiming of a block device when the exclusive open failed.  This can
 * also be used when exclusive open is not actually desired and we just needed
 * to block other exclusive openers for a while.
 */
void bd_abort_claiming(struct block_device *bdev, void *holder)
{
	mutex_lock(&bdev_lock);
	bd_clear_claiming(bdev_whole(bdev), holder);
	mutex_unlock(&bdev_lock);
}
EXPORT_SYMBOL(bd_abort_claiming);

static void bd_end_claim(struct block_device *bdev)
{
	struct block_device *whole = bdev_whole(bdev);
	bool unblock = false;

	/*
	 * Release a claim on the device.  The holder fields are protected with
	 * bdev_lock.  open_mutex is used to synchronize disk_holder unlinking.
	 */
	mutex_lock(&bdev_lock);
	WARN_ON_ONCE(--bdev->bd_holders < 0);
	WARN_ON_ONCE(--whole->bd_holders < 0);
	if (!bdev->bd_holders) {
		mutex_lock(&bdev->bd_holder_lock);
		bdev->bd_holder = NULL;
		bdev->bd_holder_ops = NULL;
		mutex_unlock(&bdev->bd_holder_lock);
		if (bdev->bd_write_holder)
			unblock = true;
	}
	if (!whole->bd_holders)
		whole->bd_holder = NULL;
	mutex_unlock(&bdev_lock);

	/*
	 * If this was the last claim, remove holder link and unblock event
	 * polling if it was a write holder.
	 */
	if (unblock) {
		disk_unblock_events(bdev->bd_disk);
		bdev->bd_write_holder = false;
	}
}
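/*
 * Illustrative sketch of the "block other exclusive openers for a while"
 * use mentioned in the bd_abort_claiming() comment above (this is exactly
 * what truncate_bdev_range() earlier in this file does): claim, do the
 * work, then abort instead of finishing.  "claim_token" is hypothetical.
 *
 *	if (!bd_prepare_to_claim(bdev, claim_token, NULL)) {
 *		...flush or truncate the page cache...
 *		bd_abort_claiming(bdev, claim_token);
 *	}
 */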
static void blkdev_flush_mapping(struct block_device *bdev)
{
	WARN_ON_ONCE(bdev->bd_holders);
	sync_blockdev(bdev);
	kill_bdev(bdev);
	bdev_write_inode(bdev);
}

static int blkdev_get_whole(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	int ret;

	if (disk->fops->open) {
		ret = disk->fops->open(bdev, mode);
		if (ret) {
			/* avoid ghost partitions on a removed medium */
			if (ret == -ENOMEDIUM &&
			    test_bit(GD_NEED_PART_SCAN, &disk->state))
				bdev_disk_changed(disk, true);
			return ret;
		}
	}

	if (!atomic_read(&bdev->bd_openers))
		set_init_blocksize(bdev);
	if (test_bit(GD_NEED_PART_SCAN, &disk->state))
		bdev_disk_changed(disk, false);
	atomic_inc(&bdev->bd_openers);
	return 0;
}

static void blkdev_put_whole(struct block_device *bdev, fmode_t mode)
{
	if (atomic_dec_and_test(&bdev->bd_openers))
		blkdev_flush_mapping(bdev);
	if (bdev->bd_disk->fops->release)
		bdev->bd_disk->fops->release(bdev->bd_disk, mode);
}

static int blkdev_get_part(struct block_device *part, fmode_t mode)
{
	struct gendisk *disk = part->bd_disk;
	int ret;

	ret = blkdev_get_whole(bdev_whole(part), mode);
	if (ret)
		return ret;

	ret = -ENXIO;
	if (!bdev_nr_sectors(part))
		goto out_blkdev_put;

	if (!atomic_read(&part->bd_openers)) {
		disk->open_partitions++;
		set_init_blocksize(part);
	}
	atomic_inc(&part->bd_openers);
	return 0;

out_blkdev_put:
	blkdev_put_whole(bdev_whole(part), mode);
	return ret;
}

static void blkdev_put_part(struct block_device *part, fmode_t mode)
{
	struct block_device *whole = bdev_whole(part);

	if (atomic_dec_and_test(&part->bd_openers)) {
		blkdev_flush_mapping(part);
		whole->bd_disk->open_partitions--;
	}
	blkdev_put_whole(whole, mode);
}

struct block_device *blkdev_get_no_open(dev_t dev)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = ilookup(blockdev_superblock, dev);
	if (!inode && IS_ENABLED(CONFIG_BLOCK_LEGACY_AUTOLOAD)) {
		blk_request_module(dev);
		inode = ilookup(blockdev_superblock, dev);
		if (inode)
			pr_warn_ratelimited(
"block device autoloading is deprecated and will be removed.\n");
	}
	if (!inode)
		return NULL;

	/* switch from the inode reference to a device model one: */
	bdev = &BDEV_I(inode)->bdev;
	if (!kobject_get_unless_zero(&bdev->bd_device.kobj))
		bdev = NULL;
	iput(inode);
	return bdev;
}

void blkdev_put_no_open(struct block_device *bdev)
{
	put_device(&bdev->bd_device);
}
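/*
 * Illustrative sketch: blkdev_get_no_open()/blkdev_put_no_open() give a
 * reference on the block_device without actually opening it;
 * bdev_statx_dioalign() at the bottom of this file follows this pattern.
 *
 *	bdev = blkdev_get_no_open(dev);
 *	if (!bdev)
 *		return;
 *	...inspect queue limits, size, etc...
 *	blkdev_put_no_open(bdev);
 */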
/**
 * blkdev_get_by_dev - open a block device by device number
 * @dev: device number of block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 * @hops: holder operations
 *
 * Open the block device described by device number @dev.  If @mode includes
 * %FMODE_EXCL, the block device is opened with exclusive access.  Specifying
 * %FMODE_EXCL with a %NULL @holder is invalid.  Exclusive opens may nest for
 * the same @holder.
 *
 * Use this interface ONLY if you really do not have anything better - i.e. when
 * you are behind a truly sucky interface and all you are given is a device
 * number.  Everything else should use blkdev_get_by_path().
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Reference to the block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder,
		const struct blk_holder_ops *hops)
{
	bool unblock_events = true;
	struct block_device *bdev;
	struct gendisk *disk;
	int ret;

	ret = devcgroup_check_permission(DEVCG_DEV_BLOCK,
			MAJOR(dev), MINOR(dev),
			((mode & FMODE_READ) ? DEVCG_ACC_READ : 0) |
			((mode & FMODE_WRITE) ? DEVCG_ACC_WRITE : 0));
	if (ret)
		return ERR_PTR(ret);

	bdev = blkdev_get_no_open(dev);
	if (!bdev)
		return ERR_PTR(-ENXIO);
	disk = bdev->bd_disk;

	if (mode & FMODE_EXCL) {
		ret = bd_prepare_to_claim(bdev, holder, hops);
		if (ret)
			goto put_blkdev;
	}

	disk_block_events(disk);

	mutex_lock(&disk->open_mutex);
	ret = -ENXIO;
	if (!disk_live(disk))
		goto abort_claiming;
	if (!try_module_get(disk->fops->owner))
		goto abort_claiming;
	if (bdev_is_partition(bdev))
		ret = blkdev_get_part(bdev, mode);
	else
		ret = blkdev_get_whole(bdev, mode);
	if (ret)
		goto put_module;
	if (mode & FMODE_EXCL) {
		bd_finish_claiming(bdev, holder, hops);

		/*
		 * Block event polling for write claims if requested.  Any write
		 * holder makes the write_holder state stick until all are
		 * released.  This is good enough and tracking individual
		 * writeable reference is too fragile given the way @mode is
		 * used in blkdev_get/put().
		 */
		if ((mode & FMODE_WRITE) && !bdev->bd_write_holder &&
		    (disk->event_flags & DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE)) {
			bdev->bd_write_holder = true;
			unblock_events = false;
		}
	}
	mutex_unlock(&disk->open_mutex);

	if (unblock_events)
		disk_unblock_events(disk);
	return bdev;
put_module:
	module_put(disk->fops->owner);
abort_claiming:
	if (mode & FMODE_EXCL)
		bd_abort_claiming(bdev, holder);
	mutex_unlock(&disk->open_mutex);
	disk_unblock_events(disk);
put_blkdev:
	blkdev_put_no_open(bdev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(blkdev_get_by_dev);
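/*
 * Illustrative sketch: a caller that only has a dev_t opens the device
 * exclusively and must later release it with blkdev_put() using the same
 * mode; "my_holder" is a hypothetical holder cookie owned by the caller.
 *
 *	bdev = blkdev_get_by_dev(devt, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
 *				 my_holder, NULL);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	...use the device...
 *	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 */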
/**
 * blkdev_get_by_path - open a block device by name
 * @path: path to the block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 * @hops: holder operations
 *
 * Open the block device described by the device file at @path.  If @mode
 * includes %FMODE_EXCL, the block device is opened with exclusive access.
 * Specifying %FMODE_EXCL with a %NULL @holder is invalid.  Exclusive opens may
 * nest for the same @holder.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Reference to the block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
		void *holder, const struct blk_holder_ops *hops)
{
	struct block_device *bdev;
	dev_t dev;
	int error;

	error = lookup_bdev(path, &dev);
	if (error)
		return ERR_PTR(error);

	bdev = blkdev_get_by_dev(dev, mode, holder, hops);
	if (!IS_ERR(bdev) && (mode & FMODE_WRITE) && bdev_read_only(bdev)) {
		blkdev_put(bdev, mode);
		return ERR_PTR(-EACCES);
	}

	return bdev;
}
EXPORT_SYMBOL(blkdev_get_by_path);

void blkdev_put(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;

	/*
	 * Sync early if it looks like we're the last one.  If someone else
	 * opens the block device between now and the decrement of bd_openers
	 * then we did a sync that we didn't need to, but that's not the end
	 * of the world and we want to avoid long (could be several minutes)
	 * syncs while holding the mutex.
	 */
	if (atomic_read(&bdev->bd_openers) == 1)
		sync_blockdev(bdev);

	mutex_lock(&disk->open_mutex);
	if (mode & FMODE_EXCL)
		bd_end_claim(bdev);

	/*
	 * Trigger event checking and tell drivers to flush MEDIA_CHANGE
	 * event.  This is to ensure detection of media removal commanded
	 * from userland - e.g. eject(1).
	 */
	disk_flush_events(disk, DISK_EVENT_MEDIA_CHANGE);

	if (bdev_is_partition(bdev))
		blkdev_put_part(bdev, mode);
	else
		blkdev_put_whole(bdev, mode);
	mutex_unlock(&disk->open_mutex);

	module_put(disk->fops->owner);
	blkdev_put_no_open(bdev);
}
EXPORT_SYMBOL(blkdev_put);

/**
 * lookup_bdev() - Look up a struct block_device by name.
 * @pathname: Name of the block device in the filesystem.
 * @dev: Pointer to the block device's dev_t, if found.
 *
 * Lookup the block device's dev_t at @pathname in the current
 * namespace if possible and return it in @dev.
 *
 * Context: May sleep.
 * Return: 0 if succeeded, negative errno otherwise.
 */
int lookup_bdev(const char *pathname, dev_t *dev)
{
	struct inode *inode;
	struct path path;
	int error;

	if (!pathname || !*pathname)
		return -EINVAL;

	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
	if (error)
		return error;

	inode = d_backing_inode(path.dentry);
	error = -ENOTBLK;
	if (!S_ISBLK(inode->i_mode))
		goto out_path_put;
	error = -EACCES;
	if (!may_open_dev(&path))
		goto out_path_put;

	*dev = inode->i_rdev;
	error = 0;
out_path_put:
	path_put(&path);
	return error;
}
EXPORT_SYMBOL(lookup_bdev);
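/*
 * Illustrative sketch: lookup_bdev() only resolves a path to a dev_t and
 * takes no reference on the device, so it can be used on its own by a
 * caller that merely needs to compare device numbers.  The path and the
 * comparison target below are hypothetical.
 *
 *	err = lookup_bdev("/dev/sda1", &devt);
 *	if (!err && sb->s_dev == devt)
 *		...same device...
 */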
int __invalidate_device(struct block_device *bdev, bool kill_dirty)
{
	struct super_block *sb = get_super(bdev);
	int res = 0;

	if (sb) {
		/*
		 * no need to lock the super, get_super holds the
		 * read mutex so the filesystem cannot go away
		 * under us (->put_super runs with the write lock
		 * held).
		 */
		shrink_dcache_sb(sb);
		res = invalidate_inodes(sb, kill_dirty);
		drop_super(sb);
	}
	invalidate_bdev(bdev);
	return res;
}
EXPORT_SYMBOL(__invalidate_device);

void sync_bdevs(bool wait)
{
	struct inode *inode, *old_inode = NULL;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;
		struct block_device *bdev;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
		    mapping->nrpages == 0) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&blockdev_superblock->s_inode_list_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * s_inode_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * s_inode_list_lock.  So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;
		bdev = I_BDEV(inode);

		mutex_lock(&bdev->bd_disk->open_mutex);
		if (!atomic_read(&bdev->bd_openers)) {
			; /* skip */
		} else if (wait) {
			/*
			 * We keep the error status of individual mapping so
			 * that applications can catch the writeback error using
			 * fsync(2). See filemap_fdatawait_keep_errors() for
			 * details.
			 */
			filemap_fdatawait_keep_errors(inode->i_mapping);
		} else {
			filemap_fdatawrite(inode->i_mapping);
		}
		mutex_unlock(&bdev->bd_disk->open_mutex);

		spin_lock(&blockdev_superblock->s_inode_list_lock);
	}
	spin_unlock(&blockdev_superblock->s_inode_list_lock);
	iput(old_inode);
}

/*
 * Handle STATX_DIOALIGN for block devices.
 *
 * Note that the inode passed to this is the inode of a block device node file,
 * not the block device's internal inode.  Therefore it is *not* valid to use
 * I_BDEV() here; the block device has to be looked up by i_rdev instead.
 */
void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
{
	struct block_device *bdev;

	bdev = blkdev_get_no_open(inode->i_rdev);
	if (!bdev)
		return;

	stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
	stat->dio_offset_align = bdev_logical_block_size(bdev);
	stat->result_mask |= STATX_DIOALIGN;

	blkdev_put_no_open(bdev);
}
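/*
 * Worked example for the helper above: on a device whose queue reports a
 * DMA alignment mask of 3 (4-byte aligned buffers are acceptable) and a
 * 512-byte logical block size, statx() reports dio_mem_align = 4 and
 * dio_offset_align = 512.
 */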