--- bdev.c (74e6464a987b2572771ac19163e961777fd0252e)
+++ bdev.c (0718afd47f70cf46877c39c25d06b786e1a3f36c)
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 1991, 1992 Linus Torvalds
  * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
  * Copyright (C) 2016 - 2020 Christoph Hellwig
  */

 #include <linux/init.h>

--- 88 unchanged lines hidden ---

                 loff_t lstart, loff_t lend)
 {
         /*
          * If we don't hold exclusive handle for the device, upgrade to it
          * while we discard the buffer cache to avoid discarding buffers
          * under live filesystem.
          */
         if (!(mode & FMODE_EXCL)) {
-                int err = bd_prepare_to_claim(bdev, truncate_bdev_range);
+                int err = bd_prepare_to_claim(bdev, truncate_bdev_range, NULL);
                 if (err)
                         goto invalidate;
         }

         truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
         if (!(mode & FMODE_EXCL))
                 bd_abort_claiming(bdev, truncate_bdev_range);
         return 0;

--- 296 unchanged lines hidden ---
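With the extra argument in place, callers that only want a short-lived exclusive claim and no holder callbacks simply pass NULL for the new hops parameter, exactly as truncate_bdev_range() does above. A minimal sketch of that pattern follows; my_flush_bdev_exclusively() is a hypothetical helper, not code from this change:

#include <linux/blkdev.h>

static int my_flush_bdev_exclusively(struct block_device *bdev)
{
        /* No holder callbacks wanted, so the new hops argument is NULL. */
        int err = bd_prepare_to_claim(bdev, my_flush_bdev_exclusively, NULL);

        if (err)
                return err;     /* typically -EBUSY */

        /* ... operate on the buffer cache / device while claimed ... */

        /* The claim was never finished, so abort it rather than release it. */
        bd_abort_claiming(bdev, my_flush_bdev_exclusively);
        return 0;
}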

         inode->i_mode = S_IFBLK;
         inode->i_rdev = 0;
         inode->i_data.a_ops = &def_blk_aops;
         mapping_set_gfp_mask(&inode->i_data, GFP_USER);

         bdev = I_BDEV(inode);
         mutex_init(&bdev->bd_fsfreeze_mutex);
         spin_lock_init(&bdev->bd_size_lock);
+        mutex_init(&bdev->bd_holder_lock);
         bdev->bd_partno = partno;
         bdev->bd_inode = inode;
         bdev->bd_queue = disk->queue;
         if (partno)
                 bdev->bd_has_submit_bio = disk->part0->bd_has_submit_bio;
         else
                 bdev->bd_has_submit_bio = false;
         bdev->bd_stats = alloc_percpu(struct disk_stats);

--- 33 unchanged lines hidden ---

         return ret;
 }

 /**
  * bd_may_claim - test whether a block device can be claimed
  * @bdev: block device of interest
  * @holder: holder trying to claim @bdev
+ * @hops: holder ops
  *
  * Test whether @bdev can be claimed by @holder.
  *
  * RETURNS:
  * %true if @bdev can be claimed, %false otherwise.
  */
-static bool bd_may_claim(struct block_device *bdev, void *holder)
+static bool bd_may_claim(struct block_device *bdev, void *holder,
+                const struct blk_holder_ops *hops)
 {
         struct block_device *whole = bdev_whole(bdev);

         lockdep_assert_held(&bdev_lock);

         if (bdev->bd_holder) {
                 /*
                  * The same holder can always re-claim.
                  */
-                if (bdev->bd_holder == holder)
+                if (bdev->bd_holder == holder) {
+                        if (WARN_ON_ONCE(bdev->bd_holder_ops != hops))
+                                return false;
                         return true;
+                }
                 return false;
         }

         /*
          * If the whole devices holder is set to bd_may_claim, a partition on
          * the device is claimed, but not the whole device.
          */
         if (whole != bdev &&
             whole->bd_holder && whole->bd_holder != bd_may_claim)
                 return false;
         return true;
 }

 /**
  * bd_prepare_to_claim - claim a block device
  * @bdev: block device of interest
  * @holder: holder trying to claim @bdev
+ * @hops: holder ops.
  *
  * Claim @bdev. This function fails if @bdev is already claimed by another
  * holder and waits if another claiming is in progress. return, the caller
  * has ownership of bd_claiming and bd_holder[s].
  *
  * RETURNS:
  * 0 if @bdev can be claimed, -EBUSY otherwise.
  */
-int bd_prepare_to_claim(struct block_device *bdev, void *holder)
+int bd_prepare_to_claim(struct block_device *bdev, void *holder,
+                const struct blk_holder_ops *hops)
 {
         struct block_device *whole = bdev_whole(bdev);

         if (WARN_ON_ONCE(!holder))
                 return -EINVAL;
 retry:
         mutex_lock(&bdev_lock);
         /* if someone else claimed, fail */
-        if (!bd_may_claim(bdev, holder)) {
+        if (!bd_may_claim(bdev, holder, hops)) {
                 mutex_unlock(&bdev_lock);
                 return -EBUSY;
         }

         /* if claiming is already in progress, wait for it to finish */
         if (whole->bd_claiming) {
                 wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
                 DEFINE_WAIT(wait);

--- 24 unchanged lines hidden ---

 /**
  * bd_finish_claiming - finish claiming of a block device
  * @bdev: block device of interest
  * @holder: holder that has claimed @bdev
  *
  * Finish exclusive open of a block device. Mark the device as exlusively
  * open by the holder and wake up all waiters for exclusive open to finish.
  */
-static void bd_finish_claiming(struct block_device *bdev, void *holder)
+static void bd_finish_claiming(struct block_device *bdev, void *holder,
+                const struct blk_holder_ops *hops)
 {
         struct block_device *whole = bdev_whole(bdev);

         mutex_lock(&bdev_lock);
-        BUG_ON(!bd_may_claim(bdev, holder));
+        BUG_ON(!bd_may_claim(bdev, holder, hops));
         /*
          * Note that for a whole device bd_holders will be incremented twice,
          * and bd_holder will be set to bd_may_claim before being set to holder
          */
         whole->bd_holders++;
         whole->bd_holder = bd_may_claim;
         bdev->bd_holders++;
+        mutex_lock(&bdev->bd_holder_lock);
         bdev->bd_holder = holder;
+        bdev->bd_holder_ops = hops;
+        mutex_unlock(&bdev->bd_holder_lock);
         bd_clear_claiming(whole, holder);
         mutex_unlock(&bdev_lock);
 }

 /**
  * bd_abort_claiming - abort claiming of a block device
  * @bdev: block device of interest
  * @holder: holder that has claimed @bdev

--- 18 unchanged lines hidden ---

         /*
          * Release a claim on the device. The holder fields are protected with
          * bdev_lock. open_mutex is used to synchronize disk_holder unlinking.
          */
         mutex_lock(&bdev_lock);
         WARN_ON_ONCE(--bdev->bd_holders < 0);
         WARN_ON_ONCE(--whole->bd_holders < 0);
         if (!bdev->bd_holders) {
+                mutex_lock(&bdev->bd_holder_lock);
                 bdev->bd_holder = NULL;
+                bdev->bd_holder_ops = NULL;
+                mutex_unlock(&bdev->bd_holder_lock);
                 if (bdev->bd_write_holder)
                         unblock = true;
         }
         if (!whole->bd_holders)
                 whole->bd_holder = NULL;
         mutex_unlock(&bdev_lock);

         /*

--- 113 unchanged lines hidden ---
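The new bd_holder_lock is what allows code outside bdev_lock to look at bd_holder and bd_holder_ops without racing against the assignments above. A rough illustration of the reader side this enables, assuming the blk_holder_ops introduced by this series carries a mark_dead callback; the helper below is hypothetical, not code from this change:

static void my_notify_holder_gone(struct block_device *bdev)
{
        mutex_lock(&bdev->bd_holder_lock);
        /* Stable view of the holder ops while the mutex is held. */
        if (bdev->bd_holder_ops && bdev->bd_holder_ops->mark_dead)
                bdev->bd_holder_ops->mark_dead(bdev);
        mutex_unlock(&bdev->bd_holder_lock);
}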

         put_device(&bdev->bd_device);
 }

 /**
  * blkdev_get_by_dev - open a block device by device number
  * @dev: device number of block device to open
  * @mode: FMODE_* mask
  * @holder: exclusive holder identifier
+ * @hops: holder operations
  *
  * Open the block device described by device number @dev. If @mode includes
  * %FMODE_EXCL, the block device is opened with exclusive access. Specifying
  * %FMODE_EXCL with a %NULL @holder is invalid. Exclusive opens may nest for
  * the same @holder.
  *
  * Use this interface ONLY if you really do not have anything better - i.e. when
  * you are behind a truly sucky interface and all you are given is a device
  * number. Everything else should use blkdev_get_by_path().
  *
  * CONTEXT:
  * Might sleep.
  *
  * RETURNS:
  * Reference to the block_device on success, ERR_PTR(-errno) on failure.
  */
-struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
+struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder,
+                const struct blk_holder_ops *hops)
 {
         bool unblock_events = true;
         struct block_device *bdev;
         struct gendisk *disk;
         int ret;

         ret = devcgroup_check_permission(DEVCG_DEV_BLOCK,
                         MAJOR(dev), MINOR(dev),
                         ((mode & FMODE_READ) ? DEVCG_ACC_READ : 0) |
                         ((mode & FMODE_WRITE) ? DEVCG_ACC_WRITE : 0));
         if (ret)
                 return ERR_PTR(ret);

         bdev = blkdev_get_no_open(dev);
         if (!bdev)
                 return ERR_PTR(-ENXIO);
         disk = bdev->bd_disk;

         if (mode & FMODE_EXCL) {
-                ret = bd_prepare_to_claim(bdev, holder);
+                ret = bd_prepare_to_claim(bdev, holder, hops);
                 if (ret)
                         goto put_blkdev;
         }

         disk_block_events(disk);

         mutex_lock(&disk->open_mutex);
         ret = -ENXIO;
         if (!disk_live(disk))
                 goto abort_claiming;
         if (!try_module_get(disk->fops->owner))
                 goto abort_claiming;
         if (bdev_is_partition(bdev))
                 ret = blkdev_get_part(bdev, mode);
         else
                 ret = blkdev_get_whole(bdev, mode);
         if (ret)
                 goto put_module;
         if (mode & FMODE_EXCL) {
-                bd_finish_claiming(bdev, holder);
+                bd_finish_claiming(bdev, holder, hops);

                 /*
                  * Block event polling for write claims if requested. Any write
                  * holder makes the write_holder state stick until all are
                  * released. This is good enough and tracking individual
                  * writeable reference is too fragile given the way @mode is
                  * used in blkdev_get/put().
                  */

--- 34 unchanged lines hidden ---
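A hedged sketch of how a caller might use the extended blkdev_get_by_dev() to register holder callbacks on an exclusive open; the contents of blk_holder_ops (a mark_dead hook) and all my_* names are assumptions, not taken from this diff:

#include <linux/fs.h>
#include <linux/blkdev.h>

static void my_bdev_mark_dead(struct block_device *bdev)
{
        /* The device is going away; shut down whatever sits on top of it. */
}

static const struct blk_holder_ops my_holder_ops = {
        .mark_dead      = my_bdev_mark_dead,
};

static struct block_device *my_open_bdev(dev_t devt, void *owner)
{
        /* @owner doubles as the exclusive holder cookie. */
        return blkdev_get_by_dev(devt, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
                                 owner, &my_holder_ops);
}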

  *
  * CONTEXT:
  * Might sleep.
  *
  * RETURNS:
  * Reference to the block_device on success, ERR_PTR(-errno) on failure.
  */
 struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
-                void *holder)
+                void *holder, const struct blk_holder_ops *hops)
 {
         struct block_device *bdev;
         dev_t dev;
         int error;

         error = lookup_bdev(path, &dev);
         if (error)
                 return ERR_PTR(error);

-        bdev = blkdev_get_by_dev(dev, mode, holder);
+        bdev = blkdev_get_by_dev(dev, mode, holder, hops);
         if (!IS_ERR(bdev) && (mode & FMODE_WRITE) && bdev_read_only(bdev)) {
                 blkdev_put(bdev, mode);
                 return ERR_PTR(-EACCES);
         }

         return bdev;
 }
 EXPORT_SYMBOL(blkdev_get_by_path);

--- 171 unchanged lines hidden ---
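And a sketch of the corresponding blkdev_get_by_path() convention after this change, with NULL passed for @hops by callers that do not want holder notifications; my_open_by_path() is a made-up helper, not part of the commit:

#include <linux/fs.h>
#include <linux/err.h>
#include <linux/blkdev.h>

static int my_open_by_path(const char *path, void *owner)
{
        const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
        struct block_device *bdev;

        bdev = blkdev_get_by_path(path, mode, owner, NULL);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);   /* e.g. -EACCES for a read-only device */

        /* ... use the device ... */

        blkdev_put(bdev, mode);
        return 0;
}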