raid10.c (51e9ac77035a3dfcb6fc0a88a0d80b6f99b5edb1) raid10.c (7b6d91daee5cac6402186ff224c3af39d79f4a0e)
1/*
2 * raid10.c : Multiple Devices driver for Linux
3 *
4 * Copyright (C) 2000-2004 Neil Brown
5 *
6 * RAID-10 support for md.
7 *
 8 * Based on code in raid1.c. See raid1.c for further copyright information.

--- 785 unchanged lines hidden (view full) ---

794{
795 conf_t *conf = mddev->private;
796 mirror_info_t *mirror;
797 r10bio_t *r10_bio;
798 struct bio *read_bio;
799 int i;
800 int chunk_sects = conf->chunk_mask + 1;
801 const int rw = bio_data_dir(bio);
1/*
2 * raid10.c : Multiple Devices driver for Linux
3 *
4 * Copyright (C) 2000-2004 Neil Brown
5 *
6 * RAID-10 support for md.
7 *
 8 * Based on code in raid1.c. See raid1.c for further copyright information.

--- 785 unchanged lines hidden (view full) ---

794{
795 conf_t *conf = mddev->private;
796 mirror_info_t *mirror;
797 r10bio_t *r10_bio;
798 struct bio *read_bio;
799 int i;
800 int chunk_sects = conf->chunk_mask + 1;
801 const int rw = bio_data_dir(bio);
802 const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
802 const bool do_sync = (bio->bi_rw & REQ_SYNC);
803 struct bio_list bl;
804 unsigned long flags;
805 mdk_rdev_t *blocked_rdev;
806
803 struct bio_list bl;
804 unsigned long flags;
805 mdk_rdev_t *blocked_rdev;
806
807 if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
807 if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
808 md_barrier_request(mddev, bio);
809 return 0;
810 }
811
812 /* If this request crosses a chunk boundary, we need to
813 * split it. This will only happen for 1 PAGE (or less) requests.
814 */
815 if (unlikely( (bio->bi_sector & conf->chunk_mask) + (bio->bi_size >> 9)

--- 4 unchanged lines hidden (view full) ---

820 if (bio->bi_vcnt != 1 ||
821 bio->bi_idx != 0)
822 goto bad_map;
823 /* This is a one page bio that upper layers
824 * refuse to split for us, so we need to split it.
825 */
826 bp = bio_split(bio,
827 chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
808 md_barrier_request(mddev, bio);
809 return 0;
810 }
811
812 /* If this request crosses a chunk boundary, we need to
813 * split it. This will only happen for 1 PAGE (or less) requests.
814 */
815 if (unlikely( (bio->bi_sector & conf->chunk_mask) + (bio->bi_size >> 9)

--- 4 unchanged lines hidden (view full) ---

820 if (bio->bi_vcnt != 1 ||
821 bio->bi_idx != 0)
822 goto bad_map;
823 /* This is a one page bio that upper layers
824 * refuse to split for us, so we need to split it.
825 */
826 bp = bio_split(bio,
827 chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
828
829 /* Each of these 'make_request' calls will call 'wait_barrier'.
830 * If the first succeeds but the second blocks due to the resync
831 * thread raising the barrier, we will deadlock because the
832 * IO to the underlying device will be queued in generic_make_request
833 * and will never complete, so will never reduce nr_pending.
834 * So increment nr_waiting here so no new raise_barriers will
835 * succeed, and so the second wait_barrier cannot block.
836 */
837 spin_lock_irq(&conf->resync_lock);
838 conf->nr_waiting++;
839 spin_unlock_irq(&conf->resync_lock);
840
841 if (make_request(mddev, &bp->bio1))
842 generic_make_request(&bp->bio1);
843 if (make_request(mddev, &bp->bio2))
844 generic_make_request(&bp->bio2);
845
828 if (make_request(mddev, &bp->bio1))
829 generic_make_request(&bp->bio1);
830 if (make_request(mddev, &bp->bio2))
831 generic_make_request(&bp->bio2);
832
846 spin_lock_irq(&conf->resync_lock);
847 conf->nr_waiting--;
848 wake_up(&conf->wait_barrier);
849 spin_unlock_irq(&conf->resync_lock);
850
851 bio_pair_release(bp);
852 return 0;
853 bad_map:
854 printk("md/raid10:%s: make_request bug: can't convert block across chunks"
855 " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
856 (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
857
858 bio_io_error(bio);

--- 33 unchanged lines hidden (view full) ---

892 read_bio = bio_clone(bio, GFP_NOIO);
893
894 r10_bio->devs[slot].bio = read_bio;
895
896 read_bio->bi_sector = r10_bio->devs[slot].addr +
897 mirror->rdev->data_offset;
898 read_bio->bi_bdev = mirror->rdev->bdev;
899 read_bio->bi_end_io = raid10_end_read_request;
833 bio_pair_release(bp);
834 return 0;
835 bad_map:
836 printk("md/raid10:%s: make_request bug: can't convert block across chunks"
837 " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
838 (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
839
840 bio_io_error(bio);

--- 33 unchanged lines hidden (view full) ---

874 read_bio = bio_clone(bio, GFP_NOIO);
875
876 r10_bio->devs[slot].bio = read_bio;
877
878 read_bio->bi_sector = r10_bio->devs[slot].addr +
879 mirror->rdev->data_offset;
880 read_bio->bi_bdev = mirror->rdev->bdev;
881 read_bio->bi_end_io = raid10_end_read_request;
900 read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
882 read_bio->bi_rw = READ | do_sync;
901 read_bio->bi_private = r10_bio;
902
903 generic_make_request(read_bio);
904 return 0;
905 }
906
907 /*
908 * WRITE:

--- 51 unchanged lines hidden (view full) ---

960
961 mbio = bio_clone(bio, GFP_NOIO);
962 r10_bio->devs[i].bio = mbio;
963
964 mbio->bi_sector = r10_bio->devs[i].addr+
965 conf->mirrors[d].rdev->data_offset;
966 mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
967 mbio->bi_end_io = raid10_end_write_request;
883 read_bio->bi_private = r10_bio;
884
885 generic_make_request(read_bio);
886 return 0;
887 }
888
889 /*
890 * WRITE:

--- 51 unchanged lines hidden (view full) ---

942
943 mbio = bio_clone(bio, GFP_NOIO);
944 r10_bio->devs[i].bio = mbio;
945
946 mbio->bi_sector = r10_bio->devs[i].addr+
947 conf->mirrors[d].rdev->data_offset;
948 mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
949 mbio->bi_end_io = raid10_end_write_request;
968 mbio->bi_rw = WRITE | (do_sync << BIO_RW_SYNCIO);
950 mbio->bi_rw = WRITE | do_sync;
969 mbio->bi_private = r10_bio;
970
971 atomic_inc(&r10_bio->remaining);
972 bio_list_add(&bl, mbio);
973 }
974
975 if (unlikely(!atomic_read(&r10_bio->remaining))) {
976 /* the array is dead */

--- 752 unchanged lines hidden (view full) ---

1729 printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
1730 " read error for block %llu\n",
1731 mdname(mddev),
1732 bdevname(bio->bi_bdev,b),
1733 (unsigned long long)r10_bio->sector);
1734 raid_end_bio_io(r10_bio);
1735 bio_put(bio);
1736 } else {
951 mbio->bi_private = r10_bio;
952
953 atomic_inc(&r10_bio->remaining);
954 bio_list_add(&bl, mbio);
955 }
956
957 if (unlikely(!atomic_read(&r10_bio->remaining))) {
958 /* the array is dead */

--- 752 unchanged lines hidden (view full) ---

1711 printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
1712 " read error for block %llu\n",
1713 mdname(mddev),
1714 bdevname(bio->bi_bdev,b),
1715 (unsigned long long)r10_bio->sector);
1716 raid_end_bio_io(r10_bio);
1717 bio_put(bio);
1718 } else {
1737 const bool do_sync = bio_rw_flagged(r10_bio->master_bio, BIO_RW_SYNCIO);
1719 const bool do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
1738 bio_put(bio);
1739 rdev = conf->mirrors[mirror].rdev;
1740 if (printk_ratelimit())
1741 printk(KERN_ERR "md/raid10:%s: %s: redirecting sector %llu to"
1742 " another mirror\n",
1743 mdname(mddev),
1744 bdevname(rdev->bdev,b),
1745 (unsigned long long)r10_bio->sector);
1746 bio = bio_clone(r10_bio->master_bio, GFP_NOIO);
1747 r10_bio->devs[r10_bio->read_slot].bio = bio;
1748 bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
1749 + rdev->data_offset;
1750 bio->bi_bdev = rdev->bdev;
1720 bio_put(bio);
1721 rdev = conf->mirrors[mirror].rdev;
1722 if (printk_ratelimit())
1723 printk(KERN_ERR "md/raid10:%s: %s: redirecting sector %llu to"
1724 " another mirror\n",
1725 mdname(mddev),
1726 bdevname(rdev->bdev,b),
1727 (unsigned long long)r10_bio->sector);
1728 bio = bio_clone(r10_bio->master_bio, GFP_NOIO);
1729 r10_bio->devs[r10_bio->read_slot].bio = bio;
1730 bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
1731 + rdev->data_offset;
1732 bio->bi_bdev = rdev->bdev;
1751 bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
1733 bio->bi_rw = READ | do_sync;
1752 bio->bi_private = r10_bio;
1753 bio->bi_end_io = raid10_end_read_request;
1754 unplug = 1;
1755 generic_make_request(bio);
1756 }
1757 }
1758 cond_resched();
1759 }

--- 766 unchanged lines hidden ---
1734 bio->bi_private = r10_bio;
1735 bio->bi_end_io = raid10_end_read_request;
1736 unplug = 1;
1737 generic_make_request(bio);
1738 }
1739 }
1740 cond_resched();
1741 }

--- 766 unchanged lines hidden ---