raid5.c: diff between commits 8c57a5e7b2820f349c95b8c8393fec1e0f4070d2 (before) and 70246286e94c335b5bea0cbc68a17a96dd620281 (after). Removed lines are shown first, with their replacements immediately below; line numbers refer to the respective file version.
1/*
2 * raid5.c : Multiple Devices driver for Linux
3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
4 * Copyright (C) 1999, 2000 Ingo Molnar
5 * Copyright (C) 2002, 2003 H. Peter Anvin
6 *
7 * RAID-4/5/6 management functions.
8 * Thanks to Penguin Computing for making the RAID-6 development possible

--- 792 unchanged lines hidden ---

801 goto unlock_out;
802
803 if (sh->batch_head)
804 goto unlock_out;
805
806 dd_idx = 0;
807 while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
808 dd_idx++;
809 if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw)
809 if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw ||
810 bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite))
810 goto unlock_out;
811
812 if (head->batch_head) {
813 spin_lock(&head->batch_head->batch_lock);
814 /* This batch list is already running */
815 if (!stripe_can_batch(head)) {
816 spin_unlock(&head->batch_head->batch_lock);
817 goto unlock_out;
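With the operation split out of bi_rw, two pending writes can carry identical flag bits yet denote different operations, so the batching check above now compares both the flags and bio_op() before stripes may be batched. A minimal sketch of the rule (writes_match is a hypothetical name, not a helper in raid5.c):

    /* Batching is only safe when the queued writes agree on the
     * modifier flags and on the operation itself. */
    static bool writes_match(struct bio *a, struct bio *b)
    {
            return a->bi_rw == b->bi_rw && bio_op(a) == bio_op(b);
    }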

--- 68 unchanged lines hidden ---

886 int i, disks = sh->disks;
887 struct stripe_head *head_sh = sh;
888
889 might_sleep();
890
891 if (r5l_write_stripe(conf->log, sh) == 0)
892 return;
893 for (i = disks; i--; ) {
894 int rw;
895 int op, op_flags = 0;
895 int replace_only = 0;
896 struct bio *bi, *rbi;
897 struct md_rdev *rdev, *rrdev = NULL;
898
899 sh = head_sh;
900 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
902 op = REQ_OP_WRITE;
901 if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
902 rw = WRITE_FUA;
903 else
904 rw = WRITE;
904 op_flags = WRITE_FUA;
905 if (test_bit(R5_Discard, &sh->dev[i].flags))
906 rw |= REQ_DISCARD;
906 op = REQ_OP_DISCARD;
907 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
908 rw = READ;
908 op = REQ_OP_READ;
909 else if (test_and_clear_bit(R5_WantReplace,
910 &sh->dev[i].flags)) {
911 rw = WRITE;
911 op = REQ_OP_WRITE;
912 replace_only = 1;
913 } else
914 continue;
915 if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
916 rw |= REQ_SYNC;
916 op_flags |= REQ_SYNC;
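The lines above are the heart of the conversion: the single rw word, which used to fuse the request type with its modifiers, becomes an operation (op) plus separate flags (op_flags). A condensed sketch of the resulting classification, where is_write, want_fua, want_discard and want_sync are illustrative stand-ins for the R5_* flag tests:

    int op, op_flags = 0;

    if (is_write) {
            op = REQ_OP_WRITE;               /* the base operation */
            if (want_fua)
                    op_flags = WRITE_FUA;    /* FUA is a modifier flag, no longer a distinct rw value */
            if (want_discard)
                    op = REQ_OP_DISCARD;     /* discard is a first-class op, no longer a flag bit */
    } else {
            op = REQ_OP_READ;
    }
    if (want_sync)
            op_flags |= REQ_SYNC;            /* modifiers still OR together */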
917
918again:
919 bi = &sh->dev[i].req;
920 rbi = &sh->dev[i].rreq; /* For writing to replacement */
921
922 rcu_read_lock();
923 rrdev = rcu_dereference(conf->disks[i].replacement);
924 smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
925 rdev = rcu_dereference(conf->disks[i].rdev);
926 if (!rdev) {
927 rdev = rrdev;
928 rrdev = NULL;
929 }
930 if (rw & WRITE) {
930 if (op_is_write(op)) {
931 if (replace_only)
932 rdev = NULL;
933 if (rdev == rrdev)
934 /* We raced and saw duplicates */
935 rrdev = NULL;
936 } else {
937 if (test_bit(R5_ReadRepl, &head_sh->dev[i].flags) && rrdev)
938 rdev = rrdev;

--- 9 unchanged lines hidden ---

948 if (rrdev)
949 atomic_inc(&rrdev->nr_pending);
950 rcu_read_unlock();
951
952 /* We have already checked bad blocks for reads. Now
953 * need to check for writes. We never accept write errors
954 * on the replacement, so we don't need to check rrdev.
955 */
956 while ((rw & WRITE) && rdev &&
956 while (op_is_write(op) && rdev &&
957 test_bit(WriteErrorSeen, &rdev->flags)) {
958 sector_t first_bad;
959 int bad_sectors;
960 int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
961 &first_bad, &bad_sectors);
962 if (!bad)
963 break;
964
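Each former (rw & WRITE) test becomes op_is_write(op): REQ_OP_* values are enumerated operations rather than ORed bits, so masking against WRITE is no longer meaningful. In spirit (a simplification for illustration, not the kernel's exact definition):

    /* Does this operation modify the medium?  Write, discard and
     * friends do; a plain read does not. */
    static inline bool op_modifies_media(unsigned int op)
    {
            return op != REQ_OP_READ;        /* hypothetical simplification */
    }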

--- 25 unchanged lines hidden ---

990 if (s->syncing || s->expanding || s->expanded
991 || s->replacing)
992 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
993
994 set_bit(STRIPE_IO_STARTED, &sh->state);
995
996 bio_reset(bi);
997 bi->bi_bdev = rdev->bdev;
998 bi->bi_rw = rw;
999 bi->bi_end_io = (rw & WRITE)
998 bio_set_op_attrs(bi, op, op_flags);
999 bi->bi_end_io = op_is_write(op)
1000 ? raid5_end_write_request
1001 : raid5_end_read_request;
1002 bi->bi_private = sh;
1003
1004 pr_debug("%s: for %llu schedule op %ld on disc %d\n",
1004 pr_debug("%s: for %llu schedule op %d on disc %d\n",
1005 __func__, (unsigned long long)sh->sector,
1006 bi->bi_rw, i);
1007 atomic_inc(&sh->count);
1008 if (sh != head_sh)
1009 atomic_inc(&head_sh->count);
1010 if (use_new_offset(conf, sh))
1011 bi->bi_iter.bi_sector = (sh->sector
1012 + rdev->new_data_offset);
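Instead of assigning a fused rw word (bi->bi_rw = rw), the bio is now stamped with bio_set_op_attrs(bi, op, op_flags), which encodes the operation and its modifier flags in one step; the pr_debug format correspondingly narrows from %ld to %d. A hedged sketch of the setup pattern for a synchronous write:

    bio_reset(bi);
    bi->bi_bdev = rdev->bdev;                     /* target device, as above */
    bio_set_op_attrs(bi, REQ_OP_WRITE, REQ_SYNC); /* rather than bi->bi_rw = WRITE | REQ_SYNC */
    bi->bi_end_io = raid5_end_write_request;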

--- 9 unchanged lines hidden ---

1022 bi->bi_vcnt = 1;
1023 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
1024 bi->bi_io_vec[0].bv_offset = 0;
1025 bi->bi_iter.bi_size = STRIPE_SIZE;
1026 /*
1027 * If this is discard request, set bi_vcnt 0. We don't
1028 * want to confuse SCSI because SCSI will replace payload
1029 */
1030 if (rw & REQ_DISCARD)
1030 if (op == REQ_OP_DISCARD)
1031 bi->bi_vcnt = 0;
1032 if (rrdev)
1033 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
1034
1035 if (conf->mddev->gendisk)
1036 trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
1037 bi, disk_devt(conf->mddev->gendisk),
1038 sh->dev[i].sector);
1039 generic_make_request(bi);
1040 }
1041 if (rrdev) {
1042 if (s->syncing || s->expanding || s->expanded
1043 || s->replacing)
1044 md_sync_acct(rrdev->bdev, STRIPE_SECTORS);
1045
1046 set_bit(STRIPE_IO_STARTED, &sh->state);
1047
1048 bio_reset(rbi);
1049 rbi->bi_bdev = rrdev->bdev;
1050 rbi->bi_rw = rw;
1051 BUG_ON(!(rw & WRITE));
1050 bio_set_op_attrs(rbi, op, op_flags);
1051 BUG_ON(!op_is_write(op));
1052 rbi->bi_end_io = raid5_end_write_request;
1053 rbi->bi_private = sh;
1054
1055 pr_debug("%s: for %llu schedule op %ld on "
1055 pr_debug("%s: for %llu schedule op %d on "
1056 "replacement disc %d\n",
1057 __func__, (unsigned long long)sh->sector,
1058 rbi->bi_rw, i);
1059 atomic_inc(&sh->count);
1060 if (sh != head_sh)
1061 atomic_inc(&head_sh->count);
1062 if (use_new_offset(conf, sh))
1063 rbi->bi_iter.bi_sector = (sh->sector

--- 7 unchanged lines hidden ---

1071 rbi->bi_vcnt = 1;
1072 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
1073 rbi->bi_io_vec[0].bv_offset = 0;
1074 rbi->bi_iter.bi_size = STRIPE_SIZE;
1075 /*
1076 * If this is discard request, set bi_vcnt 0. We don't
1077 * want to confuse SCSI because SCSI will replace payload
1078 */
1056 "replacement disc %d\n",
1057 __func__, (unsigned long long)sh->sector,
1058 rbi->bi_rw, i);
1059 atomic_inc(&sh->count);
1060 if (sh != head_sh)
1061 atomic_inc(&head_sh->count);
1062 if (use_new_offset(conf, sh))
1063 rbi->bi_iter.bi_sector = (sh->sector

--- 7 unchanged lines hidden (view full) ---

1071 rbi->bi_vcnt = 1;
1072 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
1073 rbi->bi_io_vec[0].bv_offset = 0;
1074 rbi->bi_iter.bi_size = STRIPE_SIZE;
1075 /*
1076 * If this is discard request, set bi_vcnt 0. We don't
1077 * want to confuse SCSI because SCSI will replace payload
1078 */
1079 if (rw & REQ_DISCARD)
1079 if (op == REQ_OP_DISCARD)
1080 rbi->bi_vcnt = 0;
1081 if (conf->mddev->gendisk)
1082 trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
1083 rbi, disk_devt(conf->mddev->gendisk),
1084 sh->dev[i].sector);
1085 generic_make_request(rbi);
1086 }
1087 if (!rdev && !rrdev) {
1088 if (rw & WRITE)
1088 if (op_is_write(op))
1089 set_bit(STRIPE_DEGRADED, &sh->state);
1090 pr_debug("skip op %ld on disc %d for sector %llu\n",
1090 pr_debug("skip op %d on disc %d for sector %llu\n",
1091 bi->bi_rw, i, (unsigned long long)sh->sector);
1092 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1093 set_bit(STRIPE_HANDLE, &sh->state);
1094 }
1095
1096 if (!head_sh->batch_head)
1097 continue;
1098 sh = list_first_entry(&sh->batch_list, struct stripe_head,

--- 519 unchanged lines hidden ---

1618 WARN_ON(dev->page != dev->orig_page);
1619
1620 while (wbi && wbi->bi_iter.bi_sector <
1621 dev->sector + STRIPE_SECTORS) {
1622 if (wbi->bi_rw & REQ_FUA)
1623 set_bit(R5_WantFUA, &dev->flags);
1624 if (wbi->bi_rw & REQ_SYNC)
1625 set_bit(R5_SyncIO, &dev->flags);
1626 if (wbi->bi_rw & REQ_DISCARD)
1626 if (bio_op(wbi) == REQ_OP_DISCARD)
1627 set_bit(R5_Discard, &dev->flags);
1628 else {
1629 tx = async_copy_data(1, wbi, &dev->page,
1630 dev->sector, tx, sh);
1631 if (dev->page != dev->orig_page) {
1632 set_bit(R5_SkipCopy, &dev->flags);
1633 clear_bit(R5_UPTODATE, &dev->flags);
1634 clear_bit(R5_OVERWRITE, &dev->flags);
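The same split shows up when classifying incoming bios: modifiers such as REQ_FUA and REQ_SYNC remain bit tests on bi_rw, while discard must be recognized by comparing bio_op(), since REQ_DISCARD no longer exists as a flag bit and masking for it would quietly never match. Compactly:

    /* Modifier flags: bit tests.  The operation: an equality compare. */
    bool fua     = wbi->bi_rw & REQ_FUA;            /* still a flag */
    bool discard = bio_op(wbi) == REQ_OP_DISCARD;   /* now an op */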

--- 874 unchanged lines hidden ---

2509 spin_lock_irqsave(&conf->device_lock, flags);
2510 clear_bit(In_sync, &rdev->flags);
2511 mddev->degraded = calc_degraded(conf);
2512 spin_unlock_irqrestore(&conf->device_lock, flags);
2513 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2514
2515 set_bit(Blocked, &rdev->flags);
2516 set_bit(Faulty, &rdev->flags);
2517 set_bit(MD_CHANGE_DEVS, &mddev->flags);
2518 set_bit(MD_CHANGE_PENDING, &mddev->flags);
2517 set_mask_bits(&mddev->flags, 0,
2518 BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
2519 printk(KERN_ALERT
2520 "md/raid:%s: Disk failure on %s, disabling device.\n"
2521 "md/raid:%s: Operation continuing on %d devices.\n",
2522 mdname(mddev),
2523 bdevname(rdev->bdev, b),
2524 mdname(mddev),
2525 conf->raid_disks - mddev->degraded);
2526}
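In the error handler above, the two back-to-back set_bit() calls become one set_mask_bits(), so MD_CHANGE_DEVS and MD_CHANGE_PENDING are raised in a single atomic step and no observer can see one flag without the other. The macro is roughly a cmpxchg loop of this shape (a sketch, not the exact bitops.h source; mask and bits stand for the macro's arguments):

    unsigned long old, new;

    do {
            old = READ_ONCE(mddev->flags);
            new = (old & ~mask) | bits;   /* here mask == 0, bits == BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING) */
    } while (cmpxchg(&mddev->flags, old, new) != old);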

--- 2618 unchanged lines hidden ---

5145 sector_t new_sector;
5146 sector_t logical_sector, last_sector;
5147 struct stripe_head *sh;
5148 const int rw = bio_data_dir(bi);
5149 int remaining;
5150 DEFINE_WAIT(w);
5151 bool do_prepare;
5152
5153 if (unlikely(bi->bi_rw & REQ_FLUSH)) {
5153 if (unlikely(bi->bi_rw & REQ_PREFLUSH)) {
5154 int ret = r5l_handle_flush_request(conf->log, bi);
5155
5156 if (ret == 0)
5157 return;
5158 if (ret == -ENODEV) {
5159 md_flush_request(mddev, bi);
5160 return;
5161 }
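REQ_FLUSH is renamed REQ_PREFLUSH in the same series: the flag now states explicitly that the device cache is flushed before this bio's data is written (a data-less flush is handled as its own operation at the request layer). The shape of the test is unchanged:

    /* Only the flag name changes; a preflush-marked bio is still
     * diverted to the raid5-cache / md flush machinery first. */
    if (unlikely(bi->bi_rw & REQ_PREFLUSH))
            /* handled before any data is striped */;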

--- 9 unchanged lines hidden ---

5171 */
5172 if (rw == READ && mddev->degraded == 0 &&
5173 mddev->reshape_position == MaxSector) {
5174 bi = chunk_aligned_read(mddev, bi);
5175 if (!bi)
5176 return;
5177 }
5178
5179 if (unlikely(bi->bi_rw & REQ_DISCARD)) {
5179 if (unlikely(bio_op(bi) == REQ_OP_DISCARD)) {
5180 make_discard_request(mddev, bi);
5181 return;
5182 }
5183
5184 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
5185 last_sector = bio_end_sector(bi);
5186 bi->bi_next = NULL;
5187 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */

--- 40 unchanged lines hidden ---

5228 new_sector = raid5_compute_sector(conf, logical_sector,
5229 previous,
5230 &dd_idx, NULL);
5231 pr_debug("raid456: raid5_make_request, sector %llu logical %llu\n",
5232 (unsigned long long)new_sector,
5233 (unsigned long long)logical_sector);
5234
5235 sh = raid5_get_active_stripe(conf, new_sector, previous,
5236 (bi->bi_rw&RWA_MASK), 0);
5236 (bi->bi_rw & REQ_RAHEAD), 0);
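Readahead loses its special encoding too: the old READA/RWA_MASK bit becomes the plain REQ_RAHEAD flag. The extracted bit still feeds raid5_get_active_stripe()'s noblock argument, meaning a readahead bio would rather give up than wait for a free stripe. A sketch with the flag pulled into a named temporary (noblock as a local is illustrative only):

    /* Readahead is best-effort: if no stripe is available, fail the
     * hint rather than block the submitter. */
    bool noblock = bi->bi_rw & REQ_RAHEAD;   /* was: bi->bi_rw & RWA_MASK */
    sh = raid5_get_active_stripe(conf, new_sector, previous, noblock, 0);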
5237 if (sh) {
5238 if (unlikely(previous)) {
5239 /* expansion might have moved on while waiting for a
5240 * stripe, so we must do the range check again.
5241 * Expansion could still move past after this
5242 * test, but as we are holding a reference to
5243 * 'sh', we know that if that happens,
5244 * STRIPE_EXPANDING will get set and the expansion

--- 2322 unchanged lines hidden ---

7567static void raid5_finish_reshape(struct mddev *mddev)
7568{
7569 struct r5conf *conf = mddev->private;
7570
7571 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
7572
7573 if (mddev->delta_disks > 0) {
7574 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
7575 set_capacity(mddev->gendisk, mddev->array_sectors);
7576 revalidate_disk(mddev->gendisk);
7575 if (mddev->queue) {
7576 set_capacity(mddev->gendisk, mddev->array_sectors);
7577 revalidate_disk(mddev->gendisk);
7578 }
7577 } else {
7578 int d;
7579 spin_lock_irq(&conf->device_lock);
7580 mddev->degraded = calc_degraded(conf);
7581 spin_unlock_irq(&conf->device_lock);
7582 for (d = conf->raid_disks ;
7583 d < conf->raid_disks - mddev->delta_disks;
7584 d++) {
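The raid5_finish_reshape() hunk is the one change here unrelated to the op conversion: capacity is only propagated when the array owns a request queue. Presumably this guards stacked setups (e.g. arrays driven through dm-raid) where mddev->queue and mddev->gendisk are NULL, so the previously unconditional set_capacity()/revalidate_disk() would dereference a NULL gendisk:

    if (mddev->queue) {      /* NULL when md has no gendisk of its own */
            set_capacity(mddev->gendisk, mddev->array_sectors);
            revalidate_disk(mddev->gendisk);
    }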

--- 394 unchanged lines hidden ---