Lines matching refs: conf

46 static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
47 static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
241 static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio) in put_all_bios() argument
245 for (i = 0; i < conf->raid_disks * 2; i++) { in put_all_bios()
255 struct r1conf *conf = r1_bio->mddev->private; in free_r1bio() local
257 put_all_bios(conf, r1_bio); in free_r1bio()
258 mempool_free(r1_bio, &conf->r1bio_pool); in free_r1bio()
263 struct r1conf *conf = r1_bio->mddev->private; in put_buf() local
267 for (i = 0; i < conf->raid_disks * 2; i++) { in put_buf()
270 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev); in put_buf()
273 mempool_free(r1_bio, &conf->r1buf_pool); in put_buf()
275 lower_barrier(conf, sect); in put_buf()
282 struct r1conf *conf = mddev->private; in reschedule_retry() local
286 spin_lock_irqsave(&conf->device_lock, flags); in reschedule_retry()
287 list_add(&r1_bio->retry_list, &conf->retry_list); in reschedule_retry()
288 atomic_inc(&conf->nr_queued[idx]); in reschedule_retry()
289 spin_unlock_irqrestore(&conf->device_lock, flags); in reschedule_retry()
291 wake_up(&conf->wait_barrier); in reschedule_retry()
313 struct r1conf *conf = r1_bio->mddev->private; in raid_end_bio_io() local
331 allow_barrier(conf, sector); in raid_end_bio_io()
339 struct r1conf *conf = r1_bio->mddev->private; in update_head_pos() local
341 conf->mirrors[disk].head_position = in update_head_pos()
351 struct r1conf *conf = r1_bio->mddev->private; in find_bio_disk() local
352 int raid_disks = conf->raid_disks; in find_bio_disk()
368 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_read_request() local
369 struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev; in raid1_end_read_request()
389 spin_lock_irqsave(&conf->device_lock, flags); in raid1_end_read_request()
390 if (r1_bio->mddev->degraded == conf->raid_disks || in raid1_end_read_request()
391 (r1_bio->mddev->degraded == conf->raid_disks-1 && in raid1_end_read_request()
394 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_end_read_request()
399 rdev_dec_pending(rdev, conf->mddev); in raid1_end_read_request()
405 mdname(conf->mddev), in raid1_end_read_request()
450 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_write_request() local
453 struct md_rdev *rdev = conf->mirrors[mirror].rdev; in raid1_end_write_request()
467 conf->mddev->recovery); in raid1_end_write_request()
553 rdev_dec_pending(rdev, conf->mddev); in raid1_end_write_request()
598 static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors) in read_balance() argument
630 if ((conf->mddev->recovery_cp < this_sector + sectors) || in read_balance()
631 (mddev_is_clustered(conf->mddev) && in read_balance()
632 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, in read_balance()
638 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { in read_balance()
645 rdev = rcu_dereference(conf->mirrors[disk].rdev); in read_balance()
712 dist = abs(this_sector - conf->mirrors[disk].head_position); in read_balance()
718 if (conf->mirrors[disk].next_seq_sect == this_sector in read_balance()
721 struct raid1_info *mirror = &conf->mirrors[disk]; in read_balance()
776 rdev = rcu_dereference(conf->mirrors[best_disk].rdev); in read_balance()
782 if (conf->mirrors[best_disk].next_seq_sect != this_sector) in read_balance()
783 conf->mirrors[best_disk].seq_start = this_sector; in read_balance()
785 conf->mirrors[best_disk].next_seq_sect = this_sector + sectors; in read_balance()
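
The read_balance() lines above show RAID1's read-side mirror choice: if a mirror's next_seq_sect matches this_sector the stream stays on that disk, otherwise the mirror with the smallest head-position distance wins. A minimal userspace C sketch of that heuristic, with simplified struct and field names assumed (the kernel additionally weighs bad blocks, write-mostly flags, and in-flight counts):

    typedef unsigned long long sector_t;

    struct mirror_info {
        sector_t head_position;   /* where the head last ended up */
        sector_t next_seq_sect;   /* next sector if reads stay sequential */
        int      usable;          /* 0 = faulty or missing, skip it */
    };

    int pick_read_disk(const struct mirror_info *m, int ndisks,
                       sector_t this_sector)
    {
        int best = -1;
        sector_t best_dist = (sector_t)-1;

        for (int d = 0; d < ndisks; d++) {
            if (!m[d].usable)
                continue;
            if (m[d].next_seq_sect == this_sector)
                return d;         /* sequential hit: keep the stream here */
            sector_t dist = this_sector > m[d].head_position
                          ? this_sector - m[d].head_position
                          : m[d].head_position - this_sector;
            if (dist < best_dist) {
                best_dist = dist;
                best = d;
            }
        }
        return best;              /* -1 if no mirror is usable */
    }
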
793 static void wake_up_barrier(struct r1conf *conf) in wake_up_barrier() argument
795 if (wq_has_sleeper(&conf->wait_barrier)) in wake_up_barrier()
796 wake_up(&conf->wait_barrier); in wake_up_barrier()
799 static void flush_bio_list(struct r1conf *conf, struct bio *bio) in flush_bio_list() argument
802 raid1_prepare_flush_writes(conf->mddev->bitmap); in flush_bio_list()
803 wake_up_barrier(conf); in flush_bio_list()
814 static void flush_pending_writes(struct r1conf *conf) in flush_pending_writes() argument
819 spin_lock_irq(&conf->device_lock); in flush_pending_writes()
821 if (conf->pending_bio_list.head) { in flush_pending_writes()
825 bio = bio_list_get(&conf->pending_bio_list); in flush_pending_writes()
826 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
839 flush_bio_list(conf, bio); in flush_pending_writes()
842 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
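
flush_pending_writes() above follows a common drain pattern: detach the entire pending list while holding device_lock, then submit the bios with the lock dropped so completion work never runs under it. A self-contained userspace analogue, all names assumed:

    #include <pthread.h>
    #include <stddef.h>

    struct bio { struct bio *next; };

    static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct bio *pending_bio_list;

    static void submit_bio_sketch(struct bio *b) { (void)b; /* issue the I/O */ }

    void flush_pending_writes_sketch(void)
    {
        pthread_mutex_lock(&device_lock);
        struct bio *bio = pending_bio_list;   /* bio_list_get(): detach all */
        pending_bio_list = NULL;
        pthread_mutex_unlock(&device_lock);   /* submit with the lock dropped */

        while (bio) {
            struct bio *next = bio->next;
            submit_bio_sketch(bio);
            bio = next;
        }
    }
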
869 static int raise_barrier(struct r1conf *conf, sector_t sector_nr) in raise_barrier() argument
873 spin_lock_irq(&conf->resync_lock); in raise_barrier()
876 wait_event_lock_irq(conf->wait_barrier, in raise_barrier()
877 !atomic_read(&conf->nr_waiting[idx]), in raise_barrier()
878 conf->resync_lock); in raise_barrier()
881 atomic_inc(&conf->barrier[idx]); in raise_barrier()
899 wait_event_lock_irq(conf->wait_barrier, in raise_barrier()
900 (!conf->array_frozen && in raise_barrier()
901 !atomic_read(&conf->nr_pending[idx]) && in raise_barrier()
902 atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) || in raise_barrier()
903 test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery), in raise_barrier()
904 conf->resync_lock); in raise_barrier()
906 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { in raise_barrier()
907 atomic_dec(&conf->barrier[idx]); in raise_barrier()
908 spin_unlock_irq(&conf->resync_lock); in raise_barrier()
909 wake_up(&conf->wait_barrier); in raise_barrier()
913 atomic_inc(&conf->nr_sync_pending); in raise_barrier()
914 spin_unlock_irq(&conf->resync_lock); in raise_barrier()
919 static void lower_barrier(struct r1conf *conf, sector_t sector_nr) in lower_barrier() argument
923 BUG_ON(atomic_read(&conf->barrier[idx]) <= 0); in lower_barrier()
925 atomic_dec(&conf->barrier[idx]); in lower_barrier()
926 atomic_dec(&conf->nr_sync_pending); in lower_barrier()
927 wake_up(&conf->wait_barrier); in lower_barrier()
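
The raise_barrier()/lower_barrier() pairs above maintain per-bucket resync barriers, so a resync in one region only blocks normal I/O that hashes to the same bucket. A hedged userspace sketch of the counter scheme (bucket count, shift width, and helper names are assumptions, and the spin-wait stands in for sleeping on wait_barrier):

    #include <stdatomic.h>

    #define BUCKETS 512                     /* stands in for BARRIER_BUCKETS_NR */
    typedef unsigned long long sector_t;

    static atomic_int barrier[BUCKETS];     /* active resync in this bucket */
    static atomic_int nr_pending[BUCKETS];  /* in-flight normal I/O */

    static int sector_to_idx(sector_t s)
    {
        return (int)((s >> 17) % BUCKETS);  /* ~64 MB units; shift assumed */
    }

    void io_enter(sector_t s) { atomic_fetch_add(&nr_pending[sector_to_idx(s)], 1); }
    void io_exit(sector_t s)  { atomic_fetch_sub(&nr_pending[sector_to_idx(s)], 1); }

    void raise_barrier_sketch(sector_t s)   /* resync side */
    {
        int idx = sector_to_idx(s);
        atomic_fetch_add(&barrier[idx], 1);
        while (atomic_load(&nr_pending[idx]))
            ;   /* the kernel sleeps on wait_barrier instead of spinning */
    }

    void lower_barrier_sketch(sector_t s)
    {
        atomic_fetch_sub(&barrier[sector_to_idx(s)], 1);
        /* the kernel then wakes conf->wait_barrier sleepers */
    }
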
930 static bool _wait_barrier(struct r1conf *conf, int idx, bool nowait) in _wait_barrier() argument
942 atomic_inc(&conf->nr_pending[idx]); in _wait_barrier()
962 if (!READ_ONCE(conf->array_frozen) && in _wait_barrier()
963 !atomic_read(&conf->barrier[idx])) in _wait_barrier()
973 spin_lock_irq(&conf->resync_lock); in _wait_barrier()
974 atomic_inc(&conf->nr_waiting[idx]); in _wait_barrier()
975 atomic_dec(&conf->nr_pending[idx]); in _wait_barrier()
980 wake_up_barrier(conf); in _wait_barrier()
987 wait_event_lock_irq(conf->wait_barrier, in _wait_barrier()
988 !conf->array_frozen && in _wait_barrier()
989 !atomic_read(&conf->barrier[idx]), in _wait_barrier()
990 conf->resync_lock); in _wait_barrier()
991 atomic_inc(&conf->nr_pending[idx]); in _wait_barrier()
994 atomic_dec(&conf->nr_waiting[idx]); in _wait_barrier()
995 spin_unlock_irq(&conf->resync_lock); in _wait_barrier()
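
_wait_barrier() is the other half of that handshake: normal I/O optimistically bumps nr_pending, checks array_frozen and the barrier count, and only falls back to the locked slow path on a conflict. A pthread-based sketch of the fast/slow split, with every name assumed (the barrier side is expected to broadcast on wait_barrier when it lowers):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static pthread_mutex_t resync_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wait_barrier = PTHREAD_COND_INITIALIZER;
    static atomic_int  nr_pending, nr_waiting, barrier_cnt;
    static atomic_bool array_frozen;

    bool wait_barrier_sketch(bool nowait)
    {
        /* Fast path: claim a pending slot first, then check the barrier.
         * If resync raises the barrier right after our check, it will
         * wait for our nr_pending count to drain, so proceeding is safe. */
        atomic_fetch_add(&nr_pending, 1);
        if (!atomic_load(&array_frozen) && !atomic_load(&barrier_cnt))
            return true;

        /* Slow path: undo the claim and park as a waiter. */
        atomic_fetch_sub(&nr_pending, 1);
        if (nowait)
            return false;                 /* REQ_NOWAIT-style bail-out */

        pthread_mutex_lock(&resync_lock);
        atomic_fetch_add(&nr_waiting, 1);
        while (atomic_load(&array_frozen) || atomic_load(&barrier_cnt))
            pthread_cond_wait(&wait_barrier, &resync_lock);
        atomic_fetch_add(&nr_pending, 1);
        atomic_fetch_sub(&nr_waiting, 1);
        pthread_mutex_unlock(&resync_lock);
        return true;
    }
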
999 static bool wait_read_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait) in wait_read_barrier() argument
1011 atomic_inc(&conf->nr_pending[idx]); in wait_read_barrier()
1013 if (!READ_ONCE(conf->array_frozen)) in wait_read_barrier()
1016 spin_lock_irq(&conf->resync_lock); in wait_read_barrier()
1017 atomic_inc(&conf->nr_waiting[idx]); in wait_read_barrier()
1018 atomic_dec(&conf->nr_pending[idx]); in wait_read_barrier()
1023 wake_up_barrier(conf); in wait_read_barrier()
1031 wait_event_lock_irq(conf->wait_barrier, in wait_read_barrier()
1032 !conf->array_frozen, in wait_read_barrier()
1033 conf->resync_lock); in wait_read_barrier()
1034 atomic_inc(&conf->nr_pending[idx]); in wait_read_barrier()
1037 atomic_dec(&conf->nr_waiting[idx]); in wait_read_barrier()
1038 spin_unlock_irq(&conf->resync_lock); in wait_read_barrier()
1042 static bool wait_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait) in wait_barrier() argument
1046 return _wait_barrier(conf, idx, nowait); in wait_barrier()
1049 static void _allow_barrier(struct r1conf *conf, int idx) in _allow_barrier() argument
1051 atomic_dec(&conf->nr_pending[idx]); in _allow_barrier()
1052 wake_up_barrier(conf); in _allow_barrier()
1055 static void allow_barrier(struct r1conf *conf, sector_t sector_nr) in allow_barrier() argument
1059 _allow_barrier(conf, idx); in allow_barrier()
1063 static int get_unqueued_pending(struct r1conf *conf) in get_unqueued_pending() argument
1067 ret = atomic_read(&conf->nr_sync_pending); in get_unqueued_pending()
1069 ret += atomic_read(&conf->nr_pending[idx]) - in get_unqueued_pending()
1070 atomic_read(&conf->nr_queued[idx]); in get_unqueued_pending()
1075 static void freeze_array(struct r1conf *conf, int extra) in freeze_array() argument
1100 spin_lock_irq(&conf->resync_lock); in freeze_array()
1101 conf->array_frozen = 1; in freeze_array()
1102 raid1_log(conf->mddev, "wait freeze"); in freeze_array()
1104 conf->wait_barrier, in freeze_array()
1105 get_unqueued_pending(conf) == extra, in freeze_array()
1106 conf->resync_lock, in freeze_array()
1107 flush_pending_writes(conf)); in freeze_array()
1108 spin_unlock_irq(&conf->resync_lock); in freeze_array()
1110 static void unfreeze_array(struct r1conf *conf) in unfreeze_array() argument
1113 spin_lock_irq(&conf->resync_lock); in unfreeze_array()
1114 conf->array_frozen = 0; in unfreeze_array()
1115 spin_unlock_irq(&conf->resync_lock); in unfreeze_array()
1116 wake_up(&conf->wait_barrier); in unfreeze_array()
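
freeze_array()/unfreeze_array() quiesce the whole array: set array_frozen, then wait until get_unqueued_pending() drops to the caller's allowance ("extra"), flushing queued writes along the way so the wait cannot deadlock on them. A userspace sketch of the handshake, with assumed names and a stubbed flush (completion paths are expected to broadcast on wait_barrier):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static pthread_mutex_t resync_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wait_barrier = PTHREAD_COND_INITIALIZER;
    static atomic_bool array_frozen;
    static atomic_int  nr_pending, nr_queued, nr_sync_pending;

    /* I/O that is in flight but not parked on raid1d's retry queues. */
    static int get_unqueued_pending_sketch(void)
    {
        return atomic_load(&nr_sync_pending)
             + atomic_load(&nr_pending) - atomic_load(&nr_queued);
    }

    static void flush_pending_writes_stub(void) { /* submit queued bios */ }

    void freeze_array_sketch(int extra)   /* "extra": requests the caller owns */
    {
        pthread_mutex_lock(&resync_lock);
        atomic_store(&array_frozen, true);
        while (get_unqueued_pending_sketch() != extra) {
            flush_pending_writes_stub();
            pthread_cond_wait(&wait_barrier, &resync_lock);
        }
        pthread_mutex_unlock(&resync_lock);
    }

    void unfreeze_array_sketch(void)
    {
        pthread_mutex_lock(&resync_lock);
        atomic_store(&array_frozen, false);
        pthread_mutex_unlock(&resync_lock);
        pthread_cond_broadcast(&wait_barrier);
    }
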
1174 struct r1conf *conf = mddev->private; in raid1_unplug() local
1178 spin_lock_irq(&conf->device_lock); in raid1_unplug()
1179 bio_list_merge(&conf->pending_bio_list, &plug->pending); in raid1_unplug()
1180 spin_unlock_irq(&conf->device_lock); in raid1_unplug()
1181 wake_up_barrier(conf); in raid1_unplug()
1189 flush_bio_list(conf, bio); in raid1_unplug()
1205 struct r1conf *conf = mddev->private; in alloc_r1bio() local
1208 r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO); in alloc_r1bio()
1210 memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0])); in alloc_r1bio()
1218 struct r1conf *conf = mddev->private; in raid1_read_request() local
1240 rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev); in raid1_read_request()
1252 if (!wait_read_barrier(conf, bio->bi_iter.bi_sector, in raid1_read_request()
1268 rdisk = read_balance(conf, r1_bio, &max_sectors); in raid1_read_request()
1281 mirror = conf->mirrors + rdisk; in raid1_read_request()
1302 gfp, &conf->bio_split); in raid1_read_request()
1339 struct r1conf *conf = mddev->private; in raid1_write_request() local
1359 prepare_to_wait(&conf->wait_barrier, in raid1_write_request()
1367 finish_wait(&conf->wait_barrier, &w); in raid1_write_request()
1375 if (!wait_barrier(conf, bio->bi_iter.bi_sector, in raid1_write_request()
1396 disks = conf->raid_disks * 2; in raid1_write_request()
1401 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in raid1_write_request()
1418 if (i < conf->raid_disks) in raid1_write_request()
1475 rdev_dec_pending(conf->mirrors[j].rdev, mddev); in raid1_write_request()
1476 mempool_free(r1_bio, &conf->r1bio_pool); in raid1_write_request()
1477 allow_barrier(conf, bio->bi_iter.bi_sector); in raid1_write_request()
1485 wait_barrier(conf, bio->bi_iter.bi_sector, false); in raid1_write_request()
1500 GFP_NOIO, &conf->bio_split); in raid1_write_request()
1517 struct md_rdev *rdev = conf->mirrors[i].rdev; in raid1_write_request()
1561 conf->raid_disks - mddev->degraded > 1) in raid1_write_request()
1573 spin_lock_irqsave(&conf->device_lock, flags); in raid1_write_request()
1574 bio_list_add(&conf->pending_bio_list, mbio); in raid1_write_request()
1575 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_write_request()
1583 wake_up_barrier(conf); in raid1_write_request()
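
raid1_write_request() fans each write out to every usable mirror and parks the clones on pending_bio_list under device_lock, to be drained by flush_pending_writes() as sketched earlier. A hedged sketch of the enqueue side (the list here is LIFO for brevity; the kernel's bio_list is FIFO, and all names are assumptions):

    #include <pthread.h>
    #include <stdlib.h>

    struct bio { struct bio *next; int target_disk; /* payload omitted */ };

    static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct bio *pending_bio_list;   /* drained by the daemon thread */

    static void queue_write_clone(int disk)
    {
        struct bio *mbio = calloc(1, sizeof(*mbio));
        if (!mbio)
            return;
        mbio->target_disk = disk;
        pthread_mutex_lock(&device_lock);
        mbio->next = pending_bio_list;
        pending_bio_list = mbio;
        pthread_mutex_unlock(&device_lock);
    }

    void raid1_write_fanout(int raid_disks, const int *usable)
    {
        /* One clone per usable mirror; the master bio completes only after
         * every clone's end_io drops the remaining-count to zero. */
        for (int i = 0; i < raid_disks; i++)
            if (usable[i])
                queue_write_clone(i);
        /* wake_up_barrier()/md_wakeup_thread() equivalent would follow */
    }
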
1616 struct r1conf *conf = mddev->private; in raid1_status() local
1619 seq_printf(seq, " [%d/%d] [", conf->raid_disks, in raid1_status()
1620 conf->raid_disks - mddev->degraded); in raid1_status()
1622 for (i = 0; i < conf->raid_disks; i++) { in raid1_status()
1623 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in raid1_status()
1649 struct r1conf *conf = mddev->private; in raid1_error() local
1652 spin_lock_irqsave(&conf->device_lock, flags); in raid1_error()
1655 (conf->raid_disks - mddev->degraded) == 1) { in raid1_error()
1659 conf->recovery_disabled = mddev->recovery_disabled; in raid1_error()
1660 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_error()
1668 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_error()
1678 mdname(mddev), conf->raid_disks - mddev->degraded); in raid1_error()
1681 static void print_conf(struct r1conf *conf) in print_conf() argument
1686 if (!conf) { in print_conf()
1690 pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, in print_conf()
1691 conf->raid_disks); in print_conf()
1694 for (i = 0; i < conf->raid_disks; i++) { in print_conf()
1695 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in print_conf()
1705 static void close_sync(struct r1conf *conf) in close_sync() argument
1710 _wait_barrier(conf, idx, false); in close_sync()
1711 _allow_barrier(conf, idx); in close_sync()
1714 mempool_exit(&conf->r1buf_pool); in close_sync()
1720 struct r1conf *conf = mddev->private; in raid1_spare_active() local
1731 spin_lock_irqsave(&conf->device_lock, flags); in raid1_spare_active()
1732 for (i = 0; i < conf->raid_disks; i++) { in raid1_spare_active()
1733 struct md_rdev *rdev = conf->mirrors[i].rdev; in raid1_spare_active()
1734 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev; in raid1_spare_active()
1763 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_spare_active()
1765 print_conf(conf); in raid1_spare_active()
1771 struct r1conf *conf = mddev->private; in raid1_add_disk() local
1776 int last = conf->raid_disks - 1; in raid1_add_disk()
1778 if (mddev->recovery_disabled == conf->recovery_disabled) in raid1_add_disk()
1793 rdev->saved_raid_disk < conf->raid_disks && in raid1_add_disk()
1794 conf->mirrors[rdev->saved_raid_disk].rdev == NULL) in raid1_add_disk()
1798 p = conf->mirrors + mirror; in raid1_add_disk()
1811 conf->fullsync = 1; in raid1_add_disk()
1816 p[conf->raid_disks].rdev == NULL && repl_slot < 0) in raid1_add_disk()
1822 p = conf->mirrors + repl_slot; in raid1_add_disk()
1827 conf->fullsync = 1; in raid1_add_disk()
1828 rcu_assign_pointer(p[conf->raid_disks].rdev, rdev); in raid1_add_disk()
1831 print_conf(conf); in raid1_add_disk()
1837 struct r1conf *conf = mddev->private; in raid1_remove_disk() local
1840 struct raid1_info *p = conf->mirrors + number; in raid1_remove_disk()
1842 if (unlikely(number >= conf->raid_disks)) in raid1_remove_disk()
1846 p = conf->mirrors + conf->raid_disks + number; in raid1_remove_disk()
1848 print_conf(conf); in raid1_remove_disk()
1859 mddev->recovery_disabled != conf->recovery_disabled && in raid1_remove_disk()
1860 mddev->degraded < conf->raid_disks) { in raid1_remove_disk()
1874 if (conf->mirrors[conf->raid_disks + number].rdev) { in raid1_remove_disk()
1880 conf->mirrors[conf->raid_disks + number].rdev; in raid1_remove_disk()
1881 freeze_array(conf, 0); in raid1_remove_disk()
1890 unfreeze_array(conf); in raid1_remove_disk()
1895 conf->mirrors[conf->raid_disks + number].rdev = NULL; in raid1_remove_disk()
1896 unfreeze_array(conf); in raid1_remove_disk()
1904 print_conf(conf); in raid1_remove_disk()
1961 struct r1conf *conf = mddev->private; in end_sync_write() local
1964 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev; in end_sync_write()
1975 !is_badblock(conf->mirrors[r1_bio->read_disk].rdev, in end_sync_write()
2018 struct r1conf *conf = mddev->private; in fix_sync_read_error() local
2026 rdev = conf->mirrors[r1_bio->read_disk].rdev; in fix_sync_read_error()
2052 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
2061 if (d == conf->raid_disks * 2) in fix_sync_read_error()
2075 for (d = 0; d < conf->raid_disks * 2; d++) { in fix_sync_read_error()
2076 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
2083 conf->recovery_disabled = in fix_sync_read_error()
2101 d = conf->raid_disks * 2; in fix_sync_read_error()
2105 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
2116 d = conf->raid_disks * 2; in fix_sync_read_error()
2120 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
2145 struct r1conf *conf = mddev->private; in process_checks() local
2152 for (i = 0; i < conf->raid_disks * 2; i++) { in process_checks()
2160 bio_reset(b, conf->mirrors[i].rdev->bdev, REQ_OP_READ); in process_checks()
2163 conf->mirrors[i].rdev->data_offset; in process_checks()
2171 for (primary = 0; primary < conf->raid_disks * 2; primary++) in process_checks()
2175 rdev_dec_pending(conf->mirrors[primary].rdev, mddev); in process_checks()
2179 for (i = 0; i < conf->raid_disks * 2; i++) { in process_checks()
2213 rdev_dec_pending(conf->mirrors[i].rdev, mddev); in process_checks()
2223 struct r1conf *conf = mddev->private; in sync_request_write() local
2225 int disks = conf->raid_disks * 2; in sync_request_write()
2247 if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) { in sync_request_write()
2253 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags)) in sync_request_write()
2258 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio)); in sync_request_write()
2274 static void fix_read_error(struct r1conf *conf, int read_disk, in fix_read_error() argument
2277 struct mddev *mddev = conf->mddev; in fix_read_error()
2293 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2303 conf->tmppage, REQ_OP_READ, false)) in fix_read_error()
2311 if (d == conf->raid_disks * 2) in fix_read_error()
2317 struct md_rdev *rdev = conf->mirrors[read_disk].rdev; in fix_read_error()
2326 d = conf->raid_disks * 2; in fix_read_error()
2329 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2335 conf->tmppage, REQ_OP_WRITE); in fix_read_error()
2343 d = conf->raid_disks * 2; in fix_read_error()
2346 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2352 conf->tmppage, REQ_OP_READ)) { in fix_read_error()
2372 struct r1conf *conf = mddev->private; in narrow_write_error() local
2373 struct md_rdev *rdev = conf->mirrors[i].rdev; in narrow_write_error()
2438 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio) in handle_sync_write_finished() argument
2442 for (m = 0; m < conf->raid_disks * 2 ; m++) { in handle_sync_write_finished()
2443 struct md_rdev *rdev = conf->mirrors[m].rdev; in handle_sync_write_finished()
2454 md_error(conf->mddev, rdev); in handle_sync_write_finished()
2458 md_done_sync(conf->mddev, s, 1); in handle_sync_write_finished()
2461 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio) in handle_write_finished() argument
2466 for (m = 0; m < conf->raid_disks * 2 ; m++) in handle_write_finished()
2468 struct md_rdev *rdev = conf->mirrors[m].rdev; in handle_write_finished()
2472 rdev_dec_pending(rdev, conf->mddev); in handle_write_finished()
2480 md_error(conf->mddev, in handle_write_finished()
2481 conf->mirrors[m].rdev); in handle_write_finished()
2485 rdev_dec_pending(conf->mirrors[m].rdev, in handle_write_finished()
2486 conf->mddev); in handle_write_finished()
2489 spin_lock_irq(&conf->device_lock); in handle_write_finished()
2490 list_add(&r1_bio->retry_list, &conf->bio_end_io_list); in handle_write_finished()
2492 atomic_inc(&conf->nr_queued[idx]); in handle_write_finished()
2493 spin_unlock_irq(&conf->device_lock); in handle_write_finished()
2498 wake_up(&conf->wait_barrier); in handle_write_finished()
2499 md_wakeup_thread(conf->mddev->thread); in handle_write_finished()
2507 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) in handle_read_error() argument
2509 struct mddev *mddev = conf->mddev; in handle_read_error()
2528 rdev = conf->mirrors[r1_bio->read_disk].rdev; in handle_read_error()
2531 freeze_array(conf, 1); in handle_read_error()
2532 fix_read_error(conf, r1_bio->read_disk, in handle_read_error()
2534 unfreeze_array(conf); in handle_read_error()
2541 rdev_dec_pending(rdev, conf->mddev); in handle_read_error()
2548 allow_barrier(conf, sector); in handle_read_error()
2556 struct r1conf *conf = mddev->private; in raid1d() local
2557 struct list_head *head = &conf->retry_list; in raid1d()
2563 if (!list_empty_careful(&conf->bio_end_io_list) && in raid1d()
2566 spin_lock_irqsave(&conf->device_lock, flags); in raid1d()
2568 list_splice_init(&conf->bio_end_io_list, &tmp); in raid1d()
2569 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1d()
2575 atomic_dec(&conf->nr_queued[idx]); in raid1d()
2587 flush_pending_writes(conf); in raid1d()
2589 spin_lock_irqsave(&conf->device_lock, flags); in raid1d()
2591 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1d()
2597 atomic_dec(&conf->nr_queued[idx]); in raid1d()
2598 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1d()
2601 conf = mddev->private; in raid1d()
2605 handle_sync_write_finished(conf, r1_bio); in raid1d()
2610 handle_write_finished(conf, r1_bio); in raid1d()
2612 handle_read_error(conf, r1_bio); in raid1d()
2623 static int init_resync(struct r1conf *conf) in init_resync() argument
2628 BUG_ON(mempool_initialized(&conf->r1buf_pool)); in init_resync()
2630 return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc, in init_resync()
2631 r1buf_pool_free, conf->poolinfo); in init_resync()
2634 static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf) in raid1_alloc_init_r1buf() argument
2636 struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO); in raid1_alloc_init_r1buf()
2641 for (i = conf->poolinfo->raid_disks; i--; ) { in raid1_alloc_init_r1buf()
2664 struct r1conf *conf = mddev->private; in raid1_sync_request() local
2679 if (!mempool_initialized(&conf->r1buf_pool)) in raid1_sync_request()
2680 if (init_resync(conf)) in raid1_sync_request()
2694 conf->fullsync = 0; in raid1_sync_request()
2697 close_sync(conf); in raid1_sync_request()
2700 conf->cluster_sync_low = 0; in raid1_sync_request()
2701 conf->cluster_sync_high = 0; in raid1_sync_request()
2709 conf->fullsync == 0) { in raid1_sync_request()
2717 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { in raid1_sync_request()
2727 if (atomic_read(&conf->nr_waiting[idx])) in raid1_sync_request()
2735 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); in raid1_sync_request()
2738 if (raise_barrier(conf, sector_nr)) in raid1_sync_request()
2741 r1_bio = raid1_alloc_init_r1buf(conf); in raid1_sync_request()
2760 for (i = 0; i < conf->raid_disks * 2; i++) { in raid1_sync_request()
2764 rdev = rcu_dereference(conf->mirrors[i].rdev); in raid1_sync_request()
2767 if (i < conf->raid_disks) in raid1_sync_request()
2832 for (i = 0 ; i < conf->raid_disks * 2 ; i++) in raid1_sync_request()
2834 struct md_rdev *rdev = conf->mirrors[i].rdev; in raid1_sync_request()
2849 conf->recovery_disabled = mddev->recovery_disabled; in raid1_sync_request()
2895 !conf->fullsync && in raid1_sync_request()
2902 for (i = 0 ; i < conf->raid_disks * 2; i++) { in raid1_sync_request()
2925 conf->cluster_sync_high < sector_nr + nr_sectors) { in raid1_sync_request()
2926 conf->cluster_sync_low = mddev->curr_resync_completed; in raid1_sync_request()
2927 conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS; in raid1_sync_request()
2930 conf->cluster_sync_low, in raid1_sync_request()
2931 conf->cluster_sync_high); in raid1_sync_request()
2939 for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) { in raid1_sync_request()
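
The cluster_sync_low/cluster_sync_high updates near the end of raid1_sync_request() slide a resync window forward so other cluster nodes know which region is currently in flux. A minimal sketch of that window advance, assuming the window width and names (the kernel broadcasts the new window via md_cluster_ops->resync_info_update()):

    typedef unsigned long long sector_t;

    /* 32 MB window, expressed in 512-byte sectors (width is an assumption). */
    #define CLUSTER_RESYNC_WINDOW_SECTORS (32ULL * 1024 * 1024 / 512)

    struct sync_window { sector_t low, high; };

    void maybe_advance_window(struct sync_window *w, sector_t sector_nr,
                              sector_t nr_sectors, sector_t resync_completed)
    {
        if (w->high < sector_nr + nr_sectors) {
            w->low  = resync_completed;
            w->high = w->low + CLUSTER_RESYNC_WINDOW_SECTORS;
            /* broadcast of [low, high) to other nodes would go here */
        }
    }
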
2970 struct r1conf *conf; in setup_conf() local
2976 conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL); in setup_conf()
2977 if (!conf) in setup_conf()
2980 conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR, in setup_conf()
2982 if (!conf->nr_pending) in setup_conf()
2985 conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR, in setup_conf()
2987 if (!conf->nr_waiting) in setup_conf()
2990 conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR, in setup_conf()
2992 if (!conf->nr_queued) in setup_conf()
2995 conf->barrier = kcalloc(BARRIER_BUCKETS_NR, in setup_conf()
2997 if (!conf->barrier) in setup_conf()
3000 conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info), in setup_conf()
3003 if (!conf->mirrors) in setup_conf()
3006 conf->tmppage = alloc_page(GFP_KERNEL); in setup_conf()
3007 if (!conf->tmppage) in setup_conf()
3010 conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL); in setup_conf()
3011 if (!conf->poolinfo) in setup_conf()
3013 conf->poolinfo->raid_disks = mddev->raid_disks * 2; in setup_conf()
3014 err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc, in setup_conf()
3015 rbio_pool_free, conf->poolinfo); in setup_conf()
3019 err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0); in setup_conf()
3023 conf->poolinfo->mddev = mddev; in setup_conf()
3026 spin_lock_init(&conf->device_lock); in setup_conf()
3033 disk = conf->mirrors + mddev->raid_disks + disk_idx; in setup_conf()
3035 disk = conf->mirrors + disk_idx; in setup_conf()
3043 conf->raid_disks = mddev->raid_disks; in setup_conf()
3044 conf->mddev = mddev; in setup_conf()
3045 INIT_LIST_HEAD(&conf->retry_list); in setup_conf()
3046 INIT_LIST_HEAD(&conf->bio_end_io_list); in setup_conf()
3048 spin_lock_init(&conf->resync_lock); in setup_conf()
3049 init_waitqueue_head(&conf->wait_barrier); in setup_conf()
3051 bio_list_init(&conf->pending_bio_list); in setup_conf()
3052 conf->recovery_disabled = mddev->recovery_disabled - 1; in setup_conf()
3055 for (i = 0; i < conf->raid_disks * 2; i++) { in setup_conf()
3057 disk = conf->mirrors + i; in setup_conf()
3059 if (i < conf->raid_disks && in setup_conf()
3060 disk[conf->raid_disks].rdev) { in setup_conf()
3067 disk[conf->raid_disks].rdev; in setup_conf()
3068 disk[conf->raid_disks].rdev = NULL; in setup_conf()
3079 conf->fullsync = 1; in setup_conf()
3084 rcu_assign_pointer(conf->thread, in setup_conf()
3086 if (!conf->thread) in setup_conf()
3089 return conf; in setup_conf()
3092 if (conf) { in setup_conf()
3093 mempool_exit(&conf->r1bio_pool); in setup_conf()
3094 kfree(conf->mirrors); in setup_conf()
3095 safe_put_page(conf->tmppage); in setup_conf()
3096 kfree(conf->poolinfo); in setup_conf()
3097 kfree(conf->nr_pending); in setup_conf()
3098 kfree(conf->nr_waiting); in setup_conf()
3099 kfree(conf->nr_queued); in setup_conf()
3100 kfree(conf->barrier); in setup_conf()
3101 bioset_exit(&conf->bio_split); in setup_conf()
3102 kfree(conf); in setup_conf()
3110 struct r1conf *conf; in raid1_run() local
3133 conf = setup_conf(mddev); in raid1_run()
3135 conf = mddev->private; in raid1_run()
3137 if (IS_ERR(conf)) in raid1_run()
3138 return PTR_ERR(conf); in raid1_run()
3151 for (i = 0; i < conf->raid_disks; i++) in raid1_run()
3152 if (conf->mirrors[i].rdev == NULL || in raid1_run()
3153 !test_bit(In_sync, &conf->mirrors[i].rdev->flags) || in raid1_run()
3154 test_bit(Faulty, &conf->mirrors[i].rdev->flags)) in raid1_run()
3159 if (conf->raid_disks - mddev->degraded < 1) { in raid1_run()
3160 md_unregister_thread(mddev, &conf->thread); in raid1_run()
3165 if (conf->raid_disks - mddev->degraded == 1) in raid1_run()
3178 rcu_assign_pointer(mddev->thread, conf->thread); in raid1_run()
3179 rcu_assign_pointer(conf->thread, NULL); in raid1_run()
3180 mddev->private = conf; in raid1_run()
3193 raid1_free(mddev, conf); in raid1_run()
3199 struct r1conf *conf = priv; in raid1_free() local
3201 mempool_exit(&conf->r1bio_pool); in raid1_free()
3202 kfree(conf->mirrors); in raid1_free()
3203 safe_put_page(conf->tmppage); in raid1_free()
3204 kfree(conf->poolinfo); in raid1_free()
3205 kfree(conf->nr_pending); in raid1_free()
3206 kfree(conf->nr_waiting); in raid1_free()
3207 kfree(conf->nr_queued); in raid1_free()
3208 kfree(conf->barrier); in raid1_free()
3209 bioset_exit(&conf->bio_split); in raid1_free()
3210 kfree(conf); in raid1_free()
3258 struct r1conf *conf = mddev->private; in raid1_reshape() local
3282 if (raid_disks < conf->raid_disks) { in raid1_reshape()
3284 for (d= 0; d < conf->raid_disks; d++) in raid1_reshape()
3285 if (conf->mirrors[d].rdev) in raid1_reshape()
3312 freeze_array(conf, 0); in raid1_reshape()
3315 oldpool = conf->r1bio_pool; in raid1_reshape()
3316 conf->r1bio_pool = newpool; in raid1_reshape()
3318 for (d = d2 = 0; d < conf->raid_disks; d++) { in raid1_reshape()
3319 struct md_rdev *rdev = conf->mirrors[d].rdev; in raid1_reshape()
3331 kfree(conf->mirrors); in raid1_reshape()
3332 conf->mirrors = newmirrors; in raid1_reshape()
3333 kfree(conf->poolinfo); in raid1_reshape()
3334 conf->poolinfo = newpoolinfo; in raid1_reshape()
3336 spin_lock_irqsave(&conf->device_lock, flags); in raid1_reshape()
3337 mddev->degraded += (raid_disks - conf->raid_disks); in raid1_reshape()
3338 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_reshape()
3339 conf->raid_disks = mddev->raid_disks = raid_disks; in raid1_reshape()
3342 unfreeze_array(conf); in raid1_reshape()
3354 struct r1conf *conf = mddev->private; in raid1_quiesce() local
3357 freeze_array(conf, 0); in raid1_quiesce()
3359 unfreeze_array(conf); in raid1_quiesce()
3368 struct r1conf *conf; in raid1_takeover() local
3372 conf = setup_conf(mddev); in raid1_takeover()
3373 if (!IS_ERR(conf)) { in raid1_takeover()
3375 conf->array_frozen = 1; in raid1_takeover()
3379 return conf; in raid1_takeover()