Lines Matching full:conf

22 * conf->seq_write is the number of the last batch successfully written.
23 * conf->seq_flush is the number of the last batch that was closed to
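
The two header-comment lines above describe a batch fence for bitmap writes (in the full source the second sentence continues "... new additions"). The same counters appear later in this listing: __add_stripe_bio() stamps a stripe with sh->bm_seq = conf->seq_flush + 1, do_release_stripe() parks the stripe on conf->bitmap_list while sh->bm_seq - conf->seq_write > 0, and raid5d() closes a batch with seq_flush++ followed by seq_write = seq_flush. A minimal userspace sketch of that fence, assuming nothing beyond those three steps (the names are borrowed from the kernel, everything else is illustrative):

    #include <stdio.h>

    struct fence { int seq_flush, seq_write; };

    /* Same test as do_release_stripe(): the stripe's batch is not written yet. */
    static int must_wait(const struct fence *f, int bm_seq)
    {
            return bm_seq - f->seq_write > 0;
    }

    int main(void)
    {
            struct fence f = { .seq_flush = 4, .seq_write = 4 };
            int bm_seq = f.seq_flush + 1;           /* as stamped in __add_stripe_bio() */

            printf("%d\n", must_wait(&f, bm_seq));  /* 1: bitmap batch 5 not written    */
            f.seq_flush++;                          /* raid5d closes batch 5 ...        */
            f.seq_write = f.seq_flush;              /* ... and records it as written    */
            printf("%d\n", must_wait(&f, bm_seq));  /* 0: stripe may be handled now     */
            return 0;
    }
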
72 static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect) in stripe_hash() argument
74 int hash = (sect >> RAID5_STRIPE_SHIFT(conf)) & HASH_MASK; in stripe_hash()
75 return &conf->stripe_hashtbl[hash]; in stripe_hash()
78 static inline int stripe_hash_locks_hash(struct r5conf *conf, sector_t sect) in stripe_hash_locks_hash() argument
80 return (sect >> RAID5_STRIPE_SHIFT(conf)) & STRIPE_HASH_LOCKS_MASK; in stripe_hash_locks_hash()
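
Both helpers shift the sector by the same amount: the full hash picks a bucket in conf->stripe_hashtbl, while the smaller mask picks which of the hash locks covers it. Assuming the usual power-of-two mask definitions (HASH_MASK = 511 and STRIPE_HASH_LOCKS_MASK = 7 on a 64-bit build with 4 KiB pages, and a shift of 3 for the default 4 KiB stripe size), the lock index is just the low bits of the hash index, so a given bucket is always protected by the same lock. A standalone sketch of the arithmetic, with those values hard-coded as assumptions:

    #include <stdio.h>
    #include <stdint.h>

    #define STRIPE_SHIFT            3    /* assumed: 4 KiB stripes, 512-byte sectors */
    #define HASH_MASK               511  /* assumed table of 512 buckets             */
    #define STRIPE_HASH_LOCKS_MASK  7    /* assumed 8 hash locks                     */

    int main(void)
    {
            uint64_t sect = 123456;
            unsigned long long hash = (sect >> STRIPE_SHIFT) & HASH_MASK;
            unsigned long long lock = (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;

            printf("sector %llu -> hash bucket %llu, hash lock %llu\n",
                   (unsigned long long)sect, hash, lock);
            return 0;
    }
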
83 static inline void lock_device_hash_lock(struct r5conf *conf, int hash) in lock_device_hash_lock() argument
84 __acquires(&conf->device_lock) in lock_device_hash_lock()
86 spin_lock_irq(conf->hash_locks + hash); in lock_device_hash_lock()
87 spin_lock(&conf->device_lock); in lock_device_hash_lock()
90 static inline void unlock_device_hash_lock(struct r5conf *conf, int hash) in unlock_device_hash_lock() argument
91 __releases(&conf->device_lock) in unlock_device_hash_lock()
93 spin_unlock(&conf->device_lock); in unlock_device_hash_lock()
94 spin_unlock_irq(conf->hash_locks + hash); in unlock_device_hash_lock()
97 static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) in lock_all_device_hash_locks_irq() argument
98 __acquires(&conf->device_lock) in lock_all_device_hash_locks_irq()
101 spin_lock_irq(conf->hash_locks); in lock_all_device_hash_locks_irq()
103 spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); in lock_all_device_hash_locks_irq()
104 spin_lock(&conf->device_lock); in lock_all_device_hash_locks_irq()
107 static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf) in unlock_all_device_hash_locks_irq() argument
108 __releases(&conf->device_lock) in unlock_all_device_hash_locks_irq()
111 spin_unlock(&conf->device_lock); in unlock_all_device_hash_locks_irq()
113 spin_unlock(conf->hash_locks + i); in unlock_all_device_hash_locks_irq()
114 spin_unlock_irq(conf->hash_locks); in unlock_all_device_hash_locks_irq()
156 static void print_raid5_conf (struct r5conf *conf);
175 struct r5conf *conf = sh->raid_conf; in raid5_wakeup_stripe_thread() local
187 group = conf->worker_groups + cpu_to_group(cpu); in raid5_wakeup_stripe_thread()
196 if (conf->worker_cnt_per_group == 0) { in raid5_wakeup_stripe_thread()
197 md_wakeup_thread(conf->mddev->thread); in raid5_wakeup_stripe_thread()
201 group = conf->worker_groups + cpu_to_group(sh->cpu); in raid5_wakeup_stripe_thread()
209 for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) { in raid5_wakeup_stripe_thread()
219 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, in do_release_stripe() argument
221 __must_hold(&conf->device_lock) in do_release_stripe()
227 BUG_ON(atomic_read(&conf->active_stripes)==0); in do_release_stripe()
229 if (r5c_is_writeback(conf->log)) in do_release_stripe()
241 (conf->quiesce && r5c_is_writeback(conf->log) && in do_release_stripe()
251 list_add_tail(&sh->lru, &conf->delayed_list); in do_release_stripe()
253 sh->bm_seq - conf->seq_write > 0) in do_release_stripe()
254 list_add_tail(&sh->lru, &conf->bitmap_list); in do_release_stripe()
258 if (conf->worker_cnt_per_group == 0) { in do_release_stripe()
261 &conf->loprio_list); in do_release_stripe()
264 &conf->handle_list); in do_release_stripe()
270 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
274 if (atomic_dec_return(&conf->preread_active_stripes) in do_release_stripe()
276 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
277 atomic_dec(&conf->active_stripes); in do_release_stripe()
279 if (!r5c_is_writeback(conf->log)) in do_release_stripe()
285 else if (injournal == conf->raid_disks - conf->max_degraded) { in do_release_stripe()
288 atomic_inc(&conf->r5c_cached_full_stripes); in do_release_stripe()
290 atomic_dec(&conf->r5c_cached_partial_stripes); in do_release_stripe()
291 list_add_tail(&sh->lru, &conf->r5c_full_stripe_list); in do_release_stripe()
292 r5c_check_cached_full_stripe(conf); in do_release_stripe()
299 list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list); in do_release_stripe()
305 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, in __release_stripe() argument
307 __must_hold(&conf->device_lock) in __release_stripe()
310 do_release_stripe(conf, sh, temp_inactive_list); in __release_stripe()
320 static void release_inactive_stripe_list(struct r5conf *conf, in release_inactive_stripe_list() argument
341 spin_lock_irqsave(conf->hash_locks + hash, flags); in release_inactive_stripe_list()
342 if (list_empty(conf->inactive_list + hash) && in release_inactive_stripe_list()
344 atomic_dec(&conf->empty_inactive_list_nr); in release_inactive_stripe_list()
345 list_splice_tail_init(list, conf->inactive_list + hash); in release_inactive_stripe_list()
347 spin_unlock_irqrestore(conf->hash_locks + hash, flags); in release_inactive_stripe_list()
354 wake_up(&conf->wait_for_stripe); in release_inactive_stripe_list()
355 if (atomic_read(&conf->active_stripes) == 0) in release_inactive_stripe_list()
356 wake_up(&conf->wait_for_quiescent); in release_inactive_stripe_list()
357 if (conf->retry_read_aligned) in release_inactive_stripe_list()
358 md_wakeup_thread(conf->mddev->thread); in release_inactive_stripe_list()
362 static int release_stripe_list(struct r5conf *conf, in release_stripe_list() argument
364 __must_hold(&conf->device_lock) in release_stripe_list()
370 head = llist_del_all(&conf->released_stripes); in release_stripe_list()
384 __release_stripe(conf, sh, &temp_inactive_list[hash]); in release_stripe_list()
393 struct r5conf *conf = sh->raid_conf; in raid5_release_stripe() local
404 if (unlikely(!conf->mddev->thread) || in raid5_release_stripe()
407 wakeup = llist_add(&sh->release_list, &conf->released_stripes); in raid5_release_stripe()
409 md_wakeup_thread(conf->mddev->thread); in raid5_release_stripe()
413 if (atomic_dec_and_lock_irqsave(&sh->count, &conf->device_lock, flags)) { in raid5_release_stripe()
416 do_release_stripe(conf, sh, &list); in raid5_release_stripe()
417 spin_unlock_irqrestore(&conf->device_lock, flags); in raid5_release_stripe()
418 release_inactive_stripe_list(conf, &list, hash); in raid5_release_stripe()
430 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) in insert_hash() argument
432 struct hlist_head *hp = stripe_hash(conf, sh->sector); in insert_hash()
441 static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash) in get_free_stripe() argument
446 if (list_empty(conf->inactive_list + hash)) in get_free_stripe()
448 first = (conf->inactive_list + hash)->next; in get_free_stripe()
452 atomic_inc(&conf->active_stripes); in get_free_stripe()
454 if (list_empty(conf->inactive_list + hash)) in get_free_stripe()
455 atomic_inc(&conf->empty_inactive_list_nr); in get_free_stripe()
499 init_stripe_shared_pages(struct stripe_head *sh, struct r5conf *conf, int disks) in init_stripe_shared_pages() argument
506 /* Each of the sh->dev[i] need one conf->stripe_size */ in init_stripe_shared_pages()
507 cnt = PAGE_SIZE / conf->stripe_size; in init_stripe_shared_pages()
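
The quotient above is how many per-device stripe buffers fit in one page, which is the point of the shared-pages scheme when the stripe size is smaller than the page size. Worked out for two assumed configurations (4 KiB is the default stripe_size):

    PAGE_SIZE 4096,  stripe_size 4096  ->  cnt = 4096  / 4096 = 1  buffer per page
    PAGE_SIZE 65536, stripe_size 4096  ->  cnt = 65536 / 4096 = 16 buffers per page
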
571 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
576 struct r5conf *conf = sh->raid_conf; in init_stripe() local
587 seq = read_seqcount_begin(&conf->gen_lock); in init_stripe()
588 sh->generation = conf->generation - previous; in init_stripe()
589 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; in init_stripe()
591 stripe_set_idx(sector, conf, previous, sh); in init_stripe()
608 if (read_seqcount_retry(&conf->gen_lock, seq)) in init_stripe()
611 insert_hash(conf, sh); in init_stripe()
616 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, in __find_stripe() argument
622 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) in __find_stripe()
629 static struct stripe_head *find_get_stripe(struct r5conf *conf, in find_get_stripe() argument
635 sh = __find_stripe(conf, sector, generation); in find_get_stripe()
648 spin_lock(&conf->device_lock); in find_get_stripe()
651 atomic_inc(&conf->active_stripes); in find_get_stripe()
655 if (!list_empty(conf->inactive_list + hash)) in find_get_stripe()
658 if (list_empty(conf->inactive_list + hash) && in find_get_stripe()
660 atomic_inc(&conf->empty_inactive_list_nr); in find_get_stripe()
667 spin_unlock(&conf->device_lock); in find_get_stripe()
685 * Most calls to this function hold &conf->device_lock. Calls
689 int raid5_calc_degraded(struct r5conf *conf) in raid5_calc_degraded() argument
696 for (i = 0; i < conf->previous_raid_disks; i++) { in raid5_calc_degraded()
697 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); in raid5_calc_degraded()
699 rdev = rcu_dereference(conf->disks[i].replacement); in raid5_calc_degraded()
714 if (conf->raid_disks >= conf->previous_raid_disks) in raid5_calc_degraded()
718 if (conf->raid_disks == conf->previous_raid_disks) in raid5_calc_degraded()
722 for (i = 0; i < conf->raid_disks; i++) { in raid5_calc_degraded()
723 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); in raid5_calc_degraded()
725 rdev = rcu_dereference(conf->disks[i].replacement); in raid5_calc_degraded()
736 if (conf->raid_disks <= conf->previous_raid_disks) in raid5_calc_degraded()
745 static bool has_failed(struct r5conf *conf) in has_failed() argument
747 int degraded = conf->mddev->degraded; in has_failed()
749 if (test_bit(MD_BROKEN, &conf->mddev->flags)) in has_failed()
752 if (conf->mddev->reshape_position != MaxSector) in has_failed()
753 degraded = raid5_calc_degraded(conf); in has_failed()
755 return degraded > conf->max_degraded; in has_failed()
790 static bool is_inactive_blocked(struct r5conf *conf, int hash) in is_inactive_blocked() argument
792 if (list_empty(conf->inactive_list + hash)) in is_inactive_blocked()
795 if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) in is_inactive_blocked()
798 return (atomic_read(&conf->active_stripes) < in is_inactive_blocked()
799 (conf->max_nr_stripes * 3 / 4)); in is_inactive_blocked()
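
The 3/4 figure is the release threshold for callers blocked by R5_INACTIVE_BLOCKED: besides the inactive list for this hash being non-empty, the cache must have drained below three quarters of its size. For example, assuming max_nr_stripes = 256 (a typical default cache size), waiters are only released once active_stripes drops below 256 * 3 / 4 = 192.
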
802 struct stripe_head *raid5_get_active_stripe(struct r5conf *conf, in raid5_get_active_stripe() argument
807 int hash = stripe_hash_locks_hash(conf, sector); in raid5_get_active_stripe()
812 spin_lock_irq(conf->hash_locks + hash); in raid5_get_active_stripe()
815 if (!(flags & R5_GAS_NOQUIESCE) && conf->quiesce) { in raid5_get_active_stripe()
828 wait_event_lock_irq(conf->wait_for_quiescent, in raid5_get_active_stripe()
829 !conf->quiesce, in raid5_get_active_stripe()
830 *(conf->hash_locks + hash)); in raid5_get_active_stripe()
833 sh = find_get_stripe(conf, sector, conf->generation - previous, in raid5_get_active_stripe()
838 if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) { in raid5_get_active_stripe()
839 sh = get_free_stripe(conf, hash); in raid5_get_active_stripe()
841 r5c_check_stripe_cache_usage(conf); in raid5_get_active_stripe()
847 if (!test_bit(R5_DID_ALLOC, &conf->cache_state)) in raid5_get_active_stripe()
848 set_bit(R5_ALLOC_MORE, &conf->cache_state); in raid5_get_active_stripe()
854 set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state); in raid5_get_active_stripe()
855 r5l_wake_reclaim(conf->log, 0); in raid5_get_active_stripe()
863 wait_event_lock_irq(conf->wait_for_stripe, in raid5_get_active_stripe()
864 is_inactive_blocked(conf, hash), in raid5_get_active_stripe()
865 *(conf->hash_locks + hash)); in raid5_get_active_stripe()
866 clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state); in raid5_get_active_stripe()
869 spin_unlock_irq(conf->hash_locks + hash); in raid5_get_active_stripe()
903 struct r5conf *conf = sh->raid_conf; in stripe_can_batch() local
905 if (raid5_has_log(conf) || raid5_has_ppl(conf)) in stripe_can_batch()
912 static void stripe_add_to_batch_list(struct r5conf *conf, in stripe_add_to_batch_list() argument
922 if (!sector_div(tmp_sec, conf->chunk_sectors)) in stripe_add_to_batch_list()
924 head_sector = sh->sector - RAID5_STRIPE_SECTORS(conf); in stripe_add_to_batch_list()
930 hash = stripe_hash_locks_hash(conf, head_sector); in stripe_add_to_batch_list()
931 spin_lock_irq(conf->hash_locks + hash); in stripe_add_to_batch_list()
932 head = find_get_stripe(conf, head_sector, conf->generation, in stripe_add_to_batch_list()
934 spin_unlock_irq(conf->hash_locks + hash); in stripe_add_to_batch_list()
987 if (atomic_dec_return(&conf->preread_active_stripes) in stripe_add_to_batch_list()
989 md_wakeup_thread(conf->mddev->thread); in stripe_add_to_batch_list()
1010 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) in use_new_offset() argument
1012 sector_t progress = conf->reshape_progress; in use_new_offset()
1014 * of conf->generation, or ->data_offset that was set before in use_new_offset()
1020 if (sh->generation == conf->generation - 1) in use_new_offset()
1050 static void dispatch_defer_bios(struct r5conf *conf, int target, in dispatch_defer_bios() argument
1057 if (conf->pending_data_cnt == 0) in dispatch_defer_bios()
1060 list_sort(NULL, &conf->pending_list, cmp_stripe); in dispatch_defer_bios()
1062 first = conf->pending_list.next; in dispatch_defer_bios()
1065 if (conf->next_pending_data) in dispatch_defer_bios()
1066 list_move_tail(&conf->pending_list, in dispatch_defer_bios()
1067 &conf->next_pending_data->sibling); in dispatch_defer_bios()
1069 while (!list_empty(&conf->pending_list)) { in dispatch_defer_bios()
1070 data = list_first_entry(&conf->pending_list, in dispatch_defer_bios()
1077 list_move(&data->sibling, &conf->free_list); in dispatch_defer_bios()
1082 conf->pending_data_cnt -= cnt; in dispatch_defer_bios()
1083 BUG_ON(conf->pending_data_cnt < 0 || cnt < target); in dispatch_defer_bios()
1085 if (next != &conf->pending_list) in dispatch_defer_bios()
1086 conf->next_pending_data = list_entry(next, in dispatch_defer_bios()
1089 conf->next_pending_data = NULL; in dispatch_defer_bios()
1091 if (first != &conf->pending_list) in dispatch_defer_bios()
1092 list_move_tail(&conf->pending_list, first); in dispatch_defer_bios()
1095 static void flush_deferred_bios(struct r5conf *conf) in flush_deferred_bios() argument
1099 if (conf->pending_data_cnt == 0) in flush_deferred_bios()
1102 spin_lock(&conf->pending_bios_lock); in flush_deferred_bios()
1103 dispatch_defer_bios(conf, conf->pending_data_cnt, &tmp); in flush_deferred_bios()
1104 BUG_ON(conf->pending_data_cnt != 0); in flush_deferred_bios()
1105 spin_unlock(&conf->pending_bios_lock); in flush_deferred_bios()
1110 static void defer_issue_bios(struct r5conf *conf, sector_t sector, in defer_issue_bios() argument
1116 spin_lock(&conf->pending_bios_lock); in defer_issue_bios()
1117 ent = list_first_entry(&conf->free_list, struct r5pending_data, in defer_issue_bios()
1119 list_move_tail(&ent->sibling, &conf->pending_list); in defer_issue_bios()
1123 conf->pending_data_cnt++; in defer_issue_bios()
1124 if (conf->pending_data_cnt >= PENDING_IO_MAX) in defer_issue_bios()
1125 dispatch_defer_bios(conf, PENDING_IO_ONE_FLUSH, &tmp); in defer_issue_bios()
1127 spin_unlock(&conf->pending_bios_lock); in defer_issue_bios()
1139 struct r5conf *conf = sh->raid_conf; in ops_run_io() local
1151 should_defer = conf->batch_bio_dispatch && conf->group_cnt; in ops_run_io()
1184 rrdev = rcu_dereference(conf->disks[i].replacement); in ops_run_io()
1186 rdev = rcu_dereference(conf->disks[i].rdev); in ops_run_io()
1221 int bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), in ops_run_io()
1228 if (!conf->mddev->external && in ops_run_io()
1229 conf->mddev->sb_flags) { in ops_run_io()
1234 md_check_recovery(conf->mddev); in ops_run_io()
1242 md_wait_for_blocked_rdev(rdev, conf->mddev); in ops_run_io()
1245 rdev_dec_pending(rdev, conf->mddev); in ops_run_io()
1253 md_sync_acct(rdev->bdev, RAID5_STRIPE_SECTORS(conf)); in ops_run_io()
1269 if (use_new_offset(conf, sh)) in ops_run_io()
1292 bi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf); in ops_run_io()
1294 bi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf); in ops_run_io()
1304 if (conf->mddev->gendisk) in ops_run_io()
1306 disk_devt(conf->mddev->gendisk), in ops_run_io()
1316 md_sync_acct(rrdev->bdev, RAID5_STRIPE_SECTORS(conf)); in ops_run_io()
1332 if (use_new_offset(conf, sh)) in ops_run_io()
1342 rbi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf); in ops_run_io()
1344 rbi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf); in ops_run_io()
1351 if (conf->mddev->gendisk) in ops_run_io()
1353 disk_devt(conf->mddev->gendisk), in ops_run_io()
1376 defer_issue_bios(conf, head_sh->sector, &pending_bios); in ops_run_io()
1390 struct r5conf *conf = sh->raid_conf; in async_copy_data() local
1412 if (len > 0 && page_offset + len > RAID5_STRIPE_SIZE(conf)) in async_copy_data()
1413 clen = RAID5_STRIPE_SIZE(conf) - page_offset; in async_copy_data()
1421 if (conf->skip_copy && in async_copy_data()
1423 clen == RAID5_STRIPE_SIZE(conf) && in async_copy_data()
1448 struct r5conf *conf = sh->raid_conf; in ops_complete_biofill() local
1469 dev->sector + RAID5_STRIPE_SECTORS(conf)) { in ops_complete_biofill()
1470 rbi2 = r5_next_bio(conf, rbi, dev->sector); in ops_complete_biofill()
1487 struct r5conf *conf = sh->raid_conf; in ops_run_biofill() local
1502 dev->sector + RAID5_STRIPE_SECTORS(conf)) { in ops_run_biofill()
1506 rbi = r5_next_bio(conf, rbi, dev->sector); in ops_run_biofill()
1932 struct r5conf *conf = sh->raid_conf; in ops_run_biodrain() local
1965 dev->sector + RAID5_STRIPE_SECTORS(conf)) { in ops_run_biodrain()
1976 r5c_is_writeback(conf->log)); in ops_run_biodrain()
1978 !r5c_is_writeback(conf->log)) { in ops_run_biodrain()
1984 wbi = r5_next_bio(conf, wbi, dev->sector); in ops_run_biodrain()
2289 struct r5conf *conf = sh->raid_conf; in raid_run_ops() local
2290 int level = conf->level; in raid_run_ops()
2293 local_lock(&conf->percpu->lock); in raid_run_ops()
2294 percpu = this_cpu_ptr(conf->percpu); in raid_run_ops()
2354 local_unlock(&conf->percpu->lock); in raid_run_ops()
2368 int disks, struct r5conf *conf) in alloc_stripe() argument
2381 sh->raid_conf = conf; in alloc_stripe()
2384 if (raid5_has_ppl(conf)) { in alloc_stripe()
2392 if (init_stripe_shared_pages(sh, conf, disks)) { in alloc_stripe()
2400 static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) in grow_one_stripe() argument
2404 sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size, conf); in grow_one_stripe()
2410 free_stripe(conf->slab_cache, sh); in grow_one_stripe()
2414 conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; in grow_one_stripe()
2416 atomic_inc(&conf->active_stripes); in grow_one_stripe()
2419 WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes + 1); in grow_one_stripe()
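
The modulo a couple of lines above spreads newly grown stripes round-robin across the hash-lock buckets (in the full source it is assigned to sh->hash_lock_index), and drop_one_stripe() later in this listing undoes it with (max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK, which lands on the same bucket because the lock count is a power of two. A worked example, assuming the usual NR_STRIPE_HASH_LOCKS = 8:

    grow:   max_nr_stripes = 256  ->  new stripe goes to bucket 256 % 8 = 0, count becomes 257
    shrink: max_nr_stripes = 257  ->  stripe taken from bucket (257 - 1) & 7 = 0, count back to 256
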
2423 static int grow_stripes(struct r5conf *conf, int num) in grow_stripes() argument
2426 size_t namelen = sizeof(conf->cache_name[0]); in grow_stripes()
2427 int devs = max(conf->raid_disks, conf->previous_raid_disks); in grow_stripes()
2429 if (conf->mddev->gendisk) in grow_stripes()
2430 snprintf(conf->cache_name[0], namelen, in grow_stripes()
2431 "raid%d-%s", conf->level, mdname(conf->mddev)); in grow_stripes()
2433 snprintf(conf->cache_name[0], namelen, in grow_stripes()
2434 "raid%d-%p", conf->level, conf->mddev); in grow_stripes()
2435 snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]); in grow_stripes()
2437 conf->active_name = 0; in grow_stripes()
2438 sc = kmem_cache_create(conf->cache_name[conf->active_name], in grow_stripes()
2443 conf->slab_cache = sc; in grow_stripes()
2444 conf->pool_size = devs; in grow_stripes()
2446 if (!grow_one_stripe(conf, GFP_KERNEL)) in grow_stripes()
2493 static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors) in resize_chunks() argument
2503 if (conf->scribble_disks >= new_disks && in resize_chunks()
2504 conf->scribble_sectors >= new_sectors) in resize_chunks()
2506 mddev_suspend(conf->mddev); in resize_chunks()
2512 percpu = per_cpu_ptr(conf->percpu, cpu); in resize_chunks()
2514 new_sectors / RAID5_STRIPE_SECTORS(conf)); in resize_chunks()
2520 mddev_resume(conf->mddev); in resize_chunks()
2522 conf->scribble_disks = new_disks; in resize_chunks()
2523 conf->scribble_sectors = new_sectors; in resize_chunks()
2528 static int resize_stripes(struct r5conf *conf, int newsize) in resize_stripes() argument
2542 * 3/ reallocate conf->disks to be suitable bigger. If this fails, in resize_stripes()
2561 md_allow_write(conf->mddev); in resize_stripes()
2564 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], in resize_stripes()
2571 mutex_lock(&conf->cache_size_mutex); in resize_stripes()
2573 for (i = conf->max_nr_stripes; i; i--) { in resize_stripes()
2574 nsh = alloc_stripe(sc, GFP_KERNEL, newsize, conf); in resize_stripes()
2588 mutex_unlock(&conf->cache_size_mutex); in resize_stripes()
2598 lock_device_hash_lock(conf, hash); in resize_stripes()
2599 wait_event_cmd(conf->wait_for_stripe, in resize_stripes()
2600 !list_empty(conf->inactive_list + hash), in resize_stripes()
2601 unlock_device_hash_lock(conf, hash), in resize_stripes()
2602 lock_device_hash_lock(conf, hash)); in resize_stripes()
2603 osh = get_free_stripe(conf, hash); in resize_stripes()
2604 unlock_device_hash_lock(conf, hash); in resize_stripes()
2612 for(i=0; i<conf->pool_size; i++) { in resize_stripes()
2618 free_stripe(conf->slab_cache, osh); in resize_stripes()
2620 if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS + in resize_stripes()
2621 !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) { in resize_stripes()
2626 kmem_cache_destroy(conf->slab_cache); in resize_stripes()
2631 * conf->disks and the scribble region in resize_stripes()
2635 for (i = 0; i < conf->pool_size; i++) in resize_stripes()
2636 ndisks[i] = conf->disks[i]; in resize_stripes()
2638 for (i = conf->pool_size; i < newsize; i++) { in resize_stripes()
2645 for (i = conf->pool_size; i < newsize; i++) in resize_stripes()
2650 kfree(conf->disks); in resize_stripes()
2651 conf->disks = ndisks; in resize_stripes()
2656 conf->slab_cache = sc; in resize_stripes()
2657 conf->active_name = 1-conf->active_name; in resize_stripes()
2673 for (i = conf->raid_disks; i < newsize; i++) { in resize_stripes()
2681 for (i=conf->raid_disks; i < newsize; i++) in resize_stripes()
2696 conf->pool_size = newsize; in resize_stripes()
2697 mutex_unlock(&conf->cache_size_mutex); in resize_stripes()
2702 static int drop_one_stripe(struct r5conf *conf) in drop_one_stripe() argument
2705 int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK; in drop_one_stripe()
2707 spin_lock_irq(conf->hash_locks + hash); in drop_one_stripe()
2708 sh = get_free_stripe(conf, hash); in drop_one_stripe()
2709 spin_unlock_irq(conf->hash_locks + hash); in drop_one_stripe()
2714 free_stripe(conf->slab_cache, sh); in drop_one_stripe()
2715 atomic_dec(&conf->active_stripes); in drop_one_stripe()
2716 WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes - 1); in drop_one_stripe()
2720 static void shrink_stripes(struct r5conf *conf) in shrink_stripes() argument
2722 while (conf->max_nr_stripes && in shrink_stripes()
2723 drop_one_stripe(conf)) in shrink_stripes()
2726 kmem_cache_destroy(conf->slab_cache); in shrink_stripes()
2727 conf->slab_cache = NULL; in shrink_stripes()
2755 struct r5conf *conf = sh->raid_conf; in raid5_end_read_request() local
2777 rdev = rdev_pend_deref(conf->disks[i].replacement); in raid5_end_read_request()
2779 rdev = rdev_pend_deref(conf->disks[i].rdev); in raid5_end_read_request()
2781 if (use_new_offset(conf, sh)) in raid5_end_read_request()
2794 mdname(conf->mddev), RAID5_STRIPE_SECTORS(conf), in raid5_end_read_request()
2797 atomic_add(RAID5_STRIPE_SECTORS(conf), &rdev->corrected_errors); in raid5_end_read_request()
2822 mdname(conf->mddev), in raid5_end_read_request()
2825 else if (conf->mddev->degraded >= conf->max_degraded) { in raid5_end_read_request()
2829 mdname(conf->mddev), in raid5_end_read_request()
2837 mdname(conf->mddev), in raid5_end_read_request()
2841 > conf->max_nr_stripes) { in raid5_end_read_request()
2844 mdname(conf->mddev), in raid5_end_read_request()
2846 conf->max_nr_stripes); in raid5_end_read_request()
2848 mdname(conf->mddev), rdev->bdev); in raid5_end_read_request()
2869 rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), 0))) in raid5_end_read_request()
2870 md_error(conf->mddev, rdev); in raid5_end_read_request()
2873 rdev_dec_pending(rdev, conf->mddev); in raid5_end_read_request()
2883 struct r5conf *conf = sh->raid_conf; in raid5_end_write_request() local
2892 rdev = rdev_pend_deref(conf->disks[i].rdev); in raid5_end_write_request()
2896 rdev = rdev_pend_deref(conf->disks[i].replacement); in raid5_end_write_request()
2904 rdev = rdev_pend_deref(conf->disks[i].rdev); in raid5_end_write_request()
2918 md_error(conf->mddev, rdev); in raid5_end_write_request()
2920 RAID5_STRIPE_SECTORS(conf), in raid5_end_write_request()
2931 RAID5_STRIPE_SECTORS(conf), in raid5_end_write_request()
2942 rdev_dec_pending(rdev, conf->mddev); in raid5_end_write_request()
2959 struct r5conf *conf = mddev->private; in raid5_error() local
2966 spin_lock_irqsave(&conf->device_lock, flags); in raid5_error()
2969 mddev->degraded = raid5_calc_degraded(conf); in raid5_error()
2971 if (has_failed(conf)) { in raid5_error()
2972 set_bit(MD_BROKEN, &conf->mddev->flags); in raid5_error()
2973 conf->recovery_disabled = mddev->recovery_disabled; in raid5_error()
2976 mdname(mddev), mddev->degraded, conf->raid_disks); in raid5_error()
2979 mdname(mddev), conf->raid_disks - mddev->degraded); in raid5_error()
2982 spin_unlock_irqrestore(&conf->device_lock, flags); in raid5_error()
2995 sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, in raid5_compute_sector() argument
3005 int algorithm = previous ? conf->prev_algo in raid5_compute_sector()
3006 : conf->algorithm; in raid5_compute_sector()
3007 int sectors_per_chunk = previous ? conf->prev_chunk_sectors in raid5_compute_sector()
3008 : conf->chunk_sectors; in raid5_compute_sector()
3009 int raid_disks = previous ? conf->previous_raid_disks in raid5_compute_sector()
3010 : conf->raid_disks; in raid5_compute_sector()
3011 int data_disks = raid_disks - conf->max_degraded; in raid5_compute_sector()
3031 switch(conf->level) { in raid5_compute_sector()
3199 struct r5conf *conf = sh->raid_conf; in raid5_compute_blocknr() local
3201 int data_disks = raid_disks - conf->max_degraded; in raid5_compute_blocknr()
3203 int sectors_per_chunk = previous ? conf->prev_chunk_sectors in raid5_compute_blocknr()
3204 : conf->chunk_sectors; in raid5_compute_blocknr()
3205 int algorithm = previous ? conf->prev_algo in raid5_compute_blocknr()
3206 : conf->algorithm; in raid5_compute_blocknr()
3219 switch(conf->level) { in raid5_compute_blocknr()
3306 check = raid5_compute_sector(conf, r_sector, in raid5_compute_blocknr()
3311 mdname(conf->mddev)); in raid5_compute_blocknr()
3338 * stripe, we need to reserve (conf->raid_disk + 1) pages per stripe
3340 * operation, we only need (conf->max_degraded + 1) pages per stripe.
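
For scale, the two reservation formulas quoted in the comment fragment above work out as follows on an assumed 6-device RAID6 geometry (raid_disks = 6, max_degraded = 2):

    raid_disks   + 1  =  6 + 1  =  7 pages per stripe
    max_degraded + 1  =  2 + 1  =  3 pages per stripe
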
3355 static inline bool delay_towrite(struct r5conf *conf, in delay_towrite() argument
3364 if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) && in delay_towrite()
3378 struct r5conf *conf = sh->raid_conf; in schedule_reconstruction() local
3379 int level = conf->level; in schedule_reconstruction()
3393 if (dev->towrite && !delay_towrite(conf, dev, s)) { in schedule_reconstruction()
3419 if (s->locked + conf->max_degraded == disks) in schedule_reconstruction()
3421 atomic_inc(&conf->pending_full_writes); in schedule_reconstruction()
3485 struct r5conf *conf = sh->raid_conf; in stripe_bio_overlaps() local
3509 if (forwrite && raid5_has_ppl(conf)) { in stripe_bio_overlaps()
3535 if (first + conf->chunk_sectors * (count - 1) != last) in stripe_bio_overlaps()
3545 struct r5conf *conf = sh->raid_conf; in __add_stripe_bio() local
3568 md_write_inc(conf->mddev, bi); in __add_stripe_bio()
3574 sector < sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf) && in __add_stripe_bio()
3576 bi = r5_next_bio(conf, bi, sh->dev[dd_idx].sector)) { in __add_stripe_bio()
3580 if (sector >= sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf)) in __add_stripe_bio()
3589 if (conf->mddev->bitmap && firstwrite && !sh->batch_head) { in __add_stripe_bio()
3590 sh->bm_seq = conf->seq_flush+1; in __add_stripe_bio()
3616 static void end_reshape(struct r5conf *conf);
3618 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, in stripe_set_idx() argument
3622 previous ? conf->prev_chunk_sectors : conf->chunk_sectors; in stripe_set_idx()
3625 int disks = previous ? conf->previous_raid_disks : conf->raid_disks; in stripe_set_idx()
3627 raid5_compute_sector(conf, in stripe_set_idx()
3628 stripe * (disks - conf->max_degraded) in stripe_set_idx()
3635 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, in handle_failed_stripe() argument
3646 rdev = rcu_dereference(conf->disks[i].rdev); in handle_failed_stripe()
3657 RAID5_STRIPE_SECTORS(conf), 0)) in handle_failed_stripe()
3658 md_error(conf->mddev, rdev); in handle_failed_stripe()
3659 rdev_dec_pending(rdev, conf->mddev); in handle_failed_stripe()
3672 wake_up(&conf->wait_for_overlap); in handle_failed_stripe()
3675 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { in handle_failed_stripe()
3676 struct bio *nextbi = r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3678 md_write_end(conf->mddev); in handle_failed_stripe()
3691 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { in handle_failed_stripe()
3692 struct bio *bi2 = r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3694 md_write_end(conf->mddev); in handle_failed_stripe()
3703 s->failed > conf->max_degraded && in handle_failed_stripe()
3711 wake_up(&conf->wait_for_overlap); in handle_failed_stripe()
3715 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { in handle_failed_stripe()
3717 r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3732 if (atomic_dec_and_test(&conf->pending_full_writes)) in handle_failed_stripe()
3733 md_wakeup_thread(conf->mddev->thread); in handle_failed_stripe()
3737 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, in handle_failed_sync() argument
3746 wake_up(&conf->wait_for_overlap); in handle_failed_sync()
3756 if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) { in handle_failed_sync()
3761 for (i = 0; i < conf->raid_disks; i++) { in handle_failed_sync()
3762 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); in handle_failed_sync()
3767 RAID5_STRIPE_SECTORS(conf), 0)) in handle_failed_sync()
3769 rdev = rcu_dereference(conf->disks[i].replacement); in handle_failed_sync()
3774 RAID5_STRIPE_SECTORS(conf), 0)) in handle_failed_sync()
3779 conf->recovery_disabled = in handle_failed_sync()
3780 conf->mddev->recovery_disabled; in handle_failed_sync()
3782 md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), !abort); in handle_failed_sync()
4036 static void handle_stripe_clean_event(struct r5conf *conf, in handle_stripe_clean_event() argument
4067 dev->sector + RAID5_STRIPE_SECTORS(conf)) { in handle_stripe_clean_event()
4068 wbi2 = r5_next_bio(conf, wbi, dev->sector); in handle_stripe_clean_event()
4069 md_write_end(conf->mddev); in handle_stripe_clean_event()
4109 spin_lock_irq(conf->hash_locks + hash); in handle_stripe_clean_event()
4111 spin_unlock_irq(conf->hash_locks + hash); in handle_stripe_clean_event()
4126 if (atomic_dec_and_test(&conf->pending_full_writes)) in handle_stripe_clean_event()
4127 md_wakeup_thread(conf->mddev->thread); in handle_stripe_clean_event()
4148 static int handle_stripe_dirtying(struct r5conf *conf, in handle_stripe_dirtying() argument
4154 sector_t recovery_cp = conf->mddev->recovery_cp; in handle_stripe_dirtying()
4163 if (conf->rmw_level == PARITY_DISABLE_RMW || in handle_stripe_dirtying()
4171 conf->rmw_level, (unsigned long long)recovery_cp, in handle_stripe_dirtying()
4176 if (((dev->towrite && !delay_towrite(conf, dev, s)) || in handle_stripe_dirtying()
4203 if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_PREFER_RMW)) && rmw > 0) { in handle_stripe_dirtying()
4205 if (conf->mddev->queue) in handle_stripe_dirtying()
4206 blk_add_trace_msg(conf->mddev->queue, in handle_stripe_dirtying()
4227 &conf->cache_state)) { in handle_stripe_dirtying()
4241 if (((dev->towrite && !delay_towrite(conf, dev, s)) || in handle_stripe_dirtying()
4260 if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_PREFER_RMW)) && rcw > 0) { in handle_stripe_dirtying()
4285 if (rcw && conf->mddev->queue) in handle_stripe_dirtying()
4286 blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", in handle_stripe_dirtying()
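
The two branch conditions quoted from handle_stripe_dirtying() (the "rmw < rcw ..." and "rcw < rmw ..." tests) boil down to a tie-breaking rule once rmw and rcw, roughly the number of blocks each strategy would still have to read in, have been counted. A standalone sketch of just that rule; the enum values and the early-out for PARITY_DISABLE_RMW are assumptions made for illustration, not lifted from this listing:

    /* Returns 1 to prefer read-modify-write, 0 to prefer reconstruct-write. */
    enum rmw_level { PARITY_DISABLE_RMW, PARITY_ENABLE_RMW, PARITY_PREFER_RMW };

    int choose_rmw(int rmw, int rcw, enum rmw_level level)
    {
            if (level == PARITY_DISABLE_RMW)
                    return 0;       /* read-modify-write is never used when disabled    */
            if (rmw > 0 && (rmw < rcw || (rmw == rcw && level == PARITY_PREFER_RMW)))
                    return 1;       /* rmw needs fewer reads, or ties and is preferred  */
            return 0;               /* otherwise reconstruct-write wins                 */
    }
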
4312 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks5() argument
4373 atomic64_add(RAID5_STRIPE_SECTORS(conf), &conf->mddev->resync_mismatches); in handle_parity_checks5()
4374 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { in handle_parity_checks5()
4378 "%llu-%llu\n", mdname(conf->mddev), in handle_parity_checks5()
4381 RAID5_STRIPE_SECTORS(conf)); in handle_parity_checks5()
4404 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks6() argument
4500 mdname(conf->mddev), in handle_parity_checks6()
4537 atomic64_add(RAID5_STRIPE_SECTORS(conf), &conf->mddev->resync_mismatches); in handle_parity_checks6()
4538 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { in handle_parity_checks6()
4542 "%llu-%llu\n", mdname(conf->mddev), in handle_parity_checks6()
4545 RAID5_STRIPE_SECTORS(conf)); in handle_parity_checks6()
4580 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) in handle_stripe_expansion() argument
4597 sector_t s = raid5_compute_sector(conf, bn, 0, in handle_stripe_expansion()
4599 sh2 = raid5_get_active_stripe(conf, NULL, s, in handle_stripe_expansion()
4618 sh->dev[i].offset, RAID5_STRIPE_SIZE(conf), in handle_stripe_expansion()
4623 for (j = 0; j < conf->raid_disks; j++) in handle_stripe_expansion()
4628 if (j == conf->raid_disks) { in handle_stripe_expansion()
4655 struct r5conf *conf = sh->raid_conf; in analyse_stripe() local
4667 s->log_failed = r5l_log_disk_error(conf); in analyse_stripe()
4715 rdev = rcu_dereference(conf->disks[i].replacement); in analyse_stripe()
4717 rdev->recovery_offset >= sh->sector + RAID5_STRIPE_SECTORS(conf) && in analyse_stripe()
4718 !is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), in analyse_stripe()
4726 rdev = rcu_dereference(conf->disks[i].rdev); in analyse_stripe()
4732 is_bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), in analyse_stripe()
4759 else if (sh->sector + RAID5_STRIPE_SECTORS(conf) <= rdev->recovery_offset) in analyse_stripe()
4774 conf->disks[i].rdev); in analyse_stripe()
4787 conf->disks[i].rdev); in analyse_stripe()
4796 conf->disks[i].replacement); in analyse_stripe()
4818 conf->disks[i].replacement); in analyse_stripe()
4839 sh->sector >= conf->mddev->recovery_cp || in analyse_stripe()
4840 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery))) in analyse_stripe()
4948 struct r5conf *conf = sh->raid_conf; in handle_stripe() local
5005 test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) { in handle_stripe()
5017 rdev_dec_pending(s.blocked_rdev, conf->mddev); in handle_stripe()
5037 if (s.failed > conf->max_degraded || in handle_stripe()
5043 handle_failed_stripe(conf, sh, &s, disks); in handle_stripe()
5045 handle_failed_sync(conf, sh, &s); in handle_stripe()
5098 || conf->level < 6; in handle_stripe()
5109 handle_stripe_clean_event(conf, sh, disks); in handle_stripe()
5112 r5c_handle_cached_data_endio(conf, sh, disks); in handle_stripe()
5131 r5c_finish_stripe_write_out(conf, sh, &s); in handle_stripe()
5143 if (!r5c_is_writeback(conf->log)) { in handle_stripe()
5145 handle_stripe_dirtying(conf, sh, &s, disks); in handle_stripe()
5151 ret = r5c_try_caching_write(conf, sh, &s, in handle_stripe()
5164 ret = handle_stripe_dirtying(conf, sh, &s, in handle_stripe()
5181 if (conf->level == 6) in handle_stripe()
5182 handle_parity_checks6(conf, sh, &s, disks); in handle_stripe()
5184 handle_parity_checks5(conf, sh, &s, disks); in handle_stripe()
5191 for (i = 0; i < conf->raid_disks; i++) in handle_stripe()
5205 md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1); in handle_stripe()
5208 wake_up(&conf->wait_for_overlap); in handle_stripe()
5214 if (s.failed <= conf->max_degraded && !conf->mddev->ro) in handle_stripe()
5235 = raid5_get_active_stripe(conf, NULL, sh->sector, in handle_stripe()
5246 atomic_inc(&conf->preread_active_stripes); in handle_stripe()
5255 for (i = conf->raid_disks; i--; ) { in handle_stripe()
5265 sh->disks = conf->raid_disks; in handle_stripe()
5266 stripe_set_idx(sh->sector, conf, 0, sh); in handle_stripe()
5270 atomic_dec(&conf->reshape_stripes); in handle_stripe()
5271 wake_up(&conf->wait_for_overlap); in handle_stripe()
5272 md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1); in handle_stripe()
5277 handle_stripe_expansion(conf, sh); in handle_stripe()
5282 if (conf->mddev->external) in handle_stripe()
5284 conf->mddev); in handle_stripe()
5291 conf->mddev); in handle_stripe()
5300 rdev = rdev_pend_deref(conf->disks[i].rdev); in handle_stripe()
5302 RAID5_STRIPE_SECTORS(conf), 0)) in handle_stripe()
5303 md_error(conf->mddev, rdev); in handle_stripe()
5304 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5307 rdev = rdev_pend_deref(conf->disks[i].rdev); in handle_stripe()
5309 RAID5_STRIPE_SECTORS(conf), 0); in handle_stripe()
5310 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5313 rdev = rdev_pend_deref(conf->disks[i].replacement); in handle_stripe()
5316 rdev = rdev_pend_deref(conf->disks[i].rdev); in handle_stripe()
5318 RAID5_STRIPE_SECTORS(conf), 0); in handle_stripe()
5319 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5333 atomic_dec(&conf->preread_active_stripes); in handle_stripe()
5334 if (atomic_read(&conf->preread_active_stripes) < in handle_stripe()
5336 md_wakeup_thread(conf->mddev->thread); in handle_stripe()
5342 static void raid5_activate_delayed(struct r5conf *conf) in raid5_activate_delayed() argument
5343 __must_hold(&conf->device_lock) in raid5_activate_delayed()
5345 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { in raid5_activate_delayed()
5346 while (!list_empty(&conf->delayed_list)) { in raid5_activate_delayed()
5347 struct list_head *l = conf->delayed_list.next; in raid5_activate_delayed()
5353 atomic_inc(&conf->preread_active_stripes); in raid5_activate_delayed()
5354 list_add_tail(&sh->lru, &conf->hold_list); in raid5_activate_delayed()
5360 static void activate_bit_delay(struct r5conf *conf, in activate_bit_delay() argument
5362 __must_hold(&conf->device_lock) in activate_bit_delay()
5365 list_add(&head, &conf->bitmap_list); in activate_bit_delay()
5366 list_del_init(&conf->bitmap_list); in activate_bit_delay()
5373 __release_stripe(conf, sh, &temp_inactive_list[hash]); in activate_bit_delay()
5379 struct r5conf *conf = mddev->private; in in_chunk_boundary() local
5384 chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors); in in_chunk_boundary()
5393 static void add_bio_to_retry(struct bio *bi,struct r5conf *conf) in add_bio_to_retry() argument
5397 spin_lock_irqsave(&conf->device_lock, flags); in add_bio_to_retry()
5399 bi->bi_next = conf->retry_read_aligned_list; in add_bio_to_retry()
5400 conf->retry_read_aligned_list = bi; in add_bio_to_retry()
5402 spin_unlock_irqrestore(&conf->device_lock, flags); in add_bio_to_retry()
5403 md_wakeup_thread(conf->mddev->thread); in add_bio_to_retry()
5406 static struct bio *remove_bio_from_retry(struct r5conf *conf, in remove_bio_from_retry() argument
5411 bi = conf->retry_read_aligned; in remove_bio_from_retry()
5413 *offset = conf->retry_read_offset; in remove_bio_from_retry()
5414 conf->retry_read_aligned = NULL; in remove_bio_from_retry()
5417 bi = conf->retry_read_aligned_list; in remove_bio_from_retry()
5419 conf->retry_read_aligned_list = bi->bi_next; in remove_bio_from_retry()
5438 struct r5conf *conf = mddev->private; in raid5_align_endio() local
5443 rdev_dec_pending(rdev, conf->mddev); in raid5_align_endio()
5447 if (atomic_dec_and_test(&conf->active_aligned_reads)) in raid5_align_endio()
5448 wake_up(&conf->wait_for_quiescent); in raid5_align_endio()
5454 add_bio_to_retry(raid_bi, conf); in raid5_align_endio()
5459 struct r5conf *conf = mddev->private; in raid5_read_one_chunk() local
5471 sector = raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 0, in raid5_read_one_chunk()
5476 if (r5c_big_stripe_cached(conf, sector)) in raid5_read_one_chunk()
5479 rdev = rcu_dereference(conf->disks[dd_idx].replacement); in raid5_read_one_chunk()
5482 rdev = rcu_dereference(conf->disks[dd_idx].rdev); in raid5_read_one_chunk()
5513 if (conf->quiesce == 0) { in raid5_read_one_chunk()
5514 atomic_inc(&conf->active_aligned_reads); in raid5_read_one_chunk()
5518 if (!did_inc || smp_load_acquire(&conf->quiesce) != 0) { in raid5_read_one_chunk()
5522 if (did_inc && atomic_dec_and_test(&conf->active_aligned_reads)) in raid5_read_one_chunk()
5523 wake_up(&conf->wait_for_quiescent); in raid5_read_one_chunk()
5524 spin_lock_irq(&conf->device_lock); in raid5_read_one_chunk()
5525 wait_event_lock_irq(conf->wait_for_quiescent, conf->quiesce == 0, in raid5_read_one_chunk()
5526 conf->device_lock); in raid5_read_one_chunk()
5527 atomic_inc(&conf->active_aligned_reads); in raid5_read_one_chunk()
5528 spin_unlock_irq(&conf->device_lock); in raid5_read_one_chunk()
5550 struct r5conf *conf = mddev->private; in chunk_aligned_read() local
5551 split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split); in chunk_aligned_read()
5573 static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group) in __get_priority_stripe() argument
5574 __must_hold(&conf->device_lock) in __get_priority_stripe()
5579 bool second_try = !r5c_is_writeback(conf->log) && in __get_priority_stripe()
5580 !r5l_log_disk_error(conf); in __get_priority_stripe()
5581 bool try_loprio = test_bit(R5C_LOG_TIGHT, &conf->cache_state) || in __get_priority_stripe()
5582 r5l_log_disk_error(conf); in __get_priority_stripe()
5587 if (conf->worker_cnt_per_group == 0) { in __get_priority_stripe()
5588 handle_list = try_loprio ? &conf->loprio_list : in __get_priority_stripe()
5589 &conf->handle_list; in __get_priority_stripe()
5591 handle_list = try_loprio ? &conf->worker_groups[group].loprio_list : in __get_priority_stripe()
5592 &conf->worker_groups[group].handle_list; in __get_priority_stripe()
5593 wg = &conf->worker_groups[group]; in __get_priority_stripe()
5596 for (i = 0; i < conf->group_cnt; i++) { in __get_priority_stripe()
5597 handle_list = try_loprio ? &conf->worker_groups[i].loprio_list : in __get_priority_stripe()
5598 &conf->worker_groups[i].handle_list; in __get_priority_stripe()
5599 wg = &conf->worker_groups[i]; in __get_priority_stripe()
5608 list_empty(&conf->hold_list) ? "empty" : "busy", in __get_priority_stripe()
5609 atomic_read(&conf->pending_full_writes), conf->bypass_count); in __get_priority_stripe()
5614 if (list_empty(&conf->hold_list)) in __get_priority_stripe()
5615 conf->bypass_count = 0; in __get_priority_stripe()
5617 if (conf->hold_list.next == conf->last_hold) in __get_priority_stripe()
5618 conf->bypass_count++; in __get_priority_stripe()
5620 conf->last_hold = conf->hold_list.next; in __get_priority_stripe()
5621 conf->bypass_count -= conf->bypass_threshold; in __get_priority_stripe()
5622 if (conf->bypass_count < 0) in __get_priority_stripe()
5623 conf->bypass_count = 0; in __get_priority_stripe()
5626 } else if (!list_empty(&conf->hold_list) && in __get_priority_stripe()
5627 ((conf->bypass_threshold && in __get_priority_stripe()
5628 conf->bypass_count > conf->bypass_threshold) || in __get_priority_stripe()
5629 atomic_read(&conf->pending_full_writes) == 0)) { in __get_priority_stripe()
5631 list_for_each_entry(tmp, &conf->hold_list, lru) { in __get_priority_stripe()
5632 if (conf->worker_cnt_per_group == 0 || in __get_priority_stripe()
5642 conf->bypass_count -= conf->bypass_threshold; in __get_priority_stripe()
5643 if (conf->bypass_count < 0) in __get_priority_stripe()
5644 conf->bypass_count = 0; in __get_priority_stripe()
5678 struct r5conf *conf = mddev->private; in raid5_unplug() local
5683 spin_lock_irq(&conf->device_lock); in raid5_unplug()
5699 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); in raid5_unplug()
5702 spin_unlock_irq(&conf->device_lock); in raid5_unplug()
5704 release_inactive_stripe_list(conf, cb->temp_inactive_list, in raid5_unplug()
5741 struct r5conf *conf = mddev->private; in make_discard_request() local
5754 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1); in make_discard_request()
5759 stripe_sectors = conf->chunk_sectors * in make_discard_request()
5760 (conf->raid_disks - conf->max_degraded); in make_discard_request()
5765 logical_sector *= conf->chunk_sectors; in make_discard_request()
5766 last_sector *= conf->chunk_sectors; in make_discard_request()
5769 logical_sector += RAID5_STRIPE_SECTORS(conf)) { in make_discard_request()
5773 sh = raid5_get_active_stripe(conf, NULL, logical_sector, 0); in make_discard_request()
5774 prepare_to_wait(&conf->wait_for_overlap, &w, in make_discard_request()
5784 for (d = 0; d < conf->raid_disks; d++) { in make_discard_request()
5796 finish_wait(&conf->wait_for_overlap, &w); in make_discard_request()
5798 for (d = 0; d < conf->raid_disks; d++) { in make_discard_request()
5808 if (conf->mddev->bitmap) { in make_discard_request()
5809 sh->bm_seq = conf->seq_flush + 1; in make_discard_request()
5816 atomic_inc(&conf->preread_active_stripes); in make_discard_request()
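
The stripe_sectors computation a few lines above gives the span of one full stripe in array sectors, and in the full function the start and end of the discard are aligned inward to those boundaries, so only completely covered stripes are trimmed. Worked out for an assumed geometry of 512 KiB chunks (1024 sectors) on a 6-device RAID6 (max_degraded = 2):

    stripe_sectors = 1024 * (6 - 2) = 4096 sectors = 2 MiB per full stripe
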
5837 static bool stripe_ahead_of_reshape(struct mddev *mddev, struct r5conf *conf, in stripe_ahead_of_reshape() argument
5852 spin_lock_irq(&conf->device_lock); in stripe_ahead_of_reshape()
5855 conf->reshape_progress)) in stripe_ahead_of_reshape()
5859 spin_unlock_irq(&conf->device_lock); in stripe_ahead_of_reshape()
5864 static int add_all_stripe_bios(struct r5conf *conf, in add_all_stripe_bios() argument
5905 RAID5_STRIPE_SHIFT(conf), ctx->sectors_to_do); in add_all_stripe_bios()
5934 struct r5conf *conf, sector_t logical_sector) in get_reshape_loc() argument
5946 spin_lock_irq(&conf->device_lock); in get_reshape_loc()
5947 reshape_progress = conf->reshape_progress; in get_reshape_loc()
5948 reshape_safe = conf->reshape_safe; in get_reshape_loc()
5949 spin_unlock_irq(&conf->device_lock); in get_reshape_loc()
5962 struct r5conf *conf = mddev->private; in raid5_bitmap_sector() local
5971 sectors_per_chunk = conf->chunk_sectors * in raid5_bitmap_sector()
5972 (conf->raid_disks - conf->max_degraded); in raid5_bitmap_sector()
5976 start = raid5_compute_sector(conf, start, 0, &dd_idx, NULL); in raid5_bitmap_sector()
5977 end = raid5_compute_sector(conf, end, 0, &dd_idx, NULL); in raid5_bitmap_sector()
5983 loc = get_reshape_loc(mddev, conf, prev_start); in raid5_bitmap_sector()
5990 sectors_per_chunk = conf->prev_chunk_sectors * in raid5_bitmap_sector()
5991 (conf->previous_raid_disks - conf->max_degraded); in raid5_bitmap_sector()
5995 prev_start = raid5_compute_sector(conf, prev_start, 1, &dd_idx, NULL); in raid5_bitmap_sector()
5996 prev_end = raid5_compute_sector(conf, prev_end, 1, &dd_idx, NULL); in raid5_bitmap_sector()
6008 struct r5conf *conf, struct stripe_request_ctx *ctx, in make_stripe_request() argument
6018 seq = read_seqcount_begin(&conf->gen_lock); in make_stripe_request()
6020 if (unlikely(conf->reshape_progress != MaxSector)) { in make_stripe_request()
6021 enum reshape_loc loc = get_reshape_loc(mddev, conf, in make_stripe_request()
6031 new_sector = raid5_compute_sector(conf, logical_sector, previous, in make_stripe_request()
6040 sh = raid5_get_active_stripe(conf, ctx, new_sector, flags); in make_stripe_request()
6048 stripe_ahead_of_reshape(mddev, conf, sh)) { in make_stripe_request()
6061 if (read_seqcount_retry(&conf->gen_lock, seq)) { in make_stripe_request()
6068 !add_all_stripe_bios(conf, ctx, sh, bi, rw, previous)) { in make_stripe_request()
6079 stripe_add_to_batch_list(conf, sh, ctx->batch_last); in make_stripe_request()
6097 atomic_inc(&conf->preread_active_stripes); in make_stripe_request()
6120 static sector_t raid5_bio_lowest_chunk_sector(struct r5conf *conf, in raid5_bio_lowest_chunk_sector() argument
6123 int sectors_per_chunk = conf->chunk_sectors; in raid5_bio_lowest_chunk_sector()
6124 int raid_disks = conf->raid_disks; in raid5_bio_lowest_chunk_sector()
6128 sector_t r_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1); in raid5_bio_lowest_chunk_sector()
6132 sector = raid5_compute_sector(conf, r_sector, 0, &dd_idx, &sh); in raid5_bio_lowest_chunk_sector()
6151 struct r5conf *conf = mddev->private; in raid5_make_request() local
6159 int ret = log_handle_flush_request(conf, bi); in raid5_make_request()
6195 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1); in raid5_make_request()
6201 RAID5_STRIPE_SECTORS(conf)); in raid5_make_request()
6209 (conf->reshape_progress != MaxSector) && in raid5_make_request()
6210 get_reshape_loc(mddev, conf, logical_sector) == LOC_INSIDE_RESHAPE) { in raid5_make_request()
6225 if (likely(conf->reshape_progress == MaxSector)) in raid5_make_request()
6226 logical_sector = raid5_bio_lowest_chunk_sector(conf, bi); in raid5_make_request()
6227 s = (logical_sector - ctx.first_sector) >> RAID5_STRIPE_SHIFT(conf); in raid5_make_request()
6229 add_wait_queue(&conf->wait_for_overlap, &wait); in raid5_make_request()
6231 res = make_stripe_request(mddev, conf, &ctx, logical_sector, in raid5_make_request()
6262 (s << RAID5_STRIPE_SHIFT(conf)); in raid5_make_request()
6264 remove_wait_queue(&conf->wait_for_overlap, &wait); in raid5_make_request()
6288 struct r5conf *conf = mddev->private; in reshape_request() local
6292 int raid_disks = conf->previous_raid_disks; in reshape_request()
6293 int data_disks = raid_disks - conf->max_degraded; in reshape_request()
6294 int new_data_disks = conf->raid_disks - conf->max_degraded; in reshape_request()
6306 conf->reshape_progress < raid5_size(mddev, 0, 0)) { in reshape_request()
6308 - conf->reshape_progress; in reshape_request()
6310 conf->reshape_progress == MaxSector) { in reshape_request()
6314 conf->reshape_progress > 0) in reshape_request()
6315 sector_nr = conf->reshape_progress; in reshape_request()
6331 reshape_sectors = max(conf->chunk_sectors, conf->prev_chunk_sectors); in reshape_request()
6339 writepos = conf->reshape_progress; in reshape_request()
6341 readpos = conf->reshape_progress; in reshape_request()
6343 safepos = conf->reshape_safe; in reshape_request()
6366 if (WARN_ON(conf->reshape_progress == 0)) in reshape_request()
6401 if (conf->min_offset_diff < 0) { in reshape_request()
6402 safepos += -conf->min_offset_diff; in reshape_request()
6403 readpos += -conf->min_offset_diff; in reshape_request()
6405 writepos += conf->min_offset_diff; in reshape_request()
6410 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { in reshape_request()
6412 wait_event(conf->wait_for_overlap, in reshape_request()
6413 atomic_read(&conf->reshape_stripes)==0 in reshape_request()
6415 if (atomic_read(&conf->reshape_stripes) != 0) in reshape_request()
6417 mddev->reshape_position = conf->reshape_progress; in reshape_request()
6428 conf->reshape_checkpoint = jiffies; in reshape_request()
6435 spin_lock_irq(&conf->device_lock); in reshape_request()
6436 conf->reshape_safe = mddev->reshape_position; in reshape_request()
6437 spin_unlock_irq(&conf->device_lock); in reshape_request()
6438 wake_up(&conf->wait_for_overlap); in reshape_request()
6443 for (i = 0; i < reshape_sectors; i += RAID5_STRIPE_SECTORS(conf)) { in reshape_request()
6446 sh = raid5_get_active_stripe(conf, NULL, stripe_addr+i, in reshape_request()
6449 atomic_inc(&conf->reshape_stripes); in reshape_request()
6457 if (conf->level == 6 && in reshape_request()
6465 memset(page_address(sh->dev[j].page), 0, RAID5_STRIPE_SIZE(conf)); in reshape_request()
6475 spin_lock_irq(&conf->device_lock); in reshape_request()
6477 conf->reshape_progress -= reshape_sectors * new_data_disks; in reshape_request()
6479 conf->reshape_progress += reshape_sectors * new_data_disks; in reshape_request()
6480 spin_unlock_irq(&conf->device_lock); in reshape_request()
6487 raid5_compute_sector(conf, stripe_addr*(new_data_disks), in reshape_request()
6490 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors) in reshape_request()
6496 sh = raid5_get_active_stripe(conf, NULL, first_sector, in reshape_request()
6501 first_sector += RAID5_STRIPE_SECTORS(conf); in reshape_request()
6521 wait_event(conf->wait_for_overlap, in reshape_request()
6522 atomic_read(&conf->reshape_stripes) == 0 in reshape_request()
6524 if (atomic_read(&conf->reshape_stripes) != 0) in reshape_request()
6526 mddev->reshape_position = conf->reshape_progress; in reshape_request()
6536 conf->reshape_checkpoint = jiffies; in reshape_request()
6544 spin_lock_irq(&conf->device_lock); in reshape_request()
6545 conf->reshape_safe = mddev->reshape_position; in reshape_request()
6546 spin_unlock_irq(&conf->device_lock); in reshape_request()
6547 wake_up(&conf->wait_for_overlap); in reshape_request()
6557 struct r5conf *conf = mddev->private; in raid5_sync_request() local
6568 end_reshape(conf); in raid5_sync_request()
6576 conf->fullsync = 0; in raid5_sync_request()
6583 wait_event(conf->wait_for_overlap, conf->quiesce != 2); in raid5_sync_request()
6598 if (mddev->degraded >= conf->max_degraded && in raid5_sync_request()
6605 !conf->fullsync && in raid5_sync_request()
6607 sync_blocks >= RAID5_STRIPE_SECTORS(conf)) { in raid5_sync_request()
6609 do_div(sync_blocks, RAID5_STRIPE_SECTORS(conf)); in raid5_sync_request()
6612 return sync_blocks * RAID5_STRIPE_SECTORS(conf); in raid5_sync_request()
6617 sh = raid5_get_active_stripe(conf, NULL, sector_nr, in raid5_sync_request()
6620 sh = raid5_get_active_stripe(conf, NULL, sector_nr, 0); in raid5_sync_request()
6631 for (i = 0; i < conf->raid_disks; i++) { in raid5_sync_request()
6632 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); in raid5_sync_request()
6646 return RAID5_STRIPE_SECTORS(conf); in raid5_sync_request()
6649 static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio, in retry_aligned_read() argument
6669 ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1); in retry_aligned_read()
6670 sector = raid5_compute_sector(conf, logical_sector, in retry_aligned_read()
6675 logical_sector += RAID5_STRIPE_SECTORS(conf), in retry_aligned_read()
6676 sector += RAID5_STRIPE_SECTORS(conf), in retry_aligned_read()
6683 sh = raid5_get_active_stripe(conf, NULL, sector, in retry_aligned_read()
6687 conf->retry_read_aligned = raid_bio; in retry_aligned_read()
6688 conf->retry_read_offset = scnt; in retry_aligned_read()
6694 conf->retry_read_aligned = raid_bio; in retry_aligned_read()
6695 conf->retry_read_offset = scnt; in retry_aligned_read()
6707 if (atomic_dec_and_test(&conf->active_aligned_reads)) in retry_aligned_read()
6708 wake_up(&conf->wait_for_quiescent); in retry_aligned_read()
6712 static int handle_active_stripes(struct r5conf *conf, int group, in handle_active_stripes() argument
6715 __must_hold(&conf->device_lock) in handle_active_stripes()
6722 (sh = __get_priority_stripe(conf, group)) != NULL) in handle_active_stripes()
6730 spin_unlock_irq(&conf->device_lock); in handle_active_stripes()
6731 log_flush_stripe_to_raid(conf); in handle_active_stripes()
6732 spin_lock_irq(&conf->device_lock); in handle_active_stripes()
6737 spin_unlock_irq(&conf->device_lock); in handle_active_stripes()
6739 release_inactive_stripe_list(conf, temp_inactive_list, in handle_active_stripes()
6742 r5l_flush_stripe_to_raid(conf->log); in handle_active_stripes()
6744 spin_lock_irq(&conf->device_lock); in handle_active_stripes()
6750 log_write_stripe_run(conf); in handle_active_stripes()
6754 spin_lock_irq(&conf->device_lock); in handle_active_stripes()
6757 __release_stripe(conf, batch[i], &temp_inactive_list[hash]); in handle_active_stripes()
6766 struct r5conf *conf = group->conf; in raid5_do_work() local
6767 struct mddev *mddev = conf->mddev; in raid5_do_work()
6768 int group_id = group - conf->worker_groups; in raid5_do_work()
6776 spin_lock_irq(&conf->device_lock); in raid5_do_work()
6780 released = release_stripe_list(conf, worker->temp_inactive_list); in raid5_do_work()
6782 batch_size = handle_active_stripes(conf, group_id, worker, in raid5_do_work()
6790 conf->device_lock); in raid5_do_work()
6794 spin_unlock_irq(&conf->device_lock); in raid5_do_work()
6796 flush_deferred_bios(conf); in raid5_do_work()
6798 r5l_flush_stripe_to_raid(conf->log); in raid5_do_work()
6816 struct r5conf *conf = mddev->private; in raid5d() local
6826 spin_lock_irq(&conf->device_lock); in raid5d()
6835 released = release_stripe_list(conf, conf->temp_inactive_list); in raid5d()
6837 clear_bit(R5_DID_ALLOC, &conf->cache_state); in raid5d()
6840 !list_empty(&conf->bitmap_list)) { in raid5d()
6842 conf->seq_flush++; in raid5d()
6843 spin_unlock_irq(&conf->device_lock); in raid5d()
6845 spin_lock_irq(&conf->device_lock); in raid5d()
6846 conf->seq_write = conf->seq_flush; in raid5d()
6847 activate_bit_delay(conf, conf->temp_inactive_list); in raid5d()
6849 raid5_activate_delayed(conf); in raid5d()
6851 while ((bio = remove_bio_from_retry(conf, &offset))) { in raid5d()
6853 spin_unlock_irq(&conf->device_lock); in raid5d()
6854 ok = retry_aligned_read(conf, bio, offset); in raid5d()
6855 spin_lock_irq(&conf->device_lock); in raid5d()
6861 batch_size = handle_active_stripes(conf, ANY_GROUP, NULL, in raid5d()
6862 conf->temp_inactive_list); in raid5d()
6868 spin_unlock_irq(&conf->device_lock); in raid5d()
6870 spin_lock_irq(&conf->device_lock); in raid5d()
6875 spin_unlock_irq(&conf->device_lock); in raid5d()
6876 if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) && in raid5d()
6877 mutex_trylock(&conf->cache_size_mutex)) { in raid5d()
6878 grow_one_stripe(conf, __GFP_NOWARN); in raid5d()
6882 set_bit(R5_DID_ALLOC, &conf->cache_state); in raid5d()
6883 mutex_unlock(&conf->cache_size_mutex); in raid5d()
6886 flush_deferred_bios(conf); in raid5d()
6888 r5l_flush_stripe_to_raid(conf->log); in raid5d()
6899 struct r5conf *conf; in raid5_show_stripe_cache_size() local
6902 conf = mddev->private; in raid5_show_stripe_cache_size()
6903 if (conf) in raid5_show_stripe_cache_size()
6904 ret = sprintf(page, "%d\n", conf->min_nr_stripes); in raid5_show_stripe_cache_size()
6913 struct r5conf *conf = mddev->private; in raid5_set_cache_size() local
6918 WRITE_ONCE(conf->min_nr_stripes, size); in raid5_set_cache_size()
6919 mutex_lock(&conf->cache_size_mutex); in raid5_set_cache_size()
6920 while (size < conf->max_nr_stripes && in raid5_set_cache_size()
6921 drop_one_stripe(conf)) in raid5_set_cache_size()
6923 mutex_unlock(&conf->cache_size_mutex); in raid5_set_cache_size()
6927 mutex_lock(&conf->cache_size_mutex); in raid5_set_cache_size()
6928 while (size > conf->max_nr_stripes) in raid5_set_cache_size()
6929 if (!grow_one_stripe(conf, GFP_KERNEL)) { in raid5_set_cache_size()
6930 WRITE_ONCE(conf->min_nr_stripes, conf->max_nr_stripes); in raid5_set_cache_size()
6934 mutex_unlock(&conf->cache_size_mutex); in raid5_set_cache_size()
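raid5_set_cache_size() above adjusts the stripe cache in two locked passes: first it shrinks toward the new target under conf->cache_size_mutex, then, after dropping the mutex for the elided lines in between, it grows until the target is reached, settling for whatever was actually allocated if growing fails. A simplified sketch of that shape against a hypothetical pool with grow_one()/drop_one() stand-ins for grow_one_stripe()/drop_one_stripe():

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct pool {
	struct mutex lock;
	int min_nr;	/* requested floor, read locklessly elsewhere */
	int cur_nr;	/* objects currently allocated */
};

static bool grow_one(struct pool *p) { p->cur_nr++; return true; }	/* may fail in real code */
static bool drop_one(struct pool *p) { if (!p->cur_nr) return false; p->cur_nr--; return true; }

static int pool_set_size(struct pool *p, int size)
{
	WRITE_ONCE(p->min_nr, size);

	mutex_lock(&p->lock);
	while (size < p->cur_nr && drop_one(p))
		;					/* shrink toward the new target */
	mutex_unlock(&p->lock);

	/* the driver does other work here before growing again */

	mutex_lock(&p->lock);
	while (size > p->cur_nr)
		if (!grow_one(p)) {
			WRITE_ONCE(p->min_nr, p->cur_nr);	/* settle for what we have */
			mutex_unlock(&p->lock);
			return -ENOMEM;
		}
	mutex_unlock(&p->lock);
	return 0;
}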
6943 struct r5conf *conf; in raid5_store_stripe_cache_size() local
6954 conf = mddev->private; in raid5_store_stripe_cache_size()
6955 if (!conf) in raid5_store_stripe_cache_size()
6972 struct r5conf *conf = mddev->private; in raid5_show_rmw_level() local
6973 if (conf) in raid5_show_rmw_level()
6974 return sprintf(page, "%d\n", conf->rmw_level); in raid5_show_rmw_level()
6982 struct r5conf *conf = mddev->private; in raid5_store_rmw_level() local
6985 if (!conf) in raid5_store_rmw_level()
7002 conf->rmw_level = new; in raid5_store_rmw_level()
7014 struct r5conf *conf; in raid5_show_stripe_size() local
7018 conf = mddev->private; in raid5_show_stripe_size()
7019 if (conf) in raid5_show_stripe_size()
7020 ret = sprintf(page, "%lu\n", RAID5_STRIPE_SIZE(conf)); in raid5_show_stripe_size()
7029 struct r5conf *conf; in raid5_store_stripe_size() local
7053 conf = mddev->private; in raid5_store_stripe_size()
7054 if (!conf) { in raid5_store_stripe_size()
7059 if (new == conf->stripe_size) in raid5_store_stripe_size()
7063 conf->stripe_size, new); in raid5_store_stripe_size()
7074 mutex_lock(&conf->cache_size_mutex); in raid5_store_stripe_size()
7075 size = conf->max_nr_stripes; in raid5_store_stripe_size()
7077 shrink_stripes(conf); in raid5_store_stripe_size()
7079 conf->stripe_size = new; in raid5_store_stripe_size()
7080 conf->stripe_shift = ilog2(new) - 9; in raid5_store_stripe_size()
7081 conf->stripe_sectors = new >> 9; in raid5_store_stripe_size()
7082 if (grow_stripes(conf, size)) { in raid5_store_stripe_size()
7087 mutex_unlock(&conf->cache_size_mutex); in raid5_store_stripe_size()
7109 struct r5conf *conf; in raid5_show_preread_threshold() local
7112 conf = mddev->private; in raid5_show_preread_threshold()
7113 if (conf) in raid5_show_preread_threshold()
7114 ret = sprintf(page, "%d\n", conf->bypass_threshold); in raid5_show_preread_threshold()
7122 struct r5conf *conf; in raid5_store_preread_threshold() local
7134 conf = mddev->private; in raid5_store_preread_threshold()
7135 if (!conf) in raid5_store_preread_threshold()
7137 else if (new > conf->min_nr_stripes) in raid5_store_preread_threshold()
7140 conf->bypass_threshold = new; in raid5_store_preread_threshold()
7154 struct r5conf *conf; in raid5_show_skip_copy() local
7157 conf = mddev->private; in raid5_show_skip_copy()
7158 if (conf) in raid5_show_skip_copy()
7159 ret = sprintf(page, "%d\n", conf->skip_copy); in raid5_show_skip_copy()
7167 struct r5conf *conf; in raid5_store_skip_copy() local
7180 conf = mddev->private; in raid5_store_skip_copy()
7181 if (!conf) in raid5_store_skip_copy()
7183 else if (new != conf->skip_copy) { in raid5_store_skip_copy()
7187 conf->skip_copy = new; in raid5_store_skip_copy()
7206 struct r5conf *conf = mddev->private; in stripe_cache_active_show() local
7207 if (conf) in stripe_cache_active_show()
7208 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); in stripe_cache_active_show()
7219 struct r5conf *conf; in raid5_show_group_thread_cnt() local
7222 conf = mddev->private; in raid5_show_group_thread_cnt()
7223 if (conf) in raid5_show_group_thread_cnt()
7224 ret = sprintf(page, "%d\n", conf->worker_cnt_per_group); in raid5_show_group_thread_cnt()
7229 static int alloc_thread_groups(struct r5conf *conf, int cnt,
7235 struct r5conf *conf; in raid5_store_group_thread_cnt() local
7252 conf = mddev->private; in raid5_store_group_thread_cnt()
7253 if (!conf) in raid5_store_group_thread_cnt()
7255 else if (new != conf->worker_cnt_per_group) { in raid5_store_group_thread_cnt()
7258 old_groups = conf->worker_groups; in raid5_store_group_thread_cnt()
7262 err = alloc_thread_groups(conf, new, &group_cnt, &new_groups); in raid5_store_group_thread_cnt()
7264 spin_lock_irq(&conf->device_lock); in raid5_store_group_thread_cnt()
7265 conf->group_cnt = group_cnt; in raid5_store_group_thread_cnt()
7266 conf->worker_cnt_per_group = new; in raid5_store_group_thread_cnt()
7267 conf->worker_groups = new_groups; in raid5_store_group_thread_cnt()
7268 spin_unlock_irq(&conf->device_lock); in raid5_store_group_thread_cnt()
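raid5_store_group_thread_cnt() above swaps in a new worker-group array by building it outside the lock, publishing the pointers inside a short conf->device_lock critical section, and freeing the old copy afterwards. A generic sketch of that publish-then-free pattern, with hypothetical cfg/set_nthreads() names:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct cfg { int nthreads; };

static DEFINE_SPINLOCK(cfg_lock);
static struct cfg *cur_cfg;	/* readers dereference it under cfg_lock */

static int set_nthreads(int n)
{
	struct cfg *newc, *oldc;

	/* Allocate and fill the replacement with no lock held. */
	newc = kzalloc(sizeof(*newc), GFP_KERNEL);
	if (!newc)
		return -ENOMEM;
	newc->nthreads = n;

	/* Publish with a short critical section, as with conf->worker_groups. */
	spin_lock_irq(&cfg_lock);
	oldc = cur_cfg;
	cur_cfg = newc;
	spin_unlock_irq(&cfg_lock);

	kfree(oldc);
	return 0;
}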
7303 static int alloc_thread_groups(struct r5conf *conf, int cnt, int *group_cnt, in alloc_thread_groups() argument
7332 group->conf = conf; in alloc_thread_groups()
7348 static void free_thread_groups(struct r5conf *conf) in free_thread_groups() argument
7350 if (conf->worker_groups) in free_thread_groups()
7351 kfree(conf->worker_groups[0].workers); in free_thread_groups()
7352 kfree(conf->worker_groups); in free_thread_groups()
7353 conf->worker_groups = NULL; in free_thread_groups()
7359 struct r5conf *conf = mddev->private; in raid5_size() local
7365 raid_disks = min(conf->raid_disks, conf->previous_raid_disks); in raid5_size()
7367 sectors &= ~((sector_t)conf->chunk_sectors - 1); in raid5_size()
7368 sectors &= ~((sector_t)conf->prev_chunk_sectors - 1); in raid5_size()
7369 return sectors * (raid_disks - conf->max_degraded); in raid5_size()
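The raid5_size() lines reduce to: round each member down to a whole number of chunks (for both the old and new chunk size, so the result is stable across a reshape), then multiply by the number of data disks. For example, an 8-device RAID6 (max_degraded = 2) with 1,000,000 sectors per device and 1024-sector chunks exports 1,000,000 & ~(1024 - 1) = 999,424 sectors per device, times 8 - 2 = 6 data disks, i.e. 5,996,544 sectors (about 2.86 GiB).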
7372 static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) in free_scratch_buffer() argument
7380 static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) in alloc_scratch_buffer() argument
7382 if (conf->level == 6 && !percpu->spare_page) { in alloc_scratch_buffer()
7389 max(conf->raid_disks, in alloc_scratch_buffer()
7390 conf->previous_raid_disks), in alloc_scratch_buffer()
7391 max(conf->chunk_sectors, in alloc_scratch_buffer()
7392 conf->prev_chunk_sectors) in alloc_scratch_buffer()
7393 / RAID5_STRIPE_SECTORS(conf))) { in alloc_scratch_buffer()
7394 free_scratch_buffer(conf, percpu); in alloc_scratch_buffer()
7404 struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node); in raid456_cpu_dead() local
7406 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); in raid456_cpu_dead()
7410 static void raid5_free_percpu(struct r5conf *conf) in raid5_free_percpu() argument
7412 if (!conf->percpu) in raid5_free_percpu()
7415 cpuhp_state_remove_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); in raid5_free_percpu()
7416 free_percpu(conf->percpu); in raid5_free_percpu()
7419 static void free_conf(struct r5conf *conf) in free_conf() argument
7423 log_exit(conf); in free_conf()
7425 unregister_shrinker(&conf->shrinker); in free_conf()
7426 free_thread_groups(conf); in free_conf()
7427 shrink_stripes(conf); in free_conf()
7428 raid5_free_percpu(conf); in free_conf()
7429 for (i = 0; i < conf->pool_size; i++) in free_conf()
7430 if (conf->disks[i].extra_page) in free_conf()
7431 put_page(conf->disks[i].extra_page); in free_conf()
7432 kfree(conf->disks); in free_conf()
7433 bioset_exit(&conf->bio_split); in free_conf()
7434 kfree(conf->stripe_hashtbl); in free_conf()
7435 kfree(conf->pending_data); in free_conf()
7436 kfree(conf); in free_conf()
7441 struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node); in raid456_cpu_up_prepare() local
7442 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); in raid456_cpu_up_prepare()
7444 if (alloc_scratch_buffer(conf, percpu)) { in raid456_cpu_up_prepare()
7452 static int raid5_alloc_percpu(struct r5conf *conf) in raid5_alloc_percpu() argument
7456 conf->percpu = alloc_percpu(struct raid5_percpu); in raid5_alloc_percpu()
7457 if (!conf->percpu) in raid5_alloc_percpu()
7460 err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); in raid5_alloc_percpu()
7462 conf->scribble_disks = max(conf->raid_disks, in raid5_alloc_percpu()
7463 conf->previous_raid_disks); in raid5_alloc_percpu()
7464 conf->scribble_sectors = max(conf->chunk_sectors, in raid5_alloc_percpu()
7465 conf->prev_chunk_sectors); in raid5_alloc_percpu()
7473 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); in raid5_cache_scan() local
7476 if (mutex_trylock(&conf->cache_size_mutex)) { in raid5_cache_scan()
7479 conf->max_nr_stripes > conf->min_nr_stripes) { in raid5_cache_scan()
7480 if (drop_one_stripe(conf) == 0) { in raid5_cache_scan()
7486 mutex_unlock(&conf->cache_size_mutex); in raid5_cache_scan()
7494 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); in raid5_cache_count() local
7495 int max_stripes = READ_ONCE(conf->max_nr_stripes); in raid5_cache_count()
7496 int min_stripes = READ_ONCE(conf->min_nr_stripes); in raid5_cache_count()
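raid5_cache_scan()/raid5_cache_count() above implement the two halves of the shrinker contract registered later in setup_conf(): count_objects() reports how many stripes sit above the configured floor, and scan_objects() frees up to sc->nr_to_scan of them, trylocking cache_size_mutex so it never blocks memory reclaim. A cut-down sketch against a hypothetical pool, using the same embedded struct shrinker style as the register_shrinker() call shown further down:

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/shrinker.h>

struct pool {
	struct shrinker shrinker;
	struct mutex lock;
	int max_nr, min_nr;		/* current size and the floor we keep */
};

static unsigned long pool_count(struct shrinker *s, struct shrink_control *sc)
{
	struct pool *p = container_of(s, struct pool, shrinker);
	int max = READ_ONCE(p->max_nr), min = READ_ONCE(p->min_nr);

	return max > min ? max - min : 0;	/* how many objects are reclaimable */
}

static unsigned long pool_scan(struct shrinker *s, struct shrink_control *sc)
{
	struct pool *p = container_of(s, struct pool, shrinker);
	unsigned long freed = 0;

	if (!mutex_trylock(&p->lock))		/* never block reclaim on the mutex */
		return SHRINK_STOP;
	while (freed < sc->nr_to_scan && p->max_nr > p->min_nr) {
		p->max_nr--;			/* stand-in for drop_one_stripe() */
		freed++;
	}
	mutex_unlock(&p->lock);
	return freed;
}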
7506 struct r5conf *conf; in setup_conf() local
7545 conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL); in setup_conf()
7546 if (conf == NULL) in setup_conf()
7550 conf->stripe_size = DEFAULT_STRIPE_SIZE; in setup_conf()
7551 conf->stripe_shift = ilog2(DEFAULT_STRIPE_SIZE) - 9; in setup_conf()
7552 conf->stripe_sectors = DEFAULT_STRIPE_SIZE >> 9; in setup_conf()
7554 INIT_LIST_HEAD(&conf->free_list); in setup_conf()
7555 INIT_LIST_HEAD(&conf->pending_list); in setup_conf()
7556 conf->pending_data = kcalloc(PENDING_IO_MAX, in setup_conf()
7559 if (!conf->pending_data) in setup_conf()
7562 list_add(&conf->pending_data[i].sibling, &conf->free_list); in setup_conf()
7564 if (!alloc_thread_groups(conf, 0, &group_cnt, &new_group)) { in setup_conf()
7565 conf->group_cnt = group_cnt; in setup_conf()
7566 conf->worker_cnt_per_group = 0; in setup_conf()
7567 conf->worker_groups = new_group; in setup_conf()
7570 spin_lock_init(&conf->device_lock); in setup_conf()
7571 seqcount_spinlock_init(&conf->gen_lock, &conf->device_lock); in setup_conf()
7572 mutex_init(&conf->cache_size_mutex); in setup_conf()
7574 init_waitqueue_head(&conf->wait_for_quiescent); in setup_conf()
7575 init_waitqueue_head(&conf->wait_for_stripe); in setup_conf()
7576 init_waitqueue_head(&conf->wait_for_overlap); in setup_conf()
7577 INIT_LIST_HEAD(&conf->handle_list); in setup_conf()
7578 INIT_LIST_HEAD(&conf->loprio_list); in setup_conf()
7579 INIT_LIST_HEAD(&conf->hold_list); in setup_conf()
7580 INIT_LIST_HEAD(&conf->delayed_list); in setup_conf()
7581 INIT_LIST_HEAD(&conf->bitmap_list); in setup_conf()
7582 init_llist_head(&conf->released_stripes); in setup_conf()
7583 atomic_set(&conf->active_stripes, 0); in setup_conf()
7584 atomic_set(&conf->preread_active_stripes, 0); in setup_conf()
7585 atomic_set(&conf->active_aligned_reads, 0); in setup_conf()
7586 spin_lock_init(&conf->pending_bios_lock); in setup_conf()
7587 conf->batch_bio_dispatch = true; in setup_conf()
7592 conf->batch_bio_dispatch = false; in setup_conf()
7597 conf->bypass_threshold = BYPASS_THRESHOLD; in setup_conf()
7598 conf->recovery_disabled = mddev->recovery_disabled - 1; in setup_conf()
7600 conf->raid_disks = mddev->raid_disks; in setup_conf()
7602 conf->previous_raid_disks = mddev->raid_disks; in setup_conf()
7604 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; in setup_conf()
7605 max_disks = max(conf->raid_disks, conf->previous_raid_disks); in setup_conf()
7607 conf->disks = kcalloc(max_disks, sizeof(struct disk_info), in setup_conf()
7610 if (!conf->disks) in setup_conf()
7614 conf->disks[i].extra_page = alloc_page(GFP_KERNEL); in setup_conf()
7615 if (!conf->disks[i].extra_page) in setup_conf()
7619 ret = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0); in setup_conf()
7622 conf->mddev = mddev; in setup_conf()
7625 conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL); in setup_conf()
7626 if (!conf->stripe_hashtbl) in setup_conf()
7634 spin_lock_init(conf->hash_locks); in setup_conf()
7636 spin_lock_init(conf->hash_locks + i); in setup_conf()
7639 INIT_LIST_HEAD(conf->inactive_list + i); in setup_conf()
7642 INIT_LIST_HEAD(conf->temp_inactive_list + i); in setup_conf()
7644 atomic_set(&conf->r5c_cached_full_stripes, 0); in setup_conf()
7645 INIT_LIST_HEAD(&conf->r5c_full_stripe_list); in setup_conf()
7646 atomic_set(&conf->r5c_cached_partial_stripes, 0); in setup_conf()
7647 INIT_LIST_HEAD(&conf->r5c_partial_stripe_list); in setup_conf()
7648 atomic_set(&conf->r5c_flushing_full_stripes, 0); in setup_conf()
7649 atomic_set(&conf->r5c_flushing_partial_stripes, 0); in setup_conf()
7651 conf->level = mddev->new_level; in setup_conf()
7652 conf->chunk_sectors = mddev->new_chunk_sectors; in setup_conf()
7653 ret = raid5_alloc_percpu(conf); in setup_conf()
7665 disk = conf->disks + raid_disk; in setup_conf()
7682 conf->fullsync = 1; in setup_conf()
7685 conf->level = mddev->new_level; in setup_conf()
7686 if (conf->level == 6) { in setup_conf()
7687 conf->max_degraded = 2; in setup_conf()
7689 conf->rmw_level = PARITY_ENABLE_RMW; in setup_conf()
7691 conf->rmw_level = PARITY_DISABLE_RMW; in setup_conf()
7693 conf->max_degraded = 1; in setup_conf()
7694 conf->rmw_level = PARITY_ENABLE_RMW; in setup_conf()
7696 conf->algorithm = mddev->new_layout; in setup_conf()
7697 conf->reshape_progress = mddev->reshape_position; in setup_conf()
7698 if (conf->reshape_progress != MaxSector) { in setup_conf()
7699 conf->prev_chunk_sectors = mddev->chunk_sectors; in setup_conf()
7700 conf->prev_algo = mddev->layout; in setup_conf()
7702 conf->prev_chunk_sectors = conf->chunk_sectors; in setup_conf()
7703 conf->prev_algo = conf->algorithm; in setup_conf()
7706 conf->min_nr_stripes = NR_STRIPES; in setup_conf()
7709 ((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4, in setup_conf()
7710 ((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4); in setup_conf()
7711 conf->min_nr_stripes = max(NR_STRIPES, stripes); in setup_conf()
7712 if (conf->min_nr_stripes != NR_STRIPES) in setup_conf()
7714 mdname(mddev), conf->min_nr_stripes); in setup_conf()
7716 memory = conf->min_nr_stripes * (sizeof(struct stripe_head) + in setup_conf()
7718 atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS); in setup_conf()
7719 if (grow_stripes(conf, conf->min_nr_stripes)) { in setup_conf()
7731 conf->shrinker.seeks = DEFAULT_SEEKS * conf->raid_disks * 4; in setup_conf()
7732 conf->shrinker.scan_objects = raid5_cache_scan; in setup_conf()
7733 conf->shrinker.count_objects = raid5_cache_count; in setup_conf()
7734 conf->shrinker.batch = 128; in setup_conf()
7735 conf->shrinker.flags = 0; in setup_conf()
7736 ret = register_shrinker(&conf->shrinker, "md-raid5:%s", mdname(mddev)); in setup_conf()
7744 rcu_assign_pointer(conf->thread, in setup_conf()
7746 if (!conf->thread) { in setup_conf()
7753 return conf; in setup_conf()
7756 if (conf) in setup_conf()
7757 free_conf(conf); in setup_conf()
7787 static void raid5_set_io_opt(struct r5conf *conf) in raid5_set_io_opt() argument
7789 blk_queue_io_opt(conf->mddev->queue, (conf->chunk_sectors << 9) * in raid5_set_io_opt()
7790 (conf->raid_disks - conf->max_degraded)); in raid5_set_io_opt()
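raid5_set_io_opt() advertises one full stripe as the optimal I/O size: the chunk size in bytes times the number of data disks. For a 6-disk RAID5 (max_degraded = 1) with 1024-sector (512 KiB) chunks that is 512 KiB * 5 = 2.5 MiB, so a well-aligned 2.5 MiB write covers every data block of a stripe and avoids a read-modify-write of parity.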
7795 struct r5conf *conf; in raid5_run() local
7935 conf = setup_conf(mddev); in raid5_run()
7937 conf = mddev->private; in raid5_run()
7939 if (IS_ERR(conf)) in raid5_run()
7940 return PTR_ERR(conf); in raid5_run()
7952 conf->min_offset_diff = min_offset_diff; in raid5_run()
7953 rcu_assign_pointer(mddev->thread, conf->thread); in raid5_run()
7954 rcu_assign_pointer(conf->thread, NULL); in raid5_run()
7955 mddev->private = conf; in raid5_run()
7957 for (i = 0; i < conf->raid_disks && conf->previous_raid_disks; in raid5_run()
7959 rdev = rdev_mdlock_deref(mddev, conf->disks[i].rdev); in raid5_run()
7960 if (!rdev && conf->disks[i].replacement) { in raid5_run()
7963 conf->disks[i].replacement); in raid5_run()
7964 conf->disks[i].replacement = NULL; in raid5_run()
7966 rcu_assign_pointer(conf->disks[i].rdev, rdev); in raid5_run()
7970 if (rcu_access_pointer(conf->disks[i].replacement) && in raid5_run()
7971 conf->reshape_progress != MaxSector) { in raid5_run()
7994 conf->algorithm, in raid5_run()
7995 conf->raid_disks, in raid5_run()
7996 conf->max_degraded)) in raid5_run()
8000 conf->prev_algo, in raid5_run()
8001 conf->previous_raid_disks, in raid5_run()
8002 conf->max_degraded)) in raid5_run()
8010 mddev->degraded = raid5_calc_degraded(conf); in raid5_run()
8012 if (has_failed(conf)) { in raid5_run()
8014 mdname(mddev), mddev->degraded, conf->raid_disks); in raid5_run()
8038 mdname(mddev), conf->level, in raid5_run()
8042 print_raid5_conf(conf); in raid5_run()
8044 if (conf->reshape_progress != MaxSector) { in raid5_run()
8045 conf->reshape_safe = conf->reshape_progress; in raid5_run()
8046 atomic_set(&conf->reshape_stripes, 0); in raid5_run()
8072 int data_disks = conf->previous_raid_disks - conf->max_degraded; in raid5_run()
8078 raid5_set_io_opt(conf); in raid5_run()
8122 RAID5_MAX_REQ_STRIPES << RAID5_STRIPE_SHIFT(conf)); in raid5_run()
8128 if (log_init(conf, journal_dev, raid5_has_ppl(conf))) in raid5_run()
8134 print_raid5_conf(conf); in raid5_run()
8135 free_conf(conf); in raid5_run()
8143 struct r5conf *conf = priv; in raid5_free() local
8145 free_conf(conf); in raid5_free()
8151 struct r5conf *conf = mddev->private; in raid5_status() local
8155 conf->chunk_sectors / 2, mddev->layout); in raid5_status()
8156 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); in raid5_status()
8158 for (i = 0; i < conf->raid_disks; i++) { in raid5_status()
8159 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); in raid5_status()
8166 static void print_raid5_conf (struct r5conf *conf) in print_raid5_conf() argument
8171 pr_debug("RAID conf printout:\n"); in print_raid5_conf()
8172 if (!conf) { in print_raid5_conf()
8173 pr_debug("(conf==NULL)\n"); in print_raid5_conf()
8176 pr_debug(" --- level:%d rd:%d wd:%d\n", conf->level, in print_raid5_conf()
8177 conf->raid_disks, in print_raid5_conf()
8178 conf->raid_disks - conf->mddev->degraded); in print_raid5_conf()
8181 for (i = 0; i < conf->raid_disks; i++) { in print_raid5_conf()
8182 rdev = rcu_dereference(conf->disks[i].rdev); in print_raid5_conf()
8194 struct r5conf *conf = mddev->private; in raid5_spare_active() local
8199 for (i = 0; i < conf->raid_disks; i++) { in raid5_spare_active()
8200 rdev = rdev_mdlock_deref(mddev, conf->disks[i].rdev); in raid5_spare_active()
8202 conf->disks[i].replacement); in raid5_spare_active()
8229 spin_lock_irqsave(&conf->device_lock, flags); in raid5_spare_active()
8230 mddev->degraded = raid5_calc_degraded(conf); in raid5_spare_active()
8231 spin_unlock_irqrestore(&conf->device_lock, flags); in raid5_spare_active()
8232 print_raid5_conf(conf); in raid5_spare_active()
8238 struct r5conf *conf = mddev->private; in raid5_remove_disk() local
8245 print_raid5_conf(conf); in raid5_remove_disk()
8246 if (test_bit(Journal, &rdev->flags) && conf->log) { in raid5_remove_disk()
8253 if (atomic_read(&conf->active_stripes) || in raid5_remove_disk()
8254 atomic_read(&conf->r5c_cached_full_stripes) || in raid5_remove_disk()
8255 atomic_read(&conf->r5c_cached_partial_stripes)) { in raid5_remove_disk()
8258 log_exit(conf); in raid5_remove_disk()
8261 if (unlikely(number >= conf->pool_size)) in raid5_remove_disk()
8263 p = conf->disks + number; in raid5_remove_disk()
8271 if (number >= conf->raid_disks && in raid5_remove_disk()
8272 conf->reshape_progress == MaxSector) in raid5_remove_disk()
8284 mddev->recovery_disabled != conf->recovery_disabled && in raid5_remove_disk()
8285 !has_failed(conf) && in raid5_remove_disk()
8288 number < conf->raid_disks) { in raid5_remove_disk()
8303 err = log_modify(conf, rdev, false); in raid5_remove_disk()
8319 err = log_modify(conf, tmp, true); in raid5_remove_disk()
8325 print_raid5_conf(conf); in raid5_remove_disk()
8331 struct r5conf *conf = mddev->private; in raid5_add_disk() local
8337 int last = conf->raid_disks - 1; in raid5_add_disk()
8340 if (conf->log) in raid5_add_disk()
8348 ret = log_init(conf, rdev, false); in raid5_add_disk()
8352 ret = r5l_start(conf->log); in raid5_add_disk()
8358 if (mddev->recovery_disabled == conf->recovery_disabled) in raid5_add_disk()
8361 if (rdev->saved_raid_disk < 0 && has_failed(conf)) in raid5_add_disk()
8374 conf->disks[rdev->saved_raid_disk].rdev == NULL) in raid5_add_disk()
8378 p = conf->disks + disk; in raid5_add_disk()
8383 conf->fullsync = 1; in raid5_add_disk()
8386 err = log_modify(conf, rdev, true); in raid5_add_disk()
8392 p = conf->disks + disk; in raid5_add_disk()
8401 conf->fullsync = 1; in raid5_add_disk()
8407 print_raid5_conf(conf); in raid5_add_disk()
8421 struct r5conf *conf = mddev->private; in raid5_resize() local
8423 if (raid5_has_log(conf) || raid5_has_ppl(conf)) in raid5_resize()
8425 sectors &= ~((sector_t)conf->chunk_sectors - 1); in raid5_resize()
8456 struct r5conf *conf = mddev->private; in check_stripe_cache() local
8457 if (((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4 in check_stripe_cache()
8458 > conf->min_nr_stripes || in check_stripe_cache()
8459 ((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4 in check_stripe_cache()
8460 > conf->min_nr_stripes) { in check_stripe_cache()
8464 / RAID5_STRIPE_SIZE(conf))*4); in check_stripe_cache()
8472 struct r5conf *conf = mddev->private; in check_reshape() local
8474 if (raid5_has_log(conf) || raid5_has_ppl(conf)) in check_reshape()
8480 if (has_failed(conf)) in check_reshape()
8500 if (resize_chunks(conf, in check_reshape()
8501 conf->previous_raid_disks in check_reshape()
8508 if (conf->previous_raid_disks + mddev->delta_disks <= conf->pool_size) in check_reshape()
8510 return resize_stripes(conf, (conf->previous_raid_disks in check_reshape()
8516 struct r5conf *conf = mddev->private; in raid5_start_reshape() local
8528 if (has_failed(conf)) in raid5_start_reshape()
8534 for (i = 0; i < conf->raid_disks; i++) in raid5_start_reshape()
8535 if (rdev_mdlock_deref(mddev, conf->disks[i].replacement)) in raid5_start_reshape()
8544 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) in raid5_start_reshape()
8554 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) in raid5_start_reshape()
8561 atomic_set(&conf->reshape_stripes, 0); in raid5_start_reshape()
8562 spin_lock_irq(&conf->device_lock); in raid5_start_reshape()
8563 write_seqcount_begin(&conf->gen_lock); in raid5_start_reshape()
8564 conf->previous_raid_disks = conf->raid_disks; in raid5_start_reshape()
8565 conf->raid_disks += mddev->delta_disks; in raid5_start_reshape()
8566 conf->prev_chunk_sectors = conf->chunk_sectors; in raid5_start_reshape()
8567 conf->chunk_sectors = mddev->new_chunk_sectors; in raid5_start_reshape()
8568 conf->prev_algo = conf->algorithm; in raid5_start_reshape()
8569 conf->algorithm = mddev->new_layout; in raid5_start_reshape()
8570 conf->generation++; in raid5_start_reshape()
8576 conf->reshape_progress = raid5_size(mddev, 0, 0); in raid5_start_reshape()
8578 conf->reshape_progress = 0; in raid5_start_reshape()
8579 conf->reshape_safe = conf->reshape_progress; in raid5_start_reshape()
8580 write_seqcount_end(&conf->gen_lock); in raid5_start_reshape()
8581 spin_unlock_irq(&conf->device_lock); in raid5_start_reshape()
8603 >= conf->previous_raid_disks) in raid5_start_reshape()
8611 } else if (rdev->raid_disk >= conf->previous_raid_disks in raid5_start_reshape()
8621 spin_lock_irqsave(&conf->device_lock, flags); in raid5_start_reshape()
8622 mddev->degraded = raid5_calc_degraded(conf); in raid5_start_reshape()
8623 spin_unlock_irqrestore(&conf->device_lock, flags); in raid5_start_reshape()
8625 mddev->raid_disks = conf->raid_disks; in raid5_start_reshape()
8626 mddev->reshape_position = conf->reshape_progress; in raid5_start_reshape()
8638 spin_lock_irq(&conf->device_lock); in raid5_start_reshape()
8639 write_seqcount_begin(&conf->gen_lock); in raid5_start_reshape()
8640 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; in raid5_start_reshape()
8642 conf->chunk_sectors = conf->prev_chunk_sectors; in raid5_start_reshape()
8643 mddev->new_layout = conf->algorithm = conf->prev_algo; in raid5_start_reshape()
8647 conf->generation --; in raid5_start_reshape()
8648 conf->reshape_progress = MaxSector; in raid5_start_reshape()
8650 write_seqcount_end(&conf->gen_lock); in raid5_start_reshape()
8651 spin_unlock_irq(&conf->device_lock); in raid5_start_reshape()
8654 conf->reshape_checkpoint = jiffies; in raid5_start_reshape()
8661 * changes needed in 'conf'
8663 static void end_reshape(struct r5conf *conf) in end_reshape() argument
8666 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { in end_reshape()
8669 spin_lock_irq(&conf->device_lock); in end_reshape()
8670 conf->previous_raid_disks = conf->raid_disks; in end_reshape()
8671 md_finish_reshape(conf->mddev); in end_reshape()
8673 conf->reshape_progress = MaxSector; in end_reshape()
8674 conf->mddev->reshape_position = MaxSector; in end_reshape()
8675 rdev_for_each(rdev, conf->mddev) in end_reshape()
8680 spin_unlock_irq(&conf->device_lock); in end_reshape()
8681 wake_up(&conf->wait_for_overlap); in end_reshape()
8683 if (conf->mddev->queue) in end_reshape()
8684 raid5_set_io_opt(conf); in end_reshape()
8693 struct r5conf *conf = mddev->private; in raid5_finish_reshape() local
8700 spin_lock_irq(&conf->device_lock); in raid5_finish_reshape()
8701 mddev->degraded = raid5_calc_degraded(conf); in raid5_finish_reshape()
8702 spin_unlock_irq(&conf->device_lock); in raid5_finish_reshape()
8703 for (d = conf->raid_disks ; in raid5_finish_reshape()
8704 d < conf->raid_disks - mddev->delta_disks; in raid5_finish_reshape()
8707 conf->disks[d].rdev); in raid5_finish_reshape()
8711 conf->disks[d].replacement); in raid5_finish_reshape()
8716 mddev->layout = conf->algorithm; in raid5_finish_reshape()
8717 mddev->chunk_sectors = conf->chunk_sectors; in raid5_finish_reshape()
8726 struct r5conf *conf = mddev->private; in raid5_quiesce() local
8730 lock_all_device_hash_locks_irq(conf); in raid5_quiesce()
8734 r5c_flush_cache(conf, INT_MAX); in raid5_quiesce()
8738 smp_store_release(&conf->quiesce, 2); in raid5_quiesce()
8739 wait_event_cmd(conf->wait_for_quiescent, in raid5_quiesce()
8740 atomic_read(&conf->active_stripes) == 0 && in raid5_quiesce()
8741 atomic_read(&conf->active_aligned_reads) == 0, in raid5_quiesce()
8742 unlock_all_device_hash_locks_irq(conf), in raid5_quiesce()
8743 lock_all_device_hash_locks_irq(conf)); in raid5_quiesce()
8744 conf->quiesce = 1; in raid5_quiesce()
8745 unlock_all_device_hash_locks_irq(conf); in raid5_quiesce()
8747 wake_up(&conf->wait_for_overlap); in raid5_quiesce()
8750 lock_all_device_hash_locks_irq(conf); in raid5_quiesce()
8751 conf->quiesce = 0; in raid5_quiesce()
8752 wake_up(&conf->wait_for_quiescent); in raid5_quiesce()
8753 wake_up(&conf->wait_for_overlap); in raid5_quiesce()
8754 unlock_all_device_hash_locks_irq(conf); in raid5_quiesce()
8756 log_quiesce(conf, quiesce); in raid5_quiesce()
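The raid5_quiesce() lines show how wait_event_cmd() sleeps on a condition that must be checked under the hash locks: the third argument runs just before sleeping (dropping the locks) and the fourth runs after waking (retaking them), so conf->quiesce and the activity counters are never examined unlocked. A simplified single-lock sketch of the same idiom, with hypothetical names rather than the driver's per-hash locks:

#include <linux/spinlock.h>
#include <linux/wait.h>

static DEFINE_SPINLOCK(dev_lock);
static DECLARE_WAIT_QUEUE_HEAD(quiet_wq);
static int in_flight;	/* protected by dev_lock */
static int quiesce;	/* 0 = running, 2 = draining, 1 = fully quiesced */

static void quiesce_device(void)
{
	spin_lock_irq(&dev_lock);
	quiesce = 2;		/* new submitters back off, in-flight I/O drains */
	wait_event_cmd(quiet_wq, in_flight == 0,
		       spin_unlock_irq(&dev_lock),	/* run before sleeping */
		       spin_lock_irq(&dev_lock));	/* run after waking up */
	quiesce = 1;
	spin_unlock_irq(&dev_lock);
}

static void unquiesce_device(void)
{
	spin_lock_irq(&dev_lock);
	quiesce = 0;
	wake_up(&quiet_wq);	/* let waiters re-check their conditions */
	spin_unlock_irq(&dev_lock);
}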
8857 struct r5conf *conf = mddev->private; in raid5_check_reshape() local
8877 conf->algorithm = mddev->new_layout; in raid5_check_reshape()
8881 conf->chunk_sectors = new_chunk ; in raid5_check_reshape()
9000 struct r5conf *conf; in raid5_change_consistency_policy() local
9006 conf = mddev->private; in raid5_change_consistency_policy()
9007 if (!conf) { in raid5_change_consistency_policy()
9014 if (!raid5_has_ppl(conf) && conf->level == 5) { in raid5_change_consistency_policy()
9015 err = log_init(conf, NULL, true); in raid5_change_consistency_policy()
9017 err = resize_stripes(conf, conf->pool_size); in raid5_change_consistency_policy()
9020 log_exit(conf); in raid5_change_consistency_policy()
9027 if (raid5_has_ppl(conf)) { in raid5_change_consistency_policy()
9029 log_exit(conf); in raid5_change_consistency_policy()
9031 err = resize_stripes(conf, conf->pool_size); in raid5_change_consistency_policy()
9032 } else if (test_bit(MD_HAS_JOURNAL, &conf->mddev->flags) && in raid5_change_consistency_policy()
9033 r5l_log_disk_error(conf)) { in raid5_change_consistency_policy()
9065 struct r5conf *conf = mddev->private; in raid5_start() local
9067 return r5l_start(conf->log); in raid5_start()
9072 struct r5conf *conf = mddev->private; in raid5_prepare_suspend() local
9083 wake_up(&conf->wait_for_overlap); in raid5_prepare_suspend()