Lines Matching refs:mddev (references to struct mddev in the MD core driver, drivers/md/md.c)

88 static int remove_and_add_spares(struct mddev *mddev,
90 static void mddev_detach(struct mddev *mddev);
91 static void export_rdev(struct md_rdev *rdev, struct mddev *mddev);
117 static inline int speed_min(struct mddev *mddev) in speed_min() argument
119 return mddev->sync_speed_min ? in speed_min()
120 mddev->sync_speed_min : sysctl_speed_limit_min; in speed_min()
123 static inline int speed_max(struct mddev *mddev) in speed_max() argument
125 return mddev->sync_speed_max ? in speed_max()
126 mddev->sync_speed_max : sysctl_speed_limit_max; in speed_max()
138 static void rdevs_uninit_serial(struct mddev *mddev) in rdevs_uninit_serial() argument
142 rdev_for_each(rdev, mddev) in rdevs_uninit_serial()
174 static int rdevs_init_serial(struct mddev *mddev) in rdevs_init_serial() argument
179 rdev_for_each(rdev, mddev) { in rdevs_init_serial()
186 if (ret && !mddev->serial_info_pool) in rdevs_init_serial()
187 rdevs_uninit_serial(mddev); in rdevs_init_serial()
199 return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 && in rdev_need_serial()
209 void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev, in mddev_create_serial_pool() argument
219 mddev_suspend(mddev); in mddev_create_serial_pool()
222 ret = rdevs_init_serial(mddev); in mddev_create_serial_pool()
228 if (mddev->serial_info_pool == NULL) { in mddev_create_serial_pool()
233 mddev->serial_info_pool = in mddev_create_serial_pool()
236 if (!mddev->serial_info_pool) { in mddev_create_serial_pool()
237 rdevs_uninit_serial(mddev); in mddev_create_serial_pool()
244 mddev_resume(mddev); in mddev_create_serial_pool()
253 void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev, in mddev_destroy_serial_pool() argument
259 if (mddev->serial_info_pool) { in mddev_destroy_serial_pool()
264 mddev_suspend(mddev); in mddev_destroy_serial_pool()
265 rdev_for_each(temp, mddev) { in mddev_destroy_serial_pool()
267 if (!mddev->serialize_policy || in mddev_destroy_serial_pool()
283 mempool_destroy(mddev->serial_info_pool); in mddev_destroy_serial_pool()
284 mddev->serial_info_pool = NULL; in mddev_destroy_serial_pool()
287 mddev_resume(mddev); in mddev_destroy_serial_pool()
356 static bool is_suspended(struct mddev *mddev, struct bio *bio) in is_suspended() argument
358 if (is_md_suspended(mddev)) in is_suspended()
362 if (mddev->suspend_lo >= mddev->suspend_hi) in is_suspended()
364 if (bio->bi_iter.bi_sector >= mddev->suspend_hi) in is_suspended()
366 if (bio_end_sector(bio) < mddev->suspend_lo) in is_suspended()
371 void md_handle_request(struct mddev *mddev, struct bio *bio) in md_handle_request() argument
374 if (is_suspended(mddev, bio)) { in md_handle_request()
382 prepare_to_wait(&mddev->sb_wait, &__wait, in md_handle_request()
384 if (!is_suspended(mddev, bio)) in md_handle_request()
388 finish_wait(&mddev->sb_wait, &__wait); in md_handle_request()
390 if (!percpu_ref_tryget_live(&mddev->active_io)) in md_handle_request()
393 if (!mddev->pers->make_request(mddev, bio)) { in md_handle_request()
394 percpu_ref_put(&mddev->active_io); in md_handle_request()
398 percpu_ref_put(&mddev->active_io); in md_handle_request()
405 struct mddev *mddev = bio->bi_bdev->bd_disk->private_data; in md_submit_bio() local
407 if (mddev == NULL || mddev->pers == NULL) { in md_submit_bio()
412 if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) { in md_submit_bio()
421 if (mddev->ro == MD_RDONLY && unlikely(rw == WRITE)) { in md_submit_bio()
431 md_handle_request(mddev, bio); in md_submit_bio()
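
md_submit_bio() above is the array's block-layer entry point: it rejects I/O on arrays with no personality, fails writes when MD_BROKEN is set or the array is read-only, and otherwise hands the bio to md_handle_request(). It reaches the block layer as the gendisk's ->submit_bio method; a minimal sketch of that wiring (md_fops_sketch is a placeholder name, the real md_fops table in md.c also provides open/release/ioctl and friends):

#include <linux/blkdev.h>
#include <linux/module.h>

static void md_submit_bio(struct bio *bio);	/* the function shown above (line 405) */

/* Hook md_submit_bio() up as the submit path of the md gendisk. */
static const struct block_device_operations md_fops_sketch = {
	.owner		= THIS_MODULE,
	.submit_bio	= md_submit_bio,
	/* .open, .release, .ioctl, .getgeo, ... in the real table */
};
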
440 void mddev_suspend(struct mddev *mddev) in mddev_suspend() argument
442 struct md_thread *thread = rcu_dereference_protected(mddev->thread, in mddev_suspend()
443 lockdep_is_held(&mddev->reconfig_mutex)); in mddev_suspend()
446 if (mddev->suspended++) in mddev_suspend()
448 wake_up(&mddev->sb_wait); in mddev_suspend()
449 set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags); in mddev_suspend()
450 percpu_ref_kill(&mddev->active_io); in mddev_suspend()
452 if (mddev->pers && mddev->pers->prepare_suspend) in mddev_suspend()
453 mddev->pers->prepare_suspend(mddev); in mddev_suspend()
455 wait_event(mddev->sb_wait, percpu_ref_is_zero(&mddev->active_io)); in mddev_suspend()
456 clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags); in mddev_suspend()
457 wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags)); in mddev_suspend()
460 mddev->noio_flag = memalloc_noio_save(); in mddev_suspend()
464 void mddev_resume(struct mddev *mddev) in mddev_resume() argument
466 lockdep_assert_held(&mddev->reconfig_mutex); in mddev_resume()
467 if (--mddev->suspended) in mddev_resume()
471 memalloc_noio_restore(mddev->noio_flag); in mddev_resume()
473 percpu_ref_resurrect(&mddev->active_io); in mddev_resume()
474 wake_up(&mddev->sb_wait); in mddev_resume()
476 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in mddev_resume()
477 md_wakeup_thread(mddev->thread); in mddev_resume()
478 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ in mddev_resume()
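
mddev_suspend() and mddev_resume() above nest through the mddev->suspended counter and must run under reconfig_mutex (note the lockdep annotations); suspend kills active_io and waits on sb_wait for it to drain, resume resurrects it and wakes the md and sync threads. A minimal sketch of the intended caller pattern, assuming the mddev_lock()/mddev_unlock() helpers from md.h and using a hypothetical function name:

/* Bracket a configuration change with suspend/resume so that no I/O is
 * in flight while array state is being modified.  The calls nest, so a
 * caller that is already inside a suspended section may suspend again. */
static int reconfigure_array_example(struct mddev *mddev)
{
	int err = mddev_lock(mddev);		/* takes mddev->reconfig_mutex */

	if (err)
		return err;

	mddev_suspend(mddev);			/* stop new I/O, drain active_io */
	/* ... change layout/personality/rdev state here ... */
	mddev_resume(mddev);			/* restart I/O, wake md/sync threads */

	mddev_unlock(mddev);
	return 0;
}
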
489 struct mddev *mddev = rdev->mddev; in md_end_flush() local
493 rdev_dec_pending(rdev, mddev); in md_end_flush()
495 if (atomic_dec_and_test(&mddev->flush_pending)) in md_end_flush()
497 queue_work(md_wq, &mddev->flush_work); in md_end_flush()
504 struct mddev *mddev = container_of(ws, struct mddev, flush_work); in submit_flushes() local
507 mddev->start_flush = ktime_get_boottime(); in submit_flushes()
508 INIT_WORK(&mddev->flush_work, md_submit_flush_data); in submit_flushes()
509 atomic_set(&mddev->flush_pending, 1); in submit_flushes()
511 rdev_for_each_rcu(rdev, mddev) in submit_flushes()
520 GFP_NOIO, &mddev->bio_set); in submit_flushes()
523 atomic_inc(&mddev->flush_pending); in submit_flushes()
528 if (atomic_dec_and_test(&mddev->flush_pending)) in submit_flushes()
529 queue_work(md_wq, &mddev->flush_work); in submit_flushes()
534 struct mddev *mddev = container_of(ws, struct mddev, flush_work); in md_submit_flush_data() local
535 struct bio *bio = mddev->flush_bio; in md_submit_flush_data()
543 spin_lock_irq(&mddev->lock); in md_submit_flush_data()
544 mddev->prev_flush_start = mddev->start_flush; in md_submit_flush_data()
545 mddev->flush_bio = NULL; in md_submit_flush_data()
546 spin_unlock_irq(&mddev->lock); in md_submit_flush_data()
547 wake_up(&mddev->sb_wait); in md_submit_flush_data()
562 if (WARN_ON_ONCE(!mddev->pers->make_request(mddev, bio))) in md_submit_flush_data()
567 percpu_ref_put(&mddev->active_io); in md_submit_flush_data()
576 bool md_flush_request(struct mddev *mddev, struct bio *bio) in md_flush_request() argument
579 spin_lock_irq(&mddev->lock); in md_flush_request()
583 wait_event_lock_irq(mddev->sb_wait, in md_flush_request()
584 !mddev->flush_bio || in md_flush_request()
585 ktime_before(req_start, mddev->prev_flush_start), in md_flush_request()
586 mddev->lock); in md_flush_request()
588 if (ktime_after(req_start, mddev->prev_flush_start)) { in md_flush_request()
589 WARN_ON(mddev->flush_bio); in md_flush_request()
600 WARN_ON(percpu_ref_is_zero(&mddev->active_io)); in md_flush_request()
601 percpu_ref_get(&mddev->active_io); in md_flush_request()
602 mddev->flush_bio = bio; in md_flush_request()
605 spin_unlock_irq(&mddev->lock); in md_flush_request()
608 INIT_WORK(&mddev->flush_work, submit_flushes); in md_flush_request()
609 queue_work(md_wq, &mddev->flush_work); in md_flush_request()
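
md_flush_request() above coalesces REQ_PREFLUSH bios: if another flush already completed after this request started (tracked via prev_flush_start) the bio can proceed or finish immediately; otherwise it becomes mddev->flush_bio and submit_flushes() is queued to push flushes to every member device. Personalities are expected to call it at the top of their make_request method; a hedged sketch of that pattern (example_make_request is hypothetical, but the raid1/raid10/raid5 callers look essentially like this):

/* When md_flush_request() returns true the flush has been queued or
 * coalesced and the bio must not be touched again; when it returns
 * false a recent flush already covers this request, REQ_PREFLUSH has
 * been cleared, and the data payload should be processed normally. */
static bool example_make_request(struct mddev *mddev, struct bio *bio)
{
	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
	    && md_flush_request(mddev, bio))
		return true;

	/* ... map and issue the data portion of the bio ... */
	return true;
}
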
624 static inline struct mddev *mddev_get(struct mddev *mddev) in mddev_get() argument
628 if (test_bit(MD_DELETED, &mddev->flags)) in mddev_get()
630 atomic_inc(&mddev->active); in mddev_get()
631 return mddev; in mddev_get()
636 void mddev_put(struct mddev *mddev) in mddev_put() argument
638 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) in mddev_put()
640 if (!mddev->raid_disks && list_empty(&mddev->disks) && in mddev_put()
641 mddev->ctime == 0 && !mddev->hold_active) { in mddev_put()
644 set_bit(MD_DELETED, &mddev->flags); in mddev_put()
651 INIT_WORK(&mddev->del_work, mddev_delayed_delete); in mddev_put()
652 queue_work(md_misc_wq, &mddev->del_work); in mddev_put()
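
mddev_get() above refuses to take a reference on an array already marked MD_DELETED, and mddev_put() arranges the delayed delete work once the last reference drops on an array with no disks, no raid_disks and no hold_active reason. Lookups therefore take the reference while holding all_mddevs_lock; a sketch of that pattern with a hypothetical helper name (mddev_find_locked() is the real function listed at line 685 below):

/* Return a referenced mddev for the given unit, or NULL if it does not
 * exist or is being deleted.  The caller drops the reference with
 * mddev_put() when finished. */
static struct mddev *get_mddev_example(dev_t unit)
{
	struct mddev *mddev;

	spin_lock(&all_mddevs_lock);
	mddev = mddev_find_locked(unit);
	if (mddev)
		mddev = mddev_get(mddev);	/* NULL if MD_DELETED is set */
	spin_unlock(&all_mddevs_lock);

	return mddev;
}
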
659 void mddev_init(struct mddev *mddev) in mddev_init() argument
661 mutex_init(&mddev->open_mutex); in mddev_init()
662 mutex_init(&mddev->reconfig_mutex); in mddev_init()
663 mutex_init(&mddev->sync_mutex); in mddev_init()
664 mutex_init(&mddev->bitmap_info.mutex); in mddev_init()
665 INIT_LIST_HEAD(&mddev->disks); in mddev_init()
666 INIT_LIST_HEAD(&mddev->all_mddevs); in mddev_init()
667 INIT_LIST_HEAD(&mddev->deleting); in mddev_init()
668 timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0); in mddev_init()
669 atomic_set(&mddev->active, 1); in mddev_init()
670 atomic_set(&mddev->openers, 0); in mddev_init()
671 atomic_set(&mddev->sync_seq, 0); in mddev_init()
672 spin_lock_init(&mddev->lock); in mddev_init()
673 atomic_set(&mddev->flush_pending, 0); in mddev_init()
674 init_waitqueue_head(&mddev->sb_wait); in mddev_init()
675 init_waitqueue_head(&mddev->recovery_wait); in mddev_init()
676 mddev->reshape_position = MaxSector; in mddev_init()
677 mddev->reshape_backwards = 0; in mddev_init()
678 mddev->last_sync_action = "none"; in mddev_init()
679 mddev->resync_min = 0; in mddev_init()
680 mddev->resync_max = MaxSector; in mddev_init()
681 mddev->level = LEVEL_NONE; in mddev_init()
685 static struct mddev *mddev_find_locked(dev_t unit) in mddev_find_locked()
687 struct mddev *mddev; in mddev_find_locked() local
689 list_for_each_entry(mddev, &all_mddevs, all_mddevs) in mddev_find_locked()
690 if (mddev->unit == unit) in mddev_find_locked()
691 return mddev; in mddev_find_locked()
717 static struct mddev *mddev_alloc(dev_t unit) in mddev_alloc()
719 struct mddev *new; in mddev_alloc()
759 static void mddev_free(struct mddev *mddev) in mddev_free() argument
762 list_del(&mddev->all_mddevs); in mddev_free()
765 kfree(mddev); in mddev_free()
770 void mddev_unlock(struct mddev *mddev) in mddev_unlock() argument
776 if (!list_empty(&mddev->deleting)) in mddev_unlock()
777 list_splice_init(&mddev->deleting, &delete); in mddev_unlock()
779 if (mddev->to_remove) { in mddev_unlock()
792 const struct attribute_group *to_remove = mddev->to_remove; in mddev_unlock()
793 mddev->to_remove = NULL; in mddev_unlock()
794 mddev->sysfs_active = 1; in mddev_unlock()
795 mutex_unlock(&mddev->reconfig_mutex); in mddev_unlock()
797 if (mddev->kobj.sd) { in mddev_unlock()
799 sysfs_remove_group(&mddev->kobj, to_remove); in mddev_unlock()
800 if (mddev->pers == NULL || in mddev_unlock()
801 mddev->pers->sync_request == NULL) { in mddev_unlock()
802 sysfs_remove_group(&mddev->kobj, &md_redundancy_group); in mddev_unlock()
803 if (mddev->sysfs_action) in mddev_unlock()
804 sysfs_put(mddev->sysfs_action); in mddev_unlock()
805 if (mddev->sysfs_completed) in mddev_unlock()
806 sysfs_put(mddev->sysfs_completed); in mddev_unlock()
807 if (mddev->sysfs_degraded) in mddev_unlock()
808 sysfs_put(mddev->sysfs_degraded); in mddev_unlock()
809 mddev->sysfs_action = NULL; in mddev_unlock()
810 mddev->sysfs_completed = NULL; in mddev_unlock()
811 mddev->sysfs_degraded = NULL; in mddev_unlock()
814 mddev->sysfs_active = 0; in mddev_unlock()
816 mutex_unlock(&mddev->reconfig_mutex); in mddev_unlock()
818 md_wakeup_thread(mddev->thread); in mddev_unlock()
819 wake_up(&mddev->sb_wait); in mddev_unlock()
824 export_rdev(rdev, mddev); in mddev_unlock()
829 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr) in md_find_rdev_nr_rcu() argument
833 rdev_for_each_rcu(rdev, mddev) in md_find_rdev_nr_rcu()
841 static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev) in find_rdev() argument
845 rdev_for_each(rdev, mddev) in find_rdev()
852 struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev) in md_find_rdev_rcu() argument
856 rdev_for_each_rcu(rdev, mddev) in md_find_rdev_rcu()
910 struct mddev *mddev = rdev->mddev; in super_written() local
915 md_error(mddev, rdev); in super_written()
918 set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags); in super_written()
926 rdev_dec_pending(rdev, mddev); in super_written()
928 if (atomic_dec_and_test(&mddev->pending_writes)) in super_written()
929 wake_up(&mddev->sb_wait); in super_written()
932 void md_super_write(struct mddev *mddev, struct md_rdev *rdev, in md_super_write() argument
953 GFP_NOIO, &mddev->sync_set); in md_super_write()
962 if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) && in md_super_write()
967 atomic_inc(&mddev->pending_writes); in md_super_write()
971 int md_super_wait(struct mddev *mddev) in md_super_wait() argument
974 wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0); in md_super_wait()
975 if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags)) in md_super_wait()
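
md_super_write() above submits an asynchronous superblock write (accounted in mddev->pending_writes) and md_super_wait() blocks until all of them have completed, returning a negative value when a failfast write failed and MD_SB_NEED_REWRITE asks for another pass. Callers loop on that, as super_90_rdev_size_change() does further down; a simplified sketch with a hypothetical name (the real md_update_sb() wraps this core loop in per-rdev checks and skips faulty members):

/* Rewrite the cached superblock page on every member and wait for all
 * writes to finish, repeating the pass while md_super_wait() reports
 * that a failfast write needs to be redone. */
static void write_all_supers_example(struct mddev *mddev)
{
	struct md_rdev *rdev;

	do {
		rdev_for_each(rdev, mddev)
			md_super_write(mddev, rdev, rdev->sb_start,
				       rdev->sb_size, rdev->sb_page);
	} while (md_super_wait(mddev) < 0);
}
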
993 else if (rdev->mddev->reshape_position != MaxSector && in sync_page_io()
994 (rdev->mddev->reshape_backwards == in sync_page_io()
995 (sector >= rdev->mddev->reshape_position))) in sync_page_io()
1132 int (*validate_super)(struct mddev *mddev,
1135 void (*sync_super)(struct mddev *mddev,
1151 int md_check_no_bitmap(struct mddev *mddev) in md_check_no_bitmap() argument
1153 if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset) in md_check_no_bitmap()
1156 mdname(mddev), mddev->pers->name); in md_check_no_bitmap()
1273 static int super_90_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev) in super_90_validate() argument
1285 if (mddev->raid_disks == 0) { in super_90_validate()
1286 mddev->major_version = 0; in super_90_validate()
1287 mddev->minor_version = sb->minor_version; in super_90_validate()
1288 mddev->patch_version = sb->patch_version; in super_90_validate()
1289 mddev->external = 0; in super_90_validate()
1290 mddev->chunk_sectors = sb->chunk_size >> 9; in super_90_validate()
1291 mddev->ctime = sb->ctime; in super_90_validate()
1292 mddev->utime = sb->utime; in super_90_validate()
1293 mddev->level = sb->level; in super_90_validate()
1294 mddev->clevel[0] = 0; in super_90_validate()
1295 mddev->layout = sb->layout; in super_90_validate()
1296 mddev->raid_disks = sb->raid_disks; in super_90_validate()
1297 mddev->dev_sectors = ((sector_t)sb->size) * 2; in super_90_validate()
1298 mddev->events = ev1; in super_90_validate()
1299 mddev->bitmap_info.offset = 0; in super_90_validate()
1300 mddev->bitmap_info.space = 0; in super_90_validate()
1302 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; in super_90_validate()
1303 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); in super_90_validate()
1304 mddev->reshape_backwards = 0; in super_90_validate()
1306 if (mddev->minor_version >= 91) { in super_90_validate()
1307 mddev->reshape_position = sb->reshape_position; in super_90_validate()
1308 mddev->delta_disks = sb->delta_disks; in super_90_validate()
1309 mddev->new_level = sb->new_level; in super_90_validate()
1310 mddev->new_layout = sb->new_layout; in super_90_validate()
1311 mddev->new_chunk_sectors = sb->new_chunk >> 9; in super_90_validate()
1312 if (mddev->delta_disks < 0) in super_90_validate()
1313 mddev->reshape_backwards = 1; in super_90_validate()
1315 mddev->reshape_position = MaxSector; in super_90_validate()
1316 mddev->delta_disks = 0; in super_90_validate()
1317 mddev->new_level = mddev->level; in super_90_validate()
1318 mddev->new_layout = mddev->layout; in super_90_validate()
1319 mddev->new_chunk_sectors = mddev->chunk_sectors; in super_90_validate()
1321 if (mddev->level == 0) in super_90_validate()
1322 mddev->layout = -1; in super_90_validate()
1325 mddev->recovery_cp = MaxSector; in super_90_validate()
1329 mddev->recovery_cp = sb->recovery_cp; in super_90_validate()
1331 mddev->recovery_cp = 0; in super_90_validate()
1334 memcpy(mddev->uuid+0, &sb->set_uuid0, 4); in super_90_validate()
1335 memcpy(mddev->uuid+4, &sb->set_uuid1, 4); in super_90_validate()
1336 memcpy(mddev->uuid+8, &sb->set_uuid2, 4); in super_90_validate()
1337 memcpy(mddev->uuid+12,&sb->set_uuid3, 4); in super_90_validate()
1339 mddev->max_disks = MD_SB_DISKS; in super_90_validate()
1342 mddev->bitmap_info.file == NULL) { in super_90_validate()
1343 mddev->bitmap_info.offset = in super_90_validate()
1344 mddev->bitmap_info.default_offset; in super_90_validate()
1345 mddev->bitmap_info.space = in super_90_validate()
1346 mddev->bitmap_info.default_space; in super_90_validate()
1349 } else if (mddev->pers == NULL) { in super_90_validate()
1355 if (ev1 < mddev->events) in super_90_validate()
1357 } else if (mddev->bitmap) { in super_90_validate()
1361 if (ev1 < mddev->bitmap->events_cleared) in super_90_validate()
1363 if (ev1 < mddev->events) in super_90_validate()
1366 if (ev1 < mddev->events) in super_90_validate()
1371 if (mddev->level != LEVEL_MULTIPATH) { in super_90_validate()
1385 if (mddev->minor_version >= 91) { in super_90_validate()
1402 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev) in super_90_sync() argument
1406 int next_spare = mddev->raid_disks; in super_90_sync()
1428 sb->major_version = mddev->major_version; in super_90_sync()
1429 sb->patch_version = mddev->patch_version; in super_90_sync()
1431 memcpy(&sb->set_uuid0, mddev->uuid+0, 4); in super_90_sync()
1432 memcpy(&sb->set_uuid1, mddev->uuid+4, 4); in super_90_sync()
1433 memcpy(&sb->set_uuid2, mddev->uuid+8, 4); in super_90_sync()
1434 memcpy(&sb->set_uuid3, mddev->uuid+12,4); in super_90_sync()
1436 sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); in super_90_sync()
1437 sb->level = mddev->level; in super_90_sync()
1438 sb->size = mddev->dev_sectors / 2; in super_90_sync()
1439 sb->raid_disks = mddev->raid_disks; in super_90_sync()
1440 sb->md_minor = mddev->md_minor; in super_90_sync()
1442 sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); in super_90_sync()
1444 sb->events_hi = (mddev->events>>32); in super_90_sync()
1445 sb->events_lo = (u32)mddev->events; in super_90_sync()
1447 if (mddev->reshape_position == MaxSector) in super_90_sync()
1451 sb->reshape_position = mddev->reshape_position; in super_90_sync()
1452 sb->new_level = mddev->new_level; in super_90_sync()
1453 sb->delta_disks = mddev->delta_disks; in super_90_sync()
1454 sb->new_layout = mddev->new_layout; in super_90_sync()
1455 sb->new_chunk = mddev->new_chunk_sectors << 9; in super_90_sync()
1457 mddev->minor_version = sb->minor_version; in super_90_sync()
1458 if (mddev->in_sync) in super_90_sync()
1460 sb->recovery_cp = mddev->recovery_cp; in super_90_sync()
1461 sb->cp_events_hi = (mddev->events>>32); in super_90_sync()
1462 sb->cp_events_lo = (u32)mddev->events; in super_90_sync()
1463 if (mddev->recovery_cp == MaxSector) in super_90_sync()
1468 sb->layout = mddev->layout; in super_90_sync()
1469 sb->chunk_size = mddev->chunk_sectors << 9; in super_90_sync()
1471 if (mddev->bitmap && mddev->bitmap_info.file == NULL) in super_90_sync()
1475 rdev_for_each(rdev2, mddev) { in super_90_sync()
1523 for (i=0 ; i < mddev->raid_disks ; i++) { in super_90_sync()
1549 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) in super_90_rdev_size_change()
1551 if (rdev->mddev->bitmap_info.offset) in super_90_rdev_size_change()
1559 if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1) in super_90_rdev_size_change()
1562 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, in super_90_rdev_size_change()
1564 } while (md_super_wait(rdev->mddev) < 0); in super_90_rdev_size_change()
1785 static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev) in super_1_validate() argument
1796 if (mddev->raid_disks == 0) { in super_1_validate()
1797 mddev->major_version = 1; in super_1_validate()
1798 mddev->patch_version = 0; in super_1_validate()
1799 mddev->external = 0; in super_1_validate()
1800 mddev->chunk_sectors = le32_to_cpu(sb->chunksize); in super_1_validate()
1801 mddev->ctime = le64_to_cpu(sb->ctime); in super_1_validate()
1802 mddev->utime = le64_to_cpu(sb->utime); in super_1_validate()
1803 mddev->level = le32_to_cpu(sb->level); in super_1_validate()
1804 mddev->clevel[0] = 0; in super_1_validate()
1805 mddev->layout = le32_to_cpu(sb->layout); in super_1_validate()
1806 mddev->raid_disks = le32_to_cpu(sb->raid_disks); in super_1_validate()
1807 mddev->dev_sectors = le64_to_cpu(sb->size); in super_1_validate()
1808 mddev->events = ev1; in super_1_validate()
1809 mddev->bitmap_info.offset = 0; in super_1_validate()
1810 mddev->bitmap_info.space = 0; in super_1_validate()
1814 mddev->bitmap_info.default_offset = 1024 >> 9; in super_1_validate()
1815 mddev->bitmap_info.default_space = (4096-1024) >> 9; in super_1_validate()
1816 mddev->reshape_backwards = 0; in super_1_validate()
1818 mddev->recovery_cp = le64_to_cpu(sb->resync_offset); in super_1_validate()
1819 memcpy(mddev->uuid, sb->set_uuid, 16); in super_1_validate()
1821 mddev->max_disks = (4096-256)/2; in super_1_validate()
1824 mddev->bitmap_info.file == NULL) { in super_1_validate()
1825 mddev->bitmap_info.offset = in super_1_validate()
1832 if (mddev->minor_version > 0) in super_1_validate()
1833 mddev->bitmap_info.space = 0; in super_1_validate()
1834 else if (mddev->bitmap_info.offset > 0) in super_1_validate()
1835 mddev->bitmap_info.space = in super_1_validate()
1836 8 - mddev->bitmap_info.offset; in super_1_validate()
1838 mddev->bitmap_info.space = in super_1_validate()
1839 -mddev->bitmap_info.offset; in super_1_validate()
1843 mddev->reshape_position = le64_to_cpu(sb->reshape_position); in super_1_validate()
1844 mddev->delta_disks = le32_to_cpu(sb->delta_disks); in super_1_validate()
1845 mddev->new_level = le32_to_cpu(sb->new_level); in super_1_validate()
1846 mddev->new_layout = le32_to_cpu(sb->new_layout); in super_1_validate()
1847 mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk); in super_1_validate()
1848 if (mddev->delta_disks < 0 || in super_1_validate()
1849 (mddev->delta_disks == 0 && in super_1_validate()
1852 mddev->reshape_backwards = 1; in super_1_validate()
1854 mddev->reshape_position = MaxSector; in super_1_validate()
1855 mddev->delta_disks = 0; in super_1_validate()
1856 mddev->new_level = mddev->level; in super_1_validate()
1857 mddev->new_layout = mddev->layout; in super_1_validate()
1858 mddev->new_chunk_sectors = mddev->chunk_sectors; in super_1_validate()
1861 if (mddev->level == 0 && in super_1_validate()
1863 mddev->layout = -1; in super_1_validate()
1866 set_bit(MD_HAS_JOURNAL, &mddev->flags); in super_1_validate()
1877 set_bit(MD_HAS_PPL, &mddev->flags); in super_1_validate()
1879 } else if (mddev->pers == NULL) { in super_1_validate()
1889 if (ev1 + 1 < mddev->events) in super_1_validate()
1891 } else if (mddev->bitmap) { in super_1_validate()
1895 if (ev1 < mddev->bitmap->events_cleared) in super_1_validate()
1897 if (ev1 < mddev->events) in super_1_validate()
1900 if (ev1 < mddev->events) in super_1_validate()
1904 if (mddev->level != LEVEL_MULTIPATH) { in super_1_validate()
1910 } else if (mddev->pers == NULL && freshest && ev1 < mddev->events) { in super_1_validate()
1931 mdname(mddev), rdev->bdev, rdev->desc_nr, in super_1_validate()
1938 mdname(mddev), rdev->bdev, role, role, freshest->bdev); in super_1_validate()
1972 &mddev->recovery)) in super_1_validate()
1990 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) in super_1_sync() argument
2004 sb->utime = cpu_to_le64((__u64)mddev->utime); in super_1_sync()
2005 sb->events = cpu_to_le64(mddev->events); in super_1_sync()
2006 if (mddev->in_sync) in super_1_sync()
2007 sb->resync_offset = cpu_to_le64(mddev->recovery_cp); in super_1_sync()
2008 else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags)) in super_1_sync()
2015 sb->raid_disks = cpu_to_le32(mddev->raid_disks); in super_1_sync()
2016 sb->size = cpu_to_le64(mddev->dev_sectors); in super_1_sync()
2017 sb->chunksize = cpu_to_le32(mddev->chunk_sectors); in super_1_sync()
2018 sb->level = cpu_to_le32(mddev->level); in super_1_sync()
2019 sb->layout = cpu_to_le32(mddev->layout); in super_1_sync()
2032 if (mddev->bitmap && mddev->bitmap_info.file == NULL) { in super_1_sync()
2033 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); in super_1_sync()
2043 if (rdev->saved_raid_disk >= 0 && mddev->bitmap) in super_1_sync()
2054 if (mddev->reshape_position != MaxSector) { in super_1_sync()
2056 sb->reshape_position = cpu_to_le64(mddev->reshape_position); in super_1_sync()
2057 sb->new_layout = cpu_to_le32(mddev->new_layout); in super_1_sync()
2058 sb->delta_disks = cpu_to_le32(mddev->delta_disks); in super_1_sync()
2059 sb->new_level = cpu_to_le32(mddev->new_level); in super_1_sync()
2060 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors); in super_1_sync()
2061 if (mddev->delta_disks == 0 && in super_1_sync()
2062 mddev->reshape_backwards) in super_1_sync()
2073 if (mddev_is_clustered(mddev)) in super_1_sync()
2080 md_error(mddev, rdev); in super_1_sync()
2111 rdev_for_each(rdev2, mddev) in super_1_sync()
2128 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) in super_1_sync()
2131 if (test_bit(MD_HAS_PPL, &mddev->flags)) { in super_1_sync()
2132 if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags)) in super_1_sync()
2141 rdev_for_each(rdev2, mddev) { in super_1_sync()
2181 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) in super_1_rdev_size_change()
2190 } else if (rdev->mddev->bitmap_info.offset) { in super_1_rdev_size_change()
2218 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, in super_1_rdev_size_change()
2220 } while (md_super_wait(rdev->mddev) < 0); in super_1_rdev_size_change()
2236 if (rdev->mddev->minor_version == 0) in super_1_allow_new_offset()
2247 bitmap = rdev->mddev->bitmap; in super_1_allow_new_offset()
2248 if (bitmap && !rdev->mddev->bitmap_info.file && in super_1_allow_new_offset()
2249 rdev->sb_start + rdev->mddev->bitmap_info.offset + in super_1_allow_new_offset()
2279 static void sync_super(struct mddev *mddev, struct md_rdev *rdev) in sync_super() argument
2281 if (mddev->sync_super) { in sync_super()
2282 mddev->sync_super(mddev, rdev); in sync_super()
2286 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types)); in sync_super()
2288 super_types[mddev->major_version].sync_super(mddev, rdev); in sync_super()
2291 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2) in match_mddev_units()
2325 int md_integrity_register(struct mddev *mddev) in md_integrity_register() argument
2329 if (list_empty(&mddev->disks)) in md_integrity_register()
2331 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk)) in md_integrity_register()
2333 rdev_for_each(rdev, mddev) { in md_integrity_register()
2355 blk_integrity_register(mddev->gendisk, in md_integrity_register()
2358 pr_debug("md: data integrity enabled on %s\n", mdname(mddev)); in md_integrity_register()
2359 if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE) || in md_integrity_register()
2360 (mddev->level != 1 && mddev->level != 10 && in md_integrity_register()
2361 bioset_integrity_create(&mddev->io_clone_set, BIO_POOL_SIZE))) { in md_integrity_register()
2369 mdname(mddev)); in md_integrity_register()
2380 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) in md_integrity_add_rdev() argument
2384 if (!mddev->gendisk) in md_integrity_add_rdev()
2387 bi_mddev = blk_get_integrity(mddev->gendisk); in md_integrity_add_rdev()
2392 if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) { in md_integrity_add_rdev()
2394 mdname(mddev), rdev->bdev); in md_integrity_add_rdev()
2408 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) in bind_rdev_to_array() argument
2414 if (find_rdev(mddev, rdev->bdev->bd_dev)) in bind_rdev_to_array()
2417 if (rdev_read_only(rdev) && mddev->pers) in bind_rdev_to_array()
2423 (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) { in bind_rdev_to_array()
2424 if (mddev->pers) { in bind_rdev_to_array()
2429 if (mddev->level > 0) in bind_rdev_to_array()
2432 mddev->dev_sectors = rdev->sectors; in bind_rdev_to_array()
2442 if (mddev->pers) in bind_rdev_to_array()
2443 choice = mddev->raid_disks; in bind_rdev_to_array()
2444 while (md_find_rdev_nr_rcu(mddev, choice)) in bind_rdev_to_array()
2448 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) { in bind_rdev_to_array()
2455 mddev->max_disks && rdev->desc_nr >= mddev->max_disks) { in bind_rdev_to_array()
2457 mdname(mddev), mddev->max_disks); in bind_rdev_to_array()
2463 rdev->mddev = mddev; in bind_rdev_to_array()
2466 if (mddev->raid_disks) in bind_rdev_to_array()
2467 mddev_create_serial_pool(mddev, rdev, false); in bind_rdev_to_array()
2469 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) in bind_rdev_to_array()
2480 list_add_rcu(&rdev->same_set, &mddev->disks); in bind_rdev_to_array()
2481 bd_link_disk_holder(rdev->bdev, mddev->gendisk); in bind_rdev_to_array()
2484 mddev->recovery_disabled++; in bind_rdev_to_array()
2490 b, mdname(mddev)); in bind_rdev_to_array()
2491 mddev_destroy_serial_pool(mddev, rdev, false); in bind_rdev_to_array()
2500 static void export_rdev(struct md_rdev *rdev, struct mddev *mddev) in export_rdev() argument
2516 struct mddev *mddev = rdev->mddev; in md_kick_rdev_from_array() local
2518 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); in md_kick_rdev_from_array()
2521 mddev_destroy_serial_pool(rdev->mddev, rdev, false); in md_kick_rdev_from_array()
2522 rdev->mddev = NULL; in md_kick_rdev_from_array()
2539 list_add(&rdev->same_set, &mddev->deleting); in md_kick_rdev_from_array()
2542 static void export_array(struct mddev *mddev) in export_array() argument
2546 while (!list_empty(&mddev->disks)) { in export_array()
2547 rdev = list_first_entry(&mddev->disks, struct md_rdev, in export_array()
2551 mddev->raid_disks = 0; in export_array()
2552 mddev->major_version = 0; in export_array()
2555 static bool set_in_sync(struct mddev *mddev) in set_in_sync() argument
2557 lockdep_assert_held(&mddev->lock); in set_in_sync()
2558 if (!mddev->in_sync) { in set_in_sync()
2559 mddev->sync_checkers++; in set_in_sync()
2560 spin_unlock(&mddev->lock); in set_in_sync()
2561 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending); in set_in_sync()
2562 spin_lock(&mddev->lock); in set_in_sync()
2563 if (!mddev->in_sync && in set_in_sync()
2564 percpu_ref_is_zero(&mddev->writes_pending)) { in set_in_sync()
2565 mddev->in_sync = 1; in set_in_sync()
2571 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in set_in_sync()
2572 sysfs_notify_dirent_safe(mddev->sysfs_state); in set_in_sync()
2574 if (--mddev->sync_checkers == 0) in set_in_sync()
2575 percpu_ref_switch_to_percpu(&mddev->writes_pending); in set_in_sync()
2577 if (mddev->safemode == 1) in set_in_sync()
2578 mddev->safemode = 0; in set_in_sync()
2579 return mddev->in_sync; in set_in_sync()
2582 static void sync_sbs(struct mddev *mddev, int nospares) in sync_sbs() argument
2591 rdev_for_each(rdev, mddev) { in sync_sbs()
2592 if (rdev->sb_events == mddev->events || in sync_sbs()
2595 rdev->sb_events+1 == mddev->events)) { in sync_sbs()
2599 sync_super(mddev, rdev); in sync_sbs()
2605 static bool does_sb_need_changing(struct mddev *mddev) in does_sb_need_changing() argument
2612 rdev_for_each(iter, mddev) in does_sb_need_changing()
2624 rdev_for_each(rdev, mddev) { in does_sb_need_changing()
2636 if ((mddev->dev_sectors != le64_to_cpu(sb->size)) || in does_sb_need_changing()
2637 (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) || in does_sb_need_changing()
2638 (mddev->layout != le32_to_cpu(sb->layout)) || in does_sb_need_changing()
2639 (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) || in does_sb_need_changing()
2640 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) in does_sb_need_changing()
2646 void md_update_sb(struct mddev *mddev, int force_change) in md_update_sb() argument
2654 if (!md_is_rdwr(mddev)) { in md_update_sb()
2656 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in md_update_sb()
2661 if (mddev_is_clustered(mddev)) { in md_update_sb()
2662 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) in md_update_sb()
2664 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) in md_update_sb()
2666 ret = md_cluster_ops->metadata_update_start(mddev); in md_update_sb()
2668 if (!does_sb_need_changing(mddev)) { in md_update_sb()
2670 md_cluster_ops->metadata_update_cancel(mddev); in md_update_sb()
2671 bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), in md_update_sb()
2684 rdev_for_each(rdev, mddev) { in md_update_sb()
2686 mddev->delta_disks >= 0 && in md_update_sb()
2687 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && in md_update_sb()
2688 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) && in md_update_sb()
2689 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_update_sb()
2692 mddev->curr_resync_completed > rdev->recovery_offset) in md_update_sb()
2693 rdev->recovery_offset = mddev->curr_resync_completed; in md_update_sb()
2696 if (!mddev->persistent) { in md_update_sb()
2697 clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in md_update_sb()
2698 clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in md_update_sb()
2699 if (!mddev->external) { in md_update_sb()
2700 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in md_update_sb()
2701 rdev_for_each(rdev, mddev) { in md_update_sb()
2705 md_error(mddev, rdev); in md_update_sb()
2712 wake_up(&mddev->sb_wait); in md_update_sb()
2716 spin_lock(&mddev->lock); in md_update_sb()
2718 mddev->utime = ktime_get_real_seconds(); in md_update_sb()
2720 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) in md_update_sb()
2722 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) in md_update_sb()
2730 if (mddev->degraded) in md_update_sb()
2742 sync_req = mddev->in_sync; in md_update_sb()
2747 && (mddev->in_sync && mddev->recovery_cp == MaxSector) in md_update_sb()
2748 && mddev->can_decrease_events in md_update_sb()
2749 && mddev->events != 1) { in md_update_sb()
2750 mddev->events--; in md_update_sb()
2751 mddev->can_decrease_events = 0; in md_update_sb()
2754 mddev->events ++; in md_update_sb()
2755 mddev->can_decrease_events = nospares; in md_update_sb()
2763 WARN_ON(mddev->events == 0); in md_update_sb()
2765 rdev_for_each(rdev, mddev) { in md_update_sb()
2772 sync_sbs(mddev, nospares); in md_update_sb()
2773 spin_unlock(&mddev->lock); in md_update_sb()
2776 mdname(mddev), mddev->in_sync); in md_update_sb()
2778 if (mddev->queue) in md_update_sb()
2779 blk_add_trace_msg(mddev->queue, "md md_update_sb"); in md_update_sb()
2781 md_bitmap_update_sb(mddev->bitmap); in md_update_sb()
2782 rdev_for_each(rdev, mddev) { in md_update_sb()
2787 md_super_write(mddev,rdev, in md_update_sb()
2793 rdev->sb_events = mddev->events; in md_update_sb()
2795 md_super_write(mddev, rdev, in md_update_sb()
2806 if (mddev->level == LEVEL_MULTIPATH) in md_update_sb()
2810 if (md_super_wait(mddev) < 0) in md_update_sb()
2814 if (mddev_is_clustered(mddev) && ret == 0) in md_update_sb()
2815 md_cluster_ops->metadata_update_finish(mddev); in md_update_sb()
2817 if (mddev->in_sync != sync_req || in md_update_sb()
2818 !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), in md_update_sb()
2822 wake_up(&mddev->sb_wait); in md_update_sb()
2823 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in md_update_sb()
2824 sysfs_notify_dirent_safe(mddev->sysfs_completed); in md_update_sb()
2826 rdev_for_each(rdev, mddev) { in md_update_sb()
2840 struct mddev *mddev = rdev->mddev; in add_bound_rdev() local
2844 if (!mddev->pers->hot_remove_disk || add_journal) { in add_bound_rdev()
2849 super_types[mddev->major_version]. in add_bound_rdev()
2850 validate_super(mddev, NULL/*freshest*/, rdev); in add_bound_rdev()
2852 mddev_suspend(mddev); in add_bound_rdev()
2853 err = mddev->pers->hot_add_disk(mddev, rdev); in add_bound_rdev()
2855 mddev_resume(mddev); in add_bound_rdev()
2863 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in add_bound_rdev()
2864 if (mddev->degraded) in add_bound_rdev()
2865 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in add_bound_rdev()
2866 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in add_bound_rdev()
2868 md_wakeup_thread(mddev->thread); in add_bound_rdev()
2958 struct mddev *mddev = rdev->mddev; in state_store() local
2962 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { in state_store()
2963 md_error(rdev->mddev, rdev); in state_store()
2965 if (test_bit(MD_BROKEN, &rdev->mddev->flags)) in state_store()
2970 if (rdev->mddev->pers) { in state_store()
2972 remove_and_add_spares(rdev->mddev, rdev); in state_store()
2978 if (mddev_is_clustered(mddev)) in state_store()
2979 err = md_cluster_ops->remove_disk(mddev, rdev); in state_store()
2983 if (mddev->pers) { in state_store()
2984 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in state_store()
2985 md_wakeup_thread(mddev->thread); in state_store()
2992 mddev_create_serial_pool(rdev->mddev, rdev, false); in state_store()
2996 mddev_destroy_serial_pool(rdev->mddev, rdev, false); in state_store()
3010 md_error(rdev->mddev, rdev); in state_store()
3015 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in state_store()
3016 md_wakeup_thread(rdev->mddev->thread); in state_store()
3032 if (rdev->mddev->pers == NULL) { in state_store()
3053 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in state_store()
3054 md_wakeup_thread(rdev->mddev->thread); in state_store()
3067 if (rdev->mddev->pers) in state_store()
3075 if (rdev->mddev->pers) in state_store()
3082 if (!rdev->mddev->pers) in state_store()
3092 if (!mddev_is_clustered(rdev->mddev) || in state_store()
3099 } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) { in state_store()
3103 } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) { in state_store()
3108 md_update_sb(mddev, 1); in state_store()
3166 if (rdev->mddev->pers && slot == -1) { in slot_store()
3177 if (rdev->mddev->pers->hot_remove_disk == NULL) in slot_store()
3180 remove_and_add_spares(rdev->mddev, rdev); in slot_store()
3183 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in slot_store()
3184 md_wakeup_thread(rdev->mddev->thread); in slot_store()
3185 } else if (rdev->mddev->pers) { in slot_store()
3194 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) in slot_store()
3197 if (rdev->mddev->pers->hot_add_disk == NULL) in slot_store()
3200 if (slot >= rdev->mddev->raid_disks && in slot_store()
3201 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) in slot_store()
3211 err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev); in slot_store()
3218 sysfs_link_rdev(rdev->mddev, rdev); in slot_store()
3221 if (slot >= rdev->mddev->raid_disks && in slot_store()
3222 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) in slot_store()
3249 if (rdev->mddev->pers && rdev->raid_disk >= 0) in offset_store()
3251 if (rdev->sectors && rdev->mddev->external) in offset_store()
3273 struct mddev *mddev = rdev->mddev; in new_offset_store() local
3278 if (mddev->sync_thread || in new_offset_store()
3279 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery)) in new_offset_store()
3287 + mddev->dev_sectors > rdev->sectors) in new_offset_store()
3296 mddev->reshape_backwards) in new_offset_store()
3303 !mddev->reshape_backwards) in new_offset_store()
3306 if (mddev->pers && mddev->persistent && in new_offset_store()
3307 !super_types[mddev->major_version] in new_offset_store()
3312 mddev->reshape_backwards = 1; in new_offset_store()
3314 mddev->reshape_backwards = 0; in new_offset_store()
3339 struct mddev *mddev; in md_rdev_overlaps() local
3343 list_for_each_entry(mddev, &all_mddevs, all_mddevs) { in md_rdev_overlaps()
3344 if (test_bit(MD_DELETED, &mddev->flags)) in md_rdev_overlaps()
3346 rdev_for_each(rdev2, mddev) { in md_rdev_overlaps()
3380 struct mddev *my_mddev = rdev->mddev; in rdev_size_store()
3449 if (rdev->mddev->pers && in recovery_start_store()
3517 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) && in ppl_sector_store()
3521 if (rdev->mddev->persistent) { in ppl_sector_store()
3522 if (rdev->mddev->major_version == 0) in ppl_sector_store()
3530 } else if (!rdev->mddev->external) { in ppl_sector_store()
3554 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) && in ppl_size_store()
3558 if (rdev->mddev->persistent) { in ppl_size_store()
3559 if (rdev->mddev->major_version == 0) in ppl_size_store()
3563 } else if (!rdev->mddev->external) { in ppl_size_store()
3596 if (!rdev->mddev) in rdev_attr_show()
3609 struct mddev *mddev = rdev->mddev; in rdev_attr_store() local
3619 rv = mddev ? mddev_lock(mddev) : -ENODEV; in rdev_attr_store()
3621 if (rdev->mddev == NULL) in rdev_attr_store()
3625 mddev_unlock(mddev); in rdev_attr_store()
3761 static int analyze_sbs(struct mddev *mddev) in analyze_sbs() argument
3767 rdev_for_each_safe(rdev, tmp, mddev) in analyze_sbs()
3768 switch (super_types[mddev->major_version]. in analyze_sbs()
3769 load_super(rdev, freshest, mddev->minor_version)) { in analyze_sbs()
3787 super_types[mddev->major_version]. in analyze_sbs()
3788 validate_super(mddev, NULL/*freshest*/, freshest); in analyze_sbs()
3791 rdev_for_each_safe(rdev, tmp, mddev) { in analyze_sbs()
3792 if (mddev->max_disks && in analyze_sbs()
3793 (rdev->desc_nr >= mddev->max_disks || in analyze_sbs()
3794 i > mddev->max_disks)) { in analyze_sbs()
3796 mdname(mddev), rdev->bdev, in analyze_sbs()
3797 mddev->max_disks); in analyze_sbs()
3802 if (super_types[mddev->major_version]. in analyze_sbs()
3803 validate_super(mddev, freshest, rdev)) { in analyze_sbs()
3810 if (mddev->level == LEVEL_MULTIPATH) { in analyze_sbs()
3815 (mddev->raid_disks - min(0, mddev->delta_disks)) && in analyze_sbs()
3862 safe_delay_show(struct mddev *mddev, char *page) in safe_delay_show() argument
3864 unsigned int msec = ((unsigned long)mddev->safemode_delay*1000)/HZ; in safe_delay_show()
3869 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) in safe_delay_store() argument
3873 if (mddev_is_clustered(mddev)) { in safe_delay_store()
3881 mddev->safemode_delay = 0; in safe_delay_store()
3883 unsigned long old_delay = mddev->safemode_delay; in safe_delay_store()
3888 mddev->safemode_delay = new_delay; in safe_delay_store()
3890 mod_timer(&mddev->safemode_timer, jiffies+1); in safe_delay_store()
3898 level_show(struct mddev *mddev, char *page) in level_show() argument
3902 spin_lock(&mddev->lock); in level_show()
3903 p = mddev->pers; in level_show()
3906 else if (mddev->clevel[0]) in level_show()
3907 ret = sprintf(page, "%s\n", mddev->clevel); in level_show()
3908 else if (mddev->level != LEVEL_NONE) in level_show()
3909 ret = sprintf(page, "%d\n", mddev->level); in level_show()
3912 spin_unlock(&mddev->lock); in level_show()
3917 level_store(struct mddev *mddev, const char *buf, size_t len) in level_store() argument
3930 rv = mddev_lock(mddev); in level_store()
3934 if (mddev->pers == NULL) { in level_store()
3935 strncpy(mddev->clevel, buf, slen); in level_store()
3936 if (mddev->clevel[slen-1] == '\n') in level_store()
3938 mddev->clevel[slen] = 0; in level_store()
3939 mddev->level = LEVEL_NONE; in level_store()
3944 if (!md_is_rdwr(mddev)) in level_store()
3954 if (mddev->sync_thread || in level_store()
3955 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in level_store()
3956 mddev->reshape_position != MaxSector || in level_store()
3957 mddev->sysfs_active) in level_store()
3961 if (!mddev->pers->quiesce) { in level_store()
3963 mdname(mddev), mddev->pers->name); in level_store()
3987 if (pers == mddev->pers) { in level_store()
3996 mdname(mddev), clevel); in level_store()
4001 rdev_for_each(rdev, mddev) in level_store()
4007 priv = pers->takeover(mddev); in level_store()
4009 mddev->new_level = mddev->level; in level_store()
4010 mddev->new_layout = mddev->layout; in level_store()
4011 mddev->new_chunk_sectors = mddev->chunk_sectors; in level_store()
4012 mddev->raid_disks -= mddev->delta_disks; in level_store()
4013 mddev->delta_disks = 0; in level_store()
4014 mddev->reshape_backwards = 0; in level_store()
4017 mdname(mddev), clevel); in level_store()
4023 mddev_suspend(mddev); in level_store()
4024 mddev_detach(mddev); in level_store()
4026 spin_lock(&mddev->lock); in level_store()
4027 oldpers = mddev->pers; in level_store()
4028 oldpriv = mddev->private; in level_store()
4029 mddev->pers = pers; in level_store()
4030 mddev->private = priv; in level_store()
4031 strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); in level_store()
4032 mddev->level = mddev->new_level; in level_store()
4033 mddev->layout = mddev->new_layout; in level_store()
4034 mddev->chunk_sectors = mddev->new_chunk_sectors; in level_store()
4035 mddev->delta_disks = 0; in level_store()
4036 mddev->reshape_backwards = 0; in level_store()
4037 mddev->degraded = 0; in level_store()
4038 spin_unlock(&mddev->lock); in level_store()
4041 mddev->external) { in level_store()
4049 mddev->in_sync = 0; in level_store()
4050 mddev->safemode_delay = 0; in level_store()
4051 mddev->safemode = 0; in level_store()
4054 oldpers->free(mddev, oldpriv); in level_store()
4059 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) in level_store()
4061 mdname(mddev)); in level_store()
4062 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); in level_store()
4063 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed"); in level_store()
4064 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded"); in level_store()
4069 if (mddev->to_remove == NULL) in level_store()
4070 mddev->to_remove = &md_redundancy_group; in level_store()
4075 rdev_for_each(rdev, mddev) { in level_store()
4078 if (rdev->new_raid_disk >= mddev->raid_disks) in level_store()
4082 sysfs_unlink_rdev(mddev, rdev); in level_store()
4084 rdev_for_each(rdev, mddev) { in level_store()
4093 if (sysfs_link_rdev(mddev, rdev)) in level_store()
4095 rdev->raid_disk, mdname(mddev)); in level_store()
4103 mddev->in_sync = 1; in level_store()
4104 del_timer_sync(&mddev->safemode_timer); in level_store()
4106 blk_set_stacking_limits(&mddev->queue->limits); in level_store()
4107 pers->run(mddev); in level_store()
4108 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in level_store()
4109 mddev_resume(mddev); in level_store()
4110 if (!mddev->thread) in level_store()
4111 md_update_sb(mddev, 1); in level_store()
4112 sysfs_notify_dirent_safe(mddev->sysfs_level); in level_store()
4116 mddev_unlock(mddev); in level_store()
4124 layout_show(struct mddev *mddev, char *page) in layout_show() argument
4127 if (mddev->reshape_position != MaxSector && in layout_show()
4128 mddev->layout != mddev->new_layout) in layout_show()
4130 mddev->new_layout, mddev->layout); in layout_show()
4131 return sprintf(page, "%d\n", mddev->layout); in layout_show()
4135 layout_store(struct mddev *mddev, const char *buf, size_t len) in layout_store() argument
4143 err = mddev_lock(mddev); in layout_store()
4147 if (mddev->pers) { in layout_store()
4148 if (mddev->pers->check_reshape == NULL) in layout_store()
4150 else if (!md_is_rdwr(mddev)) in layout_store()
4153 mddev->new_layout = n; in layout_store()
4154 err = mddev->pers->check_reshape(mddev); in layout_store()
4156 mddev->new_layout = mddev->layout; in layout_store()
4159 mddev->new_layout = n; in layout_store()
4160 if (mddev->reshape_position == MaxSector) in layout_store()
4161 mddev->layout = n; in layout_store()
4163 mddev_unlock(mddev); in layout_store()
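
The *_show/*_store pairs in this region each back one file under /sys/block/mdX/md/. In md.c they are bound to their attribute names through md_sysfs_entry descriptors; for the pair above the binding looks roughly like this:

static struct md_sysfs_entry md_layout =
__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
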
4170 raid_disks_show(struct mddev *mddev, char *page) in raid_disks_show() argument
4172 if (mddev->raid_disks == 0) in raid_disks_show()
4174 if (mddev->reshape_position != MaxSector && in raid_disks_show()
4175 mddev->delta_disks != 0) in raid_disks_show()
4176 return sprintf(page, "%d (%d)\n", mddev->raid_disks, in raid_disks_show()
4177 mddev->raid_disks - mddev->delta_disks); in raid_disks_show()
4178 return sprintf(page, "%d\n", mddev->raid_disks); in raid_disks_show()
4181 static int update_raid_disks(struct mddev *mddev, int raid_disks);
4184 raid_disks_store(struct mddev *mddev, const char *buf, size_t len) in raid_disks_store() argument
4193 err = mddev_lock(mddev); in raid_disks_store()
4196 if (mddev->pers) in raid_disks_store()
4197 err = update_raid_disks(mddev, n); in raid_disks_store()
4198 else if (mddev->reshape_position != MaxSector) { in raid_disks_store()
4200 int olddisks = mddev->raid_disks - mddev->delta_disks; in raid_disks_store()
4203 rdev_for_each(rdev, mddev) { in raid_disks_store()
4212 mddev->delta_disks = n - olddisks; in raid_disks_store()
4213 mddev->raid_disks = n; in raid_disks_store()
4214 mddev->reshape_backwards = (mddev->delta_disks < 0); in raid_disks_store()
4216 mddev->raid_disks = n; in raid_disks_store()
4218 mddev_unlock(mddev); in raid_disks_store()
4225 uuid_show(struct mddev *mddev, char *page) in uuid_show() argument
4227 return sprintf(page, "%pU\n", mddev->uuid); in uuid_show()
4233 chunk_size_show(struct mddev *mddev, char *page) in chunk_size_show() argument
4235 if (mddev->reshape_position != MaxSector && in chunk_size_show()
4236 mddev->chunk_sectors != mddev->new_chunk_sectors) in chunk_size_show()
4238 mddev->new_chunk_sectors << 9, in chunk_size_show()
4239 mddev->chunk_sectors << 9); in chunk_size_show()
4240 return sprintf(page, "%d\n", mddev->chunk_sectors << 9); in chunk_size_show()
4244 chunk_size_store(struct mddev *mddev, const char *buf, size_t len) in chunk_size_store() argument
4253 err = mddev_lock(mddev); in chunk_size_store()
4256 if (mddev->pers) { in chunk_size_store()
4257 if (mddev->pers->check_reshape == NULL) in chunk_size_store()
4259 else if (!md_is_rdwr(mddev)) in chunk_size_store()
4262 mddev->new_chunk_sectors = n >> 9; in chunk_size_store()
4263 err = mddev->pers->check_reshape(mddev); in chunk_size_store()
4265 mddev->new_chunk_sectors = mddev->chunk_sectors; in chunk_size_store()
4268 mddev->new_chunk_sectors = n >> 9; in chunk_size_store()
4269 if (mddev->reshape_position == MaxSector) in chunk_size_store()
4270 mddev->chunk_sectors = n >> 9; in chunk_size_store()
4272 mddev_unlock(mddev); in chunk_size_store()
4279 resync_start_show(struct mddev *mddev, char *page) in resync_start_show() argument
4281 if (mddev->recovery_cp == MaxSector) in resync_start_show()
4283 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); in resync_start_show()
4287 resync_start_store(struct mddev *mddev, const char *buf, size_t len) in resync_start_store() argument
4302 err = mddev_lock(mddev); in resync_start_store()
4305 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) in resync_start_store()
4309 mddev->recovery_cp = n; in resync_start_store()
4310 if (mddev->pers) in resync_start_store()
4311 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in resync_start_store()
4313 mddev_unlock(mddev); in resync_start_store()
4376 array_state_show(struct mddev *mddev, char *page) in array_state_show() argument
4380 if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) { in array_state_show()
4381 switch(mddev->ro) { in array_state_show()
4389 spin_lock(&mddev->lock); in array_state_show()
4390 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) in array_state_show()
4392 else if (mddev->in_sync) in array_state_show()
4394 else if (mddev->safemode) in array_state_show()
4398 spin_unlock(&mddev->lock); in array_state_show()
4401 if (test_bit(MD_BROKEN, &mddev->flags) && st == clean) in array_state_show()
4404 if (list_empty(&mddev->disks) && in array_state_show()
4405 mddev->raid_disks == 0 && in array_state_show()
4406 mddev->dev_sectors == 0) in array_state_show()
4414 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
4415 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
4416 static int restart_array(struct mddev *mddev);
4419 array_state_store(struct mddev *mddev, const char *buf, size_t len) in array_state_store() argument
4424 if (mddev->pers && (st == active || st == clean) && in array_state_store()
4425 mddev->ro != MD_RDONLY) { in array_state_store()
4429 spin_lock(&mddev->lock); in array_state_store()
4431 restart_array(mddev); in array_state_store()
4432 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in array_state_store()
4433 md_wakeup_thread(mddev->thread); in array_state_store()
4434 wake_up(&mddev->sb_wait); in array_state_store()
4436 restart_array(mddev); in array_state_store()
4437 if (!set_in_sync(mddev)) in array_state_store()
4441 sysfs_notify_dirent_safe(mddev->sysfs_state); in array_state_store()
4442 spin_unlock(&mddev->lock); in array_state_store()
4445 err = mddev_lock(mddev); in array_state_store()
4454 err = do_md_stop(mddev, 0, NULL); in array_state_store()
4458 if (mddev->pers) in array_state_store()
4459 err = do_md_stop(mddev, 2, NULL); in array_state_store()
4466 if (mddev->pers) in array_state_store()
4467 err = md_set_readonly(mddev, NULL); in array_state_store()
4469 mddev->ro = MD_RDONLY; in array_state_store()
4470 set_disk_ro(mddev->gendisk, 1); in array_state_store()
4471 err = do_md_run(mddev); in array_state_store()
4475 if (mddev->pers) { in array_state_store()
4476 if (md_is_rdwr(mddev)) in array_state_store()
4477 err = md_set_readonly(mddev, NULL); in array_state_store()
4478 else if (mddev->ro == MD_RDONLY) in array_state_store()
4479 err = restart_array(mddev); in array_state_store()
4481 mddev->ro = MD_AUTO_READ; in array_state_store()
4482 set_disk_ro(mddev->gendisk, 0); in array_state_store()
4485 mddev->ro = MD_AUTO_READ; in array_state_store()
4486 err = do_md_run(mddev); in array_state_store()
4490 if (mddev->pers) { in array_state_store()
4491 err = restart_array(mddev); in array_state_store()
4494 spin_lock(&mddev->lock); in array_state_store()
4495 if (!set_in_sync(mddev)) in array_state_store()
4497 spin_unlock(&mddev->lock); in array_state_store()
4502 if (mddev->pers) { in array_state_store()
4503 err = restart_array(mddev); in array_state_store()
4506 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in array_state_store()
4507 wake_up(&mddev->sb_wait); in array_state_store()
4510 mddev->ro = MD_RDWR; in array_state_store()
4511 set_disk_ro(mddev->gendisk, 0); in array_state_store()
4512 err = do_md_run(mddev); in array_state_store()
4523 if (mddev->hold_active == UNTIL_IOCTL) in array_state_store()
4524 mddev->hold_active = 0; in array_state_store()
4525 sysfs_notify_dirent_safe(mddev->sysfs_state); in array_state_store()
4527 mddev_unlock(mddev); in array_state_store()
4534 max_corrected_read_errors_show(struct mddev *mddev, char *page) { in max_corrected_read_errors_show() argument
4536 atomic_read(&mddev->max_corr_read_errors)); in max_corrected_read_errors_show()
4540 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len) in max_corrected_read_errors_store() argument
4550 atomic_set(&mddev->max_corr_read_errors, n); in max_corrected_read_errors_store()
4559 null_show(struct mddev *mddev, char *page) in null_show() argument
4565 new_dev_store(struct mddev *mddev, const char *buf, size_t len) in new_dev_store() argument
4591 err = mddev_lock(mddev); in new_dev_store()
4594 if (mddev->persistent) { in new_dev_store()
4595 rdev = md_import_device(dev, mddev->major_version, in new_dev_store()
4596 mddev->minor_version); in new_dev_store()
4597 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { in new_dev_store()
4599 = list_entry(mddev->disks.next, in new_dev_store()
4601 err = super_types[mddev->major_version] in new_dev_store()
4602 .load_super(rdev, rdev0, mddev->minor_version); in new_dev_store()
4606 } else if (mddev->external) in new_dev_store()
4612 mddev_unlock(mddev); in new_dev_store()
4615 err = bind_rdev_to_array(rdev, mddev); in new_dev_store()
4618 export_rdev(rdev, mddev); in new_dev_store()
4619 mddev_unlock(mddev); in new_dev_store()
4629 bitmap_store(struct mddev *mddev, const char *buf, size_t len) in bitmap_store() argument
4635 err = mddev_lock(mddev); in bitmap_store()
4638 if (!mddev->bitmap) in bitmap_store()
4650 md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); in bitmap_store()
4653 md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ in bitmap_store()
4655 mddev_unlock(mddev); in bitmap_store()
4663 size_show(struct mddev *mddev, char *page) in size_show() argument
4666 (unsigned long long)mddev->dev_sectors / 2); in size_show()
4669 static int update_size(struct mddev *mddev, sector_t num_sectors);
4672 size_store(struct mddev *mddev, const char *buf, size_t len) in size_store() argument
4683 err = mddev_lock(mddev); in size_store()
4686 if (mddev->pers) { in size_store()
4687 err = update_size(mddev, sectors); in size_store()
4689 md_update_sb(mddev, 1); in size_store()
4691 if (mddev->dev_sectors == 0 || in size_store()
4692 mddev->dev_sectors > sectors) in size_store()
4693 mddev->dev_sectors = sectors; in size_store()
4697 mddev_unlock(mddev); in size_store()
4711 metadata_show(struct mddev *mddev, char *page) in metadata_show() argument
4713 if (mddev->persistent) in metadata_show()
4715 mddev->major_version, mddev->minor_version); in metadata_show()
4716 else if (mddev->external) in metadata_show()
4717 return sprintf(page, "external:%s\n", mddev->metadata_type); in metadata_show()
4723 metadata_store(struct mddev *mddev, const char *buf, size_t len) in metadata_store() argument
4733 err = mddev_lock(mddev); in metadata_store()
4737 if (mddev->external && strncmp(buf, "external:", 9) == 0) in metadata_store()
4739 else if (!list_empty(&mddev->disks)) in metadata_store()
4744 mddev->persistent = 0; in metadata_store()
4745 mddev->external = 0; in metadata_store()
4746 mddev->major_version = 0; in metadata_store()
4747 mddev->minor_version = 90; in metadata_store()
4752 if (namelen >= sizeof(mddev->metadata_type)) in metadata_store()
4753 namelen = sizeof(mddev->metadata_type)-1; in metadata_store()
4754 strncpy(mddev->metadata_type, buf+9, namelen); in metadata_store()
4755 mddev->metadata_type[namelen] = 0; in metadata_store()
4756 if (namelen && mddev->metadata_type[namelen-1] == '\n') in metadata_store()
4757 mddev->metadata_type[--namelen] = 0; in metadata_store()
4758 mddev->persistent = 0; in metadata_store()
4759 mddev->external = 1; in metadata_store()
4760 mddev->major_version = 0; in metadata_store()
4761 mddev->minor_version = 90; in metadata_store()
4775 mddev->major_version = major; in metadata_store()
4776 mddev->minor_version = minor; in metadata_store()
4777 mddev->persistent = 1; in metadata_store()
4778 mddev->external = 0; in metadata_store()
4781 mddev_unlock(mddev); in metadata_store()
4789 action_show(struct mddev *mddev, char *page) in action_show() argument
4792 unsigned long recovery = mddev->recovery; in action_show()
4796 (md_is_rdwr(mddev) && test_bit(MD_RECOVERY_NEEDED, &recovery))) { in action_show()
4808 else if (mddev->reshape_position != MaxSector) in action_show()
4814 static void stop_sync_thread(struct mddev *mddev) in stop_sync_thread() argument
4816 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in stop_sync_thread()
4819 if (mddev_lock(mddev)) in stop_sync_thread()
4826 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { in stop_sync_thread()
4827 mddev_unlock(mddev); in stop_sync_thread()
4831 if (work_pending(&mddev->del_work)) in stop_sync_thread()
4834 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in stop_sync_thread()
4839 md_wakeup_thread_directly(mddev->sync_thread); in stop_sync_thread()
4841 mddev_unlock(mddev); in stop_sync_thread()
4844 static void idle_sync_thread(struct mddev *mddev) in idle_sync_thread() argument
4846 int sync_seq = atomic_read(&mddev->sync_seq); in idle_sync_thread()
4848 mutex_lock(&mddev->sync_mutex); in idle_sync_thread()
4849 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in idle_sync_thread()
4850 stop_sync_thread(mddev); in idle_sync_thread()
4852 wait_event(resync_wait, sync_seq != atomic_read(&mddev->sync_seq) || in idle_sync_thread()
4853 !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)); in idle_sync_thread()
4855 mutex_unlock(&mddev->sync_mutex); in idle_sync_thread()
4858 static void frozen_sync_thread(struct mddev *mddev) in frozen_sync_thread() argument
4860 mutex_lock(&mddev->sync_mutex); in frozen_sync_thread()
4861 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in frozen_sync_thread()
4862 stop_sync_thread(mddev); in frozen_sync_thread()
4864 wait_event(resync_wait, mddev->sync_thread == NULL && in frozen_sync_thread()
4865 !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)); in frozen_sync_thread()
4867 mutex_unlock(&mddev->sync_mutex); in frozen_sync_thread()
4871 action_store(struct mddev *mddev, const char *page, size_t len) in action_store() argument
4873 if (!mddev->pers || !mddev->pers->sync_request) in action_store()
4878 idle_sync_thread(mddev); in action_store()
4880 frozen_sync_thread(mddev); in action_store()
4881 else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in action_store()
4884 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4886 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4887 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in action_store()
4890 if (mddev->pers->start_reshape == NULL) in action_store()
4892 err = mddev_lock(mddev); in action_store()
4894 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { in action_store()
4896 } else if (mddev->reshape_position == MaxSector || in action_store()
4897 mddev->pers->check_reshape == NULL || in action_store()
4898 mddev->pers->check_reshape(mddev)) { in action_store()
4899 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4900 err = mddev->pers->start_reshape(mddev); in action_store()
4908 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4910 mddev_unlock(mddev); in action_store()
4914 sysfs_notify_dirent_safe(mddev->sysfs_degraded); in action_store()
4917 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); in action_store()
4920 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4921 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in action_store()
4922 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); in action_store()
4924 if (mddev->ro == MD_AUTO_READ) { in action_store()
4928 mddev->ro = MD_RDWR; in action_store()
4929 md_wakeup_thread(mddev->sync_thread); in action_store()
4931 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in action_store()
4932 md_wakeup_thread(mddev->thread); in action_store()
4933 sysfs_notify_dirent_safe(mddev->sysfs_action); in action_store()
4941 last_sync_action_show(struct mddev *mddev, char *page) in last_sync_action_show() argument
4943 return sprintf(page, "%s\n", mddev->last_sync_action); in last_sync_action_show()
4949 mismatch_cnt_show(struct mddev *mddev, char *page) in mismatch_cnt_show() argument
4953 atomic64_read(&mddev->resync_mismatches)); in mismatch_cnt_show()
4959 sync_min_show(struct mddev *mddev, char *page) in sync_min_show() argument
4961 return sprintf(page, "%d (%s)\n", speed_min(mddev), in sync_min_show()
4962 mddev->sync_speed_min ? "local": "system"); in sync_min_show()
4966 sync_min_store(struct mddev *mddev, const char *buf, size_t len) in sync_min_store() argument
4980 mddev->sync_speed_min = min; in sync_min_store()
4988 sync_max_show(struct mddev *mddev, char *page) in sync_max_show() argument
4990 return sprintf(page, "%d (%s)\n", speed_max(mddev), in sync_max_show()
4991 mddev->sync_speed_max ? "local": "system"); in sync_max_show()
4995 sync_max_store(struct mddev *mddev, const char *buf, size_t len) in sync_max_store() argument
5009 mddev->sync_speed_max = max; in sync_max_store()
5017 degraded_show(struct mddev *mddev, char *page) in degraded_show() argument
5019 return sprintf(page, "%d\n", mddev->degraded); in degraded_show()
5024 sync_force_parallel_show(struct mddev *mddev, char *page) in sync_force_parallel_show() argument
5026 return sprintf(page, "%d\n", mddev->parallel_resync); in sync_force_parallel_show()
5030 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len) in sync_force_parallel_store() argument
5040 mddev->parallel_resync = n; in sync_force_parallel_store()
5042 if (mddev->sync_thread) in sync_force_parallel_store()
5054 sync_speed_show(struct mddev *mddev, char *page) in sync_speed_show() argument
5057 if (mddev->curr_resync == MD_RESYNC_NONE) in sync_speed_show()
5059 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); in sync_speed_show()
5060 dt = (jiffies - mddev->resync_mark) / HZ; in sync_speed_show()
5062 db = resync - mddev->resync_mark_cnt; in sync_speed_show()
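Note: the sync_speed_show references above (curr_mark_cnt, recovery_active, resync_mark, resync_mark_cnt) compute an observed resync rate over the most recent marking window; md reports it in K/sec, i.e. 512-byte sectors completed per second divided by two. A minimal userspace sketch of that arithmetic, with hypothetical names (the real handler works in jiffies and atomics):

#include <stdio.h>

/* Hypothetical stand-ins for the mddev bookkeeping: sectors counted at the
 * last mark, sectors counted now (excluding in-flight I/O), and elapsed
 * seconds since the mark was taken. */
static unsigned long resync_speed_kib_per_sec(unsigned long mark_cnt,
					      unsigned long cur_cnt,
					      unsigned long elapsed_sec)
{
	unsigned long db = cur_cnt - mark_cnt;            /* sectors in window */
	unsigned long dt = elapsed_sec ? elapsed_sec : 1; /* avoid div by 0 */

	return db / dt / 2;   /* 512-byte sectors -> KiB per second */
}

int main(void)
{
	/* 409600 sectors (200 MiB) in 10 s -> 20480 KiB/s */
	printf("%lu KiB/s\n", resync_speed_kib_per_sec(0, 409600, 10));
	return 0;
}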
5069 sync_completed_show(struct mddev *mddev, char *page) in sync_completed_show() argument
5073 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in sync_completed_show()
5076 if (mddev->curr_resync == MD_RESYNC_YIELDED || in sync_completed_show()
5077 mddev->curr_resync == MD_RESYNC_DELAYED) in sync_completed_show()
5080 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || in sync_completed_show()
5081 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in sync_completed_show()
5082 max_sectors = mddev->resync_max_sectors; in sync_completed_show()
5084 max_sectors = mddev->dev_sectors; in sync_completed_show()
5086 resync = mddev->curr_resync_completed; in sync_completed_show()
5094 min_sync_show(struct mddev *mddev, char *page) in min_sync_show() argument
5097 (unsigned long long)mddev->resync_min); in min_sync_show()
5100 min_sync_store(struct mddev *mddev, const char *buf, size_t len) in min_sync_store() argument
5108 spin_lock(&mddev->lock); in min_sync_store()
5110 if (min > mddev->resync_max) in min_sync_store()
5114 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in min_sync_store()
5118 mddev->resync_min = round_down(min, 8); in min_sync_store()
5122 spin_unlock(&mddev->lock); in min_sync_store()
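Note: min_sync_store above clamps the requested value with round_down(min, 8), so resync_min always lands on an 8-sector boundary. For a power-of-two alignment the kernel macro reduces to a bit mask; a standalone equivalent (hypothetical name):

#include <stdio.h>

/* round_down(x, y) for power-of-two y: clear the low bits so the result
 * is the largest multiple of y that is <= x. */
static unsigned long long round_down_pow2(unsigned long long x,
					  unsigned long long y)
{
	return x & ~(y - 1);
}

int main(void)
{
	printf("%llu\n", round_down_pow2(1000005ULL, 8));  /* prints 1000000 */
	return 0;
}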
5130 max_sync_show(struct mddev *mddev, char *page) in max_sync_show() argument
5132 if (mddev->resync_max == MaxSector) in max_sync_show()
5136 (unsigned long long)mddev->resync_max); in max_sync_show()
5139 max_sync_store(struct mddev *mddev, const char *buf, size_t len) in max_sync_store() argument
5142 spin_lock(&mddev->lock); in max_sync_store()
5144 mddev->resync_max = MaxSector; in max_sync_store()
5152 if (max < mddev->resync_min) in max_sync_store()
5156 if (max < mddev->resync_max && md_is_rdwr(mddev) && in max_sync_store()
5157 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in max_sync_store()
5161 chunk = mddev->chunk_sectors; in max_sync_store()
5169 mddev->resync_max = max; in max_sync_store()
5171 wake_up(&mddev->recovery_wait); in max_sync_store()
5174 spin_unlock(&mddev->lock); in max_sync_store()
5182 suspend_lo_show(struct mddev *mddev, char *page) in suspend_lo_show() argument
5184 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); in suspend_lo_show()
5188 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len) in suspend_lo_store() argument
5199 err = mddev_lock(mddev); in suspend_lo_store()
5203 if (mddev->pers == NULL || in suspend_lo_store()
5204 mddev->pers->quiesce == NULL) in suspend_lo_store()
5206 mddev_suspend(mddev); in suspend_lo_store()
5207 mddev->suspend_lo = new; in suspend_lo_store()
5208 mddev_resume(mddev); in suspend_lo_store()
5212 mddev_unlock(mddev); in suspend_lo_store()
5219 suspend_hi_show(struct mddev *mddev, char *page) in suspend_hi_show() argument
5221 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); in suspend_hi_show()
5225 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len) in suspend_hi_store() argument
5236 err = mddev_lock(mddev); in suspend_hi_store()
5240 if (mddev->pers == NULL) in suspend_hi_store()
5243 mddev_suspend(mddev); in suspend_hi_store()
5244 mddev->suspend_hi = new; in suspend_hi_store()
5245 mddev_resume(mddev); in suspend_hi_store()
5249 mddev_unlock(mddev); in suspend_hi_store()
5256 reshape_position_show(struct mddev *mddev, char *page) in reshape_position_show() argument
5258 if (mddev->reshape_position != MaxSector) in reshape_position_show()
5260 (unsigned long long)mddev->reshape_position); in reshape_position_show()
5266 reshape_position_store(struct mddev *mddev, const char *buf, size_t len) in reshape_position_store() argument
5277 err = mddev_lock(mddev); in reshape_position_store()
5281 if (mddev->pers) in reshape_position_store()
5283 mddev->reshape_position = new; in reshape_position_store()
5284 mddev->delta_disks = 0; in reshape_position_store()
5285 mddev->reshape_backwards = 0; in reshape_position_store()
5286 mddev->new_level = mddev->level; in reshape_position_store()
5287 mddev->new_layout = mddev->layout; in reshape_position_store()
5288 mddev->new_chunk_sectors = mddev->chunk_sectors; in reshape_position_store()
5289 rdev_for_each(rdev, mddev) in reshape_position_store()
5293 mddev_unlock(mddev); in reshape_position_store()
5302 reshape_direction_show(struct mddev *mddev, char *page) in reshape_direction_show() argument
5305 mddev->reshape_backwards ? "backwards" : "forwards"); in reshape_direction_show()
5309 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len) in reshape_direction_store() argument
5320 if (mddev->reshape_backwards == backwards) in reshape_direction_store()
5323 err = mddev_lock(mddev); in reshape_direction_store()
5327 if (mddev->delta_disks) in reshape_direction_store()
5329 else if (mddev->persistent && in reshape_direction_store()
5330 mddev->major_version == 0) in reshape_direction_store()
5333 mddev->reshape_backwards = backwards; in reshape_direction_store()
5334 mddev_unlock(mddev); in reshape_direction_store()
5343 array_size_show(struct mddev *mddev, char *page) in array_size_show() argument
5345 if (mddev->external_size) in array_size_show()
5347 (unsigned long long)mddev->array_sectors/2); in array_size_show()
5353 array_size_store(struct mddev *mddev, const char *buf, size_t len) in array_size_store() argument
5358 err = mddev_lock(mddev); in array_size_store()
5363 if (mddev_is_clustered(mddev)) { in array_size_store()
5364 mddev_unlock(mddev); in array_size_store()
5369 if (mddev->pers) in array_size_store()
5370 sectors = mddev->pers->size(mddev, 0, 0); in array_size_store()
5372 sectors = mddev->array_sectors; in array_size_store()
5374 mddev->external_size = 0; in array_size_store()
5378 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) in array_size_store()
5381 mddev->external_size = 1; in array_size_store()
5385 mddev->array_sectors = sectors; in array_size_store()
5386 if (mddev->pers) in array_size_store()
5387 set_capacity_and_notify(mddev->gendisk, in array_size_store()
5388 mddev->array_sectors); in array_size_store()
5390 mddev_unlock(mddev); in array_size_store()
5399 consistency_policy_show(struct mddev *mddev, char *page) in consistency_policy_show() argument
5403 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { in consistency_policy_show()
5405 } else if (test_bit(MD_HAS_PPL, &mddev->flags)) { in consistency_policy_show()
5407 } else if (mddev->bitmap) { in consistency_policy_show()
5409 } else if (mddev->pers) { in consistency_policy_show()
5410 if (mddev->pers->sync_request) in consistency_policy_show()
5422 consistency_policy_store(struct mddev *mddev, const char *buf, size_t len) in consistency_policy_store() argument
5426 if (mddev->pers) { in consistency_policy_store()
5427 if (mddev->pers->change_consistency_policy) in consistency_policy_store()
5428 err = mddev->pers->change_consistency_policy(mddev, buf); in consistency_policy_store()
5431 } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) { in consistency_policy_store()
5432 set_bit(MD_HAS_PPL, &mddev->flags); in consistency_policy_store()
5444 static ssize_t fail_last_dev_show(struct mddev *mddev, char *page) in fail_last_dev_show() argument
5446 return sprintf(page, "%d\n", mddev->fail_last_dev); in fail_last_dev_show()
5454 fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len) in fail_last_dev_store() argument
5463 if (value != mddev->fail_last_dev) in fail_last_dev_store()
5464 mddev->fail_last_dev = value; in fail_last_dev_store()
5472 static ssize_t serialize_policy_show(struct mddev *mddev, char *page) in serialize_policy_show() argument
5474 if (mddev->pers == NULL || (mddev->pers->level != 1)) in serialize_policy_show()
5477 return sprintf(page, "%d\n", mddev->serialize_policy); in serialize_policy_show()
5485 serialize_policy_store(struct mddev *mddev, const char *buf, size_t len) in serialize_policy_store() argument
5494 if (value == mddev->serialize_policy) in serialize_policy_store()
5497 err = mddev_lock(mddev); in serialize_policy_store()
5500 if (mddev->pers == NULL || (mddev->pers->level != 1)) { in serialize_policy_store()
5506 mddev_suspend(mddev); in serialize_policy_store()
5508 mddev_create_serial_pool(mddev, NULL, true); in serialize_policy_store()
5510 mddev_destroy_serial_pool(mddev, NULL, true); in serialize_policy_store()
5511 mddev->serialize_policy = value; in serialize_policy_store()
5512 mddev_resume(mddev); in serialize_policy_store()
5514 mddev_unlock(mddev); in serialize_policy_store()
5581 struct mddev *mddev = container_of(kobj, struct mddev, kobj); in md_attr_show() local
5587 if (!mddev_get(mddev)) { in md_attr_show()
5593 rv = entry->show(mddev, page); in md_attr_show()
5594 mddev_put(mddev); in md_attr_show()
5603 struct mddev *mddev = container_of(kobj, struct mddev, kobj); in md_attr_store() local
5611 if (!mddev_get(mddev)) { in md_attr_store()
5616 rv = entry->store(mddev, page, length); in md_attr_store()
5617 mddev_put(mddev); in md_attr_store()
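Note: md_attr_show and md_attr_store (and md_kobj_release just below) recover the owning mddev from the embedded kobject via container_of, take a reference with mddev_get(), call the per-attribute show/store method, then drop the reference. A self-contained illustration of the container_of idiom, using hypothetical structures rather than the real mddev:

#include <stdio.h>
#include <stddef.h>

/* Same idea as the kernel macro: subtract the member's offset from the
 * member's address to get back to the enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kobj { int refcount; };		/* hypothetical stand-in */
struct mydev {				/* hypothetical stand-in for mddev */
	int unit;
	struct kobj kobj;		/* embedded, like mddev->kobj */
};

int main(void)
{
	struct mydev d = { .unit = 3 };
	struct kobj *kp = &d.kobj;	/* what a sysfs callback would see */
	struct mydev *back = container_of(kp, struct mydev, kobj);

	printf("unit = %d\n", back->unit);	/* prints 3 */
	return 0;
}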
5623 struct mddev *mddev = container_of(ko, struct mddev, kobj); in md_kobj_release() local
5625 if (mddev->sysfs_state) in md_kobj_release()
5626 sysfs_put(mddev->sysfs_state); in md_kobj_release()
5627 if (mddev->sysfs_level) in md_kobj_release()
5628 sysfs_put(mddev->sysfs_level); in md_kobj_release()
5630 del_gendisk(mddev->gendisk); in md_kobj_release()
5631 put_disk(mddev->gendisk); in md_kobj_release()
5648 struct mddev *mddev = container_of(ws, struct mddev, del_work); in mddev_delayed_delete() local
5650 kobject_put(&mddev->kobj); in mddev_delayed_delete()
5655 int mddev_init_writes_pending(struct mddev *mddev) in mddev_init_writes_pending() argument
5657 if (mddev->writes_pending.percpu_count_ptr) in mddev_init_writes_pending()
5659 if (percpu_ref_init(&mddev->writes_pending, no_op, in mddev_init_writes_pending()
5663 percpu_ref_put(&mddev->writes_pending); in mddev_init_writes_pending()
5668 struct mddev *md_alloc(dev_t dev, char *name) in md_alloc()
5680 struct mddev *mddev; in md_alloc() local
5694 mddev = mddev_alloc(dev); in md_alloc()
5695 if (IS_ERR(mddev)) { in md_alloc()
5696 error = PTR_ERR(mddev); in md_alloc()
5700 partitioned = (MAJOR(mddev->unit) != MD_MAJOR); in md_alloc()
5702 unit = MINOR(mddev->unit) >> shift; in md_alloc()
5707 struct mddev *mddev2; in md_alloc()
5723 mddev->hold_active = UNTIL_STOP; in md_alloc()
5730 disk->major = MAJOR(mddev->unit); in md_alloc()
5740 disk->private_data = mddev; in md_alloc()
5742 mddev->queue = disk->queue; in md_alloc()
5743 blk_set_stacking_limits(&mddev->queue->limits); in md_alloc()
5744 blk_queue_write_cache(mddev->queue, true, true); in md_alloc()
5746 mddev->gendisk = disk; in md_alloc()
5751 kobject_init(&mddev->kobj, &md_ktype); in md_alloc()
5752 error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md"); in md_alloc()
5759 mddev->hold_active = 0; in md_alloc()
5761 mddev_put(mddev); in md_alloc()
5765 kobject_uevent(&mddev->kobj, KOBJ_ADD); in md_alloc()
5766 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state"); in md_alloc()
5767 mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level"); in md_alloc()
5769 return mddev; in md_alloc()
5774 mddev_free(mddev); in md_alloc()
5782 struct mddev *mddev = md_alloc(dev, name); in md_alloc_and_put() local
5784 if (IS_ERR(mddev)) in md_alloc_and_put()
5785 return PTR_ERR(mddev); in md_alloc_and_put()
5786 mddev_put(mddev); in md_alloc_and_put()
5829 struct mddev *mddev = from_timer(mddev, t, safemode_timer); in md_safemode_timeout() local
5831 mddev->safemode = 1; in md_safemode_timeout()
5832 if (mddev->external) in md_safemode_timeout()
5833 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_safemode_timeout()
5835 md_wakeup_thread(mddev->thread); in md_safemode_timeout()
5841 struct mddev *mddev = container_of(ref, struct mddev, active_io); in active_io_release() local
5843 wake_up(&mddev->sb_wait); in active_io_release()
5846 int md_run(struct mddev *mddev) in md_run() argument
5853 if (list_empty(&mddev->disks)) in md_run()
5857 if (mddev->pers) in md_run()
5860 if (mddev->sysfs_active) in md_run()
5866 if (!mddev->raid_disks) { in md_run()
5867 if (!mddev->persistent) in md_run()
5869 err = analyze_sbs(mddev); in md_run()
5874 if (mddev->level != LEVEL_NONE) in md_run()
5875 request_module("md-level-%d", mddev->level); in md_run()
5876 else if (mddev->clevel[0]) in md_run()
5877 request_module("md-%s", mddev->clevel); in md_run()
5884 mddev->has_superblocks = false; in md_run()
5885 rdev_for_each(rdev, mddev) { in md_run()
5890 if (mddev->ro != MD_RDONLY && rdev_read_only(rdev)) { in md_run()
5891 mddev->ro = MD_RDONLY; in md_run()
5892 if (mddev->gendisk) in md_run()
5893 set_disk_ro(mddev->gendisk, 1); in md_run()
5897 mddev->has_superblocks = true; in md_run()
5906 if (mddev->dev_sectors && in md_run()
5907 rdev->data_offset + mddev->dev_sectors in md_run()
5910 mdname(mddev)); in md_run()
5917 mdname(mddev)); in md_run()
5925 err = percpu_ref_init(&mddev->active_io, active_io_release, in md_run()
5930 if (!bioset_initialized(&mddev->bio_set)) { in md_run()
5931 err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); in md_run()
5935 if (!bioset_initialized(&mddev->sync_set)) { in md_run()
5936 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); in md_run()
5941 if (!bioset_initialized(&mddev->io_clone_set)) { in md_run()
5942 err = bioset_init(&mddev->io_clone_set, BIO_POOL_SIZE, in md_run()
5949 pers = find_pers(mddev->level, mddev->clevel); in md_run()
5952 if (mddev->level != LEVEL_NONE) in md_run()
5954 mddev->level); in md_run()
5957 mddev->clevel); in md_run()
5962 if (mddev->level != pers->level) { in md_run()
5963 mddev->level = pers->level; in md_run()
5964 mddev->new_level = pers->level; in md_run()
5966 strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); in md_run()
5968 if (mddev->reshape_position != MaxSector && in md_run()
5983 rdev_for_each(rdev, mddev) in md_run()
5984 rdev_for_each(rdev2, mddev) { in md_run()
5989 mdname(mddev), in md_run()
6000 mddev->recovery = 0; in md_run()
6002 mddev->resync_max_sectors = mddev->dev_sectors; in md_run()
6004 mddev->ok_start_degraded = start_dirty_degraded; in md_run()
6006 if (start_readonly && md_is_rdwr(mddev)) in md_run()
6007 mddev->ro = MD_AUTO_READ; /* read-only, but switch on first write */ in md_run()
6009 err = pers->run(mddev); in md_run()
6012 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) { in md_run()
6013 WARN_ONCE(!mddev->external_size, in md_run()
6017 (unsigned long long)mddev->array_sectors / 2, in md_run()
6018 (unsigned long long)pers->size(mddev, 0, 0) / 2); in md_run()
6022 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) { in md_run()
6025 bitmap = md_bitmap_create(mddev, -1); in md_run()
6029 mdname(mddev), err); in md_run()
6031 mddev->bitmap = bitmap; in md_run()
6037 if (mddev->bitmap_info.max_write_behind > 0) { in md_run()
6040 rdev_for_each(rdev, mddev) { in md_run()
6045 if (create_pool && mddev->serial_info_pool == NULL) { in md_run()
6046 mddev->serial_info_pool = in md_run()
6049 if (!mddev->serial_info_pool) { in md_run()
6056 if (mddev->queue) { in md_run()
6059 rdev_for_each(rdev, mddev) { in md_run()
6065 if (mddev->degraded) in md_run()
6068 blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue); in md_run()
6070 blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue); in md_run()
6071 blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue); in md_run()
6075 blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue); in md_run()
6078 if (mddev->kobj.sd && in md_run()
6079 sysfs_create_group(&mddev->kobj, &md_redundancy_group)) in md_run()
6081 mdname(mddev)); in md_run()
6082 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); in md_run()
6083 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed"); in md_run()
6084 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded"); in md_run()
6085 } else if (mddev->ro == MD_AUTO_READ) in md_run()
6086 mddev->ro = MD_RDWR; in md_run()
6088 atomic_set(&mddev->max_corr_read_errors, in md_run()
6090 mddev->safemode = 0; in md_run()
6091 if (mddev_is_clustered(mddev)) in md_run()
6092 mddev->safemode_delay = 0; in md_run()
6094 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY; in md_run()
6095 mddev->in_sync = 1; in md_run()
6097 spin_lock(&mddev->lock); in md_run()
6098 mddev->pers = pers; in md_run()
6099 spin_unlock(&mddev->lock); in md_run()
6100 rdev_for_each(rdev, mddev) in md_run()
6102 sysfs_link_rdev(mddev, rdev); /* failure here is OK */ in md_run()
6104 if (mddev->degraded && md_is_rdwr(mddev)) in md_run()
6108 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_run()
6109 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_run()
6111 if (mddev->sb_flags) in md_run()
6112 md_update_sb(mddev, 0); in md_run()
6118 mddev_detach(mddev); in md_run()
6119 if (mddev->private) in md_run()
6120 pers->free(mddev, mddev->private); in md_run()
6121 mddev->private = NULL; in md_run()
6123 md_bitmap_destroy(mddev); in md_run()
6125 bioset_exit(&mddev->io_clone_set); in md_run()
6127 bioset_exit(&mddev->sync_set); in md_run()
6129 bioset_exit(&mddev->bio_set); in md_run()
6131 percpu_ref_exit(&mddev->active_io); in md_run()
6136 int do_md_run(struct mddev *mddev) in do_md_run() argument
6140 set_bit(MD_NOT_READY, &mddev->flags); in do_md_run()
6141 err = md_run(mddev); in do_md_run()
6144 err = md_bitmap_load(mddev); in do_md_run()
6146 md_bitmap_destroy(mddev); in do_md_run()
6150 if (mddev_is_clustered(mddev)) in do_md_run()
6151 md_allow_write(mddev); in do_md_run()
6154 md_start(mddev); in do_md_run()
6156 md_wakeup_thread(mddev->thread); in do_md_run()
6157 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ in do_md_run()
6159 set_capacity_and_notify(mddev->gendisk, mddev->array_sectors); in do_md_run()
6160 clear_bit(MD_NOT_READY, &mddev->flags); in do_md_run()
6161 mddev->changed = 1; in do_md_run()
6162 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); in do_md_run()
6163 sysfs_notify_dirent_safe(mddev->sysfs_state); in do_md_run()
6164 sysfs_notify_dirent_safe(mddev->sysfs_action); in do_md_run()
6165 sysfs_notify_dirent_safe(mddev->sysfs_degraded); in do_md_run()
6167 clear_bit(MD_NOT_READY, &mddev->flags); in do_md_run()
6171 int md_start(struct mddev *mddev) in md_start() argument
6175 if (mddev->pers->start) { in md_start()
6176 set_bit(MD_RECOVERY_WAIT, &mddev->recovery); in md_start()
6177 md_wakeup_thread(mddev->thread); in md_start()
6178 ret = mddev->pers->start(mddev); in md_start()
6179 clear_bit(MD_RECOVERY_WAIT, &mddev->recovery); in md_start()
6180 md_wakeup_thread(mddev->sync_thread); in md_start()
6186 static int restart_array(struct mddev *mddev) in restart_array() argument
6188 struct gendisk *disk = mddev->gendisk; in restart_array()
6194 if (list_empty(&mddev->disks)) in restart_array()
6196 if (!mddev->pers) in restart_array()
6198 if (md_is_rdwr(mddev)) in restart_array()
6202 rdev_for_each_rcu(rdev, mddev) { in restart_array()
6210 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal) in restart_array()
6216 mddev->safemode = 0; in restart_array()
6217 mddev->ro = MD_RDWR; in restart_array()
6219 pr_debug("md: %s switched to read-write mode.\n", mdname(mddev)); in restart_array()
6221 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in restart_array()
6222 md_wakeup_thread(mddev->thread); in restart_array()
6223 md_wakeup_thread(mddev->sync_thread); in restart_array()
6224 sysfs_notify_dirent_safe(mddev->sysfs_state); in restart_array()
6228 static void md_clean(struct mddev *mddev) in md_clean() argument
6230 mddev->array_sectors = 0; in md_clean()
6231 mddev->external_size = 0; in md_clean()
6232 mddev->dev_sectors = 0; in md_clean()
6233 mddev->raid_disks = 0; in md_clean()
6234 mddev->recovery_cp = 0; in md_clean()
6235 mddev->resync_min = 0; in md_clean()
6236 mddev->resync_max = MaxSector; in md_clean()
6237 mddev->reshape_position = MaxSector; in md_clean()
6239 mddev->persistent = 0; in md_clean()
6240 mddev->level = LEVEL_NONE; in md_clean()
6241 mddev->clevel[0] = 0; in md_clean()
6247 if (mddev->hold_active) in md_clean()
6248 mddev->flags = 0; in md_clean()
6250 mddev->flags &= BIT_ULL_MASK(MD_CLOSING); in md_clean()
6251 mddev->sb_flags = 0; in md_clean()
6252 mddev->ro = MD_RDWR; in md_clean()
6253 mddev->metadata_type[0] = 0; in md_clean()
6254 mddev->chunk_sectors = 0; in md_clean()
6255 mddev->ctime = mddev->utime = 0; in md_clean()
6256 mddev->layout = 0; in md_clean()
6257 mddev->max_disks = 0; in md_clean()
6258 mddev->events = 0; in md_clean()
6259 mddev->can_decrease_events = 0; in md_clean()
6260 mddev->delta_disks = 0; in md_clean()
6261 mddev->reshape_backwards = 0; in md_clean()
6262 mddev->new_level = LEVEL_NONE; in md_clean()
6263 mddev->new_layout = 0; in md_clean()
6264 mddev->new_chunk_sectors = 0; in md_clean()
6265 mddev->curr_resync = MD_RESYNC_NONE; in md_clean()
6266 atomic64_set(&mddev->resync_mismatches, 0); in md_clean()
6267 mddev->suspend_lo = mddev->suspend_hi = 0; in md_clean()
6268 mddev->sync_speed_min = mddev->sync_speed_max = 0; in md_clean()
6269 mddev->recovery = 0; in md_clean()
6270 mddev->in_sync = 0; in md_clean()
6271 mddev->changed = 0; in md_clean()
6272 mddev->degraded = 0; in md_clean()
6273 mddev->safemode = 0; in md_clean()
6274 mddev->private = NULL; in md_clean()
6275 mddev->cluster_info = NULL; in md_clean()
6276 mddev->bitmap_info.offset = 0; in md_clean()
6277 mddev->bitmap_info.default_offset = 0; in md_clean()
6278 mddev->bitmap_info.default_space = 0; in md_clean()
6279 mddev->bitmap_info.chunksize = 0; in md_clean()
6280 mddev->bitmap_info.daemon_sleep = 0; in md_clean()
6281 mddev->bitmap_info.max_write_behind = 0; in md_clean()
6282 mddev->bitmap_info.nodes = 0; in md_clean()
6285 static void __md_stop_writes(struct mddev *mddev) in __md_stop_writes() argument
6287 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in __md_stop_writes()
6288 if (work_pending(&mddev->del_work)) in __md_stop_writes()
6290 if (mddev->sync_thread) { in __md_stop_writes()
6291 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in __md_stop_writes()
6292 md_reap_sync_thread(mddev); in __md_stop_writes()
6295 del_timer_sync(&mddev->safemode_timer); in __md_stop_writes()
6297 if (mddev->pers && mddev->pers->quiesce) { in __md_stop_writes()
6298 mddev->pers->quiesce(mddev, 1); in __md_stop_writes()
6299 mddev->pers->quiesce(mddev, 0); in __md_stop_writes()
6301 md_bitmap_flush(mddev); in __md_stop_writes()
6303 if (md_is_rdwr(mddev) && in __md_stop_writes()
6304 ((!mddev->in_sync && !mddev_is_clustered(mddev)) || in __md_stop_writes()
6305 mddev->sb_flags)) { in __md_stop_writes()
6307 if (!mddev_is_clustered(mddev)) in __md_stop_writes()
6308 mddev->in_sync = 1; in __md_stop_writes()
6309 md_update_sb(mddev, 1); in __md_stop_writes()
6312 mddev->serialize_policy = 0; in __md_stop_writes()
6313 mddev_destroy_serial_pool(mddev, NULL, true); in __md_stop_writes()
6316 void md_stop_writes(struct mddev *mddev) in md_stop_writes() argument
6318 mddev_lock_nointr(mddev); in md_stop_writes()
6319 __md_stop_writes(mddev); in md_stop_writes()
6320 mddev_unlock(mddev); in md_stop_writes()
6324 static void mddev_detach(struct mddev *mddev) in mddev_detach() argument
6326 md_bitmap_wait_behind_writes(mddev); in mddev_detach()
6327 if (mddev->pers && mddev->pers->quiesce && !is_md_suspended(mddev)) { in mddev_detach()
6328 mddev->pers->quiesce(mddev, 1); in mddev_detach()
6329 mddev->pers->quiesce(mddev, 0); in mddev_detach()
6331 md_unregister_thread(mddev, &mddev->thread); in mddev_detach()
6332 if (mddev->queue) in mddev_detach()
6333 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ in mddev_detach()
6336 static void __md_stop(struct mddev *mddev) in __md_stop() argument
6338 struct md_personality *pers = mddev->pers; in __md_stop()
6339 md_bitmap_destroy(mddev); in __md_stop()
6340 mddev_detach(mddev); in __md_stop()
6342 if (mddev->event_work.func) in __md_stop()
6344 spin_lock(&mddev->lock); in __md_stop()
6345 mddev->pers = NULL; in __md_stop()
6346 spin_unlock(&mddev->lock); in __md_stop()
6347 if (mddev->private) in __md_stop()
6348 pers->free(mddev, mddev->private); in __md_stop()
6349 mddev->private = NULL; in __md_stop()
6350 if (pers->sync_request && mddev->to_remove == NULL) in __md_stop()
6351 mddev->to_remove = &md_redundancy_group; in __md_stop()
6353 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in __md_stop()
6355 percpu_ref_exit(&mddev->active_io); in __md_stop()
6356 bioset_exit(&mddev->bio_set); in __md_stop()
6357 bioset_exit(&mddev->sync_set); in __md_stop()
6358 bioset_exit(&mddev->io_clone_set); in __md_stop()
6361 void md_stop(struct mddev *mddev) in md_stop() argument
6363 lockdep_assert_held(&mddev->reconfig_mutex); in md_stop()
6368 __md_stop_writes(mddev); in md_stop()
6369 __md_stop(mddev); in md_stop()
6370 percpu_ref_exit(&mddev->writes_pending); in md_stop()
6375 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) in md_set_readonly() argument
6380 if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) in md_set_readonly()
6383 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { in md_set_readonly()
6385 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in md_set_readonly()
6386 md_wakeup_thread(mddev->thread); in md_set_readonly()
6388 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in md_set_readonly()
6389 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_set_readonly()
6395 md_wakeup_thread_directly(mddev->sync_thread); in md_set_readonly()
6397 mddev_unlock(mddev); in md_set_readonly()
6399 &mddev->recovery)); in md_set_readonly()
6400 wait_event(mddev->sb_wait, in md_set_readonly()
6401 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); in md_set_readonly()
6402 mddev_lock_nointr(mddev); in md_set_readonly()
6404 mutex_lock(&mddev->open_mutex); in md_set_readonly()
6405 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || in md_set_readonly()
6406 mddev->sync_thread || in md_set_readonly()
6407 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { in md_set_readonly()
6408 pr_warn("md: %s still in use.\n",mdname(mddev)); in md_set_readonly()
6413 if (mddev->pers) { in md_set_readonly()
6414 __md_stop_writes(mddev); in md_set_readonly()
6416 if (mddev->ro == MD_RDONLY) { in md_set_readonly()
6421 mddev->ro = MD_RDONLY; in md_set_readonly()
6422 set_disk_ro(mddev->gendisk, 1); in md_set_readonly()
6426 if ((mddev->pers && !err) || did_freeze) { in md_set_readonly()
6427 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in md_set_readonly()
6428 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_set_readonly()
6429 md_wakeup_thread(mddev->thread); in md_set_readonly()
6430 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_set_readonly()
6433 mutex_unlock(&mddev->open_mutex); in md_set_readonly()
6441 static int do_md_stop(struct mddev *mddev, int mode, in do_md_stop() argument
6444 struct gendisk *disk = mddev->gendisk; in do_md_stop()
6448 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { in do_md_stop()
6450 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in do_md_stop()
6451 md_wakeup_thread(mddev->thread); in do_md_stop()
6453 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in do_md_stop()
6454 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in do_md_stop()
6460 md_wakeup_thread_directly(mddev->sync_thread); in do_md_stop()
6462 mddev_unlock(mddev); in do_md_stop()
6463 wait_event(resync_wait, (mddev->sync_thread == NULL && in do_md_stop()
6465 &mddev->recovery))); in do_md_stop()
6466 mddev_lock_nointr(mddev); in do_md_stop()
6468 mutex_lock(&mddev->open_mutex); in do_md_stop()
6469 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || in do_md_stop()
6470 mddev->sysfs_active || in do_md_stop()
6471 mddev->sync_thread || in do_md_stop()
6472 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { in do_md_stop()
6473 pr_warn("md: %s still in use.\n",mdname(mddev)); in do_md_stop()
6474 mutex_unlock(&mddev->open_mutex); in do_md_stop()
6476 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in do_md_stop()
6477 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in do_md_stop()
6478 md_wakeup_thread(mddev->thread); in do_md_stop()
6482 if (mddev->pers) { in do_md_stop()
6483 if (!md_is_rdwr(mddev)) in do_md_stop()
6486 __md_stop_writes(mddev); in do_md_stop()
6487 __md_stop(mddev); in do_md_stop()
6490 sysfs_notify_dirent_safe(mddev->sysfs_state); in do_md_stop()
6492 rdev_for_each(rdev, mddev) in do_md_stop()
6494 sysfs_unlink_rdev(mddev, rdev); in do_md_stop()
6497 mutex_unlock(&mddev->open_mutex); in do_md_stop()
6498 mddev->changed = 1; in do_md_stop()
6500 if (!md_is_rdwr(mddev)) in do_md_stop()
6501 mddev->ro = MD_RDWR; in do_md_stop()
6503 mutex_unlock(&mddev->open_mutex); in do_md_stop()
6508 pr_info("md: %s stopped.\n", mdname(mddev)); in do_md_stop()
6510 if (mddev->bitmap_info.file) { in do_md_stop()
6511 struct file *f = mddev->bitmap_info.file; in do_md_stop()
6512 spin_lock(&mddev->lock); in do_md_stop()
6513 mddev->bitmap_info.file = NULL; in do_md_stop()
6514 spin_unlock(&mddev->lock); in do_md_stop()
6517 mddev->bitmap_info.offset = 0; in do_md_stop()
6519 export_array(mddev); in do_md_stop()
6521 md_clean(mddev); in do_md_stop()
6522 if (mddev->hold_active == UNTIL_STOP) in do_md_stop()
6523 mddev->hold_active = 0; in do_md_stop()
6526 sysfs_notify_dirent_safe(mddev->sysfs_state); in do_md_stop()
6531 static void autorun_array(struct mddev *mddev) in autorun_array() argument
6536 if (list_empty(&mddev->disks)) in autorun_array()
6541 rdev_for_each(rdev, mddev) { in autorun_array()
6546 err = do_md_run(mddev); in autorun_array()
6549 do_md_stop(mddev, 0, NULL); in autorun_array()
6568 struct mddev *mddev; in autorun_devices() local
6605 mddev = md_alloc(dev, NULL); in autorun_devices()
6606 if (IS_ERR(mddev)) in autorun_devices()
6609 if (mddev_lock(mddev)) in autorun_devices()
6610 pr_warn("md: %s locked, cannot run\n", mdname(mddev)); in autorun_devices()
6611 else if (mddev->raid_disks || mddev->major_version in autorun_devices()
6612 || !list_empty(&mddev->disks)) { in autorun_devices()
6614 mdname(mddev), rdev0->bdev); in autorun_devices()
6615 mddev_unlock(mddev); in autorun_devices()
6617 pr_debug("md: created %s\n", mdname(mddev)); in autorun_devices()
6618 mddev->persistent = 1; in autorun_devices()
6621 if (bind_rdev_to_array(rdev, mddev)) in autorun_devices()
6622 export_rdev(rdev, mddev); in autorun_devices()
6624 autorun_array(mddev); in autorun_devices()
6625 mddev_unlock(mddev); in autorun_devices()
6632 export_rdev(rdev, mddev); in autorun_devices()
6634 mddev_put(mddev); in autorun_devices()
6654 static int get_array_info(struct mddev *mddev, void __user *arg) in get_array_info() argument
6662 rdev_for_each_rcu(rdev, mddev) { in get_array_info()
6679 info.major_version = mddev->major_version; in get_array_info()
6680 info.minor_version = mddev->minor_version; in get_array_info()
6682 info.ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); in get_array_info()
6683 info.level = mddev->level; in get_array_info()
6684 info.size = mddev->dev_sectors / 2; in get_array_info()
6685 if (info.size != mddev->dev_sectors / 2) /* overflow */ in get_array_info()
6688 info.raid_disks = mddev->raid_disks; in get_array_info()
6689 info.md_minor = mddev->md_minor; in get_array_info()
6690 info.not_persistent= !mddev->persistent; in get_array_info()
6692 info.utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); in get_array_info()
6694 if (mddev->in_sync) in get_array_info()
6696 if (mddev->bitmap && mddev->bitmap_info.offset) in get_array_info()
6698 if (mddev_is_clustered(mddev)) in get_array_info()
6705 info.layout = mddev->layout; in get_array_info()
6706 info.chunk_size = mddev->chunk_sectors << 9; in get_array_info()
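Note: get_array_info above converts md's internal 512-byte-sector units for the legacy ioctl structure: dev_sectors / 2 gives the component size in KiB and chunk_sectors << 9 gives the chunk size in bytes. A small sketch of those conversions (the sample values are made up):

#include <stdio.h>

#define SECTOR_SHIFT	9	/* 512-byte sectors, as in the block layer */

int main(void)
{
	unsigned long long dev_sectors = 2097152;	/* 1 GiB component size */
	unsigned int chunk_sectors = 128;		/* 64 KiB chunk */

	printf("size       = %llu KiB\n", dev_sectors / 2);
	printf("chunk_size = %u bytes\n", chunk_sectors << SECTOR_SHIFT);
	return 0;
}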
6714 static int get_bitmap_file(struct mddev *mddev, void __user * arg) in get_bitmap_file() argument
6725 spin_lock(&mddev->lock); in get_bitmap_file()
6727 if (mddev->bitmap_info.file) { in get_bitmap_file()
6728 ptr = file_path(mddev->bitmap_info.file, file->pathname, in get_bitmap_file()
6736 spin_unlock(&mddev->lock); in get_bitmap_file()
6746 static int get_disk_info(struct mddev *mddev, void __user * arg) in get_disk_info() argument
6755 rdev = md_find_rdev_nr_rcu(mddev, info.number); in get_disk_info()
6786 int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info) in md_add_new_disk() argument
6791 if (mddev_is_clustered(mddev) && in md_add_new_disk()
6794 mdname(mddev)); in md_add_new_disk()
6801 if (!mddev->raid_disks) { in md_add_new_disk()
6804 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); in md_add_new_disk()
6810 if (!list_empty(&mddev->disks)) { in md_add_new_disk()
6812 = list_entry(mddev->disks.next, in md_add_new_disk()
6814 err = super_types[mddev->major_version] in md_add_new_disk()
6815 .load_super(rdev, rdev0, mddev->minor_version); in md_add_new_disk()
6820 export_rdev(rdev, mddev); in md_add_new_disk()
6824 err = bind_rdev_to_array(rdev, mddev); in md_add_new_disk()
6826 export_rdev(rdev, mddev); in md_add_new_disk()
6835 if (mddev->pers) { in md_add_new_disk()
6837 if (!mddev->pers->hot_add_disk) { in md_add_new_disk()
6839 mdname(mddev)); in md_add_new_disk()
6842 if (mddev->persistent) in md_add_new_disk()
6843 rdev = md_import_device(dev, mddev->major_version, in md_add_new_disk()
6844 mddev->minor_version); in md_add_new_disk()
6853 if (!mddev->persistent) { in md_add_new_disk()
6855 info->raid_disk < mddev->raid_disks) { in md_add_new_disk()
6862 super_types[mddev->major_version]. in md_add_new_disk()
6863 validate_super(mddev, NULL/*freshest*/, rdev); in md_add_new_disk()
6869 export_rdev(rdev, mddev); in md_add_new_disk()
6888 rdev_for_each(rdev2, mddev) { in md_add_new_disk()
6894 if (has_journal || mddev->bitmap) { in md_add_new_disk()
6895 export_rdev(rdev, mddev); in md_add_new_disk()
6903 if (mddev_is_clustered(mddev)) { in md_add_new_disk()
6908 err = md_cluster_ops->add_new_disk(mddev, rdev); in md_add_new_disk()
6910 export_rdev(rdev, mddev); in md_add_new_disk()
6917 err = bind_rdev_to_array(rdev, mddev); in md_add_new_disk()
6920 export_rdev(rdev, mddev); in md_add_new_disk()
6922 if (mddev_is_clustered(mddev)) { in md_add_new_disk()
6925 err = md_cluster_ops->new_disk_ack(mddev, in md_add_new_disk()
6932 md_cluster_ops->add_new_disk_cancel(mddev); in md_add_new_disk()
6946 if (mddev->major_version != 0) { in md_add_new_disk()
6947 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev)); in md_add_new_disk()
6960 if (info->raid_disk < mddev->raid_disks) in md_add_new_disk()
6965 if (rdev->raid_disk < mddev->raid_disks) in md_add_new_disk()
6974 if (!mddev->persistent) { in md_add_new_disk()
6981 err = bind_rdev_to_array(rdev, mddev); in md_add_new_disk()
6983 export_rdev(rdev, mddev); in md_add_new_disk()
6991 static int hot_remove_disk(struct mddev *mddev, dev_t dev) in hot_remove_disk() argument
6995 if (!mddev->pers) in hot_remove_disk()
6998 rdev = find_rdev(mddev, dev); in hot_remove_disk()
7006 remove_and_add_spares(mddev, rdev); in hot_remove_disk()
7012 if (mddev_is_clustered(mddev)) { in hot_remove_disk()
7013 if (md_cluster_ops->remove_disk(mddev, rdev)) in hot_remove_disk()
7018 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in hot_remove_disk()
7019 if (mddev->thread) in hot_remove_disk()
7020 md_wakeup_thread(mddev->thread); in hot_remove_disk()
7022 md_update_sb(mddev, 1); in hot_remove_disk()
7028 rdev->bdev, mdname(mddev)); in hot_remove_disk()
7032 static int hot_add_disk(struct mddev *mddev, dev_t dev) in hot_add_disk() argument
7037 if (!mddev->pers) in hot_add_disk()
7040 if (mddev->major_version != 0) { in hot_add_disk()
7042 mdname(mddev)); in hot_add_disk()
7045 if (!mddev->pers->hot_add_disk) { in hot_add_disk()
7047 mdname(mddev)); in hot_add_disk()
7058 if (mddev->persistent) in hot_add_disk()
7067 rdev->bdev, mdname(mddev)); in hot_add_disk()
7075 err = bind_rdev_to_array(rdev, mddev); in hot_add_disk()
7086 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in hot_add_disk()
7087 if (!mddev->thread) in hot_add_disk()
7088 md_update_sb(mddev, 1); in hot_add_disk()
7095 mdname(mddev), rdev->bdev); in hot_add_disk()
7096 blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue); in hot_add_disk()
7102 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in hot_add_disk()
7103 md_wakeup_thread(mddev->thread); in hot_add_disk()
7108 export_rdev(rdev, mddev); in hot_add_disk()
7112 static int set_bitmap_file(struct mddev *mddev, int fd) in set_bitmap_file() argument
7116 if (mddev->pers) { in set_bitmap_file()
7117 if (!mddev->pers->quiesce || !mddev->thread) in set_bitmap_file()
7119 if (mddev->recovery || mddev->sync_thread) in set_bitmap_file()
7128 if (mddev->bitmap || mddev->bitmap_info.file) in set_bitmap_file()
7133 mdname(mddev)); in set_bitmap_file()
7137 mdname(mddev)); in set_bitmap_file()
7143 mdname(mddev)); in set_bitmap_file()
7150 mdname(mddev)); in set_bitmap_file()
7154 mdname(mddev)); in set_bitmap_file()
7158 mdname(mddev)); in set_bitmap_file()
7165 mddev->bitmap_info.file = f; in set_bitmap_file()
7166 mddev->bitmap_info.offset = 0; /* file overrides offset */ in set_bitmap_file()
7167 } else if (mddev->bitmap == NULL) in set_bitmap_file()
7170 if (mddev->pers) { in set_bitmap_file()
7174 bitmap = md_bitmap_create(mddev, -1); in set_bitmap_file()
7175 mddev_suspend(mddev); in set_bitmap_file()
7177 mddev->bitmap = bitmap; in set_bitmap_file()
7178 err = md_bitmap_load(mddev); in set_bitmap_file()
7182 md_bitmap_destroy(mddev); in set_bitmap_file()
7185 mddev_resume(mddev); in set_bitmap_file()
7187 mddev_suspend(mddev); in set_bitmap_file()
7188 md_bitmap_destroy(mddev); in set_bitmap_file()
7189 mddev_resume(mddev); in set_bitmap_file()
7193 struct file *f = mddev->bitmap_info.file; in set_bitmap_file()
7195 spin_lock(&mddev->lock); in set_bitmap_file()
7196 mddev->bitmap_info.file = NULL; in set_bitmap_file()
7197 spin_unlock(&mddev->lock); in set_bitmap_file()
7218 int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info) in md_set_array_info() argument
7230 mddev->major_version = info->major_version; in md_set_array_info()
7231 mddev->minor_version = info->minor_version; in md_set_array_info()
7232 mddev->patch_version = info->patch_version; in md_set_array_info()
7233 mddev->persistent = !info->not_persistent; in md_set_array_info()
7237 mddev->ctime = ktime_get_real_seconds(); in md_set_array_info()
7240 mddev->major_version = MD_MAJOR_VERSION; in md_set_array_info()
7241 mddev->minor_version = MD_MINOR_VERSION; in md_set_array_info()
7242 mddev->patch_version = MD_PATCHLEVEL_VERSION; in md_set_array_info()
7243 mddev->ctime = ktime_get_real_seconds(); in md_set_array_info()
7245 mddev->level = info->level; in md_set_array_info()
7246 mddev->clevel[0] = 0; in md_set_array_info()
7247 mddev->dev_sectors = 2 * (sector_t)info->size; in md_set_array_info()
7248 mddev->raid_disks = info->raid_disks; in md_set_array_info()
7253 mddev->recovery_cp = MaxSector; in md_set_array_info()
7255 mddev->recovery_cp = 0; in md_set_array_info()
7256 mddev->persistent = ! info->not_persistent; in md_set_array_info()
7257 mddev->external = 0; in md_set_array_info()
7259 mddev->layout = info->layout; in md_set_array_info()
7260 if (mddev->level == 0) in md_set_array_info()
7262 mddev->layout = -1; in md_set_array_info()
7263 mddev->chunk_sectors = info->chunk_size >> 9; in md_set_array_info()
7265 if (mddev->persistent) { in md_set_array_info()
7266 mddev->max_disks = MD_SB_DISKS; in md_set_array_info()
7267 mddev->flags = 0; in md_set_array_info()
7268 mddev->sb_flags = 0; in md_set_array_info()
7270 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in md_set_array_info()
7272 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; in md_set_array_info()
7273 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); in md_set_array_info()
7274 mddev->bitmap_info.offset = 0; in md_set_array_info()
7276 mddev->reshape_position = MaxSector; in md_set_array_info()
7281 get_random_bytes(mddev->uuid, 16); in md_set_array_info()
7283 mddev->new_level = mddev->level; in md_set_array_info()
7284 mddev->new_chunk_sectors = mddev->chunk_sectors; in md_set_array_info()
7285 mddev->new_layout = mddev->layout; in md_set_array_info()
7286 mddev->delta_disks = 0; in md_set_array_info()
7287 mddev->reshape_backwards = 0; in md_set_array_info()
7292 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors) in md_set_array_sectors() argument
7294 lockdep_assert_held(&mddev->reconfig_mutex); in md_set_array_sectors()
7296 if (mddev->external_size) in md_set_array_sectors()
7299 mddev->array_sectors = array_sectors; in md_set_array_sectors()
7303 static int update_size(struct mddev *mddev, sector_t num_sectors) in update_size() argument
7308 sector_t old_dev_sectors = mddev->dev_sectors; in update_size()
7310 if (mddev->pers->resize == NULL) in update_size()
7321 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in update_size()
7322 mddev->sync_thread) in update_size()
7324 if (!md_is_rdwr(mddev)) in update_size()
7327 rdev_for_each(rdev, mddev) { in update_size()
7335 rv = mddev->pers->resize(mddev, num_sectors); in update_size()
7337 if (mddev_is_clustered(mddev)) in update_size()
7338 md_cluster_ops->update_size(mddev, old_dev_sectors); in update_size()
7339 else if (mddev->queue) { in update_size()
7340 set_capacity_and_notify(mddev->gendisk, in update_size()
7341 mddev->array_sectors); in update_size()
7347 static int update_raid_disks(struct mddev *mddev, int raid_disks) in update_raid_disks() argument
7352 if (mddev->pers->check_reshape == NULL) in update_raid_disks()
7354 if (!md_is_rdwr(mddev)) in update_raid_disks()
7357 (mddev->max_disks && raid_disks >= mddev->max_disks)) in update_raid_disks()
7359 if (mddev->sync_thread || in update_raid_disks()
7360 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in update_raid_disks()
7361 test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) || in update_raid_disks()
7362 mddev->reshape_position != MaxSector) in update_raid_disks()
7365 rdev_for_each(rdev, mddev) { in update_raid_disks()
7366 if (mddev->raid_disks < raid_disks && in update_raid_disks()
7369 if (mddev->raid_disks > raid_disks && in update_raid_disks()
7374 mddev->delta_disks = raid_disks - mddev->raid_disks; in update_raid_disks()
7375 if (mddev->delta_disks < 0) in update_raid_disks()
7376 mddev->reshape_backwards = 1; in update_raid_disks()
7377 else if (mddev->delta_disks > 0) in update_raid_disks()
7378 mddev->reshape_backwards = 0; in update_raid_disks()
7380 rv = mddev->pers->check_reshape(mddev); in update_raid_disks()
7382 mddev->delta_disks = 0; in update_raid_disks()
7383 mddev->reshape_backwards = 0; in update_raid_disks()
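Note: update_raid_disks above derives delta_disks from the requested disk count and picks the reshape direction from its sign (a negative delta, i.e. shrinking, means a backwards reshape); if check_reshape() rejects the change, both fields are reset. A compact sketch of that bookkeeping, with hypothetical names:

#include <stdio.h>

struct reshape_plan {		/* hypothetical stand-in for the mddev fields */
	int delta_disks;
	int reshape_backwards;
};

static void plan_reshape(struct reshape_plan *p, int old_disks, int new_disks)
{
	p->delta_disks = new_disks - old_disks;
	if (p->delta_disks < 0)
		p->reshape_backwards = 1;	/* shrinking -> backwards */
	else if (p->delta_disks > 0)
		p->reshape_backwards = 0;	/* growing -> forwards */
}

int main(void)
{
	struct reshape_plan p = { 0, 0 };

	plan_reshape(&p, 6, 4);
	printf("delta=%d backwards=%d\n", p.delta_disks, p.reshape_backwards);
	return 0;
}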
7396 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) in update_array_info() argument
7403 if (mddev->bitmap && mddev->bitmap_info.offset) in update_array_info()
7406 if (mddev->major_version != info->major_version || in update_array_info()
7407 mddev->minor_version != info->minor_version || in update_array_info()
7409 mddev->ctime != info->ctime || in update_array_info()
7410 mddev->level != info->level || in update_array_info()
7412 mddev->persistent != !info->not_persistent || in update_array_info()
7413 mddev->chunk_sectors != info->chunk_size >> 9 || in update_array_info()
7419 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) in update_array_info()
7421 if (mddev->raid_disks != info->raid_disks) in update_array_info()
7423 if (mddev->layout != info->layout) in update_array_info()
7432 if (mddev->layout != info->layout) { in update_array_info()
7437 if (mddev->pers->check_reshape == NULL) in update_array_info()
7440 mddev->new_layout = info->layout; in update_array_info()
7441 rv = mddev->pers->check_reshape(mddev); in update_array_info()
7443 mddev->new_layout = mddev->layout; in update_array_info()
7447 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) in update_array_info()
7448 rv = update_size(mddev, (sector_t)info->size * 2); in update_array_info()
7450 if (mddev->raid_disks != info->raid_disks) in update_array_info()
7451 rv = update_raid_disks(mddev, info->raid_disks); in update_array_info()
7454 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) { in update_array_info()
7458 if (mddev->recovery || mddev->sync_thread) { in update_array_info()
7465 if (mddev->bitmap) { in update_array_info()
7469 if (mddev->bitmap_info.default_offset == 0) { in update_array_info()
7473 mddev->bitmap_info.offset = in update_array_info()
7474 mddev->bitmap_info.default_offset; in update_array_info()
7475 mddev->bitmap_info.space = in update_array_info()
7476 mddev->bitmap_info.default_space; in update_array_info()
7477 bitmap = md_bitmap_create(mddev, -1); in update_array_info()
7478 mddev_suspend(mddev); in update_array_info()
7480 mddev->bitmap = bitmap; in update_array_info()
7481 rv = md_bitmap_load(mddev); in update_array_info()
7485 md_bitmap_destroy(mddev); in update_array_info()
7486 mddev_resume(mddev); in update_array_info()
7489 if (!mddev->bitmap) { in update_array_info()
7493 if (mddev->bitmap->storage.file) { in update_array_info()
7497 if (mddev->bitmap_info.nodes) { in update_array_info()
7499 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) { in update_array_info()
7502 md_cluster_ops->unlock_all_bitmaps(mddev); in update_array_info()
7506 mddev->bitmap_info.nodes = 0; in update_array_info()
7507 md_cluster_ops->leave(mddev); in update_array_info()
7509 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY; in update_array_info()
7511 mddev_suspend(mddev); in update_array_info()
7512 md_bitmap_destroy(mddev); in update_array_info()
7513 mddev_resume(mddev); in update_array_info()
7514 mddev->bitmap_info.offset = 0; in update_array_info()
7517 md_update_sb(mddev, 1); in update_array_info()
7523 static int set_disk_faulty(struct mddev *mddev, dev_t dev) in set_disk_faulty() argument
7528 if (mddev->pers == NULL) in set_disk_faulty()
7532 rdev = md_find_rdev_rcu(mddev, dev); in set_disk_faulty()
7536 md_error(mddev, rdev); in set_disk_faulty()
7537 if (test_bit(MD_BROKEN, &mddev->flags)) in set_disk_faulty()
7552 struct mddev *mddev = bdev->bd_disk->private_data; in md_getgeo() local
7556 geo->cylinders = mddev->array_sectors / 8; in md_getgeo()
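Note: md_getgeo reports a synthetic CHS geometry, so cylinders is simply the capacity divided by the fixed heads x sectors product; the / 8 above is consistent with the 2-head, 4-sector fake geometry commonly advertised by virtual block drivers (an assumption here, since those two assignments are not part of this listing). A sketch:

#include <stdio.h>

/* Hypothetical mirror of a getgeo handler for a virtual device: pick a
 * fixed heads*sectors product and derive cylinders from the capacity. */
struct fake_geo {
	unsigned int heads;
	unsigned int sectors;
	unsigned long long cylinders;
};

static void fill_geo(struct fake_geo *g, unsigned long long capacity_sectors)
{
	g->heads = 2;
	g->sectors = 4;
	g->cylinders = capacity_sectors / (g->heads * g->sectors); /* / 8 */
}

int main(void)
{
	struct fake_geo g;

	fill_geo(&g, 2097152ULL);	/* 1 GiB array */
	printf("C/H/S = %llu/%u/%u\n", g.cylinders, g.heads, g.sectors);
	return 0;
}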
7584 static int __md_set_array_info(struct mddev *mddev, void __user *argp) in __md_set_array_info() argument
7594 if (mddev->pers) { in __md_set_array_info()
7595 err = update_array_info(mddev, &info); in __md_set_array_info()
7601 if (!list_empty(&mddev->disks)) { in __md_set_array_info()
7602 pr_warn("md: array %s already has disks!\n", mdname(mddev)); in __md_set_array_info()
7606 if (mddev->raid_disks) { in __md_set_array_info()
7607 pr_warn("md: array %s already initialised!\n", mdname(mddev)); in __md_set_array_info()
7611 err = md_set_array_info(mddev, &info); in __md_set_array_info()
7623 struct mddev *mddev = NULL; in md_ioctl() local
7653 mddev = bdev->bd_disk->private_data; in md_ioctl()
7658 if (!mddev->raid_disks && !mddev->external) in md_ioctl()
7661 err = get_array_info(mddev, argp); in md_ioctl()
7665 if (!mddev->raid_disks && !mddev->external) in md_ioctl()
7668 err = get_disk_info(mddev, argp); in md_ioctl()
7672 err = set_disk_faulty(mddev, new_decode_dev(arg)); in md_ioctl()
7676 err = get_bitmap_file(mddev, argp); in md_ioctl()
7685 mutex_lock(&mddev->open_mutex); in md_ioctl()
7686 if (mddev->pers && atomic_read(&mddev->openers) > 1) { in md_ioctl()
7687 mutex_unlock(&mddev->open_mutex); in md_ioctl()
7691 if (test_and_set_bit(MD_CLOSING, &mddev->flags)) { in md_ioctl()
7692 mutex_unlock(&mddev->open_mutex); in md_ioctl()
7696 mutex_unlock(&mddev->open_mutex); in md_ioctl()
7699 err = mddev_lock(mddev); in md_ioctl()
7707 err = __md_set_array_info(mddev, argp); in md_ioctl()
7716 if ((!mddev->raid_disks && !mddev->external) in md_ioctl()
7729 err = restart_array(mddev); in md_ioctl()
7733 err = do_md_stop(mddev, 0, bdev); in md_ioctl()
7737 err = md_set_readonly(mddev, bdev); in md_ioctl()
7741 err = hot_remove_disk(mddev, new_decode_dev(arg)); in md_ioctl()
7749 if (mddev->pers) { in md_ioctl()
7757 err = md_add_new_disk(mddev, &info); in md_ioctl()
7767 if (!md_is_rdwr(mddev) && mddev->pers) { in md_ioctl()
7768 if (mddev->ro != MD_AUTO_READ) { in md_ioctl()
7772 mddev->ro = MD_RDWR; in md_ioctl()
7773 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_ioctl()
7774 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_ioctl()
7779 if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) { in md_ioctl()
7780 mddev_unlock(mddev); in md_ioctl()
7781 wait_event(mddev->sb_wait, in md_ioctl()
7782 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) && in md_ioctl()
7783 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); in md_ioctl()
7784 mddev_lock_nointr(mddev); in md_ioctl()
7795 err = md_add_new_disk(mddev, &info); in md_ioctl()
7800 if (mddev_is_clustered(mddev)) in md_ioctl()
7801 md_cluster_ops->new_disk_ack(mddev, false); in md_ioctl()
7807 err = hot_add_disk(mddev, new_decode_dev(arg)); in md_ioctl()
7811 err = do_md_run(mddev); in md_ioctl()
7815 err = set_bitmap_file(mddev, (int)arg); in md_ioctl()
7824 if (mddev->hold_active == UNTIL_IOCTL && in md_ioctl()
7826 mddev->hold_active = 0; in md_ioctl()
7827 mddev_unlock(mddev); in md_ioctl()
7830 clear_bit(MD_CLOSING, &mddev->flags); in md_ioctl()
7855 struct mddev *mddev = bdev->bd_disk->private_data; in md_set_read_only() local
7858 err = mddev_lock(mddev); in md_set_read_only()
7862 if (!mddev->raid_disks && !mddev->external) { in md_set_read_only()
7871 if (!ro && mddev->ro == MD_RDONLY && mddev->pers) { in md_set_read_only()
7872 err = restart_array(mddev); in md_set_read_only()
7875 mddev->ro = MD_AUTO_READ; in md_set_read_only()
7879 mddev_unlock(mddev); in md_set_read_only()
7885 struct mddev *mddev; in md_open() local
7889 mddev = mddev_get(disk->private_data); in md_open()
7891 if (!mddev) in md_open()
7894 err = mutex_lock_interruptible(&mddev->open_mutex); in md_open()
7899 if (test_bit(MD_CLOSING, &mddev->flags)) in md_open()
7902 atomic_inc(&mddev->openers); in md_open()
7903 mutex_unlock(&mddev->open_mutex); in md_open()
7909 mutex_unlock(&mddev->open_mutex); in md_open()
7911 mddev_put(mddev); in md_open()
7917 struct mddev *mddev = disk->private_data; in md_release() local
7919 BUG_ON(!mddev); in md_release()
7920 atomic_dec(&mddev->openers); in md_release()
7921 mddev_put(mddev); in md_release()
7926 struct mddev *mddev = disk->private_data; in md_check_events() local
7929 if (mddev->changed) in md_check_events()
7931 mddev->changed = 0; in md_check_events()
7937 struct mddev *mddev = disk->private_data; in md_free_disk() local
7939 percpu_ref_exit(&mddev->writes_pending); in md_free_disk()
7940 mddev_free(mddev); in md_free_disk()
8029 struct mddev *mddev, const char *name) in md_register_thread() argument
8040 thread->mddev = mddev; in md_register_thread()
8044 mdname(thread->mddev), in md_register_thread()
8054 void md_unregister_thread(struct mddev *mddev, struct md_thread __rcu **threadp) in md_unregister_thread() argument
8057 lockdep_is_held(&mddev->reconfig_mutex)); in md_unregister_thread()
8071 void md_error(struct mddev *mddev, struct md_rdev *rdev) in md_error() argument
8076 if (!mddev->pers || !mddev->pers->error_handler) in md_error()
8078 mddev->pers->error_handler(mddev, rdev); in md_error()
8080 if (mddev->pers->level == 0 || mddev->pers->level == LEVEL_LINEAR) in md_error()
8083 if (mddev->degraded && !test_bit(MD_BROKEN, &mddev->flags)) in md_error()
8084 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_error()
8086 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_error()
8087 if (!test_bit(MD_BROKEN, &mddev->flags)) { in md_error()
8088 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_error()
8089 md_wakeup_thread(mddev->thread); in md_error()
8091 if (mddev->event_work.func) in md_error()
8092 queue_work(md_misc_wq, &mddev->event_work); in md_error()
8116 static int status_resync(struct seq_file *seq, struct mddev *mddev) in status_resync() argument
8124 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || in status_resync()
8125 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in status_resync()
8126 max_sectors = mddev->resync_max_sectors; in status_resync()
8128 max_sectors = mddev->dev_sectors; in status_resync()
8130 resync = mddev->curr_resync; in status_resync()
8132 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) in status_resync()
8138 res = atomic_read(&mddev->recovery_active); in status_resync()
8151 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) { in status_resync()
8154 rdev_for_each(rdev, mddev) in status_resync()
8162 if (mddev->reshape_position != MaxSector) in status_resync()
8168 if (mddev->recovery_cp < MaxSector) { in status_resync()
8205 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? in status_resync()
8207 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? in status_resync()
8209 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? in status_resync()
8232 dt = ((jiffies - mddev->resync_mark) / HZ); in status_resync()
8235 curr_mark_cnt = mddev->curr_mark_cnt; in status_resync()
8236 recovery_active = atomic_read(&mddev->recovery_active); in status_resync()
8237 resync_mark_cnt = mddev->resync_mark_cnt; in status_resync()
8258 struct mddev *mddev; in md_seq_start() local
8273 mddev = list_entry(tmp, struct mddev, all_mddevs); in md_seq_start()
8274 if (!mddev_get(mddev)) in md_seq_start()
8277 return mddev; in md_seq_start()
8288 struct mddev *next_mddev, *mddev = v; in md_seq_next() local
8289 struct mddev *to_put = NULL; in md_seq_next()
8299 to_put = mddev; in md_seq_next()
8300 tmp = mddev->all_mddevs.next; in md_seq_next()
8309 next_mddev = list_entry(tmp, struct mddev, all_mddevs); in md_seq_next()
8312 mddev = next_mddev; in md_seq_next()
8313 tmp = mddev->all_mddevs.next; in md_seq_next()
8325 struct mddev *mddev = v; in md_seq_stop() local
8327 if (mddev && v != (void*)1 && v != (void*)2) in md_seq_stop()
8328 mddev_put(mddev); in md_seq_stop()
8333 struct mddev *mddev = v; in md_seq_show() local
8354 spin_lock(&mddev->lock); in md_seq_show()
8355 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { in md_seq_show()
8356 seq_printf(seq, "%s : %sactive", mdname(mddev), in md_seq_show()
8357 mddev->pers ? "" : "in"); in md_seq_show()
8358 if (mddev->pers) { in md_seq_show()
8359 if (mddev->ro == MD_RDONLY) in md_seq_show()
8361 if (mddev->ro == MD_AUTO_READ) in md_seq_show()
8363 seq_printf(seq, " %s", mddev->pers->name); in md_seq_show()
8368 rdev_for_each_rcu(rdev, mddev) { in md_seq_show()
8387 if (!list_empty(&mddev->disks)) { in md_seq_show()
8388 if (mddev->pers) in md_seq_show()
8391 mddev->array_sectors / 2); in md_seq_show()
8396 if (mddev->persistent) { in md_seq_show()
8397 if (mddev->major_version != 0 || in md_seq_show()
8398 mddev->minor_version != 90) { in md_seq_show()
8400 mddev->major_version, in md_seq_show()
8401 mddev->minor_version); in md_seq_show()
8403 } else if (mddev->external) in md_seq_show()
8405 mddev->metadata_type); in md_seq_show()
8409 if (mddev->pers) { in md_seq_show()
8410 mddev->pers->status(seq, mddev); in md_seq_show()
8412 if (mddev->pers->sync_request) { in md_seq_show()
8413 if (status_resync(seq, mddev)) in md_seq_show()
8419 md_bitmap_status(seq, mddev->bitmap); in md_seq_show()
8423 spin_unlock(&mddev->lock); in md_seq_show()
8521 int md_setup_cluster(struct mddev *mddev, int nodes) in md_setup_cluster() argument
8535 ret = md_cluster_ops->join(mddev, nodes); in md_setup_cluster()
8537 mddev->safemode_delay = 0; in md_setup_cluster()
8541 void md_cluster_stop(struct mddev *mddev) in md_cluster_stop() argument
8545 md_cluster_ops->leave(mddev); in md_cluster_stop()
8549 static int is_mddev_idle(struct mddev *mddev, int init) in is_mddev_idle() argument
8557 rdev_for_each_rcu(rdev, mddev) { in is_mddev_idle()
8592 void md_done_sync(struct mddev *mddev, int blocks, int ok) in md_done_sync() argument
8595 atomic_sub(blocks, &mddev->recovery_active); in md_done_sync()
8596 wake_up(&mddev->recovery_wait); in md_done_sync()
8598 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_done_sync()
8599 set_bit(MD_RECOVERY_ERROR, &mddev->recovery); in md_done_sync()
8600 md_wakeup_thread(mddev->thread); in md_done_sync()
8613 bool md_write_start(struct mddev *mddev, struct bio *bi) in md_write_start() argument
8620 BUG_ON(mddev->ro == MD_RDONLY); in md_write_start()
8621 if (mddev->ro == MD_AUTO_READ) { in md_write_start()
8623 mddev->ro = MD_RDWR; in md_write_start()
8624 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_write_start()
8625 md_wakeup_thread(mddev->thread); in md_write_start()
8626 md_wakeup_thread(mddev->sync_thread); in md_write_start()
8630 percpu_ref_get(&mddev->writes_pending); in md_write_start()
8632 if (mddev->safemode == 1) in md_write_start()
8633 mddev->safemode = 0; in md_write_start()
8635 if (mddev->in_sync || mddev->sync_checkers) { in md_write_start()
8636 spin_lock(&mddev->lock); in md_write_start()
8637 if (mddev->in_sync) { in md_write_start()
8638 mddev->in_sync = 0; in md_write_start()
8639 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in md_write_start()
8640 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in md_write_start()
8641 md_wakeup_thread(mddev->thread); in md_write_start()
8644 spin_unlock(&mddev->lock); in md_write_start()
8648 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_write_start()
8649 if (!mddev->has_superblocks) in md_write_start()
8651 wait_event(mddev->sb_wait, in md_write_start()
8652 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) || in md_write_start()
8653 is_md_suspended(mddev)); in md_write_start()
8654 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { in md_write_start()
8655 percpu_ref_put(&mddev->writes_pending); in md_write_start()
8670 void md_write_inc(struct mddev *mddev, struct bio *bi) in md_write_inc() argument
8674 WARN_ON_ONCE(mddev->in_sync || !md_is_rdwr(mddev)); in md_write_inc()
8675 percpu_ref_get(&mddev->writes_pending); in md_write_inc()
8679 void md_write_end(struct mddev *mddev) in md_write_end() argument
8681 percpu_ref_put(&mddev->writes_pending); in md_write_end()
8683 if (mddev->safemode == 2) in md_write_end()
8684 md_wakeup_thread(mddev->thread); in md_write_end()
8685 else if (mddev->safemode_delay) in md_write_end()
8689 mod_timer(&mddev->safemode_timer, in md_write_end()
8690 roundup(jiffies, mddev->safemode_delay) + in md_write_end()
8691 mddev->safemode_delay); in md_write_end()
8697 void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev, in md_submit_discard_bio() argument
8708 if (mddev->gendisk) in md_submit_discard_bio()
8710 disk_devt(mddev->gendisk), in md_submit_discard_bio()
8720 struct mddev *mddev = md_io_clone->mddev; in md_end_clone_io() local
8730 percpu_ref_put(&mddev->active_io); in md_end_clone_io()
8733 static void md_clone_bio(struct mddev *mddev, struct bio **bio) in md_clone_bio() argument
8738 bio_alloc_clone(bdev, *bio, GFP_NOIO, &mddev->io_clone_set); in md_clone_bio()
8742 md_io_clone->mddev = mddev; in md_clone_bio()
8751 void md_account_bio(struct mddev *mddev, struct bio **bio) in md_account_bio() argument
8753 percpu_ref_get(&mddev->active_io); in md_account_bio()
8754 md_clone_bio(mddev, bio); in md_account_bio()
8764 void md_allow_write(struct mddev *mddev) in md_allow_write() argument
8766 if (!mddev->pers) in md_allow_write()
8768 if (!md_is_rdwr(mddev)) in md_allow_write()
8770 if (!mddev->pers->sync_request) in md_allow_write()
8773 spin_lock(&mddev->lock); in md_allow_write()
8774 if (mddev->in_sync) { in md_allow_write()
8775 mddev->in_sync = 0; in md_allow_write()
8776 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in md_allow_write()
8777 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in md_allow_write()
8778 if (mddev->safemode_delay && in md_allow_write()
8779 mddev->safemode == 0) in md_allow_write()
8780 mddev->safemode = 1; in md_allow_write()
8781 spin_unlock(&mddev->lock); in md_allow_write()
8782 md_update_sb(mddev, 0); in md_allow_write()
8783 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_allow_write()
8785 wait_event(mddev->sb_wait, in md_allow_write()
8786 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); in md_allow_write()
8788 spin_unlock(&mddev->lock); in md_allow_write()
8797 struct mddev *mddev = thread->mddev; in md_do_sync() local
8798 struct mddev *mddev2; in md_do_sync()
8813 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) || in md_do_sync()
8814 test_bit(MD_RECOVERY_WAIT, &mddev->recovery)) in md_do_sync()
8816 if (!md_is_rdwr(mddev)) {/* never try to sync a read-only array */ in md_do_sync()
8817 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_do_sync()
8821 if (mddev_is_clustered(mddev)) { in md_do_sync()
8822 ret = md_cluster_ops->resync_start(mddev); in md_do_sync()
8826 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags); in md_do_sync()
8827 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || in md_do_sync()
8828 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) || in md_do_sync()
8829 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) in md_do_sync()
8830 && ((unsigned long long)mddev->curr_resync_completed in md_do_sync()
8831 < (unsigned long long)mddev->resync_max_sectors)) in md_do_sync()
8835 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in md_do_sync()
8836 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { in md_do_sync()
8839 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { in md_do_sync()
8844 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in md_do_sync()
8849 mddev->last_sync_action = action ?: desc; in md_do_sync()
8863 mddev->curr_resync = MD_RESYNC_DELAYED; in md_do_sync()
8866 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
8872 if (mddev2 == mddev) in md_do_sync()
8874 if (!mddev->parallel_resync in md_do_sync()
8876 && match_mddev_units(mddev, mddev2)) { in md_do_sync()
8878 if (mddev < mddev2 && in md_do_sync()
8879 mddev->curr_resync == MD_RESYNC_DELAYED) { in md_do_sync()
8881 mddev->curr_resync = MD_RESYNC_YIELDED; in md_do_sync()
8884 if (mddev > mddev2 && in md_do_sync()
8885 mddev->curr_resync == MD_RESYNC_YIELDED) in md_do_sync()
8895 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && in md_do_sync()
8896 mddev2->curr_resync >= mddev->curr_resync) { in md_do_sync()
8900 desc, mdname(mddev), in md_do_sync()
8915 } while (mddev->curr_resync < MD_RESYNC_DELAYED); in md_do_sync()
8918 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in md_do_sync()
8922 max_sectors = mddev->resync_max_sectors; in md_do_sync()
8923 atomic64_set(&mddev->resync_mismatches, 0); in md_do_sync()
8925 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in md_do_sync()
8926 j = mddev->resync_min; in md_do_sync()
8927 else if (!mddev->bitmap) in md_do_sync()
8928 j = mddev->recovery_cp; in md_do_sync()
8930 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { in md_do_sync()
8931 max_sectors = mddev->resync_max_sectors; in md_do_sync()
8937 if (mddev_is_clustered(mddev) && in md_do_sync()
8938 mddev->reshape_position != MaxSector) in md_do_sync()
8939 j = mddev->reshape_position; in md_do_sync()
8942 max_sectors = mddev->dev_sectors; in md_do_sync()
8945 rdev_for_each_rcu(rdev, mddev) in md_do_sync()
8962 if (mddev->bitmap) { in md_do_sync()
8963 mddev->pers->quiesce(mddev, 1); in md_do_sync()
8964 mddev->pers->quiesce(mddev, 0); in md_do_sync()
8968 pr_info("md: %s of RAID array %s\n", desc, mdname(mddev)); in md_do_sync()
8969 pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev)); in md_do_sync()
8971 speed_max(mddev), desc); in md_do_sync()
8973 is_mddev_idle(mddev, 1); /* this initializes IO event counters */ in md_do_sync()
8981 mddev->resync_mark = mark[last_mark]; in md_do_sync()
8982 mddev->resync_mark_cnt = mark_cnt[last_mark]; in md_do_sync()
8991 atomic_set(&mddev->recovery_active, 0); in md_do_sync()
8996 desc, mdname(mddev)); in md_do_sync()
8997 mddev->curr_resync = j; in md_do_sync()
8999 mddev->curr_resync = MD_RESYNC_ACTIVE; /* no longer delayed */ in md_do_sync()
9000 mddev->curr_resync_completed = j; in md_do_sync()
9001 sysfs_notify_dirent_safe(mddev->sysfs_completed); in md_do_sync()
9011 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_do_sync()
9012 ((mddev->curr_resync > mddev->curr_resync_completed && in md_do_sync()
9013 (mddev->curr_resync - mddev->curr_resync_completed) in md_do_sync()
9016 (j - mddev->curr_resync_completed)*2 in md_do_sync()
9017 >= mddev->resync_max - mddev->curr_resync_completed || in md_do_sync()
9018 mddev->curr_resync_completed > mddev->resync_max in md_do_sync()
9021 wait_event(mddev->recovery_wait, in md_do_sync()
9022 atomic_read(&mddev->recovery_active) == 0); in md_do_sync()
9023 mddev->curr_resync_completed = j; in md_do_sync()
9024 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && in md_do_sync()
9025 j > mddev->recovery_cp) in md_do_sync()
9026 mddev->recovery_cp = j; in md_do_sync()
9028 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in md_do_sync()
9029 sysfs_notify_dirent_safe(mddev->sysfs_completed); in md_do_sync()
9032 while (j >= mddev->resync_max && in md_do_sync()
9033 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in md_do_sync()
9039 wait_event_interruptible(mddev->recovery_wait, in md_do_sync()
9040 mddev->resync_max > j in md_do_sync()
9042 &mddev->recovery)); in md_do_sync()
9045 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
9048 sectors = mddev->pers->sync_request(mddev, j, &skipped); in md_do_sync()
9050 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_do_sync()
9056 atomic_add(sectors, &mddev->recovery_active); in md_do_sync()
9059 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
9067 mddev->curr_resync = j; in md_do_sync()
9068 mddev->curr_mark_cnt = io_sectors; in md_do_sync()
9084 mddev->resync_mark = mark[next]; in md_do_sync()
9085 mddev->resync_mark_cnt = mark_cnt[next]; in md_do_sync()
9087 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active); in md_do_sync()
9091 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
9104 recovery_done = io_sectors - atomic_read(&mddev->recovery_active); in md_do_sync()
9105 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2 in md_do_sync()
9106 /((jiffies-mddev->resync_mark)/HZ +1) +1; in md_do_sync()
9108 if (currspeed > speed_min(mddev)) { in md_do_sync()
9109 if (currspeed > speed_max(mddev)) { in md_do_sync()
9113 if (!is_mddev_idle(mddev, 0)) { in md_do_sync()
9118 wait_event(mddev->recovery_wait, in md_do_sync()
9119 !atomic_read(&mddev->recovery_active)); in md_do_sync()
9123 pr_info("md: %s: %s %s.\n",mdname(mddev), desc, in md_do_sync()
9124 test_bit(MD_RECOVERY_INTR, &mddev->recovery) in md_do_sync()
9130 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); in md_do_sync()
9132 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_do_sync()
9133 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && in md_do_sync()
9134 mddev->curr_resync >= MD_RESYNC_ACTIVE) { in md_do_sync()
9135 mddev->curr_resync_completed = mddev->curr_resync; in md_do_sync()
9136 sysfs_notify_dirent_safe(mddev->sysfs_completed); in md_do_sync()
9138 mddev->pers->sync_request(mddev, max_sectors, &skipped); in md_do_sync()
9140 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && in md_do_sync()
9141 mddev->curr_resync > MD_RESYNC_ACTIVE) { in md_do_sync()
9142 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in md_do_sync()
9143 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in md_do_sync()
9144 if (mddev->curr_resync >= mddev->recovery_cp) { in md_do_sync()
9146 desc, mdname(mddev)); in md_do_sync()
9148 &mddev->recovery)) in md_do_sync()
9149 mddev->recovery_cp = in md_do_sync()
9150 mddev->curr_resync_completed; in md_do_sync()
9152 mddev->recovery_cp = in md_do_sync()
9153 mddev->curr_resync; in md_do_sync()
9156 mddev->recovery_cp = MaxSector; in md_do_sync()
9158 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
9159 mddev->curr_resync = MaxSector; in md_do_sync()
9160 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_do_sync()
9161 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) { in md_do_sync()
9163 rdev_for_each_rcu(rdev, mddev) in md_do_sync()
9165 mddev->delta_disks >= 0 && in md_do_sync()
9169 rdev->recovery_offset < mddev->curr_resync) in md_do_sync()
9170 rdev->recovery_offset = mddev->curr_resync; in md_do_sync()
9179 set_mask_bits(&mddev->sb_flags, 0, in md_do_sync()
9182 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_do_sync()
9183 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && in md_do_sync()
9184 mddev->delta_disks > 0 && in md_do_sync()
9185 mddev->pers->finish_reshape && in md_do_sync()
9186 mddev->pers->size && in md_do_sync()
9187 mddev->queue) { in md_do_sync()
9188 mddev_lock_nointr(mddev); in md_do_sync()
9189 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0)); in md_do_sync()
9190 mddev_unlock(mddev); in md_do_sync()
9191 if (!mddev_is_clustered(mddev)) in md_do_sync()
9192 set_capacity_and_notify(mddev->gendisk, in md_do_sync()
9193 mddev->array_sectors); in md_do_sync()
9196 spin_lock(&mddev->lock); in md_do_sync()
9197 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in md_do_sync()
9199 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in md_do_sync()
9200 mddev->resync_min = 0; in md_do_sync()
9201 mddev->resync_max = MaxSector; in md_do_sync()
9202 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in md_do_sync()
9203 mddev->resync_min = mddev->curr_resync_completed; in md_do_sync()
9204 set_bit(MD_RECOVERY_DONE, &mddev->recovery); in md_do_sync()
9205 mddev->curr_resync = MD_RESYNC_NONE; in md_do_sync()
9206 spin_unlock(&mddev->lock); in md_do_sync()
9209 wake_up(&mddev->sb_wait); in md_do_sync()
9210 md_wakeup_thread(mddev->thread); in md_do_sync()
9215 static int remove_and_add_spares(struct mddev *mddev, in remove_and_add_spares() argument
9223 if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in remove_and_add_spares()
9227 rdev_for_each(rdev, mddev) { in remove_and_add_spares()
9245 rdev_for_each(rdev, mddev) { in remove_and_add_spares()
9253 if (mddev->pers->hot_remove_disk( in remove_and_add_spares()
9254 mddev, rdev) == 0) { in remove_and_add_spares()
9255 sysfs_unlink_rdev(mddev, rdev); in remove_and_add_spares()
9265 if (removed && mddev->kobj.sd) in remove_and_add_spares()
9266 sysfs_notify_dirent_safe(mddev->sysfs_degraded); in remove_and_add_spares()
9271 rdev_for_each(rdev, mddev) { in remove_and_add_spares()
9286 if (!md_is_rdwr(mddev) && in remove_and_add_spares()
9293 if (mddev->pers->hot_add_disk(mddev, rdev) == 0) { in remove_and_add_spares()
9295 sysfs_link_rdev(mddev, rdev); in remove_and_add_spares()
9299 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in remove_and_add_spares()
9304 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in remove_and_add_spares()
9310 struct mddev *mddev = container_of(ws, struct mddev, del_work); in md_start_sync() local
9312 rcu_assign_pointer(mddev->sync_thread, in md_start_sync()
9313 md_register_thread(md_do_sync, mddev, "resync")); in md_start_sync()
9314 if (!mddev->sync_thread) { in md_start_sync()
9316 mdname(mddev)); in md_start_sync()
9318 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_start_sync()
9319 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in md_start_sync()
9320 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in md_start_sync()
9321 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in md_start_sync()
9322 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_start_sync()
9325 &mddev->recovery)) in md_start_sync()
9326 if (mddev->sysfs_action) in md_start_sync()
9327 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_start_sync()
9329 md_wakeup_thread(mddev->sync_thread); in md_start_sync()
9330 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_start_sync()
9356 void md_check_recovery(struct mddev *mddev) in md_check_recovery() argument
9358 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) { in md_check_recovery()
9362 set_bit(MD_UPDATING_SB, &mddev->flags); in md_check_recovery()
9364 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags)) in md_check_recovery()
9365 md_update_sb(mddev, 0); in md_check_recovery()
9366 clear_bit_unlock(MD_UPDATING_SB, &mddev->flags); in md_check_recovery()
9367 wake_up(&mddev->sb_wait); in md_check_recovery()
9370 if (is_md_suspended(mddev)) in md_check_recovery()
9373 if (mddev->bitmap) in md_check_recovery()
9374 md_bitmap_daemon_work(mddev); in md_check_recovery()
9377 if (mddev->pers->sync_request && !mddev->external) { in md_check_recovery()
9379 mdname(mddev)); in md_check_recovery()
9380 mddev->safemode = 2; in md_check_recovery()
9385 if (!md_is_rdwr(mddev) && in md_check_recovery()
9386 !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) in md_check_recovery()
9389 (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) || in md_check_recovery()
9390 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || in md_check_recovery()
9391 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || in md_check_recovery()
9392 (mddev->external == 0 && mddev->safemode == 1) || in md_check_recovery()
9393 (mddev->safemode == 2 in md_check_recovery()
9394 && !mddev->in_sync && mddev->recovery_cp == MaxSector) in md_check_recovery()
9398 if (mddev_trylock(mddev)) { in md_check_recovery()
9400 bool try_set_sync = mddev->safemode != 0; in md_check_recovery()
9402 if (!mddev->external && mddev->safemode == 1) in md_check_recovery()
9403 mddev->safemode = 0; in md_check_recovery()
9405 if (!md_is_rdwr(mddev)) { in md_check_recovery()
9407 if (!mddev->external && mddev->in_sync) in md_check_recovery()
9413 rdev_for_each(rdev, mddev) in md_check_recovery()
9422 remove_and_add_spares(mddev, NULL); in md_check_recovery()
9426 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_check_recovery()
9427 md_reap_sync_thread(mddev); in md_check_recovery()
9428 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
9429 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_check_recovery()
9430 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in md_check_recovery()
9434 if (mddev_is_clustered(mddev)) { in md_check_recovery()
9439 rdev_for_each_safe(rdev, tmp, mddev) { in md_check_recovery()
9446 if (try_set_sync && !mddev->external && !mddev->in_sync) { in md_check_recovery()
9447 spin_lock(&mddev->lock); in md_check_recovery()
9448 set_in_sync(mddev); in md_check_recovery()
9449 spin_unlock(&mddev->lock); in md_check_recovery()
9452 if (mddev->sb_flags) in md_check_recovery()
9453 md_update_sb(mddev, 0); in md_check_recovery()
9459 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { in md_check_recovery()
9460 if (!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { in md_check_recovery()
9462 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_check_recovery()
9466 if (WARN_ON_ONCE(!mddev->sync_thread)) in md_check_recovery()
9469 md_reap_sync_thread(mddev); in md_check_recovery()
9476 mddev->curr_resync_completed = 0; in md_check_recovery()
9477 spin_lock(&mddev->lock); in md_check_recovery()
9478 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_check_recovery()
9479 spin_unlock(&mddev->lock); in md_check_recovery()
9483 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_check_recovery()
9484 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); in md_check_recovery()
9486 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || in md_check_recovery()
9487 test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) in md_check_recovery()
9496 if (mddev->reshape_position != MaxSector) { in md_check_recovery()
9497 if (mddev->pers->check_reshape == NULL || in md_check_recovery()
9498 mddev->pers->check_reshape(mddev) != 0) in md_check_recovery()
9501 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in md_check_recovery()
9502 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
9503 } else if ((spares = remove_and_add_spares(mddev, NULL))) { in md_check_recovery()
9504 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_check_recovery()
9505 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in md_check_recovery()
9506 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in md_check_recovery()
9507 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
9508 } else if (mddev->recovery_cp < MaxSector) { in md_check_recovery()
9509 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_check_recovery()
9510 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
9511 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) in md_check_recovery()
9515 if (mddev->pers->sync_request) { in md_check_recovery()
9521 md_bitmap_write_all(mddev->bitmap); in md_check_recovery()
9523 INIT_WORK(&mddev->del_work, md_start_sync); in md_check_recovery()
9524 queue_work(md_misc_wq, &mddev->del_work); in md_check_recovery()
9528 if (!mddev->sync_thread) { in md_check_recovery()
9529 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_check_recovery()
9532 &mddev->recovery)) in md_check_recovery()
9533 if (mddev->sysfs_action) in md_check_recovery()
9534 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_check_recovery()
9537 wake_up(&mddev->sb_wait); in md_check_recovery()
9538 mddev_unlock(mddev); in md_check_recovery()
9543 void md_reap_sync_thread(struct mddev *mddev) in md_reap_sync_thread() argument
9546 sector_t old_dev_sectors = mddev->dev_sectors; in md_reap_sync_thread()
9550 md_unregister_thread(mddev, &mddev->sync_thread); in md_reap_sync_thread()
9551 atomic_inc(&mddev->sync_seq); in md_reap_sync_thread()
9553 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && in md_reap_sync_thread()
9554 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && in md_reap_sync_thread()
9555 mddev->degraded != mddev->raid_disks) { in md_reap_sync_thread()
9558 if (mddev->pers->spare_active(mddev)) { in md_reap_sync_thread()
9559 sysfs_notify_dirent_safe(mddev->sysfs_degraded); in md_reap_sync_thread()
9560 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in md_reap_sync_thread()
9563 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_reap_sync_thread()
9564 mddev->pers->finish_reshape) { in md_reap_sync_thread()
9565 mddev->pers->finish_reshape(mddev); in md_reap_sync_thread()
9566 if (mddev_is_clustered(mddev)) in md_reap_sync_thread()
9573 if (!mddev->degraded) in md_reap_sync_thread()
9574 rdev_for_each(rdev, mddev) in md_reap_sync_thread()
9577 md_update_sb(mddev, 1); in md_reap_sync_thread()
9581 if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags)) in md_reap_sync_thread()
9582 md_cluster_ops->resync_finish(mddev); in md_reap_sync_thread()
9583 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_reap_sync_thread()
9584 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); in md_reap_sync_thread()
9585 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_reap_sync_thread()
9586 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in md_reap_sync_thread()
9587 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in md_reap_sync_thread()
9588 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in md_reap_sync_thread()
9594 if (mddev_is_clustered(mddev) && is_reshaped in md_reap_sync_thread()
9595 && !test_bit(MD_CLOSING, &mddev->flags)) in md_reap_sync_thread()
9596 md_cluster_ops->update_size(mddev, old_dev_sectors); in md_reap_sync_thread()
9598 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_reap_sync_thread()
9599 sysfs_notify_dirent_safe(mddev->sysfs_completed); in md_reap_sync_thread()
9600 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_reap_sync_thread()
9602 if (mddev->event_work.func) in md_reap_sync_thread()
9603 queue_work(md_misc_wq, &mddev->event_work); in md_reap_sync_thread()
9608 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev) in md_wait_for_blocked_rdev() argument
9615 rdev_dec_pending(rdev, mddev); in md_wait_for_blocked_rdev()
9619 void md_finish_reshape(struct mddev *mddev) in md_finish_reshape() argument
9624 rdev_for_each(rdev, mddev) { in md_finish_reshape()
9640 struct mddev *mddev = rdev->mddev; in rdev_set_badblocks() local
9652 set_mask_bits(&mddev->sb_flags, 0, in rdev_set_badblocks()
9654 md_wakeup_thread(rdev->mddev->thread); in rdev_set_badblocks()
9679 struct mddev *mddev, *n; in md_notify_reboot() local
9683 list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) { in md_notify_reboot()
9684 if (!mddev_get(mddev)) in md_notify_reboot()
9687 if (mddev_trylock(mddev)) { in md_notify_reboot()
9688 if (mddev->pers) in md_notify_reboot()
9689 __md_stop_writes(mddev); in md_notify_reboot()
9690 if (mddev->persistent) in md_notify_reboot()
9691 mddev->safemode = 2; in md_notify_reboot()
9692 mddev_unlock(mddev); in md_notify_reboot()
9695 mddev_put(mddev); in md_notify_reboot()
9769 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) in check_sb_changes() argument
9779 if (mddev->dev_sectors != le64_to_cpu(sb->size)) { in check_sb_changes()
9780 ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size)); in check_sb_changes()
9784 md_bitmap_update_sb(mddev->bitmap); in check_sb_changes()
9788 rdev_for_each_safe(rdev2, tmp, mddev) { in check_sb_changes()
9814 ret = remove_and_add_spares(mddev, rdev2); in check_sb_changes()
9819 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in check_sb_changes()
9820 md_wakeup_thread(mddev->thread); in check_sb_changes()
9829 md_error(mddev, rdev2); in check_sb_changes()
9835 if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) { in check_sb_changes()
9836 ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks)); in check_sb_changes()
9845 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) && in check_sb_changes()
9851 mddev->reshape_position = le64_to_cpu(sb->reshape_position); in check_sb_changes()
9852 if (mddev->pers->update_reshape_pos) in check_sb_changes()
9853 mddev->pers->update_reshape_pos(mddev); in check_sb_changes()
9854 if (mddev->pers->start_reshape) in check_sb_changes()
9855 mddev->pers->start_reshape(mddev); in check_sb_changes()
9856 } else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) && in check_sb_changes()
9857 mddev->reshape_position != MaxSector && in check_sb_changes()
9860 mddev->reshape_position = MaxSector; in check_sb_changes()
9861 if (mddev->pers->update_reshape_pos) in check_sb_changes()
9862 mddev->pers->update_reshape_pos(mddev); in check_sb_changes()
9866 mddev->events = le64_to_cpu(sb->events); in check_sb_changes()
9869 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev) in read_rdev() argument
9883 err = super_types[mddev->major_version]. in read_rdev()
9884 load_super(rdev, NULL, mddev->minor_version); in read_rdev()
9909 mddev->pers->spare_active(mddev)) in read_rdev()
9910 sysfs_notify_dirent_safe(mddev->sysfs_degraded); in read_rdev()
9916 void md_reload_sb(struct mddev *mddev, int nr) in md_reload_sb() argument
9922 rdev_for_each_rcu(iter, mddev) { in md_reload_sb()
9934 err = read_rdev(mddev, rdev); in md_reload_sb()
9938 check_sb_changes(mddev, rdev); in md_reload_sb()
9941 rdev_for_each_rcu(rdev, mddev) { in md_reload_sb()
9943 read_rdev(mddev, rdev); in md_reload_sb()
10019 struct mddev *mddev, *n; in md_exit() local
10040 list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) { in md_exit()
10041 if (!mddev_get(mddev)) in md_exit()
10044 export_array(mddev); in md_exit()
10045 mddev->ctime = 0; in md_exit()
10046 mddev->hold_active = 0; in md_exit()
10052 mddev_put(mddev); in md_exit()