1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 md.c : Multiple Devices driver for Linux
4 Copyright (C) 1998, 1999, 2000 Ingo Molnar
5
6 completely rewritten, based on the MD driver code from Marc Zyngier
7
8 Changes:
9
10 - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
11 - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
12 - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
13 - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
14 - kmod support by: Cyrus Durgin
15 - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
16 - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
17
18 - lots of fixes and improvements to the RAID1/RAID5 and generic
19 RAID code (such as request based resynchronization):
20
21 Neil Brown <neilb@cse.unsw.edu.au>.
22
23 - persistent bitmap code
24 Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
25
26
27 Errors, Warnings, etc.
28 Please use:
29 pr_crit() for error conditions that risk data loss
30 pr_err() for error conditions that are unexpected, like an IO error
31 or internal inconsistency
32 pr_warn() for error conditions that could have been predicted, like
33 adding a device to an array when it has incompatible metadata
34 pr_info() for interesting, very rare events, like an array starting
35 or stopping, or resync starting or stopping
36 pr_debug() for everything else.
37
38 */
39
40 #include <linux/sched/mm.h>
41 #include <linux/sched/signal.h>
42 #include <linux/kthread.h>
43 #include <linux/blkdev.h>
44 #include <linux/blk-integrity.h>
45 #include <linux/badblocks.h>
46 #include <linux/sysctl.h>
47 #include <linux/seq_file.h>
48 #include <linux/fs.h>
49 #include <linux/poll.h>
50 #include <linux/ctype.h>
51 #include <linux/string.h>
52 #include <linux/hdreg.h>
53 #include <linux/proc_fs.h>
54 #include <linux/random.h>
55 #include <linux/major.h>
56 #include <linux/module.h>
57 #include <linux/reboot.h>
58 #include <linux/file.h>
59 #include <linux/compat.h>
60 #include <linux/delay.h>
61 #include <linux/raid/md_p.h>
62 #include <linux/raid/md_u.h>
63 #include <linux/raid/detect.h>
64 #include <linux/slab.h>
65 #include <linux/percpu-refcount.h>
66 #include <linux/part_stat.h>
67
68 #include <trace/events/block.h>
69 #include "md.h"
70 #include "md-bitmap.h"
71 #include "md-cluster.h"
72
73 /* pers_list is a list of registered personalities protected by pers_lock. */
74 static LIST_HEAD(pers_list);
75 static DEFINE_SPINLOCK(pers_lock);
76
77 static const struct kobj_type md_ktype;
78
79 struct md_cluster_operations *md_cluster_ops;
80 EXPORT_SYMBOL(md_cluster_ops);
81 static struct module *md_cluster_mod;
82
83 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
84 static struct workqueue_struct *md_wq;
85 static struct workqueue_struct *md_misc_wq;
86 struct workqueue_struct *md_bitmap_wq;
87
88 static int remove_and_add_spares(struct mddev *mddev,
89 struct md_rdev *this);
90 static void mddev_detach(struct mddev *mddev);
91 static void export_rdev(struct md_rdev *rdev, struct mddev *mddev);
92 static void md_wakeup_thread_directly(struct md_thread __rcu *thread);
93
94 /*
95 * Default number of read corrections we'll attempt on an rdev
96 * before ejecting it from the array. We divide the read error
97 * count by 2 for every hour elapsed between read errors.
98 */
99 #define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
100 /* Default safemode delay: 200 msec */
101 #define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 +1)
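/*
 * Worked example (illustrative; assumes HZ=250): (200 * 250)/1000 + 1 = 51
 * jiffies, i.e. just over 200 msec.  The "+1" compensates for integer
 * truncation so the delay is never shorter than requested, and never zero.
 */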
102 /*
103 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
104 * is 1000 KB/sec, so the extra system load does not show up that much.
105 * Increase it if you want to have more _guaranteed_ speed. Note that
106 * the RAID driver will use the maximum available bandwidth if the IO
107 * subsystem is idle. There is also an 'absolute maximum' reconstruction
108 * speed limit - in case reconstruction slows down your system despite
109 * idle IO detection.
110 *
111 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
112 * or /sys/block/mdX/md/sync_speed_{min,max}
113 */
114
115 static int sysctl_speed_limit_min = 1000;
116 static int sysctl_speed_limit_max = 200000;
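/*
 * Tuning example (illustrative shell commands, values in KB/sec; "md0" is
 * a placeholder array name):
 *
 *	echo 50000  > /proc/sys/dev/raid/speed_limit_min
 *	echo 500000 > /sys/block/md0/md/sync_speed_max
 *
 * A non-zero per-array sysfs value overrides the system-wide sysctl limit,
 * as implemented by speed_min()/speed_max() below.
 */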
117 static inline int speed_min(struct mddev *mddev)
118 {
119 return mddev->sync_speed_min ?
120 mddev->sync_speed_min : sysctl_speed_limit_min;
121 }
122
123 static inline int speed_max(struct mddev *mddev)
124 {
125 return mddev->sync_speed_max ?
126 mddev->sync_speed_max : sysctl_speed_limit_max;
127 }
128
129 static void rdev_uninit_serial(struct md_rdev *rdev)
130 {
131 if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
132 return;
133
134 kvfree(rdev->serial);
135 rdev->serial = NULL;
136 }
137
138 static void rdevs_uninit_serial(struct mddev *mddev)
139 {
140 struct md_rdev *rdev;
141
142 rdev_for_each(rdev, mddev)
143 rdev_uninit_serial(rdev);
144 }
145
146 static int rdev_init_serial(struct md_rdev *rdev)
147 {
148 /* serial_nums equals BARRIER_BUCKETS_NR */
149 int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
150 struct serial_in_rdev *serial = NULL;
151
152 if (test_bit(CollisionCheck, &rdev->flags))
153 return 0;
154
155 serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
156 GFP_KERNEL);
157 if (!serial)
158 return -ENOMEM;
159
160 for (i = 0; i < serial_nums; i++) {
161 struct serial_in_rdev *serial_tmp = &serial[i];
162
163 spin_lock_init(&serial_tmp->serial_lock);
164 serial_tmp->serial_rb = RB_ROOT_CACHED;
165 init_waitqueue_head(&serial_tmp->serial_io_wait);
166 }
167
168 rdev->serial = serial;
169 set_bit(CollisionCheck, &rdev->flags);
170
171 return 0;
172 }
173
174 static int rdevs_init_serial(struct mddev *mddev)
175 {
176 struct md_rdev *rdev;
177 int ret = 0;
178
179 rdev_for_each(rdev, mddev) {
180 ret = rdev_init_serial(rdev);
181 if (ret)
182 break;
183 }
184
185 /* Free all resources if the pool does not exist */
186 if (ret && !mddev->serial_info_pool)
187 rdevs_uninit_serial(mddev);
188
189 return ret;
190 }
191
192 /*
193 * rdev needs to enable serialization if it meets both conditions:
194 * 1. it is a multi-queue device flagged with writemostly.
195 * 2. the write-behind mode is enabled.
196 */
197 static int rdev_need_serial(struct md_rdev *rdev)
198 {
199 return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
200 rdev->bdev->bd_disk->queue->nr_hw_queues != 1 &&
201 test_bit(WriteMostly, &rdev->flags));
202 }
203
204 /*
205 * Init resource for rdev(s), then create serial_info_pool if:
206 * 1. rdev is the first device that returns true from rdev_need_serial.
207 * 2. rdev is NULL, meaning we want to enable serialization for all rdevs.
208 */
209 void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
210 bool is_suspend)
211 {
212 int ret = 0;
213
214 if (rdev && !rdev_need_serial(rdev) &&
215 !test_bit(CollisionCheck, &rdev->flags))
216 return;
217
218 if (!is_suspend)
219 mddev_suspend(mddev);
220
221 if (!rdev)
222 ret = rdevs_init_serial(mddev);
223 else
224 ret = rdev_init_serial(rdev);
225 if (ret)
226 goto abort;
227
228 if (mddev->serial_info_pool == NULL) {
229 /*
230 * already in memalloc noio context by
231 * mddev_suspend()
232 */
233 mddev->serial_info_pool =
234 mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
235 sizeof(struct serial_info));
236 if (!mddev->serial_info_pool) {
237 rdevs_uninit_serial(mddev);
238 pr_err("can't alloc memory pool for serialization\n");
239 }
240 }
241
242 abort:
243 if (!is_suspend)
244 mddev_resume(mddev);
245 }
246
247 /*
248 * Free resource from rdev(s), and destroy serial_info_pool under conditions:
249 * 1. rdev is the last device flagged with CollisionCheck.
250 * 2. when the bitmap is destroyed while the policy is not enabled.
251 * 3. when disabling the policy, the pool is destroyed only when no rdev needs it.
252 */
253 void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
254 bool is_suspend)
255 {
256 if (rdev && !test_bit(CollisionCheck, &rdev->flags))
257 return;
258
259 if (mddev->serial_info_pool) {
260 struct md_rdev *temp;
261 int num = 0; /* used to track if other rdevs need the pool */
262
263 if (!is_suspend)
264 mddev_suspend(mddev);
265 rdev_for_each(temp, mddev) {
266 if (!rdev) {
267 if (!mddev->serialize_policy ||
268 !rdev_need_serial(temp))
269 rdev_uninit_serial(temp);
270 else
271 num++;
272 } else if (temp != rdev &&
273 test_bit(CollisionCheck, &temp->flags))
274 num++;
275 }
276
277 if (rdev)
278 rdev_uninit_serial(rdev);
279
280 if (num)
281 pr_info("The mempool could be used by other devices\n");
282 else {
283 mempool_destroy(mddev->serial_info_pool);
284 mddev->serial_info_pool = NULL;
285 }
286 if (!is_suspend)
287 mddev_resume(mddev);
288 }
289 }
290
291 static struct ctl_table_header *raid_table_header;
292
293 static struct ctl_table raid_table[] = {
294 {
295 .procname = "speed_limit_min",
296 .data = &sysctl_speed_limit_min,
297 .maxlen = sizeof(int),
298 .mode = S_IRUGO|S_IWUSR,
299 .proc_handler = proc_dointvec,
300 },
301 {
302 .procname = "speed_limit_max",
303 .data = &sysctl_speed_limit_max,
304 .maxlen = sizeof(int),
305 .mode = S_IRUGO|S_IWUSR,
306 .proc_handler = proc_dointvec,
307 },
308 { }
309 };
310
311 static int start_readonly;
312
313 /*
314 * The original mechanism for creating an md device is to create
315 * a device node in /dev and to open it. This causes races with device-close.
316 * The preferred method is to write to the "new_array" module parameter.
317 * This can avoid races.
318 * Setting create_on_open to false disables the original mechanism
319 * so all the races disappear.
320 */
321 static bool create_on_open = true;
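/*
 * Illustrative example of the preferred creation method (assumes md is
 * built as the md_mod module and "md_test" is an acceptable array name):
 *
 *	echo md_test > /sys/module/md_mod/parameters/new_array
 *
 * This creates the array node without the open()-triggered races described
 * above.
 */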
322
323 /*
324 * We have a system wide 'event count' that is incremented
325 * on any 'interesting' event, and readers of /proc/mdstat
326 * can use 'poll' or 'select' to find out when the event
327 * count increases.
328 *
329 * Events are:
330 * start array, stop array, error, add device, remove device,
331 * start build, activate spare
332 */
333 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
334 static atomic_t md_event_count;
335 void md_new_event(void)
336 {
337 atomic_inc(&md_event_count);
338 wake_up(&md_event_waiters);
339 }
340 EXPORT_SYMBOL_GPL(md_new_event);
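/*
 * Illustrative userspace consumer (a sketch, not part of the driver):
 * open /proc/mdstat, read it, then wait for the event count to change
 * before re-reading.  The mdstat poll implementation flags a change with
 * POLLERR|POLLPRI:
 *
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *	poll(&pfd, 1, -1);	(returns when md_new_event() next fires)
 */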
341
342 /*
343 * Allows iterating over all existing md arrays.
344 * all_mddevs_lock protects this list.
345 */
346 static LIST_HEAD(all_mddevs);
347 static DEFINE_SPINLOCK(all_mddevs_lock);
348
349 /* Rather than calling directly into the personality make_request function,
350 * IO requests come here first so that we can check if the device is
351 * being suspended pending a reconfiguration.
352 * We hold a refcount over the call to ->make_request. By the time that
353 * call has finished, the bio has been linked into some internal structure
354 * and so is visible to ->quiesce(), so we don't need the refcount any more.
355 */
356 static bool is_suspended(struct mddev *mddev, struct bio *bio)
357 {
358 if (is_md_suspended(mddev))
359 return true;
360 if (bio_data_dir(bio) != WRITE)
361 return false;
362 if (mddev->suspend_lo >= mddev->suspend_hi)
363 return false;
364 if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
365 return false;
366 if (bio_end_sector(bio) < mddev->suspend_lo)
367 return false;
368 return true;
369 }
370
371 void md_handle_request(struct mddev *mddev, struct bio *bio)
372 {
373 check_suspended:
374 if (is_suspended(mddev, bio)) {
375 DEFINE_WAIT(__wait);
376 /* Bail out if REQ_NOWAIT is set for the bio */
377 if (bio->bi_opf & REQ_NOWAIT) {
378 bio_wouldblock_error(bio);
379 return;
380 }
381 for (;;) {
382 prepare_to_wait(&mddev->sb_wait, &__wait,
383 TASK_UNINTERRUPTIBLE);
384 if (!is_suspended(mddev, bio))
385 break;
386 schedule();
387 }
388 finish_wait(&mddev->sb_wait, &__wait);
389 }
390 if (!percpu_ref_tryget_live(&mddev->active_io))
391 goto check_suspended;
392
393 if (!mddev->pers->make_request(mddev, bio)) {
394 percpu_ref_put(&mddev->active_io);
395 goto check_suspended;
396 }
397
398 percpu_ref_put(&mddev->active_io);
399 }
400 EXPORT_SYMBOL(md_handle_request);
401
402 static void md_submit_bio(struct bio *bio)
403 {
404 const int rw = bio_data_dir(bio);
405 struct mddev *mddev = bio->bi_bdev->bd_disk->private_data;
406
407 if (mddev == NULL || mddev->pers == NULL) {
408 bio_io_error(bio);
409 return;
410 }
411
412 if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
413 bio_io_error(bio);
414 return;
415 }
416
417 bio = bio_split_to_limits(bio);
418 if (!bio)
419 return;
420
421 if (mddev->ro == MD_RDONLY && unlikely(rw == WRITE)) {
422 if (bio_sectors(bio) != 0)
423 bio->bi_status = BLK_STS_IOERR;
424 bio_endio(bio);
425 return;
426 }
427
428 /* bio could be mergeable after passing to underlayer */
429 bio->bi_opf &= ~REQ_NOMERGE;
430
431 md_handle_request(mddev, bio);
432 }
433
434 /* mddev_suspend makes sure no new requests are submitted
435 * to the device, and that any requests that have been submitted
436 * are completely handled.
437 * Once mddev_detach() is called and completes, the module will be
438 * completely unused.
439 */
440 void mddev_suspend(struct mddev *mddev)
441 {
442 struct md_thread *thread = rcu_dereference_protected(mddev->thread,
443 lockdep_is_held(&mddev->reconfig_mutex));
444
445 WARN_ON_ONCE(thread && current == thread->tsk);
446 if (mddev->suspended++)
447 return;
448 wake_up(&mddev->sb_wait);
449 set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
450 percpu_ref_kill(&mddev->active_io);
451
452 if (mddev->pers && mddev->pers->prepare_suspend)
453 mddev->pers->prepare_suspend(mddev);
454
455 wait_event(mddev->sb_wait, percpu_ref_is_zero(&mddev->active_io));
456 clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
457 wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));
458
459 del_timer_sync(&mddev->safemode_timer);
460 /* restrict memory reclaim I/O while the raid array is suspended */
461 mddev->noio_flag = memalloc_noio_save();
462 }
463 EXPORT_SYMBOL_GPL(mddev_suspend);
464
465 void mddev_resume(struct mddev *mddev)
466 {
467 lockdep_assert_held(&mddev->reconfig_mutex);
468 if (--mddev->suspended)
469 return;
470
471 /* entered the memalloc scope from mddev_suspend() */
472 memalloc_noio_restore(mddev->noio_flag);
473
474 percpu_ref_resurrect(&mddev->active_io);
475 wake_up(&mddev->sb_wait);
476
477 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
478 md_wakeup_thread(mddev->thread);
479 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
480 }
481 EXPORT_SYMBOL_GPL(mddev_resume);
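/*
 * Typical usage (an illustrative sketch): suspend/resume calls nest via
 * mddev->suspended, so a caller holding reconfig_mutex can wrap a
 * reconfiguration even if the array is already suspended elsewhere:
 *
 *	mddev_suspend(mddev);
 *	(apply changes that must not race with in-flight I/O)
 *	mddev_resume(mddev);
 */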
482
483 /*
484 * Generic flush handling for md
485 */
486
487 static void md_end_flush(struct bio *bio)
488 {
489 struct md_rdev *rdev = bio->bi_private;
490 struct mddev *mddev = rdev->mddev;
491
492 bio_put(bio);
493
494 rdev_dec_pending(rdev, mddev);
495
496 if (atomic_dec_and_test(&mddev->flush_pending)) {
497 /* The pair is percpu_ref_get() from md_flush_request() */
498 percpu_ref_put(&mddev->active_io);
499
500 /* The pre-request flush has finished */
501 queue_work(md_wq, &mddev->flush_work);
502 }
503 }
504
505 static void md_submit_flush_data(struct work_struct *ws);
506
507 static void submit_flushes(struct work_struct *ws)
508 {
509 struct mddev *mddev = container_of(ws, struct mddev, flush_work);
510 struct md_rdev *rdev;
511
512 mddev->start_flush = ktime_get_boottime();
513 INIT_WORK(&mddev->flush_work, md_submit_flush_data);
514 atomic_set(&mddev->flush_pending, 1);
515 rcu_read_lock();
516 rdev_for_each_rcu(rdev, mddev)
517 if (rdev->raid_disk >= 0 &&
518 !test_bit(Faulty, &rdev->flags)) {
519 struct bio *bi;
520
521 atomic_inc(&rdev->nr_pending);
522 rcu_read_unlock();
523 bi = bio_alloc_bioset(rdev->bdev, 0,
524 REQ_OP_WRITE | REQ_PREFLUSH,
525 GFP_NOIO, &mddev->bio_set);
526 bi->bi_end_io = md_end_flush;
527 bi->bi_private = rdev;
528 atomic_inc(&mddev->flush_pending);
529 submit_bio(bi);
530 rcu_read_lock();
531 }
532 rcu_read_unlock();
533 if (atomic_dec_and_test(&mddev->flush_pending)) {
534 /* The pair is percpu_ref_get() from md_flush_request() */
535 percpu_ref_put(&mddev->active_io);
536
537 queue_work(md_wq, &mddev->flush_work);
538 }
539 }
540
541 static void md_submit_flush_data(struct work_struct *ws)
542 {
543 struct mddev *mddev = container_of(ws, struct mddev, flush_work);
544 struct bio *bio = mddev->flush_bio;
545
546 /*
547 * flush_bio must be reset before calling into md_handle_request to avoid
548 * a deadlock: other bios that already passed the suspend check in
549 * md_handle_request could be waiting on this flush_bio, while the
550 * md_handle_request call below could in turn wait on those bios because of the suspend check.
551 */
552 spin_lock_irq(&mddev->lock);
553 mddev->prev_flush_start = mddev->start_flush;
554 mddev->flush_bio = NULL;
555 spin_unlock_irq(&mddev->lock);
556 wake_up(&mddev->sb_wait);
557
558 if (bio->bi_iter.bi_size == 0) {
559 /* an empty barrier - all done */
560 bio_endio(bio);
561 } else {
562 bio->bi_opf &= ~REQ_PREFLUSH;
563 md_handle_request(mddev, bio);
564 }
565 }
566
567 /*
568 * Manages consolidation of flushes and submitting any flushes needed for
569 * a bio with REQ_PREFLUSH. Returns true if the bio is finished or is
570 * being finished in another context. Returns false if the flushing is
571 * complete but still needs the I/O portion of the bio to be processed.
572 */
573 bool md_flush_request(struct mddev *mddev, struct bio *bio)
574 {
575 ktime_t req_start = ktime_get_boottime();
576 spin_lock_irq(&mddev->lock);
577 /* flush requests wait until ongoing flush completes,
578 * hence coalescing all the pending requests.
579 */
580 wait_event_lock_irq(mddev->sb_wait,
581 !mddev->flush_bio ||
582 ktime_before(req_start, mddev->prev_flush_start),
583 mddev->lock);
584 /* new request after previous flush is completed */
585 if (ktime_after(req_start, mddev->prev_flush_start)) {
586 WARN_ON(mddev->flush_bio);
587 /*
588 * Grab a reference to make sure mddev_suspend() will wait for
589 * this flush to be done.
590 *
591 * md_flush_request() is called under md_handle_request() and
592 * 'active_io' is already grabbed, hence percpu_ref_is_zero()
593 * won't pass, percpu_ref_tryget_live() can't be used because
594 * percpu_ref_kill() can be called by mddev_suspend()
595 * concurrently.
596 */
597 WARN_ON(percpu_ref_is_zero(&mddev->active_io));
598 percpu_ref_get(&mddev->active_io);
599 mddev->flush_bio = bio;
600 bio = NULL;
601 }
602 spin_unlock_irq(&mddev->lock);
603
604 if (!bio) {
605 INIT_WORK(&mddev->flush_work, submit_flushes);
606 queue_work(md_wq, &mddev->flush_work);
607 } else {
608 /* flush was performed for some other bio while we waited. */
609 if (bio->bi_iter.bi_size == 0)
610 /* an empty barrier - all done */
611 bio_endio(bio);
612 else {
613 bio->bi_opf &= ~REQ_PREFLUSH;
614 return false;
615 }
616 }
617 return true;
618 }
619 EXPORT_SYMBOL(md_flush_request);
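/*
 * Typical caller pattern in a personality's ->make_request method (an
 * illustrative sketch; the raid1/raid10/raid5 callers each look like this):
 *
 *	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
 *	    && md_flush_request(mddev, bio))
 *		return true;	(flush finished or queued on our behalf)
 *
 * When md_flush_request() returns false, REQ_PREFLUSH has been stripped and
 * the data portion of the bio still needs to be processed by the caller.
 */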
620
621 static inline struct mddev *mddev_get(struct mddev *mddev)
622 {
623 lockdep_assert_held(&all_mddevs_lock);
624
625 if (test_bit(MD_DELETED, &mddev->flags))
626 return NULL;
627 atomic_inc(&mddev->active);
628 return mddev;
629 }
630
631 static void mddev_delayed_delete(struct work_struct *ws);
632
633 void mddev_put(struct mddev *mddev)
634 {
635 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
636 return;
637 if (!mddev->raid_disks && list_empty(&mddev->disks) &&
638 mddev->ctime == 0 && !mddev->hold_active) {
639 /* Array is not configured at all, and not held active,
640 * so destroy it */
641 set_bit(MD_DELETED, &mddev->flags);
642
643 /*
644 * Call queue_work inside the spinlock so that
645 * flush_workqueue() after mddev_find will succeed in waiting
646 * for the work to be done.
647 */
648 INIT_WORK(&mddev->del_work, mddev_delayed_delete);
649 queue_work(md_misc_wq, &mddev->del_work);
650 }
651 spin_unlock(&all_mddevs_lock);
652 }
653
654 static void md_safemode_timeout(struct timer_list *t);
655
656 void mddev_init(struct mddev *mddev)
657 {
658 mutex_init(&mddev->open_mutex);
659 mutex_init(&mddev->reconfig_mutex);
660 mutex_init(&mddev->sync_mutex);
661 mutex_init(&mddev->bitmap_info.mutex);
662 INIT_LIST_HEAD(&mddev->disks);
663 INIT_LIST_HEAD(&mddev->all_mddevs);
664 INIT_LIST_HEAD(&mddev->deleting);
665 timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
666 atomic_set(&mddev->active, 1);
667 atomic_set(&mddev->openers, 0);
668 atomic_set(&mddev->sync_seq, 0);
669 spin_lock_init(&mddev->lock);
670 atomic_set(&mddev->flush_pending, 0);
671 init_waitqueue_head(&mddev->sb_wait);
672 init_waitqueue_head(&mddev->recovery_wait);
673 mddev->reshape_position = MaxSector;
674 mddev->reshape_backwards = 0;
675 mddev->last_sync_action = "none";
676 mddev->resync_min = 0;
677 mddev->resync_max = MaxSector;
678 mddev->level = LEVEL_NONE;
679 }
680 EXPORT_SYMBOL_GPL(mddev_init);
681
682 static struct mddev *mddev_find_locked(dev_t unit)
683 {
684 struct mddev *mddev;
685
686 list_for_each_entry(mddev, &all_mddevs, all_mddevs)
687 if (mddev->unit == unit)
688 return mddev;
689
690 return NULL;
691 }
692
693 /* find an unused unit number */
694 static dev_t mddev_alloc_unit(void)
695 {
696 static int next_minor = 512;
697 int start = next_minor;
698 bool is_free = 0;
699 dev_t dev = 0;
700
701 while (!is_free) {
702 dev = MKDEV(MD_MAJOR, next_minor);
703 next_minor++;
704 if (next_minor > MINORMASK)
705 next_minor = 0;
706 if (next_minor == start)
707 return 0; /* Oh dear, all in use. */
708 is_free = !mddev_find_locked(dev);
709 }
710
711 return dev;
712 }
713
714 static struct mddev *mddev_alloc(dev_t unit)
715 {
716 struct mddev *new;
717 int error;
718
719 if (unit && MAJOR(unit) != MD_MAJOR)
720 unit &= ~((1 << MdpMinorShift) - 1);
721
722 new = kzalloc(sizeof(*new), GFP_KERNEL);
723 if (!new)
724 return ERR_PTR(-ENOMEM);
725 mddev_init(new);
726
727 spin_lock(&all_mddevs_lock);
728 if (unit) {
729 error = -EEXIST;
730 if (mddev_find_locked(unit))
731 goto out_free_new;
732 new->unit = unit;
733 if (MAJOR(unit) == MD_MAJOR)
734 new->md_minor = MINOR(unit);
735 else
736 new->md_minor = MINOR(unit) >> MdpMinorShift;
737 new->hold_active = UNTIL_IOCTL;
738 } else {
739 error = -ENODEV;
740 new->unit = mddev_alloc_unit();
741 if (!new->unit)
742 goto out_free_new;
743 new->md_minor = MINOR(new->unit);
744 new->hold_active = UNTIL_STOP;
745 }
746
747 list_add(&new->all_mddevs, &all_mddevs);
748 spin_unlock(&all_mddevs_lock);
749 return new;
750 out_free_new:
751 spin_unlock(&all_mddevs_lock);
752 kfree(new);
753 return ERR_PTR(error);
754 }
755
756 static void mddev_free(struct mddev *mddev)
757 {
758 spin_lock(&all_mddevs_lock);
759 list_del(&mddev->all_mddevs);
760 spin_unlock(&all_mddevs_lock);
761
762 kfree(mddev);
763 }
764
765 static const struct attribute_group md_redundancy_group;
766
767 void mddev_unlock(struct mddev *mddev)
768 {
769 struct md_rdev *rdev;
770 struct md_rdev *tmp;
771 LIST_HEAD(delete);
772
773 if (!list_empty(&mddev->deleting))
774 list_splice_init(&mddev->deleting, &delete);
775
776 if (mddev->to_remove) {
777 /* These cannot be removed under reconfig_mutex as
778 * an access to the files will try to take reconfig_mutex
779 * while holding the file unremovable, which leads to
780 * a deadlock.
781 * So set sysfs_active while the removal is happening,
782 * and anything else which might set ->to_remove or may
783 * otherwise change the sysfs namespace will fail with
784 * -EBUSY if sysfs_active is still set.
785 * We set sysfs_active under reconfig_mutex and elsewhere
786 * test it under the same mutex to ensure its correct value
787 * is seen.
788 */
789 const struct attribute_group *to_remove = mddev->to_remove;
790 mddev->to_remove = NULL;
791 mddev->sysfs_active = 1;
792 mutex_unlock(&mddev->reconfig_mutex);
793
794 if (mddev->kobj.sd) {
795 if (to_remove != &md_redundancy_group)
796 sysfs_remove_group(&mddev->kobj, to_remove);
797 if (mddev->pers == NULL ||
798 mddev->pers->sync_request == NULL) {
799 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
800 if (mddev->sysfs_action)
801 sysfs_put(mddev->sysfs_action);
802 if (mddev->sysfs_completed)
803 sysfs_put(mddev->sysfs_completed);
804 if (mddev->sysfs_degraded)
805 sysfs_put(mddev->sysfs_degraded);
806 mddev->sysfs_action = NULL;
807 mddev->sysfs_completed = NULL;
808 mddev->sysfs_degraded = NULL;
809 }
810 }
811 mddev->sysfs_active = 0;
812 } else
813 mutex_unlock(&mddev->reconfig_mutex);
814
815 md_wakeup_thread(mddev->thread);
816 wake_up(&mddev->sb_wait);
817
818 list_for_each_entry_safe(rdev, tmp, &delete, same_set) {
819 list_del_init(&rdev->same_set);
820 kobject_del(&rdev->kobj);
821 export_rdev(rdev, mddev);
822 }
823 }
824 EXPORT_SYMBOL_GPL(mddev_unlock);
825
826 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
827 {
828 struct md_rdev *rdev;
829
830 rdev_for_each_rcu(rdev, mddev)
831 if (rdev->desc_nr == nr)
832 return rdev;
833
834 return NULL;
835 }
836 EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);
837
838 static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
839 {
840 struct md_rdev *rdev;
841
842 rdev_for_each(rdev, mddev)
843 if (rdev->bdev->bd_dev == dev)
844 return rdev;
845
846 return NULL;
847 }
848
849 struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
850 {
851 struct md_rdev *rdev;
852
853 rdev_for_each_rcu(rdev, mddev)
854 if (rdev->bdev->bd_dev == dev)
855 return rdev;
856
857 return NULL;
858 }
859 EXPORT_SYMBOL_GPL(md_find_rdev_rcu);
860
861 static struct md_personality *find_pers(int level, char *clevel)
862 {
863 struct md_personality *pers;
864 list_for_each_entry(pers, &pers_list, list) {
865 if (level != LEVEL_NONE && pers->level == level)
866 return pers;
867 if (strcmp(pers->name, clevel)==0)
868 return pers;
869 }
870 return NULL;
871 }
872
873 /* return the offset of the super block in 512byte sectors */
874 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
875 {
876 return MD_NEW_SIZE_SECTORS(bdev_nr_sectors(rdev->bdev));
877 }
878
879 static int alloc_disk_sb(struct md_rdev *rdev)
880 {
881 rdev->sb_page = alloc_page(GFP_KERNEL);
882 if (!rdev->sb_page)
883 return -ENOMEM;
884 return 0;
885 }
886
887 void md_rdev_clear(struct md_rdev *rdev)
888 {
889 if (rdev->sb_page) {
890 put_page(rdev->sb_page);
891 rdev->sb_loaded = 0;
892 rdev->sb_page = NULL;
893 rdev->sb_start = 0;
894 rdev->sectors = 0;
895 }
896 if (rdev->bb_page) {
897 put_page(rdev->bb_page);
898 rdev->bb_page = NULL;
899 }
900 badblocks_exit(&rdev->badblocks);
901 }
902 EXPORT_SYMBOL_GPL(md_rdev_clear);
903
904 static void super_written(struct bio *bio)
905 {
906 struct md_rdev *rdev = bio->bi_private;
907 struct mddev *mddev = rdev->mddev;
908
909 if (bio->bi_status) {
910 pr_err("md: %s gets error=%d\n", __func__,
911 blk_status_to_errno(bio->bi_status));
912 md_error(mddev, rdev);
913 if (!test_bit(Faulty, &rdev->flags)
914 && (bio->bi_opf & MD_FAILFAST)) {
915 set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
916 set_bit(LastDev, &rdev->flags);
917 }
918 } else
919 clear_bit(LastDev, &rdev->flags);
920
921 bio_put(bio);
922
923 rdev_dec_pending(rdev, mddev);
924
925 if (atomic_dec_and_test(&mddev->pending_writes))
926 wake_up(&mddev->sb_wait);
927 }
928
929 void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
930 sector_t sector, int size, struct page *page)
931 {
932 /* write first size bytes of page to sector of rdev
933 * Increment mddev->pending_writes before returning
934 * and decrement it on completion, waking up sb_wait
935 * if zero is reached.
936 * If an error occurred, call md_error
937 */
938 struct bio *bio;
939
940 if (!page)
941 return;
942
943 if (test_bit(Faulty, &rdev->flags))
944 return;
945
946 bio = bio_alloc_bioset(rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev,
947 1,
948 REQ_OP_WRITE | REQ_SYNC | REQ_IDLE | REQ_META
949 | REQ_PREFLUSH | REQ_FUA,
950 GFP_NOIO, &mddev->sync_set);
951
952 atomic_inc(&rdev->nr_pending);
953
954 bio->bi_iter.bi_sector = sector;
955 __bio_add_page(bio, page, size, 0);
956 bio->bi_private = rdev;
957 bio->bi_end_io = super_written;
958
959 if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
960 test_bit(FailFast, &rdev->flags) &&
961 !test_bit(LastDev, &rdev->flags))
962 bio->bi_opf |= MD_FAILFAST;
963
964 atomic_inc(&mddev->pending_writes);
965 submit_bio(bio);
966 }
967
968 int md_super_wait(struct mddev *mddev)
969 {
970 /* wait for all superblock writes that were scheduled to complete */
971 wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
972 if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
973 return -EAGAIN;
974 return 0;
975 }
976
977 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
978 struct page *page, blk_opf_t opf, bool metadata_op)
979 {
980 struct bio bio;
981 struct bio_vec bvec;
982
983 if (metadata_op && rdev->meta_bdev)
984 bio_init(&bio, rdev->meta_bdev, &bvec, 1, opf);
985 else
986 bio_init(&bio, rdev->bdev, &bvec, 1, opf);
987
988 if (metadata_op)
989 bio.bi_iter.bi_sector = sector + rdev->sb_start;
990 else if (rdev->mddev->reshape_position != MaxSector &&
991 (rdev->mddev->reshape_backwards ==
992 (sector >= rdev->mddev->reshape_position)))
993 bio.bi_iter.bi_sector = sector + rdev->new_data_offset;
994 else
995 bio.bi_iter.bi_sector = sector + rdev->data_offset;
996 __bio_add_page(&bio, page, size, 0);
997
998 submit_bio_wait(&bio);
999
1000 return !bio.bi_status;
1001 }
1002 EXPORT_SYMBOL_GPL(sync_page_io);
1003
1004 static int read_disk_sb(struct md_rdev *rdev, int size)
1005 {
1006 if (rdev->sb_loaded)
1007 return 0;
1008
1009 if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true))
1010 goto fail;
1011 rdev->sb_loaded = 1;
1012 return 0;
1013
1014 fail:
1015 pr_err("md: disabled device %pg, could not read superblock.\n",
1016 rdev->bdev);
1017 return -EINVAL;
1018 }
1019
1020 static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
1021 {
1022 return sb1->set_uuid0 == sb2->set_uuid0 &&
1023 sb1->set_uuid1 == sb2->set_uuid1 &&
1024 sb1->set_uuid2 == sb2->set_uuid2 &&
1025 sb1->set_uuid3 == sb2->set_uuid3;
1026 }
1027
1028 static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
1029 {
1030 int ret;
1031 mdp_super_t *tmp1, *tmp2;
1032
1033 tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
1034 tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
1035
1036 if (!tmp1 || !tmp2) {
1037 ret = 0;
1038 goto abort;
1039 }
1040
1041 *tmp1 = *sb1;
1042 *tmp2 = *sb2;
1043
1044 /*
1045 * nr_disks is not constant
1046 */
1047 tmp1->nr_disks = 0;
1048 tmp2->nr_disks = 0;
1049
1050 ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
1051 abort:
1052 kfree(tmp1);
1053 kfree(tmp2);
1054 return ret;
1055 }
1056
1057 static u32 md_csum_fold(u32 csum)
1058 {
1059 csum = (csum & 0xffff) + (csum >> 16);
1060 return (csum & 0xffff) + (csum >> 16);
1061 }
1062
1063 static unsigned int calc_sb_csum(mdp_super_t *sb)
1064 {
1065 u64 newcsum = 0;
1066 u32 *sb32 = (u32*)sb;
1067 int i;
1068 unsigned int disk_csum, csum;
1069
1070 disk_csum = sb->sb_csum;
1071 sb->sb_csum = 0;
1072
1073 for (i = 0; i < MD_SB_BYTES/4 ; i++)
1074 newcsum += sb32[i];
1075 csum = (newcsum & 0xffffffff) + (newcsum>>32);
1076
1077 #ifdef CONFIG_ALPHA
1078 /* This used to use csum_partial, which was wrong for several
1079 * reasons including that different results are returned on
1080 * different architectures. It isn't critical that we get exactly
1081 * the same return value as before (we always csum_fold before
1082 * testing, and that removes any differences). However as we
1083 * know that csum_partial always returned a 16bit value on
1084 * alphas, do a fold to maximise conformity to previous behaviour.
1085 */
1086 sb->sb_csum = md_csum_fold(disk_csum);
1087 #else
1088 sb->sb_csum = disk_csum;
1089 #endif
1090 return csum;
1091 }
1092
1093 /*
1094 * Handle superblock details.
1095 * We want to be able to handle multiple superblock formats
1096 * so we have a common interface to them all, and an array of
1097 * different handlers.
1098 * We rely on user-space to write the initial superblock, and support
1099 * reading and updating of superblocks.
1100 * Interface methods are:
1101 * int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
1102 * loads and validates a superblock on dev.
1103 * if refdev != NULL, compare superblocks on both devices
1104 * Return:
1105 * 0 - dev has a superblock that is compatible with refdev
1106 * 1 - dev has a superblock that is compatible and newer than refdev
1107 * so dev should be used as the refdev in future
1108 * -EINVAL superblock incompatible or invalid
1109 * -othererror e.g. -EIO
1110 *
1111 * int validate_super(struct mddev *mddev, struct md_rdev *dev)
1112 * Verify that dev is acceptable into mddev.
1113 * The first time, mddev->raid_disks will be 0, and data from
1114 * dev should be merged in. Subsequent calls check that dev
1115 * is new enough. Return 0 or -EINVAL
1116 *
1117 * void sync_super(struct mddev *mddev, struct md_rdev *dev)
1118 * Update the superblock for rdev with data in mddev
1119 * This does not write to disc.
1120 *
1121 */
1122
1123 struct super_type {
1124 char *name;
1125 struct module *owner;
1126 int (*load_super)(struct md_rdev *rdev,
1127 struct md_rdev *refdev,
1128 int minor_version);
1129 int (*validate_super)(struct mddev *mddev,
1130 struct md_rdev *freshest,
1131 struct md_rdev *rdev);
1132 void (*sync_super)(struct mddev *mddev,
1133 struct md_rdev *rdev);
1134 unsigned long long (*rdev_size_change)(struct md_rdev *rdev,
1135 sector_t num_sectors);
1136 int (*allow_new_offset)(struct md_rdev *rdev,
1137 unsigned long long new_offset);
1138 };
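/*
 * Illustrative entry for the 0.90 format (a sketch; the real table,
 * super_types[], is defined later in this file):
 *
 *	{
 *		.name		  = "0.90.0",
 *		.owner		  = THIS_MODULE,
 *		.load_super	  = super_90_load,
 *		.validate_super	  = super_90_validate,
 *		.sync_super	  = super_90_sync,
 *		.rdev_size_change = super_90_rdev_size_change,
 *		.allow_new_offset = super_90_allow_new_offset,
 *	},
 */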
1139
1140 /*
1141 * Check that the given mddev has no bitmap.
1142 *
1143 * This function is called from the run method of all personalities that do not
1144 * support bitmaps. It prints an error message and returns non-zero if mddev
1145 * has a bitmap. Otherwise, it returns 0.
1146 *
1147 */
1148 int md_check_no_bitmap(struct mddev *mddev)
1149 {
1150 if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
1151 return 0;
1152 pr_warn("%s: bitmaps are not supported for %s\n",
1153 mdname(mddev), mddev->pers->name);
1154 return 1;
1155 }
1156 EXPORT_SYMBOL(md_check_no_bitmap);
1157
1158 /*
1159 * load_super for 0.90.0
1160 */
1161 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1162 {
1163 mdp_super_t *sb;
1164 int ret;
1165 bool spare_disk = true;
1166
1167 /*
1168 * Calculate the position of the superblock (512byte sectors),
1169 * it's at the end of the disk.
1170 *
1171 * It also happens to be a multiple of 4Kb.
1172 */
1173 rdev->sb_start = calc_dev_sboffset(rdev);
1174
1175 ret = read_disk_sb(rdev, MD_SB_BYTES);
1176 if (ret)
1177 return ret;
1178
1179 ret = -EINVAL;
1180
1181 sb = page_address(rdev->sb_page);
1182
1183 if (sb->md_magic != MD_SB_MAGIC) {
1184 pr_warn("md: invalid raid superblock magic on %pg\n",
1185 rdev->bdev);
1186 goto abort;
1187 }
1188
1189 if (sb->major_version != 0 ||
1190 sb->minor_version < 90 ||
1191 sb->minor_version > 91) {
1192 pr_warn("Bad version number %d.%d on %pg\n",
1193 sb->major_version, sb->minor_version, rdev->bdev);
1194 goto abort;
1195 }
1196
1197 if (sb->raid_disks <= 0)
1198 goto abort;
1199
1200 if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
1201 pr_warn("md: invalid superblock checksum on %pg\n", rdev->bdev);
1202 goto abort;
1203 }
1204
1205 rdev->preferred_minor = sb->md_minor;
1206 rdev->data_offset = 0;
1207 rdev->new_data_offset = 0;
1208 rdev->sb_size = MD_SB_BYTES;
1209 rdev->badblocks.shift = -1;
1210
1211 if (sb->level == LEVEL_MULTIPATH)
1212 rdev->desc_nr = -1;
1213 else
1214 rdev->desc_nr = sb->this_disk.number;
1215
1216 /* not spare disk, or LEVEL_MULTIPATH */
1217 if (sb->level == LEVEL_MULTIPATH ||
1218 (rdev->desc_nr >= 0 &&
1219 rdev->desc_nr < MD_SB_DISKS &&
1220 sb->disks[rdev->desc_nr].state &
1221 ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
1222 spare_disk = false;
1223
1224 if (!refdev) {
1225 if (!spare_disk)
1226 ret = 1;
1227 else
1228 ret = 0;
1229 } else {
1230 __u64 ev1, ev2;
1231 mdp_super_t *refsb = page_address(refdev->sb_page);
1232 if (!md_uuid_equal(refsb, sb)) {
1233 pr_warn("md: %pg has different UUID to %pg\n",
1234 rdev->bdev, refdev->bdev);
1235 goto abort;
1236 }
1237 if (!md_sb_equal(refsb, sb)) {
1238 pr_warn("md: %pg has same UUID but different superblock to %pg\n",
1239 rdev->bdev, refdev->bdev);
1240 goto abort;
1241 }
1242 ev1 = md_event(sb);
1243 ev2 = md_event(refsb);
1244
1245 if (!spare_disk && ev1 > ev2)
1246 ret = 1;
1247 else
1248 ret = 0;
1249 }
1250 rdev->sectors = rdev->sb_start;
1251 /* Limit to 4TB as metadata cannot record more than that.
1252 * (not needed for Linear and RAID0 as metadata doesn't
1253 * record this size)
1254 */
1255 if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1)
1256 rdev->sectors = (sector_t)(2ULL << 32) - 2;
1257
1258 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
1259 /* "this cannot possibly happen" ... */
1260 ret = -EINVAL;
1261
1262 abort:
1263 return ret;
1264 }
1265
1266 /*
1267 * validate_super for 0.90.0
1268 * note: we are not using "freshest" for 0.9 superblock
1269 */
1270 static int super_90_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev)
1271 {
1272 mdp_disk_t *desc;
1273 mdp_super_t *sb = page_address(rdev->sb_page);
1274 __u64 ev1 = md_event(sb);
1275
1276 rdev->raid_disk = -1;
1277 clear_bit(Faulty, &rdev->flags);
1278 clear_bit(In_sync, &rdev->flags);
1279 clear_bit(Bitmap_sync, &rdev->flags);
1280 clear_bit(WriteMostly, &rdev->flags);
1281
1282 if (mddev->raid_disks == 0) {
1283 mddev->major_version = 0;
1284 mddev->minor_version = sb->minor_version;
1285 mddev->patch_version = sb->patch_version;
1286 mddev->external = 0;
1287 mddev->chunk_sectors = sb->chunk_size >> 9;
1288 mddev->ctime = sb->ctime;
1289 mddev->utime = sb->utime;
1290 mddev->level = sb->level;
1291 mddev->clevel[0] = 0;
1292 mddev->layout = sb->layout;
1293 mddev->raid_disks = sb->raid_disks;
1294 mddev->dev_sectors = ((sector_t)sb->size) * 2;
1295 mddev->events = ev1;
1296 mddev->bitmap_info.offset = 0;
1297 mddev->bitmap_info.space = 0;
1298 /* bitmap can use 60 K after the 4K superblocks */
1299 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
1300 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
1301 mddev->reshape_backwards = 0;
1302
1303 if (mddev->minor_version >= 91) {
1304 mddev->reshape_position = sb->reshape_position;
1305 mddev->delta_disks = sb->delta_disks;
1306 mddev->new_level = sb->new_level;
1307 mddev->new_layout = sb->new_layout;
1308 mddev->new_chunk_sectors = sb->new_chunk >> 9;
1309 if (mddev->delta_disks < 0)
1310 mddev->reshape_backwards = 1;
1311 } else {
1312 mddev->reshape_position = MaxSector;
1313 mddev->delta_disks = 0;
1314 mddev->new_level = mddev->level;
1315 mddev->new_layout = mddev->layout;
1316 mddev->new_chunk_sectors = mddev->chunk_sectors;
1317 }
1318 if (mddev->level == 0)
1319 mddev->layout = -1;
1320
1321 if (sb->state & (1<<MD_SB_CLEAN))
1322 mddev->recovery_cp = MaxSector;
1323 else {
1324 if (sb->events_hi == sb->cp_events_hi &&
1325 sb->events_lo == sb->cp_events_lo) {
1326 mddev->recovery_cp = sb->recovery_cp;
1327 } else
1328 mddev->recovery_cp = 0;
1329 }
1330
1331 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
1332 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
1333 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
1334 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
1335
1336 mddev->max_disks = MD_SB_DISKS;
1337
1338 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
1339 mddev->bitmap_info.file == NULL) {
1340 mddev->bitmap_info.offset =
1341 mddev->bitmap_info.default_offset;
1342 mddev->bitmap_info.space =
1343 mddev->bitmap_info.default_space;
1344 }
1345
1346 } else if (mddev->pers == NULL) {
1347 /* Insist on a good event counter while assembling, except
1348 * for spares (which don't need an event count) */
1349 ++ev1;
1350 if (sb->disks[rdev->desc_nr].state & (
1351 (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
1352 if (ev1 < mddev->events)
1353 return -EINVAL;
1354 } else if (mddev->bitmap) {
1355 /* if adding to array with a bitmap, then we can accept an
1356 * older device ... but not too old.
1357 */
1358 if (ev1 < mddev->bitmap->events_cleared)
1359 return 0;
1360 if (ev1 < mddev->events)
1361 set_bit(Bitmap_sync, &rdev->flags);
1362 } else {
1363 if (ev1 < mddev->events)
1364 /* just a hot-add of a new device, leave raid_disk at -1 */
1365 return 0;
1366 }
1367
1368 if (mddev->level != LEVEL_MULTIPATH) {
1369 desc = sb->disks + rdev->desc_nr;
1370
1371 if (desc->state & (1<<MD_DISK_FAULTY))
1372 set_bit(Faulty, &rdev->flags);
1373 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
1374 desc->raid_disk < mddev->raid_disks */) {
1375 set_bit(In_sync, &rdev->flags);
1376 rdev->raid_disk = desc->raid_disk;
1377 rdev->saved_raid_disk = desc->raid_disk;
1378 } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
1379 /* active but not in sync implies recovery up to
1380 * reshape position. We don't know exactly where
1381 * that is, so set to zero for now */
1382 if (mddev->minor_version >= 91) {
1383 rdev->recovery_offset = 0;
1384 rdev->raid_disk = desc->raid_disk;
1385 }
1386 }
1387 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
1388 set_bit(WriteMostly, &rdev->flags);
1389 if (desc->state & (1<<MD_DISK_FAILFAST))
1390 set_bit(FailFast, &rdev->flags);
1391 } else /* MULTIPATH are always insync */
1392 set_bit(In_sync, &rdev->flags);
1393 return 0;
1394 }
1395
1396 /*
1397 * sync_super for 0.90.0
1398 */
1399 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
1400 {
1401 mdp_super_t *sb;
1402 struct md_rdev *rdev2;
1403 int next_spare = mddev->raid_disks;
1404
1405 /* make rdev->sb match mddev data..
1406 *
1407 * 1/ zero out disks
1408 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
1409 * 3/ any empty disks < next_spare become removed
1410 *
1411 * disks[0] gets initialised to REMOVED because
1412 * we cannot be sure from other fields if it has
1413 * been initialised or not.
1414 */
1415 int i;
1416 int active=0, working=0,failed=0,spare=0,nr_disks=0;
1417
1418 rdev->sb_size = MD_SB_BYTES;
1419
1420 sb = page_address(rdev->sb_page);
1421
1422 memset(sb, 0, sizeof(*sb));
1423
1424 sb->md_magic = MD_SB_MAGIC;
1425 sb->major_version = mddev->major_version;
1426 sb->patch_version = mddev->patch_version;
1427 sb->gvalid_words = 0; /* ignored */
1428 memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1429 memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1430 memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1431 memcpy(&sb->set_uuid3, mddev->uuid+12,4);
1432
1433 sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
1434 sb->level = mddev->level;
1435 sb->size = mddev->dev_sectors / 2;
1436 sb->raid_disks = mddev->raid_disks;
1437 sb->md_minor = mddev->md_minor;
1438 sb->not_persistent = 0;
1439 sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
1440 sb->state = 0;
1441 sb->events_hi = (mddev->events>>32);
1442 sb->events_lo = (u32)mddev->events;
1443
1444 if (mddev->reshape_position == MaxSector)
1445 sb->minor_version = 90;
1446 else {
1447 sb->minor_version = 91;
1448 sb->reshape_position = mddev->reshape_position;
1449 sb->new_level = mddev->new_level;
1450 sb->delta_disks = mddev->delta_disks;
1451 sb->new_layout = mddev->new_layout;
1452 sb->new_chunk = mddev->new_chunk_sectors << 9;
1453 }
1454 mddev->minor_version = sb->minor_version;
1455 if (mddev->in_sync)
1456 {
1457 sb->recovery_cp = mddev->recovery_cp;
1458 sb->cp_events_hi = (mddev->events>>32);
1459 sb->cp_events_lo = (u32)mddev->events;
1460 if (mddev->recovery_cp == MaxSector)
1461 sb->state = (1<< MD_SB_CLEAN);
1462 } else
1463 sb->recovery_cp = 0;
1464
1465 sb->layout = mddev->layout;
1466 sb->chunk_size = mddev->chunk_sectors << 9;
1467
1468 if (mddev->bitmap && mddev->bitmap_info.file == NULL)
1469 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1470
1471 sb->disks[0].state = (1<<MD_DISK_REMOVED);
1472 rdev_for_each(rdev2, mddev) {
1473 mdp_disk_t *d;
1474 int desc_nr;
1475 int is_active = test_bit(In_sync, &rdev2->flags);
1476
1477 if (rdev2->raid_disk >= 0 &&
1478 sb->minor_version >= 91)
1479 /* we have nowhere to store the recovery_offset,
1480 * but if it is not below the reshape_position,
1481 * we can piggy-back on that.
1482 */
1483 is_active = 1;
1484 if (rdev2->raid_disk < 0 ||
1485 test_bit(Faulty, &rdev2->flags))
1486 is_active = 0;
1487 if (is_active)
1488 desc_nr = rdev2->raid_disk;
1489 else
1490 desc_nr = next_spare++;
1491 rdev2->desc_nr = desc_nr;
1492 d = &sb->disks[rdev2->desc_nr];
1493 nr_disks++;
1494 d->number = rdev2->desc_nr;
1495 d->major = MAJOR(rdev2->bdev->bd_dev);
1496 d->minor = MINOR(rdev2->bdev->bd_dev);
1497 if (is_active)
1498 d->raid_disk = rdev2->raid_disk;
1499 else
1500 d->raid_disk = rdev2->desc_nr; /* compatibility */
1501 if (test_bit(Faulty, &rdev2->flags))
1502 d->state = (1<<MD_DISK_FAULTY);
1503 else if (is_active) {
1504 d->state = (1<<MD_DISK_ACTIVE);
1505 if (test_bit(In_sync, &rdev2->flags))
1506 d->state |= (1<<MD_DISK_SYNC);
1507 active++;
1508 working++;
1509 } else {
1510 d->state = 0;
1511 spare++;
1512 working++;
1513 }
1514 if (test_bit(WriteMostly, &rdev2->flags))
1515 d->state |= (1<<MD_DISK_WRITEMOSTLY);
1516 if (test_bit(FailFast, &rdev2->flags))
1517 d->state |= (1<<MD_DISK_FAILFAST);
1518 }
1519 /* now set the "removed" and "faulty" bits on any missing devices */
1520 for (i=0 ; i < mddev->raid_disks ; i++) {
1521 mdp_disk_t *d = &sb->disks[i];
1522 if (d->state == 0 && d->number == 0) {
1523 d->number = i;
1524 d->raid_disk = i;
1525 d->state = (1<<MD_DISK_REMOVED);
1526 d->state |= (1<<MD_DISK_FAULTY);
1527 failed++;
1528 }
1529 }
1530 sb->nr_disks = nr_disks;
1531 sb->active_disks = active;
1532 sb->working_disks = working;
1533 sb->failed_disks = failed;
1534 sb->spare_disks = spare;
1535
1536 sb->this_disk = sb->disks[rdev->desc_nr];
1537 sb->sb_csum = calc_sb_csum(sb);
1538 }
1539
1540 /*
1541 * rdev_size_change for 0.90.0
1542 */
1543 static unsigned long long
1544 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1545 {
1546 if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1547 return 0; /* component must fit device */
1548 if (rdev->mddev->bitmap_info.offset)
1549 return 0; /* can't move bitmap */
1550 rdev->sb_start = calc_dev_sboffset(rdev);
1551 if (!num_sectors || num_sectors > rdev->sb_start)
1552 num_sectors = rdev->sb_start;
1553 /* Limit to 4TB as metadata cannot record more than that.
1554 * 4TB == 2^32 KB, or 2*2^32 sectors.
1555 */
1556 if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
1557 num_sectors = (sector_t)(2ULL << 32) - 2;
1558 do {
1559 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1560 rdev->sb_page);
1561 } while (md_super_wait(rdev->mddev) < 0);
1562 return num_sectors;
1563 }
1564
1565 static int
1566 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1567 {
1568 /* non-zero offset changes not possible with v0.90 */
1569 return new_offset == 0;
1570 }
1571
1572 /*
1573 * version 1 superblock
1574 */
1575
1576 static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
1577 {
1578 __le32 disk_csum;
1579 u32 csum;
1580 unsigned long long newcsum;
1581 int size = 256 + le32_to_cpu(sb->max_dev)*2;
1582 __le32 *isuper = (__le32*)sb;
1583
1584 disk_csum = sb->sb_csum;
1585 sb->sb_csum = 0;
1586 newcsum = 0;
1587 for (; size >= 4; size -= 4)
1588 newcsum += le32_to_cpu(*isuper++);
1589
1590 if (size == 2)
1591 newcsum += le16_to_cpu(*(__le16*) isuper);
1592
1593 csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1594 sb->sb_csum = disk_csum;
1595 return cpu_to_le32(csum);
1596 }
1597
1598 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1599 {
1600 struct mdp_superblock_1 *sb;
1601 int ret;
1602 sector_t sb_start;
1603 sector_t sectors;
1604 int bmask;
1605 bool spare_disk = true;
1606
1607 /*
1608 * Calculate the position of the superblock in 512byte sectors.
1609 * It is always aligned to a 4K boundary and
1610 * depending on minor_version, it can be:
1611 * 0: At least 8K, but less than 12K, from end of device
1612 * 1: At start of device
1613 * 2: 4K from start of device.
1614 */
1615 switch(minor_version) {
1616 case 0:
1617 sb_start = bdev_nr_sectors(rdev->bdev) - 8 * 2;
1618 sb_start &= ~(sector_t)(4*2-1);
1619 break;
1620 case 1:
1621 sb_start = 0;
1622 break;
1623 case 2:
1624 sb_start = 8;
1625 break;
1626 default:
1627 return -EINVAL;
1628 }
1629 rdev->sb_start = sb_start;
1630
1631 /* superblock is rarely larger than 1K, but it can be larger,
1632 * and it is safe to read 4k, so we do that
1633 */
1634 ret = read_disk_sb(rdev, 4096);
1635 if (ret) return ret;
1636
1637 sb = page_address(rdev->sb_page);
1638
1639 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1640 sb->major_version != cpu_to_le32(1) ||
1641 le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1642 le64_to_cpu(sb->super_offset) != rdev->sb_start ||
1643 (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1644 return -EINVAL;
1645
1646 if (calc_sb_1_csum(sb) != sb->sb_csum) {
1647 pr_warn("md: invalid superblock checksum on %pg\n",
1648 rdev->bdev);
1649 return -EINVAL;
1650 }
1651 if (le64_to_cpu(sb->data_size) < 10) {
1652 pr_warn("md: data_size too small on %pg\n",
1653 rdev->bdev);
1654 return -EINVAL;
1655 }
1656 if (sb->pad0 ||
1657 sb->pad3[0] ||
1658 memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
1659 /* Some padding is non-zero, might be a new feature */
1660 return -EINVAL;
1661
1662 rdev->preferred_minor = 0xffff;
1663 rdev->data_offset = le64_to_cpu(sb->data_offset);
1664 rdev->new_data_offset = rdev->data_offset;
1665 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1666 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1667 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
1668 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1669
1670 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1671 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1672 if (rdev->sb_size & bmask)
1673 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1674
1675 if (minor_version
1676 && rdev->data_offset < sb_start + (rdev->sb_size/512))
1677 return -EINVAL;
1678 if (minor_version
1679 && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1680 return -EINVAL;
1681
1682 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1683 rdev->desc_nr = -1;
1684 else
1685 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1686
1687 if (!rdev->bb_page) {
1688 rdev->bb_page = alloc_page(GFP_KERNEL);
1689 if (!rdev->bb_page)
1690 return -ENOMEM;
1691 }
1692 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1693 rdev->badblocks.count == 0) {
1694 /* need to load the bad block list.
1695 * Currently we limit it to one page.
1696 */
1697 s32 offset;
1698 sector_t bb_sector;
1699 __le64 *bbp;
1700 int i;
1701 int sectors = le16_to_cpu(sb->bblog_size);
1702 if (sectors > (PAGE_SIZE / 512))
1703 return -EINVAL;
1704 offset = le32_to_cpu(sb->bblog_offset);
1705 if (offset == 0)
1706 return -EINVAL;
1707 bb_sector = (long long)offset;
1708 if (!sync_page_io(rdev, bb_sector, sectors << 9,
1709 rdev->bb_page, REQ_OP_READ, true))
1710 return -EIO;
1711 bbp = (__le64 *)page_address(rdev->bb_page);
1712 rdev->badblocks.shift = sb->bblog_shift;
1713 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1714 u64 bb = le64_to_cpu(*bbp);
1715 int count = bb & (0x3ff);
1716 u64 sector = bb >> 10;
1717 sector <<= sb->bblog_shift;
1718 count <<= sb->bblog_shift;
1719 if (bb + 1 == 0)
1720 break;
1721 if (badblocks_set(&rdev->badblocks, sector, count, 1))
1722 return -EINVAL;
1723 }
1724 } else if (sb->bblog_offset != 0)
1725 rdev->badblocks.shift = 0;
1726
1727 if ((le32_to_cpu(sb->feature_map) &
1728 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) {
1729 rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset);
1730 rdev->ppl.size = le16_to_cpu(sb->ppl.size);
1731 rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
1732 }
1733
1734 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) &&
1735 sb->level != 0)
1736 return -EINVAL;
1737
1738 /* not spare disk, or LEVEL_MULTIPATH */
1739 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) ||
1740 (rdev->desc_nr >= 0 &&
1741 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1742 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1743 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)))
1744 spare_disk = false;
1745
1746 if (!refdev) {
1747 if (!spare_disk)
1748 ret = 1;
1749 else
1750 ret = 0;
1751 } else {
1752 __u64 ev1, ev2;
1753 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
1754
1755 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1756 sb->level != refsb->level ||
1757 sb->layout != refsb->layout ||
1758 sb->chunksize != refsb->chunksize) {
1759 pr_warn("md: %pg has strangely different superblock to %pg\n",
1760 rdev->bdev,
1761 refdev->bdev);
1762 return -EINVAL;
1763 }
1764 ev1 = le64_to_cpu(sb->events);
1765 ev2 = le64_to_cpu(refsb->events);
1766
1767 if (!spare_disk && ev1 > ev2)
1768 ret = 1;
1769 else
1770 ret = 0;
1771 }
1772 if (minor_version)
1773 sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
1774 else
1775 sectors = rdev->sb_start;
1776 if (sectors < le64_to_cpu(sb->data_size))
1777 return -EINVAL;
1778 rdev->sectors = le64_to_cpu(sb->data_size);
1779 return ret;
1780 }
1781
1782 static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev)
1783 {
1784 struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
1785 __u64 ev1 = le64_to_cpu(sb->events);
1786
1787 rdev->raid_disk = -1;
1788 clear_bit(Faulty, &rdev->flags);
1789 clear_bit(In_sync, &rdev->flags);
1790 clear_bit(Bitmap_sync, &rdev->flags);
1791 clear_bit(WriteMostly, &rdev->flags);
1792
1793 if (mddev->raid_disks == 0) {
1794 mddev->major_version = 1;
1795 mddev->patch_version = 0;
1796 mddev->external = 0;
1797 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
1798 mddev->ctime = le64_to_cpu(sb->ctime);
1799 mddev->utime = le64_to_cpu(sb->utime);
1800 mddev->level = le32_to_cpu(sb->level);
1801 mddev->clevel[0] = 0;
1802 mddev->layout = le32_to_cpu(sb->layout);
1803 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1804 mddev->dev_sectors = le64_to_cpu(sb->size);
1805 mddev->events = ev1;
1806 mddev->bitmap_info.offset = 0;
1807 mddev->bitmap_info.space = 0;
1808 /* Default location for bitmap is 1K after superblock
1809 * using 3K - total of 4K
1810 */
1811 mddev->bitmap_info.default_offset = 1024 >> 9;
1812 mddev->bitmap_info.default_space = (4096-1024) >> 9;
1813 mddev->reshape_backwards = 0;
1814
1815 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1816 memcpy(mddev->uuid, sb->set_uuid, 16);
1817
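/* A v1.x superblock occupies at most 4K; after the 256-byte fixed header
 * each device role entry takes 2 bytes, hence (4096-256)/2 device slots.
 */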
1818 mddev->max_disks = (4096-256)/2;
1819
1820 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1821 mddev->bitmap_info.file == NULL) {
1822 mddev->bitmap_info.offset =
1823 (__s32)le32_to_cpu(sb->bitmap_offset);
1824 /* Metadata doesn't record how much space is available.
1825 * For 1.0, we assume we can use up to the superblock
1826 * if before, else to 4K beyond superblock.
1827 * For others, assume no change is possible.
1828 */
1829 if (mddev->minor_version > 0)
1830 mddev->bitmap_info.space = 0;
1831 else if (mddev->bitmap_info.offset > 0)
1832 mddev->bitmap_info.space =
1833 8 - mddev->bitmap_info.offset;
1834 else
1835 mddev->bitmap_info.space =
1836 -mddev->bitmap_info.offset;
1837 }
1838
1839 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1840 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1841 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1842 mddev->new_level = le32_to_cpu(sb->new_level);
1843 mddev->new_layout = le32_to_cpu(sb->new_layout);
1844 mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
1845 if (mddev->delta_disks < 0 ||
1846 (mddev->delta_disks == 0 &&
1847 (le32_to_cpu(sb->feature_map)
1848 & MD_FEATURE_RESHAPE_BACKWARDS)))
1849 mddev->reshape_backwards = 1;
1850 } else {
1851 mddev->reshape_position = MaxSector;
1852 mddev->delta_disks = 0;
1853 mddev->new_level = mddev->level;
1854 mddev->new_layout = mddev->layout;
1855 mddev->new_chunk_sectors = mddev->chunk_sectors;
1856 }
1857
1858 if (mddev->level == 0 &&
1859 !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT))
1860 mddev->layout = -1;
1861
1862 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
1863 set_bit(MD_HAS_JOURNAL, &mddev->flags);
1864
1865 if (le32_to_cpu(sb->feature_map) &
1866 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) {
1867 if (le32_to_cpu(sb->feature_map) &
1868 (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL))
1869 return -EINVAL;
1870 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) &&
1871 (le32_to_cpu(sb->feature_map) &
1872 MD_FEATURE_MULTIPLE_PPLS))
1873 return -EINVAL;
1874 set_bit(MD_HAS_PPL, &mddev->flags);
1875 }
1876 } else if (mddev->pers == NULL) {
1877 /* Insist on a good event counter while assembling, except for
1878 * spares (which don't need an event count).
1879 * Similar to mdadm, we allow event counter difference of 1
1880 * from the freshest device.
1881 */
1882 if (rdev->desc_nr >= 0 &&
1883 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1884 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1885 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
1886 if (ev1 + 1 < mddev->events)
1887 return -EINVAL;
1888 } else if (mddev->bitmap) {
1889 /* If adding to array with a bitmap, then we can accept an
1890 * older device, but not too old.
1891 */
1892 if (ev1 < mddev->bitmap->events_cleared)
1893 return 0;
1894 if (ev1 < mddev->events)
1895 set_bit(Bitmap_sync, &rdev->flags);
1896 } else {
1897 if (ev1 < mddev->events)
1898 /* just a hot-add of a new device, leave raid_disk at -1 */
1899 return 0;
1900 }
1901 if (mddev->level != LEVEL_MULTIPATH) {
1902 int role;
1903 if (rdev->desc_nr < 0 ||
1904 rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
1905 role = MD_DISK_ROLE_SPARE;
1906 rdev->desc_nr = -1;
1907 } else if (mddev->pers == NULL && freshest && ev1 < mddev->events) {
1908 /*
1909 * If we are assembling, and our event counter is smaller than the
1910 * highest event counter, we cannot trust our superblock about the role.
1911 * It could happen that our rdev was marked as Faulty, and all other
1912 * superblocks were updated with +1 event counter.
1913 * Then, before the next superblock update, which typically happens when
1914 * remove_and_add_spares() removes the device from the array, there was
1915 * a crash or reboot.
1916 * If we allow current rdev without consulting the freshest superblock,
1917 * we could cause data corruption.
1918 * Note that in this case our event counter is smaller by 1 than the
1919 * highest, otherwise, this rdev would not be allowed into array;
1920 * both kernel and mdadm allow event counter difference of 1.
1921 */
1922 struct mdp_superblock_1 *freshest_sb = page_address(freshest->sb_page);
1923 u32 freshest_max_dev = le32_to_cpu(freshest_sb->max_dev);
1924
1925 if (rdev->desc_nr >= freshest_max_dev) {
1926 /* this is unexpected, better not proceed */
1927 pr_warn("md: %s: rdev[%pg]: desc_nr(%d) >= freshest(%pg)->sb->max_dev(%u)\n",
1928 mdname(mddev), rdev->bdev, rdev->desc_nr,
1929 freshest->bdev, freshest_max_dev);
1930 return -EUCLEAN;
1931 }
1932
1933 role = le16_to_cpu(freshest_sb->dev_roles[rdev->desc_nr]);
1934 pr_debug("md: %s: rdev[%pg]: role=%d(0x%x) according to freshest %pg\n",
1935 mdname(mddev), rdev->bdev, role, role, freshest->bdev);
1936 } else {
1937 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1938 }
1939 switch(role) {
1940 case MD_DISK_ROLE_SPARE: /* spare */
1941 break;
1942 case MD_DISK_ROLE_FAULTY: /* faulty */
1943 set_bit(Faulty, &rdev->flags);
1944 break;
1945 case MD_DISK_ROLE_JOURNAL: /* journal device */
1946 if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
1947 /* journal device without journal feature */
1948 pr_warn("md: journal device provided without journal feature, ignoring the device\n");
1949 return -EINVAL;
1950 }
1951 set_bit(Journal, &rdev->flags);
1952 rdev->journal_tail = le64_to_cpu(sb->journal_tail);
1953 rdev->raid_disk = 0;
1954 break;
1955 default:
1956 rdev->saved_raid_disk = role;
1957 if ((le32_to_cpu(sb->feature_map) &
1958 MD_FEATURE_RECOVERY_OFFSET)) {
1959 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1960 if (!(le32_to_cpu(sb->feature_map) &
1961 MD_FEATURE_RECOVERY_BITMAP))
1962 rdev->saved_raid_disk = -1;
1963 } else {
1964 /*
1965 * If the array is FROZEN, then the device can't
1966 * be in_sync with rest of array.
1967 */
1968 if (!test_bit(MD_RECOVERY_FROZEN,
1969 &mddev->recovery))
1970 set_bit(In_sync, &rdev->flags);
1971 }
1972 rdev->raid_disk = role;
1973 break;
1974 }
1975 if (sb->devflags & WriteMostly1)
1976 set_bit(WriteMostly, &rdev->flags);
1977 if (sb->devflags & FailFast1)
1978 set_bit(FailFast, &rdev->flags);
1979 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
1980 set_bit(Replacement, &rdev->flags);
1981 } else /* MULTIPATH are always insync */
1982 set_bit(In_sync, &rdev->flags);
1983
1984 return 0;
1985 }
1986
1987 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
1988 {
1989 struct mdp_superblock_1 *sb;
1990 struct md_rdev *rdev2;
1991 int max_dev, i;
1992 /* make rdev->sb match mddev and rdev data. */
1993
1994 sb = page_address(rdev->sb_page);
1995
1996 sb->feature_map = 0;
1997 sb->pad0 = 0;
1998 sb->recovery_offset = cpu_to_le64(0);
1999 memset(sb->pad3, 0, sizeof(sb->pad3));
2000
2001 sb->utime = cpu_to_le64((__u64)mddev->utime);
2002 sb->events = cpu_to_le64(mddev->events);
2003 if (mddev->in_sync)
2004 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
2005 else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
2006 sb->resync_offset = cpu_to_le64(MaxSector);
2007 else
2008 sb->resync_offset = cpu_to_le64(0);
2009
2010 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
2011
2012 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
2013 sb->size = cpu_to_le64(mddev->dev_sectors);
2014 sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
2015 sb->level = cpu_to_le32(mddev->level);
2016 sb->layout = cpu_to_le32(mddev->layout);
2017 if (test_bit(FailFast, &rdev->flags))
2018 sb->devflags |= FailFast1;
2019 else
2020 sb->devflags &= ~FailFast1;
2021
2022 if (test_bit(WriteMostly, &rdev->flags))
2023 sb->devflags |= WriteMostly1;
2024 else
2025 sb->devflags &= ~WriteMostly1;
2026 sb->data_offset = cpu_to_le64(rdev->data_offset);
2027 sb->data_size = cpu_to_le64(rdev->sectors);
2028
2029 if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
2030 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
2031 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
2032 }
2033
2034 if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
2035 !test_bit(In_sync, &rdev->flags)) {
2036 sb->feature_map |=
2037 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
2038 sb->recovery_offset =
2039 cpu_to_le64(rdev->recovery_offset);
2040 if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
2041 sb->feature_map |=
2042 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
2043 }
2044 /* Note: recovery_offset and journal_tail share space */
2045 if (test_bit(Journal, &rdev->flags))
2046 sb->journal_tail = cpu_to_le64(rdev->journal_tail);
2047 if (test_bit(Replacement, &rdev->flags))
2048 sb->feature_map |=
2049 cpu_to_le32(MD_FEATURE_REPLACEMENT);
2050
2051 if (mddev->reshape_position != MaxSector) {
2052 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
2053 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
2054 sb->new_layout = cpu_to_le32(mddev->new_layout);
2055 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
2056 sb->new_level = cpu_to_le32(mddev->new_level);
2057 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
2058 if (mddev->delta_disks == 0 &&
2059 mddev->reshape_backwards)
2060 sb->feature_map
2061 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
2062 if (rdev->new_data_offset != rdev->data_offset) {
2063 sb->feature_map
2064 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
2065 sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
2066 - rdev->data_offset));
2067 }
2068 }
2069
2070 if (mddev_is_clustered(mddev))
2071 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
2072
2073 if (rdev->badblocks.count == 0)
2074 /* Nothing to do for bad blocks*/ ;
2075 else if (sb->bblog_offset == 0)
2076 /* Cannot record bad blocks on this device */
2077 md_error(mddev, rdev);
2078 else {
2079 struct badblocks *bb = &rdev->badblocks;
2080 __le64 *bbp = (__le64 *)page_address(rdev->bb_page);
2081 u64 *p = bb->page;
2082 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
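/* Re-encode the in-memory bad-block table into the on-disk format that
 * super_1_load() expects: (start << 10) | length per u64, with unused
 * entries left as all-ones terminators.  The seqlock retry below makes
 * sure a consistent snapshot of the table is written.
 */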
2083 if (bb->changed) {
2084 unsigned seq;
2085
2086 retry:
2087 seq = read_seqbegin(&bb->lock);
2088
2089 memset(bbp, 0xff, PAGE_SIZE);
2090
2091 for (i = 0 ; i < bb->count ; i++) {
2092 u64 internal_bb = p[i];
2093 u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
2094 | BB_LEN(internal_bb));
2095 bbp[i] = cpu_to_le64(store_bb);
2096 }
2097 bb->changed = 0;
2098 if (read_seqretry(&bb->lock, seq))
2099 goto retry;
2100
2101 bb->sector = (rdev->sb_start +
2102 (int)le32_to_cpu(sb->bblog_offset));
2103 bb->size = le16_to_cpu(sb->bblog_size);
2104 }
2105 }
2106
2107 max_dev = 0;
2108 rdev_for_each(rdev2, mddev)
2109 if (rdev2->desc_nr+1 > max_dev)
2110 max_dev = rdev2->desc_nr+1;
2111
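/* Grow the on-disk role table if any device now has a higher slot number:
 * sb_size is the 256-byte header plus 2 bytes per role, rounded up to the
 * underlying device's logical block size.
 */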
2112 if (max_dev > le32_to_cpu(sb->max_dev)) {
2113 int bmask;
2114 sb->max_dev = cpu_to_le32(max_dev);
2115 rdev->sb_size = max_dev * 2 + 256;
2116 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
2117 if (rdev->sb_size & bmask)
2118 rdev->sb_size = (rdev->sb_size | bmask) + 1;
2119 } else
2120 max_dev = le32_to_cpu(sb->max_dev);
2121
2122 for (i=0; i<max_dev;i++)
2123 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
2124
2125 if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
2126 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
2127
2128 if (test_bit(MD_HAS_PPL, &mddev->flags)) {
2129 if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags))
2130 sb->feature_map |=
2131 cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS);
2132 else
2133 sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL);
2134 sb->ppl.offset = cpu_to_le16(rdev->ppl.offset);
2135 sb->ppl.size = cpu_to_le16(rdev->ppl.size);
2136 }
2137
2138 rdev_for_each(rdev2, mddev) {
2139 i = rdev2->desc_nr;
2140 if (test_bit(Faulty, &rdev2->flags))
2141 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
2142 else if (test_bit(In_sync, &rdev2->flags))
2143 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
2144 else if (test_bit(Journal, &rdev2->flags))
2145 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
2146 else if (rdev2->raid_disk >= 0)
2147 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
2148 else
2149 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
2150 }
2151
2152 sb->sb_csum = calc_sb_1_csum(sb);
2153 }
2154
2155 static sector_t super_1_choose_bm_space(sector_t dev_size)
2156 {
2157 sector_t bm_space;
2158
2159 /* All values are in 512-byte sectors: reserve 128k of bitmap space
2160 * if the device is 200GiB or larger, 64k if it is larger than 8GiB,
2161 * and 4k otherwise (none for devices smaller than 64k). */
2162 if (dev_size < 64*2)
2163 bm_space = 0;
2164 else if (dev_size - 64*2 >= 200*1024*1024*2)
2165 bm_space = 128*2;
2166 else if (dev_size - 4*2 > 8*1024*1024*2)
2167 bm_space = 64*2;
2168 else
2169 bm_space = 4*2;
2170 return bm_space;
2171 }
2172
2173 static unsigned long long
2174 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
2175 {
2176 struct mdp_superblock_1 *sb;
2177 sector_t max_sectors;
2178 if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
2179 return 0; /* component must fit device */
2180 if (rdev->data_offset != rdev->new_data_offset)
2181 return 0; /* too confusing */
2182 if (rdev->sb_start < rdev->data_offset) {
2183 /* minor versions 1 and 2; superblock before data */
2184 max_sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
2185 if (!num_sectors || num_sectors > max_sectors)
2186 num_sectors = max_sectors;
2187 } else if (rdev->mddev->bitmap_info.offset) {
2188 /* minor version 0 with bitmap we can't move */
2189 return 0;
2190 } else {
2191 /* minor version 0; superblock after data */
2192 sector_t sb_start, bm_space;
2193 sector_t dev_size = bdev_nr_sectors(rdev->bdev);
2194
2195 /* 8K is for superblock */
2196 sb_start = dev_size - 8*2;
2197 sb_start &= ~(sector_t)(4*2 - 1);
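/* i.e. the superblock goes 8K before the end of the device, aligned down
 * to a 4K boundary (all quantities here are in 512-byte sectors).
 */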
2198
2199 bm_space = super_1_choose_bm_space(dev_size);
2200
2201 /* Space that can be used to store data must exclude the
2202 * superblock, the bitmap space and the bad block space (4K)
2203 */
2204 max_sectors = sb_start - bm_space - 4*2;
2205
2206 if (!num_sectors || num_sectors > max_sectors)
2207 num_sectors = max_sectors;
2208 rdev->sb_start = sb_start;
2209 }
2210 sb = page_address(rdev->sb_page);
2211 sb->data_size = cpu_to_le64(num_sectors);
2212 sb->super_offset = cpu_to_le64(rdev->sb_start);
2213 sb->sb_csum = calc_sb_1_csum(sb);
2214 do {
2215 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
2216 rdev->sb_page);
2217 } while (md_super_wait(rdev->mddev) < 0);
2218 return num_sectors;
2219
2220 }
2221
2222 static int
2223 super_1_allow_new_offset(struct md_rdev *rdev,
2224 unsigned long long new_offset)
2225 {
2226 /* All necessary checks on new >= old have been done */
2227 struct bitmap *bitmap;
2228 if (new_offset >= rdev->data_offset)
2229 return 1;
2230
2231 /* with 1.0 metadata, there is no metadata to tread on
2232 * so we can always move back */
2233 if (rdev->mddev->minor_version == 0)
2234 return 1;
2235
2236 /* otherwise we must be sure not to step on
2237 * any metadata, so stay:
2238 * 36K beyond start of superblock
2239 * beyond end of badblocks
2240 * beyond write-intent bitmap
2241 */
2242 if (rdev->sb_start + (32+4)*2 > new_offset)
2243 return 0;
2244 bitmap = rdev->mddev->bitmap;
2245 if (bitmap && !rdev->mddev->bitmap_info.file &&
2246 rdev->sb_start + rdev->mddev->bitmap_info.offset +
2247 bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
2248 return 0;
2249 if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
2250 return 0;
2251
2252 return 1;
2253 }
2254
2255 static struct super_type super_types[] = {
2256 [0] = {
2257 .name = "0.90.0",
2258 .owner = THIS_MODULE,
2259 .load_super = super_90_load,
2260 .validate_super = super_90_validate,
2261 .sync_super = super_90_sync,
2262 .rdev_size_change = super_90_rdev_size_change,
2263 .allow_new_offset = super_90_allow_new_offset,
2264 },
2265 [1] = {
2266 .name = "md-1",
2267 .owner = THIS_MODULE,
2268 .load_super = super_1_load,
2269 .validate_super = super_1_validate,
2270 .sync_super = super_1_sync,
2271 .rdev_size_change = super_1_rdev_size_change,
2272 .allow_new_offset = super_1_allow_new_offset,
2273 },
2274 };
2275
2276 static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
2277 {
2278 if (mddev->sync_super) {
2279 mddev->sync_super(mddev, rdev);
2280 return;
2281 }
2282
2283 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
2284
2285 super_types[mddev->major_version].sync_super(mddev, rdev);
2286 }
2287
2288 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
2289 {
2290 struct md_rdev *rdev, *rdev2;
2291
2292 rcu_read_lock();
2293 rdev_for_each_rcu(rdev, mddev1) {
2294 if (test_bit(Faulty, &rdev->flags) ||
2295 test_bit(Journal, &rdev->flags) ||
2296 rdev->raid_disk == -1)
2297 continue;
2298 rdev_for_each_rcu(rdev2, mddev2) {
2299 if (test_bit(Faulty, &rdev2->flags) ||
2300 test_bit(Journal, &rdev2->flags) ||
2301 rdev2->raid_disk == -1)
2302 continue;
2303 if (rdev->bdev->bd_disk == rdev2->bdev->bd_disk) {
2304 rcu_read_unlock();
2305 return 1;
2306 }
2307 }
2308 }
2309 rcu_read_unlock();
2310 return 0;
2311 }
2312
2313 static LIST_HEAD(pending_raid_disks);
2314
2315 /*
2316 * Try to register data integrity profile for an mddev
2317 *
2318 * This is called when an array is started and after a disk has been kicked
2319 * from the array. It only succeeds if all working and active component devices
2320 * are integrity capable with matching profiles.
2321 */
2322 int md_integrity_register(struct mddev *mddev)
2323 {
2324 struct md_rdev *rdev, *reference = NULL;
2325
2326 if (list_empty(&mddev->disks))
2327 return 0; /* nothing to do */
2328 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
2329 return 0; /* shouldn't register, or already is */
2330 rdev_for_each(rdev, mddev) {
2331 /* skip spares and non-functional disks */
2332 if (test_bit(Faulty, &rdev->flags))
2333 continue;
2334 if (rdev->raid_disk < 0)
2335 continue;
2336 if (!reference) {
2337 /* Use the first rdev as the reference */
2338 reference = rdev;
2339 continue;
2340 }
2341 /* does this rdev's profile match the reference profile? */
2342 if (blk_integrity_compare(reference->bdev->bd_disk,
2343 rdev->bdev->bd_disk) < 0)
2344 return -EINVAL;
2345 }
2346 if (!reference || !bdev_get_integrity(reference->bdev))
2347 return 0;
2348 /*
2349 * All component devices are integrity capable and have matching
2350 * profiles, register the common profile for the md device.
2351 */
2352 blk_integrity_register(mddev->gendisk,
2353 bdev_get_integrity(reference->bdev));
2354
2355 pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
2356 if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE) ||
2357 (mddev->level != 1 && mddev->level != 10 &&
2358 bioset_integrity_create(&mddev->io_clone_set, BIO_POOL_SIZE))) {
2359 /*
2360 * No need to handle the failure of bioset_integrity_create,
2361 * because the function is called by md_run() -> pers->run(),
2362 * md_run() calls bioset_exit() -> bioset_integrity_free() in the
2363 * failure case.
2364 */
2365 pr_err("md: failed to create integrity pool for %s\n",
2366 mdname(mddev));
2367 return -EINVAL;
2368 }
2369 return 0;
2370 }
2371 EXPORT_SYMBOL(md_integrity_register);
2372
2373 /*
2374 * Attempt to add an rdev, but only if it is consistent with the current
2375 * integrity profile
2376 */
2377 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
2378 {
2379 struct blk_integrity *bi_mddev;
2380
2381 if (!mddev->gendisk)
2382 return 0;
2383
2384 bi_mddev = blk_get_integrity(mddev->gendisk);
2385
2386 if (!bi_mddev) /* nothing to do */
2387 return 0;
2388
2389 if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
2390 pr_err("%s: incompatible integrity profile for %pg\n",
2391 mdname(mddev), rdev->bdev);
2392 return -ENXIO;
2393 }
2394
2395 return 0;
2396 }
2397 EXPORT_SYMBOL(md_integrity_add_rdev);
2398
2399 static bool rdev_read_only(struct md_rdev *rdev)
2400 {
2401 return bdev_read_only(rdev->bdev) ||
2402 (rdev->meta_bdev && bdev_read_only(rdev->meta_bdev));
2403 }
2404
2405 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
2406 {
2407 char b[BDEVNAME_SIZE];
2408 int err;
2409
2410 /* prevent duplicates */
2411 if (find_rdev(mddev, rdev->bdev->bd_dev))
2412 return -EEXIST;
2413
2414 if (rdev_read_only(rdev) && mddev->pers)
2415 return -EROFS;
2416
2417 /* make sure rdev->sectors exceeds mddev->dev_sectors */
2418 if (!test_bit(Journal, &rdev->flags) &&
2419 rdev->sectors &&
2420 (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
2421 if (mddev->pers) {
2422 /* Cannot change size, so fail
2423 * If mddev->level <= 0, then we don't care
2424 * about aligning sizes (e.g. linear)
2425 */
2426 if (mddev->level > 0)
2427 return -ENOSPC;
2428 } else
2429 mddev->dev_sectors = rdev->sectors;
2430 }
2431
2432 /* Verify rdev->desc_nr is unique.
2433 * If it is -1, assign a free number, else
2434 * check number is not in use
2435 */
2436 rcu_read_lock();
2437 if (rdev->desc_nr < 0) {
2438 int choice = 0;
2439 if (mddev->pers)
2440 choice = mddev->raid_disks;
2441 while (md_find_rdev_nr_rcu(mddev, choice))
2442 choice++;
2443 rdev->desc_nr = choice;
2444 } else {
2445 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
2446 rcu_read_unlock();
2447 return -EBUSY;
2448 }
2449 }
2450 rcu_read_unlock();
2451 if (!test_bit(Journal, &rdev->flags) &&
2452 mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
2453 pr_warn("md: %s: array is limited to %d devices\n",
2454 mdname(mddev), mddev->max_disks);
2455 return -EBUSY;
2456 }
2457 snprintf(b, sizeof(b), "%pg", rdev->bdev);
2458 strreplace(b, '/', '!');
2459
2460 rdev->mddev = mddev;
2461 pr_debug("md: bind<%s>\n", b);
2462
2463 if (mddev->raid_disks)
2464 mddev_create_serial_pool(mddev, rdev, false);
2465
2466 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
2467 goto fail;
2468
2469 /* failure here is OK */
2470 err = sysfs_create_link(&rdev->kobj, bdev_kobj(rdev->bdev), "block");
2471 rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
2472 rdev->sysfs_unack_badblocks =
2473 sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks");
2474 rdev->sysfs_badblocks =
2475 sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks");
2476
2477 list_add_rcu(&rdev->same_set, &mddev->disks);
2478 bd_link_disk_holder(rdev->bdev, mddev->gendisk);
2479
2480 /* May as well allow recovery to be retried once */
2481 mddev->recovery_disabled++;
2482
2483 return 0;
2484
2485 fail:
2486 pr_warn("md: failed to register dev-%s for %s\n",
2487 b, mdname(mddev));
2488 mddev_destroy_serial_pool(mddev, rdev, false);
2489 return err;
2490 }
2491
2492 void md_autodetect_dev(dev_t dev);
2493
2494 /* just for claiming the bdev */
2495 static struct md_rdev claim_rdev;
2496
2497 static void export_rdev(struct md_rdev *rdev, struct mddev *mddev)
2498 {
2499 pr_debug("md: export_rdev(%pg)\n", rdev->bdev);
2500 md_rdev_clear(rdev);
2501 #ifndef MODULE
2502 if (test_bit(AutoDetected, &rdev->flags))
2503 md_autodetect_dev(rdev->bdev->bd_dev);
2504 #endif
2505 blkdev_put(rdev->bdev,
2506 test_bit(Holder, &rdev->flags) ? rdev : &claim_rdev);
2507 rdev->bdev = NULL;
2508 kobject_put(&rdev->kobj);
2509 }
2510
2511 static void md_kick_rdev_from_array(struct md_rdev *rdev)
2512 {
2513 struct mddev *mddev = rdev->mddev;
2514
2515 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
2516 list_del_rcu(&rdev->same_set);
2517 pr_debug("md: unbind<%pg>\n", rdev->bdev);
2518 mddev_destroy_serial_pool(rdev->mddev, rdev, false);
2519 rdev->mddev = NULL;
2520 sysfs_remove_link(&rdev->kobj, "block");
2521 sysfs_put(rdev->sysfs_state);
2522 sysfs_put(rdev->sysfs_unack_badblocks);
2523 sysfs_put(rdev->sysfs_badblocks);
2524 rdev->sysfs_state = NULL;
2525 rdev->sysfs_unack_badblocks = NULL;
2526 rdev->sysfs_badblocks = NULL;
2527 rdev->badblocks.count = 0;
2528
2529 synchronize_rcu();
2530
2531 /*
2532 * kobject_del() will wait for all in-progress writers to finish; those
2533 * writers run with reconfig_mutex held, so kobject_del() can't be called
2534 * under reconfig_mutex and is instead delayed until mddev_unlock().
2535 */
2536 list_add(&rdev->same_set, &mddev->deleting);
2537 }
2538
2539 static void export_array(struct mddev *mddev)
2540 {
2541 struct md_rdev *rdev;
2542
2543 while (!list_empty(&mddev->disks)) {
2544 rdev = list_first_entry(&mddev->disks, struct md_rdev,
2545 same_set);
2546 md_kick_rdev_from_array(rdev);
2547 }
2548 mddev->raid_disks = 0;
2549 mddev->major_version = 0;
2550 }
2551
2552 static bool set_in_sync(struct mddev *mddev)
2553 {
2554 lockdep_assert_held(&mddev->lock);
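/* Switch writes_pending to atomic mode so its counter can be read
 * reliably; sync_checkers counts concurrent callers so the ref is only
 * switched back to per-cpu mode once the last checker is done.
 */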
2555 if (!mddev->in_sync) {
2556 mddev->sync_checkers++;
2557 spin_unlock(&mddev->lock);
2558 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending);
2559 spin_lock(&mddev->lock);
2560 if (!mddev->in_sync &&
2561 percpu_ref_is_zero(&mddev->writes_pending)) {
2562 mddev->in_sync = 1;
2563 /*
2564 * Ensure ->in_sync is visible before we clear
2565 * ->sync_checkers.
2566 */
2567 smp_mb();
2568 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2569 sysfs_notify_dirent_safe(mddev->sysfs_state);
2570 }
2571 if (--mddev->sync_checkers == 0)
2572 percpu_ref_switch_to_percpu(&mddev->writes_pending);
2573 }
2574 if (mddev->safemode == 1)
2575 mddev->safemode = 0;
2576 return mddev->in_sync;
2577 }
2578
2579 static void sync_sbs(struct mddev *mddev, int nospares)
2580 {
2581 /* Update each superblock (in-memory image), but
2582 * if we are allowed to, skip spares which already
2583 * have the right event counter, or have one earlier
2584 * (which would mean they aren't being marked as dirty
2585 * with the rest of the array)
2586 */
2587 struct md_rdev *rdev;
2588 rdev_for_each(rdev, mddev) {
2589 if (rdev->sb_events == mddev->events ||
2590 (nospares &&
2591 rdev->raid_disk < 0 &&
2592 rdev->sb_events+1 == mddev->events)) {
2593 /* Don't update this superblock */
2594 rdev->sb_loaded = 2;
2595 } else {
2596 sync_super(mddev, rdev);
2597 rdev->sb_loaded = 1;
2598 }
2599 }
2600 }
2601
2602 static bool does_sb_need_changing(struct mddev *mddev)
2603 {
2604 struct md_rdev *rdev = NULL, *iter;
2605 struct mdp_superblock_1 *sb;
2606 int role;
2607
2608 /* Find a good rdev */
2609 rdev_for_each(iter, mddev)
2610 if ((iter->raid_disk >= 0) && !test_bit(Faulty, &iter->flags)) {
2611 rdev = iter;
2612 break;
2613 }
2614
2615 /* No good device found. */
2616 if (!rdev)
2617 return false;
2618
2619 sb = page_address(rdev->sb_page);
2620 /* Check if a device has become faulty or a spare become active */
2621 rdev_for_each(rdev, mddev) {
2622 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
2623 /* Device activated? */
2624 if (role == MD_DISK_ROLE_SPARE && rdev->raid_disk >= 0 &&
2625 !test_bit(Faulty, &rdev->flags))
2626 return true;
2627 /* Device turned faulty? */
2628 if (test_bit(Faulty, &rdev->flags) && (role < MD_DISK_ROLE_MAX))
2629 return true;
2630 }
2631
2632 /* Check if any mddev parameters have changed */
2633 if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
2634 (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
2635 (mddev->layout != le32_to_cpu(sb->layout)) ||
2636 (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
2637 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
2638 return true;
2639
2640 return false;
2641 }
2642
2643 void md_update_sb(struct mddev *mddev, int force_change)
2644 {
2645 struct md_rdev *rdev;
2646 int sync_req;
2647 int nospares = 0;
2648 int any_badblocks_changed = 0;
2649 int ret = -1;
2650
2651 if (!md_is_rdwr(mddev)) {
2652 if (force_change)
2653 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2654 return;
2655 }
2656
2657 repeat:
2658 if (mddev_is_clustered(mddev)) {
2659 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
2660 force_change = 1;
2661 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
2662 nospares = 1;
2663 ret = md_cluster_ops->metadata_update_start(mddev);
2664 /* Has someone else updated the sb? */
2665 if (!does_sb_need_changing(mddev)) {
2666 if (ret == 0)
2667 md_cluster_ops->metadata_update_cancel(mddev);
2668 bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2669 BIT(MD_SB_CHANGE_DEVS) |
2670 BIT(MD_SB_CHANGE_CLEAN));
2671 return;
2672 }
2673 }
2674
2675 /*
2676 * First make sure individual recovery_offsets are correct;
2677 * curr_resync_completed can only be used during recovery.
2678 * During reshape/resync it might use array addresses rather
2679 * than device addresses.
2680 */
2681 rdev_for_each(rdev, mddev) {
2682 if (rdev->raid_disk >= 0 &&
2683 mddev->delta_disks >= 0 &&
2684 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
2685 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
2686 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
2687 !test_bit(Journal, &rdev->flags) &&
2688 !test_bit(In_sync, &rdev->flags) &&
2689 mddev->curr_resync_completed > rdev->recovery_offset)
2690 rdev->recovery_offset = mddev->curr_resync_completed;
2691
2692 }
2693 if (!mddev->persistent) {
2694 clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2695 clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2696 if (!mddev->external) {
2697 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
2698 rdev_for_each(rdev, mddev) {
2699 if (rdev->badblocks.changed) {
2700 rdev->badblocks.changed = 0;
2701 ack_all_badblocks(&rdev->badblocks);
2702 md_error(mddev, rdev);
2703 }
2704 clear_bit(Blocked, &rdev->flags);
2705 clear_bit(BlockedBadBlocks, &rdev->flags);
2706 wake_up(&rdev->blocked_wait);
2707 }
2708 }
2709 wake_up(&mddev->sb_wait);
2710 return;
2711 }
2712
2713 spin_lock(&mddev->lock);
2714
2715 mddev->utime = ktime_get_real_seconds();
2716
2717 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
2718 force_change = 1;
2719 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
2720 /* just a clean <-> dirty transition, so possibly leave spares alone,
2721 * though if the events count doesn't have the right even/odd parity,
2722 * we will have to update the spares after all
2723 */
2724 nospares = 1;
2725 if (force_change)
2726 nospares = 0;
2727 if (mddev->degraded)
2728 /* If the array is degraded, then skipping spares is both
2729 * dangerous and fairly pointless.
2730 * Dangerous because a device that was removed from the array
2731 * might have an event_count that still looks up-to-date,
2732 * so it can be re-added without a resync.
2733 * Pointless because if there are any spares to skip,
2734 * then a recovery will happen and soon that array won't
2735 * be degraded any more and the spare can go back to sleep then.
2736 */
2737 nospares = 0;
2738
2739 sync_req = mddev->in_sync;
2740
2741 /* If this is just a dirty<->clean transition, and the array is clean
2742 * and 'events' is odd, we can roll back to the previous clean state */
2743 if (nospares
2744 && (mddev->in_sync && mddev->recovery_cp == MaxSector)
2745 && mddev->can_decrease_events
2746 && mddev->events != 1) {
2747 mddev->events--;
2748 mddev->can_decrease_events = 0;
2749 } else {
2750 /* otherwise we have to go forward and ... */
2751 mddev->events ++;
2752 mddev->can_decrease_events = nospares;
2753 }
2754
2755 /*
2756 * This 64-bit counter should never wrap.
2757 * Either we are in around ~1 trillion A.C., assuming
2758 * 1 reboot per second, or we have a bug...
2759 */
2760 WARN_ON(mddev->events == 0);
2761
2762 rdev_for_each(rdev, mddev) {
2763 if (rdev->badblocks.changed)
2764 any_badblocks_changed++;
2765 if (test_bit(Faulty, &rdev->flags))
2766 set_bit(FaultRecorded, &rdev->flags);
2767 }
2768
2769 sync_sbs(mddev, nospares);
2770 spin_unlock(&mddev->lock);
2771
2772 pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2773 mdname(mddev), mddev->in_sync);
2774
2775 if (mddev->queue)
2776 blk_add_trace_msg(mddev->queue, "md md_update_sb");
2777 rewrite:
2778 md_bitmap_update_sb(mddev->bitmap);
2779 rdev_for_each(rdev, mddev) {
2780 if (rdev->sb_loaded != 1)
2781 continue; /* no noise on spare devices */
2782
2783 if (!test_bit(Faulty, &rdev->flags)) {
2784 md_super_write(mddev,rdev,
2785 rdev->sb_start, rdev->sb_size,
2786 rdev->sb_page);
2787 pr_debug("md: (write) %pg's sb offset: %llu\n",
2788 rdev->bdev,
2789 (unsigned long long)rdev->sb_start);
2790 rdev->sb_events = mddev->events;
2791 if (rdev->badblocks.size) {
2792 md_super_write(mddev, rdev,
2793 rdev->badblocks.sector,
2794 rdev->badblocks.size << 9,
2795 rdev->bb_page);
2796 rdev->badblocks.size = 0;
2797 }
2798
2799 } else
2800 pr_debug("md: %pg (skipping faulty)\n",
2801 rdev->bdev);
2802
2803 if (mddev->level == LEVEL_MULTIPATH)
2804 /* only need to write one superblock... */
2805 break;
2806 }
2807 if (md_super_wait(mddev) < 0)
2808 goto rewrite;
2809 /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
2810
2811 if (mddev_is_clustered(mddev) && ret == 0)
2812 md_cluster_ops->metadata_update_finish(mddev);
2813
2814 if (mddev->in_sync != sync_req ||
2815 !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2816 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
2817 /* have to write it out again */
2818 goto repeat;
2819 wake_up(&mddev->sb_wait);
2820 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2821 sysfs_notify_dirent_safe(mddev->sysfs_completed);
2822
2823 rdev_for_each(rdev, mddev) {
2824 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2825 clear_bit(Blocked, &rdev->flags);
2826
2827 if (any_badblocks_changed)
2828 ack_all_badblocks(&rdev->badblocks);
2829 clear_bit(BlockedBadBlocks, &rdev->flags);
2830 wake_up(&rdev->blocked_wait);
2831 }
2832 }
2833 EXPORT_SYMBOL(md_update_sb);
2834
2835 static int add_bound_rdev(struct md_rdev *rdev)
2836 {
2837 struct mddev *mddev = rdev->mddev;
2838 int err = 0;
2839 bool add_journal = test_bit(Journal, &rdev->flags);
2840
2841 if (!mddev->pers->hot_remove_disk || add_journal) {
2842 /* If there is hot_add_disk but no hot_remove_disk
2843 * then added disks are for geometry changes,
2844 * and should be added immediately.
2845 */
2846 super_types[mddev->major_version].
2847 validate_super(mddev, NULL/*freshest*/, rdev);
2848 if (add_journal)
2849 mddev_suspend(mddev);
2850 err = mddev->pers->hot_add_disk(mddev, rdev);
2851 if (add_journal)
2852 mddev_resume(mddev);
2853 if (err) {
2854 md_kick_rdev_from_array(rdev);
2855 return err;
2856 }
2857 }
2858 sysfs_notify_dirent_safe(rdev->sysfs_state);
2859
2860 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2861 if (mddev->degraded)
2862 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
2863 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2864 md_new_event();
2865 md_wakeup_thread(mddev->thread);
2866 return 0;
2867 }
2868
2869 /* words written to sysfs files may, or may not, be \n terminated.
2870 * We want to accept either case. For this we use cmd_match().
2871 */
2872 static int cmd_match(const char *cmd, const char *str)
2873 {
2874 /* See if cmd, written into a sysfs file, matches
2875 * str. They must either be the same, or cmd can
2876 * have a trailing newline
2877 */
2878 while (*cmd && *str && *cmd == *str) {
2879 cmd++;
2880 str++;
2881 }
2882 if (*cmd == '\n')
2883 cmd++;
2884 if (*str || *cmd)
2885 return 0;
2886 return 1;
2887 }
2888
2889 struct rdev_sysfs_entry {
2890 struct attribute attr;
2891 ssize_t (*show)(struct md_rdev *, char *);
2892 ssize_t (*store)(struct md_rdev *, const char *, size_t);
2893 };
2894
2895 static ssize_t
2896 state_show(struct md_rdev *rdev, char *page)
2897 {
2898 char *sep = ",";
2899 size_t len = 0;
2900 unsigned long flags = READ_ONCE(rdev->flags);
2901
2902 if (test_bit(Faulty, &flags) ||
2903 (!test_bit(ExternalBbl, &flags) &&
2904 rdev->badblocks.unacked_exist))
2905 len += sprintf(page+len, "faulty%s", sep);
2906 if (test_bit(In_sync, &flags))
2907 len += sprintf(page+len, "in_sync%s", sep);
2908 if (test_bit(Journal, &flags))
2909 len += sprintf(page+len, "journal%s", sep);
2910 if (test_bit(WriteMostly, &flags))
2911 len += sprintf(page+len, "write_mostly%s", sep);
2912 if (test_bit(Blocked, &flags) ||
2913 (rdev->badblocks.unacked_exist
2914 && !test_bit(Faulty, &flags)))
2915 len += sprintf(page+len, "blocked%s", sep);
2916 if (!test_bit(Faulty, &flags) &&
2917 !test_bit(Journal, &flags) &&
2918 !test_bit(In_sync, &flags))
2919 len += sprintf(page+len, "spare%s", sep);
2920 if (test_bit(WriteErrorSeen, &flags))
2921 len += sprintf(page+len, "write_error%s", sep);
2922 if (test_bit(WantReplacement, &flags))
2923 len += sprintf(page+len, "want_replacement%s", sep);
2924 if (test_bit(Replacement, &flags))
2925 len += sprintf(page+len, "replacement%s", sep);
2926 if (test_bit(ExternalBbl, &flags))
2927 len += sprintf(page+len, "external_bbl%s", sep);
2928 if (test_bit(FailFast, &flags))
2929 len += sprintf(page+len, "failfast%s", sep);
2930
2931 if (len)
2932 len -= strlen(sep);
2933
2934 return len+sprintf(page+len, "\n");
2935 }
2936
2937 static ssize_t
2938 state_store(struct md_rdev *rdev, const char *buf, size_t len)
2939 {
2940 /* can write
2941 * faulty - simulates an error
2942 * remove - disconnects the device
2943 * writemostly - sets write_mostly
2944 * -writemostly - clears write_mostly
2945 * blocked - sets the Blocked flags
2946 * -blocked - clears the Blocked and possibly simulates an error
2947 * insync - sets In_sync provided the device isn't active
2948 * -insync - clears In_sync for a device with a slot assigned,
2949 * so that it gets rebuilt based on the bitmap
2950 * write_error - sets WriteErrorSeen
2951 * -write_error - clears WriteErrorSeen
2952 * {,-}failfast - set/clear FailFast
2953 */
2954
2955 struct mddev *mddev = rdev->mddev;
2956 int err = -EINVAL;
2957 bool need_update_sb = false;
2958
2959 if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2960 md_error(rdev->mddev, rdev);
2961
2962 if (test_bit(MD_BROKEN, &rdev->mddev->flags))
2963 err = -EBUSY;
2964 else
2965 err = 0;
2966 } else if (cmd_match(buf, "remove")) {
2967 if (rdev->mddev->pers) {
2968 clear_bit(Blocked, &rdev->flags);
2969 remove_and_add_spares(rdev->mddev, rdev);
2970 }
2971 if (rdev->raid_disk >= 0)
2972 err = -EBUSY;
2973 else {
2974 err = 0;
2975 if (mddev_is_clustered(mddev))
2976 err = md_cluster_ops->remove_disk(mddev, rdev);
2977
2978 if (err == 0) {
2979 md_kick_rdev_from_array(rdev);
2980 if (mddev->pers) {
2981 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2982 md_wakeup_thread(mddev->thread);
2983 }
2984 md_new_event();
2985 }
2986 }
2987 } else if (cmd_match(buf, "writemostly")) {
2988 set_bit(WriteMostly, &rdev->flags);
2989 mddev_create_serial_pool(rdev->mddev, rdev, false);
2990 need_update_sb = true;
2991 err = 0;
2992 } else if (cmd_match(buf, "-writemostly")) {
2993 mddev_destroy_serial_pool(rdev->mddev, rdev, false);
2994 clear_bit(WriteMostly, &rdev->flags);
2995 need_update_sb = true;
2996 err = 0;
2997 } else if (cmd_match(buf, "blocked")) {
2998 set_bit(Blocked, &rdev->flags);
2999 err = 0;
3000 } else if (cmd_match(buf, "-blocked")) {
3001 if (!test_bit(Faulty, &rdev->flags) &&
3002 !test_bit(ExternalBbl, &rdev->flags) &&
3003 rdev->badblocks.unacked_exist) {
3004 /* metadata handler doesn't understand badblocks,
3005 * so we need to fail the device
3006 */
3007 md_error(rdev->mddev, rdev);
3008 }
3009 clear_bit(Blocked, &rdev->flags);
3010 clear_bit(BlockedBadBlocks, &rdev->flags);
3011 wake_up(&rdev->blocked_wait);
3012 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3013 md_wakeup_thread(rdev->mddev->thread);
3014
3015 err = 0;
3016 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
3017 set_bit(In_sync, &rdev->flags);
3018 err = 0;
3019 } else if (cmd_match(buf, "failfast")) {
3020 set_bit(FailFast, &rdev->flags);
3021 need_update_sb = true;
3022 err = 0;
3023 } else if (cmd_match(buf, "-failfast")) {
3024 clear_bit(FailFast, &rdev->flags);
3025 need_update_sb = true;
3026 err = 0;
3027 } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
3028 !test_bit(Journal, &rdev->flags)) {
3029 if (rdev->mddev->pers == NULL) {
3030 clear_bit(In_sync, &rdev->flags);
3031 rdev->saved_raid_disk = rdev->raid_disk;
3032 rdev->raid_disk = -1;
3033 err = 0;
3034 }
3035 } else if (cmd_match(buf, "write_error")) {
3036 set_bit(WriteErrorSeen, &rdev->flags);
3037 err = 0;
3038 } else if (cmd_match(buf, "-write_error")) {
3039 clear_bit(WriteErrorSeen, &rdev->flags);
3040 err = 0;
3041 } else if (cmd_match(buf, "want_replacement")) {
3042 /* Any non-spare device that is not a replacement can
3043 * become want_replacement at any time, but we then need to
3044 * check if recovery is needed.
3045 */
3046 if (rdev->raid_disk >= 0 &&
3047 !test_bit(Journal, &rdev->flags) &&
3048 !test_bit(Replacement, &rdev->flags))
3049 set_bit(WantReplacement, &rdev->flags);
3050 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3051 md_wakeup_thread(rdev->mddev->thread);
3052 err = 0;
3053 } else if (cmd_match(buf, "-want_replacement")) {
3054 /* Clearing 'want_replacement' is always allowed.
3055 * Once replacement starts it is too late though.
3056 */
3057 err = 0;
3058 clear_bit(WantReplacement, &rdev->flags);
3059 } else if (cmd_match(buf, "replacement")) {
3060 /* Can only set a device as a replacement when array has not
3061 * yet been started. Once running, replacement is automatic
3062 * from spares, or by assigning 'slot'.
3063 */
3064 if (rdev->mddev->pers)
3065 err = -EBUSY;
3066 else {
3067 set_bit(Replacement, &rdev->flags);
3068 err = 0;
3069 }
3070 } else if (cmd_match(buf, "-replacement")) {
3071 /* Similarly, can only clear Replacement before start */
3072 if (rdev->mddev->pers)
3073 err = -EBUSY;
3074 else {
3075 clear_bit(Replacement, &rdev->flags);
3076 err = 0;
3077 }
3078 } else if (cmd_match(buf, "re-add")) {
3079 if (!rdev->mddev->pers)
3080 err = -EINVAL;
3081 else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
3082 rdev->saved_raid_disk >= 0) {
3083 /* clear_bit is performed _after_ all the devices
3084 * have their local Faulty bit cleared. If any writes
3085 * happen in the meantime in the local node, they
3086 * will land in the local bitmap, which will be synced
3087 * by this node eventually
3088 */
3089 if (!mddev_is_clustered(rdev->mddev) ||
3090 (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
3091 clear_bit(Faulty, &rdev->flags);
3092 err = add_bound_rdev(rdev);
3093 }
3094 } else
3095 err = -EBUSY;
3096 } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
3097 set_bit(ExternalBbl, &rdev->flags);
3098 rdev->badblocks.shift = 0;
3099 err = 0;
3100 } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
3101 clear_bit(ExternalBbl, &rdev->flags);
3102 err = 0;
3103 }
3104 if (need_update_sb)
3105 md_update_sb(mddev, 1);
3106 if (!err)
3107 sysfs_notify_dirent_safe(rdev->sysfs_state);
3108 return err ? err : len;
3109 }
3110 static struct rdev_sysfs_entry rdev_state =
3111 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
3112
3113 static ssize_t
3114 errors_show(struct md_rdev *rdev, char *page)
3115 {
3116 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
3117 }
3118
3119 static ssize_t
3120 errors_store(struct md_rdev *rdev, const char *buf, size_t len)
3121 {
3122 unsigned int n;
3123 int rv;
3124
3125 rv = kstrtouint(buf, 10, &n);
3126 if (rv < 0)
3127 return rv;
3128 atomic_set(&rdev->corrected_errors, n);
3129 return len;
3130 }
3131 static struct rdev_sysfs_entry rdev_errors =
3132 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
3133
3134 static ssize_t
3135 slot_show(struct md_rdev *rdev, char *page)
3136 {
3137 if (test_bit(Journal, &rdev->flags))
3138 return sprintf(page, "journal\n");
3139 else if (rdev->raid_disk < 0)
3140 return sprintf(page, "none\n");
3141 else
3142 return sprintf(page, "%d\n", rdev->raid_disk);
3143 }
3144
3145 static ssize_t
3146 slot_store(struct md_rdev *rdev, const char *buf, size_t len)
3147 {
3148 int slot;
3149 int err;
3150
3151 if (test_bit(Journal, &rdev->flags))
3152 return -EBUSY;
3153 if (strncmp(buf, "none", 4)==0)
3154 slot = -1;
3155 else {
3156 err = kstrtouint(buf, 10, (unsigned int *)&slot);
3157 if (err < 0)
3158 return err;
3159 if (slot < 0)
3160 /* overflow */
3161 return -ENOSPC;
3162 }
3163 if (rdev->mddev->pers && slot == -1) {
3164 /* Setting 'slot' on an active array requires also
3165 * updating the 'rd%d' link, and communicating
3166 * with the personality with ->hot_*_disk.
3167 * For now we only support removing
3168 * failed/spare devices. This normally happens automatically,
3169 * but not when the metadata is externally managed.
3170 */
3171 if (rdev->raid_disk == -1)
3172 return -EEXIST;
3173 /* personality does all needed checks */
3174 if (rdev->mddev->pers->hot_remove_disk == NULL)
3175 return -EINVAL;
3176 clear_bit(Blocked, &rdev->flags);
3177 remove_and_add_spares(rdev->mddev, rdev);
3178 if (rdev->raid_disk >= 0)
3179 return -EBUSY;
3180 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3181 md_wakeup_thread(rdev->mddev->thread);
3182 } else if (rdev->mddev->pers) {
3183 /* Activating a spare .. or possibly reactivating
3184 * if we ever get bitmaps working here.
3185 */
3186 int err;
3187
3188 if (rdev->raid_disk != -1)
3189 return -EBUSY;
3190
3191 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
3192 return -EBUSY;
3193
3194 if (rdev->mddev->pers->hot_add_disk == NULL)
3195 return -EINVAL;
3196
3197 if (slot >= rdev->mddev->raid_disks &&
3198 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3199 return -ENOSPC;
3200
3201 rdev->raid_disk = slot;
3202 if (test_bit(In_sync, &rdev->flags))
3203 rdev->saved_raid_disk = slot;
3204 else
3205 rdev->saved_raid_disk = -1;
3206 clear_bit(In_sync, &rdev->flags);
3207 clear_bit(Bitmap_sync, &rdev->flags);
3208 err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev);
3209 if (err) {
3210 rdev->raid_disk = -1;
3211 return err;
3212 } else
3213 sysfs_notify_dirent_safe(rdev->sysfs_state);
3214 /* failure here is OK */;
3215 sysfs_link_rdev(rdev->mddev, rdev);
3216 /* don't wakeup anyone, leave that to userspace. */
3217 } else {
3218 if (slot >= rdev->mddev->raid_disks &&
3219 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3220 return -ENOSPC;
3221 rdev->raid_disk = slot;
3222 /* assume it is working */
3223 clear_bit(Faulty, &rdev->flags);
3224 clear_bit(WriteMostly, &rdev->flags);
3225 set_bit(In_sync, &rdev->flags);
3226 sysfs_notify_dirent_safe(rdev->sysfs_state);
3227 }
3228 return len;
3229 }
3230
3231 static struct rdev_sysfs_entry rdev_slot =
3232 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
3233
3234 static ssize_t
3235 offset_show(struct md_rdev *rdev, char *page)
3236 {
3237 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
3238 }
3239
3240 static ssize_t
3241 offset_store(struct md_rdev *rdev, const char *buf, size_t len)
3242 {
3243 unsigned long long offset;
3244 if (kstrtoull(buf, 10, &offset) < 0)
3245 return -EINVAL;
3246 if (rdev->mddev->pers && rdev->raid_disk >= 0)
3247 return -EBUSY;
3248 if (rdev->sectors && rdev->mddev->external)
3249 /* Must set offset before size, so overlap checks
3250 * can be sane */
3251 return -EBUSY;
3252 rdev->data_offset = offset;
3253 rdev->new_data_offset = offset;
3254 return len;
3255 }
3256
3257 static struct rdev_sysfs_entry rdev_offset =
3258 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
3259
3260 static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
3261 {
3262 return sprintf(page, "%llu\n",
3263 (unsigned long long)rdev->new_data_offset);
3264 }
3265
3266 static ssize_t new_offset_store(struct md_rdev *rdev,
3267 const char *buf, size_t len)
3268 {
3269 unsigned long long new_offset;
3270 struct mddev *mddev = rdev->mddev;
3271
3272 if (kstrtoull(buf, 10, &new_offset) < 0)
3273 return -EINVAL;
3274
3275 if (mddev->sync_thread ||
3276 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
3277 return -EBUSY;
3278 if (new_offset == rdev->data_offset)
3279 /* reset is always permitted */
3280 ;
3281 else if (new_offset > rdev->data_offset) {
3282 /* must not push array size beyond rdev_sectors */
3283 if (new_offset - rdev->data_offset
3284 + mddev->dev_sectors > rdev->sectors)
3285 return -E2BIG;
3286 }
3287 /* Metadata worries about other space details. */
3288
3289 /* decreasing the offset is inconsistent with a backwards
3290 * reshape.
3291 */
3292 if (new_offset < rdev->data_offset &&
3293 mddev->reshape_backwards)
3294 return -EINVAL;
3295 /* Increasing offset is inconsistent with forwards
3296 * reshape. reshape_direction should be set to
3297 * 'backwards' first.
3298 */
3299 if (new_offset > rdev->data_offset &&
3300 !mddev->reshape_backwards)
3301 return -EINVAL;
3302
3303 if (mddev->pers && mddev->persistent &&
3304 !super_types[mddev->major_version]
3305 .allow_new_offset(rdev, new_offset))
3306 return -E2BIG;
3307 rdev->new_data_offset = new_offset;
3308 if (new_offset > rdev->data_offset)
3309 mddev->reshape_backwards = 1;
3310 else if (new_offset < rdev->data_offset)
3311 mddev->reshape_backwards = 0;
3312
3313 return len;
3314 }
3315 static struct rdev_sysfs_entry rdev_new_offset =
3316 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
3317
3318 static ssize_t
3319 rdev_size_show(struct md_rdev *rdev, char *page)
3320 {
3321 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
3322 }
3323
3324 static int md_rdevs_overlap(struct md_rdev *a, struct md_rdev *b)
3325 {
3326 /* check if two start/length pairs overlap */
3327 if (a->data_offset + a->sectors <= b->data_offset)
3328 return false;
3329 if (b->data_offset + b->sectors <= a->data_offset)
3330 return false;
3331 return true;
3332 }
3333
3334 static bool md_rdev_overlaps(struct md_rdev *rdev)
3335 {
3336 struct mddev *mddev;
3337 struct md_rdev *rdev2;
3338
3339 spin_lock(&all_mddevs_lock);
3340 list_for_each_entry(mddev, &all_mddevs, all_mddevs) {
3341 if (test_bit(MD_DELETED, &mddev->flags))
3342 continue;
3343 rdev_for_each(rdev2, mddev) {
3344 if (rdev != rdev2 && rdev->bdev == rdev2->bdev &&
3345 md_rdevs_overlap(rdev, rdev2)) {
3346 spin_unlock(&all_mddevs_lock);
3347 return true;
3348 }
3349 }
3350 }
3351 spin_unlock(&all_mddevs_lock);
3352 return false;
3353 }
3354
3355 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
3356 {
3357 unsigned long long blocks;
3358 sector_t new;
3359
3360 if (kstrtoull(buf, 10, &blocks) < 0)
3361 return -EINVAL;
3362
3363 if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
3364 return -EINVAL; /* sector conversion overflow */
3365
3366 new = blocks * 2;
3367 if (new != blocks * 2)
3368 return -EINVAL; /* unsigned long long to sector_t overflow */
3369
3370 *sectors = new;
3371 return 0;
3372 }
3373
3374 static ssize_t
3375 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3376 {
3377 struct mddev *my_mddev = rdev->mddev;
3378 sector_t oldsectors = rdev->sectors;
3379 sector_t sectors;
3380
3381 if (test_bit(Journal, &rdev->flags))
3382 return -EBUSY;
3383 if (strict_blocks_to_sectors(buf, &sectors) < 0)
3384 return -EINVAL;
3385 if (rdev->data_offset != rdev->new_data_offset)
3386 return -EINVAL; /* too confusing */
3387 if (my_mddev->pers && rdev->raid_disk >= 0) {
3388 if (my_mddev->persistent) {
3389 sectors = super_types[my_mddev->major_version].
3390 rdev_size_change(rdev, sectors);
3391 if (!sectors)
3392 return -EBUSY;
3393 } else if (!sectors)
3394 sectors = bdev_nr_sectors(rdev->bdev) -
3395 rdev->data_offset;
3396 if (!my_mddev->pers->resize)
3397 /* Cannot change size for RAID0 or Linear etc */
3398 return -EINVAL;
3399 }
3400 if (sectors < my_mddev->dev_sectors)
3401 return -EINVAL; /* component must fit device */
3402
3403 rdev->sectors = sectors;
3404
3405 /*
3406 * Check that all other rdevs with the same bdev do not overlap. This
3407 * check does not provide a hard guarantee, it just helps avoid
3408 * dangerous mistakes.
3409 */
3410 if (sectors > oldsectors && my_mddev->external &&
3411 md_rdev_overlaps(rdev)) {
3412 /*
3413 * Someone else could have slipped in a size change here, but
3414 * doing so is just silly. We put oldsectors back because we
3415 * know it is safe, and trust userspace not to race with itself.
3416 */
3417 rdev->sectors = oldsectors;
3418 return -EBUSY;
3419 }
3420 return len;
3421 }
3422
3423 static struct rdev_sysfs_entry rdev_size =
3424 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
3425
3426 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
3427 {
3428 unsigned long long recovery_start = rdev->recovery_offset;
3429
3430 if (test_bit(In_sync, &rdev->flags) ||
3431 recovery_start == MaxSector)
3432 return sprintf(page, "none\n");
3433
3434 return sprintf(page, "%llu\n", recovery_start);
3435 }
3436
3437 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
3438 {
3439 unsigned long long recovery_start;
3440
3441 if (cmd_match(buf, "none"))
3442 recovery_start = MaxSector;
3443 else if (kstrtoull(buf, 10, &recovery_start))
3444 return -EINVAL;
3445
3446 if (rdev->mddev->pers &&
3447 rdev->raid_disk >= 0)
3448 return -EBUSY;
3449
3450 rdev->recovery_offset = recovery_start;
3451 if (recovery_start == MaxSector)
3452 set_bit(In_sync, &rdev->flags);
3453 else
3454 clear_bit(In_sync, &rdev->flags);
3455 return len;
3456 }
3457
3458 static struct rdev_sysfs_entry rdev_recovery_start =
3459 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
3460
3461 /* sysfs access to bad-blocks list.
3462 * We present two files.
3463 * 'bad_blocks' lists sector numbers and lengths of ranges that
3464 * are recorded as bad. The list is truncated to fit within
3465 * the one-page limit of sysfs.
3466 * Writing "sector length" to this file adds an acknowledged
3467 * bad block.
3468 * 'unacknowledged_bad_blocks' lists bad blocks that have not yet
3469 * been acknowledged. Writing to this file adds bad blocks
3470 * without acknowledging them. This is largely for testing.
3471 */
3472 static ssize_t bb_show(struct md_rdev *rdev, char *page)
3473 {
3474 return badblocks_show(&rdev->badblocks, page, 0);
3475 }
3476 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
3477 {
3478 int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3479 /* Maybe that ack was all we needed */
3480 if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3481 wake_up(&rdev->blocked_wait);
3482 return rv;
3483 }
3484 static struct rdev_sysfs_entry rdev_bad_blocks =
3485 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
3486
3487 static ssize_t ubb_show(struct md_rdev *rdev, char *page)
3488 {
3489 return badblocks_show(&rdev->badblocks, page, 1);
3490 }
3491 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
3492 {
3493 return badblocks_store(&rdev->badblocks, page, len, 1);
3494 }
3495 static struct rdev_sysfs_entry rdev_unack_bad_blocks =
3496 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
3497
3498 static ssize_t
3499 ppl_sector_show(struct md_rdev *rdev, char *page)
3500 {
3501 return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector);
3502 }
3503
3504 static ssize_t
3505 ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len)
3506 {
3507 unsigned long long sector;
3508
3509 if (kstrtoull(buf, 10, &sector) < 0)
3510 return -EINVAL;
3511 if (sector != (sector_t)sector)
3512 return -EINVAL;
3513
3514 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3515 rdev->raid_disk >= 0)
3516 return -EBUSY;
3517
3518 if (rdev->mddev->persistent) {
3519 if (rdev->mddev->major_version == 0)
3520 return -EINVAL;
3521 if ((sector > rdev->sb_start &&
3522 sector - rdev->sb_start > S16_MAX) ||
3523 (sector < rdev->sb_start &&
3524 rdev->sb_start - sector > -S16_MIN))
3525 return -EINVAL;
3526 rdev->ppl.offset = sector - rdev->sb_start;
3527 } else if (!rdev->mddev->external) {
3528 return -EBUSY;
3529 }
3530 rdev->ppl.sector = sector;
3531 return len;
3532 }
3533
3534 static struct rdev_sysfs_entry rdev_ppl_sector =
3535 __ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store);
3536
3537 static ssize_t
3538 ppl_size_show(struct md_rdev *rdev, char *page)
3539 {
3540 return sprintf(page, "%u\n", rdev->ppl.size);
3541 }
3542
3543 static ssize_t
3544 ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3545 {
3546 unsigned int size;
3547
3548 if (kstrtouint(buf, 10, &size) < 0)
3549 return -EINVAL;
3550
3551 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3552 rdev->raid_disk >= 0)
3553 return -EBUSY;
3554
3555 if (rdev->mddev->persistent) {
3556 if (rdev->mddev->major_version == 0)
3557 return -EINVAL;
3558 if (size > U16_MAX)
3559 return -EINVAL;
3560 } else if (!rdev->mddev->external) {
3561 return -EBUSY;
3562 }
3563 rdev->ppl.size = size;
3564 return len;
3565 }
3566
3567 static struct rdev_sysfs_entry rdev_ppl_size =
3568 __ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store);
3569
3570 static struct attribute *rdev_default_attrs[] = {
3571 &rdev_state.attr,
3572 &rdev_errors.attr,
3573 &rdev_slot.attr,
3574 &rdev_offset.attr,
3575 &rdev_new_offset.attr,
3576 &rdev_size.attr,
3577 &rdev_recovery_start.attr,
3578 &rdev_bad_blocks.attr,
3579 &rdev_unack_bad_blocks.attr,
3580 &rdev_ppl_sector.attr,
3581 &rdev_ppl_size.attr,
3582 NULL,
3583 };
3584 ATTRIBUTE_GROUPS(rdev_default);
3585 static ssize_t
3586 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3587 {
3588 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3589 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3590
3591 if (!entry->show)
3592 return -EIO;
3593 if (!rdev->mddev)
3594 return -ENODEV;
3595 return entry->show(rdev, page);
3596 }
3597
3598 static ssize_t
3599 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3600 const char *page, size_t length)
3601 {
3602 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3603 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3604 struct kernfs_node *kn = NULL;
3605 ssize_t rv;
3606 struct mddev *mddev = rdev->mddev;
3607
3608 if (!entry->store)
3609 return -EIO;
3610 if (!capable(CAP_SYS_ADMIN))
3611 return -EACCES;
3612
3613 if (entry->store == state_store && cmd_match(page, "remove"))
3614 kn = sysfs_break_active_protection(kobj, attr);
3615
3616 rv = mddev ? mddev_lock(mddev) : -ENODEV;
3617 if (!rv) {
3618 if (rdev->mddev == NULL)
3619 rv = -ENODEV;
3620 else
3621 rv = entry->store(rdev, page, length);
3622 mddev_unlock(mddev);
3623 }
3624
3625 if (kn)
3626 sysfs_unbreak_active_protection(kn);
3627
3628 return rv;
3629 }
3630
3631 static void rdev_free(struct kobject *ko)
3632 {
3633 struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
3634 kfree(rdev);
3635 }
3636 static const struct sysfs_ops rdev_sysfs_ops = {
3637 .show = rdev_attr_show,
3638 .store = rdev_attr_store,
3639 };
3640 static const struct kobj_type rdev_ktype = {
3641 .release = rdev_free,
3642 .sysfs_ops = &rdev_sysfs_ops,
3643 .default_groups = rdev_default_groups,
3644 };
3645
3646 int md_rdev_init(struct md_rdev *rdev)
3647 {
3648 rdev->desc_nr = -1;
3649 rdev->saved_raid_disk = -1;
3650 rdev->raid_disk = -1;
3651 rdev->flags = 0;
3652 rdev->data_offset = 0;
3653 rdev->new_data_offset = 0;
3654 rdev->sb_events = 0;
3655 rdev->last_read_error = 0;
3656 rdev->sb_loaded = 0;
3657 rdev->bb_page = NULL;
3658 atomic_set(&rdev->nr_pending, 0);
3659 atomic_set(&rdev->read_errors, 0);
3660 atomic_set(&rdev->corrected_errors, 0);
3661
3662 INIT_LIST_HEAD(&rdev->same_set);
3663 init_waitqueue_head(&rdev->blocked_wait);
3664
3665 /* Add space to store bad block list.
3666 * This reserves the space even on arrays where it cannot
3667 * be used - I wonder if that matters
3668 */
3669 return badblocks_init(&rdev->badblocks, 0);
3670 }
3671 EXPORT_SYMBOL_GPL(md_rdev_init);
3672
3673 /*
3674 * Import a device. If 'super_format' >= 0, then sanity check the superblock
3675 *
3676 * mark the device faulty if:
3677 *
3678 * - the device is nonexistent (zero size)
3679 * - the device has no valid superblock
3680 *
3681 * a faulty rdev _never_ has rdev->sb set.
3682 */
3683 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
3684 {
3685 struct md_rdev *rdev;
3686 struct md_rdev *holder;
3687 sector_t size;
3688 int err;
3689
3690 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
3691 if (!rdev)
3692 return ERR_PTR(-ENOMEM);
3693
3694 err = md_rdev_init(rdev);
3695 if (err)
3696 goto out_free_rdev;
3697 err = alloc_disk_sb(rdev);
3698 if (err)
3699 goto out_clear_rdev;
3700
3701 if (super_format == -2) {
3702 holder = &claim_rdev;
3703 } else {
3704 holder = rdev;
3705 set_bit(Holder, &rdev->flags);
3706 }
3707
3708 rdev->bdev = blkdev_get_by_dev(newdev, BLK_OPEN_READ | BLK_OPEN_WRITE,
3709 holder, NULL);
3710 if (IS_ERR(rdev->bdev)) {
3711 pr_warn("md: could not open device unknown-block(%u,%u).\n",
3712 MAJOR(newdev), MINOR(newdev));
3713 err = PTR_ERR(rdev->bdev);
3714 goto out_clear_rdev;
3715 }
3716
3717 kobject_init(&rdev->kobj, &rdev_ktype);
3718
3719 size = bdev_nr_bytes(rdev->bdev) >> BLOCK_SIZE_BITS;
3720 if (!size) {
3721 pr_warn("md: %pg has zero or unknown size, marking faulty!\n",
3722 rdev->bdev);
3723 err = -EINVAL;
3724 goto out_blkdev_put;
3725 }
3726
3727 if (super_format >= 0) {
3728 err = super_types[super_format].
3729 load_super(rdev, NULL, super_minor);
3730 if (err == -EINVAL) {
3731 pr_warn("md: %pg does not have a valid v%d.%d superblock, not importing!\n",
3732 rdev->bdev,
3733 super_format, super_minor);
3734 goto out_blkdev_put;
3735 }
3736 if (err < 0) {
3737 pr_warn("md: could not read %pg's sb, not importing!\n",
3738 rdev->bdev);
3739 goto out_blkdev_put;
3740 }
3741 }
3742
3743 return rdev;
3744
3745 out_blkdev_put:
3746 blkdev_put(rdev->bdev, holder);
3747 out_clear_rdev:
3748 md_rdev_clear(rdev);
3749 out_free_rdev:
3750 kfree(rdev);
3751 return ERR_PTR(err);
3752 }
3753
3754 /*
3755 * Check a full RAID array for plausibility
3756 */
3757
3758 static int analyze_sbs(struct mddev *mddev)
3759 {
3760 int i;
3761 struct md_rdev *rdev, *freshest, *tmp;
3762
3763 freshest = NULL;
3764 rdev_for_each_safe(rdev, tmp, mddev)
3765 switch (super_types[mddev->major_version].
3766 load_super(rdev, freshest, mddev->minor_version)) {
3767 case 1:
3768 freshest = rdev;
3769 break;
3770 case 0:
3771 break;
3772 default:
3773 pr_warn("md: fatal superblock inconsistency in %pg -- removing from array\n",
3774 rdev->bdev);
3775 md_kick_rdev_from_array(rdev);
3776 }
3777
3778 /* Cannot find a valid fresh disk */
3779 if (!freshest) {
3780 pr_warn("md: cannot find a valid disk\n");
3781 return -EINVAL;
3782 }
3783
3784 super_types[mddev->major_version].
3785 validate_super(mddev, NULL/*freshest*/, freshest);
3786
3787 i = 0;
3788 rdev_for_each_safe(rdev, tmp, mddev) {
3789 if (mddev->max_disks &&
3790 (rdev->desc_nr >= mddev->max_disks ||
3791 i > mddev->max_disks)) {
3792 pr_warn("md: %s: %pg: only %d devices permitted\n",
3793 mdname(mddev), rdev->bdev,
3794 mddev->max_disks);
3795 md_kick_rdev_from_array(rdev);
3796 continue;
3797 }
3798 if (rdev != freshest) {
3799 if (super_types[mddev->major_version].
3800 validate_super(mddev, freshest, rdev)) {
3801 pr_warn("md: kicking non-fresh %pg from array!\n",
3802 rdev->bdev);
3803 md_kick_rdev_from_array(rdev);
3804 continue;
3805 }
3806 }
3807 if (mddev->level == LEVEL_MULTIPATH) {
3808 rdev->desc_nr = i++;
3809 rdev->raid_disk = rdev->desc_nr;
3810 set_bit(In_sync, &rdev->flags);
3811 } else if (rdev->raid_disk >=
3812 (mddev->raid_disks - min(0, mddev->delta_disks)) &&
3813 !test_bit(Journal, &rdev->flags)) {
3814 rdev->raid_disk = -1;
3815 clear_bit(In_sync, &rdev->flags);
3816 }
3817 }
3818
3819 return 0;
3820 }
3821
3822 /* Read a fixed-point number.
3823 * Numbers in sysfs attributes should be in "standard" units where
3824 * possible, so time should be in seconds.
3825 * However we internally use a much smaller unit, such as
3826 * milliseconds or jiffies.
3827 * This function takes a decimal number with a possible fractional
3828 * component, and produces an integer which is the result of
3829 * multiplying that number by 10^'scale',
3830 * all without any floating-point arithmetic.
3831 */
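/*
 * Worked example (illustrative): with scale == 3, "0.273" yields 273
 * and "5" yields 5000, i.e. a value written in seconds becomes the
 * number of milliseconds used internally (see safe_delay_store()).
 */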
3832 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3833 {
3834 unsigned long result = 0;
3835 long decimals = -1;
3836 while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3837 if (*cp == '.')
3838 decimals = 0;
3839 else if (decimals < scale) {
3840 unsigned int value;
3841 value = *cp - '0';
3842 result = result * 10 + value;
3843 if (decimals >= 0)
3844 decimals++;
3845 }
3846 cp++;
3847 }
3848 if (*cp == '\n')
3849 cp++;
3850 if (*cp)
3851 return -EINVAL;
3852 if (decimals < 0)
3853 decimals = 0;
3854 *res = result * int_pow(10, scale - decimals);
3855 return 0;
3856 }
3857
3858 static ssize_t
3859 safe_delay_show(struct mddev *mddev, char *page)
3860 {
3861 unsigned int msec = ((unsigned long)mddev->safemode_delay*1000)/HZ;
3862
3863 return sprintf(page, "%u.%03u\n", msec/1000, msec%1000);
3864 }
3865 static ssize_t
3866 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
3867 {
3868 unsigned long msec;
3869
3870 if (mddev_is_clustered(mddev)) {
3871 pr_warn("md: Safemode is disabled for clustered mode\n");
3872 return -EINVAL;
3873 }
3874
3875 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0 || msec > UINT_MAX / HZ)
3876 return -EINVAL;
3877 if (msec == 0)
3878 mddev->safemode_delay = 0;
3879 else {
3880 unsigned long old_delay = mddev->safemode_delay;
3881 unsigned long new_delay = (msec*HZ)/1000;
3882
3883 if (new_delay == 0)
3884 new_delay = 1;
3885 mddev->safemode_delay = new_delay;
3886 if (new_delay < old_delay || old_delay == 0)
3887 mod_timer(&mddev->safemode_timer, jiffies+1);
3888 }
3889 return len;
3890 }
3891 static struct md_sysfs_entry md_safe_delay =
3892 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
3893
3894 static ssize_t
3895 level_show(struct mddev *mddev, char *page)
3896 {
3897 struct md_personality *p;
3898 int ret;
3899 spin_lock(&mddev->lock);
3900 p = mddev->pers;
3901 if (p)
3902 ret = sprintf(page, "%s\n", p->name);
3903 else if (mddev->clevel[0])
3904 ret = sprintf(page, "%s\n", mddev->clevel);
3905 else if (mddev->level != LEVEL_NONE)
3906 ret = sprintf(page, "%d\n", mddev->level);
3907 else
3908 ret = 0;
3909 spin_unlock(&mddev->lock);
3910 return ret;
3911 }
3912
3913 static ssize_t
3914 level_store(struct mddev *mddev, const char *buf, size_t len)
3915 {
3916 char clevel[16];
3917 ssize_t rv;
3918 size_t slen = len;
3919 struct md_personality *pers, *oldpers;
3920 long level;
3921 void *priv, *oldpriv;
3922 struct md_rdev *rdev;
3923
3924 if (slen == 0 || slen >= sizeof(clevel))
3925 return -EINVAL;
3926
3927 rv = mddev_lock(mddev);
3928 if (rv)
3929 return rv;
3930
3931 if (mddev->pers == NULL) {
3932 strncpy(mddev->clevel, buf, slen);
3933 if (mddev->clevel[slen-1] == '\n')
3934 slen--;
3935 mddev->clevel[slen] = 0;
3936 mddev->level = LEVEL_NONE;
3937 rv = len;
3938 goto out_unlock;
3939 }
3940 rv = -EROFS;
3941 if (!md_is_rdwr(mddev))
3942 goto out_unlock;
3943
3944 /* request to change the personality. Need to ensure:
3945 * - array is not engaged in resync/recovery/reshape
3946 * - old personality can be suspended
3947 * - new personality can take over the array.
3948 */
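/* Illustrative example (hypothetical): "echo raid6 > /sys/block/md0/md/level"
 * on a quiescent raid5 array would request such an online takeover.
 */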
3949
3950 rv = -EBUSY;
3951 if (mddev->sync_thread ||
3952 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3953 mddev->reshape_position != MaxSector ||
3954 mddev->sysfs_active)
3955 goto out_unlock;
3956
3957 rv = -EINVAL;
3958 if (!mddev->pers->quiesce) {
3959 pr_warn("md: %s: %s does not support online personality change\n",
3960 mdname(mddev), mddev->pers->name);
3961 goto out_unlock;
3962 }
3963
3964 /* Now find the new personality */
3965 strncpy(clevel, buf, slen);
3966 if (clevel[slen-1] == '\n')
3967 slen--;
3968 clevel[slen] = 0;
3969 if (kstrtol(clevel, 10, &level))
3970 level = LEVEL_NONE;
3971
3972 if (request_module("md-%s", clevel) != 0)
3973 request_module("md-level-%s", clevel);
3974 spin_lock(&pers_lock);
3975 pers = find_pers(level, clevel);
3976 if (!pers || !try_module_get(pers->owner)) {
3977 spin_unlock(&pers_lock);
3978 pr_warn("md: personality %s not loaded\n", clevel);
3979 rv = -EINVAL;
3980 goto out_unlock;
3981 }
3982 spin_unlock(&pers_lock);
3983
3984 if (pers == mddev->pers) {
3985 /* Nothing to do! */
3986 module_put(pers->owner);
3987 rv = len;
3988 goto out_unlock;
3989 }
3990 if (!pers->takeover) {
3991 module_put(pers->owner);
3992 pr_warn("md: %s: %s does not support personality takeover\n",
3993 mdname(mddev), clevel);
3994 rv = -EINVAL;
3995 goto out_unlock;
3996 }
3997
3998 rdev_for_each(rdev, mddev)
3999 rdev->new_raid_disk = rdev->raid_disk;
4000
4001 /* ->takeover must set new_* and/or delta_disks
4002 * if it succeeds, and may set them when it fails.
4003 */
4004 priv = pers->takeover(mddev);
4005 if (IS_ERR(priv)) {
4006 mddev->new_level = mddev->level;
4007 mddev->new_layout = mddev->layout;
4008 mddev->new_chunk_sectors = mddev->chunk_sectors;
4009 mddev->raid_disks -= mddev->delta_disks;
4010 mddev->delta_disks = 0;
4011 mddev->reshape_backwards = 0;
4012 module_put(pers->owner);
4013 pr_warn("md: %s: %s would not accept array\n",
4014 mdname(mddev), clevel);
4015 rv = PTR_ERR(priv);
4016 goto out_unlock;
4017 }
4018
4019 /* Looks like we have a winner */
4020 mddev_suspend(mddev);
4021 mddev_detach(mddev);
4022
4023 spin_lock(&mddev->lock);
4024 oldpers = mddev->pers;
4025 oldpriv = mddev->private;
4026 mddev->pers = pers;
4027 mddev->private = priv;
4028 strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
4029 mddev->level = mddev->new_level;
4030 mddev->layout = mddev->new_layout;
4031 mddev->chunk_sectors = mddev->new_chunk_sectors;
4032 mddev->delta_disks = 0;
4033 mddev->reshape_backwards = 0;
4034 mddev->degraded = 0;
4035 spin_unlock(&mddev->lock);
4036
4037 if (oldpers->sync_request == NULL &&
4038 mddev->external) {
4039 /* We are converting from a no-redundancy array
4040 * to a redundancy array and metadata is managed
4041 * externally so we need to be sure that writes
4042 * won't block due to a need to transition
4043 * clean->dirty
4044 * until external management is started.
4045 */
4046 mddev->in_sync = 0;
4047 mddev->safemode_delay = 0;
4048 mddev->safemode = 0;
4049 }
4050
4051 oldpers->free(mddev, oldpriv);
4052
4053 if (oldpers->sync_request == NULL &&
4054 pers->sync_request != NULL) {
4055 /* need to add the md_redundancy_group */
4056 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
4057 pr_warn("md: cannot register extra attributes for %s\n",
4058 mdname(mddev));
4059 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
4060 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
4061 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
4062 }
4063 if (oldpers->sync_request != NULL &&
4064 pers->sync_request == NULL) {
4065 /* need to remove the md_redundancy_group */
4066 if (mddev->to_remove == NULL)
4067 mddev->to_remove = &md_redundancy_group;
4068 }
4069
4070 module_put(oldpers->owner);
4071
4072 rdev_for_each(rdev, mddev) {
4073 if (rdev->raid_disk < 0)
4074 continue;
4075 if (rdev->new_raid_disk >= mddev->raid_disks)
4076 rdev->new_raid_disk = -1;
4077 if (rdev->new_raid_disk == rdev->raid_disk)
4078 continue;
4079 sysfs_unlink_rdev(mddev, rdev);
4080 }
4081 rdev_for_each(rdev, mddev) {
4082 if (rdev->raid_disk < 0)
4083 continue;
4084 if (rdev->new_raid_disk == rdev->raid_disk)
4085 continue;
4086 rdev->raid_disk = rdev->new_raid_disk;
4087 if (rdev->raid_disk < 0)
4088 clear_bit(In_sync, &rdev->flags);
4089 else {
4090 if (sysfs_link_rdev(mddev, rdev))
4091 pr_warn("md: cannot register rd%d for %s after level change\n",
4092 rdev->raid_disk, mdname(mddev));
4093 }
4094 }
4095
4096 if (pers->sync_request == NULL) {
4097 /* this is now an array without redundancy, so
4098 * it must always be in_sync
4099 */
4100 mddev->in_sync = 1;
4101 del_timer_sync(&mddev->safemode_timer);
4102 }
4103 blk_set_stacking_limits(&mddev->queue->limits);
4104 pers->run(mddev);
4105 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4106 mddev_resume(mddev);
4107 if (!mddev->thread)
4108 md_update_sb(mddev, 1);
4109 sysfs_notify_dirent_safe(mddev->sysfs_level);
4110 md_new_event();
4111 rv = len;
4112 out_unlock:
4113 mddev_unlock(mddev);
4114 return rv;
4115 }
4116
4117 static struct md_sysfs_entry md_level =
4118 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
4119
4120 static ssize_t
4121 layout_show(struct mddev *mddev, char *page)
4122 {
4123 /* just a number, not meaningful for all levels */
4124 if (mddev->reshape_position != MaxSector &&
4125 mddev->layout != mddev->new_layout)
4126 return sprintf(page, "%d (%d)\n",
4127 mddev->new_layout, mddev->layout);
4128 return sprintf(page, "%d\n", mddev->layout);
4129 }
4130
4131 static ssize_t
4132 layout_store(struct mddev *mddev, const char *buf, size_t len)
4133 {
4134 unsigned int n;
4135 int err;
4136
4137 err = kstrtouint(buf, 10, &n);
4138 if (err < 0)
4139 return err;
4140 err = mddev_lock(mddev);
4141 if (err)
4142 return err;
4143
4144 if (mddev->pers) {
4145 if (mddev->pers->check_reshape == NULL)
4146 err = -EBUSY;
4147 else if (!md_is_rdwr(mddev))
4148 err = -EROFS;
4149 else {
4150 mddev->new_layout = n;
4151 err = mddev->pers->check_reshape(mddev);
4152 if (err)
4153 mddev->new_layout = mddev->layout;
4154 }
4155 } else {
4156 mddev->new_layout = n;
4157 if (mddev->reshape_position == MaxSector)
4158 mddev->layout = n;
4159 }
4160 mddev_unlock(mddev);
4161 return err ?: len;
4162 }
4163 static struct md_sysfs_entry md_layout =
4164 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
4165
4166 static ssize_t
4167 raid_disks_show(struct mddev *mddev, char *page)
4168 {
4169 if (mddev->raid_disks == 0)
4170 return 0;
4171 if (mddev->reshape_position != MaxSector &&
4172 mddev->delta_disks != 0)
4173 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
4174 mddev->raid_disks - mddev->delta_disks);
4175 return sprintf(page, "%d\n", mddev->raid_disks);
4176 }
4177
4178 static int update_raid_disks(struct mddev *mddev, int raid_disks);
4179
4180 static ssize_t
4181 raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
4182 {
4183 unsigned int n;
4184 int err;
4185
4186 err = kstrtouint(buf, 10, &n);
4187 if (err < 0)
4188 return err;
4189
4190 err = mddev_lock(mddev);
4191 if (err)
4192 return err;
4193 if (mddev->pers)
4194 err = update_raid_disks(mddev, n);
4195 else if (mddev->reshape_position != MaxSector) {
4196 struct md_rdev *rdev;
4197 int olddisks = mddev->raid_disks - mddev->delta_disks;
4198
4199 err = -EINVAL;
4200 rdev_for_each(rdev, mddev) {
4201 if (olddisks < n &&
4202 rdev->data_offset < rdev->new_data_offset)
4203 goto out_unlock;
4204 if (olddisks > n &&
4205 rdev->data_offset > rdev->new_data_offset)
4206 goto out_unlock;
4207 }
4208 err = 0;
4209 mddev->delta_disks = n - olddisks;
4210 mddev->raid_disks = n;
4211 mddev->reshape_backwards = (mddev->delta_disks < 0);
4212 } else
4213 mddev->raid_disks = n;
4214 out_unlock:
4215 mddev_unlock(mddev);
4216 return err ? err : len;
4217 }
4218 static struct md_sysfs_entry md_raid_disks =
4219 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
4220
4221 static ssize_t
4222 uuid_show(struct mddev *mddev, char *page)
4223 {
4224 return sprintf(page, "%pU\n", mddev->uuid);
4225 }
4226 static struct md_sysfs_entry md_uuid =
4227 __ATTR(uuid, S_IRUGO, uuid_show, NULL);
4228
4229 static ssize_t
4230 chunk_size_show(struct mddev *mddev, char *page)
4231 {
4232 if (mddev->reshape_position != MaxSector &&
4233 mddev->chunk_sectors != mddev->new_chunk_sectors)
4234 return sprintf(page, "%d (%d)\n",
4235 mddev->new_chunk_sectors << 9,
4236 mddev->chunk_sectors << 9);
4237 return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
4238 }
4239
4240 static ssize_t
4241 chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
4242 {
4243 unsigned long n;
4244 int err;
4245
4246 err = kstrtoul(buf, 10, &n);
4247 if (err < 0)
4248 return err;
4249
4250 err = mddev_lock(mddev);
4251 if (err)
4252 return err;
4253 if (mddev->pers) {
4254 if (mddev->pers->check_reshape == NULL)
4255 err = -EBUSY;
4256 else if (!md_is_rdwr(mddev))
4257 err = -EROFS;
4258 else {
4259 mddev->new_chunk_sectors = n >> 9;
4260 err = mddev->pers->check_reshape(mddev);
4261 if (err)
4262 mddev->new_chunk_sectors = mddev->chunk_sectors;
4263 }
4264 } else {
4265 mddev->new_chunk_sectors = n >> 9;
4266 if (mddev->reshape_position == MaxSector)
4267 mddev->chunk_sectors = n >> 9;
4268 }
4269 mddev_unlock(mddev);
4270 return err ?: len;
4271 }
4272 static struct md_sysfs_entry md_chunk_size =
4273 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
4274
4275 static ssize_t
4276 resync_start_show(struct mddev *mddev, char *page)
4277 {
4278 if (mddev->recovery_cp == MaxSector)
4279 return sprintf(page, "none\n");
4280 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
4281 }
4282
4283 static ssize_t
4284 resync_start_store(struct mddev *mddev, const char *buf, size_t len)
4285 {
4286 unsigned long long n;
4287 int err;
4288
4289 if (cmd_match(buf, "none"))
4290 n = MaxSector;
4291 else {
4292 err = kstrtoull(buf, 10, &n);
4293 if (err < 0)
4294 return err;
4295 if (n != (sector_t)n)
4296 return -EINVAL;
4297 }
4298
4299 err = mddev_lock(mddev);
4300 if (err)
4301 return err;
4302 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
4303 err = -EBUSY;
4304
4305 if (!err) {
4306 mddev->recovery_cp = n;
4307 if (mddev->pers)
4308 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
4309 }
4310 mddev_unlock(mddev);
4311 return err ?: len;
4312 }
4313 static struct md_sysfs_entry md_resync_start =
4314 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
4315 resync_start_show, resync_start_store);
4316
4317 /*
4318 * The array state can be:
4319 *
4320 * clear
4321 * No devices, no size, no level
4322 * Equivalent to STOP_ARRAY ioctl
4323 * inactive
4324 * May have some settings, but array is not active
4325 * all IO results in error
4326 * When written, doesn't tear down array, but just stops it
4327 * suspended (not supported yet)
4328 * All IO requests will block. The array can be reconfigured.
4329 * Writing this, if accepted, will block until array is quiescent
4330 * readonly
4331 * no resync can happen. no superblocks get written.
4332 * write requests fail
4333 * read-auto
4334 * like readonly, but behaves like 'clean' on a write request.
4335 *
4336 * clean - no pending writes, but otherwise active.
4337 * When written to inactive array, starts without resync
4338 * If a write request arrives then
4339 * if metadata is known, mark 'dirty' and switch to 'active'.
4340 * if not known, block and switch to write-pending
4341 * If written to an active array that has pending writes, then fails.
4342 * active
4343 * fully active: IO and resync can be happening.
4344 * When written to inactive array, starts with resync
4345 *
4346 * write-pending
4347 * clean, but writes are blocked waiting for 'active' to be written.
4348 *
4349 * active-idle
4350 * like active, but no writes have been seen for a while (the safe_mode_delay).
4351 *
4352 * broken
4353 * Array is failed. It's useful because mounted-arrays aren't stopped
4354 * when array is failed, so this state will at least alert the user that
4355 * something is wrong.
4356 */
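/*
 * Illustrative usage (assuming an array named md0): these states are
 * read from and written to the array_state sysfs attribute, e.g.
 * "cat /sys/block/md0/md/array_state" might report "clean", while
 * "echo readonly > /sys/block/md0/md/array_state" requests the
 * readonly behaviour described above.
 */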
4357 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
4358 write_pending, active_idle, broken, bad_word};
4359 static char *array_states[] = {
4360 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
4361 "write-pending", "active-idle", "broken", NULL };
4362
4363 static int match_word(const char *word, char **list)
4364 {
4365 int n;
4366 for (n=0; list[n]; n++)
4367 if (cmd_match(word, list[n]))
4368 break;
4369 return n;
4370 }
4371
4372 static ssize_t
4373 array_state_show(struct mddev *mddev, char *page)
4374 {
4375 enum array_state st = inactive;
4376
4377 if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) {
4378 switch(mddev->ro) {
4379 case MD_RDONLY:
4380 st = readonly;
4381 break;
4382 case MD_AUTO_READ:
4383 st = read_auto;
4384 break;
4385 case MD_RDWR:
4386 spin_lock(&mddev->lock);
4387 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
4388 st = write_pending;
4389 else if (mddev->in_sync)
4390 st = clean;
4391 else if (mddev->safemode)
4392 st = active_idle;
4393 else
4394 st = active;
4395 spin_unlock(&mddev->lock);
4396 }
4397
4398 if (test_bit(MD_BROKEN, &mddev->flags) && st == clean)
4399 st = broken;
4400 } else {
4401 if (list_empty(&mddev->disks) &&
4402 mddev->raid_disks == 0 &&
4403 mddev->dev_sectors == 0)
4404 st = clear;
4405 else
4406 st = inactive;
4407 }
4408 return sprintf(page, "%s\n", array_states[st]);
4409 }
4410
4411 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
4412 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
4413 static int restart_array(struct mddev *mddev);
4414
4415 static ssize_t
4416 array_state_store(struct mddev *mddev, const char *buf, size_t len)
4417 {
4418 int err = 0;
4419 enum array_state st = match_word(buf, array_states);
4420
4421 if (mddev->pers && (st == active || st == clean) &&
4422 mddev->ro != MD_RDONLY) {
4423 /* don't take reconfig_mutex when toggling between
4424 * clean and active
4425 */
4426 spin_lock(&mddev->lock);
4427 if (st == active) {
4428 restart_array(mddev);
4429 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
4430 md_wakeup_thread(mddev->thread);
4431 wake_up(&mddev->sb_wait);
4432 } else /* st == clean */ {
4433 restart_array(mddev);
4434 if (!set_in_sync(mddev))
4435 err = -EBUSY;
4436 }
4437 if (!err)
4438 sysfs_notify_dirent_safe(mddev->sysfs_state);
4439 spin_unlock(&mddev->lock);
4440 return err ?: len;
4441 }
4442 err = mddev_lock(mddev);
4443 if (err)
4444 return err;
4445 err = -EINVAL;
4446 switch(st) {
4447 case bad_word:
4448 break;
4449 case clear:
4450 /* stopping an active array */
4451 err = do_md_stop(mddev, 0, NULL);
4452 break;
4453 case inactive:
4454 /* stopping an active array */
4455 if (mddev->pers)
4456 err = do_md_stop(mddev, 2, NULL);
4457 else
4458 err = 0; /* already inactive */
4459 break;
4460 case suspended:
4461 break; /* not supported yet */
4462 case readonly:
4463 if (mddev->pers)
4464 err = md_set_readonly(mddev, NULL);
4465 else {
4466 mddev->ro = MD_RDONLY;
4467 set_disk_ro(mddev->gendisk, 1);
4468 err = do_md_run(mddev);
4469 }
4470 break;
4471 case read_auto:
4472 if (mddev->pers) {
4473 if (md_is_rdwr(mddev))
4474 err = md_set_readonly(mddev, NULL);
4475 else if (mddev->ro == MD_RDONLY)
4476 err = restart_array(mddev);
4477 if (err == 0) {
4478 mddev->ro = MD_AUTO_READ;
4479 set_disk_ro(mddev->gendisk, 0);
4480 }
4481 } else {
4482 mddev->ro = MD_AUTO_READ;
4483 err = do_md_run(mddev);
4484 }
4485 break;
4486 case clean:
4487 if (mddev->pers) {
4488 err = restart_array(mddev);
4489 if (err)
4490 break;
4491 spin_lock(&mddev->lock);
4492 if (!set_in_sync(mddev))
4493 err = -EBUSY;
4494 spin_unlock(&mddev->lock);
4495 } else
4496 err = -EINVAL;
4497 break;
4498 case active:
4499 if (mddev->pers) {
4500 err = restart_array(mddev);
4501 if (err)
4502 break;
4503 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
4504 wake_up(&mddev->sb_wait);
4505 err = 0;
4506 } else {
4507 mddev->ro = MD_RDWR;
4508 set_disk_ro(mddev->gendisk, 0);
4509 err = do_md_run(mddev);
4510 }
4511 break;
4512 case write_pending:
4513 case active_idle:
4514 case broken:
4515 /* these cannot be set */
4516 break;
4517 }
4518
4519 if (!err) {
4520 if (mddev->hold_active == UNTIL_IOCTL)
4521 mddev->hold_active = 0;
4522 sysfs_notify_dirent_safe(mddev->sysfs_state);
4523 }
4524 mddev_unlock(mddev);
4525 return err ?: len;
4526 }
4527 static struct md_sysfs_entry md_array_state =
4528 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
4529
4530 static ssize_t
4531 max_corrected_read_errors_show(struct mddev *mddev, char *page) {
4532 return sprintf(page, "%d\n",
4533 atomic_read(&mddev->max_corr_read_errors));
4534 }
4535
4536 static ssize_t
4537 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
4538 {
4539 unsigned int n;
4540 int rv;
4541
4542 rv = kstrtouint(buf, 10, &n);
4543 if (rv < 0)
4544 return rv;
4545 if (n > INT_MAX)
4546 return -EINVAL;
4547 atomic_set(&mddev->max_corr_read_errors, n);
4548 return len;
4549 }
4550
4551 static struct md_sysfs_entry max_corr_read_errors =
4552 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
4553 max_corrected_read_errors_store);
4554
4555 static ssize_t
4556 null_show(struct mddev *mddev, char *page)
4557 {
4558 return -EINVAL;
4559 }
4560
4561 static ssize_t
4562 new_dev_store(struct mddev *mddev, const char *buf, size_t len)
4563 {
4564 /* buf must be %d:%d\n? giving major and minor numbers */
4565 /* The new device is added to the array.
4566 * If the array has a persistent superblock, we read the
4567 * superblock to initialise info and check validity.
4568 * Otherwise, the only checking done is that in bind_rdev_to_array,
4569 * which mainly checks size.
4570 */
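/* Illustrative example (hypothetical device): writing "8:16" to the
 * new_dev attribute asks md to import the device with major 8,
 * minor 16 (typically /dev/sdb) into this array.
 */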
4571 char *e;
4572 int major = simple_strtoul(buf, &e, 10);
4573 int minor;
4574 dev_t dev;
4575 struct md_rdev *rdev;
4576 int err;
4577
4578 if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
4579 return -EINVAL;
4580 minor = simple_strtoul(e+1, &e, 10);
4581 if (*e && *e != '\n')
4582 return -EINVAL;
4583 dev = MKDEV(major, minor);
4584 if (major != MAJOR(dev) ||
4585 minor != MINOR(dev))
4586 return -EOVERFLOW;
4587
4588 err = mddev_lock(mddev);
4589 if (err)
4590 return err;
4591 if (mddev->persistent) {
4592 rdev = md_import_device(dev, mddev->major_version,
4593 mddev->minor_version);
4594 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
4595 struct md_rdev *rdev0
4596 = list_entry(mddev->disks.next,
4597 struct md_rdev, same_set);
4598 err = super_types[mddev->major_version]
4599 .load_super(rdev, rdev0, mddev->minor_version);
4600 if (err < 0)
4601 goto out;
4602 }
4603 } else if (mddev->external)
4604 rdev = md_import_device(dev, -2, -1);
4605 else
4606 rdev = md_import_device(dev, -1, -1);
4607
4608 if (IS_ERR(rdev)) {
4609 mddev_unlock(mddev);
4610 return PTR_ERR(rdev);
4611 }
4612 err = bind_rdev_to_array(rdev, mddev);
4613 out:
4614 if (err)
4615 export_rdev(rdev, mddev);
4616 mddev_unlock(mddev);
4617 if (!err)
4618 md_new_event();
4619 return err ? err : len;
4620 }
4621
4622 static struct md_sysfs_entry md_new_device =
4623 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
4624
4625 static ssize_t
4626 bitmap_store(struct mddev *mddev, const char *buf, size_t len)
4627 {
4628 char *end;
4629 unsigned long chunk, end_chunk;
4630 int err;
4631
4632 err = mddev_lock(mddev);
4633 if (err)
4634 return err;
4635 if (!mddev->bitmap)
4636 goto out;
4637 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
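/* e.g. (illustrative) writing "10 12 20-25" would dirty chunk 10,
 * chunk 12 and chunks 20 through 25 in the bitmap */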
4638 while (*buf) {
4639 chunk = end_chunk = simple_strtoul(buf, &end, 0);
4640 if (buf == end) break;
4641 if (*end == '-') { /* range */
4642 buf = end + 1;
4643 end_chunk = simple_strtoul(buf, &end, 0);
4644 if (buf == end) break;
4645 }
4646 if (*end && !isspace(*end)) break;
4647 md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
4648 buf = skip_spaces(end);
4649 }
4650 md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
4651 out:
4652 mddev_unlock(mddev);
4653 return len;
4654 }
4655
4656 static struct md_sysfs_entry md_bitmap =
4657 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
4658
4659 static ssize_t
4660 size_show(struct mddev *mddev, char *page)
4661 {
4662 return sprintf(page, "%llu\n",
4663 (unsigned long long)mddev->dev_sectors / 2);
4664 }
4665
4666 static int update_size(struct mddev *mddev, sector_t num_sectors);
4667
4668 static ssize_t
4669 size_store(struct mddev *mddev, const char *buf, size_t len)
4670 {
4671 /* If array is inactive, we can reduce the component size, but
4672 * not increase it (except from 0).
4673 * If array is active, we can try an on-line resize
4674 */
4675 sector_t sectors;
4676 int err = strict_blocks_to_sectors(buf, &sectors);
4677
4678 if (err < 0)
4679 return err;
4680 err = mddev_lock(mddev);
4681 if (err)
4682 return err;
4683 if (mddev->pers) {
4684 err = update_size(mddev, sectors);
4685 if (err == 0)
4686 md_update_sb(mddev, 1);
4687 } else {
4688 if (mddev->dev_sectors == 0 ||
4689 mddev->dev_sectors > sectors)
4690 mddev->dev_sectors = sectors;
4691 else
4692 err = -ENOSPC;
4693 }
4694 mddev_unlock(mddev);
4695 return err ? err : len;
4696 }
4697
4698 static struct md_sysfs_entry md_size =
4699 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
4700
4701 /* Metadata version.
4702 * This is one of
4703 * 'none' for arrays with no metadata (good luck...)
4704 * 'external' for arrays with externally managed metadata,
4705 * or N.M for internally known formats
4706 */
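/*
 * Illustrative values: "0.90" or "1.2" select the corresponding native
 * superblock formats, "none" disables persistent metadata, and a string
 * such as "external:imsm" (purely an example) records an externally
 * managed metadata type.
 */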
4707 static ssize_t
4708 metadata_show(struct mddev *mddev, char *page)
4709 {
4710 if (mddev->persistent)
4711 return sprintf(page, "%d.%d\n",
4712 mddev->major_version, mddev->minor_version);
4713 else if (mddev->external)
4714 return sprintf(page, "external:%s\n", mddev->metadata_type);
4715 else
4716 return sprintf(page, "none\n");
4717 }
4718
4719 static ssize_t
4720 metadata_store(struct mddev *mddev, const char *buf, size_t len)
4721 {
4722 int major, minor;
4723 char *e;
4724 int err;
4725 /* Changing the details of 'external' metadata is
4726 * always permitted. Otherwise there must be
4727 * no devices attached to the array.
4728 */
4729
4730 err = mddev_lock(mddev);
4731 if (err)
4732 return err;
4733 err = -EBUSY;
4734 if (mddev->external && strncmp(buf, "external:", 9) == 0)
4735 ;
4736 else if (!list_empty(&mddev->disks))
4737 goto out_unlock;
4738
4739 err = 0;
4740 if (cmd_match(buf, "none")) {
4741 mddev->persistent = 0;
4742 mddev->external = 0;
4743 mddev->major_version = 0;
4744 mddev->minor_version = 90;
4745 goto out_unlock;
4746 }
4747 if (strncmp(buf, "external:", 9) == 0) {
4748 size_t namelen = len-9;
4749 if (namelen >= sizeof(mddev->metadata_type))
4750 namelen = sizeof(mddev->metadata_type)-1;
4751 strncpy(mddev->metadata_type, buf+9, namelen);
4752 mddev->metadata_type[namelen] = 0;
4753 if (namelen && mddev->metadata_type[namelen-1] == '\n')
4754 mddev->metadata_type[--namelen] = 0;
4755 mddev->persistent = 0;
4756 mddev->external = 1;
4757 mddev->major_version = 0;
4758 mddev->minor_version = 90;
4759 goto out_unlock;
4760 }
4761 major = simple_strtoul(buf, &e, 10);
4762 err = -EINVAL;
4763 if (e==buf || *e != '.')
4764 goto out_unlock;
4765 buf = e+1;
4766 minor = simple_strtoul(buf, &e, 10);
4767 if (e==buf || (*e && *e != '\n') )
4768 goto out_unlock;
4769 err = -ENOENT;
4770 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
4771 goto out_unlock;
4772 mddev->major_version = major;
4773 mddev->minor_version = minor;
4774 mddev->persistent = 1;
4775 mddev->external = 0;
4776 err = 0;
4777 out_unlock:
4778 mddev_unlock(mddev);
4779 return err ?: len;
4780 }
4781
4782 static struct md_sysfs_entry md_metadata =
4783 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
4784
4785 static ssize_t
4786 action_show(struct mddev *mddev, char *page)
4787 {
4788 char *type = "idle";
4789 unsigned long recovery = mddev->recovery;
4790 if (test_bit(MD_RECOVERY_FROZEN, &recovery))
4791 type = "frozen";
4792 else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
4793 (md_is_rdwr(mddev) && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
4794 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
4795 type = "reshape";
4796 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
4797 if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
4798 type = "resync";
4799 else if (test_bit(MD_RECOVERY_CHECK, &recovery))
4800 type = "check";
4801 else
4802 type = "repair";
4803 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
4804 type = "recover";
4805 else if (mddev->reshape_position != MaxSector)
4806 type = "reshape";
4807 }
4808 return sprintf(page, "%s\n", type);
4809 }
4810
4811 static void stop_sync_thread(struct mddev *mddev)
4812 {
4813 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4814 return;
4815
4816 if (mddev_lock(mddev))
4817 return;
4818
4819 /*
4820 * Check again in case MD_RECOVERY_RUNNING is cleared before lock is
4821 * held.
4822 */
4823 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
4824 mddev_unlock(mddev);
4825 return;
4826 }
4827
4828 if (work_pending(&mddev->del_work))
4829 flush_workqueue(md_misc_wq);
4830
4831 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4832 /*
4833 * Thread might be blocked waiting for metadata update which will now
4834 * never happen
4835 */
4836 md_wakeup_thread_directly(mddev->sync_thread);
4837
4838 mddev_unlock(mddev);
4839 }
4840
4841 static void idle_sync_thread(struct mddev *mddev)
4842 {
4843 int sync_seq = atomic_read(&mddev->sync_seq);
4844
4845 mutex_lock(&mddev->sync_mutex);
4846 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4847 stop_sync_thread(mddev);
4848
4849 wait_event(resync_wait, sync_seq != atomic_read(&mddev->sync_seq) ||
4850 !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));
4851
4852 mutex_unlock(&mddev->sync_mutex);
4853 }
4854
4855 static void frozen_sync_thread(struct mddev *mddev)
4856 {
4857 mutex_lock(&mddev->sync_mutex);
4858 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4859 stop_sync_thread(mddev);
4860
4861 wait_event(resync_wait, mddev->sync_thread == NULL &&
4862 !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));
4863
4864 mutex_unlock(&mddev->sync_mutex);
4865 }
4866
4867 static ssize_t
4868 action_store(struct mddev *mddev, const char *page, size_t len)
4869 {
4870 if (!mddev->pers || !mddev->pers->sync_request)
4871 return -EINVAL;
4872
4873
4874 if (cmd_match(page, "idle"))
4875 idle_sync_thread(mddev);
4876 else if (cmd_match(page, "frozen"))
4877 frozen_sync_thread(mddev);
4878 else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4879 return -EBUSY;
4880 else if (cmd_match(page, "resync"))
4881 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4882 else if (cmd_match(page, "recover")) {
4883 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4884 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4885 } else if (cmd_match(page, "reshape")) {
4886 int err;
4887 if (mddev->pers->start_reshape == NULL)
4888 return -EINVAL;
4889 err = mddev_lock(mddev);
4890 if (!err) {
4891 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
4892 err = -EBUSY;
4893 } else if (mddev->reshape_position == MaxSector ||
4894 mddev->pers->check_reshape == NULL ||
4895 mddev->pers->check_reshape(mddev)) {
4896 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4897 err = mddev->pers->start_reshape(mddev);
4898 } else {
4899 /*
4900 * If reshape is still in progress, and
4901 * md_check_recovery() can continue to reshape,
4902 * don't restart reshape because data can be
4903 * corrupted for raid456.
4904 */
4905 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4906 }
4907 mddev_unlock(mddev);
4908 }
4909 if (err)
4910 return err;
4911 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
4912 } else {
4913 if (cmd_match(page, "check"))
4914 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4915 else if (!cmd_match(page, "repair"))
4916 return -EINVAL;
4917 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4918 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4919 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4920 }
4921 if (mddev->ro == MD_AUTO_READ) {
4922 /* A write to sync_action is enough to justify
4923 * canceling read-auto mode
4924 */
4925 mddev->ro = MD_RDWR;
4926 md_wakeup_thread(mddev->sync_thread);
4927 }
4928 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4929 md_wakeup_thread(mddev->thread);
4930 sysfs_notify_dirent_safe(mddev->sysfs_action);
4931 return len;
4932 }
4933
4934 static struct md_sysfs_entry md_scan_mode =
4935 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
4936
4937 static ssize_t
4938 last_sync_action_show(struct mddev *mddev, char *page)
4939 {
4940 return sprintf(page, "%s\n", mddev->last_sync_action);
4941 }
4942
4943 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
4944
4945 static ssize_t
4946 mismatch_cnt_show(struct mddev *mddev, char *page)
4947 {
4948 return sprintf(page, "%llu\n",
4949 (unsigned long long)
4950 atomic64_read(&mddev->resync_mismatches));
4951 }
4952
4953 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
4954
4955 static ssize_t
4956 sync_min_show(struct mddev *mddev, char *page)
4957 {
4958 return sprintf(page, "%d (%s)\n", speed_min(mddev),
4959 mddev->sync_speed_min ? "local": "system");
4960 }
4961
4962 static ssize_t
4963 sync_min_store(struct mddev *mddev, const char *buf, size_t len)
4964 {
4965 unsigned int min;
4966 int rv;
4967
4968 if (strncmp(buf, "system", 6)==0) {
4969 min = 0;
4970 } else {
4971 rv = kstrtouint(buf, 10, &min);
4972 if (rv < 0)
4973 return rv;
4974 if (min == 0)
4975 return -EINVAL;
4976 }
4977 mddev->sync_speed_min = min;
4978 return len;
4979 }
4980
4981 static struct md_sysfs_entry md_sync_min =
4982 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
4983
4984 static ssize_t
4985 sync_max_show(struct mddev *mddev, char *page)
4986 {
4987 return sprintf(page, "%d (%s)\n", speed_max(mddev),
4988 mddev->sync_speed_max ? "local": "system");
4989 }
4990
4991 static ssize_t
4992 sync_max_store(struct mddev *mddev, const char *buf, size_t len)
4993 {
4994 unsigned int max;
4995 int rv;
4996
4997 if (strncmp(buf, "system", 6)==0) {
4998 max = 0;
4999 } else {
5000 rv = kstrtouint(buf, 10, &max);
5001 if (rv < 0)
5002 return rv;
5003 if (max == 0)
5004 return -EINVAL;
5005 }
5006 mddev->sync_speed_max = max;
5007 return len;
5008 }
5009
5010 static struct md_sysfs_entry md_sync_max =
5011 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
5012
5013 static ssize_t
5014 degraded_show(struct mddev *mddev, char *page)
5015 {
5016 return sprintf(page, "%d\n", mddev->degraded);
5017 }
5018 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
5019
5020 static ssize_t
5021 sync_force_parallel_show(struct mddev *mddev, char *page)
5022 {
5023 return sprintf(page, "%d\n", mddev->parallel_resync);
5024 }
5025
5026 static ssize_t
5027 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
5028 {
5029 long n;
5030
5031 if (kstrtol(buf, 10, &n))
5032 return -EINVAL;
5033
5034 if (n != 0 && n != 1)
5035 return -EINVAL;
5036
5037 mddev->parallel_resync = n;
5038
5039 if (mddev->sync_thread)
5040 wake_up(&resync_wait);
5041
5042 return len;
5043 }
5044
5045 /* force parallel resync, even with shared block devices */
5046 static struct md_sysfs_entry md_sync_force_parallel =
5047 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
5048 sync_force_parallel_show, sync_force_parallel_store);
5049
5050 static ssize_t
5051 sync_speed_show(struct mddev *mddev, char *page)
5052 {
5053 unsigned long resync, dt, db;
5054 if (mddev->curr_resync == MD_RESYNC_NONE)
5055 return sprintf(page, "none\n");
5056 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
5057 dt = (jiffies - mddev->resync_mark) / HZ;
5058 if (!dt) dt++;
5059 db = resync - mddev->resync_mark_cnt;
5060 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
5061 }
5062
5063 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
5064
5065 static ssize_t
5066 sync_completed_show(struct mddev *mddev, char *page)
5067 {
5068 unsigned long long max_sectors, resync;
5069
5070 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5071 return sprintf(page, "none\n");
5072
5073 if (mddev->curr_resync == MD_RESYNC_YIELDED ||
5074 mddev->curr_resync == MD_RESYNC_DELAYED)
5075 return sprintf(page, "delayed\n");
5076
5077 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
5078 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5079 max_sectors = mddev->resync_max_sectors;
5080 else
5081 max_sectors = mddev->dev_sectors;
5082
5083 resync = mddev->curr_resync_completed;
5084 return sprintf(page, "%llu / %llu\n", resync, max_sectors);
5085 }
5086
5087 static struct md_sysfs_entry md_sync_completed =
5088 __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
5089
5090 static ssize_t
5091 min_sync_show(struct mddev *mddev, char *page)
5092 {
5093 return sprintf(page, "%llu\n",
5094 (unsigned long long)mddev->resync_min);
5095 }
5096 static ssize_t
5097 min_sync_store(struct mddev *mddev, const char *buf, size_t len)
5098 {
5099 unsigned long long min;
5100 int err;
5101
5102 if (kstrtoull(buf, 10, &min))
5103 return -EINVAL;
5104
5105 spin_lock(&mddev->lock);
5106 err = -EINVAL;
5107 if (min > mddev->resync_max)
5108 goto out_unlock;
5109
5110 err = -EBUSY;
5111 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5112 goto out_unlock;
5113
5114 /* Round down to multiple of 4K for safety */
5115 mddev->resync_min = round_down(min, 8);
5116 err = 0;
5117
5118 out_unlock:
5119 spin_unlock(&mddev->lock);
5120 return err ?: len;
5121 }
5122
5123 static struct md_sysfs_entry md_min_sync =
5124 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
5125
5126 static ssize_t
5127 max_sync_show(struct mddev *mddev, char *page)
5128 {
5129 if (mddev->resync_max == MaxSector)
5130 return sprintf(page, "max\n");
5131 else
5132 return sprintf(page, "%llu\n",
5133 (unsigned long long)mddev->resync_max);
5134 }
5135 static ssize_t
5136 max_sync_store(struct mddev *mddev, const char *buf, size_t len)
5137 {
5138 int err;
5139 spin_lock(&mddev->lock);
5140 if (strncmp(buf, "max", 3) == 0)
5141 mddev->resync_max = MaxSector;
5142 else {
5143 unsigned long long max;
5144 int chunk;
5145
5146 err = -EINVAL;
5147 if (kstrtoull(buf, 10, &max))
5148 goto out_unlock;
5149 if (max < mddev->resync_min)
5150 goto out_unlock;
5151
5152 err = -EBUSY;
5153 if (max < mddev->resync_max && md_is_rdwr(mddev) &&
5154 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5155 goto out_unlock;
5156
5157 /* Must be a multiple of chunk_size */
5158 chunk = mddev->chunk_sectors;
5159 if (chunk) {
5160 sector_t temp = max;
5161
5162 err = -EINVAL;
5163 if (sector_div(temp, chunk))
5164 goto out_unlock;
5165 }
5166 mddev->resync_max = max;
5167 }
5168 wake_up(&mddev->recovery_wait);
5169 err = 0;
5170 out_unlock:
5171 spin_unlock(&mddev->lock);
5172 return err ?: len;
5173 }
5174
5175 static struct md_sysfs_entry md_max_sync =
5176 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
5177
5178 static ssize_t
5179 suspend_lo_show(struct mddev *mddev, char *page)
5180 {
5181 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
5182 }
5183
5184 static ssize_t
5185 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
5186 {
5187 unsigned long long new;
5188 int err;
5189
5190 err = kstrtoull(buf, 10, &new);
5191 if (err < 0)
5192 return err;
5193 if (new != (sector_t)new)
5194 return -EINVAL;
5195
5196 err = mddev_lock(mddev);
5197 if (err)
5198 return err;
5199 err = -EINVAL;
5200 if (mddev->pers == NULL ||
5201 mddev->pers->quiesce == NULL)
5202 goto unlock;
5203 mddev_suspend(mddev);
5204 mddev->suspend_lo = new;
5205 mddev_resume(mddev);
5206
5207 err = 0;
5208 unlock:
5209 mddev_unlock(mddev);
5210 return err ?: len;
5211 }
5212 static struct md_sysfs_entry md_suspend_lo =
5213 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
5214
5215 static ssize_t
5216 suspend_hi_show(struct mddev *mddev, char *page)
5217 {
5218 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
5219 }
5220
5221 static ssize_t
5222 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
5223 {
5224 unsigned long long new;
5225 int err;
5226
5227 err = kstrtoull(buf, 10, &new);
5228 if (err < 0)
5229 return err;
5230 if (new != (sector_t)new)
5231 return -EINVAL;
5232
5233 err = mddev_lock(mddev);
5234 if (err)
5235 return err;
5236 err = -EINVAL;
5237 if (mddev->pers == NULL)
5238 goto unlock;
5239
5240 mddev_suspend(mddev);
5241 mddev->suspend_hi = new;
5242 mddev_resume(mddev);
5243
5244 err = 0;
5245 unlock:
5246 mddev_unlock(mddev);
5247 return err ?: len;
5248 }
5249 static struct md_sysfs_entry md_suspend_hi =
5250 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
5251
5252 static ssize_t
5253 reshape_position_show(struct mddev *mddev, char *page)
5254 {
5255 if (mddev->reshape_position != MaxSector)
5256 return sprintf(page, "%llu\n",
5257 (unsigned long long)mddev->reshape_position);
5258 strcpy(page, "none\n");
5259 return 5;
5260 }
5261
5262 static ssize_t
5263 reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
5264 {
5265 struct md_rdev *rdev;
5266 unsigned long long new;
5267 int err;
5268
5269 err = kstrtoull(buf, 10, &new);
5270 if (err < 0)
5271 return err;
5272 if (new != (sector_t)new)
5273 return -EINVAL;
5274 err = mddev_lock(mddev);
5275 if (err)
5276 return err;
5277 err = -EBUSY;
5278 if (mddev->pers)
5279 goto unlock;
5280 mddev->reshape_position = new;
5281 mddev->delta_disks = 0;
5282 mddev->reshape_backwards = 0;
5283 mddev->new_level = mddev->level;
5284 mddev->new_layout = mddev->layout;
5285 mddev->new_chunk_sectors = mddev->chunk_sectors;
5286 rdev_for_each(rdev, mddev)
5287 rdev->new_data_offset = rdev->data_offset;
5288 err = 0;
5289 unlock:
5290 mddev_unlock(mddev);
5291 return err ?: len;
5292 }
5293
5294 static struct md_sysfs_entry md_reshape_position =
5295 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
5296 reshape_position_store);
5297
5298 static ssize_t
5299 reshape_direction_show(struct mddev *mddev, char *page)
5300 {
5301 return sprintf(page, "%s\n",
5302 mddev->reshape_backwards ? "backwards" : "forwards");
5303 }
5304
5305 static ssize_t
5306 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
5307 {
5308 int backwards = 0;
5309 int err;
5310
5311 if (cmd_match(buf, "forwards"))
5312 backwards = 0;
5313 else if (cmd_match(buf, "backwards"))
5314 backwards = 1;
5315 else
5316 return -EINVAL;
5317 if (mddev->reshape_backwards == backwards)
5318 return len;
5319
5320 err = mddev_lock(mddev);
5321 if (err)
5322 return err;
5323 /* check if we are allowed to change */
5324 if (mddev->delta_disks)
5325 err = -EBUSY;
5326 else if (mddev->persistent &&
5327 mddev->major_version == 0)
5328 err = -EINVAL;
5329 else
5330 mddev->reshape_backwards = backwards;
5331 mddev_unlock(mddev);
5332 return err ?: len;
5333 }
5334
5335 static struct md_sysfs_entry md_reshape_direction =
5336 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
5337 reshape_direction_store);
5338
5339 static ssize_t
5340 array_size_show(struct mddev *mddev, char *page)
5341 {
5342 if (mddev->external_size)
5343 return sprintf(page, "%llu\n",
5344 (unsigned long long)mddev->array_sectors/2);
5345 else
5346 return sprintf(page, "default\n");
5347 }
5348
5349 static ssize_t
5350 array_size_store(struct mddev *mddev, const char *buf, size_t len)
5351 {
5352 sector_t sectors;
5353 int err;
5354
5355 err = mddev_lock(mddev);
5356 if (err)
5357 return err;
5358
5359 /* cluster raid doesn't support changing array_sectors */
5360 if (mddev_is_clustered(mddev)) {
5361 mddev_unlock(mddev);
5362 return -EINVAL;
5363 }
5364
5365 if (strncmp(buf, "default", 7) == 0) {
5366 if (mddev->pers)
5367 sectors = mddev->pers->size(mddev, 0, 0);
5368 else
5369 sectors = mddev->array_sectors;
5370
5371 mddev->external_size = 0;
5372 } else {
5373 if (strict_blocks_to_sectors(buf, &sectors) < 0)
5374 err = -EINVAL;
5375 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
5376 err = -E2BIG;
5377 else
5378 mddev->external_size = 1;
5379 }
5380
5381 if (!err) {
5382 mddev->array_sectors = sectors;
5383 if (mddev->pers)
5384 set_capacity_and_notify(mddev->gendisk,
5385 mddev->array_sectors);
5386 }
5387 mddev_unlock(mddev);
5388 return err ?: len;
5389 }
5390
5391 static struct md_sysfs_entry md_array_size =
5392 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
5393 array_size_store);
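/*
 * Illustrative usage (device name is hypothetical): array_size is shown in
 * KiB (array_sectors / 2), or "default" when no external size is imposed.
 * Writes appear to take a size in 1 KiB blocks via strict_blocks_to_sectors():
 *
 *	cat  /sys/block/md0/md/array_size              # e.g. "default"
 *	echo 1048576 > /sys/block/md0/md/array_size    # clamp the array to ~1 GiB
 *	echo default > /sys/block/md0/md/array_size    # revert to the personality size
 */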
5394
5395 static ssize_t
5396 consistency_policy_show(struct mddev *mddev, char *page)
5397 {
5398 int ret;
5399
5400 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
5401 ret = sprintf(page, "journal\n");
5402 } else if (test_bit(MD_HAS_PPL, &mddev->flags)) {
5403 ret = sprintf(page, "ppl\n");
5404 } else if (mddev->bitmap) {
5405 ret = sprintf(page, "bitmap\n");
5406 } else if (mddev->pers) {
5407 if (mddev->pers->sync_request)
5408 ret = sprintf(page, "resync\n");
5409 else
5410 ret = sprintf(page, "none\n");
5411 } else {
5412 ret = sprintf(page, "unknown\n");
5413 }
5414
5415 return ret;
5416 }
5417
5418 static ssize_t
5419 consistency_policy_store(struct mddev *mddev, const char *buf, size_t len)
5420 {
5421 int err = 0;
5422
5423 if (mddev->pers) {
5424 if (mddev->pers->change_consistency_policy)
5425 err = mddev->pers->change_consistency_policy(mddev, buf);
5426 else
5427 err = -EBUSY;
5428 } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) {
5429 set_bit(MD_HAS_PPL, &mddev->flags);
5430 } else {
5431 err = -EINVAL;
5432 }
5433
5434 return err ? err : len;
5435 }
5436
5437 static struct md_sysfs_entry md_consistency_policy =
5438 __ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show,
5439 consistency_policy_store);
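/*
 * Illustrative usage (device name is hypothetical): reads report one of
 * "journal", "ppl", "bitmap", "resync", "none" or "unknown", as derived in
 * consistency_policy_show().  For a running array, writes are forwarded to
 * the personality's ->change_consistency_policy(); for an external-metadata
 * array that has not been started, only "ppl" is accepted:
 *
 *	cat /sys/block/md0/md/consistency_policy
 *	echo ppl > /sys/block/md0/md/consistency_policy
 */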
5440
5441 static ssize_t fail_last_dev_show(struct mddev *mddev, char *page)
5442 {
5443 return sprintf(page, "%d\n", mddev->fail_last_dev);
5444 }
5445
5446 /*
5447 * Setting fail_last_dev to true allows the last device to be forcibly removed
5448 * from RAID1/RAID10.
5449 */
5450 static ssize_t
5451 fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len)
5452 {
5453 int ret;
5454 bool value;
5455
5456 ret = kstrtobool(buf, &value);
5457 if (ret)
5458 return ret;
5459
5460 if (value != mddev->fail_last_dev)
5461 mddev->fail_last_dev = value;
5462
5463 return len;
5464 }
5465 static struct md_sysfs_entry md_fail_last_dev =
5466 __ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
5467 fail_last_dev_store);
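/*
 * Illustrative usage (device name is hypothetical): fail_last_dev is a
 * boolean parsed by kstrtobool(), so "0"/"1" or "y"/"n" are accepted:
 *
 *	echo 1 > /sys/block/md0/md/fail_last_dev
 *	cat  /sys/block/md0/md/fail_last_dev           # "1"
 */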
5468
5469 static ssize_t serialize_policy_show(struct mddev *mddev, char *page)
5470 {
5471 if (mddev->pers == NULL || (mddev->pers->level != 1))
5472 return sprintf(page, "n/a\n");
5473 else
5474 return sprintf(page, "%d\n", mddev->serialize_policy);
5475 }
5476
5477 /*
5478 * Setting serialize_policy to true enforces that write IO is not reordered
5479 * for raid1.
5480 */
5481 static ssize_t
5482 serialize_policy_store(struct mddev *mddev, const char *buf, size_t len)
5483 {
5484 int err;
5485 bool value;
5486
5487 err = kstrtobool(buf, &value);
5488 if (err)
5489 return err;
5490
5491 if (value == mddev->serialize_policy)
5492 return len;
5493
5494 err = mddev_lock(mddev);
5495 if (err)
5496 return err;
5497 if (mddev->pers == NULL || (mddev->pers->level != 1)) {
5498 pr_err("md: serialize_policy is only effective for raid1\n");
5499 err = -EINVAL;
5500 goto unlock;
5501 }
5502
5503 mddev_suspend(mddev);
5504 if (value)
5505 mddev_create_serial_pool(mddev, NULL, true);
5506 else
5507 mddev_destroy_serial_pool(mddev, NULL, true);
5508 mddev->serialize_policy = value;
5509 mddev_resume(mddev);
5510 unlock:
5511 mddev_unlock(mddev);
5512 return err ?: len;
5513 }
5514
5515 static struct md_sysfs_entry md_serialize_policy =
5516 __ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
5517 serialize_policy_store);
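/*
 * Illustrative usage (device name is hypothetical): only meaningful for a
 * running raid1 array; the store path above suspends the array while the
 * serial pool is created or destroyed:
 *
 *	echo 1 > /sys/block/md0/md/serialize_policy
 *	cat  /sys/block/md0/md/serialize_policy        # "1", or "n/a" for non-raid1
 */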
5518
5519
5520 static struct attribute *md_default_attrs[] = {
5521 &md_level.attr,
5522 &md_layout.attr,
5523 &md_raid_disks.attr,
5524 &md_uuid.attr,
5525 &md_chunk_size.attr,
5526 &md_size.attr,
5527 &md_resync_start.attr,
5528 &md_metadata.attr,
5529 &md_new_device.attr,
5530 &md_safe_delay.attr,
5531 &md_array_state.attr,
5532 &md_reshape_position.attr,
5533 &md_reshape_direction.attr,
5534 &md_array_size.attr,
5535 &max_corr_read_errors.attr,
5536 &md_consistency_policy.attr,
5537 &md_fail_last_dev.attr,
5538 &md_serialize_policy.attr,
5539 NULL,
5540 };
5541
5542 static const struct attribute_group md_default_group = {
5543 .attrs = md_default_attrs,
5544 };
5545
5546 static struct attribute *md_redundancy_attrs[] = {
5547 &md_scan_mode.attr,
5548 &md_last_scan_mode.attr,
5549 &md_mismatches.attr,
5550 &md_sync_min.attr,
5551 &md_sync_max.attr,
5552 &md_sync_speed.attr,
5553 &md_sync_force_parallel.attr,
5554 &md_sync_completed.attr,
5555 &md_min_sync.attr,
5556 &md_max_sync.attr,
5557 &md_suspend_lo.attr,
5558 &md_suspend_hi.attr,
5559 &md_bitmap.attr,
5560 &md_degraded.attr,
5561 NULL,
5562 };
5563 static const struct attribute_group md_redundancy_group = {
5564 .name = NULL,
5565 .attrs = md_redundancy_attrs,
5566 };
5567
5568 static const struct attribute_group *md_attr_groups[] = {
5569 &md_default_group,
5570 &md_bitmap_group,
5571 NULL,
5572 };
5573
5574 static ssize_t
5575 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
5576 {
5577 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
5578 struct mddev *mddev = container_of(kobj, struct mddev, kobj);
5579 ssize_t rv;
5580
5581 if (!entry->show)
5582 return -EIO;
5583 spin_lock(&all_mddevs_lock);
5584 if (!mddev_get(mddev)) {
5585 spin_unlock(&all_mddevs_lock);
5586 return -EBUSY;
5587 }
5588 spin_unlock(&all_mddevs_lock);
5589
5590 rv = entry->show(mddev, page);
5591 mddev_put(mddev);
5592 return rv;
5593 }
5594
5595 static ssize_t
5596 md_attr_store(struct kobject *kobj, struct attribute *attr,
5597 const char *page, size_t length)
5598 {
5599 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
5600 struct mddev *mddev = container_of(kobj, struct mddev, kobj);
5601 ssize_t rv;
5602
5603 if (!entry->store)
5604 return -EIO;
5605 if (!capable(CAP_SYS_ADMIN))
5606 return -EACCES;
5607 spin_lock(&all_mddevs_lock);
5608 if (!mddev_get(mddev)) {
5609 spin_unlock(&all_mddevs_lock);
5610 return -EBUSY;
5611 }
5612 spin_unlock(&all_mddevs_lock);
5613 rv = entry->store(mddev, page, length);
5614 mddev_put(mddev);
5615 return rv;
5616 }
5617
5618 static void md_kobj_release(struct kobject *ko)
5619 {
5620 struct mddev *mddev = container_of(ko, struct mddev, kobj);
5621
5622 if (mddev->sysfs_state)
5623 sysfs_put(mddev->sysfs_state);
5624 if (mddev->sysfs_level)
5625 sysfs_put(mddev->sysfs_level);
5626
5627 del_gendisk(mddev->gendisk);
5628 put_disk(mddev->gendisk);
5629 }
5630
5631 static const struct sysfs_ops md_sysfs_ops = {
5632 .show = md_attr_show,
5633 .store = md_attr_store,
5634 };
5635 static const struct kobj_type md_ktype = {
5636 .release = md_kobj_release,
5637 .sysfs_ops = &md_sysfs_ops,
5638 .default_groups = md_attr_groups,
5639 };
5640
5641 int mdp_major = 0;
5642
5643 static void mddev_delayed_delete(struct work_struct *ws)
5644 {
5645 struct mddev *mddev = container_of(ws, struct mddev, del_work);
5646
5647 kobject_put(&mddev->kobj);
5648 }
5649
5650 static void no_op(struct percpu_ref *r) {}
5651
5652 int mddev_init_writes_pending(struct mddev *mddev)
5653 {
5654 if (mddev->writes_pending.percpu_count_ptr)
5655 return 0;
5656 if (percpu_ref_init(&mddev->writes_pending, no_op,
5657 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL) < 0)
5658 return -ENOMEM;
5659 /* We want to start with the refcount at zero */
5660 percpu_ref_put(&mddev->writes_pending);
5661 return 0;
5662 }
5663 EXPORT_SYMBOL_GPL(mddev_init_writes_pending);
5664
5665 struct mddev *md_alloc(dev_t dev, char *name)
5666 {
5667 /*
5668 * If dev is zero, name is the name of a device to allocate with
5669 * an arbitrary minor number. It will be "md_???"
5670 * If dev is non-zero it must be a device number with a MAJOR of
5671 * MD_MAJOR or mdp_major. In this case, if "name" is NULL, then
5672 * the device is being created by opening a node in /dev.
5673 * If "name" is not NULL, the device is being created by
5674 * writing to /sys/module/md_mod/parameters/new_array.
5675 */
5676 static DEFINE_MUTEX(disks_mutex);
5677 struct mddev *mddev;
5678 struct gendisk *disk;
5679 int partitioned;
5680 int shift;
5681 int unit;
5682 int error;
5683
5684 /*
5685 * Wait for any previous instance of this device to be completely
5686 * removed (mddev_delayed_delete).
5687 */
5688 flush_workqueue(md_misc_wq);
5689
5690 mutex_lock(&disks_mutex);
5691 mddev = mddev_alloc(dev);
5692 if (IS_ERR(mddev)) {
5693 error = PTR_ERR(mddev);
5694 goto out_unlock;
5695 }
5696
5697 partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
5698 shift = partitioned ? MdpMinorShift : 0;
5699 unit = MINOR(mddev->unit) >> shift;
5700
5701 if (name && !dev) {
5702 /* Need to ensure that 'name' is not a duplicate.
5703 */
5704 struct mddev *mddev2;
5705 spin_lock(&all_mddevs_lock);
5706
5707 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
5708 if (mddev2->gendisk &&
5709 strcmp(mddev2->gendisk->disk_name, name) == 0) {
5710 spin_unlock(&all_mddevs_lock);
5711 error = -EEXIST;
5712 goto out_free_mddev;
5713 }
5714 spin_unlock(&all_mddevs_lock);
5715 }
5716 if (name && dev)
5717 /*
5718 * Creating /dev/mdNNN via "new_array", so adjust hold_active.
5719 */
5720 mddev->hold_active = UNTIL_STOP;
5721
5722 error = -ENOMEM;
5723 disk = blk_alloc_disk(NUMA_NO_NODE);
5724 if (!disk)
5725 goto out_free_mddev;
5726
5727 disk->major = MAJOR(mddev->unit);
5728 disk->first_minor = unit << shift;
5729 disk->minors = 1 << shift;
5730 if (name)
5731 strcpy(disk->disk_name, name);
5732 else if (partitioned)
5733 sprintf(disk->disk_name, "md_d%d", unit);
5734 else
5735 sprintf(disk->disk_name, "md%d", unit);
5736 disk->fops = &md_fops;
5737 disk->private_data = mddev;
5738
5739 mddev->queue = disk->queue;
5740 blk_set_stacking_limits(&mddev->queue->limits);
5741 blk_queue_write_cache(mddev->queue, true, true);
5742 disk->events |= DISK_EVENT_MEDIA_CHANGE;
5743 mddev->gendisk = disk;
5744 error = add_disk(disk);
5745 if (error)
5746 goto out_put_disk;
5747
5748 kobject_init(&mddev->kobj, &md_ktype);
5749 error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
5750 if (error) {
5751 /*
5752 * The disk is already live at this point. Clear the hold flag
5753 * and let mddev_put take care of the deletion, as it isn't any
5754 * different from a normal close on last release now.
5755 */
5756 mddev->hold_active = 0;
5757 mutex_unlock(&disks_mutex);
5758 mddev_put(mddev);
5759 return ERR_PTR(error);
5760 }
5761
5762 kobject_uevent(&mddev->kobj, KOBJ_ADD);
5763 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
5764 mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
5765 mutex_unlock(&disks_mutex);
5766 return mddev;
5767
5768 out_put_disk:
5769 put_disk(disk);
5770 out_free_mddev:
5771 mddev_free(mddev);
5772 out_unlock:
5773 mutex_unlock(&disks_mutex);
5774 return ERR_PTR(error);
5775 }
5776
5777 static int md_alloc_and_put(dev_t dev, char *name)
5778 {
5779 struct mddev *mddev = md_alloc(dev, name);
5780
5781 if (IS_ERR(mddev))
5782 return PTR_ERR(mddev);
5783 mddev_put(mddev);
5784 return 0;
5785 }
5786
5787 static void md_probe(dev_t dev)
5788 {
5789 if (MAJOR(dev) == MD_MAJOR && MINOR(dev) >= 512)
5790 return;
5791 if (create_on_open)
5792 md_alloc_and_put(dev, NULL);
5793 }
5794
5795 static int add_named_array(const char *val, const struct kernel_param *kp)
5796 {
5797 /*
5798 * val must be "md_*" or "mdNNN".
5799 * For "md_*" we allocate an array with a large free minor number, and
5800 * set the name to val. val must not already be an active name.
5801 * For "mdNNN" we allocate an array with the minor number NNN
5802 * which must not already be in use.
5803 */
5804 int len = strlen(val);
5805 char buf[DISK_NAME_LEN];
5806 unsigned long devnum;
5807
5808 while (len && val[len-1] == '\n')
5809 len--;
5810 if (len >= DISK_NAME_LEN)
5811 return -E2BIG;
5812 strscpy(buf, val, len+1);
5813 if (strncmp(buf, "md_", 3) == 0)
5814 return md_alloc_and_put(0, buf);
5815 if (strncmp(buf, "md", 2) == 0 &&
5816 isdigit(buf[2]) &&
5817 kstrtoul(buf+2, 10, &devnum) == 0 &&
5818 devnum <= MINORMASK)
5819 return md_alloc_and_put(MKDEV(MD_MAJOR, devnum), NULL);
5820
5821 return -EINVAL;
5822 }
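/*
 * Illustrative usage (array names are hypothetical): writing to the module
 * parameter mentioned in md_alloc() creates an array by name or by minor:
 *
 *	echo md_home > /sys/module/md_mod/parameters/new_array   # named array
 *	echo md127   > /sys/module/md_mod/parameters/new_array   # minor 127
 */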
5823
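/*
 * Safemode timer callback: fires once safemode_delay has elapsed without
 * further write activity.  It only flags safemode and wakes the md thread,
 * which is then expected to mark the array clean again.
 */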
5824 static void md_safemode_timeout(struct timer_list *t)
5825 {
5826 struct mddev *mddev = from_timer(mddev, t, safemode_timer);
5827
5828 mddev->safemode = 1;
5829 if (mddev->external)
5830 sysfs_notify_dirent_safe(mddev->sysfs_state);
5831
5832 md_wakeup_thread(mddev->thread);
5833 }
5834
5835 static int start_dirty_degraded;
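/* percpu_ref release callback for ->active_io: wakes anyone waiting on
 * ->sb_wait for all in-flight array I/O to drain (e.g. the suspend path).
 */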
5836 static void active_io_release(struct percpu_ref *ref)
5837 {
5838 struct mddev *mddev = container_of(ref, struct mddev, active_io);
5839
5840 wake_up(&mddev->sb_wait);
5841 }
5842
5843 int md_run(struct mddev *mddev)
5844 {
5845 int err;
5846 struct md_rdev *rdev;
5847 struct md_personality *pers;
5848 bool nowait = true;
5849
5850 if (list_empty(&mddev->disks))
5851 /* cannot run an array with no devices.. */
5852 return -EINVAL;
5853
5854 if (mddev->pers)
5855 return -EBUSY;
5856 /* Cannot run until previous stop completes properly */
5857 if (mddev->sysfs_active)
5858 return -EBUSY;
5859
5860 /*
5861 * Analyze all RAID superblock(s)
5862 */
5863 if (!mddev->raid_disks) {
5864 if (!mddev->persistent)
5865 return -EINVAL;
5866 err = analyze_sbs(mddev);
5867 if (err)
5868 return -EINVAL;
5869 }
5870
5871 if (mddev->level != LEVEL_NONE)
5872 request_module("md-level-%d", mddev->level);
5873 else if (mddev->clevel[0])
5874 request_module("md-%s", mddev->clevel);
5875
5876 /*
5877 * Drop all container device buffers, from now on
5878 * the only valid external interface is through the md
5879 * device.
5880 */
5881 mddev->has_superblocks = false;
5882 rdev_for_each(rdev, mddev) {
5883 if (test_bit(Faulty, &rdev->flags))
5884 continue;
5885 sync_blockdev(rdev->bdev);
5886 invalidate_bdev(rdev->bdev);
5887 if (mddev->ro != MD_RDONLY && rdev_read_only(rdev)) {
5888 mddev->ro = MD_RDONLY;
5889 if (mddev->gendisk)
5890 set_disk_ro(mddev->gendisk, 1);
5891 }
5892
5893 if (rdev->sb_page)
5894 mddev->has_superblocks = true;
5895
5896 /* perform some consistency tests on the device.
5897 * We don't want the data to overlap the metadata.
5898 * Internal bitmap issues have been handled elsewhere.
5899 */
5900 if (rdev->meta_bdev) {
5901 /* Nothing to check */;
5902 } else if (rdev->data_offset < rdev->sb_start) {
5903 if (mddev->dev_sectors &&
5904 rdev->data_offset + mddev->dev_sectors
5905 > rdev->sb_start) {
5906 pr_warn("md: %s: data overlaps metadata\n",
5907 mdname(mddev));
5908 return -EINVAL;
5909 }
5910 } else {
5911 if (rdev->sb_start + rdev->sb_size/512
5912 > rdev->data_offset) {
5913 pr_warn("md: %s: metadata overlaps data\n",
5914 mdname(mddev));
5915 return -EINVAL;
5916 }
5917 }
5918 sysfs_notify_dirent_safe(rdev->sysfs_state);
5919 nowait = nowait && bdev_nowait(rdev->bdev);
5920 }
5921
5922 err = percpu_ref_init(&mddev->active_io, active_io_release,
5923 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
5924 if (err)
5925 return err;
5926
5927 if (!bioset_initialized(&mddev->bio_set)) {
5928 err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5929 if (err)
5930 goto exit_active_io;
5931 }
5932 if (!bioset_initialized(&mddev->sync_set)) {
5933 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5934 if (err)
5935 goto exit_bio_set;
5936 }
5937
5938 if (!bioset_initialized(&mddev->io_clone_set)) {
5939 err = bioset_init(&mddev->io_clone_set, BIO_POOL_SIZE,
5940 offsetof(struct md_io_clone, bio_clone), 0);
5941 if (err)
5942 goto exit_sync_set;
5943 }
5944
5945 spin_lock(&pers_lock);
5946 pers = find_pers(mddev->level, mddev->clevel);
5947 if (!pers || !try_module_get(pers->owner)) {
5948 spin_unlock(&pers_lock);
5949 if (mddev->level != LEVEL_NONE)
5950 pr_warn("md: personality for level %d is not loaded!\n",
5951 mddev->level);
5952 else
5953 pr_warn("md: personality for level %s is not loaded!\n",
5954 mddev->clevel);
5955 err = -EINVAL;
5956 goto abort;
5957 }
5958 spin_unlock(&pers_lock);
5959 if (mddev->level != pers->level) {
5960 mddev->level = pers->level;
5961 mddev->new_level = pers->level;
5962 }
5963 strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
5964
5965 if (mddev->reshape_position != MaxSector &&
5966 pers->start_reshape == NULL) {
5967 /* This personality cannot handle reshaping... */
5968 module_put(pers->owner);
5969 err = -EINVAL;
5970 goto abort;
5971 }
5972
5973 if (pers->sync_request) {
5974 /* Warn if this is a potentially silly
5975 * configuration.
5976 */
5977 struct md_rdev *rdev2;
5978 int warned = 0;
5979
5980 rdev_for_each(rdev, mddev)
5981 rdev_for_each(rdev2, mddev) {
5982 if (rdev < rdev2 &&
5983 rdev->bdev->bd_disk ==
5984 rdev2->bdev->bd_disk) {
5985 pr_warn("%s: WARNING: %pg appears to be on the same physical disk as %pg.\n",
5986 mdname(mddev),
5987 rdev->bdev,
5988 rdev2->bdev);
5989 warned = 1;
5990 }
5991 }
5992
5993 if (warned)
5994 pr_warn("True protection against single-disk failure might be compromised.\n");
5995 }
5996
5997 mddev->recovery = 0;
5998 /* may be overridden by personality */
5999 mddev->resync_max_sectors = mddev->dev_sectors;
6000
6001 mddev->ok_start_degraded = start_dirty_degraded;
6002
6003 if (start_readonly && md_is_rdwr(mddev))
6004 mddev->ro = MD_AUTO_READ; /* read-only, but switch on first write */
6005
6006 err = pers->run(mddev);
6007 if (err)
6008 pr_warn("md: pers->run() failed ...\n");
6009 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
6010 WARN_ONCE(!mddev->external_size,
6011 "%s: default size too small, but 'external_size' not in effect?\n",
6012 __func__);
6013 pr_warn("md: invalid array_size %llu > default size %llu\n",
6014 (unsigned long long)mddev->array_sectors / 2,
6015 (unsigned long long)pers->size(mddev, 0, 0) / 2);
6016 err = -EINVAL;
6017 }
6018 if (err == 0 && pers->sync_request &&
6019 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
6020 struct bitmap *bitmap;
6021
6022 bitmap = md_bitmap_create(mddev, -1);
6023 if (IS_ERR(bitmap)) {
6024 err = PTR_ERR(bitmap);
6025 pr_warn("%s: failed to create bitmap (%d)\n",
6026 mdname(mddev), err);
6027 } else
6028 mddev->bitmap = bitmap;
6029
6030 }
6031 if (err)
6032 goto bitmap_abort;
6033
6034 if (mddev->bitmap_info.max_write_behind > 0) {
6035 bool create_pool = false;
6036
6037 rdev_for_each(rdev, mddev) {
6038 if (test_bit(WriteMostly, &rdev->flags) &&
6039 rdev_init_serial(rdev))
6040 create_pool = true;
6041 }
6042 if (create_pool && mddev->serial_info_pool == NULL) {
6043 mddev->serial_info_pool =
6044 mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
6045 sizeof(struct serial_info));
6046 if (!mddev->serial_info_pool) {
6047 err = -ENOMEM;
6048 goto bitmap_abort;
6049 }
6050 }
6051 }
6052
6053 if (mddev->queue) {
6054 bool nonrot = true;
6055
6056 rdev_for_each(rdev, mddev) {
6057 if (rdev->raid_disk >= 0 && !bdev_nonrot(rdev->bdev)) {
6058 nonrot = false;
6059 break;
6060 }
6061 }
6062 if (mddev->degraded)
6063 nonrot = false;
6064 if (nonrot)
6065 blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
6066 else
6067 blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
6068 blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue);
6069
6070 /* Set the NOWAIT flags if all underlying devices support it */
6071 if (nowait)
6072 blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
6073 }
6074 if (pers->sync_request) {
6075 if (mddev->kobj.sd &&
6076 sysfs_create_group(&mddev->kobj, &md_redundancy_group))
6077 pr_warn("md: cannot register extra attributes for %s\n",
6078 mdname(mddev));
6079 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
6080 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
6081 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
6082 } else if (mddev->ro == MD_AUTO_READ)
6083 mddev->ro = MD_RDWR;
6084
6085 atomic_set(&mddev->max_corr_read_errors,
6086 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
6087 mddev->safemode = 0;
6088 if (mddev_is_clustered(mddev))
6089 mddev->safemode_delay = 0;
6090 else
6091 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
6092 mddev->in_sync = 1;
6093 smp_wmb();
6094 spin_lock(&mddev->lock);
6095 mddev->pers = pers;
6096 spin_unlock(&mddev->lock);
6097 rdev_for_each(rdev, mddev)
6098 if (rdev->raid_disk >= 0)
6099 sysfs_link_rdev(mddev, rdev); /* failure here is OK */
6100
6101 if (mddev->degraded && md_is_rdwr(mddev))
6102 /* This ensures that recovering status is reported immediately
6103 * via sysfs - until a lack of spares is confirmed.
6104 */
6105 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6106 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6107
6108 if (mddev->sb_flags)
6109 md_update_sb(mddev, 0);
6110
6111 md_new_event();
6112 return 0;
6113
6114 bitmap_abort:
6115 mddev_detach(mddev);
6116 if (mddev->private)
6117 pers->free(mddev, mddev->private);
6118 mddev->private = NULL;
6119 module_put(pers->owner);
6120 md_bitmap_destroy(mddev);
6121 abort:
6122 bioset_exit(&mddev->io_clone_set);
6123 exit_sync_set:
6124 bioset_exit(&mddev->sync_set);
6125 exit_bio_set:
6126 bioset_exit(&mddev->bio_set);
6127 exit_active_io:
6128 percpu_ref_exit(&mddev->active_io);
6129 return err;
6130 }
6131 EXPORT_SYMBOL_GPL(md_run);
6132
6133 int do_md_run(struct mddev *mddev)
6134 {
6135 int err;
6136
6137 set_bit(MD_NOT_READY, &mddev->flags);
6138 err = md_run(mddev);
6139 if (err)
6140 goto out;
6141 err = md_bitmap_load(mddev);
6142 if (err) {
6143 md_bitmap_destroy(mddev);
6144 goto out;
6145 }
6146
6147 if (mddev_is_clustered(mddev))
6148 md_allow_write(mddev);
6149
6150 /* run start up tasks that require md_thread */
6151 md_start(mddev);
6152
6153 md_wakeup_thread(mddev->thread);
6154 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
6155
6156 set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
6157 clear_bit(MD_NOT_READY, &mddev->flags);
6158 mddev->changed = 1;
6159 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
6160 sysfs_notify_dirent_safe(mddev->sysfs_state);
6161 sysfs_notify_dirent_safe(mddev->sysfs_action);
6162 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
6163 out:
6164 clear_bit(MD_NOT_READY, &mddev->flags);
6165 return err;
6166 }
6167
6168 int md_start(struct mddev *mddev)
6169 {
6170 int ret = 0;
6171
6172 if (mddev->pers->start) {
6173 set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6174 md_wakeup_thread(mddev->thread);
6175 ret = mddev->pers->start(mddev);
6176 clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6177 md_wakeup_thread(mddev->sync_thread);
6178 }
6179 return ret;
6180 }
6181 EXPORT_SYMBOL_GPL(md_start);
6182
6183 static int restart_array(struct mddev *mddev)
6184 {
6185 struct gendisk *disk = mddev->gendisk;
6186 struct md_rdev *rdev;
6187 bool has_journal = false;
6188 bool has_readonly = false;
6189
6190 /* Complain if it has no devices */
6191 if (list_empty(&mddev->disks))
6192 return -ENXIO;
6193 if (!mddev->pers)
6194 return -EINVAL;
6195 if (md_is_rdwr(mddev))
6196 return -EBUSY;
6197
6198 rcu_read_lock();
6199 rdev_for_each_rcu(rdev, mddev) {
6200 if (test_bit(Journal, &rdev->flags) &&
6201 !test_bit(Faulty, &rdev->flags))
6202 has_journal = true;
6203 if (rdev_read_only(rdev))
6204 has_readonly = true;
6205 }
6206 rcu_read_unlock();
6207 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal)
6208 /* Don't restart rw with journal missing/faulty */
6209 return -EINVAL;
6210 if (has_readonly)
6211 return -EROFS;
6212
6213 mddev->safemode = 0;
6214 mddev->ro = MD_RDWR;
6215 set_disk_ro(disk, 0);
6216 pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
6217 /* Kick recovery or resync if necessary */
6218 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6219 md_wakeup_thread(mddev->thread);
6220 md_wakeup_thread(mddev->sync_thread);
6221 sysfs_notify_dirent_safe(mddev->sysfs_state);
6222 return 0;
6223 }
6224
6225 static void md_clean(struct mddev *mddev)
6226 {
6227 mddev->array_sectors = 0;
6228 mddev->external_size = 0;
6229 mddev->dev_sectors = 0;
6230 mddev->raid_disks = 0;
6231 mddev->recovery_cp = 0;
6232 mddev->resync_min = 0;
6233 mddev->resync_max = MaxSector;
6234 mddev->reshape_position = MaxSector;
6235 /* we still need mddev->external in export_rdev, do not clear it yet */
6236 mddev->persistent = 0;
6237 mddev->level = LEVEL_NONE;
6238 mddev->clevel[0] = 0;
6239 /*
6240 * Don't clear MD_CLOSING, or mddev can be opened again.
6241 * 'hold_active != 0' means mddev is still in the creation
6242 * process and will be used later.
6243 */
6244 if (mddev->hold_active)
6245 mddev->flags = 0;
6246 else
6247 mddev->flags &= BIT_ULL_MASK(MD_CLOSING);
6248 mddev->sb_flags = 0;
6249 mddev->ro = MD_RDWR;
6250 mddev->metadata_type[0] = 0;
6251 mddev->chunk_sectors = 0;
6252 mddev->ctime = mddev->utime = 0;
6253 mddev->layout = 0;
6254 mddev->max_disks = 0;
6255 mddev->events = 0;
6256 mddev->can_decrease_events = 0;
6257 mddev->delta_disks = 0;
6258 mddev->reshape_backwards = 0;
6259 mddev->new_level = LEVEL_NONE;
6260 mddev->new_layout = 0;
6261 mddev->new_chunk_sectors = 0;
6262 mddev->curr_resync = MD_RESYNC_NONE;
6263 atomic64_set(&mddev->resync_mismatches, 0);
6264 mddev->suspend_lo = mddev->suspend_hi = 0;
6265 mddev->sync_speed_min = mddev->sync_speed_max = 0;
6266 mddev->recovery = 0;
6267 mddev->in_sync = 0;
6268 mddev->changed = 0;
6269 mddev->degraded = 0;
6270 mddev->safemode = 0;
6271 mddev->private = NULL;
6272 mddev->cluster_info = NULL;
6273 mddev->bitmap_info.offset = 0;
6274 mddev->bitmap_info.default_offset = 0;
6275 mddev->bitmap_info.default_space = 0;
6276 mddev->bitmap_info.chunksize = 0;
6277 mddev->bitmap_info.daemon_sleep = 0;
6278 mddev->bitmap_info.max_write_behind = 0;
6279 mddev->bitmap_info.nodes = 0;
6280 }
6281
6282 static void __md_stop_writes(struct mddev *mddev)
6283 {
6284 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6285 if (work_pending(&mddev->del_work))
6286 flush_workqueue(md_misc_wq);
6287 if (mddev->sync_thread) {
6288 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6289 md_reap_sync_thread(mddev);
6290 }
6291
6292 del_timer_sync(&mddev->safemode_timer);
6293
6294 if (mddev->pers && mddev->pers->quiesce) {
6295 mddev->pers->quiesce(mddev, 1);
6296 mddev->pers->quiesce(mddev, 0);
6297 }
6298 md_bitmap_flush(mddev);
6299
6300 if (md_is_rdwr(mddev) &&
6301 ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
6302 mddev->sb_flags)) {
6303 /* mark array as shutdown cleanly */
6304 if (!mddev_is_clustered(mddev))
6305 mddev->in_sync = 1;
6306 md_update_sb(mddev, 1);
6307 }
6308 /* disable policy to guarantee rdevs free resources for serialization */
6309 mddev->serialize_policy = 0;
6310 mddev_destroy_serial_pool(mddev, NULL, true);
6311 }
6312
6313 void md_stop_writes(struct mddev *mddev)
6314 {
6315 mddev_lock_nointr(mddev);
6316 __md_stop_writes(mddev);
6317 mddev_unlock(mddev);
6318 }
6319 EXPORT_SYMBOL_GPL(md_stop_writes);
6320
6321 static void mddev_detach(struct mddev *mddev)
6322 {
6323 md_bitmap_wait_behind_writes(mddev);
6324 if (mddev->pers && mddev->pers->quiesce && !is_md_suspended(mddev)) {
6325 mddev->pers->quiesce(mddev, 1);
6326 mddev->pers->quiesce(mddev, 0);
6327 }
6328 md_unregister_thread(mddev, &mddev->thread);
6329 if (mddev->queue)
6330 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
6331 }
6332
6333 static void __md_stop(struct mddev *mddev)
6334 {
6335 struct md_personality *pers = mddev->pers;
6336 md_bitmap_destroy(mddev);
6337 mddev_detach(mddev);
6338 /* Ensure ->event_work is done */
6339 if (mddev->event_work.func)
6340 flush_workqueue(md_misc_wq);
6341 spin_lock(&mddev->lock);
6342 mddev->pers = NULL;
6343 spin_unlock(&mddev->lock);
6344 if (mddev->private)
6345 pers->free(mddev, mddev->private);
6346 mddev->private = NULL;
6347 if (pers->sync_request && mddev->to_remove == NULL)
6348 mddev->to_remove = &md_redundancy_group;
6349 module_put(pers->owner);
6350 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6351
6352 percpu_ref_exit(&mddev->active_io);
6353 bioset_exit(&mddev->bio_set);
6354 bioset_exit(&mddev->sync_set);
6355 bioset_exit(&mddev->io_clone_set);
6356 }
6357
6358 void md_stop(struct mddev *mddev)
6359 {
6360 lockdep_assert_held(&mddev->reconfig_mutex);
6361
6362 /* stop the array and free any attached data structures.
6363 * This is called from dm-raid.
6364 */
6365 __md_stop_writes(mddev);
6366 __md_stop(mddev);
6367 percpu_ref_exit(&mddev->writes_pending);
6368 }
6369
6370 EXPORT_SYMBOL_GPL(md_stop);
6371
6372 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
6373 {
6374 int err = 0;
6375 int did_freeze = 0;
6376
6377 if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
6378 return -EBUSY;
6379
6380 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6381 did_freeze = 1;
6382 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6383 md_wakeup_thread(mddev->thread);
6384 }
6385 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
6386 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6387
6388 /*
6389 * Thread might be blocked waiting for metadata update which will now
6390 * never happen
6391 */
6392 md_wakeup_thread_directly(mddev->sync_thread);
6393
6394 mddev_unlock(mddev);
6395 wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
6396 &mddev->recovery));
6397 wait_event(mddev->sb_wait,
6398 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
6399 mddev_lock_nointr(mddev);
6400
6401 mutex_lock(&mddev->open_mutex);
6402 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
6403 mddev->sync_thread ||
6404 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
6405 pr_warn("md: %s still in use.\n",mdname(mddev));
6406 err = -EBUSY;
6407 goto out;
6408 }
6409
6410 if (mddev->pers) {
6411 __md_stop_writes(mddev);
6412
6413 if (mddev->ro == MD_RDONLY) {
6414 err = -ENXIO;
6415 goto out;
6416 }
6417
6418 mddev->ro = MD_RDONLY;
6419 set_disk_ro(mddev->gendisk, 1);
6420 }
6421
6422 out:
6423 if ((mddev->pers && !err) || did_freeze) {
6424 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6425 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6426 md_wakeup_thread(mddev->thread);
6427 sysfs_notify_dirent_safe(mddev->sysfs_state);
6428 }
6429
6430 mutex_unlock(&mddev->open_mutex);
6431 return err;
6432 }
6433
6434 /* mode:
6435 * 0 - completely stop and dis-assemble array
6436 * 2 - stop but do not disassemble array
6437 */
6438 static int do_md_stop(struct mddev *mddev, int mode,
6439 struct block_device *bdev)
6440 {
6441 struct gendisk *disk = mddev->gendisk;
6442 struct md_rdev *rdev;
6443 int did_freeze = 0;
6444
6445 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6446 did_freeze = 1;
6447 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6448 md_wakeup_thread(mddev->thread);
6449 }
6450 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
6451 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6452
6453 /*
6454 * Thread might be blocked waiting for metadata update which will now
6455 * never happen
6456 */
6457 md_wakeup_thread_directly(mddev->sync_thread);
6458
6459 mddev_unlock(mddev);
6460 wait_event(resync_wait, (mddev->sync_thread == NULL &&
6461 !test_bit(MD_RECOVERY_RUNNING,
6462 &mddev->recovery)));
6463 mddev_lock_nointr(mddev);
6464
6465 mutex_lock(&mddev->open_mutex);
6466 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
6467 mddev->sysfs_active ||
6468 mddev->sync_thread ||
6469 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
6470 pr_warn("md: %s still in use.\n",mdname(mddev));
6471 mutex_unlock(&mddev->open_mutex);
6472 if (did_freeze) {
6473 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6474 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6475 md_wakeup_thread(mddev->thread);
6476 }
6477 return -EBUSY;
6478 }
6479 if (mddev->pers) {
6480 if (!md_is_rdwr(mddev))
6481 set_disk_ro(disk, 0);
6482
6483 __md_stop_writes(mddev);
6484 __md_stop(mddev);
6485
6486 /* tell userspace to handle 'inactive' */
6487 sysfs_notify_dirent_safe(mddev->sysfs_state);
6488
6489 rdev_for_each(rdev, mddev)
6490 if (rdev->raid_disk >= 0)
6491 sysfs_unlink_rdev(mddev, rdev);
6492
6493 set_capacity_and_notify(disk, 0);
6494 mutex_unlock(&mddev->open_mutex);
6495 mddev->changed = 1;
6496
6497 if (!md_is_rdwr(mddev))
6498 mddev->ro = MD_RDWR;
6499 } else
6500 mutex_unlock(&mddev->open_mutex);
6501 /*
6502 * Free resources if final stop
6503 */
6504 if (mode == 0) {
6505 pr_info("md: %s stopped.\n", mdname(mddev));
6506
6507 if (mddev->bitmap_info.file) {
6508 struct file *f = mddev->bitmap_info.file;
6509 spin_lock(&mddev->lock);
6510 mddev->bitmap_info.file = NULL;
6511 spin_unlock(&mddev->lock);
6512 fput(f);
6513 }
6514 mddev->bitmap_info.offset = 0;
6515
6516 export_array(mddev);
6517
6518 md_clean(mddev);
6519 if (mddev->hold_active == UNTIL_STOP)
6520 mddev->hold_active = 0;
6521 }
6522 md_new_event();
6523 sysfs_notify_dirent_safe(mddev->sysfs_state);
6524 return 0;
6525 }
6526
6527 #ifndef MODULE
6528 static void autorun_array(struct mddev *mddev)
6529 {
6530 struct md_rdev *rdev;
6531 int err;
6532
6533 if (list_empty(&mddev->disks))
6534 return;
6535
6536 pr_info("md: running: ");
6537
6538 rdev_for_each(rdev, mddev) {
6539 pr_cont("<%pg>", rdev->bdev);
6540 }
6541 pr_cont("\n");
6542
6543 err = do_md_run(mddev);
6544 if (err) {
6545 pr_warn("md: do_md_run() returned %d\n", err);
6546 do_md_stop(mddev, 0, NULL);
6547 }
6548 }
6549
6550 /*
6551 * let's try to run arrays based on all disks that have arrived
6552 * until now. (those are in pending_raid_disks)
6553 *
6554 * the method: pick the first pending disk, collect all disks with
6555 * the same UUID, remove all from the pending list and put them into
6556 * the 'same_array' list. Then order this list based on superblock
6557 * update time (freshest comes first), kick out 'old' disks and
6558 * compare superblocks. If everything's fine then run it.
6559 *
6560 * If "unit" is allocated, then bump its reference count
6561 */
6562 static void autorun_devices(int part)
6563 {
6564 struct md_rdev *rdev0, *rdev, *tmp;
6565 struct mddev *mddev;
6566
6567 pr_info("md: autorun ...\n");
6568 while (!list_empty(&pending_raid_disks)) {
6569 int unit;
6570 dev_t dev;
6571 LIST_HEAD(candidates);
6572 rdev0 = list_entry(pending_raid_disks.next,
6573 struct md_rdev, same_set);
6574
6575 pr_debug("md: considering %pg ...\n", rdev0->bdev);
6576 INIT_LIST_HEAD(&candidates);
6577 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
6578 if (super_90_load(rdev, rdev0, 0) >= 0) {
6579 pr_debug("md: adding %pg ...\n",
6580 rdev->bdev);
6581 list_move(&rdev->same_set, &candidates);
6582 }
6583 /*
6584 * now we have a set of devices, with all of them having
6585 * mostly sane superblocks. It's time to allocate the
6586 * mddev.
6587 */
6588 if (part) {
6589 dev = MKDEV(mdp_major,
6590 rdev0->preferred_minor << MdpMinorShift);
6591 unit = MINOR(dev) >> MdpMinorShift;
6592 } else {
6593 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
6594 unit = MINOR(dev);
6595 }
6596 if (rdev0->preferred_minor != unit) {
6597 pr_warn("md: unit number in %pg is bad: %d\n",
6598 rdev0->bdev, rdev0->preferred_minor);
6599 break;
6600 }
6601
6602 mddev = md_alloc(dev, NULL);
6603 if (IS_ERR(mddev))
6604 break;
6605
6606 if (mddev_lock(mddev))
6607 pr_warn("md: %s locked, cannot run\n", mdname(mddev));
6608 else if (mddev->raid_disks || mddev->major_version
6609 || !list_empty(&mddev->disks)) {
6610 pr_warn("md: %s already running, cannot run %pg\n",
6611 mdname(mddev), rdev0->bdev);
6612 mddev_unlock(mddev);
6613 } else {
6614 pr_debug("md: created %s\n", mdname(mddev));
6615 mddev->persistent = 1;
6616 rdev_for_each_list(rdev, tmp, &candidates) {
6617 list_del_init(&rdev->same_set);
6618 if (bind_rdev_to_array(rdev, mddev))
6619 export_rdev(rdev, mddev);
6620 }
6621 autorun_array(mddev);
6622 mddev_unlock(mddev);
6623 }
6624 /* on success, candidates will be empty; on error
6625 * it won't be...
6626 */
6627 rdev_for_each_list(rdev, tmp, &candidates) {
6628 list_del_init(&rdev->same_set);
6629 export_rdev(rdev, mddev);
6630 }
6631 mddev_put(mddev);
6632 }
6633 pr_info("md: ... autorun DONE.\n");
6634 }
6635 #endif /* !MODULE */
6636
6637 static int get_version(void __user *arg)
6638 {
6639 mdu_version_t ver;
6640
6641 ver.major = MD_MAJOR_VERSION;
6642 ver.minor = MD_MINOR_VERSION;
6643 ver.patchlevel = MD_PATCHLEVEL_VERSION;
6644
6645 if (copy_to_user(arg, &ver, sizeof(ver)))
6646 return -EFAULT;
6647
6648 return 0;
6649 }
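/*
 * Userspace sketch (hedged: device path and error handling are illustrative
 * only).  get_version() above services the RAID_VERSION ioctl from md_u.h:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_u.h>
 *
 *	int fd = open("/dev/md0", O_RDONLY);
 *	mdu_version_t ver;
 *	if (fd >= 0 && ioctl(fd, RAID_VERSION, &ver) == 0)
 *		printf("md driver %d.%d.%d\n", ver.major, ver.minor, ver.patchlevel);
 */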
6650
6651 static int get_array_info(struct mddev *mddev, void __user *arg)
6652 {
6653 mdu_array_info_t info;
6654 int nr,working,insync,failed,spare;
6655 struct md_rdev *rdev;
6656
6657 nr = working = insync = failed = spare = 0;
6658 rcu_read_lock();
6659 rdev_for_each_rcu(rdev, mddev) {
6660 nr++;
6661 if (test_bit(Faulty, &rdev->flags))
6662 failed++;
6663 else {
6664 working++;
6665 if (test_bit(In_sync, &rdev->flags))
6666 insync++;
6667 else if (test_bit(Journal, &rdev->flags))
6668 /* TODO: add journal count to md_u.h */
6669 ;
6670 else
6671 spare++;
6672 }
6673 }
6674 rcu_read_unlock();
6675
6676 info.major_version = mddev->major_version;
6677 info.minor_version = mddev->minor_version;
6678 info.patch_version = MD_PATCHLEVEL_VERSION;
6679 info.ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
6680 info.level = mddev->level;
6681 info.size = mddev->dev_sectors / 2;
6682 if (info.size != mddev->dev_sectors / 2) /* overflow */
6683 info.size = -1;
6684 info.nr_disks = nr;
6685 info.raid_disks = mddev->raid_disks;
6686 info.md_minor = mddev->md_minor;
6687 info.not_persistent= !mddev->persistent;
6688
6689 info.utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
6690 info.state = 0;
6691 if (mddev->in_sync)
6692 info.state = (1<<MD_SB_CLEAN);
6693 if (mddev->bitmap && mddev->bitmap_info.offset)
6694 info.state |= (1<<MD_SB_BITMAP_PRESENT);
6695 if (mddev_is_clustered(mddev))
6696 info.state |= (1<<MD_SB_CLUSTERED);
6697 info.active_disks = insync;
6698 info.working_disks = working;
6699 info.failed_disks = failed;
6700 info.spare_disks = spare;
6701
6702 info.layout = mddev->layout;
6703 info.chunk_size = mddev->chunk_sectors << 9;
6704
6705 if (copy_to_user(arg, &info, sizeof(info)))
6706 return -EFAULT;
6707
6708 return 0;
6709 }
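/*
 * Userspace sketch (illustrative; fd refers to an open md device node):
 * get_array_info() backs the GET_ARRAY_INFO ioctl, filling an
 * mdu_array_info_t with the counts computed above:
 *
 *	mdu_array_info_t info;
 *	if (ioctl(fd, GET_ARRAY_INFO, &info) == 0)
 *		printf("level %d, %d raid disks, %d active, %d failed\n",
 *		       info.level, info.raid_disks,
 *		       info.active_disks, info.failed_disks);
 */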
6710
6711 static int get_bitmap_file(struct mddev *mddev, void __user *arg)
6712 {
6713 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
6714 char *ptr;
6715 int err;
6716
6717 file = kzalloc(sizeof(*file), GFP_NOIO);
6718 if (!file)
6719 return -ENOMEM;
6720
6721 err = 0;
6722 spin_lock(&mddev->lock);
6723 /* bitmap enabled */
6724 if (mddev->bitmap_info.file) {
6725 ptr = file_path(mddev->bitmap_info.file, file->pathname,
6726 sizeof(file->pathname));
6727 if (IS_ERR(ptr))
6728 err = PTR_ERR(ptr);
6729 else
6730 memmove(file->pathname, ptr,
6731 sizeof(file->pathname)-(ptr-file->pathname));
6732 }
6733 spin_unlock(&mddev->lock);
6734
6735 if (err == 0 &&
6736 copy_to_user(arg, file, sizeof(*file)))
6737 err = -EFAULT;
6738
6739 kfree(file);
6740 return err;
6741 }
6742
6743 static int get_disk_info(struct mddev *mddev, void __user *arg)
6744 {
6745 mdu_disk_info_t info;
6746 struct md_rdev *rdev;
6747
6748 if (copy_from_user(&info, arg, sizeof(info)))
6749 return -EFAULT;
6750
6751 rcu_read_lock();
6752 rdev = md_find_rdev_nr_rcu(mddev, info.number);
6753 if (rdev) {
6754 info.major = MAJOR(rdev->bdev->bd_dev);
6755 info.minor = MINOR(rdev->bdev->bd_dev);
6756 info.raid_disk = rdev->raid_disk;
6757 info.state = 0;
6758 if (test_bit(Faulty, &rdev->flags))
6759 info.state |= (1<<MD_DISK_FAULTY);
6760 else if (test_bit(In_sync, &rdev->flags)) {
6761 info.state |= (1<<MD_DISK_ACTIVE);
6762 info.state |= (1<<MD_DISK_SYNC);
6763 }
6764 if (test_bit(Journal, &rdev->flags))
6765 info.state |= (1<<MD_DISK_JOURNAL);
6766 if (test_bit(WriteMostly, &rdev->flags))
6767 info.state |= (1<<MD_DISK_WRITEMOSTLY);
6768 if (test_bit(FailFast, &rdev->flags))
6769 info.state |= (1<<MD_DISK_FAILFAST);
6770 } else {
6771 info.major = info.minor = 0;
6772 info.raid_disk = -1;
6773 info.state = (1<<MD_DISK_REMOVED);
6774 }
6775 rcu_read_unlock();
6776
6777 if (copy_to_user(arg, &info, sizeof(info)))
6778 return -EFAULT;
6779
6780 return 0;
6781 }
6782
6783 int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
6784 {
6785 struct md_rdev *rdev;
6786 dev_t dev = MKDEV(info->major,info->minor);
6787
6788 if (mddev_is_clustered(mddev) &&
6789 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
6790 pr_warn("%s: Cannot add to clustered mddev.\n",
6791 mdname(mddev));
6792 return -EINVAL;
6793 }
6794
6795 if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
6796 return -EOVERFLOW;
6797
6798 if (!mddev->raid_disks) {
6799 int err;
6800 /* expecting a device which has a superblock */
6801 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
6802 if (IS_ERR(rdev)) {
6803 pr_warn("md: md_import_device returned %ld\n",
6804 PTR_ERR(rdev));
6805 return PTR_ERR(rdev);
6806 }
6807 if (!list_empty(&mddev->disks)) {
6808 struct md_rdev *rdev0
6809 = list_entry(mddev->disks.next,
6810 struct md_rdev, same_set);
6811 err = super_types[mddev->major_version]
6812 .load_super(rdev, rdev0, mddev->minor_version);
6813 if (err < 0) {
6814 pr_warn("md: %pg has different UUID to %pg\n",
6815 rdev->bdev,
6816 rdev0->bdev);
6817 export_rdev(rdev, mddev);
6818 return -EINVAL;
6819 }
6820 }
6821 err = bind_rdev_to_array(rdev, mddev);
6822 if (err)
6823 export_rdev(rdev, mddev);
6824 return err;
6825 }
6826
6827 /*
6828 * md_add_new_disk can be used once the array is assembled
6829 * to add "hot spares". They must already have a superblock
6830 * written
6831 */
6832 if (mddev->pers) {
6833 int err;
6834 if (!mddev->pers->hot_add_disk) {
6835 pr_warn("%s: personality does not support diskops!\n",
6836 mdname(mddev));
6837 return -EINVAL;
6838 }
6839 if (mddev->persistent)
6840 rdev = md_import_device(dev, mddev->major_version,
6841 mddev->minor_version);
6842 else
6843 rdev = md_import_device(dev, -1, -1);
6844 if (IS_ERR(rdev)) {
6845 pr_warn("md: md_import_device returned %ld\n",
6846 PTR_ERR(rdev));
6847 return PTR_ERR(rdev);
6848 }
6849 /* set saved_raid_disk if appropriate */
6850 if (!mddev->persistent) {
6851 if (info->state & (1<<MD_DISK_SYNC) &&
6852 info->raid_disk < mddev->raid_disks) {
6853 rdev->raid_disk = info->raid_disk;
6854 clear_bit(Bitmap_sync, &rdev->flags);
6855 } else
6856 rdev->raid_disk = -1;
6857 rdev->saved_raid_disk = rdev->raid_disk;
6858 } else
6859 super_types[mddev->major_version].
6860 validate_super(mddev, NULL/*freshest*/, rdev);
6861 if ((info->state & (1<<MD_DISK_SYNC)) &&
6862 rdev->raid_disk != info->raid_disk) {
6863 /* This was a hot-add request, but the event count
6864 * doesn't match, so reject it.
6865 */
6866 export_rdev(rdev, mddev);
6867 return -EINVAL;
6868 }
6869
6870 clear_bit(In_sync, &rdev->flags); /* just to be sure */
6871 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6872 set_bit(WriteMostly, &rdev->flags);
6873 else
6874 clear_bit(WriteMostly, &rdev->flags);
6875 if (info->state & (1<<MD_DISK_FAILFAST))
6876 set_bit(FailFast, &rdev->flags);
6877 else
6878 clear_bit(FailFast, &rdev->flags);
6879
6880 if (info->state & (1<<MD_DISK_JOURNAL)) {
6881 struct md_rdev *rdev2;
6882 bool has_journal = false;
6883
6884 /* make sure no existing journal disk */
6885 rdev_for_each(rdev2, mddev) {
6886 if (test_bit(Journal, &rdev2->flags)) {
6887 has_journal = true;
6888 break;
6889 }
6890 }
6891 if (has_journal || mddev->bitmap) {
6892 export_rdev(rdev, mddev);
6893 return -EBUSY;
6894 }
6895 set_bit(Journal, &rdev->flags);
6896 }
6897 /*
6898 * check whether the device shows up in other nodes
6899 */
6900 if (mddev_is_clustered(mddev)) {
6901 if (info->state & (1 << MD_DISK_CANDIDATE))
6902 set_bit(Candidate, &rdev->flags);
6903 else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
6904 /* --add initiated by this node */
6905 err = md_cluster_ops->add_new_disk(mddev, rdev);
6906 if (err) {
6907 export_rdev(rdev, mddev);
6908 return err;
6909 }
6910 }
6911 }
6912
6913 rdev->raid_disk = -1;
6914 err = bind_rdev_to_array(rdev, mddev);
6915
6916 if (err)
6917 export_rdev(rdev, mddev);
6918
6919 if (mddev_is_clustered(mddev)) {
6920 if (info->state & (1 << MD_DISK_CANDIDATE)) {
6921 if (!err) {
6922 err = md_cluster_ops->new_disk_ack(mddev,
6923 err == 0);
6924 if (err)
6925 md_kick_rdev_from_array(rdev);
6926 }
6927 } else {
6928 if (err)
6929 md_cluster_ops->add_new_disk_cancel(mddev);
6930 else
6931 err = add_bound_rdev(rdev);
6932 }
6933
6934 } else if (!err)
6935 err = add_bound_rdev(rdev);
6936
6937 return err;
6938 }
6939
6940 /* otherwise, md_add_new_disk is only allowed
6941 * for major_version==0 superblocks
6942 */
6943 if (mddev->major_version != 0) {
6944 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
6945 return -EINVAL;
6946 }
6947
6948 if (!(info->state & (1<<MD_DISK_FAULTY))) {
6949 int err;
6950 rdev = md_import_device(dev, -1, 0);
6951 if (IS_ERR(rdev)) {
6952 pr_warn("md: error, md_import_device() returned %ld\n",
6953 PTR_ERR(rdev));
6954 return PTR_ERR(rdev);
6955 }
6956 rdev->desc_nr = info->number;
6957 if (info->raid_disk < mddev->raid_disks)
6958 rdev->raid_disk = info->raid_disk;
6959 else
6960 rdev->raid_disk = -1;
6961
6962 if (rdev->raid_disk < mddev->raid_disks)
6963 if (info->state & (1<<MD_DISK_SYNC))
6964 set_bit(In_sync, &rdev->flags);
6965
6966 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6967 set_bit(WriteMostly, &rdev->flags);
6968 if (info->state & (1<<MD_DISK_FAILFAST))
6969 set_bit(FailFast, &rdev->flags);
6970
6971 if (!mddev->persistent) {
6972 pr_debug("md: nonpersistent superblock ...\n");
6973 rdev->sb_start = bdev_nr_sectors(rdev->bdev);
6974 } else
6975 rdev->sb_start = calc_dev_sboffset(rdev);
6976 rdev->sectors = rdev->sb_start;
6977
6978 err = bind_rdev_to_array(rdev, mddev);
6979 if (err) {
6980 export_rdev(rdev, mddev);
6981 return err;
6982 }
6983 }
6984
6985 return 0;
6986 }
6987
6988 static int hot_remove_disk(struct mddev *mddev, dev_t dev)
6989 {
6990 struct md_rdev *rdev;
6991
6992 if (!mddev->pers)
6993 return -ENODEV;
6994
6995 rdev = find_rdev(mddev, dev);
6996 if (!rdev)
6997 return -ENXIO;
6998
6999 if (rdev->raid_disk < 0)
7000 goto kick_rdev;
7001
7002 clear_bit(Blocked, &rdev->flags);
7003 remove_and_add_spares(mddev, rdev);
7004
7005 if (rdev->raid_disk >= 0)
7006 goto busy;
7007
7008 kick_rdev:
7009 if (mddev_is_clustered(mddev)) {
7010 if (md_cluster_ops->remove_disk(mddev, rdev))
7011 goto busy;
7012 }
7013
7014 md_kick_rdev_from_array(rdev);
7015 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
7016 if (mddev->thread)
7017 md_wakeup_thread(mddev->thread);
7018 else
7019 md_update_sb(mddev, 1);
7020 md_new_event();
7021
7022 return 0;
7023 busy:
7024 pr_debug("md: cannot remove active disk %pg from %s ...\n",
7025 rdev->bdev, mdname(mddev));
7026 return -EBUSY;
7027 }
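/*
 * Note: this is the handler behind the HOT_REMOVE_DISK ioctl, which mdadm
 * issues for e.g. "mdadm --manage /dev/md0 --remove /dev/sdX" (device names
 * here are illustrative).  The -EBUSY path above is what userspace sees when
 * the member is still an active raid_disk.
 */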
7028
7029 static int hot_add_disk(struct mddev *mddev, dev_t dev)
7030 {
7031 int err;
7032 struct md_rdev *rdev;
7033
7034 if (!mddev->pers)
7035 return -ENODEV;
7036
7037 if (mddev->major_version != 0) {
7038 pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
7039 mdname(mddev));
7040 return -EINVAL;
7041 }
7042 if (!mddev->pers->hot_add_disk) {
7043 pr_warn("%s: personality does not support diskops!\n",
7044 mdname(mddev));
7045 return -EINVAL;
7046 }
7047
7048 rdev = md_import_device(dev, -1, 0);
7049 if (IS_ERR(rdev)) {
7050 pr_warn("md: error, md_import_device() returned %ld\n",
7051 PTR_ERR(rdev));
7052 return -EINVAL;
7053 }
7054
7055 if (mddev->persistent)
7056 rdev->sb_start = calc_dev_sboffset(rdev);
7057 else
7058 rdev->sb_start = bdev_nr_sectors(rdev->bdev);
7059
7060 rdev->sectors = rdev->sb_start;
7061
7062 if (test_bit(Faulty, &rdev->flags)) {
7063 pr_warn("md: can not hot-add faulty %pg disk to %s!\n",
7064 rdev->bdev, mdname(mddev));
7065 err = -EINVAL;
7066 goto abort_export;
7067 }
7068
7069 clear_bit(In_sync, &rdev->flags);
7070 rdev->desc_nr = -1;
7071 rdev->saved_raid_disk = -1;
7072 err = bind_rdev_to_array(rdev, mddev);
7073 if (err)
7074 goto abort_export;
7075
7076 /*
7077 * The rest had better be atomic; we can have disk failures
7078 * noticed in interrupt contexts ...
7079 */
7080
7081 rdev->raid_disk = -1;
7082
7083 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
7084 if (!mddev->thread)
7085 md_update_sb(mddev, 1);
7086 /*
7087 * If the new disk does not support REQ_NOWAIT,
7088 * disable it for the whole MD device.
7089 */
7090 if (!bdev_nowait(rdev->bdev)) {
7091 pr_info("%s: Disabling nowait because %pg does not support nowait\n",
7092 mdname(mddev), rdev->bdev);
7093 blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue);
7094 }
7095 /*
7096 * Kick recovery, maybe this spare has to be added to the
7097 * array immediately.
7098 */
7099 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7100 md_wakeup_thread(mddev->thread);
7101 md_new_event();
7102 return 0;
7103
7104 abort_export:
7105 export_rdev(rdev, mddev);
7106 return err;
7107 }
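/*
 * Note: this is the handler behind the legacy HOT_ADD_DISK ioctl, usable
 * only with version-0.90 superblocks as checked above; newer tooling adds
 * devices through ADD_NEW_DISK / md_add_new_disk() instead.
 */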
7108
7109 static int set_bitmap_file(struct mddev *mddev, int fd)
7110 {
7111 int err = 0;
7112
7113 if (mddev->pers) {
7114 if (!mddev->pers->quiesce || !mddev->thread)
7115 return -EBUSY;
7116 if (mddev->recovery || mddev->sync_thread)
7117 return -EBUSY;
7118 /* we should be able to change the bitmap.. */
7119 }
7120
7121 if (fd >= 0) {
7122 struct inode *inode;
7123 struct file *f;
7124
7125 if (mddev->bitmap || mddev->bitmap_info.file)
7126 return -EEXIST; /* cannot add when bitmap is present */
7127
7128 if (!IS_ENABLED(CONFIG_MD_BITMAP_FILE)) {
7129 pr_warn("%s: bitmap files not supported by this kernel\n",
7130 mdname(mddev));
7131 return -EINVAL;
7132 }
7133 pr_warn("%s: using deprecated bitmap file support\n",
7134 mdname(mddev));
7135
7136 f = fget(fd);
7137
7138 if (f == NULL) {
7139 pr_warn("%s: error: failed to get bitmap file\n",
7140 mdname(mddev));
7141 return -EBADF;
7142 }
7143
7144 inode = f->f_mapping->host;
7145 if (!S_ISREG(inode->i_mode)) {
7146 pr_warn("%s: error: bitmap file must be a regular file\n",
7147 mdname(mddev));
7148 err = -EBADF;
7149 } else if (!(f->f_mode & FMODE_WRITE)) {
7150 pr_warn("%s: error: bitmap file must open for write\n",
7151 mdname(mddev));
7152 err = -EBADF;
7153 } else if (atomic_read(&inode->i_writecount) != 1) {
7154 pr_warn("%s: error: bitmap file is already in use\n",
7155 mdname(mddev));
7156 err = -EBUSY;
7157 }
7158 if (err) {
7159 fput(f);
7160 return err;
7161 }
7162 mddev->bitmap_info.file = f;
7163 mddev->bitmap_info.offset = 0; /* file overrides offset */
7164 } else if (mddev->bitmap == NULL)
7165 return -ENOENT; /* cannot remove what isn't there */
7166 err = 0;
7167 if (mddev->pers) {
7168 if (fd >= 0) {
7169 struct bitmap *bitmap;
7170
7171 bitmap = md_bitmap_create(mddev, -1);
7172 mddev_suspend(mddev);
7173 if (!IS_ERR(bitmap)) {
7174 mddev->bitmap = bitmap;
7175 err = md_bitmap_load(mddev);
7176 } else
7177 err = PTR_ERR(bitmap);
7178 if (err) {
7179 md_bitmap_destroy(mddev);
7180 fd = -1;
7181 }
7182 mddev_resume(mddev);
7183 } else if (fd < 0) {
7184 mddev_suspend(mddev);
7185 md_bitmap_destroy(mddev);
7186 mddev_resume(mddev);
7187 }
7188 }
7189 if (fd < 0) {
7190 struct file *f = mddev->bitmap_info.file;
7191 if (f) {
7192 spin_lock(&mddev->lock);
7193 mddev->bitmap_info.file = NULL;
7194 spin_unlock(&mddev->lock);
7195 fput(f);
7196 }
7197 }
7198
7199 return err;
7200 }
7201
7202 /*
7203 * md_set_array_info is used in two different ways.
7204 * The original usage is when creating a new array.
7205 * In this usage, raid_disks is > 0 and it, together with
7206 * level, size, not_persistent, layout and chunksize, determines the
7207 * shape of the array.
7208 * This will always create an array with a type-0.90.0 superblock.
7209 * The newer usage is when assembling an array.
7210 * In this case raid_disks will be 0, and the major_version field is
7211 * used to determine which style of superblocks are to be found on the devices.
7212 * The minor and patch _version numbers are also kept in case the
7213 * super_block handler wishes to interpret them.
7214 */
7215 int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info)
7216 {
7217 if (info->raid_disks == 0) {
7218 /* just setting version number for superblock loading */
7219 if (info->major_version < 0 ||
7220 info->major_version >= ARRAY_SIZE(super_types) ||
7221 super_types[info->major_version].name == NULL) {
7222 /* maybe try to auto-load a module? */
7223 pr_warn("md: superblock version %d not known\n",
7224 info->major_version);
7225 return -EINVAL;
7226 }
7227 mddev->major_version = info->major_version;
7228 mddev->minor_version = info->minor_version;
7229 mddev->patch_version = info->patch_version;
7230 mddev->persistent = !info->not_persistent;
7231 /* ensure mddev_put doesn't delete this now that there
7232 * is some minimal configuration.
7233 */
7234 mddev->ctime = ktime_get_real_seconds();
7235 return 0;
7236 }
7237 mddev->major_version = MD_MAJOR_VERSION;
7238 mddev->minor_version = MD_MINOR_VERSION;
7239 mddev->patch_version = MD_PATCHLEVEL_VERSION;
7240 mddev->ctime = ktime_get_real_seconds();
7241
7242 mddev->level = info->level;
7243 mddev->clevel[0] = 0;
7244 mddev->dev_sectors = 2 * (sector_t)info->size;
7245 mddev->raid_disks = info->raid_disks;
7246 /* don't set md_minor, it is determined by which /dev/md* was
7247 * opened
7248 */
7249 if (info->state & (1<<MD_SB_CLEAN))
7250 mddev->recovery_cp = MaxSector;
7251 else
7252 mddev->recovery_cp = 0;
7253 mddev->persistent = ! info->not_persistent;
7254 mddev->external = 0;
7255
7256 mddev->layout = info->layout;
7257 if (mddev->level == 0)
7258 /* Cannot trust RAID0 layout info here */
7259 mddev->layout = -1;
7260 mddev->chunk_sectors = info->chunk_size >> 9;
7261
7262 if (mddev->persistent) {
7263 mddev->max_disks = MD_SB_DISKS;
7264 mddev->flags = 0;
7265 mddev->sb_flags = 0;
7266 }
7267 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
7268
7269 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
7270 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
7271 mddev->bitmap_info.offset = 0;
7272
7273 mddev->reshape_position = MaxSector;
7274
7275 /*
7276 * Generate a 128 bit UUID
7277 */
7278 get_random_bytes(mddev->uuid, 16);
7279
7280 mddev->new_level = mddev->level;
7281 mddev->new_chunk_sectors = mddev->chunk_sectors;
7282 mddev->new_layout = mddev->layout;
7283 mddev->delta_disks = 0;
7284 mddev->reshape_backwards = 0;
7285
7286 return 0;
7287 }
7288
7289 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
7290 {
7291 lockdep_assert_held(&mddev->reconfig_mutex);
7292
7293 if (mddev->external_size)
7294 return;
7295
7296 mddev->array_sectors = array_sectors;
7297 }
7298 EXPORT_SYMBOL(md_set_array_sectors);
7299
7300 static int update_size(struct mddev *mddev, sector_t num_sectors)
7301 {
7302 struct md_rdev *rdev;
7303 int rv;
7304 int fit = (num_sectors == 0);
7305 sector_t old_dev_sectors = mddev->dev_sectors;
7306
7307 if (mddev->pers->resize == NULL)
7308 return -EINVAL;
7309 /* The "num_sectors" is the number of sectors of each device that
7310 * is used. This can only make sense for arrays with redundancy.
7311 * linear and raid0 always use whatever space is available. We can only
7312 * consider changing this number if no resync or reconstruction is
7313 * happening, and if the new size is acceptable. It must fit before the
7314 * sb_start or, if that is <data_offset, it must fit before the size
7315 * of each device. If num_sectors is zero, we find the largest size
7316 * that fits.
7317 */
7318 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
7319 mddev->sync_thread)
7320 return -EBUSY;
7321 if (!md_is_rdwr(mddev))
7322 return -EROFS;
7323
7324 rdev_for_each(rdev, mddev) {
7325 sector_t avail = rdev->sectors;
7326
7327 if (fit && (num_sectors == 0 || num_sectors > avail))
7328 num_sectors = avail;
7329 if (avail < num_sectors)
7330 return -ENOSPC;
7331 }
7332 rv = mddev->pers->resize(mddev, num_sectors);
7333 if (!rv) {
7334 if (mddev_is_clustered(mddev))
7335 md_cluster_ops->update_size(mddev, old_dev_sectors);
7336 else if (mddev->queue) {
7337 set_capacity_and_notify(mddev->gendisk,
7338 mddev->array_sectors);
7339 }
7340 }
7341 return rv;
7342 }
7343
7344 static int update_raid_disks(struct mddev *mddev, int raid_disks)
7345 {
7346 int rv;
7347 struct md_rdev *rdev;
7348 /* change the number of raid disks */
7349 if (mddev->pers->check_reshape == NULL)
7350 return -EINVAL;
7351 if (!md_is_rdwr(mddev))
7352 return -EROFS;
7353 if (raid_disks <= 0 ||
7354 (mddev->max_disks && raid_disks >= mddev->max_disks))
7355 return -EINVAL;
7356 if (mddev->sync_thread ||
7357 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
7358 test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) ||
7359 mddev->reshape_position != MaxSector)
7360 return -EBUSY;
7361
7362 rdev_for_each(rdev, mddev) {
7363 if (mddev->raid_disks < raid_disks &&
7364 rdev->data_offset < rdev->new_data_offset)
7365 return -EINVAL;
7366 if (mddev->raid_disks > raid_disks &&
7367 rdev->data_offset > rdev->new_data_offset)
7368 return -EINVAL;
7369 }
7370
7371 mddev->delta_disks = raid_disks - mddev->raid_disks;
7372 if (mddev->delta_disks < 0)
7373 mddev->reshape_backwards = 1;
7374 else if (mddev->delta_disks > 0)
7375 mddev->reshape_backwards = 0;
7376
7377 rv = mddev->pers->check_reshape(mddev);
7378 if (rv < 0) {
7379 mddev->delta_disks = 0;
7380 mddev->reshape_backwards = 0;
7381 }
7382 return rv;
7383 }
7384
7385 /*
7386 * update_array_info is used to change the configuration of an
7387 * on-line array.
7388 * The version, ctime, level, size, raid_disks, not_persistent, layout and chunk_size
7389 * fields in the info are checked against the array.
7390 * Any differences that cannot be handled will cause an error.
7391 * Normally, only one change can be managed at a time.
7392 */
7393 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
7394 {
7395 int rv = 0;
7396 int cnt = 0;
7397 int state = 0;
7398
7399 /* calculate expected state, ignoring low bits */
7400 if (mddev->bitmap && mddev->bitmap_info.offset)
7401 state |= (1 << MD_SB_BITMAP_PRESENT);
7402
7403 if (mddev->major_version != info->major_version ||
7404 mddev->minor_version != info->minor_version ||
7405 /* mddev->patch_version != info->patch_version || */
7406 mddev->ctime != info->ctime ||
7407 mddev->level != info->level ||
7408 /* mddev->layout != info->layout || */
7409 mddev->persistent != !info->not_persistent ||
7410 mddev->chunk_sectors != info->chunk_size >> 9 ||
7411 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
7412 ((state^info->state) & 0xfffffe00)
7413 )
7414 return -EINVAL;
7415 /* Check there is only one change */
7416 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
7417 cnt++;
7418 if (mddev->raid_disks != info->raid_disks)
7419 cnt++;
7420 if (mddev->layout != info->layout)
7421 cnt++;
7422 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
7423 cnt++;
7424 if (cnt == 0)
7425 return 0;
7426 if (cnt > 1)
7427 return -EINVAL;
7428
7429 if (mddev->layout != info->layout) {
7430 /* Change layout
7431 * we don't need to do anything at the md level, the
7432 * personality will take care of it all.
7433 */
7434 if (mddev->pers->check_reshape == NULL)
7435 return -EINVAL;
7436 else {
7437 mddev->new_layout = info->layout;
7438 rv = mddev->pers->check_reshape(mddev);
7439 if (rv)
7440 mddev->new_layout = mddev->layout;
7441 return rv;
7442 }
7443 }
7444 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
7445 rv = update_size(mddev, (sector_t)info->size * 2);
7446
7447 if (mddev->raid_disks != info->raid_disks)
7448 rv = update_raid_disks(mddev, info->raid_disks);
7449
7450 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
7451 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
7452 rv = -EINVAL;
7453 goto err;
7454 }
7455 if (mddev->recovery || mddev->sync_thread) {
7456 rv = -EBUSY;
7457 goto err;
7458 }
7459 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
7460 struct bitmap *bitmap;
7461 /* add the bitmap */
7462 if (mddev->bitmap) {
7463 rv = -EEXIST;
7464 goto err;
7465 }
7466 if (mddev->bitmap_info.default_offset == 0) {
7467 rv = -EINVAL;
7468 goto err;
7469 }
7470 mddev->bitmap_info.offset =
7471 mddev->bitmap_info.default_offset;
7472 mddev->bitmap_info.space =
7473 mddev->bitmap_info.default_space;
7474 bitmap = md_bitmap_create(mddev, -1);
7475 mddev_suspend(mddev);
7476 if (!IS_ERR(bitmap)) {
7477 mddev->bitmap = bitmap;
7478 rv = md_bitmap_load(mddev);
7479 } else
7480 rv = PTR_ERR(bitmap);
7481 if (rv)
7482 md_bitmap_destroy(mddev);
7483 mddev_resume(mddev);
7484 } else {
7485 /* remove the bitmap */
7486 if (!mddev->bitmap) {
7487 rv = -ENOENT;
7488 goto err;
7489 }
7490 if (mddev->bitmap->storage.file) {
7491 rv = -EINVAL;
7492 goto err;
7493 }
7494 if (mddev->bitmap_info.nodes) {
7495 /* hold the PW lock on all the bitmaps */
7496 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
7497 pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
7498 rv = -EPERM;
7499 md_cluster_ops->unlock_all_bitmaps(mddev);
7500 goto err;
7501 }
7502
7503 mddev->bitmap_info.nodes = 0;
7504 md_cluster_ops->leave(mddev);
7505 module_put(md_cluster_mod);
7506 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
7507 }
7508 mddev_suspend(mddev);
7509 md_bitmap_destroy(mddev);
7510 mddev_resume(mddev);
7511 mddev->bitmap_info.offset = 0;
7512 }
7513 }
7514 md_update_sb(mddev, 1);
7515 return rv;
7516 err:
7517 return rv;
7518 }
7519
7520 static int set_disk_faulty(struct mddev *mddev, dev_t dev)
7521 {
7522 struct md_rdev *rdev;
7523 int err = 0;
7524
7525 if (mddev->pers == NULL)
7526 return -ENODEV;
7527
7528 rcu_read_lock();
7529 rdev = md_find_rdev_rcu(mddev, dev);
7530 if (!rdev)
7531 err = -ENODEV;
7532 else {
7533 md_error(mddev, rdev);
7534 if (test_bit(MD_BROKEN, &mddev->flags))
7535 err = -EBUSY;
7536 }
7537 rcu_read_unlock();
7538 return err;
7539 }
7540
7541 /*
7542 * We have a problem here: there is no easy way to give a CHS
7543 * virtual geometry. We currently pretend that we have a 2-head,
7544 * 4-sector geometry (with a BIG number of cylinders...). This drives
7545 * dosfs just mad... ;-)
7546 */
7547 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
7548 {
7549 struct mddev *mddev = bdev->bd_disk->private_data;
7550
7551 geo->heads = 2;
7552 geo->sectors = 4;
7553 geo->cylinders = mddev->array_sectors / 8;
7554 return 0;
7555 }
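
/*
 * Sketch of what the fake geometry above looks like from user space via
 * HDIO_GETGEO: heads * sectors is always 8, so cylinders is roughly the
 * array size in sectors divided by 8. The device path is an assumption.
 */
#if 0	/* illustrative only, not built as part of md.c */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/hdreg.h>

static void show_geometry(const char *node)
{
	struct hd_geometry geo;
	int fd = open(node, O_RDONLY);

	if (fd < 0) {
		perror(node);
		return;
	}
	/* for an md device this reports heads=2 sectors=4, per md_getgeo() */
	if (ioctl(fd, HDIO_GETGEO, &geo) == 0)
		printf("heads=%d sectors=%d cylinders=%d\n",
		       geo.heads, geo.sectors, geo.cylinders);
	close(fd);
}
#endif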
7556
7557 static inline bool md_ioctl_valid(unsigned int cmd)
7558 {
7559 switch (cmd) {
7560 case ADD_NEW_DISK:
7561 case GET_ARRAY_INFO:
7562 case GET_BITMAP_FILE:
7563 case GET_DISK_INFO:
7564 case HOT_ADD_DISK:
7565 case HOT_REMOVE_DISK:
7566 case RAID_VERSION:
7567 case RESTART_ARRAY_RW:
7568 case RUN_ARRAY:
7569 case SET_ARRAY_INFO:
7570 case SET_BITMAP_FILE:
7571 case SET_DISK_FAULTY:
7572 case STOP_ARRAY:
7573 case STOP_ARRAY_RO:
7574 case CLUSTERED_DISK_NACK:
7575 return true;
7576 default:
7577 return false;
7578 }
7579 }
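
/*
 * Sketch of driving one of the whitelisted commands from user space:
 * RAID_VERSION is unprivileged and fills an mdu_version_t, while any
 * ioctl not listed in md_ioctl_valid() is rejected with -ENOTTY before
 * it ever reaches an array. The device node is an assumption.
 */
#if 0	/* illustrative only, not built as part of md.c */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/raid/md_u.h>

static void show_raid_version(const char *node)
{
	mdu_version_t ver;
	int fd = open(node, O_RDONLY);

	if (fd < 0) {
		perror(node);
		return;
	}
	if (ioctl(fd, RAID_VERSION, &ver) == 0)
		printf("md driver %d.%d.%d\n",
		       ver.major, ver.minor, ver.patchlevel);
	close(fd);
}
#endif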
7580
7581 static int __md_set_array_info(struct mddev *mddev, void __user *argp)
7582 {
7583 mdu_array_info_t info;
7584 int err;
7585
7586 if (!argp)
7587 memset(&info, 0, sizeof(info));
7588 else if (copy_from_user(&info, argp, sizeof(info)))
7589 return -EFAULT;
7590
7591 if (mddev->pers) {
7592 err = update_array_info(mddev, &info);
7593 if (err)
7594 pr_warn("md: couldn't update array info. %d\n", err);
7595 return err;
7596 }
7597
7598 if (!list_empty(&mddev->disks)) {
7599 pr_warn("md: array %s already has disks!\n", mdname(mddev));
7600 return -EBUSY;
7601 }
7602
7603 if (mddev->raid_disks) {
7604 pr_warn("md: array %s already initialised!\n", mdname(mddev));
7605 return -EBUSY;
7606 }
7607
7608 err = md_set_array_info(mddev, &info);
7609 if (err)
7610 pr_warn("md: couldn't set array info. %d\n", err);
7611
7612 return err;
7613 }
7614
7615 static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
7616 unsigned int cmd, unsigned long arg)
7617 {
7618 int err = 0;
7619 void __user *argp = (void __user *)arg;
7620 struct mddev *mddev = NULL;
7621
7622 if (!md_ioctl_valid(cmd))
7623 return -ENOTTY;
7624
7625 switch (cmd) {
7626 case RAID_VERSION:
7627 case GET_ARRAY_INFO:
7628 case GET_DISK_INFO:
7629 break;
7630 default:
7631 if (!capable(CAP_SYS_ADMIN))
7632 return -EACCES;
7633 }
7634
7635 /*
7636 * Commands dealing with the RAID driver but not any
7637 * particular array:
7638 */
7639 switch (cmd) {
7640 case RAID_VERSION:
7641 err = get_version(argp);
7642 goto out;
7643 default:;
7644 }
7645
7646 /*
7647 * Commands creating/starting a new array:
7648 */
7649
7650 mddev = bdev->bd_disk->private_data;
7651
7652 if (!mddev) {
7653 BUG();
7654 goto out;
7655 }
7656
7657 /* Some actions do not require the mutex */
7658 switch (cmd) {
7659 case GET_ARRAY_INFO:
7660 if (!mddev->raid_disks && !mddev->external)
7661 err = -ENODEV;
7662 else
7663 err = get_array_info(mddev, argp);
7664 goto out;
7665
7666 case GET_DISK_INFO:
7667 if (!mddev->raid_disks && !mddev->external)
7668 err = -ENODEV;
7669 else
7670 err = get_disk_info(mddev, argp);
7671 goto out;
7672
7673 case SET_DISK_FAULTY:
7674 err = set_disk_faulty(mddev, new_decode_dev(arg));
7675 goto out;
7676
7677 case GET_BITMAP_FILE:
7678 err = get_bitmap_file(mddev, argp);
7679 goto out;
7680
7681 }
7682
7683 if (cmd == HOT_REMOVE_DISK)
7684 /* need to ensure recovery thread has run */
7685 wait_event_interruptible_timeout(mddev->sb_wait,
7686 !test_bit(MD_RECOVERY_NEEDED,
7687 &mddev->recovery),
7688 msecs_to_jiffies(5000));
7689 if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
7690 /* Need to flush page cache, and ensure no-one else opens
7691 * and writes
7692 */
7693 mutex_lock(&mddev->open_mutex);
7694 if (mddev->pers && atomic_read(&mddev->openers) > 1) {
7695 mutex_unlock(&mddev->open_mutex);
7696 err = -EBUSY;
7697 goto out;
7698 }
7699 if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
7700 mutex_unlock(&mddev->open_mutex);
7701 err = -EBUSY;
7702 goto out;
7703 }
7704 mutex_unlock(&mddev->open_mutex);
7705 sync_blockdev(bdev);
7706 }
7707 err = mddev_lock(mddev);
7708 if (err) {
7709 pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
7710 err, cmd);
7711 goto out;
7712 }
7713
7714 if (cmd == SET_ARRAY_INFO) {
7715 err = __md_set_array_info(mddev, argp);
7716 goto unlock;
7717 }
7718
7719 /*
7720 * Commands querying/configuring an existing array:
7721 */
7722 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
7723 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
7724 if ((!mddev->raid_disks && !mddev->external)
7725 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
7726 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
7727 && cmd != GET_BITMAP_FILE) {
7728 err = -ENODEV;
7729 goto unlock;
7730 }
7731
7732 /*
7733 * Commands even a read-only array can execute:
7734 */
7735 switch (cmd) {
7736 case RESTART_ARRAY_RW:
7737 err = restart_array(mddev);
7738 goto unlock;
7739
7740 case STOP_ARRAY:
7741 err = do_md_stop(mddev, 0, bdev);
7742 goto unlock;
7743
7744 case STOP_ARRAY_RO:
7745 err = md_set_readonly(mddev, bdev);
7746 goto unlock;
7747
7748 case HOT_REMOVE_DISK:
7749 err = hot_remove_disk(mddev, new_decode_dev(arg));
7750 goto unlock;
7751
7752 case ADD_NEW_DISK:
7753 /* We can support ADD_NEW_DISK on read-only arrays
7754 * only if we are re-adding a preexisting device.
7755 * So require mddev->pers and MD_DISK_SYNC.
7756 */
7757 if (mddev->pers) {
7758 mdu_disk_info_t info;
7759 if (copy_from_user(&info, argp, sizeof(info)))
7760 err = -EFAULT;
7761 else if (!(info.state & (1<<MD_DISK_SYNC)))
7762 /* Need to clear read-only for this */
7763 break;
7764 else
7765 err = md_add_new_disk(mddev, &info);
7766 goto unlock;
7767 }
7768 break;
7769 }
7770
7771 /*
7772 * The remaining ioctls are changing the state of the
7773 * superblock, so we do not allow them on read-only arrays.
7774 */
7775 if (!md_is_rdwr(mddev) && mddev->pers) {
7776 if (mddev->ro != MD_AUTO_READ) {
7777 err = -EROFS;
7778 goto unlock;
7779 }
7780 mddev->ro = MD_RDWR;
7781 sysfs_notify_dirent_safe(mddev->sysfs_state);
7782 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7783 /* mddev_unlock will wake thread */
7784 /* If a device failed while we were read-only, we
7785 * need to make sure the metadata is updated now.
7786 */
7787 if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
7788 mddev_unlock(mddev);
7789 wait_event(mddev->sb_wait,
7790 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
7791 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
7792 mddev_lock_nointr(mddev);
7793 }
7794 }
7795
7796 switch (cmd) {
7797 case ADD_NEW_DISK:
7798 {
7799 mdu_disk_info_t info;
7800 if (copy_from_user(&info, argp, sizeof(info)))
7801 err = -EFAULT;
7802 else
7803 err = md_add_new_disk(mddev, &info);
7804 goto unlock;
7805 }
7806
7807 case CLUSTERED_DISK_NACK:
7808 if (mddev_is_clustered(mddev))
7809 md_cluster_ops->new_disk_ack(mddev, false);
7810 else
7811 err = -EINVAL;
7812 goto unlock;
7813
7814 case HOT_ADD_DISK:
7815 err = hot_add_disk(mddev, new_decode_dev(arg));
7816 goto unlock;
7817
7818 case RUN_ARRAY:
7819 err = do_md_run(mddev);
7820 goto unlock;
7821
7822 case SET_BITMAP_FILE:
7823 err = set_bitmap_file(mddev, (int)arg);
7824 goto unlock;
7825
7826 default:
7827 err = -EINVAL;
7828 goto unlock;
7829 }
7830
7831 unlock:
7832 if (mddev->hold_active == UNTIL_IOCTL &&
7833 err != -EINVAL)
7834 mddev->hold_active = 0;
7835 mddev_unlock(mddev);
7836 out:
7837 if (cmd == STOP_ARRAY_RO || (err && cmd == STOP_ARRAY))
7838 clear_bit(MD_CLOSING, &mddev->flags);
7839 return err;
7840 }
7841 #ifdef CONFIG_COMPAT
7842 static int md_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
7843 unsigned int cmd, unsigned long arg)
7844 {
7845 switch (cmd) {
7846 case HOT_REMOVE_DISK:
7847 case HOT_ADD_DISK:
7848 case SET_DISK_FAULTY:
7849 case SET_BITMAP_FILE:
7850 /* These take an integer arg, do not convert */
7851 break;
7852 default:
7853 arg = (unsigned long)compat_ptr(arg);
7854 break;
7855 }
7856
7857 return md_ioctl(bdev, mode, cmd, arg);
7858 }
7859 #endif /* CONFIG_COMPAT */
7860
7861 static int md_set_read_only(struct block_device *bdev, bool ro)
7862 {
7863 struct mddev *mddev = bdev->bd_disk->private_data;
7864 int err;
7865
7866 err = mddev_lock(mddev);
7867 if (err)
7868 return err;
7869
7870 if (!mddev->raid_disks && !mddev->external) {
7871 err = -ENODEV;
7872 goto out_unlock;
7873 }
7874
7875 /*
7876 * Transitioning to read-auto need only happen for arrays that call
7877 * md_write_start and which are not ready for writes yet.
7878 */
7879 if (!ro && mddev->ro == MD_RDONLY && mddev->pers) {
7880 err = restart_array(mddev);
7881 if (err)
7882 goto out_unlock;
7883 mddev->ro = MD_AUTO_READ;
7884 }
7885
7886 out_unlock:
7887 mddev_unlock(mddev);
7888 return err;
7889 }
7890
7891 static int md_open(struct gendisk *disk, blk_mode_t mode)
7892 {
7893 struct mddev *mddev;
7894 int err;
7895
7896 spin_lock(&all_mddevs_lock);
7897 mddev = mddev_get(disk->private_data);
7898 spin_unlock(&all_mddevs_lock);
7899 if (!mddev)
7900 return -ENODEV;
7901
7902 err = mutex_lock_interruptible(&mddev->open_mutex);
7903 if (err)
7904 goto out;
7905
7906 err = -ENODEV;
7907 if (test_bit(MD_CLOSING, &mddev->flags))
7908 goto out_unlock;
7909
7910 atomic_inc(&mddev->openers);
7911 mutex_unlock(&mddev->open_mutex);
7912
7913 disk_check_media_change(disk);
7914 return 0;
7915
7916 out_unlock:
7917 mutex_unlock(&mddev->open_mutex);
7918 out:
7919 mddev_put(mddev);
7920 return err;
7921 }
7922
7923 static void md_release(struct gendisk *disk)
7924 {
7925 struct mddev *mddev = disk->private_data;
7926
7927 BUG_ON(!mddev);
7928 atomic_dec(&mddev->openers);
7929 mddev_put(mddev);
7930 }
7931
7932 static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing)
7933 {
7934 struct mddev *mddev = disk->private_data;
7935 unsigned int ret = 0;
7936
7937 if (mddev->changed)
7938 ret = DISK_EVENT_MEDIA_CHANGE;
7939 mddev->changed = 0;
7940 return ret;
7941 }
7942
7943 static void md_free_disk(struct gendisk *disk)
7944 {
7945 struct mddev *mddev = disk->private_data;
7946
7947 percpu_ref_exit(&mddev->writes_pending);
7948 mddev_free(mddev);
7949 }
7950
7951 const struct block_device_operations md_fops =
7952 {
7953 .owner = THIS_MODULE,
7954 .submit_bio = md_submit_bio,
7955 .open = md_open,
7956 .release = md_release,
7957 .ioctl = md_ioctl,
7958 #ifdef CONFIG_COMPAT
7959 .compat_ioctl = md_compat_ioctl,
7960 #endif
7961 .getgeo = md_getgeo,
7962 .check_events = md_check_events,
7963 .set_read_only = md_set_read_only,
7964 .free_disk = md_free_disk,
7965 };
7966
7967 static int md_thread(void *arg)
7968 {
7969 struct md_thread *thread = arg;
7970
7971 /*
7972 * md_thread is a 'system-thread', its priority should be very
7973 * high. We avoid resource deadlocks individually in each
7974 * raid personality. (RAID5 does preallocation) We also use RR and
7975 * the very same RT priority as kswapd, thus we will never get
7976 * into a priority inversion deadlock.
7977 *
7978 * we definitely have to have equal or higher priority than
7979 * bdflush, otherwise bdflush will deadlock if there are too
7980 * many dirty RAID5 blocks.
7981 */
7982
7983 allow_signal(SIGKILL);
7984 while (!kthread_should_stop()) {
7985
7986 /* We need to wait INTERRUPTIBLE so that
7987 * we don't add to the load-average.
7988 * That means we need to be sure no signals are
7989 * pending
7990 */
7991 if (signal_pending(current))
7992 flush_signals(current);
7993
7994 wait_event_interruptible_timeout
7995 (thread->wqueue,
7996 test_bit(THREAD_WAKEUP, &thread->flags)
7997 || kthread_should_stop() || kthread_should_park(),
7998 thread->timeout);
7999
8000 clear_bit(THREAD_WAKEUP, &thread->flags);
8001 if (kthread_should_park())
8002 kthread_parkme();
8003 if (!kthread_should_stop())
8004 thread->run(thread);
8005 }
8006
8007 return 0;
8008 }
8009
8010 static void md_wakeup_thread_directly(struct md_thread __rcu *thread)
8011 {
8012 struct md_thread *t;
8013
8014 rcu_read_lock();
8015 t = rcu_dereference(thread);
8016 if (t)
8017 wake_up_process(t->tsk);
8018 rcu_read_unlock();
8019 }
8020
8021 void md_wakeup_thread(struct md_thread __rcu *thread)
8022 {
8023 struct md_thread *t;
8024
8025 rcu_read_lock();
8026 t = rcu_dereference(thread);
8027 if (t) {
8028 pr_debug("md: waking up MD thread %s.\n", t->tsk->comm);
8029 set_bit(THREAD_WAKEUP, &t->flags);
8030 wake_up(&t->wqueue);
8031 }
8032 rcu_read_unlock();
8033 }
8034 EXPORT_SYMBOL(md_wakeup_thread);
8035
8036 struct md_thread *md_register_thread(void (*run) (struct md_thread *),
8037 struct mddev *mddev, const char *name)
8038 {
8039 struct md_thread *thread;
8040
8041 thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
8042 if (!thread)
8043 return NULL;
8044
8045 init_waitqueue_head(&thread->wqueue);
8046
8047 thread->run = run;
8048 thread->mddev = mddev;
8049 thread->timeout = MAX_SCHEDULE_TIMEOUT;
8050 thread->tsk = kthread_run(md_thread, thread,
8051 "%s_%s",
8052 mdname(thread->mddev),
8053 name);
8054 if (IS_ERR(thread->tsk)) {
8055 kfree(thread);
8056 return NULL;
8057 }
8058 return thread;
8059 }
8060 EXPORT_SYMBOL(md_register_thread);
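
/*
 * Sketch of how a personality typically drives this API: register a
 * thread when the array is run, poke it with md_wakeup_thread() whenever
 * there is queued work, and tear it down under the reconfig mutex. The
 * example_raidd / example_raid_run / example_raid_free names are
 * hypothetical and do not exist in this file.
 */
#if 0	/* illustrative only, not built as part of md.c */
static void example_raidd(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;

	/* drain whatever work the personality queued for this array */
	pr_debug("%s: handling queued work\n", mdname(mddev));
}

static int example_raid_run(struct mddev *mddev)
{
	struct md_thread *t = md_register_thread(example_raidd, mddev, "example");

	if (!t)
		return -ENOMEM;
	rcu_assign_pointer(mddev->thread, t);
	md_wakeup_thread(mddev->thread);	/* kick the first pass */
	return 0;
}

static void example_raid_free(struct mddev *mddev)
{
	md_unregister_thread(mddev, &mddev->thread);
}
#endif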
8061
8062 void md_unregister_thread(struct mddev *mddev, struct md_thread __rcu **threadp)
8063 {
8064 struct md_thread *thread = rcu_dereference_protected(*threadp,
8065 lockdep_is_held(&mddev->reconfig_mutex));
8066
8067 if (!thread)
8068 return;
8069
8070 rcu_assign_pointer(*threadp, NULL);
8071 synchronize_rcu();
8072
8073 pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
8074 kthread_stop(thread->tsk);
8075 kfree(thread);
8076 }
8077 EXPORT_SYMBOL(md_unregister_thread);
8078
8079 void md_error(struct mddev *mddev, struct md_rdev *rdev)
8080 {
8081 if (!rdev || test_bit(Faulty, &rdev->flags))
8082 return;
8083
8084 if (!mddev->pers || !mddev->pers->error_handler)
8085 return;
8086 mddev->pers->error_handler(mddev, rdev);
8087
8088 if (mddev->pers->level == 0 || mddev->pers->level == LEVEL_LINEAR)
8089 return;
8090
8091 if (mddev->degraded && !test_bit(MD_BROKEN, &mddev->flags))
8092 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8093 sysfs_notify_dirent_safe(rdev->sysfs_state);
8094 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8095 if (!test_bit(MD_BROKEN, &mddev->flags)) {
8096 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8097 md_wakeup_thread(mddev->thread);
8098 }
8099 if (mddev->event_work.func)
8100 queue_work(md_misc_wq, &mddev->event_work);
8101 md_new_event();
8102 }
8103 EXPORT_SYMBOL(md_error);
8104
8105 /* seq_file implementation /proc/mdstat */
8106
8107 static void status_unused(struct seq_file *seq)
8108 {
8109 int i = 0;
8110 struct md_rdev *rdev;
8111
8112 seq_printf(seq, "unused devices: ");
8113
8114 list_for_each_entry(rdev, &pending_raid_disks, same_set) {
8115 i++;
8116 seq_printf(seq, "%pg ", rdev->bdev);
8117 }
8118 if (!i)
8119 seq_printf(seq, "<none>");
8120
8121 seq_printf(seq, "\n");
8122 }
8123
8124 static int status_resync(struct seq_file *seq, struct mddev *mddev)
8125 {
8126 sector_t max_sectors, resync, res;
8127 unsigned long dt, db = 0;
8128 sector_t rt, curr_mark_cnt, resync_mark_cnt;
8129 int scale, recovery_active;
8130 unsigned int per_milli;
8131
8132 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
8133 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
8134 max_sectors = mddev->resync_max_sectors;
8135 else
8136 max_sectors = mddev->dev_sectors;
8137
8138 resync = mddev->curr_resync;
8139 if (resync < MD_RESYNC_ACTIVE) {
8140 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
8141 /* Still cleaning up */
8142 resync = max_sectors;
8143 } else if (resync > max_sectors) {
8144 resync = max_sectors;
8145 } else {
8146 res = atomic_read(&mddev->recovery_active);
8147 /*
8148 * Resync has started, but the subtraction has overflowed or
8149 * yielded one of the special values. Force it to active to
8150 * ensure the status reports an active resync.
8151 */
8152 if (resync < res || resync - res < MD_RESYNC_ACTIVE)
8153 resync = MD_RESYNC_ACTIVE;
8154 else
8155 resync -= res;
8156 }
8157
8158 if (resync == MD_RESYNC_NONE) {
8159 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) {
8160 struct md_rdev *rdev;
8161
8162 rdev_for_each(rdev, mddev)
8163 if (rdev->raid_disk >= 0 &&
8164 !test_bit(Faulty, &rdev->flags) &&
8165 rdev->recovery_offset != MaxSector &&
8166 rdev->recovery_offset) {
8167 seq_printf(seq, "\trecover=REMOTE");
8168 return 1;
8169 }
8170 if (mddev->reshape_position != MaxSector)
8171 seq_printf(seq, "\treshape=REMOTE");
8172 else
8173 seq_printf(seq, "\tresync=REMOTE");
8174 return 1;
8175 }
8176 if (mddev->recovery_cp < MaxSector) {
8177 seq_printf(seq, "\tresync=PENDING");
8178 return 1;
8179 }
8180 return 0;
8181 }
8182 if (resync < MD_RESYNC_ACTIVE) {
8183 seq_printf(seq, "\tresync=DELAYED");
8184 return 1;
8185 }
8186
8187 WARN_ON(max_sectors == 0);
8188 /* Pick 'scale' such that (resync>>scale)*1000 will fit
8189 * in a sector_t, and (max_sectors>>scale) will fit in a
8190 * u32, as those are the requirements for sector_div.
8191 * Thus 'scale' must be at least 10
8192 */
8193 scale = 10;
8194 if (sizeof(sector_t) > sizeof(unsigned long)) {
8195 while ( max_sectors/2 > (1ULL<<(scale+32)))
8196 scale++;
8197 }
8198 res = (resync>>scale)*1000;
8199 sector_div(res, (u32)((max_sectors>>scale)+1));
8200
8201 per_milli = res;
8202 {
8203 int i, x = per_milli/50, y = 20-x;
8204 seq_printf(seq, "[");
8205 for (i = 0; i < x; i++)
8206 seq_printf(seq, "=");
8207 seq_printf(seq, ">");
8208 for (i = 0; i < y; i++)
8209 seq_printf(seq, ".");
8210 seq_printf(seq, "] ");
8211 }
8212 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
8213 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
8214 "reshape" :
8215 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
8216 "check" :
8217 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
8218 "resync" : "recovery"))),
8219 per_milli/10, per_milli % 10,
8220 (unsigned long long) resync/2,
8221 (unsigned long long) max_sectors/2);
8222
8223 /*
8224 * dt: time from mark until now
8225 * db: blocks written from mark until now
8226 * rt: remaining time
8227 *
8228 * rt is a sector_t, which is always 64bit now. We are keeping
8229 * the original algorithm, but it is not really necessary.
8230 *
8231 * Original algorithm:
8232 * So we divide before multiply in case it is 32bit and close
8233 * to the limit.
8234 * We scale the divisor (db) by 32 to avoid losing precision
8235 * near the end of resync when the number of remaining sectors
8236 * is close to 'db'.
8237 * We then divide rt by 32 after multiplying by db to compensate.
8238 * The '+1' avoids division by zero if db is very small.
8239 */
8240 dt = ((jiffies - mddev->resync_mark) / HZ);
8241 if (!dt) dt++;
8242
8243 curr_mark_cnt = mddev->curr_mark_cnt;
8244 recovery_active = atomic_read(&mddev->recovery_active);
8245 resync_mark_cnt = mddev->resync_mark_cnt;
8246
8247 if (curr_mark_cnt >= (recovery_active + resync_mark_cnt))
8248 db = curr_mark_cnt - (recovery_active + resync_mark_cnt);
8249
8250 rt = max_sectors - resync; /* number of remaining sectors */
8251 rt = div64_u64(rt, db/32+1);
8252 rt *= dt;
8253 rt >>= 5;
8254
8255 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
8256 ((unsigned long)rt % 60)/6);
8257
8258 seq_printf(seq, " speed=%ldK/sec", db/2/dt);
8259 return 1;
8260 }
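
/*
 * A compact restatement of the arithmetic above, using hypothetical
 * numbers, to make the scaling visible: the per-mille progress uses a
 * right-shifted divide so the intermediate values fit sector_div(), and
 * the ETA is remaining_sectors / (done_since_mark / 32 + 1) * dt >> 5.
 */
#if 0	/* illustrative only, not built as part of md.c */
static void example_resync_eta(void)
{
	u64 resync = 3ULL << 30;	/* sectors done so far (assumed) */
	u64 max_sectors = 8ULL << 30;	/* sectors to sync (assumed) */
	u64 dt = 120;			/* seconds since the last mark */
	u64 db = 2ULL << 20;		/* sectors written since the mark */
	u64 per_milli, rt;

	per_milli = div64_u64((resync >> 10) * 1000, (max_sectors >> 10) + 1);
	rt = max_sectors - resync;			/* remaining sectors */
	rt = div64_u64(rt, db / 32 + 1) * dt >> 5;	/* remaining seconds */

	pr_debug("%llu.%llu%% done, finish=%llumin\n",
		 (unsigned long long)(per_milli / 10),
		 (unsigned long long)(per_milli % 10),
		 (unsigned long long)(rt / 60));
}
#endif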
8261
8262 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
8263 {
8264 struct list_head *tmp;
8265 loff_t l = *pos;
8266 struct mddev *mddev;
8267
8268 if (l == 0x10000) {
8269 ++*pos;
8270 return (void *)2;
8271 }
8272 if (l > 0x10000)
8273 return NULL;
8274 if (!l--)
8275 /* header */
8276 return (void*)1;
8277
8278 spin_lock(&all_mddevs_lock);
8279 list_for_each(tmp,&all_mddevs)
8280 if (!l--) {
8281 mddev = list_entry(tmp, struct mddev, all_mddevs);
8282 if (!mddev_get(mddev))
8283 continue;
8284 spin_unlock(&all_mddevs_lock);
8285 return mddev;
8286 }
8287 spin_unlock(&all_mddevs_lock);
8288 if (!l--)
8289 return (void*)2;/* tail */
8290 return NULL;
8291 }
8292
8293 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
8294 {
8295 struct list_head *tmp;
8296 struct mddev *next_mddev, *mddev = v;
8297 struct mddev *to_put = NULL;
8298
8299 ++*pos;
8300 if (v == (void*)2)
8301 return NULL;
8302
8303 spin_lock(&all_mddevs_lock);
8304 if (v == (void*)1) {
8305 tmp = all_mddevs.next;
8306 } else {
8307 to_put = mddev;
8308 tmp = mddev->all_mddevs.next;
8309 }
8310
8311 for (;;) {
8312 if (tmp == &all_mddevs) {
8313 next_mddev = (void*)2;
8314 *pos = 0x10000;
8315 break;
8316 }
8317 next_mddev = list_entry(tmp, struct mddev, all_mddevs);
8318 if (mddev_get(next_mddev))
8319 break;
8320 mddev = next_mddev;
8321 tmp = mddev->all_mddevs.next;
8322 }
8323 spin_unlock(&all_mddevs_lock);
8324
8325 if (to_put)
8326 mddev_put(to_put);
8327 return next_mddev;
8328
8329 }
8330
8331 static void md_seq_stop(struct seq_file *seq, void *v)
8332 {
8333 struct mddev *mddev = v;
8334
8335 if (mddev && v != (void*)1 && v != (void*)2)
8336 mddev_put(mddev);
8337 }
8338
8339 static int md_seq_show(struct seq_file *seq, void *v)
8340 {
8341 struct mddev *mddev = v;
8342 sector_t sectors;
8343 struct md_rdev *rdev;
8344
8345 if (v == (void*)1) {
8346 struct md_personality *pers;
8347 seq_printf(seq, "Personalities : ");
8348 spin_lock(&pers_lock);
8349 list_for_each_entry(pers, &pers_list, list)
8350 seq_printf(seq, "[%s] ", pers->name);
8351
8352 spin_unlock(&pers_lock);
8353 seq_printf(seq, "\n");
8354 seq->poll_event = atomic_read(&md_event_count);
8355 return 0;
8356 }
8357 if (v == (void*)2) {
8358 status_unused(seq);
8359 return 0;
8360 }
8361
8362 spin_lock(&mddev->lock);
8363 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
8364 seq_printf(seq, "%s : %sactive", mdname(mddev),
8365 mddev->pers ? "" : "in");
8366 if (mddev->pers) {
8367 if (mddev->ro == MD_RDONLY)
8368 seq_printf(seq, " (read-only)");
8369 if (mddev->ro == MD_AUTO_READ)
8370 seq_printf(seq, " (auto-read-only)");
8371 seq_printf(seq, " %s", mddev->pers->name);
8372 }
8373
8374 sectors = 0;
8375 rcu_read_lock();
8376 rdev_for_each_rcu(rdev, mddev) {
8377 seq_printf(seq, " %pg[%d]", rdev->bdev, rdev->desc_nr);
8378
8379 if (test_bit(WriteMostly, &rdev->flags))
8380 seq_printf(seq, "(W)");
8381 if (test_bit(Journal, &rdev->flags))
8382 seq_printf(seq, "(J)");
8383 if (test_bit(Faulty, &rdev->flags)) {
8384 seq_printf(seq, "(F)");
8385 continue;
8386 }
8387 if (rdev->raid_disk < 0)
8388 seq_printf(seq, "(S)"); /* spare */
8389 if (test_bit(Replacement, &rdev->flags))
8390 seq_printf(seq, "(R)");
8391 sectors += rdev->sectors;
8392 }
8393 rcu_read_unlock();
8394
8395 if (!list_empty(&mddev->disks)) {
8396 if (mddev->pers)
8397 seq_printf(seq, "\n %llu blocks",
8398 (unsigned long long)
8399 mddev->array_sectors / 2);
8400 else
8401 seq_printf(seq, "\n %llu blocks",
8402 (unsigned long long)sectors / 2);
8403 }
8404 if (mddev->persistent) {
8405 if (mddev->major_version != 0 ||
8406 mddev->minor_version != 90) {
8407 seq_printf(seq," super %d.%d",
8408 mddev->major_version,
8409 mddev->minor_version);
8410 }
8411 } else if (mddev->external)
8412 seq_printf(seq, " super external:%s",
8413 mddev->metadata_type);
8414 else
8415 seq_printf(seq, " super non-persistent");
8416
8417 if (mddev->pers) {
8418 mddev->pers->status(seq, mddev);
8419 seq_printf(seq, "\n ");
8420 if (mddev->pers->sync_request) {
8421 if (status_resync(seq, mddev))
8422 seq_printf(seq, "\n ");
8423 }
8424 } else
8425 seq_printf(seq, "\n ");
8426
8427 md_bitmap_status(seq, mddev->bitmap);
8428
8429 seq_printf(seq, "\n");
8430 }
8431 spin_unlock(&mddev->lock);
8432
8433 return 0;
8434 }
8435
8436 static const struct seq_operations md_seq_ops = {
8437 .start = md_seq_start,
8438 .next = md_seq_next,
8439 .stop = md_seq_stop,
8440 .show = md_seq_show,
8441 };
8442
8443 static int md_seq_open(struct inode *inode, struct file *file)
8444 {
8445 struct seq_file *seq;
8446 int error;
8447
8448 error = seq_open(file, &md_seq_ops);
8449 if (error)
8450 return error;
8451
8452 seq = file->private_data;
8453 seq->poll_event = atomic_read(&md_event_count);
8454 return error;
8455 }
8456
8457 static int md_unloading;
8458 static __poll_t mdstat_poll(struct file *filp, poll_table *wait)
8459 {
8460 struct seq_file *seq = filp->private_data;
8461 __poll_t mask;
8462
8463 if (md_unloading)
8464 return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
8465 poll_wait(filp, &md_event_waiters, wait);
8466
8467 /* always allow read */
8468 mask = EPOLLIN | EPOLLRDNORM;
8469
8470 if (seq->poll_event != atomic_read(&md_event_count))
8471 mask |= EPOLLERR | EPOLLPRI;
8472 return mask;
8473 }
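
/*
 * User-space sketch of consuming this poll interface: after reading
 * /proc/mdstat once (which records md_event_count in the seq_file),
 * poll() returns POLLPRI|POLLERR as soon as the event count moves.
 * This mirrors how a monitor such as mdadm is generally assumed to use
 * it; the snippet itself is illustrative, not code from this file.
 */
#if 0	/* illustrative only, not built as part of md.c */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

static void wait_for_md_event(void)
{
	char buf[4096];
	struct pollfd pfd;

	pfd.fd = open("/proc/mdstat", O_RDONLY);
	if (pfd.fd < 0)
		return;
	if (read(pfd.fd, buf, sizeof(buf)) < 0) {	/* arm poll_event */
		close(pfd.fd);
		return;
	}
	pfd.events = POLLPRI | POLLERR;
	if (poll(&pfd, 1, -1) > 0)
		printf("mdstat changed\n");
	close(pfd.fd);
}
#endif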
8474
8475 static const struct proc_ops mdstat_proc_ops = {
8476 .proc_open = md_seq_open,
8477 .proc_read = seq_read,
8478 .proc_lseek = seq_lseek,
8479 .proc_release = seq_release,
8480 .proc_poll = mdstat_poll,
8481 };
8482
8483 int register_md_personality(struct md_personality *p)
8484 {
8485 pr_debug("md: %s personality registered for level %d\n",
8486 p->name, p->level);
8487 spin_lock(&pers_lock);
8488 list_add_tail(&p->list, &pers_list);
8489 spin_unlock(&pers_lock);
8490 return 0;
8491 }
8492 EXPORT_SYMBOL(register_md_personality);
8493
8494 int unregister_md_personality(struct md_personality *p)
8495 {
8496 pr_debug("md: %s personality unregistered\n", p->name);
8497 spin_lock(&pers_lock);
8498 list_del_init(&p->list);
8499 spin_unlock(&pers_lock);
8500 return 0;
8501 }
8502 EXPORT_SYMBOL(unregister_md_personality);
8503
8504 int register_md_cluster_operations(struct md_cluster_operations *ops,
8505 struct module *module)
8506 {
8507 int ret = 0;
8508 spin_lock(&pers_lock);
8509 if (md_cluster_ops != NULL)
8510 ret = -EALREADY;
8511 else {
8512 md_cluster_ops = ops;
8513 md_cluster_mod = module;
8514 }
8515 spin_unlock(&pers_lock);
8516 return ret;
8517 }
8518 EXPORT_SYMBOL(register_md_cluster_operations);
8519
8520 int unregister_md_cluster_operations(void)
8521 {
8522 spin_lock(&pers_lock);
8523 md_cluster_ops = NULL;
8524 spin_unlock(&pers_lock);
8525 return 0;
8526 }
8527 EXPORT_SYMBOL(unregister_md_cluster_operations);
8528
8529 int md_setup_cluster(struct mddev *mddev, int nodes)
8530 {
8531 int ret;
8532 if (!md_cluster_ops)
8533 request_module("md-cluster");
8534 spin_lock(&pers_lock);
8535 /* ensure module won't be unloaded */
8536 if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
8537 pr_warn("can't find md-cluster module or get its reference.\n");
8538 spin_unlock(&pers_lock);
8539 return -ENOENT;
8540 }
8541 spin_unlock(&pers_lock);
8542
8543 ret = md_cluster_ops->join(mddev, nodes);
8544 if (!ret)
8545 mddev->safemode_delay = 0;
8546 return ret;
8547 }
8548
8549 void md_cluster_stop(struct mddev *mddev)
8550 {
8551 if (!md_cluster_ops)
8552 return;
8553 md_cluster_ops->leave(mddev);
8554 module_put(md_cluster_mod);
8555 }
8556
8557 static int is_mddev_idle(struct mddev *mddev, int init)
8558 {
8559 struct md_rdev *rdev;
8560 int idle;
8561 int curr_events;
8562
8563 idle = 1;
8564 rcu_read_lock();
8565 rdev_for_each_rcu(rdev, mddev) {
8566 struct gendisk *disk = rdev->bdev->bd_disk;
8567 curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
8568 atomic_read(&disk->sync_io);
8569 /* sync IO will cause sync_io to increase before the disk_stats
8570 * as sync_io is counted when a request starts, and
8571 * disk_stats is counted when it completes.
8572 * So resync activity will cause curr_events to be smaller than
8573 * when there was no such activity.
8574 * non-sync IO will cause disk_stat to increase without
8575 * increasing sync_io so curr_events will (eventually)
8576 * be larger than it was before. Once it becomes
8577 * substantially larger, the test below will cause
8578 * the array to appear non-idle, and resync will slow
8579 * down.
8580 * If there is a lot of outstanding resync activity when
8581 * we set last_event to curr_events, then all that activity
8582 * completing might cause the array to appear non-idle
8583 * and resync will be slowed down even though there might
8584 * not have been non-resync activity. This will only
8585 * happen once though. 'last_events' will soon reflect
8586 * the state where there is little or no outstanding
8587 * resync requests, and further resync activity will
8588 * always make curr_events less than last_events.
8589 *
8590 */
8591 if (init || curr_events - rdev->last_events > 64) {
8592 rdev->last_events = curr_events;
8593 idle = 0;
8594 }
8595 }
8596 rcu_read_unlock();
8597 return idle;
8598 }
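
/*
 * A stripped-down sketch of the heuristic above, with hypothetical
 * counters: a member only counts as busy once the total sector count
 * has drifted more than 64 sectors past sync I/O plus the last
 * snapshot, i.e. some non-resync I/O must have happened. The function
 * name and parameters are illustrative only.
 */
#if 0	/* illustrative only, not built as part of md.c */
static bool example_disk_busy(long total_sectors, long sync_io,
			      long *last_events)
{
	long curr_events = total_sectors - sync_io;

	if (curr_events - *last_events > 64) {
		*last_events = curr_events;
		return true;		/* non-resync I/O detected */
	}
	return false;
}
#endif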
8599
8600 void md_done_sync(struct mddev *mddev, int blocks, int ok)
8601 {
8602 /* another "blocks" (512-byte) blocks have been synced */
8603 atomic_sub(blocks, &mddev->recovery_active);
8604 wake_up(&mddev->recovery_wait);
8605 if (!ok) {
8606 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8607 set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
8608 md_wakeup_thread(mddev->thread);
8609 // stop recovery, signal do_sync ....
8610 }
8611 }
8612 EXPORT_SYMBOL(md_done_sync);
8613
8614 /* md_write_start(mddev, bi)
8615 * If we need to update some array metadata (e.g. 'active' flag
8616 * in superblock) before writing, schedule a superblock update
8617 * and wait for it to complete.
8618 * A return value of 'false' means that the write wasn't recorded
8619 * and cannot proceed as the array is being suspended.
8620 */
8621 bool md_write_start(struct mddev *mddev, struct bio *bi)
8622 {
8623 int did_change = 0;
8624
8625 if (bio_data_dir(bi) != WRITE)
8626 return true;
8627
8628 BUG_ON(mddev->ro == MD_RDONLY);
8629 if (mddev->ro == MD_AUTO_READ) {
8630 /* need to switch to read/write */
8631 mddev->ro = MD_RDWR;
8632 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8633 md_wakeup_thread(mddev->thread);
8634 md_wakeup_thread(mddev->sync_thread);
8635 did_change = 1;
8636 }
8637 rcu_read_lock();
8638 percpu_ref_get(&mddev->writes_pending);
8639 smp_mb(); /* Match smp_mb in set_in_sync() */
8640 if (mddev->safemode == 1)
8641 mddev->safemode = 0;
8642 /* sync_checkers is always 0 when writes_pending is in per-cpu mode */
8643 if (mddev->in_sync || mddev->sync_checkers) {
8644 spin_lock(&mddev->lock);
8645 if (mddev->in_sync) {
8646 mddev->in_sync = 0;
8647 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8648 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
8649 md_wakeup_thread(mddev->thread);
8650 did_change = 1;
8651 }
8652 spin_unlock(&mddev->lock);
8653 }
8654 rcu_read_unlock();
8655 if (did_change)
8656 sysfs_notify_dirent_safe(mddev->sysfs_state);
8657 if (!mddev->has_superblocks)
8658 return true;
8659 wait_event(mddev->sb_wait,
8660 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
8661 is_md_suspended(mddev));
8662 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
8663 percpu_ref_put(&mddev->writes_pending);
8664 return false;
8665 }
8666 return true;
8667 }
8668 EXPORT_SYMBOL(md_write_start);
8669
8670 /* md_write_inc can only be called when md_write_start() has
8671 * already been called at least once for the current request.
8672 * It increments the counter and is useful when a single request
8673 * is split into several parts. Each part causes an increment and
8674 * so needs a matching md_write_end().
8675 * Unlike md_write_start(), it is safe to call md_write_inc() inside
8676 * a spinlocked region.
8677 */
8678 void md_write_inc(struct mddev *mddev, struct bio *bi)
8679 {
8680 if (bio_data_dir(bi) != WRITE)
8681 return;
8682 WARN_ON_ONCE(mddev->in_sync || !md_is_rdwr(mddev));
8683 percpu_ref_get(&mddev->writes_pending);
8684 }
8685 EXPORT_SYMBOL(md_write_inc);
8686
8687 void md_write_end(struct mddev *mddev)
8688 {
8689 percpu_ref_put(&mddev->writes_pending);
8690
8691 if (mddev->safemode == 2)
8692 md_wakeup_thread(mddev->thread);
8693 else if (mddev->safemode_delay)
8694 /* The roundup() ensures this only performs locking once
8695 * every ->safemode_delay jiffies
8696 */
8697 mod_timer(&mddev->safemode_timer,
8698 roundup(jiffies, mddev->safemode_delay) +
8699 mddev->safemode_delay);
8700 }
8701
8702 EXPORT_SYMBOL(md_write_end);
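
/*
 * Sketch of the expected pairing in a personality's write path:
 * md_write_start() once per incoming write bio, md_write_inc() for each
 * additional split part, and one md_write_end() for every start/inc,
 * normally issued from the completion handler. example_make_request is
 * a hypothetical name and the submission details are elided.
 */
#if 0	/* illustrative only, not built as part of md.c */
static bool example_make_request(struct mddev *mddev, struct bio *bio)
{
	if (!md_write_start(mddev, bio))
		return false;	/* array is suspending; caller retries */

	/* ... map and submit the write to the member devices ... */

	/* in real code this is deferred to the bio completion handler */
	md_write_end(mddev);
	return true;
}
#endif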
8703
8704 /* This is used by raid0 and raid10 */
8705 void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
8706 struct bio *bio, sector_t start, sector_t size)
8707 {
8708 struct bio *discard_bio = NULL;
8709
8710 if (__blkdev_issue_discard(rdev->bdev, start, size, GFP_NOIO,
8711 &discard_bio) || !discard_bio)
8712 return;
8713
8714 bio_chain(discard_bio, bio);
8715 bio_clone_blkg_association(discard_bio, bio);
8716 if (mddev->gendisk)
8717 trace_block_bio_remap(discard_bio,
8718 disk_devt(mddev->gendisk),
8719 bio->bi_iter.bi_sector);
8720 submit_bio_noacct(discard_bio);
8721 }
8722 EXPORT_SYMBOL_GPL(md_submit_discard_bio);
8723
8724 static void md_end_clone_io(struct bio *bio)
8725 {
8726 struct md_io_clone *md_io_clone = bio->bi_private;
8727 struct bio *orig_bio = md_io_clone->orig_bio;
8728 struct mddev *mddev = md_io_clone->mddev;
8729
8730 if (bio->bi_status && !orig_bio->bi_status)
8731 orig_bio->bi_status = bio->bi_status;
8732
8733 if (md_io_clone->start_time)
8734 bio_end_io_acct(orig_bio, md_io_clone->start_time);
8735
8736 bio_put(bio);
8737 bio_endio(orig_bio);
8738 percpu_ref_put(&mddev->active_io);
8739 }
8740
8741 static void md_clone_bio(struct mddev *mddev, struct bio **bio)
8742 {
8743 struct block_device *bdev = (*bio)->bi_bdev;
8744 struct md_io_clone *md_io_clone;
8745 struct bio *clone =
8746 bio_alloc_clone(bdev, *bio, GFP_NOIO, &mddev->io_clone_set);
8747
8748 md_io_clone = container_of(clone, struct md_io_clone, bio_clone);
8749 md_io_clone->orig_bio = *bio;
8750 md_io_clone->mddev = mddev;
8751 if (blk_queue_io_stat(bdev->bd_disk->queue))
8752 md_io_clone->start_time = bio_start_io_acct(*bio);
8753
8754 clone->bi_end_io = md_end_clone_io;
8755 clone->bi_private = md_io_clone;
8756 *bio = clone;
8757 }
8758
8759 void md_account_bio(struct mddev *mddev, struct bio **bio)
8760 {
8761 percpu_ref_get(&mddev->active_io);
8762 md_clone_bio(mddev, bio);
8763 }
8764 EXPORT_SYMBOL_GPL(md_account_bio);
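
/*
 * Sketch of how a personality is expected to use this: call it once on
 * the incoming bio before remapping, so the clone carries the I/O
 * accounting and the active_io reference; *bio then points at the clone
 * and completion flows back through md_end_clone_io(). example_submit
 * is a hypothetical name.
 */
#if 0	/* illustrative only, not built as part of md.c */
static bool example_submit(struct mddev *mddev, struct md_rdev *rdev,
			   struct bio *bio)
{
	md_account_bio(mddev, &bio);	/* bio now points at the clone */
	bio_set_dev(bio, rdev->bdev);
	submit_bio_noacct(bio);
	return true;
}
#endif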
8765
8766 /* md_allow_write(mddev)
8767 * Calling this ensures that the array is marked 'active' so that writes
8768 * may proceed without blocking. It is important to call this before
8769 * attempting a GFP_KERNEL allocation while holding the mddev lock.
8770 * Must be called with mddev_lock held.
8771 */
8772 void md_allow_write(struct mddev *mddev)
8773 {
8774 if (!mddev->pers)
8775 return;
8776 if (!md_is_rdwr(mddev))
8777 return;
8778 if (!mddev->pers->sync_request)
8779 return;
8780
8781 spin_lock(&mddev->lock);
8782 if (mddev->in_sync) {
8783 mddev->in_sync = 0;
8784 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8785 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
8786 if (mddev->safemode_delay &&
8787 mddev->safemode == 0)
8788 mddev->safemode = 1;
8789 spin_unlock(&mddev->lock);
8790 md_update_sb(mddev, 0);
8791 sysfs_notify_dirent_safe(mddev->sysfs_state);
8792 /* wait for the dirty state to be recorded in the metadata */
8793 wait_event(mddev->sb_wait,
8794 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
8795 } else
8796 spin_unlock(&mddev->lock);
8797 }
8798 EXPORT_SYMBOL_GPL(md_allow_write);
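
/*
 * Sketch of the intended call site: a personality about to do a
 * GFP_KERNEL allocation while holding the reconfig mutex marks the
 * array active first, so the allocation cannot stall behind a pending
 * superblock write. example_grow_buffers is a hypothetical name.
 */
#if 0	/* illustrative only, not built as part of md.c */
static void *example_grow_buffers(struct mddev *mddev, size_t size)
{
	lockdep_assert_held(&mddev->reconfig_mutex);

	md_allow_write(mddev);	/* may block until 'active' is recorded */
	return kzalloc(size, GFP_KERNEL);
}
#endif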
8799
8800 #define SYNC_MARKS 10
8801 #define SYNC_MARK_STEP (3*HZ)
8802 #define UPDATE_FREQUENCY (5*60*HZ)
8803 void md_do_sync(struct md_thread *thread)
8804 {
8805 struct mddev *mddev = thread->mddev;
8806 struct mddev *mddev2;
8807 unsigned int currspeed = 0, window;
8808 sector_t max_sectors,j, io_sectors, recovery_done;
8809 unsigned long mark[SYNC_MARKS];
8810 unsigned long update_time;
8811 sector_t mark_cnt[SYNC_MARKS];
8812 int last_mark,m;
8813 sector_t last_check;
8814 int skipped = 0;
8815 struct md_rdev *rdev;
8816 char *desc, *action = NULL;
8817 struct blk_plug plug;
8818 int ret;
8819
8820 /* just in case the thread restarts... */
8821 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
8822 test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
8823 return;
8824 if (!md_is_rdwr(mddev)) {/* never try to sync a read-only array */
8825 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8826 return;
8827 }
8828
8829 if (mddev_is_clustered(mddev)) {
8830 ret = md_cluster_ops->resync_start(mddev);
8831 if (ret)
8832 goto skip;
8833
8834 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
8835 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
8836 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
8837 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
8838 && ((unsigned long long)mddev->curr_resync_completed
8839 < (unsigned long long)mddev->resync_max_sectors))
8840 goto skip;
8841 }
8842
8843 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8844 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
8845 desc = "data-check";
8846 action = "check";
8847 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
8848 desc = "requested-resync";
8849 action = "repair";
8850 } else
8851 desc = "resync";
8852 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
8853 desc = "reshape";
8854 else
8855 desc = "recovery";
8856
8857 mddev->last_sync_action = action ?: desc;
8858
8859 /*
8860 * Before starting a resync we must have set curr_resync to
8861 * 2, and then checked that every "conflicting" array has curr_resync
8862 * less than ours. When we find one that is the same or higher
8863 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync
8864 * to 1 if we choose to yield (based arbitrarily on the address of the mddev structure).
8865 * This will mean we have to start checking from the beginning again.
8866 *
8867 */
8868
8869 do {
8870 int mddev2_minor = -1;
8871 mddev->curr_resync = MD_RESYNC_DELAYED;
8872
8873 try_again:
8874 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8875 goto skip;
8876 spin_lock(&all_mddevs_lock);
8877 list_for_each_entry(mddev2, &all_mddevs, all_mddevs) {
8878 if (test_bit(MD_DELETED, &mddev2->flags))
8879 continue;
8880 if (mddev2 == mddev)
8881 continue;
8882 if (!mddev->parallel_resync
8883 && mddev2->curr_resync
8884 && match_mddev_units(mddev, mddev2)) {
8885 DEFINE_WAIT(wq);
8886 if (mddev < mddev2 &&
8887 mddev->curr_resync == MD_RESYNC_DELAYED) {
8888 /* arbitrarily yield */
8889 mddev->curr_resync = MD_RESYNC_YIELDED;
8890 wake_up(&resync_wait);
8891 }
8892 if (mddev > mddev2 &&
8893 mddev->curr_resync == MD_RESYNC_YIELDED)
8894 /* no need to wait here, we can wait the next
8895 * time 'round when curr_resync == 2
8896 */
8897 continue;
8898 /* We need to wait 'interruptible' so as not to
8899 * contribute to the load average, and not to
8900 * be caught by 'softlockup'
8901 */
8902 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
8903 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8904 mddev2->curr_resync >= mddev->curr_resync) {
8905 if (mddev2_minor != mddev2->md_minor) {
8906 mddev2_minor = mddev2->md_minor;
8907 pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
8908 desc, mdname(mddev),
8909 mdname(mddev2));
8910 }
8911 spin_unlock(&all_mddevs_lock);
8912
8913 if (signal_pending(current))
8914 flush_signals(current);
8915 schedule();
8916 finish_wait(&resync_wait, &wq);
8917 goto try_again;
8918 }
8919 finish_wait(&resync_wait, &wq);
8920 }
8921 }
8922 spin_unlock(&all_mddevs_lock);
8923 } while (mddev->curr_resync < MD_RESYNC_DELAYED);
8924
8925 j = 0;
8926 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8927 /* resync follows the size requested by the personality,
8928 * which defaults to physical size, but can be virtual size
8929 */
8930 max_sectors = mddev->resync_max_sectors;
8931 atomic64_set(&mddev->resync_mismatches, 0);
8932 /* we don't use the checkpoint if there's a bitmap */
8933 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8934 j = mddev->resync_min;
8935 else if (!mddev->bitmap)
8936 j = mddev->recovery_cp;
8937
8938 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
8939 max_sectors = mddev->resync_max_sectors;
8940 /*
8941 * If the original node aborts reshaping then we continue the
8942 * reshaping, so set j again to avoid restarting the reshape from
8943 * the very beginning
8944 */
8945 if (mddev_is_clustered(mddev) &&
8946 mddev->reshape_position != MaxSector)
8947 j = mddev->reshape_position;
8948 } else {
8949 /* recovery follows the physical size of devices */
8950 max_sectors = mddev->dev_sectors;
8951 j = MaxSector;
8952 rcu_read_lock();
8953 rdev_for_each_rcu(rdev, mddev)
8954 if (rdev->raid_disk >= 0 &&
8955 !test_bit(Journal, &rdev->flags) &&
8956 !test_bit(Faulty, &rdev->flags) &&
8957 !test_bit(In_sync, &rdev->flags) &&
8958 rdev->recovery_offset < j)
8959 j = rdev->recovery_offset;
8960 rcu_read_unlock();
8961
8962 /* If there is a bitmap, we need to make sure all
8963 * writes that started before we added a spare
8964 * complete before we start doing a recovery.
8965 * Otherwise the write might complete and (via
8966 * bitmap_endwrite) set a bit in the bitmap after the
8967 * recovery has checked that bit and skipped that
8968 * region.
8969 */
8970 if (mddev->bitmap) {
8971 mddev->pers->quiesce(mddev, 1);
8972 mddev->pers->quiesce(mddev, 0);
8973 }
8974 }
8975
8976 pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
8977 pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev));
8978 pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
8979 speed_max(mddev), desc);
8980
8981 is_mddev_idle(mddev, 1); /* this initializes IO event counters */
8982
8983 io_sectors = 0;
8984 for (m = 0; m < SYNC_MARKS; m++) {
8985 mark[m] = jiffies;
8986 mark_cnt[m] = io_sectors;
8987 }
8988 last_mark = 0;
8989 mddev->resync_mark = mark[last_mark];
8990 mddev->resync_mark_cnt = mark_cnt[last_mark];
8991
8992 /*
8993 * Tune reconstruction:
8994 */
8995 window = 32 * (PAGE_SIZE / 512);
8996 pr_debug("md: using %dk window, over a total of %lluk.\n",
8997 window/2, (unsigned long long)max_sectors/2);
8998
8999 atomic_set(&mddev->recovery_active, 0);
9000 last_check = 0;
9001
9002 if (j >= MD_RESYNC_ACTIVE) {
9003 pr_debug("md: resuming %s of %s from checkpoint.\n",
9004 desc, mdname(mddev));
9005 mddev->curr_resync = j;
9006 } else
9007 mddev->curr_resync = MD_RESYNC_ACTIVE; /* no longer delayed */
9008 mddev->curr_resync_completed = j;
9009 sysfs_notify_dirent_safe(mddev->sysfs_completed);
9010 md_new_event();
9011 update_time = jiffies;
9012
9013 blk_start_plug(&plug);
9014 while (j < max_sectors) {
9015 sector_t sectors;
9016
9017 skipped = 0;
9018
9019 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9020 ((mddev->curr_resync > mddev->curr_resync_completed &&
9021 (mddev->curr_resync - mddev->curr_resync_completed)
9022 > (max_sectors >> 4)) ||
9023 time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
9024 (j - mddev->curr_resync_completed)*2
9025 >= mddev->resync_max - mddev->curr_resync_completed ||
9026 mddev->curr_resync_completed > mddev->resync_max
9027 )) {
9028 /* time to update curr_resync_completed */
9029 wait_event(mddev->recovery_wait,
9030 atomic_read(&mddev->recovery_active) == 0);
9031 mddev->curr_resync_completed = j;
9032 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
9033 j > mddev->recovery_cp)
9034 mddev->recovery_cp = j;
9035 update_time = jiffies;
9036 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
9037 sysfs_notify_dirent_safe(mddev->sysfs_completed);
9038 }
9039
9040 while (j >= mddev->resync_max &&
9041 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
9042 /* As this condition is controlled by user-space,
9043 * we can block indefinitely, so use '_interruptible'
9044 * to avoid triggering warnings.
9045 */
9046 flush_signals(current); /* just in case */
9047 wait_event_interruptible(mddev->recovery_wait,
9048 mddev->resync_max > j
9049 || test_bit(MD_RECOVERY_INTR,
9050 &mddev->recovery));
9051 }
9052
9053 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9054 break;
9055
9056 sectors = mddev->pers->sync_request(mddev, j, &skipped);
9057 if (sectors == 0) {
9058 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
9059 break;
9060 }
9061
9062 if (!skipped) { /* actual IO requested */
9063 io_sectors += sectors;
9064 atomic_add(sectors, &mddev->recovery_active);
9065 }
9066
9067 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9068 break;
9069
9070 j += sectors;
9071 if (j > max_sectors)
9072 /* when skipping, extra large numbers can be returned. */
9073 j = max_sectors;
9074 if (j >= MD_RESYNC_ACTIVE)
9075 mddev->curr_resync = j;
9076 mddev->curr_mark_cnt = io_sectors;
9077 if (last_check == 0)
9078 /* this is the earliest that rebuild will be
9079 * visible in /proc/mdstat
9080 */
9081 md_new_event();
9082
9083 if (last_check + window > io_sectors || j == max_sectors)
9084 continue;
9085
9086 last_check = io_sectors;
9087 repeat:
9088 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
9089 /* step marks */
9090 int next = (last_mark+1) % SYNC_MARKS;
9091
9092 mddev->resync_mark = mark[next];
9093 mddev->resync_mark_cnt = mark_cnt[next];
9094 mark[next] = jiffies;
9095 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
9096 last_mark = next;
9097 }
9098
9099 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9100 break;
9101
9102 /*
9103 * this loop only exits when we are slower than the
9104 * 'hard' speed limit, or the system was IO-idle for
9105 * a jiffy.
9106 * the system might be non-idle CPU-wise, but we only care
9107 * about not overloading the IO subsystem. (things like an
9108 * e2fsck being done on the RAID array should execute fast)
9109 */
9110 cond_resched();
9111
9112 recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
9113 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
9114 /((jiffies-mddev->resync_mark)/HZ +1) +1;
9115
9116 if (currspeed > speed_min(mddev)) {
9117 if (currspeed > speed_max(mddev)) {
9118 msleep(500);
9119 goto repeat;
9120 }
9121 if (!is_mddev_idle(mddev, 0)) {
9122 /*
9123 * Give other IO more of a chance.
9124 * The faster the devices, the less we wait.
9125 */
9126 wait_event(mddev->recovery_wait,
9127 !atomic_read(&mddev->recovery_active));
9128 }
9129 }
9130 }
9131 pr_info("md: %s: %s %s.\n",mdname(mddev), desc,
9132 test_bit(MD_RECOVERY_INTR, &mddev->recovery)
9133 ? "interrupted" : "done");
9134 /*
9135 * this also signals 'finished resyncing' to md_stop
9136 */
9137 blk_finish_plug(&plug);
9138 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
9139
9140 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9141 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
9142 mddev->curr_resync >= MD_RESYNC_ACTIVE) {
9143 mddev->curr_resync_completed = mddev->curr_resync;
9144 sysfs_notify_dirent_safe(mddev->sysfs_completed);
9145 }
9146 mddev->pers->sync_request(mddev, max_sectors, &skipped);
9147
9148 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
9149 mddev->curr_resync > MD_RESYNC_ACTIVE) {
9150 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
9151 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
9152 if (mddev->curr_resync >= mddev->recovery_cp) {
9153 pr_debug("md: checkpointing %s of %s.\n",
9154 desc, mdname(mddev));
9155 if (test_bit(MD_RECOVERY_ERROR,
9156 &mddev->recovery))
9157 mddev->recovery_cp =
9158 mddev->curr_resync_completed;
9159 else
9160 mddev->recovery_cp =
9161 mddev->curr_resync;
9162 }
9163 } else
9164 mddev->recovery_cp = MaxSector;
9165 } else {
9166 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9167 mddev->curr_resync = MaxSector;
9168 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9169 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
9170 rcu_read_lock();
9171 rdev_for_each_rcu(rdev, mddev)
9172 if (rdev->raid_disk >= 0 &&
9173 mddev->delta_disks >= 0 &&
9174 !test_bit(Journal, &rdev->flags) &&
9175 !test_bit(Faulty, &rdev->flags) &&
9176 !test_bit(In_sync, &rdev->flags) &&
9177 rdev->recovery_offset < mddev->curr_resync)
9178 rdev->recovery_offset = mddev->curr_resync;
9179 rcu_read_unlock();
9180 }
9181 }
9182 }
9183 skip:
9184 /* set CHANGE_PENDING here since another update may still be needed,
9185 * so other cluster nodes are informed. It should be harmless for
9186 * non-clustered raid */
9187 set_mask_bits(&mddev->sb_flags, 0,
9188 BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
9189
9190 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9191 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
9192 mddev->delta_disks > 0 &&
9193 mddev->pers->finish_reshape &&
9194 mddev->pers->size &&
9195 mddev->queue) {
9196 mddev_lock_nointr(mddev);
9197 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
9198 mddev_unlock(mddev);
9199 if (!mddev_is_clustered(mddev))
9200 set_capacity_and_notify(mddev->gendisk,
9201 mddev->array_sectors);
9202 }
9203
9204 spin_lock(&mddev->lock);
9205 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
9206 /* We completed so min/max setting can be forgotten if used. */
9207 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
9208 mddev->resync_min = 0;
9209 mddev->resync_max = MaxSector;
9210 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
9211 mddev->resync_min = mddev->curr_resync_completed;
9212 set_bit(MD_RECOVERY_DONE, &mddev->recovery);
9213 mddev->curr_resync = MD_RESYNC_NONE;
9214 spin_unlock(&mddev->lock);
9215
9216 wake_up(&resync_wait);
9217 wake_up(&mddev->sb_wait);
9218 md_wakeup_thread(mddev->thread);
9219 return;
9220 }
9221 EXPORT_SYMBOL_GPL(md_do_sync);
9222
9223 static int remove_and_add_spares(struct mddev *mddev,
9224 struct md_rdev *this)
9225 {
9226 struct md_rdev *rdev;
9227 int spares = 0;
9228 int removed = 0;
9229 bool remove_some = false;
9230
9231 if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
9232 /* Mustn't remove devices when resync thread is running */
9233 return 0;
9234
9235 rdev_for_each(rdev, mddev) {
9236 if ((this == NULL || rdev == this) &&
9237 rdev->raid_disk >= 0 &&
9238 !test_bit(Blocked, &rdev->flags) &&
9239 test_bit(Faulty, &rdev->flags) &&
9240 atomic_read(&rdev->nr_pending)==0) {
9241 /* Faulty non-Blocked devices with nr_pending == 0
9242 * never get nr_pending incremented,
9243 * never get Faulty cleared, and never get Blocked set.
9244 * So we can synchronize_rcu now rather than once per device
9245 */
9246 remove_some = true;
9247 set_bit(RemoveSynchronized, &rdev->flags);
9248 }
9249 }
9250
9251 if (remove_some)
9252 synchronize_rcu();
9253 rdev_for_each(rdev, mddev) {
9254 if ((this == NULL || rdev == this) &&
9255 rdev->raid_disk >= 0 &&
9256 !test_bit(Blocked, &rdev->flags) &&
9257 ((test_bit(RemoveSynchronized, &rdev->flags) ||
9258 (!test_bit(In_sync, &rdev->flags) &&
9259 !test_bit(Journal, &rdev->flags))) &&
9260 atomic_read(&rdev->nr_pending)==0)) {
9261 if (mddev->pers->hot_remove_disk(
9262 mddev, rdev) == 0) {
9263 sysfs_unlink_rdev(mddev, rdev);
9264 rdev->saved_raid_disk = rdev->raid_disk;
9265 rdev->raid_disk = -1;
9266 removed++;
9267 }
9268 }
9269 if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
9270 clear_bit(RemoveSynchronized, &rdev->flags);
9271 }
9272
9273 if (removed && mddev->kobj.sd)
9274 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
9275
9276 if (this && removed)
9277 goto no_add;
9278
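/*
 * Second pass: count devices that are still being rebuilt and try to
 * hot-add any unused, non-faulty device.  On a read-only array only
 * devices that can simply rejoin at their old slot (valid
 * saved_raid_disk and no outstanding bitmap-based resync) are added.
 */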
9279 rdev_for_each(rdev, mddev) {
9280 if (this && this != rdev)
9281 continue;
9282 if (test_bit(Candidate, &rdev->flags))
9283 continue;
9284 if (rdev->raid_disk >= 0 &&
9285 !test_bit(In_sync, &rdev->flags) &&
9286 !test_bit(Journal, &rdev->flags) &&
9287 !test_bit(Faulty, &rdev->flags))
9288 spares++;
9289 if (rdev->raid_disk >= 0)
9290 continue;
9291 if (test_bit(Faulty, &rdev->flags))
9292 continue;
9293 if (!test_bit(Journal, &rdev->flags)) {
9294 if (!md_is_rdwr(mddev) &&
9295 !(rdev->saved_raid_disk >= 0 &&
9296 !test_bit(Bitmap_sync, &rdev->flags)))
9297 continue;
9298
9299 rdev->recovery_offset = 0;
9300 }
9301 if (mddev->pers->hot_add_disk(mddev, rdev) == 0) {
9302 /* failure here is OK */
9303 sysfs_link_rdev(mddev, rdev);
9304 if (!test_bit(Journal, &rdev->flags))
9305 spares++;
9306 md_new_event();
9307 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
9308 }
9309 }
9310 no_add:
9311 if (removed)
9312 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
9313 return spares;
9314 }
9315
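/*
 * md_start_sync() runs from md_misc_wq (queued via mddev->del_work):
 * register and wake the "resync" thread.  If the thread cannot be
 * created, clear the recovery state bits so a later
 * md_check_recovery() pass can try again.
 */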
9316 static void md_start_sync(struct work_struct *ws)
9317 {
9318 struct mddev *mddev = container_of(ws, struct mddev, del_work);
9319
9320 rcu_assign_pointer(mddev->sync_thread,
9321 md_register_thread(md_do_sync, mddev, "resync"));
9322 if (!mddev->sync_thread) {
9323 pr_warn("%s: could not start resync thread...\n",
9324 mdname(mddev));
9325 /* leave the spares where they are, it shouldn't hurt */
9326 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9327 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9328 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9329 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9330 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9331 wake_up(&resync_wait);
9332 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
9333 &mddev->recovery))
9334 if (mddev->sysfs_action)
9335 sysfs_notify_dirent_safe(mddev->sysfs_action);
9336 } else
9337 md_wakeup_thread(mddev->sync_thread);
9338 sysfs_notify_dirent_safe(mddev->sysfs_action);
9339 md_new_event();
9340 }
9341
9342 /*
9343 * This routine is regularly called by all per-raid-array threads to
9344 * deal with generic issues like resync and super-block update.
9345 * Raid personalities that don't have a thread (linear/raid0) do not
9346 * need this as they never do any recovery or update the superblock.
9347 *
9348 * It does not do any resync itself, but rather "forks" off other threads
9349 * to do that as needed.
9350 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
9351 * "->recovery" and create a thread at ->sync_thread.
9352 * When the thread finishes it sets MD_RECOVERY_DONE
9353 * and wakes up this thread, which will reap the sync thread and finish up.
9354 * This thread also removes any faulty devices (with nr_pending == 0).
9355 *
9356 * The overall approach is:
9357 * 1/ if the superblock needs updating, update it.
9358 * 2/ If a recovery thread is running, don't do anything else.
9359 * 3/ If recovery has finished, clean up, possibly marking spares active.
9360 * 4/ If there are any faulty devices, remove them.
9361 * 5/ If array is degraded, try to add spare devices.
9362 * 6/ If array has spares or is not in-sync, start a resync thread.
9363 */
9364 void md_check_recovery(struct mddev *mddev)
9365 {
9366 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
9367 /* Write superblock - thread that called mddev_suspend()
9368 * holds reconfig_mutex for us.
9369 */
9370 set_bit(MD_UPDATING_SB, &mddev->flags);
9371 smp_mb__after_atomic();
9372 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
9373 md_update_sb(mddev, 0);
9374 clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
9375 wake_up(&mddev->sb_wait);
9376 }
9377
9378 if (is_md_suspended(mddev))
9379 return;
9380
9381 if (mddev->bitmap)
9382 md_bitmap_daemon_work(mddev);
9383
9384 if (signal_pending(current)) {
9385 if (mddev->pers->sync_request && !mddev->external) {
9386 pr_debug("md: %s in immediate safe mode\n",
9387 mdname(mddev));
9388 mddev->safemode = 2;
9389 }
9390 flush_signals(current);
9391 }
9392
9393 if (!md_is_rdwr(mddev) &&
9394 !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
9395 return;
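/*
 * Only continue if there is actually something to do: superblock
 * changes other than MD_SB_CHANGE_PENDING, recovery that is needed or
 * has completed, or safemode handling that requires the array lock.
 */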
9396 if ( ! (
9397 (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
9398 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
9399 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
9400 (mddev->external == 0 && mddev->safemode == 1) ||
9401 (mddev->safemode == 2
9402 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
9403 ))
9404 return;
9405
9406 if (mddev_trylock(mddev)) {
9407 int spares = 0;
9408 bool try_set_sync = mddev->safemode != 0;
9409
9410 if (!mddev->external && mddev->safemode == 1)
9411 mddev->safemode = 0;
9412
9413 if (!md_is_rdwr(mddev)) {
9414 struct md_rdev *rdev;
9415 if (!mddev->external && mddev->in_sync)
9416 /* 'Blocked' flag not needed as failed devices
9417 * will be recorded if the array is switched to read/write.
9418 * Leaving it set will prevent the device
9419 * from being removed.
9420 */
9421 rdev_for_each(rdev, mddev)
9422 clear_bit(Blocked, &rdev->flags);
9423 /* On a read-only array we can:
9424 * - remove failed devices
9425 * - add already-in_sync devices if the array itself
9426 * is in-sync.
9427 * As we only add devices that are already in-sync,
9428 * we can activate the spares immediately.
9429 */
9430 remove_and_add_spares(mddev, NULL);
9431 /* There is no thread, but we need to call
9432 * ->spare_active and clear saved_raid_disk
9433 */
9434 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
9435 md_reap_sync_thread(mddev);
9436 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9437 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9438 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
9439 goto unlock;
9440 }
9441
9442 if (mddev_is_clustered(mddev)) {
9443 struct md_rdev *rdev, *tmp;
9444 /* kick the device if another node issued a
9445 * remove-disk request.
9446 */
9447 rdev_for_each_safe(rdev, tmp, mddev) {
9448 if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
9449 rdev->raid_disk < 0)
9450 md_kick_rdev_from_array(rdev);
9451 }
9452 }
9453
9454 if (try_set_sync && !mddev->external && !mddev->in_sync) {
9455 spin_lock(&mddev->lock);
9456 set_in_sync(mddev);
9457 spin_unlock(&mddev->lock);
9458 }
9459
9460 if (mddev->sb_flags)
9461 md_update_sb(mddev, 0);
9462
9463 /*
9464 * Never start a new sync thread if MD_RECOVERY_RUNNING is
9465 * still set.
9466 */
9467 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
9468 if (!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
9469 /* resync/recovery still happening */
9470 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9471 goto unlock;
9472 }
9473
9474 if (WARN_ON_ONCE(!mddev->sync_thread))
9475 goto unlock;
9476
9477 md_reap_sync_thread(mddev);
9478 goto unlock;
9479 }
9480
9481 /* Set RUNNING before clearing NEEDED to avoid
9482 * any transients in the value of "sync_action".
9483 */
9484 mddev->curr_resync_completed = 0;
9485 spin_lock(&mddev->lock);
9486 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9487 spin_unlock(&mddev->lock);
9488 /* Clear some bits that don't mean anything, but
9489 * might be left set
9490 */
9491 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
9492 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
9493
9494 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
9495 test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
9496 goto not_running;
9497 /* no recovery is running.
9498 * remove any failed drives, then
9499 * add spares if possible.
9500 * Spares are also removed and re-added, to allow
9501 * the personality to fail the re-add.
9502 */
9503
9504 if (mddev->reshape_position != MaxSector) {
9505 if (mddev->pers->check_reshape == NULL ||
9506 mddev->pers->check_reshape(mddev) != 0)
9507 /* Cannot proceed */
9508 goto not_running;
9509 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9510 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9511 } else if ((spares = remove_and_add_spares(mddev, NULL))) {
9512 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9513 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9514 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9515 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9516 } else if (mddev->recovery_cp < MaxSector) {
9517 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9518 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9519 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
9520 /* nothing to be done ... */
9521 goto not_running;
9522
9523 if (mddev->pers->sync_request) {
9524 if (spares) {
9525 /* We are adding a device or devices to an array
9526 * which has the bitmap stored on all devices.
9527 * So make sure all bitmap pages get written
9528 */
9529 md_bitmap_write_all(mddev->bitmap);
9530 }
9531 INIT_WORK(&mddev->del_work, md_start_sync);
9532 queue_work(md_misc_wq, &mddev->del_work);
9533 goto unlock;
9534 }
9535 not_running:
9536 if (!mddev->sync_thread) {
9537 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9538 wake_up(&resync_wait);
9539 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
9540 &mddev->recovery))
9541 if (mddev->sysfs_action)
9542 sysfs_notify_dirent_safe(mddev->sysfs_action);
9543 }
9544 unlock:
9545 wake_up(&mddev->sb_wait);
9546 mddev_unlock(mddev);
9547 }
9548 }
9549 EXPORT_SYMBOL(md_check_recovery);
9550
9551 void md_reap_sync_thread(struct mddev *mddev)
9552 {
9553 struct md_rdev *rdev;
9554 sector_t old_dev_sectors = mddev->dev_sectors;
9555 bool is_reshaped = false;
9556
9557 /* resync has finished, collect result */
9558 md_unregister_thread(mddev, &mddev->sync_thread);
9559 atomic_inc(&mddev->sync_seq);
9560
9561 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
9562 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
9563 mddev->degraded != mddev->raid_disks) {
9564 /* success...*/
9565 /* activate any spares */
9566 if (mddev->pers->spare_active(mddev)) {
9567 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
9568 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
9569 }
9570 }
9571 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9572 mddev->pers->finish_reshape) {
9573 mddev->pers->finish_reshape(mddev);
9574 if (mddev_is_clustered(mddev))
9575 is_reshaped = true;
9576 }
9577
9578 /* If array is no longer degraded, then any saved_raid_disk
9579 * information must be scrapped.
9580 */
9581 if (!mddev->degraded)
9582 rdev_for_each(rdev, mddev)
9583 rdev->saved_raid_disk = -1;
9584
9585 md_update_sb(mddev, 1);
9586 /* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
9587 * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
9588 * clustered raid */
9589 if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
9590 md_cluster_ops->resync_finish(mddev);
9591 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9592 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
9593 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9594 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9595 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9596 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9597 /*
9598 * We call md_cluster_ops->update_size here because sync_size could
9599 * have been changed by md_update_sb, and MD_RECOVERY_RESHAPE is now cleared,
9600 * so it is time to update the size across the cluster.
9601 */
9602 if (mddev_is_clustered(mddev) && is_reshaped
9603 && !test_bit(MD_CLOSING, &mddev->flags))
9604 md_cluster_ops->update_size(mddev, old_dev_sectors);
9605 /* flag recovery needed just to double check */
9606 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9607 sysfs_notify_dirent_safe(mddev->sysfs_completed);
9608 sysfs_notify_dirent_safe(mddev->sysfs_action);
9609 md_new_event();
9610 if (mddev->event_work.func)
9611 queue_work(md_misc_wq, &mddev->event_work);
9612 wake_up(&resync_wait);
9613 }
9614 EXPORT_SYMBOL(md_reap_sync_thread);
9615
9616 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
9617 {
9618 sysfs_notify_dirent_safe(rdev->sysfs_state);
9619 wait_event_timeout(rdev->blocked_wait,
9620 !test_bit(Blocked, &rdev->flags) &&
9621 !test_bit(BlockedBadBlocks, &rdev->flags),
9622 msecs_to_jiffies(5000));
9623 rdev_dec_pending(rdev, mddev);
9624 }
9625 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
9626
9627 void md_finish_reshape(struct mddev *mddev)
9628 {
9629 /* called by the personality module when reshape completes. */
9630 struct md_rdev *rdev;
9631
9632 rdev_for_each(rdev, mddev) {
9633 if (rdev->data_offset > rdev->new_data_offset)
9634 rdev->sectors += rdev->data_offset - rdev->new_data_offset;
9635 else
9636 rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
9637 rdev->data_offset = rdev->new_data_offset;
9638 }
9639 }
9640 EXPORT_SYMBOL(md_finish_reshape);
9641
9642 /* Bad block management */
9643
9644 /* Returns 1 on success, 0 on failure */
9645 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
9646 int is_new)
9647 {
9648 struct mddev *mddev = rdev->mddev;
9649 int rv;
9650 if (is_new)
9651 s += rdev->new_data_offset;
9652 else
9653 s += rdev->data_offset;
9654 rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
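/* badblocks_set() returns 0 on success; translate that into this
 * function's 1-on-success convention and request a superblock update
 * so the new bad-block record reaches stable storage promptly.
 */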
9655 if (rv == 0) {
9656 /* Make sure they get written out promptly */
9657 if (test_bit(ExternalBbl, &rdev->flags))
9658 sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks);
9659 sysfs_notify_dirent_safe(rdev->sysfs_state);
9660 set_mask_bits(&mddev->sb_flags, 0,
9661 BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
9662 md_wakeup_thread(rdev->mddev->thread);
9663 return 1;
9664 } else
9665 return 0;
9666 }
9667 EXPORT_SYMBOL_GPL(rdev_set_badblocks);
9668
9669 int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
9670 int is_new)
9671 {
9672 int rv;
9673 if (is_new)
9674 s += rdev->new_data_offset;
9675 else
9676 s += rdev->data_offset;
9677 rv = badblocks_clear(&rdev->badblocks, s, sectors);
9678 if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
9679 sysfs_notify_dirent_safe(rdev->sysfs_badblocks);
9680 return rv;
9681 }
9682 EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
9683
9684 static int md_notify_reboot(struct notifier_block *this,
9685 unsigned long code, void *x)
9686 {
9687 struct mddev *mddev, *n;
9688 int need_delay = 0;
9689
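/*
 * On reboot/shutdown, stop writes and put each persistent array into
 * immediate safemode so its superblock is marked clean; if any arrays
 * exist, delay the reboot briefly to let in-flight updates reach the
 * disks.
 */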
9690 spin_lock(&all_mddevs_lock);
9691 list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
9692 if (!mddev_get(mddev))
9693 continue;
9694 spin_unlock(&all_mddevs_lock);
9695 if (mddev_trylock(mddev)) {
9696 if (mddev->pers)
9697 __md_stop_writes(mddev);
9698 if (mddev->persistent)
9699 mddev->safemode = 2;
9700 mddev_unlock(mddev);
9701 }
9702 need_delay = 1;
9703 mddev_put(mddev);
9704 spin_lock(&all_mddevs_lock);
9705 }
9706 spin_unlock(&all_mddevs_lock);
9707
9708 /*
9709 * certain more exotic SCSI devices are known to be
9710 * volatile with respect to overly early system reboots. While the
9711 * right place to handle this issue is the individual device
9712 * driver, we do want to have a safe RAID driver ...
9713 */
9714 if (need_delay)
9715 msleep(1000);
9716
9717 return NOTIFY_DONE;
9718 }
9719
9720 static struct notifier_block md_notifier = {
9721 .notifier_call = md_notify_reboot,
9722 .next = NULL,
9723 .priority = INT_MAX, /* before any real devices */
9724 };
9725
9726 static void md_geninit(void)
9727 {
9728 pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
9729
9730 proc_create("mdstat", S_IRUGO, NULL, &mdstat_proc_ops);
9731 }
9732
9733 static int __init md_init(void)
9734 {
9735 int ret = -ENOMEM;
9736
9737 md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
9738 if (!md_wq)
9739 goto err_wq;
9740
9741 md_misc_wq = alloc_workqueue("md_misc", 0, 0);
9742 if (!md_misc_wq)
9743 goto err_misc_wq;
9744
9745 md_bitmap_wq = alloc_workqueue("md_bitmap", WQ_MEM_RECLAIM | WQ_UNBOUND,
9746 0);
9747 if (!md_bitmap_wq)
9748 goto err_bitmap_wq;
9749
9750 ret = __register_blkdev(MD_MAJOR, "md", md_probe);
9751 if (ret < 0)
9752 goto err_md;
9753
9754 ret = __register_blkdev(0, "mdp", md_probe);
9755 if (ret < 0)
9756 goto err_mdp;
9757 mdp_major = ret;
9758
9759 register_reboot_notifier(&md_notifier);
9760 raid_table_header = register_sysctl("dev/raid", raid_table);
9761
9762 md_geninit();
9763 return 0;
9764
9765 err_mdp:
9766 unregister_blkdev(MD_MAJOR, "md");
9767 err_md:
9768 destroy_workqueue(md_bitmap_wq);
9769 err_bitmap_wq:
9770 destroy_workqueue(md_misc_wq);
9771 err_misc_wq:
9772 destroy_workqueue(md_wq);
9773 err_wq:
9774 return ret;
9775 }
9776
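/*
 * check_sb_changes() is called on a clustered array after another node
 * has updated the superblock: apply any size change, device role
 * changes (activations and failures), raid_disks changes and reshape
 * progress recorded by the remote node.
 */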
9777 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
9778 {
9779 struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
9780 struct md_rdev *rdev2, *tmp;
9781 int role, ret;
9782
9783 /*
9784 * If size is changed in another node then we need to
9785 * do resize as well.
9786 */
9787 if (mddev->dev_sectors != le64_to_cpu(sb->size)) {
9788 ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size));
9789 if (ret)
9790 pr_info("md-cluster: resize failed\n");
9791 else
9792 md_bitmap_update_sb(mddev->bitmap);
9793 }
9794
9795 /* Check for change of roles in the active devices */
9796 rdev_for_each_safe(rdev2, tmp, mddev) {
9797 if (test_bit(Faulty, &rdev2->flags))
9798 continue;
9799
9800 /* Check if the roles changed */
9801 role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
9802
9803 if (test_bit(Candidate, &rdev2->flags)) {
9804 if (role == MD_DISK_ROLE_FAULTY) {
9805 pr_info("md: Removing Candidate device %pg because add failed\n",
9806 rdev2->bdev);
9807 md_kick_rdev_from_array(rdev2);
9808 continue;
9809 }
9810 else
9811 clear_bit(Candidate, &rdev2->flags);
9812 }
9813
9814 if (role != rdev2->raid_disk) {
9815 /*
9816 * The device got activated on another node; activate it here too, unless a reshape is happening.
9817 */
9818 if (rdev2->raid_disk == -1 && role != MD_DISK_ROLE_SPARE &&
9819 !(le32_to_cpu(sb->feature_map) &
9820 MD_FEATURE_RESHAPE_ACTIVE)) {
9821 rdev2->saved_raid_disk = role;
9822 ret = remove_and_add_spares(mddev, rdev2);
9823 pr_info("Activated spare: %pg\n",
9824 rdev2->bdev);
9825 /* wake up mddev->thread here, so the array can
9826 * perform resync with the newly activated disk */
9827 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9828 md_wakeup_thread(mddev->thread);
9829 }
9830 /* device faulty
9831 * We just want to do the minimum to mark the disk
9832 * as faulty. The recovery is performed by the
9833 * node that initiated the error.
9834 */
9835 if (role == MD_DISK_ROLE_FAULTY ||
9836 role == MD_DISK_ROLE_JOURNAL) {
9837 md_error(mddev, rdev2);
9838 clear_bit(Blocked, &rdev2->flags);
9839 }
9840 }
9841 }
9842
9843 if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) {
9844 ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
9845 if (ret)
9846 pr_warn("md: updating array disks failed. %d\n", ret);
9847 }
9848
9849 /*
9850 * Since mddev->delta_disks has already been updated in update_raid_disks,
9851 * it is time to check for reshape.
9852 */
9853 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
9854 (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
9855 /*
9856 * reshape is happening in the remote node, we need to
9857 * update reshape_position and call start_reshape.
9858 */
9859 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
9860 if (mddev->pers->update_reshape_pos)
9861 mddev->pers->update_reshape_pos(mddev);
9862 if (mddev->pers->start_reshape)
9863 mddev->pers->start_reshape(mddev);
9864 } else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
9865 mddev->reshape_position != MaxSector &&
9866 !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
9867 /* reshape is just done in another node. */
9868 mddev->reshape_position = MaxSector;
9869 if (mddev->pers->update_reshape_pos)
9870 mddev->pers->update_reshape_pos(mddev);
9871 }
9872
9873 /* Finally set the event to be up to date */
9874 mddev->events = le64_to_cpu(sb->events);
9875 }
9876
9877 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
9878 {
9879 int err;
9880 struct page *swapout = rdev->sb_page;
9881 struct mdp_superblock_1 *sb;
9882
9883 /* Store the sb page of the rdev in the swapout temporary
9884 * variable in case we fail later and need to restore it
9885 */
9886 rdev->sb_page = NULL;
9887 err = alloc_disk_sb(rdev);
9888 if (err == 0) {
9889 ClearPageUptodate(rdev->sb_page);
9890 rdev->sb_loaded = 0;
9891 err = super_types[mddev->major_version].
9892 load_super(rdev, NULL, mddev->minor_version);
9893 }
9894 if (err < 0) {
9895 pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
9896 __func__, __LINE__, rdev->desc_nr, err);
9897 if (rdev->sb_page)
9898 put_page(rdev->sb_page);
9899 rdev->sb_page = swapout;
9900 rdev->sb_loaded = 1;
9901 return err;
9902 }
9903
9904 sb = page_address(rdev->sb_page);
9905 /* Read the recovery offset if MD_FEATURE_RECOVERY_OFFSET
9906 * is set
9907 */
9908
9909 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
9910 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
9911
9912 /* The other node finished recovery, call spare_active to set
9913 * the device In_sync and update mddev->degraded
9914 */
9915 if (rdev->recovery_offset == MaxSector &&
9916 !test_bit(In_sync, &rdev->flags) &&
9917 mddev->pers->spare_active(mddev))
9918 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
9919
9920 put_page(swapout);
9921 return 0;
9922 }
9923
9924 void md_reload_sb(struct mddev *mddev, int nr)
9925 {
9926 struct md_rdev *rdev = NULL, *iter;
9927 int err;
9928
9929 /* Find the rdev */
9930 rdev_for_each_rcu(iter, mddev) {
9931 if (iter->desc_nr == nr) {
9932 rdev = iter;
9933 break;
9934 }
9935 }
9936
9937 if (!rdev) {
9938 pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
9939 return;
9940 }
9941
9942 err = read_rdev(mddev, rdev);
9943 if (err < 0)
9944 return;
9945
9946 check_sb_changes(mddev, rdev);
9947
9948 /* Read all rdev's to update recovery_offset */
9949 rdev_for_each_rcu(rdev, mddev) {
9950 if (!test_bit(Faulty, &rdev->flags))
9951 read_rdev(mddev, rdev);
9952 }
9953 }
9954 EXPORT_SYMBOL(md_reload_sb);
9955
9956 #ifndef MODULE
9957
9958 /*
9959 * Searches all registered partitions for autorun RAID arrays
9960 * at boot time.
9961 */
9962
9963 static DEFINE_MUTEX(detected_devices_mutex);
9964 static LIST_HEAD(all_detected_devices);
9965 struct detected_devices_node {
9966 struct list_head list;
9967 dev_t dev;
9968 };
9969
9970 void md_autodetect_dev(dev_t dev)
9971 {
9972 struct detected_devices_node *node_detected_dev;
9973
9974 node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
9975 if (node_detected_dev) {
9976 node_detected_dev->dev = dev;
9977 mutex_lock(&detected_devices_mutex);
9978 list_add_tail(&node_detected_dev->list, &all_detected_devices);
9979 mutex_unlock(&detected_devices_mutex);
9980 }
9981 }
9982
9983 void md_autostart_arrays(int part)
9984 {
9985 struct md_rdev *rdev;
9986 struct detected_devices_node *node_detected_dev;
9987 dev_t dev;
9988 int i_scanned, i_passed;
9989
9990 i_scanned = 0;
9991 i_passed = 0;
9992
9993 pr_info("md: Autodetecting RAID arrays.\n");
9994
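/*
 * Drain the list built by md_autodetect_dev(): import each device
 * (dropping the mutex around the potentially slow probe) and queue the
 * usable ones on pending_raid_disks for autorun_devices() below.
 */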
9995 mutex_lock(&detected_devices_mutex);
9996 while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
9997 i_scanned++;
9998 node_detected_dev = list_entry(all_detected_devices.next,
9999 struct detected_devices_node, list);
10000 list_del(&node_detected_dev->list);
10001 dev = node_detected_dev->dev;
10002 kfree(node_detected_dev);
10003 mutex_unlock(&detected_devices_mutex);
10004 rdev = md_import_device(dev, 0, 90);
10005 mutex_lock(&detected_devices_mutex);
10006 if (IS_ERR(rdev))
10007 continue;
10008
10009 if (test_bit(Faulty, &rdev->flags))
10010 continue;
10011
10012 set_bit(AutoDetected, &rdev->flags);
10013 list_add(&rdev->same_set, &pending_raid_disks);
10014 i_passed++;
10015 }
10016 mutex_unlock(&detected_devices_mutex);
10017
10018 pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);
10019
10020 autorun_devices(part);
10021 }
10022
10023 #endif /* !MODULE */
10024
10025 static __exit void md_exit(void)
10026 {
10027 struct mddev *mddev, *n;
10028 int delay = 1;
10029
10030 unregister_blkdev(MD_MAJOR, "md");
10031 unregister_blkdev(mdp_major, "mdp");
10032 unregister_reboot_notifier(&md_notifier);
10033 unregister_sysctl_table(raid_table_header);
10034
10035 /* We cannot unload the modules while some process is
10036 * waiting for us in select() or poll() - wake them up
10037 */
10038 md_unloading = 1;
10039 while (waitqueue_active(&md_event_waiters)) {
10040 /* not safe to leave yet */
10041 wake_up(&md_event_waiters);
10042 msleep(delay);
10043 delay += delay;
10044 }
10045 remove_proc_entry("mdstat", NULL);
10046
10047 spin_lock(&all_mddevs_lock);
10048 list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
10049 if (!mddev_get(mddev))
10050 continue;
10051 spin_unlock(&all_mddevs_lock);
10052 export_array(mddev);
10053 mddev->ctime = 0;
10054 mddev->hold_active = 0;
10055 /*
10056 * As the mddev is now fully clear, mddev_put will schedule
10057 * the mddev for destruction by a workqueue, and the
10058 * destroy_workqueue() below will wait for that to complete.
10059 */
10060 mddev_put(mddev);
10061 spin_lock(&all_mddevs_lock);
10062 }
10063 spin_unlock(&all_mddevs_lock);
10064
10065 destroy_workqueue(md_misc_wq);
10066 destroy_workqueue(md_bitmap_wq);
10067 destroy_workqueue(md_wq);
10068 }
10069
10070 subsys_initcall(md_init);
10071 module_exit(md_exit)
10072
10073 static int get_ro(char *buffer, const struct kernel_param *kp)
10074 {
10075 return sprintf(buffer, "%d\n", start_readonly);
10076 }
10077 static int set_ro(const char *val, const struct kernel_param *kp)
10078 {
10079 return kstrtouint(val, 10, (unsigned int *)&start_readonly);
10080 }
10081
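/*
 * Module parameters: "start_ro" makes newly assembled arrays start in
 * auto-read-only mode until the first write, and "start_dirty_degraded"
 * allows arrays that are both dirty and degraded to be started, at the
 * risk of undetected corruption on parity RAID levels.
 */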
10082 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
10083 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
10084 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
10085 module_param(create_on_open, bool, S_IRUSR|S_IWUSR);
10086
10087 MODULE_LICENSE("GPL");
10088 MODULE_DESCRIPTION("MD RAID framework");
10089 MODULE_ALIAS("md");
10090 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);
10091